# -*- coding: utf-8 -*-
"""
Created on Sat Aug 05 23:55:12 2018

@author: <NAME>, <NAME>
"""
import sys

import numpy as np
import torch
import torch.nn as nn


# Training
def train(args, model, device, train_loader, optimizer, epoch, iteration):
    model.train()
    criterion = nn.CrossEntropyLoss(size_average=True)  # previous PyTorch versions
    # criterion = nn.CrossEntropyLoss(reduction='mean')  # current equivalent of size_average=True
    for i_batch, sample_batched in enumerate(train_loader):
        data, target = sample_batched["image"].to(device), sample_batched["label"].to(device)
        optimizer.zero_grad()
        output = model(data)
        pred = output.max(1, keepdim=True)[1]
        correct = pred.eq(target.view_as(pred)).sum().item()
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        if i_batch % args.log_interval == 0:
            sys.stdout.write(
                "\repoch:{0:>3} iteration:{1:>6} train_loss: {2:.6f} train_accuracy: {3:5.2f}%".format(
                    epoch, iteration, loss.item(),
                    100. * correct / float(len(sample_batched["label"]))))
            sys.stdout.flush()
        iteration += 1


# Validation
def val(args, model, device, test_loader, iteration):
    model.eval()
    criterion = nn.CrossEntropyLoss(size_average=False)  # previous PyTorch versions
    # criterion = nn.CrossEntropyLoss(reduction='sum')  # current equivalent of size_average=False
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for i_batch, sample_batched in enumerate(test_loader):
            data, target = sample_batched["image"].to(device), sample_batched["label"].to(device)
            output = model(data)
            test_loss += criterion(output, target).item()
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= float(len(test_loader.dataset))
    correct /= float(len(test_loader.dataset))
    print("\nValidation: Accuracy: {0:.2f}% test_loss: {1:.6f}".format(100. * correct, test_loss))
    return test_loss, 100. * correct
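For orientation, a minimal driver sketch showing how train() and val() above might be wired together; the argparse namespace, the SGD optimizer, and the model and loader arguments are illustrative assumptions, not part of the original script.

# Hedged usage sketch (not part of the original file); all names below are assumed.
import argparse

import torch.optim as optim


def run_training(model, train_loader, test_loader):
    args = argparse.Namespace(log_interval=10, epochs=5, lr=0.01)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr)
    iteration = 0
    best_loss = float("inf")
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch, iteration)
        test_loss, accuracy = val(args, model, device, test_loader, iteration)
        best_loss = min(best_loss, test_loss)
        # train() only updates a local copy of the counter, so advance it here by one epoch's batches
        iteration += len(train_loader)
    return best_loss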
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: <NAME>
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
from collections import OrderedDict

from django import forms
from django.dispatch import receiver
from django.utils.translation import gettext_lazy as _

from pretix.base.models import OrderPosition

from ..exporter import BaseExporter
from ..models import Order
from ..signals import register_data_exporters, register_multievent_data_exporters


class MailExporter(BaseExporter):
    identifier = 'mailaddrs'
    verbose_name = _('Email addresses (text file)')

    def render(self, form_data: dict):
        qs = Order.objects.filter(event__in=self.events, status__in=form_data['status']).prefetch_related('event')
        addrs = qs.values('email')
        pos = OrderPosition.objects.filter(
            order__event__in=self.events, order__status__in=form_data['status']
        ).values('attendee_email')
        data = "\r\n".join(
            set(a['email'] for a in addrs if a['email'])
            | set(a['attendee_email'] for a in pos if a['attendee_email'])
        )
        if self.is_multievent:
            return '{}_pretixemails.txt'.format(self.events.first().organizer.slug), 'text/plain', data.encode("utf-8")
        else:
            return '{}_pretixemails.txt'.format(self.event.slug), 'text/plain', data.encode("utf-8")

    @property
    def export_form_fields(self):
        return OrderedDict([
            ('status',
             forms.MultipleChoiceField(
                 label=_('Filter by status'),
                 initial=[Order.STATUS_PENDING, Order.STATUS_PAID],
                 choices=Order.STATUS_CHOICE,
                 widget=forms.CheckboxSelectMultiple,
                 required=True
             )),
        ])


@receiver(register_data_exporters, dispatch_uid="exporter_mail")
def register_mail_export(sender, **kwargs):
    return MailExporter


@receiver(register_multievent_data_exporters, dispatch_uid="multiexporter_mail")
def register_multievent_mail_export(sender, **kwargs):
    return MailExporter
import terrascript
import terrascript.provider
import terrascript.resource

import tests.shared


def test_example_001():
    config = terrascript.Terrascript()
    config += terrascript.provider.aws(region="us-east-1", version="~> 2.0")
    config += terrascript.resource.aws_vpc("example", cidr_block="10.0.0.0/16")

    tests.shared.assert_deep_equal(config, "test_001.tf.json")
import torchvision.transforms as transforms
import torch.nn as nn
import random

from .image_transforms import resize_4d_tensor_by_factor, resize_4d_tensor_by_size

imagenet_transform = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])


class random_resize(nn.Module):
    def __init__(self, max_size_factor, min_size_factor):
        super().__init__()
        self.max_size_factor = max_size_factor
        self.min_size_factor = min_size_factor

    def forward(self, x):
        # size = random.randint(a=300, b=600)
        # resized = resize_4d_tensor_by_size(x=x, height=size, width=size)
        height_factor = random.uniform(a=self.min_size_factor, b=self.max_size_factor)
        width_factor = random.uniform(a=self.min_size_factor, b=self.max_size_factor)
        resized = resize_4d_tensor_by_factor(x=x, height_factor=height_factor, width_factor=width_factor)
        return resized


class pair_random_resize(nn.Module):
    def __init__(self, max_size_factor, min_size_factor):
        super().__init__()
        self.max_size_factor = max_size_factor
        self.min_size_factor = min_size_factor

    def forward(self, tensors=[]):
        # Sample one pair of factors and apply it to every tensor in the list
        height_factor = random.uniform(a=self.min_size_factor, b=self.max_size_factor)
        width_factor = random.uniform(a=self.min_size_factor, b=self.max_size_factor)
        outputs = []
        for x in tensors:
            resized_tensor = resize_4d_tensor_by_factor(x=x, height_factor=height_factor, width_factor=width_factor)
            outputs.append(resized_tensor)
        return outputs


class pair_random_affine(nn.Module):
    def __init__(self, degrees, translate_x, translate_y):
        super().__init__()
        self.degrees = degrees
        self.translate_x = translate_x
        self.translate_y = translate_y
        self.affine = transforms.RandomAffine(degrees=self.degrees, translate=(self.translate_x, self.translate_y))

    def forward(self, tensors=[]):
        # Sample one set of affine parameters and apply it to every tensor in the list
        params = self.affine.get_params(
            degrees=(-self.degrees, self.degrees),
            translate=(self.translate_x, self.translate_y),
            scale_ranges=(1, 1),
            shears=(0, 0),
            img_size=(tensors[0].shape[-2], tensors[0].shape[-1]),
        )
        outputs = []
        for x in tensors:
            affined = transforms.functional.affine(x, *params)
            outputs.append(affined)
        return outputs
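A short, hedged usage sketch of the transforms above; the 4-D NCHW tensor and the factor and affine values are illustrative assumptions, and it presumes the relative image_transforms import resolves in the surrounding package.

# Hedged usage sketch; the input tensor and all parameter values below are assumed.
import torch

batch = torch.rand(1, 3, 224, 224)  # fake NCHW image batch

resize = random_resize(max_size_factor=1.2, min_size_factor=0.8)
resized = resize(batch)  # randomly rescaled copy of the batch

pair_affine = pair_random_affine(degrees=15, translate_x=0.1, translate_y=0.1)
view_a, view_b = pair_affine([batch, batch.clone()])  # same affine params applied to both views

normalized = imagenet_transform(resized.squeeze(0))  # per-channel ImageNet normalization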
from .base import db
from webservices import docs


class ElectionResult(db.Model):
    __tablename__ = 'ofec_election_result_mv'

    election_yr = db.Column(db.Integer, primary_key=True, doc=docs.ELECTION_YEAR)
    cand_office = db.Column(db.String, primary_key=True, doc=docs.OFFICE)
    cand_office_st = db.Column(db.String, primary_key=True, doc=docs.STATE_GENERIC)
    cand_office_district = db.Column(db.String, primary_key=True, doc=docs.DISTRICT)
    election_type = db.Column(db.String)
    fec_election_yr = db.Column(db.Integer)
    cand_id = db.Column(db.String, doc=docs.CANDIDATE_ID)
    cand_name = db.Column(db.String, doc=docs.CANDIDATE_NAME)


class ElectionsList(db.Model):
    __tablename__ = 'ofec_elections_list_mv'

    idx = db.Column(db.Integer, primary_key=True)
    sort_order = db.Column(db.Integer)
    office = db.Column(db.String, doc=docs.OFFICE)
    state = db.Column(db.String, doc=docs.STATE_GENERIC)
    district = db.Column(db.String, doc=docs.DISTRICT)
    cycle = db.Column(db.Integer)
    incumbent_id = db.Column(db.String, doc=docs.CANDIDATE_ID)
    incumbent_name = db.Column(db.String, doc=docs.CANDIDATE_NAME)


class ZipsDistricts(db.Model):
    __table_args__ = {'schema': 'staging'}
    __tablename__ = 'ref_zip_to_district'

    zip_district_id = db.Column(db.Integer, primary_key=True)
    district = db.Column(db.String, doc=docs.DISTRICT)
    zip_code = db.Column(db.String)
    state_abbrevation = db.Column(db.String)
    active = db.Column(db.String)
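A brief, hedged query sketch against the ElectionsList model above; it assumes a configured Flask-SQLAlchemy db.session and uses illustrative filter values, neither of which appears in this file.

# Hedged usage sketch: assumes `db.session` is bound to a database containing these tables.
def senate_elections_for_cycle(cycle):
    # Illustrative filter values; 'S' stands in for a Senate office code here.
    return (
        db.session.query(ElectionsList)
        .filter(ElectionsList.office == 'S', ElectionsList.cycle == cycle)
        .order_by(ElectionsList.sort_order)
        .all()
    )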
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. <import_stmt>itertools<import_from_stmt>torch.testing._internal.common_utils TestCase run_tests is_iterable_of_tensors<import_stmt>torch<import_from_stmt>torch Tensor<import_stmt>functools<import_from_stmt>torch.testing._internal.common_device_type instantiate_device_type_tests<import_from_stmt>torch.testing._internal.common_device_type ops<import_from_stmt>torch.testing._internal.common_device_type toleranceOverride tol<import_from_stmt>functorch_lagging_op_db functorch_lagging_op_db<import_from_stmt>functorch_additional_op_db additional_op_db<import_from_stmt>common_utils get_fallback_and_vmap_exhaustive get_exhaustive_batched_inputs xfail skip skipOps tol1 # tol2, opsToleranceOverride check_vmap_fallback <import_stmt>unittest<import_from_stmt>torch.utils._pytree tree_flatten tree_unflatten tree_map<import_from_stmt>functorch grad vjp vmap jacrev jacfwd<import_stmt>torch.autograd.forward_ad<as>fwAD<import_from_stmt>functorch._src.eager_transforms _as_tuple jvp<line_sep>aten=torch.ops.aten<line_sep># Version of autograd.grad that handles outputs that don't depend on inputs <def_stmt>_autograd_grad outputs inputs grad_outputs=<none> retain_graph=<false> create_graph=<true><block_start>inputs,inputs_spec=tree_flatten(inputs)<line_sep>result=[torch.zeros_like(inp)<for>inp inputs]<line_sep>diff_argnums=tuple(i<for>i,inp enumerate(inputs)<if>inp.requires_grad)<line_sep>inputs=tuple(inputs[i]<for>i diff_argnums)<if_stmt>grad_outputs<is><none><block_start>diff_outputs=tuple(out<for>out outputs<if>out.requires_grad)<block_end><else_stmt><block_start>something=[(out go)<for>out,go zip(outputs grad_outputs)<if>out.requires_grad]<if_stmt>len(something)<eq>0<block_start>diff_outputs,grad_outputs=() ()<block_end><else_stmt><block_start>diff_outputs,grad_outputs=zip(*something)<block_end><block_end><if_stmt>len(diff_outputs)<eq>0<block_start><return>tuple(torch.zeros_like(inp)<for>inp inputs)<block_end>grad_inputs=torch.autograd.grad(diff_outputs inputs grad_outputs retain_graph=retain_graph create_graph=create_graph allow_unused=<true>)<line_sep>grad_inputs=tuple(torch.zeros_like(inp)<if>gi<is><none><else>gi<for>gi,inp zip(grad_inputs inputs))<for_stmt>idx,grad_inp zip(diff_argnums grad_inputs)<block_start>result[idx]=grad_inp<block_end><return>tree_unflatten(result inputs_spec)<block_end><def_stmt>diff_arg arg requires_grad=<true><block_start><def_stmt>is_differentiable_arg arg<block_start><if_stmt>requires_grad<block_start><return>arg.requires_grad<block_end><else_stmt><block_start><return>arg.is_floating_point()<or>arg.is_complex()<block_end><block_end><if_stmt>is_iterable_of_tensors(arg)<block_start><if_stmt>all([is_differentiable_arg(a)<for>a arg])<block_start><return><true><block_end><if_stmt>all([<not>is_differentiable_arg(a)<for>a arg])<block_start><return><false><block_end><raise>RuntimeError("NYI: The test runner can't handle this")<block_end><return>isinstance(arg Tensor)<and>is_differentiable_arg(arg)<block_end># Given f, returns an f' such that: # - f' takes only positional arguments # - All arguments to f' are floating-point Tensors # - All outputs of f' are floating-point Tensors <def_stmt>normalize_op_input_output2 f args kwargs output_process_fn_grad=<none> requires_grad=<true><block_start>flat_args,args_spec=tree_flatten(args)<line_sep>diff_argnums=tuple(i<for>i,arg 
enumerate(flat_args)<if>diff_arg(arg requires_grad=requires_grad))<assert_stmt>len(diff_argnums)<g>0<line_sep>primals=tuple(flat_args[i]<for>i diff_argnums)<line_sep>@functools.wraps(f)<def_stmt>wrapped *primals<block_start>_args=list(flat_args)<for_stmt>num,arg zip(diff_argnums primals)<block_start>_args[num]=arg<block_end>_args=tree_unflatten(_args args_spec)<line_sep>result=f(*_args **kwargs)<if_stmt>output_process_fn_grad<is><not><none><block_start>result=output_process_fn_grad(result)<block_end><if_stmt>isinstance(result tuple)# TODO: Remove the following hack for namedtuples <block_start>result=tuple(result)<line_sep>result=tuple(r<for>r result<if>torch.is_floating_point(r))<assert_stmt>len(result)<g>0<block_end><return>result<block_end><return>wrapped primals<block_end># TODO: consolidate with normalize_op_input_output2 <def_stmt>normalize_op_input_output3 f args kwargs sample_args output_process_fn_grad=<none><block_start>flat_args,args_spec=tree_flatten(args)<line_sep>flat_sample_args,_=tree_flatten(sample_args)<line_sep>diff_argnums=tuple(i<for>i,(arg sample) enumerate(zip(flat_args flat_sample_args))<if>diff_arg(sample requires_grad=<true>))<assert_stmt>len(diff_argnums)<g>0<line_sep>primals=tuple(flat_args[i]<for>i diff_argnums)<line_sep>@functools.wraps(f)<def_stmt>wrapped *primals<block_start>_args=list(flat_args)<for_stmt>num,arg zip(diff_argnums primals)<block_start>_args[num]=arg<block_end>_args=tree_unflatten(_args args_spec)<line_sep>result=f(*_args **kwargs)<if_stmt>output_process_fn_grad<is><not><none><block_start>result=output_process_fn_grad(result)<block_end><if_stmt>isinstance(result tuple)# TODO: Remove the following hack for namedtuples <block_start>result=tuple(result)<line_sep>result=tuple(r<for>r result<if>torch.is_floating_point(r))<assert_stmt>len(result)<g>0<block_end><return>result<block_end><return>wrapped primals<block_end><def_stmt>normalize_op_input_output f sample requires_grad=<true><block_start>args=tuple([sample.input]+list(sample.args))<line_sep><return>normalize_op_input_output2(f args sample.kwargs sample.output_process_fn_grad requires_grad=requires_grad)<block_end><def_stmt>ref_vjp f *primals<block_start>result=f(*primals)<def_stmt>wrapped cotangents<block_start><return>_autograd_grad(_as_tuple(result) primals _as_tuple(cotangents))<block_end><return>result wrapped<block_end><def_stmt>simulate_jvp f primals tangents<block_start>primals_out,tangents_out=torch.autograd.functional.jvp(f primals tangents)<line_sep><return>primals_out tangents_out<block_end><def_stmt>ref_jvp f primals tangents<block_start><with_stmt>fwAD.dual_level()<block_start>duals=tuple(fwAD.make_dual(p t)<for>p,t zip(primals tangents))<line_sep>result_duals=f(*duals)<line_sep>result_duals,spec=tree_flatten(result_duals)<line_sep>primals_out,tangents_out=zip(*(fwAD.unpack_dual(d)<for>d result_duals))<line_sep><return>tree_unflatten(primals_out spec) tree_unflatten(tangents_out spec)<block_end><block_end><def_stmt>get_sample_cotangents f sample<block_start>fn,primals=normalize_op_input_output(f sample)<line_sep>output=fn(*primals)<line_sep><return>tree_map(torch.randn_like output)<block_end># returns a new function g(*args, *cotangents) # that computes vjps and (*args, cotangents) <def_stmt>get_vjp_fn_and_args_with_cotangents f sample 
cotangents<block_start>args=tuple([sample.input]+list(sample.args))<line_sep>kwargs=sample.kwargs<line_sep>flat_args,args_spec=tree_flatten(args)<line_sep>flat_cotangents,cotangents_spec=tree_flatten(cotangents)<line_sep>@functools.wraps(f)<def_stmt>wrapped *args<block_start><assert_stmt>len(args)<eq>len(flat_args)+len(flat_cotangents)<line_sep>actual_args=args[:len(flat_args)]<line_sep>cotangents=args[len(flat_args):]<line_sep>actual_args=tree_unflatten(actual_args args_spec)<line_sep>cotangents=tree_unflatten(cotangents cotangents_spec)<line_sep>fn,primals=normalize_op_input_output3(f actual_args kwargs flat_args sample.output_process_fn_grad)<line_sep>_,vjp_fn=vjp(fn *primals)<line_sep><return>vjp_fn(cotangents)<block_end><return>wrapped tuple(flat_args+flat_cotangents)<block_end># returns a new function g(*args, *cotangents) # that computes vjps and (*args, cotangents) using torch.autograd.grad <def_stmt>get_autograd_fn_and_args_with_cotangents f sample cotangents<block_start>args=tuple([sample.input]+list(sample.args))<line_sep>kwargs=sample.kwargs<line_sep>flat_args,args_spec=tree_flatten(args)<line_sep>flat_cotangents,cotangents_spec=tree_flatten(cotangents)<line_sep>@functools.wraps(f)<def_stmt>wrapped *args<block_start><assert_stmt>len(args)<eq>len(flat_args)+len(flat_cotangents)<line_sep>actual_args=args[:len(flat_args)]<line_sep>cotangents=args[len(flat_args):]<line_sep>actual_args=tree_unflatten(actual_args args_spec)<line_sep>cotangents=tree_unflatten(cotangents cotangents_spec)<line_sep>fn,primals=normalize_op_input_output3(f actual_args kwargs flat_args sample.output_process_fn_grad)<line_sep>out=fn(*primals)<line_sep>diff_wrt=tuple(primal<for>primal primals<if>(primal.requires_grad<or>primal.grad_fn<is><not><none>))<if_stmt>diff_wrt<block_start><return>torch.autograd.grad(out diff_wrt grad_outputs=cotangents)<block_end><else_stmt><block_start><return>(torch.ones(()) )<block_end><block_end># uuugh hack...this will need to be more generic <return>wrapped tuple(flat_args+flat_cotangents)<block_end># Returns a new function g(*args, *cotangents) that computes vjps and # sample (*args, *cotangents) <def_stmt>get_vjpfull_variant f sample<block_start>fn,primals=normalize_op_input_output(f sample)<line_sep>result=fn(*primals)<line_sep>cotangents=_as_tuple(tree_map(<lambda>x:torch.randn_like(x requires_grad=<true>) result))<line_sep>num_primals=len(primals)<line_sep>args=(*primals *cotangents)<line_sep>@functools.wraps(f)<def_stmt>wrapped *args<block_start>primals=args[:num_primals]<line_sep>cotangents=args[num_primals:]<line_sep>result,vjp_fn=vjp(fn *primals)<if_stmt>isinstance(result torch.Tensor)<block_start><assert_stmt>len(cotangents)<eq>1<line_sep>cotangents=cotangents[0]<block_end><return>vjp_fn(cotangents)<block_end><return>wrapped args<block_end><def_stmt>get_jvp_variant f sample# We want this higher-order variant of jvp, so that it can # be used to wrap vmap <block_start>fn,primals=normalize_op_input_output(f sample requires_grad=<false>)<line_sep>tangents=_as_tuple(tree_map(<lambda>x:torch.randn_like(x) primals))<line_sep>@functools.wraps(f)<def_stmt>wrapped *args<block_start>tangents=args<line_sep>primals_out,tangents_out=jvp(fn primals tangents)<if_stmt>isinstance(primals_out torch.Tensor)<block_start><return>(primals_out tangents_out)<block_end><else_stmt><block_start>flat_primals_out,_=tree_flatten(primals_out)<line_sep>flat_tangents_out,_=tree_flatten(tangents_out)<line_sep><return>tuple(flat_primals_out+flat_tangents_out)<block_end><block_end><return>wrapped 
tangents<block_end><def_stmt>get_jvp_variant_primals_tangents f sample# We want this higher-order variant of jvp, so that it can # be used to wrap vmap <block_start>fn,primals=normalize_op_input_output(f sample requires_grad=<false>)<line_sep>tangents=_as_tuple(tree_map(<lambda>x:torch.randn_like(x) primals))<line_sep>@functools.wraps(f)<def_stmt>wrapped *args<block_start>primals_in=args[:len(primals)]<line_sep>tangents_in=args[len(primals):]<line_sep>primals_out,tangents_out=jvp(fn primals_in tangents_in)<if_stmt>isinstance(primals_out torch.Tensor)<block_start><return>(primals_out tangents_out)<block_end><else_stmt><block_start>flat_primals_out,_=tree_flatten(primals_out)<line_sep>flat_tangents_out,_=tree_flatten(tangents_out)<line_sep><return>tuple(flat_primals_out+flat_tangents_out)<block_end><block_end><return>wrapped primals+tangents<block_end><def_stmt>is_inplace op variant<block_start><if_stmt>hasattr(variant "__wrapped__")<block_start><return>variant.__wrapped__<is>op.get_inplace()<block_end><return>variant<is>op.get_inplace()<block_end>vjp_fail={skip('nn.functional.dropout') # randomness testing artifact skip('nn.functional.rrelu') # randomness testing artifact skip('bernoulli') # randomness testing artifact skip('normal' '') # randomness testing artifact skip('normal' 'number_mean') # randomness testing artifact xfail('tensor_split') xfail('to_sparse') xfail('nn.functional.ctc_loss') skip('nn.functional.feature_alpha_dropout' 'with_train') # fails on cuda, runs okay on cpu skip('nn.functional.feature_alpha_dropout' 'without_train') # fails on cuda, runs okay on cpu skip('pca_lowrank' '') # fails on cuda, runs okay on cpu skip('svd_lowrank' '') # fails on cuda, runs okay on cpu skip('nn.functional.dropout2d' '') # fails on cuda, runs okay on cpu }<class_stmt>TestOperators(TestCase)<block_start>@ops(functorch_lagging_op_db+additional_op_db allowed_dtypes=(torch.float ))@skipOps('TestOperators' 'test_grad' vjp_fail.union({skip('nn.functional.fractional_max_pool2d') # fails on cuda, runs okay on cpu skip('nn.functional.fractional_max_pool3d') # fails on cuda, runs okay on cpu }))@opsToleranceOverride('TestOperators' 'test_grad' (tol1('nn.functional.binary_cross_entropy_with_logits' {torch.float32:tol(atol=1e-04 rtol=1e-04)}) ))<def_stmt>test_grad self device dtype op<block_start><if_stmt>op.name<in>vjp_fail<block_start>self.skipTest("Skipped; Expected failures")<line_sep><return><block_end><if_stmt><not>op.supports_autograd<block_start>self.skipTest("Skipped! Autograd not supported.")<line_sep><return><block_end>samples=op.sample_inputs(device dtype requires_grad=<true>)<line_sep># TODO: test in-place <if_stmt>is_inplace(op op.get_op())<block_start>self.skipTest("Skipped! 
NYI: inplace-testing not supported.")<line_sep><return><block_end><for_stmt>sample samples<block_start>args=[sample.input]+list(sample.args)<line_sep>kwargs=sample.kwargs<line_sep>diff_argnums=tuple(i<for>i,arg enumerate(args)<if>diff_arg(arg))<assert_stmt>len(diff_argnums)<g>0<line_sep>diff_args=tuple(args[i]<for>i diff_argnums)<def_stmt>wrapped_fn *args **kwargs<block_start>result=op(*args **kwargs)<if_stmt>sample.output_process_fn_grad<is><not><none><block_start>result=sample.output_process_fn_grad(result)<block_end># Reduce into single value for grad <if_stmt>isinstance(result torch.Tensor)<block_start><return>result.sum()<block_end>result=sum([res.sum()<for>res result])<line_sep><return>result<block_end>result=grad(wrapped_fn diff_argnums)(*args **kwargs)<line_sep>expected=_autograd_grad(_as_tuple(wrapped_fn(*args **kwargs)) diff_args)<line_sep>self.assertEqual(result expected)<block_end><block_end>@ops(functorch_lagging_op_db+additional_op_db allowed_dtypes=(torch.float ))@skipOps('TestOperators' 'test_jvp' set({skip('nn.functional.dropout') # randomness testing artifact; not actually a problem skip('nn.functional.rrelu') # randomness testing artifact; not actually a problem skip('nn.functional.fractional_max_pool2d') # fails on cuda, runs okay on cpu skip('nn.functional.fractional_max_pool3d') # fails on cuda, runs okay on cpu skip('nn.functional.max_pool1d') # fails on cpu, runs okay on cuda skip('nn.functional.feature_alpha_dropout' 'with_train') # fails on cuda, runs okay on cpu skip('nn.functional.feature_alpha_dropout' 'without_train') # fails on cuda, runs okay on cpu skip('pca_lowrank' '') # fails on cuda, runs okay on cpu skip('svd_lowrank' '') # fails on cuda, runs okay on cpu skip('nn.functional.dropout2d' '') # fails on cuda, runs okay on cpu # The following don't have a forward-mode AD formula in PyTorch core # (check derivatives.yaml). xfail('var_mean') xfail('std_mean') # ============================================= # NB: The above failures also fail using PyTorch core's # forward-mode AD and vmap. # The failures below are functorch-specific issues # ============================================= # Composite ops that do bad things. Need to be fixed in PyTorch core. # RuntimeError: Cannot access data pointer of Tensor that doesn't have storage xfail('tensor_split') skip('bernoulli') # cuda set seed randomness issues # BUG: runs and produces numerical differences skip('nn.functional.max_unpool1d') # fails everywhere except on mac skip('nn.functional.max_unpool2d') # fails everywhere except on windows skip('nn.functional.max_unpool3d') # fails everywhere except on mac }))@opsToleranceOverride('TestOperators' 'test_jvp' (tol1('nn.functional.conv_transpose3d' {torch.float32:tol(atol=1e-04 rtol=1.3e-06)} device_type='cuda') tol1('nn.functional.binary_cross_entropy_with_logits' {torch.float32:tol(atol=4e-04 rtol=4e-04)}) ))<def_stmt>test_jvp self device dtype op# TODO: when we change supports_autograd to supports_backward_ad, also change in this file <block_start>VJP_DECOMP={'nn.functional.logsigmoid' }<if_stmt>op.name<in>VJP_DECOMP<block_start>ref_jvp_local=simulate_jvp<block_end><else_stmt><block_start>ref_jvp_local=ref_jvp<block_end><if_stmt><not>op.supports_forward_ad<and>op.name<not><in>VJP_DECOMP<block_start>self.skipTest("Skipped! Forward AD not supported.")<line_sep><return><block_end>samples=op.sample_inputs(device dtype requires_grad=<true>)<line_sep># TODO: test in-place <if_stmt>is_inplace(op op.get_op())<block_start>self.skipTest("Skipped! 
NYI: inplace-testing not supported.")<line_sep><return><block_end><for_stmt>sample samples# NB: we used requires_grad=True to determine where the primals are, # but don't need that information otherwise <block_start>fn,primals=normalize_op_input_output(op sample requires_grad=<true>)<line_sep>primals=tree_map(<lambda>x:x.detach() primals)<line_sep>tangents=tree_map(<lambda>x:torch.randn_like(x) primals)<line_sep>primal_outs,tangent_outs=jvp(fn primals tangents)<line_sep>expected_primal_outs,expected_tangent_outs=ref_jvp_local(fn primals tangents)<line_sep>self.assertEqual(primal_outs expected_primal_outs)<line_sep>self.assertEqual(tangent_outs expected_tangent_outs)<block_end><block_end>@ops(functorch_lagging_op_db+additional_op_db allowed_dtypes=(torch.float ))@skipOps('TestOperators' 'test_vjp' vjp_fail.union({skip('nn.functional.fractional_max_pool2d') # fails on cpu, runs okay on cuda skip('nn.functional.fractional_max_pool3d') # fails on cpu, runs okay on cuda xfail('nn.functional.feature_alpha_dropout' 'with_train') xfail('pca_lowrank' '') xfail('nn.functional.dropout2d' '') xfail('nn.functional.feature_alpha_dropout' 'without_train') xfail('svd_lowrank' '') }))@opsToleranceOverride('TestOperators' 'test_vjp' (tol1('nn.functional.conv_transpose3d' {torch.float32:tol(atol=5e-05 rtol=9e-05)} device_type='cuda') tol1('nn.functional.binary_cross_entropy_with_logits' {torch.float32:tol(atol=1e-04 rtol=1e-04)}) ))<def_stmt>test_vjp self device dtype op<block_start><if_stmt><not>op.supports_autograd<block_start>self.skipTest("Skipped! Autograd not supported.")<line_sep><return><block_end>samples=op.sample_inputs(device dtype requires_grad=<true>)<line_sep># TODO: test in-place <if_stmt>is_inplace(op op.get_op())<block_start>self.skipTest("Skipped! NYI: inplace-testing not supported.")<line_sep><return><block_end><def_stmt>_test _op<block_start><for_stmt>sample samples<block_start>fn,primals=normalize_op_input_output(_op sample)<line_sep>result=fn(*primals)<line_sep>cotangents=tree_map(<lambda>x:torch.randn_like(x) result)<line_sep>out,vjp_fn=vjp(fn *primals)<line_sep>self.assertEqual(out result)<line_sep>result_vjps=vjp_fn(cotangents)<line_sep>_,vjp_fn=ref_vjp(fn *primals)<line_sep>expected_vjps=vjp_fn(cotangents)<line_sep>self.assertEqual(result_vjps expected_vjps)<block_end><block_end>_test(op)<for_stmt>a_op op.aliases<block_start>_test(a_op)<block_end><block_end>@ops(functorch_lagging_op_db+additional_op_db allowed_dtypes=(torch.float ))@skipOps('TestOperators' 'test_vjpvjp' vjp_fail.union({skip('nn.functional.max_unpool1d') # Flaky skip('nn.functional.max_unpool2d') # Flaky skip('nn.functional.fractional_max_pool2d') # randomness skip('nn.functional.fractional_max_pool3d') # randomness }))@opsToleranceOverride('TestOperators' 'test_vjpvjp' (tol1('nn.functional.conv_transpose3d' {torch.float32:tol(atol=5e-05 rtol=9e-05)} device_type='cuda') ))<def_stmt>test_vjpvjp self device dtype op<block_start><if_stmt><not>op.supports_autograd<block_start>self.skipTest("Skipped! Autograd not supported.")<line_sep><return><block_end><if_stmt><not>op.supports_gradgrad<block_start>self.skipTest("Skipped! Operation does not support gradgrad")<line_sep><return><block_end>samples=op.sample_inputs(device dtype requires_grad=<true>)<line_sep># TODO: test in-place <if_stmt>is_inplace(op op.get_op())<block_start>self.skipTest("Skipped! 
NYI: inplace-testing not supported.")<line_sep><return><block_end><for_stmt>sample samples<block_start>fn,args=get_vjpfull_variant(op sample)<line_sep>result=fn(*args)<line_sep>cotangents=tree_map(<lambda>x:torch.randn_like(x) result)<line_sep># Compute vjp of vjp _,vjp_fn=vjp(fn *args)<line_sep>result_vjps=vjp_fn(cotangents)<line_sep># Compute ref_vjp of vjp. We could have done ref_vjp of ref_vjp, # but since we're confident that vjp works by itself, this is # an equivalent way to test that. _,vjp_fn=ref_vjp(fn *args)<line_sep>expected_vjps=vjp_fn(cotangents)<line_sep>self.assertEqual(result_vjps expected_vjps)<block_end><block_end>@ops(functorch_lagging_op_db+additional_op_db allowed_dtypes=(torch.float ))@toleranceOverride({torch.float32:tol(atol=1e-04 rtol=1e-04)})<def_stmt>test_vmapvjpvjp self device dtype op<block_start>self.skipTest("Skipped; these tests take too long")<line_sep>op_skip=set({})<line_sep>op_skip=op_skip.union(vjp_fail)<if_stmt>op.name<in>op_skip<block_start>self.skipTest("Skipped; Expected failures")<line_sep><return><block_end><if_stmt><not>op.supports_autograd<block_start>self.skipTest("Skipped! Autograd not supported.")<line_sep><return><block_end><if_stmt><not>op.supports_gradgrad<block_start>self.skipTest("Skipped! Operation does not support gradgrad")<line_sep><return><block_end>samples=op.sample_inputs(device dtype requires_grad=<true>)<line_sep># TODO: test in-place <if_stmt>is_inplace(op op.get_op())<block_start>self.skipTest("Skipped! NYI: inplace-testing not supported.")<line_sep><return><block_end><for_stmt>sample samples<block_start>fn,args=get_vjpfull_variant(op sample)<line_sep>result=fn(*args)<line_sep>cotangents=tree_map(<lambda>x:torch.randn_like(x) result)<line_sep>cotangents,_=tree_flatten(cotangents)<line_sep>num_args=len(args)<line_sep>args_and_cotangents=tuple(args)+tuple(cotangents)<def_stmt>vjp_of_vjp *args_and_cotangents<block_start>args=args_and_cotangents[:num_args]<line_sep>cotangents=args_and_cotangents[num_args:]<line_sep>result,vjp_fn=vjp(fn *args)<line_sep>result_vjps=vjp_fn(cotangents)<line_sep>result,_=tree_flatten(result)<line_sep>result_vjps,_=tree_flatten(result_vjps)<line_sep><return>(*result *result_vjps)<block_end>generator=get_fallback_and_vmap_exhaustive(vjp_of_vjp args_and_cotangents {} opinfo=op)<for_stmt>loop_out,batched_out generator<block_start>self.assertEqual(loop_out batched_out)<block_end><block_end><block_end>vmapvjp_fail=vjp_fail.union({# The following are not bugs and are expected behavior xfail('masked_select') # Not possible due to dynamic shapes skip('bernoulli') # randomness skip('normal' '') # randomness skip('normal' 'number_mean') # randomness xfail('nn.functional.dropout') # randomness xfail('as_strided') # as_strided is too wild for us to support, wontfix xfail('index_put' '') # not possible due to dynamic shapes; we support a subset xfail('masked_scatter') # dynamic xfail('nn.functional.fractional_max_pool2d') # random xfail('nn.functional.fractional_max_pool3d') # random xfail('take') # dynamic # All of the following are bugs and need to be fixed skip('linalg.svdvals') # # really annoying thing where it passes correctness check but not has_batch_rule xfail('__getitem__' '') # dynamic error xfail('_masked.prod') # calls aten::item xfail('eig') # calls aten::item xfail('linalg.det' '') # calls .item() xfail('linalg.eig') # Uses aten::allclose xfail('linalg.eigh') # needs diag_scatter xfail('linalg.householder_product') # needs select_scatter xfail('linalg.slogdet') # calls .item() xfail('logdet') # calls 
.item() xfail('matrix_exp') # would benefit from narrow_scatter xfail('nanquantile') # checks q via a .item() call xfail('nn.functional.gaussian_nll_loss') # checks var for if any value < 0 xfail('prod') # calls nonzero xfail('put') xfail('quantile') # checks q via a .item() call xfail('stft') xfail('symeig') # would benefit from diag_scatter xfail('view_as_complex') # required rank 4 tensor to use channels_last format xfail('bfloat16') xfail('double') xfail('float') xfail('half') xfail('scatter_reduce' 'prod') # item call # NYI: querying is_contiguous inside of vmap for memory_format other than torch.contiguous_format xfail('nn.functional.max_unpool2d') xfail('nn.functional.max_unpool2d' 'grad') })<line_sep>@ops(functorch_lagging_op_db+additional_op_db allowed_dtypes=(torch.float ))@toleranceOverride({torch.float32:tol(atol=1e-04 rtol=1e-04)})@opsToleranceOverride('TestOperators' 'test_vmapvjp' (tol1('linalg.svd' {torch.float32:tol(atol=1.5e-04 rtol=1e-04)} device_type="cuda") tol1('svd' {torch.float32:tol(atol=1.5e-04 rtol=1e-04)} device_type="cuda") ))@skipOps('TestOperators' 'test_vmapvjp' vmapvjp_fail)<def_stmt>test_vmapvjp self device dtype op<block_start><if_stmt><not>op.supports_autograd<block_start>self.skipTest("Skipped! Autograd not supported.")<line_sep><return><block_end>samples=op.sample_inputs(device dtype requires_grad=<true>)<line_sep># TODO: test in-place <if_stmt>is_inplace(op op.get_op())<block_start>self.skipTest("Skipped! NYI: inplace-testing not supported.")<line_sep><return><block_end><for_stmt>sample samples<block_start>cotangents=get_sample_cotangents(op sample)<line_sep>fn,args=get_vjp_fn_and_args_with_cotangents(op sample cotangents)<for_stmt>loop_out,batched_out get_fallback_and_vmap_exhaustive(fn args {} opinfo=op)<block_start>self.assertEqual(loop_out batched_out)<block_end><block_end><block_end># There are several variations we care about # 1) primal batched (TODO) # 2) tangent batched (batched grads) <-- # 3) both batched (TODO) # The below tests (2) only. @ops(functorch_lagging_op_db allowed_dtypes=(torch.float ))@toleranceOverride({torch.float32:tol(atol=1e-04 rtol=1e-04)})@skipOps('TestOperators' 'test_vmapjvp' {skip('nn.functional.dropout') # randomness skip('nn.functional.rrelu') # randomness skip('nn.functional.fractional_max_pool2d') # randomness skip('nn.functional.fractional_max_pool3d') # randomness skip('bernoulli' '') # randomness skip('nn.functional.max_pool1d') # fails on cpu, runs on cuda # TODO: fails in core due to in-place batched nto non-batched # but fails here for a different reason xfail('linalg.householder_product') # Try to in-place batched tensor into non-batched tensor xfail('matrix_exp') # Apprently these support forward AD, but we get "Trying to use forward AD..." # These are cases where OpInfo has supports_forward_ad=True, but disables # the test xfail('var_mean') xfail('std_mean') # RuntimeError: expand: the number of sizes provided (1) must be greater or # equal to the number of dimensions in the tensor (2) xfail('nanquantile') xfail('quantile') # Not implemented xfail('scatter') # ============================================= # NB: The above failures also fail in PyTorch core. # The failures below only fail in functorch # ============================================= # Composite ops that do bad things. Need to be fixed in PyTorch core. 
# RuntimeError: Cannot access data pointer of Tensor that doesn't have storage xfail('tensor_split') # Causing multiple forward mode AD issues, needs investigation xfail('nn.functional.batch_norm') xfail('nn.functional.batch_norm' 'without_cudnn' device_type='cuda') skip('nn.functional.feature_alpha_dropout' 'with_train') skip('pca_lowrank' '') skip('nn.functional.dropout2d' '') skip('nn.functional.feature_alpha_dropout' 'without_train') skip('svd_lowrank' '') xfail('nn.functional.soft_margin_loss' '') xfail('stft') # something weird is happening with shapes xfail('double') # required rank 4 tensor to use channels_last format # BUG: runs and produces numerical differences skip('nn.functional.max_unpool1d' device_type='cpu') # fails everywhere except on mac skip('nn.functional.max_unpool2d') # fails everywhere except on mac skip('nn.functional.max_unpool3d') # fails everywhere except on mac xfail('put') # calls put_ during vmap with only vmaps over other, not self })<def_stmt>test_vmapjvp self device dtype op<block_start><if_stmt>is_inplace(op op.get_op())# TODO: test in-place <block_start>self.skipTest("Skipped! NYI: inplace-testing not supported.")<line_sep><return><block_end>samples=op.sample_inputs(device dtype requires_grad=<false>)<if_stmt><not>op.supports_forward_ad<block_start>self.skipTest("Skipped! Forward AD not supported.")<line_sep><return><block_end><for_stmt>sample samples<block_start>arg_values=[sample.input]+list(sample.args)<line_sep>kwarg_values=sample.kwargs<line_sep>args=tuple([*arg_values *kwarg_values])<line_sep>fn,args=get_jvp_variant(op sample)<for_stmt>loop_out,batched_out get_fallback_and_vmap_exhaustive(fn args {} opinfo=op bdims=(0 ))<block_start>self.assertEqual(loop_out batched_out)<block_end><block_end><block_end>vmapjvpall_fail={# The following are expected (not a bug) skip('bernoulli' '') # randomness skip('nn.functional.dropout') # randomness skip('nn.functional.rrelu') # randomness skip('nn.functional.dropout2d' '') skip('nn.functional.feature_alpha_dropout' 'without_train') skip('nn.functional.feature_alpha_dropout' 'with_train') xfail('nn.functional.fractional_max_pool2d') # Cannot access data pointer of Tensor that doesn't have storage xfail('nn.functional.fractional_max_pool3d') # Cannot access data pointer of Tensor that doesn't have storage # The following are bugs that we should fix skip('nn.functional.max_pool1d') # fails on cpu, runs on cuda xfail('nn.functional.batch_norm' device_type='cuda') xfail('nn.functional.batch_norm' 'without_cudnn' device_type='cuda') xfail('_masked.mean') xfail('_masked.prod') # Causing issues with multiple cpu levels of forward mode AD xfail('nn.functional.batch_norm' device_type='cpu') # https://github.com/pytorch/functorch/issues/857 skip('nn.functional.embedding' '') xfail('nn.functional.soft_margin_loss' '') xfail('nn.functional.binary_cross_entropy_with_logits' '') xfail('linalg.householder_product') xfail('tensor_split') xfail('quantile') xfail('var_mean') xfail('as_strided') xfail('nn.functional.gaussian_nll_loss') xfail('std_mean') xfail('scatter') xfail('matrix_exp') xfail('nanquantile') xfail('view_as_complex') xfail('prod') skip('pca_lowrank' '') skip('svd_lowrank' '') xfail('stft') # transpose_ fallback xfail('double') # required rank 4 tensor to use channels_last format skip('nn.functional.max_unpool1d') # Flaky, seems to sometimes his max_unpool2d skip('nn.functional.max_unpool2d') # fails everywhere except on mac skip('nn.functional.max_unpool3d') # fails everywhere except on mac xfail('put') # calls 
put_ during vmap with only vmaps over other, not self xfail('nn.functional.prelu') # Call Tensor.as_strided }<line_sep>@ops(functorch_lagging_op_db allowed_dtypes=(torch.float ))@opsToleranceOverride('TestOperators' 'test_vmapjvpall' (tol1('nn.functional.conv_transpose3d' {torch.float32:tol(atol=2e-04 rtol=9e-3)} device_type='cuda') ))@skipOps('TestOperators' 'test_vmapjvpall' vmapjvpall_fail)@toleranceOverride({torch.float32:tol(atol=1e-04 rtol=1e-04)})# This is technically a superset of test_vmapjvp. We should either delete test_vmapjvp # or figure out if we can split vmapjvpall. It's useful to keep test_vmapjvp intact # because that coresponds to "batched forward-mode AD" testing in PyTorch core <def_stmt>test_vmapjvpall self device dtype op<block_start><if_stmt>is_inplace(op op.get_op())# TODO: test in-place <block_start>self.skipTest("Skipped! NYI: inplace-testing not supported.")<line_sep><return><block_end>samples=op.sample_inputs(device dtype requires_grad=<false>)<if_stmt><not>op.supports_forward_ad<block_start>self.skipTest("Skipped! Forward AD not supported.")<line_sep><return><block_end><for_stmt>sample samples<block_start>arg_values=[sample.input]+list(sample.args)<line_sep>kwarg_values=sample.kwargs<line_sep>args=tuple([*arg_values *kwarg_values])<line_sep>fn,args=get_jvp_variant_primals_tangents(op sample)<for_stmt>loop_out,batched_out get_fallback_and_vmap_exhaustive(fn args {} opinfo=op)<block_start>self.assertEqual(loop_out batched_out)<block_end><block_end><block_end>@ops(functorch_lagging_op_db allowed_dtypes=(torch.float ))@skipOps('TestOperators' 'test_vmapjvpall_has_batch_rule' vmapjvpall_fail.union({xfail('linalg.solve_triangular') xfail('nn.functional.huber_loss') xfail('nn.functional.poisson_nll_loss') xfail('lu') xfail('cumprod') xfail('lu_solve') xfail('linalg.lstsq' 'grad_oriented') xfail('linalg.cholesky') xfail('linalg.qr') xfail('cross') xfail('qr') xfail('linalg.pinv') xfail('masked_fill') xfail('copysign') xfail('linalg.solve') xfail('linalg.eig') xfail('complex') xfail('linalg.pinv' 'hermitian') xfail('pinverse') skip('_masked.mean') # ??? xfail('linalg.cholesky_ex') xfail('masked_scatter') xfail('index_fill') xfail('take') xfail('linalg.eigvals') xfail('linalg.qr') xfail('linalg.tensorsolve') xfail('nn.functional.max_pool3d') xfail('vdot') xfail('linalg.cross') xfail('nn.functional.feature_alpha_dropout' 'without_train') xfail('linalg.lu_factor' '') xfail('nn.functional.dropout2d' '') xfail('nn.functional.kl_div' '') xfail('pca_lowrank' '') xfail('svd_lowrank' '') xfail('linalg.lu_factor_ex' '') xfail('nn.functional.feature_alpha_dropout' 'with_train') xfail('special.log_ndtr' '') xfail('fft.ihfft2') # conj_physical fallback xfail('fft.ihfftn') # conj_physical fallback xfail('istft') # col2im fallback xfail('polar') # complex fallback xfail('nn.functional.l1_loss' '') xfail('nn.functional.max_unpool3d' 'grad') xfail('nn.functional.smooth_l1_loss' '') xfail('nn.functional.max_unpool2d' 'grad') xfail('nn.functional.soft_margin_loss' '') xfail('nn.functional.binary_cross_entropy_with_logits' '') xfail('nn.functional.max_unpool1d' 'grad') xfail('nn.functional.embedding' '') xfail('lu_unpack') xfail('nn.functional.glu') xfail('nn.functional.bilinear') # trilinear doesn't have batching rule }))@toleranceOverride({torch.float32:tol(atol=1e-04 rtol=1e-04)})<def_stmt>test_vmapjvpall_has_batch_rule self device dtype op<block_start><if_stmt>is_inplace(op op.get_op())# TODO: test in-place <block_start>self.skipTest("Skipped! 
NYI: inplace-testing not supported.")<line_sep><return><block_end>samples=op.sample_inputs(device dtype requires_grad=<false>)<if_stmt><not>op.supports_forward_ad<block_start>self.skipTest("Skipped! Forward AD not supported.")<line_sep><return><block_end><def_stmt>test <block_start><for_stmt>sample samples<block_start>arg_values=[sample.input]+list(sample.args)<line_sep>kwarg_values=sample.kwargs<line_sep>args=tuple([*arg_values *kwarg_values])<line_sep>fn,args=get_jvp_variant_primals_tangents(op sample)<for_stmt>loop_out,batched_out get_fallback_and_vmap_exhaustive(fn args {} opinfo=op compute_loop_out=<false>)<block_start><pass><block_end><block_end><block_end>check_vmap_fallback(self test op dry_run=<false>)<block_end>@ops(functorch_lagging_op_db+additional_op_db allowed_dtypes=(torch.float ))@toleranceOverride({torch.float32:tol(atol=1e-04 rtol=1e-04)})@skipOps('TestOperators' 'test_vmapvjp_has_batch_rule' vmapvjp_fail.union({xfail('view_as_complex') xfail('cholesky') xfail('complex') xfail('copysign') xfail('cummax') xfail('cummin') xfail('cumprod') xfail('eig') xfail('nansum') xfail('nanmean') xfail('fmin') xfail('fmax') xfail('special.log_ndtr') xfail('index_copy') xfail('index_fill') xfail('linalg.cholesky') xfail('linalg.cholesky_ex') xfail('linalg.det') xfail('linalg.eig') xfail('linalg.eigh') xfail('linalg.eigvals') xfail('linalg.householder_product') xfail('linalg.lstsq' '') xfail('linalg.lstsq' 'grad_oriented') xfail('linalg.pinv') xfail('linalg.qr') xfail('linalg.pinv' 'hermitian') xfail('linalg.slogdet') xfail('linalg.solve') xfail('logdet') xfail('lu') xfail('lu_solve') xfail('lu_unpack') xfail('masked_fill') xfail('masked_scatter') xfail('masked_select') xfail('matrix_exp') xfail('nanquantile') xfail('pinverse') xfail('prod') xfail('put') xfail('quantile') xfail('renorm') xfail('symeig') xfail('take') xfail('tensor_split') xfail('to_sparse') xfail('unfold') xfail('vdot') xfail('nn.functional.dropout') xfail('_masked.prod') xfail('fft.ihfft2') xfail('fft.ihfftn') xfail('cross') xfail('linalg.cross') xfail('nn.functional.gaussian_nll_loss') xfail('nn.functional.huber_loss') xfail('nn.functional.poisson_nll_loss') xfail('nn.functional.bilinear') xfail('nn.functional.fractional_max_pool3d') xfail('as_strided') xfail('linalg.solve_triangular') xfail('stft') xfail('nn.functional.rrelu') xfail('nn.functional.embedding_bag') xfail('nn.functional.max_pool3d') xfail('istft') xfail('nn.functional.fractional_max_pool2d') xfail('linalg.tensorsolve') xfail('linalg.lu_factor' '') xfail('nn.functional.feature_alpha_dropout' 'with_train') xfail('nn.functional.kl_div' '') xfail('pca_lowrank' '') xfail('nn.functional.dropout2d' '') xfail('nn.functional.feature_alpha_dropout' 'without_train') xfail('svd_lowrank' '') xfail('linalg.lu_factor_ex' '') xfail('nn.functional.max_unpool2d' '') xfail('nn.functional.multi_margin_loss' '') xfail('nn.functional.multilabel_margin_loss' '') xfail('nn.functional.pdist' '') xfail('nn.functional.smooth_l1_loss' '') xfail('scatter_reduce' 'prod') xfail('scatter_reduce' 'amax') xfail('nn.functional.max_unpool1d' '') xfail('nn.functional.max_unpool3d' '') xfail('scatter_reduce' 'sum') xfail('scatter_reduce' 'mean') xfail('nn.functional.max_unpool3d' 'grad') xfail('nn.functional.soft_margin_loss' '') xfail('scatter_reduce' 'amin') xfail('nn.functional.max_unpool1d' 'grad') xfail('nn.functional.l1_loss' '') xfail('nn.functional.max_unpool2d' 'grad') xfail('qr') }))<def_stmt>test_vmapvjp_has_batch_rule self device dtype 
op<block_start><if_stmt><not>op.supports_autograd<block_start>self.skipTest("Skipped! Autograd not supported.")<line_sep><return><block_end>samples=op.sample_inputs(device dtype requires_grad=<true>)<line_sep># TODO: test in-place <if_stmt>is_inplace(op op.get_op())<block_start>self.skipTest("Skipped! NYI: inplace-testing not supported.")<line_sep><return><block_end><def_stmt>test <block_start><for_stmt>sample samples<block_start>cotangents=get_sample_cotangents(op sample)<line_sep>fn,args=get_vjp_fn_and_args_with_cotangents(op sample cotangents)<for_stmt>loop_out,batched_out get_fallback_and_vmap_exhaustive(fn args {} opinfo=op compute_loop_out=<false>)<block_start><pass><block_end><for_stmt>a_op op.aliases<block_start>fn,args=get_vjp_fn_and_args_with_cotangents(a_op sample cotangents)<for_stmt>loop_out,batched_out get_fallback_and_vmap_exhaustive(fn args {} opinfo=op compute_loop_out=<false>)<block_start><pass><block_end><block_end><block_end><block_end>check_vmap_fallback(self test op dry_run=<false>)<block_end>@ops(functorch_lagging_op_db+additional_op_db allowed_dtypes=(torch.float ))@skipOps('TestOperators' 'test_vjpvmap' vjp_fail.union({skip('bernoulli' '') # vjpvmap testing can't handle randomness skip('normal' '') # vjpvmap testing can't handle randomness skip('normal' 'number_mean') # vjpvmap testing can't handle randomness # fallback path doesn't work # All of the following are bugs and need to be fixed xfail('__getitem__' '') xfail('index_put' '') xfail('matrix_exp') xfail('view_as_complex') xfail('nn.functional.gaussian_nll_loss') xfail('masked_select') skip('nn.functional.fractional_max_pool3d') # generator works on cpu, fails on cuda xfail('__rpow__') # https://github.com/pytorch/functorch/issues/617 xfail('as_strided') skip('nn.functional.fractional_max_pool2d') # generator works on cpu, fails on cuda xfail('column_stack' '') xfail('nn.functional.dropout2d' '') xfail('svd_lowrank' '') xfail('pca_lowrank' '') xfail('nn.functional.feature_alpha_dropout' 'without_train') xfail('nn.functional.feature_alpha_dropout' 'with_train') xfail('clamp') # something weird happening with channels_last xfail('bfloat16') xfail('double') xfail('float') xfail('half') }))<def_stmt>test_vjpvmap self device dtype op# NB: there is no vjpvmap_has_batch_rule test because that is almost # certainly redundant with the vmap_has_batch_rule test in test_vmap.py # one-off skip <block_start><if_stmt>op.name<eq>'nn.functional.dropout'<block_start>self.skipTest("Skipped!")<block_end><if_stmt><not>op.supports_autograd# If the op doesn't support autograd, vmap(op) won't either <block_start>self.skipTest("Skipped! Autograd not supported.")<line_sep><return><block_end># TODO: test in-place <if_stmt>is_inplace(op op.get_op())<block_start>self.skipTest("Skipped! 
NYI: inplace-testing not supported.")<line_sep><return><block_end>samples=op.sample_inputs(device dtype requires_grad=<true>)<line_sep>batch_norm_fns=("nn.functional.batch_norm" "nn.functional.instance_norm")# instance norm calls batch norm is_batch_norm=op.name<in>batch_norm_fns<for_stmt>sample samples<block_start>args=[sample.input]+list(sample.args)<line_sep>kwargs=sample.kwargs<line_sep>generator=get_exhaustive_batched_inputs(args kwargs for_batch_norm=is_batch_norm)<for_stmt>batched_args,in_dims,kwargs generator<block_start>vmapped_op=vmap(op in_dims)<line_sep>fn,primals=normalize_op_input_output2(vmapped_op batched_args kwargs sample.output_process_fn_grad)<line_sep>result=fn(*primals)<line_sep>cotangents=tree_map(<lambda>x:torch.randn_like(x) result)<line_sep>_,vjp_fn=vjp(fn *primals)<line_sep>result_vjps=vjp_fn(cotangents)<line_sep>_,vjp_fn=ref_vjp(fn *primals)<line_sep>expected_vjps=vjp_fn(cotangents)<line_sep>self.assertEqual(result_vjps expected_vjps)<block_end><block_end><block_end><def_stmt>_compare_jacobians_of_vjp self fn cotangents_and_primals argnums=<none> atol_rtol=<none><block_start><if_stmt>argnums<is><none><block_start>argnums=tuple(range(len(cotangents_and_primals)))<block_end><def_stmt>get_vjp cotangents *primals<block_start>_,vjp_fn=vjp(fn *primals)<line_sep><return>vjp_fn(cotangents)<block_end>jacobian_jvp=jacfwd(get_vjp argnums)(*cotangents_and_primals)<line_sep>jacobian_vjp=jacrev(get_vjp argnums)(*cotangents_and_primals)<line_sep># For dtype changing operations, the jacobians have different dtype. jacobian_jvp=tree_map(<lambda>x:x.to(torch.float) jacobian_jvp)<line_sep>jacobian_vjp=tree_map(<lambda>x:x.to(torch.float) jacobian_vjp)<if_stmt>atol_rtol<is><not><none><block_start>(atol rtol)=atol_rtol<line_sep>self.assertEqual(jacobian_jvp jacobian_vjp atol=atol rtol=rtol)<block_end><else_stmt><block_start>self.assertEqual(jacobian_jvp jacobian_vjp)<block_end><block_end>@ops(functorch_lagging_op_db+additional_op_db allowed_dtypes=(torch.float ))@skipOps('TestOperators' 'test_jvpvjp' vjp_fail.union({# These are weirdly non-deterministic skip('nn.functional.fractional_max_pool2d') # Random skip('nn.functional.fractional_max_pool3d') # Random # RuntimeError: Trying to set a forward gradient that has a different size than that of the original Tensor, # this is not supported. Tensor is of size [5, 2, 3] while the given forward gradient is of size [1, 2, 3]. 
xfail('normal' '') xfail('_masked.amax' '') xfail('_masked.amin' '') xfail('_masked.log_softmax' '') xfail('_masked.softmax' '') xfail('_masked.softmin' '') xfail('amax' '') xfail('amin' '') xfail('cdist' '') xfail('cholesky' '') xfail('eig' '') xfail('linalg.det' '') xfail('linalg.matrix_norm' '') xfail('linalg.slogdet' '') xfail('logcumsumexp' '') xfail('logdet' '') xfail('nanmean' '') xfail('nansum' '') xfail('nn.functional.batch_norm' '') xfail('nn.functional.batch_norm' 'without_cudnn' device_type='cuda') xfail('nn.functional.embedding') xfail('nn.functional.embedding' 'functorch') xfail('nn.functional.embedding_bag' '') xfail('nn.functional.grid_sample' '') xfail('nn.functional.hardsigmoid' '') xfail('nn.functional.huber_loss' '') xfail('nn.functional.instance_norm' '') xfail('nn.functional.logsigmoid' '') xfail('nn.functional.pad' 'circular') xfail('nn.functional.softmin' '') xfail('nn.functional.softmin' 'with_dtype') xfail('renorm' '') xfail('std_mean' '') xfail('symeig' '') xfail('var_mean' '') xfail('nn.functional.feature_alpha_dropout' 'with_train') xfail('nn.functional.kl_div' '') xfail('pca_lowrank' '') xfail('nn.functional.dropout2d' '') xfail('nn.functional.feature_alpha_dropout' 'without_train') xfail('svd_lowrank' '') xfail('nn.functional.multilabel_margin_loss' '') xfail('nn.functional.multilabel_soft_margin_loss' '') xfail('scatter_reduce' 'amax') xfail('scatter_reduce' 'amin') xfail('nn.functional.soft_margin_loss' '') xfail('nn.functional.pdist' '') xfail('scatter_reduce' 'sum') xfail('nn.functional.multi_margin_loss' '') xfail('scatter_reduce' 'mean') xfail('scatter_reduce' 'prod') skip('linalg.householder_product' '' device_type='cuda') # flaky, I'm not sure why xfail('nn.functional.binary_cross_entropy_with_logits') }))<def_stmt>test_jvpvjp self device dtype op<block_start><if_stmt><not>op.supports_autograd<block_start>self.skipTest("Skipped! Autograd not supported.")<line_sep><return><block_end>samples=op.sample_inputs(device dtype requires_grad=<true>)<line_sep># TODO: test in-place <if_stmt>is_inplace(op op.get_op())<block_start>self.skipTest("Skipped! NYI: inplace-testing not supported.")<line_sep><return><block_end><for_stmt>sample samples<block_start>fn,primals=normalize_op_input_output(op sample)<line_sep>result=fn(*primals)<line_sep>cotangents=tree_map(<lambda>x:torch.randn_like(x) result)<line_sep>primals_tangents=tree_map(<lambda>x:torch.randn_like(x) primals)<line_sep>cotangents_tangents=tree_map(<lambda>x:torch.randn_like(x) cotangents)<if_stmt>isinstance(primals[0] torch.Tensor)<and>primals[0].numel()<eq>0# typically the first primal arg is the input. 
If the input has no elements, we will typically run # into an issue of "Expected Tensor but got None" <block_start><continue><block_end><def_stmt>push_vjp primals cotangents<block_start>_,vjp_fn=vjp(fn *primals)<line_sep><return>vjp_fn(cotangents)<block_end>result=jvp(push_vjp (primals cotangents) (primals_tangents cotangents_tangents))<line_sep>self.assertEqual(len(result) 2)<def_stmt>tree_map2 fn first second<block_start>flat_first,spec_first=tree_flatten(first)<line_sep>flat_second,spec_second=tree_flatten(second)<assert_stmt>spec_first<eq>spec_second<line_sep>flat_result=[fn(f s)<for>f,s zip(flat_first flat_second)]<line_sep><return>tree_unflatten(flat_result spec_first)<block_end><def_stmt>reference primals cotangents primals_tangents cotangents_tangents<block_start><with_stmt>fwAD.dual_level()<block_start>primal_duals=tree_map2(fwAD.make_dual primals primals_tangents)<line_sep>_,vjp_fn=ref_vjp(fn *primal_duals)<line_sep>cotangent_duals=tree_map2(fwAD.make_dual cotangents cotangents_tangents)<line_sep>result=vjp_fn(cotangent_duals)<line_sep>flat_result,spec=tree_flatten(result)<line_sep>primals_out,tangents_out=zip(*[fwAD.unpack_dual(r)<for>r flat_result])<line_sep>tangents_out=[t<if>t<is><not><none><else>torch.zeros_like(p)<for>p,t zip(primals_out tangents_out)]<line_sep>expected=(tree_unflatten(primals_out spec) tree_unflatten(tangents_out spec))<block_end><return>expected<block_end># HACK: obviously pytorch should also have the same coverage # For things that do have the same coverage, we test that jvp x vjp # are the same between PyTorch and functorch. For things that don't, # we check that jacfwd(vjp) and jacrev(vjp) are the same. This results # in slower tests. FUNCTORCH_HAS_FORMULA_BUT_NOT_PYTORCH={'nn.functional.nll_loss' 'softmax' 'log_softmax' 'nn.functional.cross_entropy' 'nn.functional.layer_norm'}<if_stmt>op.name<in>FUNCTORCH_HAS_FORMULA_BUT_NOT_PYTORCH<block_start>self.assertFalse(op.supports_fwgrad_bwgrad f"{op.name} now supports forward over reverse without a decomposition. "+"Please remove the decomposition version")<def_stmt>is_differentiable t<block_start><return>isinstance(t torch.Tensor)<and>t.dtype<eq>torch.float32<block_end>args=(cotangents *primals)<if_stmt>op.name<eq>'nn.functional.binary_cross_entropy'<block_start>argnums=(0 1)# targets is float32 but isn't differentiable atol_rtol=1.5e-4 1.3e-06<block_end><else_stmt><block_start>argnums=tuple(i<for>i range(len(args))<if>is_differentiable(args[i]))<line_sep>atol_rtol=<none><block_end>self._compare_jacobians_of_vjp(fn args argnums atol_rtol)<block_end><else_stmt><block_start>expected=reference(primals cotangents primals_tangents cotangents_tangents)<line_sep>self.assertEqual(result expected)<block_end><block_end><block_end><def_stmt>_make_extremal_inputs self shape device<block_start><if_stmt>shape<eq><none><block_start><return>(<none> )<block_end><return>(torch.full(shape -1000. device=device) torch.zeros(shape device=device) torch.full(shape 1000. 
device=device) )<block_end><def_stmt>_arg_and_kwarg_options self args_options kwargs_options<block_start><return>itertools.product(*args_options kwargs_options)<block_end><def_stmt>test_extremal_numerics_nll_loss self device<block_start>N,C=3 4<line_sep>d1,d2,d3=5 6 7<line_sep>shapes=(((N C) (N ) (C )) ((N C) (N ) <none>) ((N C d1 d2 d3) (N d1 d2 d3) (C )) ((N C d1 d2 d3) (N d1 d2 d3) <none>) )<line_sep>kwargs_options=({'ignore_index':0 'reduction':'mean'} {'reduction':'sum'} {'reduction':'none'} {})<for_stmt>input_shape,target_shape,weight_shape shapes<block_start>input_options=self._make_extremal_inputs(input_shape device)<for_stmt>input,kwargs self._arg_and_kwarg_options((input_options ) kwargs_options)<block_start><if_stmt>weight_shape<is><none><block_start>weight=<none><block_end><else_stmt><block_start>weight=torch.randn(weight_shape device=device)<block_end>target=torch.randint(0 C target_shape device=device)<line_sep>target[0]=1# since we're ignoring index 0, at least one element must be non-zero fn=functools.partial(torch.nn.functional.nll_loss target=target weight=weight **kwargs)<line_sep>result=fn(input)<line_sep>cotangents=torch.randn_like(result device=device)<line_sep>self._compare_jacobians_of_vjp(fn (cotangents input))<block_end><block_end><block_end><def_stmt>test_extremal_numerics_l1_loss self device<block_start>N,C,H,W=3 4 5 6<line_sep>shapes=((N C) (N C H) (N C H W))<line_sep>kwargs_options=({'reduction':'sum'} {'reduction':'none'} {})<for_stmt>shape shapes<block_start>input_options=self._make_extremal_inputs(shape device)<line_sep>target_options=self._make_extremal_inputs(shape device)<for_stmt>input,target,kwargs self._arg_and_kwarg_options((input_options target_options) kwargs_options)<block_start>result=torch.nn.functional.l1_loss(input target)<line_sep>cotangents=torch.randn_like(result device=device)<line_sep>self._compare_jacobians_of_vjp(torch.nn.functional.l1_loss (cotangents input target))<block_end><block_end><block_end><def_stmt>test_extremal_numerics_mse_loss self device<block_start>N,C,H,W=3 4 5 6<line_sep>shapes=((N C) (N C H) (N C H W))<line_sep>kwargs_options=({'reduction':'sum'} {'reduction':'none'} {})<for_stmt>shape shapes<block_start>input_options=self._make_extremal_inputs(shape device)<line_sep>target_options=self._make_extremal_inputs(shape device)<for_stmt>input,target,kwargs self._arg_and_kwarg_options((input_options target_options) kwargs_options)<block_start>result=torch.nn.functional.mse_loss(input target)<line_sep>cotangents=torch.randn_like(result device=device)<line_sep>self._compare_jacobians_of_vjp(torch.nn.functional.mse_loss (cotangents input target))<block_end><block_end><block_end><def_stmt>test_extremal_numerics_softmax self device<block_start>N,C,H,W=3 4 5 6<line_sep>shapes=((N C) (N C H) (N C H W))<line_sep>kwargs_options=({'dim':1} {})<for_stmt>shape shapes<block_start>input_options=self._make_extremal_inputs(shape device)<for_stmt>input,kwargs self._arg_and_kwarg_options((input_options ) kwargs_options)<block_start>result=torch.nn.functional.softmax(input)<line_sep>cotangents=torch.randn_like(result device=device)<line_sep>self._compare_jacobians_of_vjp(torch.nn.functional.softmax (cotangents input))<block_end><block_end><block_end><def_stmt>test_extremal_numerics_log_softmax self device<block_start>N,C,H,W=3 4 5 6<line_sep>shapes=((N C) (N C H) (N C H W))<line_sep>kwargs_options=({'dim':1} {})<for_stmt>shape shapes<block_start>input_options=self._make_extremal_inputs(shape device)<for_stmt>input,kwargs 
self._arg_and_kwarg_options((input_options ) kwargs_options)<block_start>result=torch.nn.functional.log_softmax(input)<line_sep>cotangents=torch.randn_like(result device=device)<line_sep>self._compare_jacobians_of_vjp(torch.nn.functional.log_softmax (cotangents input))<block_end><block_end><block_end><def_stmt>test_extremal_numerics_cross_entropy self device<block_start>N,C=3 4<line_sep>d1,d2,d3=5 6 7<line_sep>shapes=(((N C) (N ) (C )) ((N C) (N ) <none>) ((N C) (N C) (C )) ((N C) (N C) <none>) ((C ) () (C )) ((C ) () <none>) ((C ) (C ) (C )) ((C ) (C ) <none>) ((N C d1 d2 d3) (N d1 d2 d3) (C )) ((N C d1 d2 d3) (N d1 d2 d3) <none>) ((N C d1 d2 d3) (N C d1 d2 d3) (C )) ((N C d1 d2 d3) (N C d1 d2 d3) <none>) )<for_stmt>input_shape,target_shape,weight_shape shapes<block_start>input_options=self._make_extremal_inputs(input_shape device)<line_sep>kwargs_options=[{'reduction':'sum'} {'reduction':'none'} {}]<if_stmt>input_shape<ne>target_shape<block_start>kwargs_options.append({'ignore_index':0 'reduction':'mean'})<block_end><for_stmt>input,kwargs self._arg_and_kwarg_options((input_options ) kwargs_options)<block_start><if_stmt>weight_shape<is><none><block_start>weight=<none><block_end><else_stmt><block_start>weight=torch.randn(weight_shape device=device)<block_end><if_stmt>input_shape<eq>target_shape<block_start>target=torch.rand(target_shape device=device)<block_end><elif_stmt>len(target_shape)<eq>0<block_start>target=torch.tensor(1 device=device)# must be non-zero since ignore_index may be 0 <block_end><else_stmt><block_start>target=torch.randint(0 C target_shape device=device)<block_end>fn=functools.partial(torch.nn.functional.cross_entropy target=target weight=weight **kwargs)<line_sep>result=fn(input)<line_sep>cotangents=torch.randn_like(result device=device)<line_sep>self._compare_jacobians_of_vjp(fn (cotangents input) atol_rtol=(1e-4 1e-5))<block_end><block_end><block_end><def_stmt>test_extremal_numerics_binary_cross_entropy self device<block_start>N,C,H,W=3 4 5 6<line_sep>shapes=((N C) (N C H) (N C H W))<for_stmt>shape shapes<block_start>weight_options=self._make_extremal_inputs(shape device)<line_sep>kwargs_options=[{'reduction':'sum'} {'reduction':'none'} {}]<for_stmt>weight,kwargs self._arg_and_kwarg_options((weight_options ) kwargs_options)<block_start>input=torch.rand(shape device=device)<line_sep>target=torch.rand(shape device=device)<line_sep>fn=functools.partial(torch.nn.functional.binary_cross_entropy target=target weight=weight **kwargs)<line_sep>result=fn(input)<line_sep>cotangents=torch.randn_like(result device=device)<line_sep>self._compare_jacobians_of_vjp(fn (cotangents input) atol_rtol=(1e-4 2e-5))<block_end><block_end><block_end><def_stmt>test_extremal_numerics_layer_norm self device<block_start>N,C,H,W=3 4 5 6<line_sep>shapes=((N C) (N C H) (N C H W))<for_stmt>shape shapes<block_start>input_options=self._make_extremal_inputs(shape device)<line_sep>normalized_shape=shape[1:]<line_sep>weight_options=self._make_extremal_inputs(normalized_shape device)<line_sep>bias_options=self._make_extremal_inputs(normalized_shape device)<for_stmt>input,bias,weight self._arg_and_kwarg_options((input_options bias_options weight_options) ())<block_start><def_stmt>fn input weight bias<block_start><return>torch.nn.functional.layer_norm(input normalized_shape weight=weight bias=bias)<block_end>result=fn(input weight bias)<line_sep>cotangents=torch.randn_like(result device=device)<line_sep>self._compare_jacobians_of_vjp(fn (cotangents input weight 
bias))<block_end><block_end><block_end>@ops(filter(<lambda>op:op.name<eq>"nn.functional.group_norm" functorch_lagging_op_db+additional_op_db) allowed_dtypes=(torch.float32 torch.double))# TODO: generalize <def_stmt>test_group_norm_backward self device dtype op# hacky, only works since no group norm inputs can be scalars <block_start><def_stmt>was_skipped_from_batched_tensors batched_out batch_size<block_start><return>batched_out.shape<eq>(batch_size )<and>all(tuple(e<eq>1<for>e batched_out))<block_end>sample_inputs=op.sample_inputs(device dtype requires_grad=<true>)<for_stmt>sample_input sample_inputs<block_start>cotangents=get_sample_cotangents(op sample_input)<line_sep>f,args=get_autograd_fn_and_args_with_cotangents(op sample_input cotangents)<for_stmt>loop_out,batched_out get_fallback_and_vmap_exhaustive(f args {} opinfo=op)<block_start><if_stmt>all(was_skipped_from_batched_tensors(bo lo.shape[0])<for>(bo lo) zip(batched_out loop_out))<block_start><continue># we weren't able to use the batched tensor in autograd.grad <block_end>self.assertEqual(loop_out batched_out)<block_end><block_end><block_end><block_end>only_for=("cpu" "cuda")<line_sep>instantiate_device_type_tests(TestOperators globals() only_for=only_for)<if_stmt>__name__<eq>'__main__'<block_start>run_tests()<block_end>
<import_stmt>tushare<as>ts<import_stmt>config<class_stmt>StockFinancialDataFromTuShare<block_start><def_stmt>__init__ self<block_start>ts.set_token(config.TS_TOKEN)<line_sep>self.__pro=ts.pro_api()<block_end><def_stmt>init self<arrow>bool<block_start><pass><block_end><def_stmt>inited self<arrow>bool<block_start><pass><block_end># Check whether this collector is still valid. <def_stmt>validate self<arrow>bool<block_start><pass><block_end># Fetch data from the internet. <def_stmt>fetch_data self **kw<arrow>bool<block_start><pass><block_end># Automatically check for updates and write them to the DB; behaviour depends on the collector's implementation. <def_stmt>check_update self **kw<arrow>bool<block_start><pass><block_end># Force an update of all data in the DB. <def_stmt>force_update self **kw<arrow>bool<block_start><pass><block_end><block_end>
<import_from_stmt>django.forms ModelForm<import_from_stmt>django.forms.widgets Widget<class_stmt>OverridenWidget(Widget)<block_start><pass><block_end><class_stmt>AlternateMediaForm(ModelForm)<block_start><class_stmt>Meta<block_start>widgets={"tags":OverridenWidget "file":OverridenWidget "thumbnail":OverridenWidget }<block_end><block_end>
<import_from_stmt>migen *<import_from_stmt>misoc.interconnect.csr AutoCSR CSRStorage<import_from_stmt>itertools zip_longest<line_sep># Basic programmable LED module <class_stmt>LED_outputs(Module AutoCSR)<block_start><def_stmt>__init__ self leds_raw leds_muxes=<none> active=1<block_start>""" leds_raw: output IOs for the LEDs leds_muxes: internal digital signals that could feed a LED """<line_sep>leds=Signal(len(leds_raw))<line_sep># Register containing the desired LED status self._out=CSRStorage(len(leds) atomic_write=<true>)<line_sep># For each LED, we generate a MUX register. # The MUX register can connect either the bit in the 'output' register or # signals supplied via led_muxes <if_stmt>leds_muxes<block_start><assert_stmt>len(leds_muxes)<eq>len(leds)<for_stmt>n range(len(leds))<block_start>name="mux_%d"%n<line_sep>attr=CSRStorage(8 atomic_write=<true> name=name)<line_sep>setattr(self "_%s"%name attr)<line_sep>mux_vals=[self._out.storage[n]]<if_stmt>leds_muxes[n]<block_start>mux_vals.extend(leds_muxes[n])<block_end>cases={k:leds[n].eq(v)<for>k,v enumerate(mux_vals)}<line_sep>self.comb<augadd>[leds[n].eq(0) Case(attr.storage cases)]<block_end><block_end><else_stmt><block_start>self.comb<augadd>[leds.eq(self._out.storage) ]<block_end>self.comb<augadd>[leds_raw.eq(leds<if>active<else>~leds)]<block_end><block_end>
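# A brief, hypothetical instantiation sketch for the LED_outputs module above.
# The signal names and the choice of four LEDs are illustrative assumptions,
# not part of the original file.
from migen import Signal

leds_pads = Signal(4)   # four board LED outputs
heartbeat = Signal()    # e.g. an internal blink signal
link_up = Signal()      # e.g. a link-status signal
led_module = LED_outputs(leds_pads,
                         leds_muxes=[[heartbeat], [link_up], [], []])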
# Copyright 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. <import_from_stmt>django.conf.urls url<import_from_stmt>. views<line_sep>urlpatterns=[url(r'^$' views.index name='index') url(r'^feed_timeline$' views.feed_timeline name='feed_timeline') url(r'^timeline$' views.timeline name='timeline') url(r'^bundle_tray$' views.bundle_tray name='bundle_tray') url(r'^inbox$' views.inbox name='inbox') url(r'^seen$' views.seen name='seen') ]<line_sep>
__author__='<NAME> <<EMAIL>>'<line_sep>__version__='0.0.1'<line_sep>
# Author <NAME>, copyright 2021 <import_from_stmt>.eddsakey EdDSAKey<import_from_stmt>ecdsa.keys BadSignatureError<import_from_stmt>ecdsa.der UnexpectedDER<import_from_stmt>.cryptomath numBits<import_from_stmt>.compat compatHMAC<class_stmt>Python_EdDSAKey(EdDSAKey)<block_start>""" Concrete implementation of EdDSA object backed by python-ecdsa. Object that uses the common, abstract API of asymmetric keys that uses the python-ecdsa library for the cryptographic operations. :vartype public_key: VerifyingKey :ivar public_key: python-ecdsa object for verifying EdDSA signatures, if `private_key` is set, it should match it (should be able to verify signatures created by it) :vartype private_key: SigningKey :ivar private_key: python-ecdsa object for creating EdDSA signatures :vartype key_type: str :ivar key_type: type of asymmetric algorithm used by the keys - for this object it is either "Ed25519" or "Ed448" """<def_stmt>__init__ self public_key private_key=<none><block_start><if_stmt><not>public_key<and><not>private_key<block_start><raise>ValueError("at least one key must be provided")<block_end><if_stmt><not>public_key<block_start>public_key=private_key.verifying_key<block_end>self.curve_name=public_key.curve.name<line_sep>self.private_key=private_key<line_sep>self.public_key=public_key<line_sep>self.key_type=self.curve_name<block_end><def_stmt>__len__ self<block_start><return>numBits(self.public_key.curve.order)<block_end><def_stmt>hasPrivateKey self<block_start><return>bool(self.private_key)<block_end><def_stmt>acceptsPassword self<block_start><return><false><block_end>@staticmethod<def_stmt>generate bits<block_start><raise>NotImplementedError()<block_end><def_stmt>_hashAndSign self data<block_start><return>self.private_key.sign_deterministic(compatHMAC(data))<block_end><def_stmt>_hashAndVerify self signature data<block_start><try_stmt><block_start><return>self.public_key.verify(compatHMAC(signature) compatHMAC(data))<block_end># https://github.com/warner/python-ecdsa/issues/114 <except_stmt>(BadSignatureError UnexpectedDER IndexError AssertionError)<block_start><return><false><block_end><block_end><block_end>
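# Hedged usage sketch for the class above. It assumes python-ecdsa >= 0.17
# (which exports the Ed25519 curve object) and that this module is imported as
# part of its package so the relative imports resolve.
from ecdsa import SigningKey, Ed25519

raw_key = SigningKey.generate(curve=Ed25519)
key = Python_EdDSAKey(None, private_key=raw_key)
signature = key._hashAndSign(b"example data")
assert key.hasPrivateKey()
assert key._hashAndVerify(signature, b"example data")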
<import_from_stmt>suplemon.linelight.color_map color_map<class_stmt>Syntax<block_start><def_stmt>get_comment self<block_start><return>("//" "")<block_end><def_stmt>get_color self raw_line<block_start>color=color_map["white"]<line_sep>line=raw_line.strip()<line_sep>keywords=("if" "else" "finally" "try" "catch" "foreach" "while" "continue" "pass" "break")<if_stmt>line.startswith(("include" "require"))<block_start>color=color_map["blue"]<block_end><elif_stmt>line.startswith(("class" "public" "private" "function"))<block_start>color=color_map["green"]<block_end><elif_stmt>line.startswith("def")<block_start>color=color_map["cyan"]<block_end><elif_stmt>line.startswith("return")<block_start>color=color_map["red"]<block_end><elif_stmt>line.startswith("$")<block_start>color=color_map["cyan"]<block_end><elif_stmt>line.startswith(("#" "//" "/*" "*/"))<block_start>color=color_map["magenta"]<block_end><elif_stmt>line.startswith(keywords)<block_start>color=color_map["yellow"]<block_end><return>color<block_end><block_end>
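# Small illustration of the line-colouring logic above; color_map comes from
# the same suplemon package that the module imports.
syntax = Syntax()
assert syntax.get_color("function foo($bar) {") == color_map["green"]
assert syntax.get_color("// just a comment") == color_map["magenta"]
assert syntax.get_color("$value = 42;") == color_map["cyan"]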
<import_from_stmt>typing List<import_from_stmt>.item_result Item<import_from_stmt>..sdk.models VariationsResult VariationSummary<class_stmt>ApiPrice<block_start>amount:float<line_sep>currency:str<line_sep>display_amount:str<block_end><class_stmt>ApiVariationDimension<block_start>display_name:str<line_sep>name:str<line_sep>values:List[str]<block_end><class_stmt>ApiVariationPrice<block_start>highest_price:ApiPrice<line_sep>lowest_price:ApiPrice<block_end><class_stmt>ApiVariationSummary(VariationSummary)<block_start>page_count:int<line_sep>price:ApiVariationPrice<line_sep>variation_count:int<line_sep>variation_dimensions:List[ApiVariationDimension]<block_end><class_stmt>VariationsResult(VariationsResult)<block_start>items:List[Item]<line_sep>variation_summary:ApiVariationSummary<block_end>
# !/usr/bin/env python # -- coding: utf-8 -- # @Time : 2020/6/28 18:04 # @Author : liumin # @File : LWnet.py <import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torchvision<import_stmt>torch.nn.functional<as>F<def_stmt>ConvBNReLU in_channels out_channels kernel_size stride padding dilation=1 groups=1<block_start><return>nn.Sequential(nn.Conv2d(in_channels=in_channels out_channels=out_channels kernel_size=kernel_size stride=stride padding=padding dilation=dilation groups=groups bias=<false>) nn.BatchNorm2d(out_channels) nn.ReLU6(inplace=<true>))<block_end><def_stmt>ConvBN in_channels out_channels kernel_size stride padding dilation=1 groups=1<block_start><return>nn.Sequential(nn.Conv2d(in_channels=in_channels out_channels=out_channels kernel_size=kernel_size stride=stride padding=padding dilation=dilation groups=groups bias=<false>) nn.BatchNorm2d(out_channels))<block_end><def_stmt>Conv1x1BNReLU in_channels out_channels<block_start><return>nn.Sequential(nn.Conv2d(in_channels=in_channels out_channels=out_channels kernel_size=1 stride=1 bias=<false>) nn.BatchNorm2d(out_channels) nn.ReLU6(inplace=<true>))<block_end><def_stmt>Conv1x1BN in_channels out_channels<block_start><return>nn.Sequential(nn.Conv2d(in_channels=in_channels out_channels=out_channels kernel_size=1 stride=1 bias=<false>) nn.BatchNorm2d(out_channels))<block_end><class_stmt>LWbottleneck(nn.Module)<block_start><def_stmt>__init__ self in_channels out_channels stride<block_start>super(LWbottleneck self).__init__()<line_sep>self.stride=stride<line_sep>self.pyramid_list=nn.ModuleList()<line_sep>self.pyramid_list.append(ConvBNReLU(in_channels in_channels kernel_size=[5 1] stride=stride padding=[2 0]))<line_sep>self.pyramid_list.append(ConvBNReLU(in_channels in_channels kernel_size=[1 5] stride=stride padding=[0 2]))<line_sep>self.pyramid_list.append(ConvBNReLU(in_channels in_channels kernel_size=[3 1] stride=stride padding=[1 0]))<line_sep>self.pyramid_list.append(ConvBNReLU(in_channels in_channels kernel_size=[1 3] stride=stride padding=[0 1]))<line_sep>self.pyramid_list.append(ConvBNReLU(in_channels in_channels kernel_size=[2 1] stride=stride padding=[1 0]))<line_sep>self.pyramid_list.append(ConvBNReLU(in_channels in_channels kernel_size=[1 2] stride=stride padding=[0 1]))<line_sep>self.pyramid_list.append(ConvBNReLU(in_channels in_channels kernel_size=2 stride=stride padding=1))<line_sep>self.pyramid_list.append(ConvBNReLU(in_channels in_channels kernel_size=3 stride=stride padding=1))<line_sep>self.shrink=Conv1x1BN(in_channels<times>8 out_channels)<block_end><def_stmt>forward self x<block_start>b,c,w,h=x.shape<if_stmt>self.stride<g>1<block_start>w,h=w<floordiv>self.stride h<floordiv>self.stride<block_end>outputs=[]<for_stmt>pyconv self.pyramid_list<block_start>pyconv_x=pyconv(x)<if_stmt>x.shape[2:]<ne>pyconv_x.shape[2:]<block_start>pyconv_x=pyconv_x[: : :w :h]<block_end>outputs.append(pyconv_x)<block_end>out=torch.cat(outputs 1)<line_sep><return>self.shrink(out)<block_end><block_end><class_stmt>Encoder(nn.Module)<block_start><def_stmt>__init__ self<block_start>super(Encoder self).__init__()<line_sep>self.stage1=nn.Sequential(ConvBNReLU(in_channels=3 out_channels=32 kernel_size=3 stride=2 padding=1) Conv1x1BN(in_channels=32 out_channels=16) )<line_sep>self.stage2=nn.Sequential(LWbottleneck(in_channels=16 out_channels=24 stride=2) LWbottleneck(in_channels=24 out_channels=24 stride=1) )<line_sep>self.stage3=nn.Sequential(LWbottleneck(in_channels=24 out_channels=32 stride=2) LWbottleneck(in_channels=32 out_channels=32 stride=1) 
)<line_sep>self.stage4=nn.Sequential(LWbottleneck(in_channels=32 out_channels=32 stride=2))<line_sep>self.stage5=nn.Sequential(LWbottleneck(in_channels=32 out_channels=64 stride=2) LWbottleneck(in_channels=64 out_channels=64 stride=1) LWbottleneck(in_channels=64 out_channels=64 stride=1) LWbottleneck(in_channels=64 out_channels=64 stride=1) )<line_sep>self.conv1=Conv1x1BN(in_channels=64 out_channels=320)<block_end><def_stmt>forward self x<block_start>x=self.stage1(x)<line_sep>x=self.stage2(x)<line_sep>x=F.pad(x pad=(0 1 0 1) mode='constant' value=0)<line_sep>out1=x=self.stage3(x)<line_sep>x=self.stage4(x)<line_sep>x=F.pad(x pad=(0 1 0 1) mode='constant' value=0)<line_sep>x=self.stage5(x)<line_sep>out2=self.conv1(x)<line_sep><return>out1 out2<block_end><block_end><class_stmt>ASPP(nn.Module)<block_start><def_stmt>__init__ self in_channels out_channels<block_start>super(ASPP self).__init__()<line_sep>self.depthwise1=ConvBNReLU(in_channels out_channels 3 1 6 dilation=6)<line_sep>self.depthwise2=ConvBNReLU(in_channels out_channels 3 1 12 dilation=12)<line_sep>self.depthwise3=ConvBNReLU(in_channels out_channels 3 1 18 dilation=18)<line_sep>self.pointconv=Conv1x1BN(in_channels out_channels)<block_end><def_stmt>forward self x<block_start>x1=self.depthwise1(x)<line_sep>x2=self.depthwise2(x)<line_sep>x3=self.depthwise3(x)<line_sep>x4=self.pointconv(x)<line_sep><return>torch.cat([x1 x2 x3 x4] dim=1)<block_end><block_end><class_stmt>Decoder(nn.Module)<block_start><def_stmt>__init__ self num_classes=2<block_start>super(Decoder self).__init__()<line_sep>self.aspp=ASPP(320 128)<line_sep>self.pconv1=Conv1x1BN(128<times>4 512)<line_sep>self.pconv2=Conv1x1BN(512+32 128)<line_sep>self.pconv3=Conv1x1BN(128 num_classes)<block_end><def_stmt>forward self x y<block_start>x=self.pconv1(self.aspp(x))<line_sep>x=F.interpolate(x y.shape[2:] align_corners=<true> mode='bilinear')<line_sep>x=torch.cat([x y] dim=1)<line_sep>out=self.pconv3(self.pconv2(x))<line_sep><return>out<block_end><block_end><class_stmt>LW_Network(nn.Module)<block_start><def_stmt>__init__ self num_classes=2<block_start>super(LW_Network self).__init__()<line_sep>self.encoder=Encoder()<line_sep>self.decoder=Decoder(num_classes)<block_end><def_stmt>forward self x<block_start>x1,x2=self.encoder(x)<line_sep>out=self.decoder(x2 x1)<line_sep><return>out<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>model=LW_Network()<line_sep>print(model)<line_sep>input=torch.randn(1 3 331 331)<line_sep>output=model(input)<line_sep>print(output.shape)<block_end>
<import_stmt>regex<import_stmt>pickle<import_stmt>os.path<import_from_stmt>pynab log root_dir<line_sep># category codes # these are stored in the db, as well CAT_GAME_NDS=1010<line_sep>CAT_GAME_PSP=1020<line_sep>CAT_GAME_WII=1030<line_sep>CAT_GAME_XBOX=1040<line_sep>CAT_GAME_XBOX360=1050<line_sep>CAT_GAME_WIIWARE=1060<line_sep>CAT_GAME_XBOX360DLC=1070<line_sep>CAT_GAME_PS3=1080<line_sep>CAT_MOVIE_FOREIGN=2010<line_sep>CAT_MOVIE_OTHER=2020<line_sep>CAT_MOVIE_SD=2030<line_sep>CAT_MOVIE_HD=2040<line_sep>CAT_MOVIE_BLURAY=2050<line_sep>CAT_MOVIE_3D=2060<line_sep>CAT_MUSIC_MP3=3010<line_sep>CAT_MUSIC_VIDEO=3020<line_sep>CAT_MUSIC_AUDIOBOOK=3030<line_sep>CAT_MUSIC_LOSSLESS=3040<line_sep>CAT_PC_0DAY=4010<line_sep>CAT_PC_ISO=4020<line_sep>CAT_PC_MAC=4030<line_sep>CAT_PC_MOBILEOTHER=4040<line_sep>CAT_PC_GAMES=4050<line_sep>CAT_PC_MOBILEIOS=4060<line_sep>CAT_PC_MOBILEANDROID=4070<line_sep>CAT_TV_FOREIGN=5020<line_sep>CAT_TV_SD=5030<line_sep>CAT_TV_HD=5040<line_sep>CAT_TV_OTHER=5050<line_sep>CAT_TV_SPORT=5060<line_sep>CAT_TV_ANIME=5070<line_sep>CAT_TV_DOCU=5080<line_sep>CAT_XXX_DVD=6010<line_sep>CAT_XXX_WMV=6020<line_sep>CAT_XXX_XVID=6030<line_sep>CAT_XXX_X264=6040<line_sep>CAT_XXX_PACK=6050<line_sep>CAT_XXX_IMAGESET=6060<line_sep>CAT_XXX_OTHER=6070<line_sep>CAT_BOOK_MAGS=7010<line_sep>CAT_BOOK_EBOOK=7020<line_sep>CAT_BOOK_COMICS=7030<line_sep>CAT_MISC_OTHER=8010<line_sep>CAT_PARENT_GAME=1000<line_sep>CAT_PARENT_MOVIE=2000<line_sep>CAT_PARENT_MUSIC=3000<line_sep>CAT_PARENT_PC=4000<line_sep>CAT_PARENT_TV=5000<line_sep>CAT_PARENT_XXX=6000<line_sep>CAT_PARENT_BOOK=7000<line_sep>CAT_PARENT_MISC=8000<line_sep>CATEGORISER=pickle.load(open(os.path.join(root_dir 'db/release_categoriser.pkl') 'rb'))<def_stmt>extract_features name<block_start><def_stmt>find reg str<block_start>res=regex.findall(reg str regex.I)<if_stmt>res<block_start><return>'|'.join(sorted(res))<block_end><else_stmt><block_start><return><none><block_end><block_end><return>{'length':len(name) 'tokens':len(regex.findall('[\w\']+' name)) 'resolution':find('(720|1080)' name) 'quality':find('(SDTV|HDTV|PDTV|WEB-?DL|WEBRIP|XVID|DIVX|DVDR|DVD-RIP|x264|dvd|XvidHD|AVC|AAC|VC\-?1|wmvhd|web\-dl|BRRIP|HDRIP|HDDVD|bddvd|BDRIP|webscr|bluray|bd?25|bd?50|blu-ray|BDREMUX)' name) '3d':bool(find('(3D)' name)) 'subgroup':find('\[(\w+)\]' name) 'filehash':bool(find('\[([0-9a-fA-F]{8})\]' name)) 'season':bool(find('(S\d{1,2})' name)) 'episode':bool(find('(E\d{1,2})' name)) 'airdate':bool(find('((?:\d{4}[.-/ ]\d{2}[.-/ ]\d{2})|(?:\d{2}[.-/ ]\d{2}[.-/ ]\d{4}))' name)) 'year':bool(find('[.-/ ](\d{4})[.-/ ]' name)) 'versus':bool(find('[.-/ ](vs?)[.-/ ]' name)) 'music':bool(find('((?:^VA(?:\-|\_|\ ))|(?:MP3|VBR|NMR|CDM|FLAC|\-(?:CDR?|EP|LP|SAT|2CD|FM|VINYL|DE|CABLE|TAPE)\-))' name)) 'ebook':bool(find('(e?\-?book|html|epub|pdf|mobi|azw|doc|isbn)' name)) 'comic':bool(find('(cbr|cbz)' name)) 'magazine':bool(find('(mag(?:s|azine?s?))' name)) 'sport':find('(epl|motogp|bellator|supercup|wtcc|bundesliga|uefa|espn|wwe|wwf|wcw|mma|ucf|fia|pga|nfl|ncaa|fifa|mlb|nrl|nhl|afl|nba|wimbledon|cricket)[\. 
-_]' name) 'xxx':bool(find('(xxx|imageset|porn|erotica)' name)) 'game':find('(PS3|3DS|NDS|PS4|XBOX|XBONE|WII|DLC|CONSOLE|PSP|X360|PS4)' name) 'foreign':bool(find('(seizoen|staffel|danish|flemish|dutch|Deutsch|nl\.?subbed|nl\.?sub|\.NL|\.ITA|norwegian|swedish|swesub|french|german|spanish|icelandic|finnish|Chinese\.Subbed|vostfr|Hebrew\.Dubbed|\.HEB\.|Nordic|Hebdub|NLSubs|NL\-Subs|NLSub|Deutsch| der |German | NL |\.PL\.)' name)) 'pc':bool(find('((?:v?\d\.\d\.)|(?:x64|32bit|64bit|exe))' name)) 'documentary':bool(find('(documentary|national geographic|natgeo)' name))}<block_end><def_stmt>determine_category name group_name=''<block_start>"""Categorise release based on release name and group name."""<line_sep>features=extract_features(name)<line_sep>features['name']=name<line_sep>features['group']=group_name<line_sep>category=int(CATEGORISER.classify(features))<line_sep>log.debug('category: ({}) [{}]: {}'.format(group_name name category))<line_sep><return>category<block_end>
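# Illustrative call of the categoriser above. The release and group names are
# made up, and db/release_categoriser.pkl must exist for the module to import.
category = determine_category("Some.Show.S01E05.720p.HDTV.x264-GRP",
                               "alt.binaries.teevee")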
# Copyright 2020 Makani Technologies LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for makani.avionics.motor.motor_client. This module uses snake case for new function names so that test functions can be consistent with cmd.Cmd methods without offending Lint. """<import_stmt>copy<import_stmt>re<import_stmt>socket<import_stmt>tempfile<import_stmt>textwrap<import_stmt>threading<import_stmt>time<import_stmt>unittest<import_from_stmt>makani.avionics.common actuator_types<import_from_stmt>makani.avionics.common cmd_client<import_from_stmt>makani.avionics.common pack_avionics_messages<import_from_stmt>makani.avionics.motor motor_client<import_from_stmt>makani.avionics.motor.firmware flags<import_from_stmt>makani.avionics.network aio_node<import_from_stmt>makani.lib.python test_util<import_stmt>numpy<line_sep>_TIMEOUT=0.01<line_sep>_EPS32=numpy.finfo(numpy.float32).eps<class_stmt>MulticastListener(cmd_client.AioThread)<block_start><def_stmt>__init__ self set_state_callback command_callback param_callback<block_start>super(MulticastListener self).__init__(['kMessageTypeControllerCommand' 'kMessageTypeDynoCommand' 'kMessageTypeMotorSetParam' 'kMessageTypeDynoMotorSetParam' 'kMessageTypeMotorSetState' 'kMessageTypeDynoMotorSetState' 'kMessageTypeMotorGetParam' 'kMessageTypeDynoMotorGetParam'] allowed_sources=['kAioNodeControllerA' 'kAioNodeOperator'] timeout=_TIMEOUT)<line_sep>self._set_state_callback=set_state_callback<line_sep>self._command_callback=command_callback<line_sep>self._param_callback=param_callback<block_end><def_stmt>_RunOnce self<block_start><try_stmt><block_start>_,header,message=self._client.Recv()<if_stmt>header.source<eq>aio_node.kAioNodeOperator<block_start><if_stmt>isinstance(message pack_avionics_messages.MotorSetStateMessage)<block_start>self._set_state_callback(message)<block_end><elif_stmt>(isinstance(message pack_avionics_messages.MotorSetParamMessage)<or>isinstance(message pack_avionics_messages.DynoMotorSetParamMessage))<block_start>self._param_callback(message)<block_end><elif_stmt>(isinstance(message pack_avionics_messages.MotorGetParamMessage)<or>isinstance(message pack_avionics_messages.DynoMotorGetParamMessage))<block_start>self._param_callback(message)<block_end><elif_stmt>isinstance(message pack_avionics_messages.DynoCommandMessage)<block_start>self._command_callback(message)<block_end><block_end><elif_stmt>header.source<eq>aio_node.kAioNodeControllerA<block_start><if_stmt>isinstance(message pack_avionics_messages.ControllerCommandMessage)<block_start>self._command_callback(message)<block_end><block_end><block_end><except_stmt>socket.timeout<block_start><pass><block_end><block_end><block_end><class_stmt>FakeMotor(cmd_client.AioThread)<block_start><def_stmt>__init__ self 
nickname<block_start>self._node_string=motor_client.AioNodeNameFromMotorNickname(nickname)<line_sep>self._index=motor_client.MOTORS.index(nickname)<line_sep>self._bitmask=1<lshift>self._index<line_sep>self._status=pack_avionics_messages.MotorStatusMessage()<line_sep>self._status.motor_status=flags.kMotorStatusInit<line_sep>self._status_lock=threading.Lock()<line_sep>self.running=<false><line_sep>self.params={v:0.0<for>v motor_client.MOTOR_PARAMS.itervalues()}<line_sep>self.torque=0.0<line_sep>self.speed_lower=0.0<line_sep>self.speed_upper=0.0<line_sep>super(FakeMotor self).__init__(['kMessageTypeMotorStatus' 'kMessageTypeMotorAckParam'] allowed_sources=[self._node_string] timeout=_TIMEOUT)<line_sep>self._multicast_listener=MulticastListener(self._HandleMotorSetStateMessage self._HandleControllerCommandMessage self._HandleParamMessage)<block_end><def_stmt>GetParam self param_name<block_start><return>self.params[motor_client.MOTOR_PARAMS[param_name]]<block_end><def_stmt>__enter__ self<block_start>self.start()<line_sep>self._multicast_listener.start()<line_sep><return>self<block_end><def_stmt>__exit__ self *args<block_start>self._multicast_listener.Exit()<line_sep>self._multicast_listener.join()<line_sep>self.Exit()<line_sep>self.join()<block_end><def_stmt>GetStatus self<block_start><with_stmt>self._status_lock<block_start><return>copy.copy(self._status)<block_end><block_end><def_stmt>GetError self<block_start><with_stmt>self._status_lock<block_start><return>self._status.motor_error<block_end><block_end><def_stmt>SetError self error<block_start><with_stmt>self._status_lock<block_start>self._status.motor_error=error<line_sep>self._status.motor_status<augor>flags.kMotorStatusError<block_end><block_end><def_stmt>SetWarning self warning<block_start><with_stmt>self._status_lock<block_start>self._status.motor_warning=warning<line_sep>self._status.motor_status<augor>flags.kMotorStatusError<block_end><block_end><def_stmt>ClearError self<block_start><with_stmt>self._status_lock<block_start>self._status.motor_error=flags.kMotorErrorNone<line_sep>self._status.motor_status<augand>~flags.kMotorStatusError<block_end><block_end><def_stmt>_HandleMotorSetStateMessage self message<block_start><if_stmt>(message.selected_motors&self._bitmask<and>message.command<eq>actuator_types.kActuatorStateCommandArm)<block_start><with_stmt>self._status_lock<block_start>self._status.motor_status=flags.kMotorStatusArmed<block_end><block_end><block_end><def_stmt>_HandleControllerCommandMessage self message<block_start>self.torque=message.motor_torque[self._index]<line_sep>self.speed_lower=message.motor_speed_lower_limit[self._index]<line_sep>self.speed_upper=message.motor_speed_upper_limit[self._index]<line_sep>self.running=bool(message.motor_command&flags.kMotorCommandRun)<if_stmt>message.motor_command&flags.kMotorCommandClearError<block_start>self.ClearError()<block_end><if_stmt>message.motor_command&flags.kMotorCommandDisarm<block_start><with_stmt>self._status_lock<block_start>self._status.motor_status<augand>~flags.kMotorStatusArmed<block_end><block_end><block_end><def_stmt>_HandleParamMessage self message<block_start><if_stmt>message.selected_motors&self._bitmask<block_start><if_stmt>isinstance(message pack_avionics_messages.MotorSetParamMessage)<block_start>self.params[message.id]=message.value<block_end>ack=pack_avionics_messages.MotorAckParamMessage()<line_sep>ack.id=message.id<line_sep>ack.value=self.params[message.id]<line_sep>self._client.Send(ack 'kMessageTypeMotorAckParam' 
self._node_string)<block_end><block_end><def_stmt>_RunOnce self<block_start><with_stmt>self._status_lock<block_start>self._client.Send(self._status 'kMessageTypeMotorStatus' self._node_string)<block_end>time.sleep(0.1)<block_end><block_end><class_stmt>MotorCommandClientTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>super(MotorCommandClientTest self).setUp()<line_sep>self.client=motor_client.MotorCommandClient()<line_sep>self.stdout=test_util.StdoutPatch()<block_end><def_stmt>tearDown self<block_start>super(MotorCommandClientTest self).tearDown()<with_stmt>self.stdout<block_start>self.client.onecmd('quit')<block_end><block_end><def_stmt>assert_eventually_true self func<block_start>num_tries=30<for_stmt>i xrange(num_tries)<block_start><if_stmt>func()<block_start><return><true><block_end><if_stmt>i<l>num_tries-1<block_start>time.sleep(0.1)<block_end><block_end>self.assertTrue(<false>)<block_end># pylint: disable=redundant-unittest-assert <def_stmt>test_do_set_targets self<block_start><with_stmt>self.stdout<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*SBO.*')<line_sep>self.client.onecmd('quit')<block_end><with_stmt>self.stdout<block_start>self.client.onecmd('set_targets SBI PTO')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*SBI.*')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*PTO.*')<block_end><block_end><def_stmt>test_do_set_targets_dyno self<block_start><with_stmt>self.stdout<block_start>self.client.onecmd('set_targets_dyno SBO')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*SBO.*')<line_sep>self.client.onecmd('quit')<block_end><with_stmt>self.stdout<block_start>self.client.onecmd('set_targets_dyno SBI PTO')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*SBI.*')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*PTO.*')<block_end><block_end><def_stmt>test_do_arm_fail self<block_start><with_stmt>self.stdout FakeMotor('SBO')<block_start>self.client.onecmd('arm')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Invalid set of targets.*')<block_end><with_stmt>self.stdout FakeMotor('SBO')<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.client.onecmd('arm SBO')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Wrong number of arguments.*')<block_end><block_end><def_stmt>test_do_arm_succeed self<block_start><with_stmt>self.stdout FakeMotor('SBO')<as>motor<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.client.onecmd('arm')<line_sep>self.assertEqual(motor.GetStatus().motor_status flags.kMotorStatusArmed)<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Successfully armed.*')<block_end><block_end><def_stmt>test_do_arm_multiple_motors self<block_start><with_stmt>self.stdout FakeMotor('SBI')<as>sbi FakeMotor('PTO')<as>pto<block_start>self.client.onecmd('set_targets SBI PTO')<line_sep>self.client.onecmd('arm')<line_sep>self.assertEqual(sbi.GetStatus().motor_status flags.kMotorStatusArmed)<line_sep>self.assertEqual(pto.GetStatus().motor_status flags.kMotorStatusArmed)<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Successfully armed.*')<block_end><block_end><def_stmt>test_do_disarm self<block_start><with_stmt>self.stdout FakeMotor('SBO')<as>motor<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.client.onecmd('arm')<line_sep>self.client.onecmd('disarm')<line_sep>self.assertEqual(motor.GetStatus().motor_status 
flags.kMotorStatusInit)<block_end><block_end><def_stmt>test_do_set_param self<block_start><with_stmt>self.stdout FakeMotor('SBO')<as>motor<block_start>self.client.onecmd('set_param SBO i_kp 3.14')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Setting i_kp to 3.14 on SBO.*')<line_sep>self.assertAlmostEqual(motor.GetParam('i_kp') 3.14 places=6)<block_end><block_end><def_stmt>test_do_get_param self<block_start><with_stmt>self.stdout FakeMotor('SBO')<block_start>self.client.onecmd('get_param SBO i_kp')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*SBO i_kp: 0[^0-9]*')<block_end><block_end><def_stmt>test_do_run_fail self<block_start><with_stmt>self.stdout FakeMotor('SBO')<block_start>self.client.onecmd('run 1 s')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Invalid set of targets.*')<block_end><with_stmt>self.stdout FakeMotor('SBO')<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.client.onecmd('run 1 s')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Invalid(?s).*status.*')<block_end><block_end>@unittest.skipIf(socket.gethostname().startswith('jenkins-') 'This test is flaky when run on GCE.')<def_stmt>test_do_run_succeed self<block_start><with_stmt>self.stdout FakeMotor('SBO')<as>motor<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.client.onecmd('arm')<line_sep>self.client.onecmd('run 100 s')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Running.*')<line_sep>self.assert_eventually_true(<lambda>:motor.running)<block_end><block_end><def_stmt>test_do_stop_fail self<block_start><with_stmt>self.stdout FakeMotor('SBO')<block_start>self.client.onecmd('stop')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Not running.*')<block_end><block_end>@unittest.skipIf(socket.gethostname().startswith('jenkins-') 'This test is flaky when run on GCE.')<def_stmt>test_do_stop_succeed self<block_start><with_stmt>self.stdout FakeMotor('SBO')<as>motor<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.client.onecmd('arm')<line_sep>self.client.onecmd('run 100 s')<line_sep>self.assert_eventually_true(<lambda>:motor.running)<line_sep>self.client.onecmd('stop')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Run stopped.*')<line_sep>self.assert_eventually_true(<lambda>:<not>motor.running)<block_end><block_end>@unittest.skipIf(socket.gethostname().startswith('jenkins-') 'This test is flaky when run on GCE.')<def_stmt>test_do_set_torque self<block_start><with_stmt>self.stdout FakeMotor('SBI') FakeMotor('SBO')<as>motor<block_start>self.client.onecmd('set_targets SBI')<line_sep>self.client.onecmd('set_targets_dyno SBO')<line_sep>self.client.onecmd('set_speed_limits -3.14 3.14')<line_sep>self.client.onecmd('set_torque 3.14')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Torque desired.*')<line_sep>self.assert_eventually_true(<lambda>:abs(motor.torque-3.14)/3.14<l>_EPS32)<line_sep>self.assert_eventually_true(<lambda>:abs(motor.speed_lower+3.14)/3.14<l>_EPS32)<line_sep>self.assert_eventually_true(<lambda>:abs(motor.speed_upper-3.14)/3.14<l>_EPS32)<block_end><block_end><def_stmt>test_do_set_torque_fail self<block_start><with_stmt>self.stdout FakeMotor('SBO')<block_start>self.client.onecmd('set_torque 3.14')<line_sep>self.assertRegexpMatches(self.stdout.Read() 'No dynos selected. 
Use "set_targets_dyno".')<line_sep>self.client.onecmd('set_targets_dyno SBO')<line_sep>self.client.onecmd('set_torque abc')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Invalid argument.*')<line_sep>self.client.onecmd('set_torque 3.14')<line_sep>self.assertRegexpMatches(self.stdout.Read() 'Omega limits not set. Use "set_speed_limits".')<block_end><block_end>@unittest.skipIf(socket.gethostname().startswith('jenkins-') 'This test is flaky when run on GCE.')<def_stmt>test_do_set_speed_limits self<block_start><with_stmt>self.stdout FakeMotor('SBO')<as>motor<block_start>self.client.onecmd('set_targets_dyno SBO')<line_sep>self.client.onecmd('set_speed_limits -3.14 3.14')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Omega limits set.*')<line_sep>self.assert_eventually_true(<lambda>:abs(motor.speed_lower+3.14)/3.14<l>_EPS32)<line_sep>self.assert_eventually_true(<lambda>:abs(motor.speed_upper-3.14)/3.14<l>_EPS32)<block_end><block_end><def_stmt>test_do_set_speed_limits_fail self<block_start><with_stmt>self.stdout FakeMotor('SBO')<block_start>self.client.onecmd('set_targets_dyno SBO')<line_sep>self.client.onecmd('set_speed_limits abc 20')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Invalid argument.*')<line_sep>self.client.onecmd('set_speed_limits 22 20')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Invalid(?s).*i.e. min value.*')<block_end><block_end>@unittest.skipIf(socket.gethostname().startswith('jenkins-') 'This test is flaky when run on GCE.')<def_stmt>test_do_set_omega self<block_start><with_stmt>self.stdout FakeMotor('SBO')<as>motor<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.client.onecmd('set_omega 3.14')<line_sep>self.assertRegexpMatches(self.stdout.Read() 'Omega desired: 3.14')<line_sep>self.assert_eventually_true(<lambda>:abs(motor.speed_lower-motor.speed_upper)/3.14<l>_EPS32)<line_sep>self.assert_eventually_true(<lambda>:abs(motor.speed_lower-3.14)/3.14<l>_EPS32)<line_sep>self.assert_eventually_true(<lambda>:abs(motor.speed_upper-3.14)/3.14<l>_EPS32)<block_end><block_end>@unittest.skipIf(socket.gethostname().startswith('jenkins-') 'This test is flaky when run on GCE.')<def_stmt>test_do_ramp_omega self<block_start><with_stmt>self.stdout FakeMotor('SBO')<as>motor<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.client.onecmd('ramp_omega 3.14 0.0')<line_sep>self.client.onecmd('arm')<line_sep>self.client.onecmd('run 5s')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Ramping.*')<line_sep>self.assert_eventually_true(<lambda>:abs(motor.speed_lower-3.14)/3.14<l>_EPS32)<line_sep>self.assert_eventually_true(<lambda>:abs(motor.speed_upper-3.14)/3.14<l>_EPS32)<line_sep>self.client.onecmd('ramp_omega 6.28 0.5')<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Ramping.*')<line_sep>self.assert_eventually_true(<lambda>:abs(motor.speed_lower-6.28)/6.28<l>_EPS32)<line_sep>self.assert_eventually_true(<lambda>:abs(motor.speed_upper-6.28)/6.28<l>_EPS32)<block_end><block_end>@unittest.skipIf(socket.gethostname().startswith('jenkins-') 'This test is flaky when run on GCE.')<def_stmt>test_do_set_command_function_python_succeed self<block_start><with_stmt>tempfile.NamedTemporaryFile(suffix='.py')<as>python_file<block_start>data_length=3<times>len(motor_client.MOTORS)<line_sep>python_file.write(textwrap.dedent(""" t_step = 0.1 t_end = 1.0 def Cmd(t): command = [0.0] * %d command[0] = 3.14 command[8] = 3.14 command[16] = 3.14 return 
command"""%data_length))<line_sep>python_file.flush()<with_stmt>self.stdout FakeMotor('SBO')<as>motor<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.client.onecmd('set_command_function '+python_file.name)<line_sep>self.assertRegexpMatches(self.stdout.Read() r'(?s).*Using %s to generate command '<concat>r'profile.*'%python_file.name)<line_sep>self.client.onecmd('arm')<line_sep>self.client.onecmd('run 10s')<line_sep>self.assert_eventually_true(<lambda>:abs(motor.torque-3.14)/3.14<l>_EPS32)<line_sep>self.assert_eventually_true(<lambda>:abs(motor.speed_lower-3.14)/3.14<l>_EPS32)<line_sep>self.assert_eventually_true(<lambda>:abs(motor.speed_upper-3.14)/3.14<l>_EPS32)<block_end><block_end><block_end><def_stmt>test_do_set_command_function_python_fail self<block_start><with_stmt>tempfile.NamedTemporaryFile(suffix='.py')<as>python_file<block_start>python_file.write('this will raise a syntax error')<line_sep>python_file.flush()<with_stmt>self.stdout<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.client.onecmd('set_command_function '+python_file.name)<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Generation of lookup table from %s '<concat>'failed.*'%python_file.name)<block_end><block_end><block_end>@unittest.skipIf(socket.gethostname().startswith('jenkins-') 'This test is flaky when run on GCE.')<def_stmt>test_do_set_command_function_text_succeed self<block_start><with_stmt>tempfile.NamedTemporaryFile(suffix='.txt')<as>text_file<block_start>text_file.write(textwrap.dedent(""" 0.0 3.14 1 1 1 1 1 1 1 3.14 1 1 1 1 1 1 1 3.14 1 1 1 1 1 1 1 100 3.14 1 1 1 1 1 1 1 3.14 1 1 1 1 1 1 1 3.14 1 1 1 1 1 1 1"""[1:]))<line_sep>text_file.flush()<with_stmt>self.stdout FakeMotor('SBO')<as>motor<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.client.onecmd('set_command_function '+text_file.name)<line_sep>self.assertRegexpMatches(self.stdout.Read() r'(?s).*Using interpolated values from %s '<concat>r'for command profile.*'%text_file.name)<line_sep>self.client.onecmd('arm')<line_sep>self.client.onecmd('run 10s')<line_sep>self.assert_eventually_true(<lambda>:abs(motor.torque-3.14)/3.14<l>_EPS32)<block_end><block_end><block_end><def_stmt>test_do_set_command_function_text_fail self<block_start><with_stmt>tempfile.NamedTemporaryFile(suffix='.txt')<as>text_file<block_start>text_file.write('numpy.load will raise ValueError')<line_sep>text_file.flush()<with_stmt>self.stdout<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.client.onecmd('set_command_function '+text_file.name)<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Invalid input text file: %s.*'%text_file.name)<block_end><block_end><block_end>@unittest.skipIf(socket.gethostname().startswith('jenkins-') 'This test is flaky when run on GCE.')<def_stmt>test_do_set_command_function_limit_fail self<block_start><with_stmt>tempfile.NamedTemporaryFile(suffix='.txt')<as>text_file<block_start>text_file.write(textwrap.dedent(""" 0.0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 100 2000 1 1 1 1 1 1 1 2000 1 1 1 1 1 1 1 2000 1 1 1 1 1 1 1"""[1:]))<line_sep>text_file.flush()<with_stmt>self.stdout FakeMotor('SBO')<as>motor<block_start>self.client.onecmd('set_command_function '+text_file.name)<line_sep>self.assertRegexpMatches(self.stdout.Read() r'(?s).*Extreme(?s).*outside of '<concat>r'limits \[%f, %f\] detected.*'%(motor_client.TORQUE_MIN_LIMIT motor_client.TORQUE_MAX_LIMIT))<line_sep>self.client.onecmd('arm')<line_sep>self.client.onecmd('run 
1s')<line_sep>self.assert_eventually_true(<lambda>:abs(motor.torque)<l>_EPS32)<block_end><block_end><block_end>@unittest.skipIf(socket.gethostname().startswith('jenkins-') 'This test is flaky when run on GCE.')<def_stmt>test_print_new_errors_asynchronously self<block_start>regex=re.compile('(?s).*SBO: kMotorErrorOverSpeed'<concat>' | kMotorErrorOverVoltage.*')<with_stmt>FakeMotor('SBO')<as>motor<block_start>motor.SetError(flags.kMotorErrorOverVoltage|flags.kMotorErrorOverSpeed)<with_stmt>self.stdout<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.assert_eventually_true(<lambda>:regex.match(self.stdout.Read()))<line_sep>other_motors=[m<for>m motor_client.MOTORS<if>m<ne>'SBO']<line_sep># Make sure we only print the motor with an error. self.assertFalse(re.match('(?s).*(%s).*'%'|'.join(other_motors) self.stdout.Read()))<block_end><block_end><block_end>@unittest.skipIf(socket.gethostname().startswith('jenkins-') 'This test is flaky when run on GCE.')<def_stmt>test_print_new_warnings_asynchronously self<block_start>regex=re.compile('(?s).*SBO: kMotorWarningOverTempBoard'<concat>' | kMotorWarningOverTempStatorCore.*')<with_stmt>FakeMotor('SBO')<as>motor<block_start>motor.SetWarning(flags.kMotorWarningOverTempBoard|flags.kMotorWarningOverTempStatorCore)<with_stmt>self.stdout<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.assert_eventually_true(<lambda>:regex.match(self.stdout.Read()))<line_sep>other_motors=[m<for>m motor_client.MOTORS<if>m<ne>'SBO']<line_sep># Make sure we only print the motor with an error. self.assertFalse(re.match('(?s).*(%s).*'%'|'.join(other_motors) self.stdout.Read()))<block_end><block_end><block_end>@unittest.skipIf(socket.gethostname().startswith('jenkins-') 'This test is flaky when run on GCE.')<def_stmt>test_do_get_errors self<block_start>regex=re.compile('(?s).*SBO: kMotorErrorOverSpeed'<concat>' | kMotorErrorOverVoltage.*')<with_stmt>FakeMotor('SBO')<as>motor<block_start>motor.SetError(flags.kMotorErrorOverVoltage|flags.kMotorErrorOverSpeed)<with_stmt>self.stdout<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.assert_eventually_true(<lambda>:regex.match(self.stdout.Read()))<block_end><with_stmt>self.stdout# Reset stdout contents. <block_start>self.client.onecmd('get_errors')<line_sep>self.assertRegexpMatches(self.stdout.Read() regex)<block_end><block_end><block_end>@unittest.skipIf(socket.gethostname().startswith('jenkins-') 'This test is flaky when run on GCE.')<def_stmt>test_do_get_warnings self<block_start>regex=re.compile('(?s).*SBO: kMotorWarningOverTempBoard'<concat>' | kMotorWarningOverTempStatorCore.*')<with_stmt>FakeMotor('SBO')<as>motor<block_start>motor.SetWarning(flags.kMotorWarningOverTempBoard|flags.kMotorWarningOverTempStatorCore)<with_stmt>self.stdout<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.assert_eventually_true(<lambda>:regex.match(self.stdout.Read()))<block_end><with_stmt>self.stdout# Reset stdout contents. 
<block_start>self.client.onecmd('get_errors')<line_sep>self.assertRegexpMatches(self.stdout.Read() regex)<block_end><block_end><block_end>@unittest.skipIf(socket.gethostname().startswith('jenkins-') 'This test is flaky when run on GCE.')<def_stmt>test_do_clear_errors self<block_start>regex=re.compile('(?s).*SBO: kMotorErrorOverVoltage.*')<with_stmt>self.stdout FakeMotor('SBO')<as>motor<block_start>motor.SetError(flags.kMotorErrorOverVoltage)<line_sep>self.client.onecmd('set_targets SBO')<line_sep>self.assert_eventually_true(<lambda>:regex.match(self.stdout.Read()))<line_sep>self.client.onecmd('clear_errors')<line_sep>self.assert_eventually_true(<lambda>:motor.GetError()<eq>flags.kMotorErrorNone)<block_end><block_end><def_stmt>test_do_source_fail self<block_start><with_stmt>tempfile.NamedTemporaryFile()<as>source_file<block_start>source_file.write(textwrap.dedent(""" set_targets SBO arm run 100 s"""[1:]))<line_sep>source_file.flush()<with_stmt>self.stdout FakeMotor('SBO')<block_start>self.client.onecmd('source '+source_file.name)<line_sep>regex=re.compile('(?s).*Only "set_param"-like commands.*')<line_sep>self.assertRegexpMatches(self.stdout.Read() regex)<block_end><block_end><block_end><def_stmt>test_do_source_succeed self<block_start><with_stmt>tempfile.NamedTemporaryFile()<as>source_file<block_start>source_file.write(textwrap.dedent(""" set_param SBO i_kp 3.14 set_param SBO cos_offset 0.2 # This is a comment. set_param SBO iq_lower_limit -1e-3 set_param SBO iq_upper_limit 245""")[1:])<line_sep>source_file.flush()<with_stmt>self.stdout FakeMotor('SBO')<as>motor<block_start>self.client.onecmd('source '+source_file.name)<line_sep>self.assertAlmostEqual(motor.GetParam('i_kp') 3.14 places=6)<line_sep>self.assertAlmostEqual(motor.GetParam('cos_offset') 0.2 places=6)<line_sep>self.assertAlmostEqual(motor.GetParam('iq_lower_limit') -1e-3 places=6)<line_sep>self.assertAlmostEqual(motor.GetParam('iq_upper_limit') 245 places=6)<block_end><block_end><block_end><def_stmt>test_do_source_track_errors self<block_start><with_stmt>tempfile.NamedTemporaryFile()<as>source_file<block_start>source_file.write(textwrap.dedent(""" set_param SBO i_kp 3.14 set_param SBO foo 0.2 set_param SBO omega_kp 0.1 set_param SBO bar 0.5""")[1:])<line_sep>source_file.flush()<with_stmt>self.stdout FakeMotor('SBO')<block_start>self.client.onecmd('source '+source_file.name)<line_sep>self.assertRegexpMatches(self.stdout.Read() '(?s).*Errors encountered.*Line 2.*foo.*Line 4.*bar.*')<block_end><block_end><block_end>@unittest.skipIf(socket.gethostname().startswith('jenkins-') 'This test is flaky when run on GCE.')<def_stmt>test_stop_running_on_error self<block_start>regex=re.compile('(?s).*SBO: kMotorErrorOverVoltage.*')<with_stmt>FakeMotor('SBO')<as>motor<block_start><with_stmt>self.stdout<block_start>self.client.onecmd('set_targets SBO')<line_sep>self.client.onecmd('arm')<line_sep>self.client.onecmd('run 1000 s')<line_sep>motor.SetError(flags.kMotorErrorOverVoltage)<line_sep>self.assert_eventually_true(<lambda>:regex.match(self.stdout.Read()))<line_sep>self.assert_eventually_true(<lambda>:<not>motor.running)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_stmt>os<import_stmt>subprocess<line_sep>os.environ['SMART_OPEN_TEST_MISSING_DEPS']='1'<line_sep>command=['pytest' 'smart_open/tests/test_package.py' '-v' '--cov' 'smart_open' '--cov-report' 'term-missing' ]<line_sep>subprocess.check_call(command)<line_sep>
<import_stmt>os<line_sep>os.environ['TF_CPP_MIN_LOG_LEVEL']='3'<import_stmt>tensorflow<as>tf<import_stmt>warnings<line_sep>warnings.filterwarnings("ignore")<import_stmt>logging<line_sep>logging.getLogger('tensorflow').disabled=<true><import_stmt>numpy<as>np<class_stmt>DKLITE(object)<block_start><def_stmt>__init__ self input_dim output_dim num_hidden=50 num_layers=2 learning_rate=0.001 reg_var=1.0 reg_rec=1.0<block_start>self.num_layers=num_layers<line_sep>self.output_dim=output_dim<line_sep>self.num_hidden=num_hidden<line_sep>self.input_dim=input_dim<line_sep>self.size_z=num_hidden<line_sep>self.ml_primal={}<line_sep>self.ker_inv={}<line_sep>self.params={}<line_sep>self.mean={}<line_sep>self.num={}<line_sep>''' Initialize parameter weight '''<line_sep>self.params=self.initialize_weights()<line_sep>self.mu=tf.reduce_mean(self.T)<line_sep>self.Z_train=self.Encoder(self.X)<line_sep>self.Z_test=self.Encoder(self.X_u)<line_sep>self.loss_1=tf.reduce_mean(tf.reduce_sum(tf.square(self.X-self.Decoder(self.Z_train)) axis=1))<line_sep>Z_0=tf.gather(self.Z_train tf.where(self.T<l>0.5)[: 0])<line_sep>Y_0=tf.gather(self.Y tf.where(self.T<l>0.5)[: 0])<line_sep>Z_1=tf.gather(self.Z_train tf.where(self.T<g>0.5)[: 0])<line_sep>Y_1=tf.gather(self.Y tf.where(self.T<g>0.5)[: 0])<line_sep>mean_0=tf.reduce_mean(Y_0)<line_sep>mean_1=tf.reduce_mean(Y_1)<line_sep>Y_0=(Y_0-mean_0)<line_sep>Y_1=(Y_1-mean_1)<line_sep>self.GP_NN(Y_0 Z_0 0)<line_sep>self.GP_NN(Y_1 Z_1 1)<line_sep>self.var_0=tf.reduce_mean(tf.diag_part(tf.matmul(Z_1 tf.matmul(self.ker_inv['0'] tf.transpose(Z_1)))))<line_sep>self.var_1=tf.reduce_mean(tf.diag_part(tf.matmul(Z_0 tf.matmul(self.ker_inv['1'] tf.transpose(Z_0)))))<line_sep>self.ele_var_0_tr=tf.diag_part(tf.matmul(self.Z_train tf.matmul(self.ker_inv['0'] tf.transpose(self.Z_train))))<line_sep>self.ele_var_1_tr=tf.diag_part(tf.matmul(self.Z_train tf.matmul(self.ker_inv['1'] tf.transpose(self.Z_train))))<line_sep>self.ele_var_0_te=tf.diag_part(tf.matmul(self.Z_test tf.matmul(self.ker_inv['0'] tf.transpose(self.Z_test))))<line_sep>self.ele_var_1_te=tf.diag_part(tf.matmul(self.Z_test tf.matmul(self.ker_inv['1'] tf.transpose(self.Z_test))))<line_sep>pred_tr_0=tf.matmul(self.Z_train self.mean['0'])+mean_0<line_sep>pred_tr_1=tf.matmul(self.Z_train self.mean['1'])+mean_1<line_sep>pred_te_0=tf.matmul(self.Z_test self.mean['0'])+mean_0<line_sep>pred_te_1=tf.matmul(self.Z_test self.mean['1'])+mean_1<line_sep>self.Y_train=tf.concat([pred_tr_0 pred_tr_1] axis=1)<line_sep>self.Y_test=tf.concat([pred_te_0 pred_te_1] axis=1)<line_sep>self.loss_0=self.ml_primal['0']+self.ml_primal['1']<line_sep>self.prediction_loss=self.ml_primal['0']+self.ml_primal['1']+reg_var<times>(self.var_0+self.var_1)+reg_rec<times>self.loss_1<line_sep>self.optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.prediction_loss)<line_sep>self.sess=tf.Session()<line_sep>self.sess.run(tf.global_variables_initializer())<block_end><def_stmt>element_var self X Y T X_u<block_start>var_0_tr,var_1_tr,var_0_te,var_1_te=self.sess.run([self.ele_var_0_tr self.ele_var_1_tr self.ele_var_0_te self.ele_var_1_te] feed_dict={self.X:X self.X_u:X_u self.Y:Y self.T:T})<line_sep><return>var_0_tr var_1_tr var_0_te var_1_te<block_end><def_stmt>embed self X Y T<block_start>Z=self.sess.run(self.Z_train feed_dict={self.X:X self.Y:Y self.T:T})<line_sep><return>Z<block_end><def_stmt>fit self X Y T num_iteration<block_start>loss_list=[]<for_stmt>i range(num_iteration)<block_start>loss,_=self.sess.run([self.prediction_loss self.optimizer] 
feed_dict={self.X:X self.Y:Y self.T:T})<line_sep>loss_list.append(np.sum(loss))<line_sep>diff_list=np.abs(np.diff(loss_list))<if_stmt>i<g>50<and>np.abs(np.mean(diff_list[-10:])-np.mean(diff_list[-40:-10]))<l>np.std(diff_list[-40:-10])<block_start><break><block_end><block_end><block_end><def_stmt>pred self X Y T X_u<block_start>Y_hat_train,Y_hat_test=self.sess.run([self.Y_train self.Y_test] feed_dict={self.X:X self.X_u:X_u self.Y:Y self.T:T})<line_sep><return>Y_hat_train Y_hat_test<block_end><def_stmt>destroy_graph self<block_start>tf.reset_default_graph()<block_end><def_stmt>Encoder self X<block_start>X_h=tf.nn.elu(tf.matmul(X self.params['e_w_in'])+self.params['e_b_in'])<for_stmt>layer_i range(self.num_layers)<block_start>X_h=tf.nn.elu(tf.matmul(X_h self.params['e_w_'+str(layer_i)])+self.params['e_b_'+str(layer_i)])<block_end>Z=tf.nn.elu(tf.matmul(X_h self.params['e_w_'+str(self.num_layers)])+self.params['e_b_'+str(self.num_layers)])<line_sep><return>Z<block_end><def_stmt>Decoder self Z<block_start>Z_pred=tf.nn.elu(tf.matmul(Z self.params['d_w_in'])+self.params['d_b_in'])<for_stmt>layer_i range(self.num_layers)<block_start>Z_pred=tf.nn.elu(tf.matmul(Z_pred self.params['d_w_'+str(layer_i)])+self.params['d_b_'+str(layer_i)])<block_end>X_p=tf.matmul(Z_pred self.params['d_w_'+str(self.num_layers)]+self.params['d_b_'+str(self.num_layers)])<line_sep><return>X_p<block_end><def_stmt>GP_NN self Y_f Z_f index<block_start>beta=tf.ones([1 1] tf.float32)<line_sep>lam=1000<times>tf.ones([1 1] tf.float32)<line_sep>r=beta/lam<line_sep>self.DD=tf.shape(Z_f)[1]<line_sep>phi_phi=tf.matmul(tf.transpose(Z_f) Z_f)<line_sep>Ker=r<times>phi_phi+tf.eye(tf.shape(Z_f)[1] dtype=tf.float32)<line_sep>L_matrix=tf.cholesky(Ker)<line_sep>L_inv_reduce=tf.linalg.triangular_solve(L_matrix rhs=tf.eye(self.DD dtype=tf.float32))<line_sep>L_y=tf.matmul(L_inv_reduce tf.matmul(tf.transpose(Z_f) Y_f))<line_sep>self.ker_inv[str(index)]=tf.matmul(tf.transpose(L_inv_reduce) L_inv_reduce)/lam<line_sep>self.mean[str(index)]=r<times>tf.matmul(tf.transpose(L_inv_reduce) L_y)<line_sep>term1=-tf.reduce_mean(tf.square(L_y))<line_sep>#term2 = tf.log(tf.linalg.diag_part(L_matrix)) / ((1-index)*tf.reduce_sum(1 - self.T) + (index)* tf.reduce_sum(self.T)) self.ml_primal[str(index)]=term1<block_end>#+ term2 <def_stmt>initialize_weights self<block_start>self.X=tf.placeholder(tf.float32 [<none> self.input_dim])<line_sep>self.X_u=tf.placeholder(tf.float32 [<none> self.input_dim])<line_sep>self.Y=tf.placeholder(tf.float32 [<none> 1])<line_sep>self.T=tf.placeholder(tf.float32 [<none> 1])<line_sep>all_weights={}<line_sep>''' Input layer of the encoder '''<line_sep>name_wi='e_w_in'<line_sep>all_weights[name_wi]=tf.get_variable(name=name_wi shape=[self.input_dim self.num_hidden] trainable=<true>)<line_sep>name_bi='e_b_in'<line_sep>all_weights[name_bi]=tf.get_variable(name=name_bi shape=[self.num_hidden] trainable=<true>)<line_sep>''' Hidden layer of the encoder '''<for_stmt>layer_i range(self.num_layers)<block_start>name_wi='e_w_'+str(layer_i)<line_sep>all_weights[name_wi]=tf.get_variable(name=name_wi shape=[self.num_hidden self.num_hidden] trainable=<true>)<line_sep>name_bi='e_b_'+str(layer_i)<line_sep>all_weights[name_bi]=tf.get_variable(name=name_bi shape=[self.num_hidden] trainable=<true>)<block_end>''' Final layer of the encoder '''<line_sep>name_wi='e_w_'+str(self.num_layers)<line_sep>all_weights[name_wi]=tf.get_variable(name=name_wi shape=[self.num_hidden self.size_z] 
trainable=<true>)<line_sep>name_bi='e_b_'+str(self.num_layers)<line_sep>all_weights[name_bi]=tf.get_variable(name=name_bi shape=[self.size_z] trainable=<true>)<line_sep>name_wi='e_w_out_0'<line_sep>all_weights[name_wi]=tf.get_variable(name=name_wi shape=[self.size_z self.output_dim] trainable=<true>)<line_sep>name_bi='e_b_out_0'<line_sep>all_weights[name_bi]=tf.get_variable(name=name_bi shape=[self.output_dim] trainable=<true>)<line_sep>name_wi='e_w_out_1'<line_sep>all_weights[name_wi]=tf.get_variable(name=name_wi shape=[self.size_z self.output_dim] trainable=<true>)<line_sep>name_bi='e_b_out_1'<line_sep>all_weights[name_bi]=tf.get_variable(name=name_bi shape=[self.output_dim] trainable=<true>)<line_sep>''' Input layer of the decoder '''<line_sep>name_wi='d_w_in'<line_sep>all_weights[name_wi]=tf.get_variable(name=name_wi shape=[self.size_z self.num_hidden] trainable=<true>)<line_sep>name_bi='d_b_in'<line_sep>all_weights[name_bi]=tf.get_variable(name=name_bi shape=[self.num_hidden] trainable=<true>)<line_sep>''' Hidden layer of the decoder '''<for_stmt>layer_i range(self.num_layers)<block_start>name_wi='d_w_'+str(layer_i)<line_sep>all_weights[name_wi]=tf.get_variable(name=name_wi shape=[self.num_hidden self.num_hidden] trainable=<true>)<line_sep>name_bi='d_b_'+str(layer_i)<line_sep>all_weights[name_bi]=tf.get_variable(name=name_bi shape=[self.num_hidden] trainable=<true>)<block_end>''' Final layer of the decoder '''<line_sep>name_wi='d_w_'+str(self.num_layers)<line_sep>all_weights[name_wi]=tf.get_variable(name=name_wi shape=[self.num_hidden self.input_dim] trainable=<true>)<line_sep>name_bi='d_b_'+str(self.num_layers)<line_sep>all_weights[name_bi]=tf.get_variable(name=name_bi shape=[(self.input_dim)] trainable=<true>)<line_sep><return>all_weights<block_end><block_end>
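A minimal usage sketch for the DKLITE model defined above, assuming NumPy arrays X (covariates), T (binary treatment indicators) and Y (outcomes) with the shapes expected by the placeholders in initialize_weights(); the sample sizes and dimensions below are illustrative only, and the class is written against the TensorFlow 1.x session API (tf.Session, tf.placeholder), so a TF1-compatible environment is assumed.

    import numpy as np

    n, d = 200, 10
    X = np.random.randn(n, d).astype(np.float32)          # training covariates [n, input_dim]
    T = (np.random.rand(n, 1) > 0.5).astype(np.float32)   # binary treatment indicator [n, 1]
    Y = np.random.randn(n, 1).astype(np.float32)          # observed outcome [n, 1]
    X_test = np.random.randn(50, d).astype(np.float32)    # held-out covariates

    model = DKLITE(input_dim=d, output_dim=1)
    model.fit(X, Y, T, num_iteration=1000)

    # Each prediction has two columns: potential outcome under control (column 0)
    # and under treatment (column 1), following the concat order in __init__.
    Y_hat_train, Y_hat_test = model.pred(X, Y, T, X_test)
    model.destroy_graph()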
''' This utility is for Custom Exceptions. a) Stop_Test_Exception You can raise a generic exception using just a string. This is particularly useful when you want to end a test midway based on some condition. '''<class_stmt>Stop_Test_Exception(Exception)<block_start><def_stmt>__init__ self message<block_start>self.message=message<block_end><def_stmt>__str__ self<block_start><return>self.message<block_end><block_end>
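A short usage sketch for Stop_Test_Exception; the run_step helper below is invented purely to illustrate ending a test midway when a condition fails.

    def run_step(step_passed):
        # Abort the remainder of the test with a human-readable reason.
        if not step_passed:
            raise Stop_Test_Exception("Precondition failed; stopping this test early")

    try:
        run_step(step_passed=False)
    except Stop_Test_Exception as exc:
        print(exc)  # __str__ returns the message that was passed in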
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>PhysicsTools.PatAlgos.recoLayer0.jetCorrFactors_cfi *<import_from_stmt>JetMETCorrections.Configuration.JetCorrectionServicesAllAlgos_cff *<line_sep>## for scheduled mode patJetCorrectionsTask=cms.Task(patJetCorrFactors)<line_sep>patJetCorrections=cms.Sequence(patJetCorrectionsTask)<line_sep>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>mergedtruth=cms.EDProducer("TrackingTruthProducer" mixLabel=cms.string('mix') simHitLabel=cms.string('g4SimHits') volumeRadius=cms.double(1200.0) vertexDistanceCut=cms.double(0.003) volumeZ=cms.double(3000.0) mergedBremsstrahlung=cms.bool(<true>) removeDeadModules=cms.bool(<false>) HepMCDataLabels=cms.vstring('generatorSmeared' 'generator' 'PythiaSource' 'source') useMultipleHepMCLabels=cms.bool(<false>) simHitCollections=cms.PSet(pixel=cms.vstring('g4SimHitsTrackerHitsPixelBarrelLowTof' 'g4SimHitsTrackerHitsPixelBarrelHighTof' 'g4SimHitsTrackerHitsPixelEndcapLowTof' 'g4SimHitsTrackerHitsPixelEndcapHighTof') tracker=cms.vstring('g4SimHitsTrackerHitsTIBLowTof' 'g4SimHitsTrackerHitsTIBHighTof' 'g4SimHitsTrackerHitsTIDLowTof' 'g4SimHitsTrackerHitsTIDHighTof' 'g4SimHitsTrackerHitsTOBLowTof' 'g4SimHitsTrackerHitsTOBHighTof' 'g4SimHitsTrackerHitsTECLowTof' 'g4SimHitsTrackerHitsTECHighTof') muon=cms.vstring('g4SimHitsMuonDTHits' 'g4SimHitsMuonCSCHits' 'g4SimHitsMuonRPCHits')))<line_sep>trackingParticles=cms.Sequence(mergedtruth)<line_sep>
""" The ``ui.NamedFrame`` class is a variation of the ``ui.Frame`` which lets you assign a name to the frame. Naming a frame allows you to refer to that frame by name in Javascript code, and as the target for a hyperlink. """<import_from_stmt>pyjamas.ui.SimplePanel SimplePanel<import_from_stmt>pyjamas.ui.VerticalPanel VerticalPanel<import_from_stmt>pyjamas.ui.NamedFrame NamedFrame<import_from_stmt>pyjamas.ui.HTML HTML<class_stmt>NamedFrameDemo(SimplePanel)<block_start><def_stmt>__init__ self<block_start>SimplePanel.__init__(self)<line_sep>vPanel=VerticalPanel(Spacing=5)<line_sep>frame=NamedFrame("myFrame" Width="100%" Height="200px")<line_sep>vPanel.add(frame)<line_sep>vPanel.add(HTML('<a href="http://google.com" target="myFrame">Google</a>'))<line_sep>vPanel.add(HTML('<a href="http://yahoo.com" target="myFrame">Yahoo</a>'))<line_sep>vPanel.add(HTML('<a href="http://pyjs.org" target="myFrame">Pyjamas</a>'))<line_sep>self.add(vPanel)<block_end><block_end>
"""Specify the jobs to run via config file. Binomial bridge bandit experiment. Binomial bridge with only binary reward at the end --> no conjugate update. See Figure 9 https://arxiv.org/pdf/1707.02038.pdf """<import_stmt>collections<import_stmt>functools<import_from_stmt>base.config_lib Config<import_from_stmt>base.experiment ExperimentNoAction<import_from_stmt>graph.agent_indep_binary BootstrapIndependentBBWithBinaryReward<import_from_stmt>graph.agent_indep_binary LaplaceIndependentBBWithBinaryReward<import_from_stmt>graph.agent_indep_binary StochasticLangevinMCMCIndependentBBWithBinaryReward<import_from_stmt>graph.agent_indep_binary EpsilonGreedyIndependentBBWithBinaryReward<import_from_stmt>graph.env_graph_bandit IndependentBinomialBridgeWithBinaryReward<def_stmt>get_config <block_start>"""Generates the config for the experiment."""<line_sep>name='graph_indep_binary_new'<line_sep>n_stages=20<line_sep>shape=2<line_sep>scale=0.5<line_sep>tol=0.001<line_sep>alpha=0.2<line_sep>beta=0.5<line_sep>langevin_batch_size=100<line_sep>langevin_step_count=200<line_sep>langevin_step_size=0.0005<line_sep>epsilon=0<line_sep>agents=collections.OrderedDict([('Langevin TS' functools.partial(EpsilonGreedyIndependentBBWithBinaryReward n_stages epsilon shape scale tol alpha beta))])<line_sep># agents = collections.OrderedDict( # [('Langevin TS', # functools.partial(StochasticLangevinMCMCIndependentBBWithBinaryReward, # n_stages, shape, scale, tol, alpha, beta, langevin_batch_size, # langevin_step_count, langevin_step_size)), # ('bootstrap TS', # functools.partial(BootstrapIndependentBBWithBinaryReward, # n_stages, shape, scale, tol, alpha, beta)), # ('Laplace TS', # functools.partial(LaplaceIndependentBBWithBinaryReward, # n_stages, shape, scale, tol, alpha, beta))] # ) environments=collections.OrderedDict([('env' functools.partial(IndependentBinomialBridgeWithBinaryReward n_stages shape scale))])<line_sep>experiments=collections.OrderedDict([(name ExperimentNoAction)])<line_sep>n_steps=500<line_sep>n_seeds=1000<line_sep>config=Config(name agents environments experiments n_steps n_seeds)<line_sep><return>config<block_end>
# -*- coding: utf-8 -*- """ @File : modules.py @Time : 2020/4/18 上午8:28 @Author : yizuotian @Description : """<import_from_stmt>typing List<import_from_stmt>activations *<import_from_stmt>layers fc_forward fc_backward global_avg_pooling_forward flatten_forward flatten_backward<import_from_stmt>layers_v2 conv_forward conv_backward max_pooling_forward max_pooling_backward global_avg_pooling_backward<import_from_stmt>losses *<import_from_stmt>optimizers *<line_sep># pyximport.install() # from clayers import * <class_stmt>BaseModule(object)<block_start><def_stmt>__init__ self name=''<block_start>""" :param name: 层名 """<line_sep>self.name=name<line_sep>self.weights=dict()# 权重参数字典 self.gradients=dict()# 梯度字典 self.in_features=<none><block_end># 输入的feature map <def_stmt>forward self x<block_start><pass><block_end><def_stmt>backward self in_gradient<block_start><pass><block_end><def_stmt>update_gradient self lr<block_start><pass><block_end><def_stmt>load_weights self weights<block_start>""" 加载权重 :param weights: :return: """<for_stmt>key self.weights.keys()<block_start>self.weights[key]=weights[key]<block_end><block_end><block_end><class_stmt>Model(BaseModule)<block_start>""" 网络模型 """<def_stmt>__init__ self layers:List[BaseModule] **kwargs<block_start>super(Model self).__init__(**kwargs)<line_sep>self.layers=layers<line_sep># 收集所有权重和梯度 <for_stmt>l self.layers<block_start>self.weights.update(l.weights)<line_sep>self.gradients.update(l.gradients)<block_end><block_end><def_stmt>forward self x<block_start><for_stmt>l self.layers<block_start>x=l.forward(x)<line_sep># print('forward layer:{},feature:{}'.format(l.name, np.max(x))) <block_end># 网络结果返回 <return>x<block_end><def_stmt>backward self in_gradient# 反向传播 <block_start><for_stmt>l self.layers[::-1]<block_start>in_gradient=l.backward(in_gradient)<line_sep># print('backward layer:{},gradient:{}'.format(l.name, np.max(in_gradient))) <block_end><block_end><def_stmt>update_gradient self lr<block_start><for_stmt>l self.layers<block_start>l.update_gradient(lr)<block_end><block_end><def_stmt>load_weights self weights<block_start>""" 加载模型权重 :param weights: :return: """<line_sep># 逐层加载权重 <for_stmt>l self.layers<block_start>l.load_weights(weights)<block_end><block_end><block_end><class_stmt>Linear(BaseModule)<block_start>""" 全连接层 """<def_stmt>__init__ self in_units out_units **kwargs<block_start>""" :param in_units: 输入神经元数 :param out_units: 输出神经元数 """<line_sep>super(Linear self).__init__(**kwargs)<line_sep># 权重参数 weight=np.random.randn(in_units out_units)<times>np.sqrt(2/in_units)<line_sep>bias=np.zeros(out_units)<line_sep># 权重对应的梯度 g_weight=np.zeros_like(weight)<line_sep>g_bias=np.zeros_like(bias)<line_sep># 权重和梯度的字典 self.weights={"{}_weight".format(self.name):weight "{}_bias".format(self.name):bias}<line_sep>self.gradients={"{}_weight".format(self.name):g_weight "{}_bias".format(self.name):g_bias}<block_end>@property<def_stmt>weight self<block_start><return>self.weights["{}_weight".format(self.name)]<block_end>@property<def_stmt>bias self<block_start><return>self.weights["{}_bias".format(self.name)]<block_end><def_stmt>set_gradient self name gradient<block_start>""" 更新梯度 :param name: weight 或 bias 中一个 :param gradient: :return: """<line_sep>self.gradients["{}_{}".format(self.name name)]=gradient<block_end><def_stmt>forward self x<block_start>""" :param x: [B,in_units] :return output: [B,out_units] """<line_sep>self.in_features=x<line_sep>output=fc_forward(x self.weight self.bias)<line_sep><return>output<block_end><def_stmt>backward self 
in_gradient<block_start>""" 梯度反向传播 :param in_gradient: 后一层传递过来的梯度,[B,out_units] :return out_gradient: 传递给前一层的梯度,[B,in_units] """<line_sep>g_weight,g_bias,out_gradient=fc_backward(in_gradient self.weight self.in_features)<line_sep>self.set_gradient('weight' g_weight)<line_sep>self.set_gradient('bias' g_bias)<line_sep><return>out_gradient<block_end><def_stmt>update_gradient self lr<block_start>""" 更新梯度 :param lr: :return: """<line_sep>self.weight<augsub>self.g_weight<times>lr<line_sep>self.bias<augsub>self.g_bias<times>lr<block_end><block_end><class_stmt>Conv2D(BaseModule)<block_start>""" 2D卷积层 """<def_stmt>__init__ self in_filters out_filters kernel=(3 3) padding=(1 1) stride=(1 1) **kwargs<block_start>super(Conv2D self).__init__(**kwargs)<line_sep>self.in_filters=in_filters<line_sep>self.out_filters=out_filters<line_sep>self.kernel=kernel<line_sep>self.padding=padding<line_sep>self.stride=stride<line_sep># 权重参数 fan_in=in_filters<times>kernel[0]<times>kernel[1]# 输入参数量 fan_out=out_filters<times>kernel[0]<times>kernel[1]# 输入参数量 weight=np.random.randn(in_filters out_filters *kernel)<times>np.sqrt(2/(fan_in+fan_out))<line_sep>bias=np.zeros(out_filters)<line_sep># 梯度 g_weight=np.zeros_like(weight)<line_sep>g_bias=np.zeros_like(bias)<line_sep># 权重和梯度的字典 self.weights={"{}_weight".format(self.name):weight "{}_bias".format(self.name):bias}<line_sep>self.gradients={"{}_weight".format(self.name):g_weight "{}_bias".format(self.name):g_bias}<block_end>@property<def_stmt>weight self<block_start><return>self.weights["{}_weight".format(self.name)]<block_end>@property<def_stmt>bias self<block_start><return>self.weights["{}_bias".format(self.name)]<block_end><def_stmt>set_gradient self name gradient<block_start>""" 更新梯度 :param name: weight 或 bias 中一个 :param gradient: :return: """<line_sep>self.gradients["{}_{}".format(self.name name)]=gradient<block_end><def_stmt>forward self x<block_start>""" :param x: [B,in_filters,H,W] :return output: [B,out_filters,H,W] """<line_sep>self.in_features=x<line_sep>output=conv_forward(x self.weight self.bias self.padding self.stride)<line_sep><return>output<block_end><def_stmt>backward self in_gradient<block_start>""" :param in_gradient: 后一层传递过来的梯度,[B,out_filters,H,W] :return out_gradient: 传递给前一层的梯度,[B,in_filters,H,W] """<line_sep>g_weight,g_bias,out_gradient=conv_backward(in_gradient self.weight self.in_features self.padding self.stride)<line_sep>self.set_gradient('weight' g_weight)<line_sep>self.set_gradient('bias' g_bias)<line_sep><return>out_gradient<block_end><def_stmt>update_gradient self lr<block_start>self.weight<augsub>self.g_weight<times>lr<line_sep>self.bias<augsub>self.g_bias<times>lr<block_end><block_end><class_stmt>ReLU(BaseModule)<block_start><def_stmt>__init__ self **kwargs<block_start>super(ReLU self).__init__(**kwargs)<block_end><def_stmt>forward self x<block_start>self.in_features=x<line_sep><return>relu_forward(x)<block_end><def_stmt>backward self in_gradient<block_start>""" :param in_gradient: 后一层传递过来的梯度 :return out_gradient: 传递给前一层的梯度 """<line_sep>out_gradient=relu_backward(in_gradient self.in_features)<line_sep><return>out_gradient<block_end><block_end><class_stmt>MaxPooling2D(BaseModule)<block_start>""" 最大池化层 """<def_stmt>__init__ self kernel=(2 2) stride=(2 2) padding=(0 0) **kwargs<block_start>""" :param kernel: 池化尺寸 :param stride: 步长 :param padding: padding :param kwargs: """<line_sep>super(MaxPooling2D self).__init__(**kwargs)<line_sep>self.kernel=kernel<line_sep>self.stride=stride<line_sep>self.padding=padding<block_end><def_stmt>forward self 
x<block_start>""" :param x: [B,C,H,W] :return output : [B,C,H',W'] """<line_sep>self.in_features=x<line_sep>output=max_pooling_forward(x self.kernel self.stride self.padding)<line_sep><return>output<block_end><def_stmt>backward self in_gradient<block_start>""" :param in_gradient: 后一层传递过来的梯度 :return out_gradient: 传递给前一层的梯度 """<line_sep>out_gradient=max_pooling_backward(in_gradient self.in_features self.kernel self.stride self.padding)<line_sep><return>out_gradient<block_end><block_end><class_stmt>GlobalAvgPooling2D(BaseModule)<block_start>""" 全局平均池化 """<def_stmt>__init__ self **kwargs<block_start>super(GlobalAvgPooling2D self).__init__(**kwargs)<block_end><def_stmt>forward self x<block_start>""" :param x: [B,C,H,W] :return output : [B,C,H',W'] """<line_sep>self.in_features=x<line_sep>output=global_avg_pooling_forward(x)<line_sep><return>output<block_end><def_stmt>backward self in_gradient<block_start>""" :param in_gradient: 后一层传递过来的梯度 :return out_gradient: 传递给前一层的梯度 """<line_sep>out_gradient=global_avg_pooling_backward(in_gradient self.in_features)<line_sep><return>out_gradient<block_end><block_end><class_stmt>Flatten(BaseModule)<block_start>""" 打平层 """<def_stmt>__init__ self **kwargs<block_start>super(Flatten self).__init__(**kwargs)<block_end><def_stmt>forward self x<block_start>self.in_features=x<line_sep><return>flatten_forward(x)<block_end><def_stmt>backward self in_gradient<block_start>""" :param in_gradient: 后一层传递过来的梯度 :return out_gradient: 传递给前一层的梯度 """<line_sep>out_gradient=flatten_backward(in_gradient self.in_features)<line_sep><return>out_gradient<block_end><block_end><def_stmt>test_linear # 实际的权重和偏置 <block_start>W=np.array([[3 7 4] [5 2 6]])<line_sep>b=np.array([2 9 3])<line_sep># 产生训练样本 x_data=np.random.randn(500 2)<line_sep>y_data=np.dot(x_data W)+b<def_stmt>next_sample batch_size=1<block_start>idx=np.random.randint(500)<line_sep><return>x_data[idx:idx+batch_size] y_data[idx:idx+batch_size]<block_end>fc_layer=Linear(2 3 name='fc1')<line_sep># fc_layer.weights['fc1_weight'] *= 1e-2 # 单层权重初始化要小 m=Model([fc_layer])<line_sep>sgd=SGD(m.weights lr=1e-3)<line_sep>i=0<line_sep>loss=1<while_stmt>loss<g>1e-15<block_start>x,y_true=next_sample(4)# 获取当前样本 # 前向传播 y=m.forward(x)<line_sep># 反向传播更新梯度 loss,dy=mean_squared_loss(y y_true)<line_sep>m.backward(dy)<line_sep># 更新梯度 sgd.iterate(m)<line_sep># 更新迭代次数 i<augadd>1<if_stmt>i%10000<eq>0<block_start>print("y_pred:{},y_true:{}".format(y y_true))<line_sep>print("\n迭代{}次,当前loss:{}, 当前权重:{},当前偏置{},梯度:{}".format(i loss m.layers[0].weight m.layers[0].bias m.layers[0].gradients))<line_sep># print(m.weights) <block_end><block_end>print('迭代{}次,当前权重:{} '.format(i m.layers[0].weights))<block_end><if_stmt>__name__<eq>'__main__'<block_start>test_linear()<block_end>
<import_from_stmt>typing Sequence<import_from_stmt>visions.backends.python.series_utils sequence_handle_none sequence_not_empty <import_from_stmt>visions.types.string String<line_sep>@String.contains_op.register@sequence_not_empty@sequence_handle_none<def_stmt>string_contains sequence:Sequence state:dict<arrow>bool<block_start><return>all(isinstance(v str)<for>v sequence)<block_end>
# Copyright 2018 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_from_stmt>f5.bigip.tm.sys.disk Logical_Disk<import_stmt>pytest<import_from_stmt>requests HTTPError<class_stmt>TestLogicalDisk(object)<block_start><def_stmt>test_load_refresh self mgmt_root<block_start>d1=mgmt_root.tm.sys.disk.logical_disks.logical_disk.load(name='HD1')<assert_stmt>d1.name<eq>'HD1'<assert_stmt>d1.kind<eq>'tm:sys:disk:logical-disk:logical-diskstate'<assert_stmt>d1.mode<eq>'mixed'<line_sep>d2=mgmt_root.tm.sys.disk.logical_disks.logical_disk.load(name='HD1')<assert_stmt>d2.name<eq>d1.name<assert_stmt>d2.kind<eq>d1.kind<assert_stmt>d2.mode<eq>d1.mode<line_sep>d1.refresh()<assert_stmt>d1.name<eq>d2.name<assert_stmt>d1.kind<eq>d2.kind<assert_stmt>d1.mode<eq>d2.mode<block_end><def_stmt>test_load_no_object self mgmt_root<block_start>rc=mgmt_root.tm.sys.disk.logical_disks<with_stmt>pytest.raises(HTTPError)<as>err<block_start>rc.logical_disk.load(name='not_exists')<block_end><assert_stmt>err.value.response.status_code<eq>404<block_end><def_stmt>test_logical_disks_collection self mgmt_root<block_start>rc=mgmt_root.tm.sys.disk.logical_disks.get_collection()<assert_stmt>isinstance(rc list)<assert_stmt>len(rc)<assert_stmt>isinstance(rc[0] Logical_Disk)<block_end><block_end>
<import_from_stmt>torch nn<import_from_stmt>torch.nn.init xavier_uniform_<import_from_stmt>mylib.torch.nn.init zeros_initializer<class_stmt>Dense(nn.Linear)<block_start>r"""Fully connected linear layer with activation function. .. math:: y = activation(xW^T + b) Args: in_features (int): number of input features :math:`x`. out_features (int): number of output features :math:`y`. bias (bool, optional): if False, the layer will not adapt bias :math:`b`. activation (callable, optional): if None, no activation function is used. weight_init (callable, optional): weight initializer from current weight. bias_init (callable, optional): bias initializer from current bias. """<def_stmt>__init__ self in_features out_features bias=<true> activation=<none> weight_init=xavier_uniform_ # weight_init=xavier_normal_, bias_init=zeros_initializer <block_start>self.weight_init=weight_init<line_sep>self.bias_init=bias_init<line_sep>self.activation=activation<line_sep># initialize linear layer y = xW^T + b super(Dense self).__init__(in_features out_features bias)<block_end><def_stmt>reset_parameters self<block_start>"""Reinitialize the model's weight and bias values."""<line_sep>self.weight_init(self.weight)<if_stmt>self.bias<is><not><none><block_start>self.bias_init(self.bias)<block_end><block_end><def_stmt>forward self inputs<block_start>"""Compute layer output. Args: inputs (torch.Tensor): batch of input values. Returns: torch.Tensor: layer output. """<line_sep># compute linear layer y = xW^T + b y=super(Dense self).forward(inputs)<line_sep># add activation function <if_stmt>self.activation<block_start>y=self.activation(y)<block_end><return>y<block_end><block_end>
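A brief usage sketch for the Dense layer above; the sizes and the choice of torch.relu as the activation are illustrative assumptions.

    import torch

    layer = Dense(128, 64, activation=torch.relu)   # y = relu(x W^T + b)

    x = torch.randn(32, 128)   # batch of 32 input vectors
    y = layer(x)
    print(y.shape)             # torch.Size([32, 64])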
<import_stmt>requests<import_stmt>ray<import_from_stmt>ray serve<line_sep>serve.start()<line_sep>@serve.deployment<class_stmt>Counter<block_start><def_stmt>__init__ self<block_start>self.count=0<block_end><def_stmt>__call__ self *args<block_start>self.count<augadd>1<line_sep><return>{"count":self.count}<block_end><block_end># Deploy our class. Counter.deploy()<line_sep># Query our endpoint in two different ways: from HTTP and from Python. <assert_stmt>requests.get("http://127.0.0.1:8000/Counter").json()<eq>{"count":1}<assert_stmt>ray.get(Counter.get_handle().remote())<eq>{"count":2}<line_sep>
# dockerpty: test_tty.py. # # Copyright 2014 <NAME> <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>expects expect equal be_none be_true be_false<import_stmt>dockerpty.tty<as>tty<import_stmt>tests.util<as>util<import_stmt>os<import_stmt>pty<import_stmt>termios<import_stmt>tempfile<def_stmt>israw fd<block_start>__,__,__,flags,__,__,__=termios.tcgetattr(fd)<line_sep><return><not>flags&termios.ECHO<block_end><def_stmt>test_size_returns_none_for_non_tty <block_start><with_stmt>tempfile.TemporaryFile()<as>t<block_start>expect(tty.size(t)).to(be_none)<block_end><block_end><def_stmt>test_size_returns_a_tuple_for_a_tty <block_start>fd,__=pty.openpty()<line_sep>fd=os.fdopen(fd)<line_sep>util.set_pty_size(fd (43 120))<line_sep>expect(tty.size(fd)).to(equal((43 120)))<block_end><class_stmt>TestTerminal(object)<block_start><def_stmt>test_start_when_raw self<block_start>fd,__=pty.openpty()<line_sep>terminal=tty.Terminal(os.fdopen(fd) raw=<true>)<line_sep>expect(israw(fd)).to(be_false)<line_sep>terminal.start()<line_sep>expect(israw(fd)).to(be_true)<block_end><def_stmt>test_start_when_not_raw self<block_start>fd,__=pty.openpty()<line_sep>terminal=tty.Terminal(os.fdopen(fd) raw=<false>)<line_sep>expect(israw(fd)).to(be_false)<line_sep>terminal.start()<line_sep>expect(israw(fd)).to(be_false)<block_end><def_stmt>test_stop_when_raw self<block_start>fd,__=pty.openpty()<line_sep>terminal=tty.Terminal(os.fdopen(fd) raw=<true>)<line_sep>terminal.start()<line_sep>terminal.stop()<line_sep>expect(israw(fd)).to(be_false)<block_end><def_stmt>test_raw_with_block self<block_start>fd,__=pty.openpty()<line_sep>fd=os.fdopen(fd)<with_stmt>tty.Terminal(fd raw=<true>)<block_start>expect(israw(fd)).to(be_true)<block_end>expect(israw(fd)).to(be_false)<block_end><def_stmt>test_start_does_not_crash_when_fd_is_not_a_tty self<block_start><with_stmt>tempfile.TemporaryFile()<as>f<block_start>terminal=tty.Terminal(f raw=<true>)<line_sep>terminal.start()<line_sep>terminal.stop()<block_end><block_end><def_stmt>test_repr self<block_start>fd='some_fd'<line_sep>terminal=tty.Terminal(fd raw=<true>)<line_sep>expect(repr(terminal)).to(equal("Terminal(some_fd, raw=True)"))<block_end><block_end>
<import_stmt>pytest<import_stmt>shapely.geometry<import_from_stmt>descarteslabs scenes<import_from_stmt>.. GeoContext<import_from_stmt>descarteslabs.workflows.types.containers Tuple<import_from_stmt>descarteslabs.workflows.types.primitives Int Float<def_stmt>test_from_scenes_wrong_type <block_start><with_stmt>pytest.raises(TypeError match=r"expected a `descarteslabs\.scenes\.GeoContext`")<block_start>GeoContext.from_scenes("foo")<block_end><block_end><def_stmt>test_from_scenes_aoi <block_start>aoi=scenes.AOI(geometry=shapely.geometry.box(-60.0 30.0 -50.0 40.0) resolution=1 crs="EPSG:4326" align_pixels=<false> )<line_sep>ctx=GeoContext.from_scenes(aoi)<assert_stmt>ctx.graft[ctx.graft["returns"]][0]<eq>"wf.GeoContext.create"<line_sep>promoted=GeoContext._promote(aoi)<assert_stmt>promoted.graft[promoted.graft["returns"]][0]<eq>"wf.GeoContext.create"<block_end><def_stmt>test_from_scenes_tile <block_start>tile_dict={"geometry":{"coordinates":[[[-100.10534464886125 59.94175277369993] [-99.91065247366876 59.943240309707676] [-99.91334037259435 60.040922421458546] [-100.10860694364838 60.039429047992876] [-100.10534464886125 59.94175277369993] ]] "type":"Polygon" } "properties":{"cs_code":"EPSG:32614" "geotrans":[438240.0 20.0 0 6656320.0 0 -20.0] "key":"512:16:20.0:14:-6:649" "outputBounds":[438240.0 6645440.0 449120.0 6656320.0] "pad":16 "proj4":"+proj=utm +zone=14 +datum=WGS84 +units=m +no_defs " "resolution":20.0 "ti":-6 "tilesize":512 "tj":649 "zone":14 } "type":"Feature" }<line_sep>tile=scenes.DLTile(tile_dict)<line_sep>ctx=GeoContext.from_scenes(tile)<assert_stmt>ctx.graft[ctx.graft["returns"]][0]<eq>"wf.GeoContext.from_dltile_key"<line_sep>promoted=GeoContext._promote(tile)<assert_stmt>(promoted.graft[promoted.graft["returns"]][0]<eq>"wf.GeoContext.from_dltile_key")<block_end><def_stmt>test_from_scenes_xyztile <block_start>tile=scenes.XYZTile(3 5 4)<line_sep>ctx=GeoContext.from_scenes(tile)<assert_stmt>ctx.graft[ctx.graft["returns"]][0]<eq>"wf.GeoContext.from_xyz_tile"<line_sep>promoted=GeoContext._promote(tile)<assert_stmt>promoted.graft[promoted.graft["returns"]][0]<eq>"wf.GeoContext.from_xyz_tile"<block_end><def_stmt>test_promote_dltile_from_key <block_start>ctx=GeoContext.from_dltile_key("500:0:10.0:13:-17:790")<assert_stmt>GeoContext._promote(ctx)<is>ctx<block_end><def_stmt>test_promote_xyztile_from_xyz <block_start>ctx=GeoContext.from_xyz_tile(3 5 4)<assert_stmt>GeoContext._promote(ctx)<is>ctx<block_end>@pytest.mark.parametrize("attr" ["arr_shape" "gdal_geotrans" "projected_bounds"])<def_stmt>test_readonly_attributes attr<block_start>type_params=GeoContext._type_params[0]<line_sep>ctx=GeoContext.from_xyz_tile(3 5 4)<assert_stmt>isinstance(getattr(ctx attr) type_params[attr])<block_end><def_stmt>test_index_to_coords <block_start>aoi=scenes.AOI(geometry=shapely.geometry.box(-60.0 30.0 -50.0 40.0) resolution=1 crs="EPSG:4326" align_pixels=<false> )<line_sep>ctx=GeoContext.from_scenes(aoi)<line_sep>coords=ctx.index_to_coords(0 0)<assert_stmt>isinstance(coords Tuple[Float Float])<block_end><def_stmt>test_coords_to_index <block_start>aoi=scenes.AOI(geometry=shapely.geometry.box(-60.0 30.0 -50.0 40.0) resolution=1 crs="EPSG:4326" align_pixels=<false> )<line_sep>ctx=GeoContext.from_scenes(aoi)<line_sep>ctx=GeoContext._promote(ctx)<line_sep>index=ctx.coords_to_index(0.0 1.0)<assert_stmt>isinstance(index Tuple[Int Int])<block_end>
""" Misc. functions """<import_from_future_stmt> print_function<import_stmt>numpy<as>np<line_sep># default tolerences RTOL=1.001e-01<line_sep>ATOL=1.001e-01<line_sep>DTOL=5.001e-01<def_stmt>pair_similar dic1 data1 dic2 data2 verb=<false> atol=ATOL rtol=RTOL dtol=DTOL ignore_pipe_display=<false><block_start>""" Check a dic, data pair against a second dic, data pair for differences. Parameters ---------- dic1 : dict First dictionary of NMR parameters. data1 : ndarray First array of NMR data dic2 : dict Second dictionary of NMR parameters data2 : ndarray Second array of NMR data verb : bool, optional Set True for verbose reporting. atol : float, optional The absolute tolerent parameter to pass to numpy.allclose. rtol : float, optional The relative tolenance parameter to pass to numpy.allclose. Returns ------- r1 : bool True is data1 and data2 are similar, False if they differ. r2 : bool True is dic1 and dic2 are similar, False if they differ. """<line_sep>r1=isdatasimilar(data1 data2 verb atol rtol)<line_sep>r2=isdicsimilar(dict(dic1) dict(dic2) verb dtol ignore_pipe_display=ignore_pipe_display)<line_sep><return>r1 r2<block_end><def_stmt>isdatasimilar data1 data2 verb=<false> atol=ATOL rtol=RTOL<block_start>""" Check that two sets of NMR data are equal within a tolerance. Parameters ---------- data1 : ndarray First array of NMR data data2 : ndarray Second array of NMR data verb : bool, optional Set True for verbose reporting. atol : float, optional The absolute tolerent parameter to pass to numpy.allclose. rtol : float, optional The relative tolenance parameter to pass to numpy.allclose. Returns ------- r1 : bool True is data1 and data2 are similar, False if they differ. """<line_sep>r=<true><if_stmt>data1.dtype<ne>data2.dtype<block_start>r=<false><if_stmt>verb<block_start>print("Dtypes do not match:" data1.dtype data2.dtype)<block_end><block_end><if_stmt>data1.shape<ne>data2.shape<block_start>r=<false><if_stmt>verb<block_start>print("Shapes do not match:" data1.shape data2.shape)<block_end><block_end><if_stmt>np.allclose(data1 data2 rtol=rtol atol=atol)<is><false><block_start>r=<false><if_stmt>verb<block_start>print("Data does not match")<block_end><block_end><return>r<block_end><def_stmt>isitemsimilar v1 v2 verb=<false> dtol=DTOL<block_start>""" Compare two values for differences See :py:func:`isdicsimilar` for Parameters. """<line_sep>r=<true><line_sep># type checking <if_stmt>type(v1)<ne>type(v2)<block_start>r=<false><if_stmt>verb<block_start>print("Item has different type" type(v1) type(v2))<block_end><block_end># iterable checking <elif_stmt>isinstance(v1 dict)<block_start>r=r<and>isdicsimilar(v1 v2 verb=verb dtol=dtol)<block_end><elif_stmt>isinstance(v1 list)<block_start>r=r<and>islistsimilar(v1 v2 verb=verb dtol=dtol)<block_end># numeric type <elif_stmt>isinstance(v1 (int float))<block_start><if_stmt>abs(v1-v2)<g>dtol<block_start>r=<false><if_stmt>verb<block_start>print("Key mismatch:" v1 v2)<block_end><block_end><block_end># all other types: just check if equal <else_stmt><block_start><if_stmt>v1<ne>v2<block_start>r=<false><if_stmt>verb<block_start>print("Key mismatch:" v1 v2)<block_end><block_end><block_end><return>r<block_end><def_stmt>isdicsimilar dic1 dic2 verb=<false> dtol=DTOL ignore_pipe_display=<false><block_start>""" Compare two dictionaries for differences Float and int types compared within dtol. Lists and dictionaries are checked recursively all other checked by simple equivalence Parameters ---------- dic1 : dict First dictionary of NMR parameters. 
dic2 : dict Second dictionary of NMR parameters verb : bool, optional Set True for verbose reporting. dtol : float, optional Maximum allowable difference between int and float elements if dic1 and dic2. Returns ------- r1 : bool True is dic1 and dic2 are similar, False if they differ. """<line_sep># create copies of the two dictionaries dic1=dict(dic1)<line_sep>dic2=dict(dic2)<line_sep># set return value to True r=<true><line_sep># create sets kset1=set(dic1.keys())<line_sep>kset2=set(dic2.keys())<line_sep>dset=set.difference(kset1 kset2)<line_sep>iset=set.intersection(kset1 kset2)<if_stmt>ignore_pipe_display<is><true><block_start>iset.discard('FDMIN')<line_sep>iset.discard('FDMAX')<line_sep>iset.discard('FDDISPMIN')<line_sep>iset.discard('FDDISPMAX')<line_sep>iset.discard('FDSCALEFLAG')<block_end># print out any keys not in both dictionaries <if_stmt>len(dset)<ne>0<block_start>r=<false><if_stmt>verb<block_start>print("Keys not in both dictionaries:" dset)<block_end><block_end># loop over keys in both sets <for_stmt>k iset<block_start>v1,v2=dic1[k] dic2[k]<if_stmt><not>isitemsimilar(v1 v2 verb=verb dtol=dtol)<block_start>print("For key:" k)<line_sep>r=<false><block_end><block_end><return>r<block_end><def_stmt>islistsimilar l1 l2 verb=<false> dtol=DTOL<block_start>""" Compare two lists (or iterable) for differences See :py:func:`isdicsimilar` for Parameters. """<line_sep># set return value to True r=<true><line_sep># print out any keys not in both dictionaries <if_stmt>len(l1)<ne>len(l2)<block_start>r=<false><if_stmt>verb<block_start>print("Lists not of same length:" len(l1) len(l2))<block_end><block_end># loop over keys in both sets <for_stmt>v1,v2 zip(l1 l2)<block_start><if_stmt><not>isitemsimilar(v1 v2 verb=verb dtol=dtol)<block_start>r=<false><block_end><block_end><return>r<block_end>
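A brief usage sketch of the comparison helpers above, using small invented dictionaries and arrays; the values are chosen so the differences fall within the default tolerances.

    import numpy as np

    dic1 = {"sw": 10000.0, "label": "1H", "pts": [1024, 2048]}
    dic2 = {"sw": 10000.05, "label": "1H", "pts": [1024, 2048]}
    data1 = np.zeros((4, 8), dtype="float32")
    data2 = data1 + 1e-3

    print(isdatasimilar(data1, data2, verb=True))         # True: difference is within ATOL/RTOL
    print(isdicsimilar(dic1, dic2, verb=True, dtol=0.5))  # True: 0.05 difference is below dtol
    print(pair_similar(dic1, data1, dic2, data2))         # (True, True)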
""" Script for processing the VTB files and turning their trees into the desired tree syntax The VTB original trees are stored in the directory: VietTreebank_VLSP_SP73/Kho ngu lieu 10000 cay cu phap The script requires two arguments: 1. Original directory storing the original trees 2. New directory storing the converted trees """<import_stmt>os<import_stmt>argparse<def_stmt>convert_file org_dir new_dir<block_start>""" :param org_dir: original directory storing original trees :param new_dir: new directory storing formatted constituency trees This function writes new trees to the corresponding files in new_dir """<with_stmt>open(org_dir 'r')<as>reader open(new_dir 'w')<as>writer<block_start>content=reader.readlines()<for_stmt>line content<block_start>line=' '.join(line.split())<if_stmt>line<eq>''<block_start><continue><block_end><elif_stmt>line<eq>'<s>'<block_start>writer.write('(ROOT ')<block_end><elif_stmt>line<eq>'</s>'<block_start>writer.write(')\n')<block_end><else_stmt><block_start>writer.write(line)<block_end><block_end><block_end><block_end><def_stmt>main <block_start>""" Main function for the script Process args, loop through each file in the directory and convert to the desired tree format """<line_sep>parser=argparse.ArgumentParser(description="Script that converts a VTB Tree into the desired format" )<line_sep>parser.add_argument('org_dir' help='The location of the original directory storing original trees ')<line_sep>parser.add_argument('new_dir' help='The location of new directory storing the new formatted trees')<line_sep>args=parser.parse_args()<line_sep>org_dir=args.org_dir<line_sep>new_dir=args.new_dir<for_stmt>filename os.listdir(org_dir)<block_start>file_name,file_extension=os.path.splitext(filename)<line_sep># Only convert .prd files, skip the .raw files <if_stmt>file_extension<eq>'.raw'<block_start><continue><block_end>file_path=os.path.join(org_dir filename)<line_sep>new_path=os.path.join(new_dir file_name)<line_sep>new_file_path=f'{new_path}.mrg'<line_sep># Convert the tree and write to new_file_path convert_file(file_path new_file_path)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
""" Cisco_IOS_XR_asr9k_sc_envmon_admin_oper This module contains a collection of YANG definitions for Cisco IOS\-XR asr9k\-sc\-envmon package admin\-plane operational data. This module contains definitions for the following management objects\: environmental\-monitoring\: Admin Environmental Monitoring Operational data space Copyright (c) 2013\-2018 by Cisco Systems, Inc. All rights reserved. """<import_stmt>sys<import_from_stmt>collections OrderedDict<import_from_stmt>ydk.types Entity<as>_Entity_<import_from_stmt>ydk.types EntityPath Identity Enum YType YLeaf YLeafList YList LeafDataList Bits Empty Decimal64<import_from_stmt>ydk.types Entity EntityPath Identity Enum YType YLeaf YLeafList YList LeafDataList Bits Empty Decimal64<import_from_stmt>ydk.filters YFilter<import_from_stmt>ydk.errors YError YModelError<import_from_stmt>ydk.errors.error_handler handle_type_error<as>_handle_type_error<class_stmt>EnvironmentalMonitoring(_Entity_)<block_start>""" Admin Environmental Monitoring Operational data space .. attribute:: racks Table of racks **type**\: :py:class:`Racks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks>` **config**\: False """<line_sep>_prefix='asr9k-sc-envmon-admin-oper'<line_sep>_revision='2017-01-19'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(EnvironmentalMonitoring self).__init__()<block_end>self._top_entity=<none><line_sep>self.yang_name="environmental-monitoring"<line_sep>self.yang_parent_name="Cisco-IOS-XR-asr9k-sc-envmon-admin-oper"<line_sep>self.is_top_level_class=<true><line_sep>self.has_list_ancestor=<false><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([("racks" ("racks" EnvironmentalMonitoring.Racks))])<line_sep>self._leafs=OrderedDict()<line_sep>self.racks=EnvironmentalMonitoring.Racks()<line_sep>self.racks.parent=self<line_sep>self._children_name_map["racks"]="racks"<line_sep>self._segment_path=<lambda>:"Cisco-IOS-XR-asr9k-sc-envmon-admin-oper:environmental-monitoring"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(EnvironmentalMonitoring [] name value)<block_end><class_stmt>Racks(_Entity_)<block_start>""" Table of racks .. attribute:: rack Number **type**\: list of :py:class:`Rack <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack>` **config**\: False """<line_sep>_prefix='asr9k-sc-envmon-admin-oper'<line_sep>_revision='2017-01-19'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(EnvironmentalMonitoring.Racks self).__init__()<block_end>self.yang_name="racks"<line_sep>self.yang_parent_name="environmental-monitoring"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<false><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([("rack" ("rack" EnvironmentalMonitoring.Racks.Rack))])<line_sep>self._leafs=OrderedDict()<line_sep>self.rack=YList(self)<line_sep>self._segment_path=<lambda>:"racks"<line_sep>self._absolute_path=<lambda>:"Cisco-IOS-XR-asr9k-sc-envmon-admin-oper:environmental-monitoring/%s"%self._segment_path()<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(EnvironmentalMonitoring.Racks [] name value)<block_end><class_stmt>Rack(_Entity_)<block_start>""" Number .. 
attribute:: rack (key) Rack number **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: slots Table of slots **type**\: :py:class:`Slots <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots>` **config**\: False """<line_sep>_prefix='asr9k-sc-envmon-admin-oper'<line_sep>_revision='2017-01-19'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(EnvironmentalMonitoring.Racks.Rack self).__init__()<block_end>self.yang_name="rack"<line_sep>self.yang_parent_name="racks"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<false><line_sep>self.ylist_key_names=['rack']<line_sep>self._child_classes=OrderedDict([("slots" ("slots" EnvironmentalMonitoring.Racks.Rack.Slots))])<line_sep>self._leafs=OrderedDict([('rack' (YLeaf(YType.uint32 'rack') ['int'])) ])<line_sep>self.rack=<none><line_sep>self.slots=EnvironmentalMonitoring.Racks.Rack.Slots()<line_sep>self.slots.parent=self<line_sep>self._children_name_map["slots"]="slots"<line_sep>self._segment_path=<lambda>:"rack"+"[rack='"+str(self.rack)+"']"<line_sep>self._absolute_path=<lambda>:"Cisco-IOS-XR-asr9k-sc-envmon-admin-oper:environmental-monitoring/racks/%s"%self._segment_path()<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(EnvironmentalMonitoring.Racks.Rack ['rack'] name value)<block_end><class_stmt>Slots(_Entity_)<block_start>""" Table of slots .. attribute:: slot Name **type**\: list of :py:class:`Slot <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot>` **config**\: False """<line_sep>_prefix='asr9k-sc-envmon-admin-oper'<line_sep>_revision='2017-01-19'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(EnvironmentalMonitoring.Racks.Rack.Slots self).__init__()<block_end>self.yang_name="slots"<line_sep>self.yang_parent_name="rack"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<true><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([("slot" ("slot" EnvironmentalMonitoring.Racks.Rack.Slots.Slot))])<line_sep>self._leafs=OrderedDict()<line_sep>self.slot=YList(self)<line_sep>self._segment_path=<lambda>:"slots"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots [] name value)<block_end><class_stmt>Slot(_Entity_)<block_start>""" Name .. attribute:: slot (key) Slot name **type**\: str **pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+ **config**\: False .. 
attribute:: modules Table of modules **type**\: :py:class:`Modules <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules>` **config**\: False """<line_sep>_prefix='asr9k-sc-envmon-admin-oper'<line_sep>_revision='2017-01-19'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot self).__init__()<block_end>self.yang_name="slot"<line_sep>self.yang_parent_name="slots"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<true><line_sep>self.ylist_key_names=['slot']<line_sep>self._child_classes=OrderedDict([("modules" ("modules" EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules))])<line_sep>self._leafs=OrderedDict([('slot' (YLeaf(YType.str 'slot') ['str'])) ])<line_sep>self.slot=<none><line_sep>self.modules=EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules()<line_sep>self.modules.parent=self<line_sep>self._children_name_map["modules"]="modules"<line_sep>self._segment_path=<lambda>:"slot"+"[slot='"+str(self.slot)+"']"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot ['slot'] name value)<block_end><class_stmt>Modules(_Entity_)<block_start>""" Table of modules .. attribute:: module Name **type**\: list of :py:class:`Module <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module>` **config**\: False """<line_sep>_prefix='asr9k-sc-envmon-admin-oper'<line_sep>_revision='2017-01-19'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules self).__init__()<block_end>self.yang_name="modules"<line_sep>self.yang_parent_name="slot"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<true><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([("module" ("module" EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module))])<line_sep>self._leafs=OrderedDict()<line_sep>self.module=YList(self)<line_sep>self._segment_path=<lambda>:"modules"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules [] name value)<block_end><class_stmt>Module(_Entity_)<block_start>""" Name .. attribute:: module (key) Module name **type**\: str **pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+ **config**\: False .. attribute:: sensor_types Table of sensor types **type**\: :py:class:`SensorTypes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes>` **config**\: False .. 
attribute:: power Module Power Draw **type**\: :py:class:`Power <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power>` **config**\: False """<line_sep>_prefix='asr9k-sc-envmon-admin-oper'<line_sep>_revision='2017-01-19'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module self).__init__()<block_end>self.yang_name="module"<line_sep>self.yang_parent_name="modules"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<true><line_sep>self.ylist_key_names=['module']<line_sep>self._child_classes=OrderedDict([("sensor-types" ("sensor_types" EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes)) ("power" ("power" EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power))])<line_sep>self._leafs=OrderedDict([('module' (YLeaf(YType.str 'module') ['str'])) ])<line_sep>self.module=<none><line_sep>self.sensor_types=EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes()<line_sep>self.sensor_types.parent=self<line_sep>self._children_name_map["sensor_types"]="sensor-types"<line_sep>self.power=EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power()<line_sep>self.power.parent=self<line_sep>self._children_name_map["power"]="power"<line_sep>self._segment_path=<lambda>:"module"+"[module='"+str(self.module)+"']"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module ['module'] name value)<block_end><class_stmt>SensorTypes(_Entity_)<block_start>""" Table of sensor types .. attribute:: sensor_type Type of sensor **type**\: list of :py:class:`SensorType <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType>` **config**\: False """<line_sep>_prefix='asr9k-sc-envmon-admin-oper'<line_sep>_revision='2017-01-19'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes self).__init__()<block_end>self.yang_name="sensor-types"<line_sep>self.yang_parent_name="module"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<true><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([("sensor-type" ("sensor_type" EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType))])<line_sep>self._leafs=OrderedDict()<line_sep>self.sensor_type=YList(self)<line_sep>self._segment_path=<lambda>:"sensor-types"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes [] name value)<block_end><class_stmt>SensorType(_Entity_)<block_start>""" Type of sensor .. attribute:: type (key) Sensor type **type**\: str **pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+ **config**\: False .. 
attribute:: sensor_names Table of sensors **type**\: :py:class:`SensorNames <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames>` **config**\: False """<line_sep>_prefix='asr9k-sc-envmon-admin-oper'<line_sep>_revision='2017-01-19'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType self).__init__()<block_end>self.yang_name="sensor-type"<line_sep>self.yang_parent_name="sensor-types"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<true><line_sep>self.ylist_key_names=['type']<line_sep>self._child_classes=OrderedDict([("sensor-names" ("sensor_names" EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames))])<line_sep>self._leafs=OrderedDict([('type' (YLeaf(YType.str 'type') ['str'])) ])<line_sep>self.type=<none><line_sep>self.sensor_names=EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames()<line_sep>self.sensor_names.parent=self<line_sep>self._children_name_map["sensor_names"]="sensor-names"<line_sep>self._segment_path=<lambda>:"sensor-type"+"[type='"+str(self.type)+"']"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType ['type'] name value)<block_end><class_stmt>SensorNames(_Entity_)<block_start>""" Table of sensors .. attribute:: sensor_name Name of sensor **type**\: list of :py:class:`SensorName <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName>` **config**\: False """<line_sep>_prefix='asr9k-sc-envmon-admin-oper'<line_sep>_revision='2017-01-19'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames self).__init__()<block_end>self.yang_name="sensor-names"<line_sep>self.yang_parent_name="sensor-type"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<true><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([("sensor-name" ("sensor_name" EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName))])<line_sep>self._leafs=OrderedDict()<line_sep>self.sensor_name=YList(self)<line_sep>self._segment_path=<lambda>:"sensor-names"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames [] name value)<block_end><class_stmt>SensorName(_Entity_)<block_start>""" Name of sensor .. attribute:: name (key) Sensor name **type**\: str **pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+ **config**\: False .. attribute:: thresholds The threshold information **type**\: :py:class:`Thresholds <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds>` **config**\: False .. 
attribute:: value_detailed Detailed sensor information including the sensor value **type**\: :py:class:`ValueDetailed <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed>` **config**\: False .. attribute:: value_brief The sensor value **type**\: str **pattern:** [0\-9a\-fA\-F]{1,8} **config**\: False """<line_sep>_prefix='asr9k-sc-envmon-admin-oper'<line_sep>_revision='2017-01-19'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName self).__init__()<block_end>self.yang_name="sensor-name"<line_sep>self.yang_parent_name="sensor-names"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<true><line_sep>self.ylist_key_names=['name']<line_sep>self._child_classes=OrderedDict([("thresholds" ("thresholds" EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds)) ("value-detailed" ("value_detailed" EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed))])<line_sep>self._leafs=OrderedDict([('name' (YLeaf(YType.str 'name') ['str'])) ('value_brief' (YLeaf(YType.str 'value-brief') ['str'])) ])<line_sep>self.name=<none><line_sep>self.value_brief=<none><line_sep>self.thresholds=EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds()<line_sep>self.thresholds.parent=self<line_sep>self._children_name_map["thresholds"]="thresholds"<line_sep>self.value_detailed=EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed()<line_sep>self.value_detailed.parent=self<line_sep>self._children_name_map["value_detailed"]="value-detailed"<line_sep>self._segment_path=<lambda>:"sensor-name"+"[name='"+str(self.name)+"']"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName ['name' 'value_brief'] name value)<block_end><class_stmt>Thresholds(_Entity_)<block_start>""" The threshold information .. 
attribute:: threshold Types of thresholds **type**\: list of :py:class:`Threshold <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold>` **config**\: False """<line_sep>_prefix='asr9k-sc-envmon-admin-oper'<line_sep>_revision='2017-01-19'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds self).__init__()<block_end>self.yang_name="thresholds"<line_sep>self.yang_parent_name="sensor-name"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<true><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([("threshold" ("threshold" EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold))])<line_sep>self._leafs=OrderedDict()<line_sep>self.threshold=YList(self)<line_sep>self._segment_path=<lambda>:"thresholds"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds [] name value)<block_end><class_stmt>Threshold(_Entity_)<block_start>""" Types of thresholds .. attribute:: type (key) Threshold type **type**\: str **pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+ **config**\: False .. attribute:: value_detailed Detailed sensor threshold information **type**\: :py:class:`ValueDetailed <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed>` **config**\: False .. attribute:: trap Threshold trap enable flag true\-ENABLE, false\-DISABLE **type**\: bool **config**\: False .. 
attribute:: value_brief Threshold value for the sensor **type**\: str **pattern:** [0\-9a\-fA\-F]{1,8} **config**\: False """<line_sep>_prefix='asr9k-sc-envmon-admin-oper'<line_sep>_revision='2017-01-19'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold self).__init__()<block_end>self.yang_name="threshold"<line_sep>self.yang_parent_name="thresholds"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<true><line_sep>self.ylist_key_names=['type']<line_sep>self._child_classes=OrderedDict([("value-detailed" ("value_detailed" EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed))])<line_sep>self._leafs=OrderedDict([('type' (YLeaf(YType.str 'type') ['str'])) ('trap' (YLeaf(YType.boolean 'trap') ['bool'])) ('value_brief' (YLeaf(YType.str 'value-brief') ['str'])) ])<line_sep>self.type=<none><line_sep>self.trap=<none><line_sep>self.value_brief=<none><line_sep>self.value_detailed=EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed()<line_sep>self.value_detailed.parent=self<line_sep>self._children_name_map["value_detailed"]="value-detailed"<line_sep>self._segment_path=<lambda>:"threshold"+"[type='"+str(self.type)+"']"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold ['type' 'trap' 'value_brief'] name value)<block_end><class_stmt>ValueDetailed(_Entity_)<block_start>""" Detailed sensor threshold information .. attribute:: threshold_severity Indicates minor, major, critical severities **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: threshold_relation Indicates relation between sensor value and threshold **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: threshold_value Value of the configured threshold **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: threshold_evaluation Indicates the result of the most recent evaluation of the thresholD **type**\: bool **config**\: False .. 
attribute:: threshold_notification_enabled Indicates whether or not a notification should result, in case of threshold violation **type**\: bool **config**\: False """<line_sep>_prefix='asr9k-sc-envmon-admin-oper'<line_sep>_revision='2017-01-19'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed self).__init__()<block_end>self.yang_name="value-detailed"<line_sep>self.yang_parent_name="threshold"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<true><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([])<line_sep>self._leafs=OrderedDict([('threshold_severity' (YLeaf(YType.uint32 'threshold-severity') ['int'])) ('threshold_relation' (YLeaf(YType.uint32 'threshold-relation') ['int'])) ('threshold_value' (YLeaf(YType.uint32 'threshold-value') ['int'])) ('threshold_evaluation' (YLeaf(YType.boolean 'threshold-evaluation') ['bool'])) ('threshold_notification_enabled' (YLeaf(YType.boolean 'threshold-notification-enabled') ['bool'])) ])<line_sep>self.threshold_severity=<none><line_sep>self.threshold_relation=<none><line_sep>self.threshold_value=<none><line_sep>self.threshold_evaluation=<none><line_sep>self.threshold_notification_enabled=<none><line_sep>self._segment_path=<lambda>:"value-detailed"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed ['threshold_severity' 'threshold_relation' 'threshold_value' 'threshold_evaluation' 'threshold_notification_enabled'] name value)<block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper<as>meta<line_sep><return>meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper<as>meta<line_sep><return>meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper<as>meta<line_sep><return>meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds']['meta_info']<block_end><block_end><class_stmt>ValueDetailed(_Entity_)<block_start>""" Detailed sensor information including the sensor value .. attribute:: field_validity_bitmap Sensor valid bitmap **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: device_description Device Name **type**\: str **length:** 0..50 **config**\: False .. attribute:: units Units of variable being read **type**\: str **length:** 0..50 **config**\: False .. attribute:: device_id Identifier for this device **type**\: int **range:** 0..4294967295 **config**\: False .. 
attribute:: value Current reading of sensor **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: alarm_type Indicates threshold violation **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: data_type Sensor data type enums **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: scale Sensor scale enums **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: precision Sensor precision range **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: status Sensor operation state enums **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: age_time_stamp Age of the sensor value; set to the current time if directly access the value from sensor **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: update_rate Sensor value update rate;set to 0 if sensor value is updated and evaluated immediately **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: average Average sensor value over time interval **type**\: int **range:** \-2147483648..2147483647 **config**\: False .. attribute:: minimum Minimum Sensor value over time interval **type**\: int **range:** \-2147483648..2147483647 **config**\: False .. attribute:: maximum Maximum Sensor value over time interval **type**\: int **range:** \-2147483648..2147483647 **config**\: False .. attribute:: interval Time Interval over which sensor value is monitored **type**\: int **range:** \-2147483648..2147483647 **config**\: False """<line_sep>_prefix='asr9k-sc-envmon-admin-oper'<line_sep>_revision='2017-01-19'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed self).__init__()<block_end>self.yang_name="value-detailed"<line_sep>self.yang_parent_name="sensor-name"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<true><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([])<line_sep>self._leafs=OrderedDict([('field_validity_bitmap' (YLeaf(YType.uint32 'field-validity-bitmap') ['int'])) ('device_description' (YLeaf(YType.str 'device-description') ['str'])) ('units' (YLeaf(YType.str 'units') ['str'])) ('device_id' (YLeaf(YType.uint32 'device-id') ['int'])) ('value' (YLeaf(YType.uint32 'value') ['int'])) ('alarm_type' (YLeaf(YType.uint32 'alarm-type') ['int'])) ('data_type' (YLeaf(YType.uint32 'data-type') ['int'])) ('scale' (YLeaf(YType.uint32 'scale') ['int'])) ('precision' (YLeaf(YType.uint32 'precision') ['int'])) ('status' (YLeaf(YType.uint32 'status') ['int'])) ('age_time_stamp' (YLeaf(YType.uint32 'age-time-stamp') ['int'])) ('update_rate' (YLeaf(YType.uint32 'update-rate') ['int'])) ('average' (YLeaf(YType.int32 'average') ['int'])) ('minimum' (YLeaf(YType.int32 'minimum') ['int'])) ('maximum' (YLeaf(YType.int32 'maximum') ['int'])) ('interval' (YLeaf(YType.int32 'interval') ['int'])) 
])<line_sep>self.field_validity_bitmap=<none><line_sep>self.device_description=<none><line_sep>self.units=<none><line_sep>self.device_id=<none><line_sep>self.value=<none><line_sep>self.alarm_type=<none><line_sep>self.data_type=<none><line_sep>self.scale=<none><line_sep>self.precision=<none><line_sep>self.status=<none><line_sep>self.age_time_stamp=<none><line_sep>self.update_rate=<none><line_sep>self.average=<none><line_sep>self.minimum=<none><line_sep>self.maximum=<none><line_sep>self.interval=<none><line_sep>self._segment_path=<lambda>:"value-detailed"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed ['field_validity_bitmap' 'device_description' 'units' 'device_id' 'value' 'alarm_type' 'data_type' 'scale' 'precision' 'status' 'age_time_stamp' 'update_rate' 'average' 'minimum' 'maximum' 'interval'] name value)<block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper<as>meta<line_sep><return>meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper<as>meta<line_sep><return>meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper<as>meta<line_sep><return>meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper<as>meta<line_sep><return>meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper<as>meta<line_sep><return>meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes']['meta_info']<block_end><block_end><class_stmt>Power(_Entity_)<block_start>""" Module Power Draw .. 
attribute:: power_bag Detailed power bag information **type**\: :py:class:`PowerBag <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag>` **config**\: False """<line_sep>_prefix='asr9k-sc-envmon-admin-oper'<line_sep>_revision='2017-01-19'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power self).__init__()<block_end>self.yang_name="power"<line_sep>self.yang_parent_name="module"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<true><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([("power-bag" ("power_bag" EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag))])<line_sep>self._leafs=OrderedDict()<line_sep>self.power_bag=EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag()<line_sep>self.power_bag.parent=self<line_sep>self._children_name_map["power_bag"]="power-bag"<line_sep>self._segment_path=<lambda>:"power"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power [] name value)<block_end><class_stmt>PowerBag(_Entity_)<block_start>""" Detailed power bag information .. attribute:: power_value Current Power Value of the Unit **type**\: int **range:** \-2147483648..2147483647 **config**\: False .. attribute:: power_max_value Max Power Value of the Unit **type**\: int **range:** \-2147483648..2147483647 **config**\: False .. attribute:: power_unit_multiplier Unit Multiplier of Power **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: power_accuracy Accuracy of the Power Value **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: power_measure_caliber Measure Caliber **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: power_current_type Current Type of the Unit **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: power_origin The Power Origin of the Unit **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: power_admin_state Admin Status of the Unit **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: power_oper_state Oper Status of the Unit **type**\: int **range:** 0..4294967295 **config**\: False .. 
attribute:: power_state_enter_reason Enter Reason for the State **type**\: str **length:** 0..50 **config**\: False """<line_sep>_prefix='asr9k-sc-envmon-admin-oper'<line_sep>_revision='2017-01-19'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag self).__init__()<block_end>self.yang_name="power-bag"<line_sep>self.yang_parent_name="power"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<true><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([])<line_sep>self._leafs=OrderedDict([('power_value' (YLeaf(YType.int32 'power-value') ['int'])) ('power_max_value' (YLeaf(YType.int32 'power-max-value') ['int'])) ('power_unit_multiplier' (YLeaf(YType.uint32 'power-unit-multiplier') ['int'])) ('power_accuracy' (YLeaf(YType.uint32 'power-accuracy') ['int'])) ('power_measure_caliber' (YLeaf(YType.uint32 'power-measure-caliber') ['int'])) ('power_current_type' (YLeaf(YType.uint32 'power-current-type') ['int'])) ('power_origin' (YLeaf(YType.uint32 'power-origin') ['int'])) ('power_admin_state' (YLeaf(YType.uint32 'power-admin-state') ['int'])) ('power_oper_state' (YLeaf(YType.uint32 'power-oper-state') ['int'])) ('power_state_enter_reason' (YLeaf(YType.str 'power-state-enter-reason') ['str'])) ])<line_sep>self.power_value=<none><line_sep>self.power_max_value=<none><line_sep>self.power_unit_multiplier=<none><line_sep>self.power_accuracy=<none><line_sep>self.power_measure_caliber=<none><line_sep>self.power_current_type=<none><line_sep>self.power_origin=<none><line_sep>self.power_admin_state=<none><line_sep>self.power_oper_state=<none><line_sep>self.power_state_enter_reason=<none><line_sep>self._segment_path=<lambda>:"power-bag"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag ['power_value' 'power_max_value' 'power_unit_multiplier' 'power_accuracy' 'power_measure_caliber' 'power_current_type' 'power_origin' 'power_admin_state' 'power_oper_state' 'power_state_enter_reason'] name value)<block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper<as>meta<line_sep><return>meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper<as>meta<line_sep><return>meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper<as>meta<line_sep><return>meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper<as>meta<line_sep><return>meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta 
_Cisco_IOS_XR_asr9k_sc_envmon_admin_oper<as>meta<line_sep><return>meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper<as>meta<line_sep><return>meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper<as>meta<line_sep><return>meta._meta_table['EnvironmentalMonitoring.Racks.Rack']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper<as>meta<line_sep><return>meta._meta_table['EnvironmentalMonitoring.Racks']['meta_info']<block_end><block_end><def_stmt>clone_ptr self<block_start>self._top_entity=EnvironmentalMonitoring()<line_sep><return>self._top_entity<block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper<as>meta<line_sep><return>meta._meta_table['EnvironmentalMonitoring']['meta_info']<block_end><block_end>
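# --- Illustrative usage sketch (not part of the generated bindings above) ---
# A minimal example of reading this operational model with ydk-py. The device
# address/credentials are hypothetical placeholders, and the CRUDService /
# NetconfServiceProvider calls are assumed from typical ydk-py usage; check them
# against the installed ydk version before relying on this.
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as envmon

provider = NetconfServiceProvider(address="192.0.2.1", username="admin", password="admin")  # hypothetical device
crud = CRUDService()

# Use the empty top-level container as a read filter, then walk the returned tree
# following the same Racks/Rack -> Slots/Slot -> Modules/Module nesting as above.
env = crud.read(provider, envmon.EnvironmentalMonitoring())
for rack in env.racks.rack:
    for slot in rack.slots.slot:
        for module in slot.modules.module:
            power_bag = module.power.power_bag
            print(power_bag.power_value, power_bag.power_max_value)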
<import_from_stmt>flask.cli FlaskGroup<import_from_stmt>project app db User<line_sep>cli=FlaskGroup(app)<line_sep>@cli.command("create_db")<def_stmt>create_db <block_start>db.drop_all()<line_sep>db.create_all()<line_sep>db.session.commit()<block_end>@cli.command("seed_db")<def_stmt>seed_db <block_start>db.session.add(User(email="<EMAIL>"))<line_sep>db.session.commit()<block_end><if_stmt>__name__<eq>"__main__"<block_start>cli()<block_end>
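# --- Illustrative usage sketch (assumptions noted) ---
# Assuming the module above is saved as manage.py next to a "project" package
# exposing `app`, `db`, and `User`, the commands are normally run from a shell:
#
#   python manage.py create_db   # drop and recreate all tables
#   python manage.py seed_db     # insert the sample User row
#
# They can also be exercised in-process with Flask's CLI test runner, which
# invokes a click command inside an application context:
runner = app.test_cli_runner()
result = runner.invoke(create_db)
print(result.exit_code)  # 0 on success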
<import_from_stmt>.low_level_api *<import_from_stmt>.high_level_api *<import_from_stmt>.high_level_wcs_wrapper *<import_from_stmt>.utils *<import_from_stmt>.sliced_low_level_wcs *<line_sep>
<class_stmt>TypeUnknownParser<block_start>""" Parse invocations to a APIGateway resource with an unknown integration type """<def_stmt>invoke self request integration<block_start>_type=integration["type"]<line_sep><raise>NotImplementedError("The {0} type has not been implemented".format(_type))<block_end><block_end>
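# --- Illustrative usage sketch ---
# The integration payload below is hypothetical; this parser only looks at the
# "type" key and turns any unrecognized value into a NotImplementedError.
parser = TypeUnknownParser()
try:
    parser.invoke(request=None, integration={"type": "MOCK_CUSTOM"})
except NotImplementedError as err:
    print(err)  # The MOCK_CUSTOM type has not been implemented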
""" .. module: dispatch.plugins.dispatch_opsgenie.plugin :platform: Unix :copyright: (c) 2019 by Netflix Inc., see AUTHORS for more :license: Apache, see LICENSE for more details. """<import_stmt>logging<import_from_stmt>pydantic Field SecretStr<import_from_stmt>dispatch.config BaseConfigurationModel<import_from_stmt>dispatch.decorators apply counter timer<import_from_stmt>dispatch.plugins.bases OncallPlugin<import_from_stmt>.service get_oncall page_oncall<line_sep>__version__="0.1.0"<line_sep>log=logging.getLogger(__name__)<class_stmt>OpsgenieConfiguration(BaseConfigurationModel)<block_start>"""Opsgenie configuration description."""<line_sep>api_key:SecretStr=Field(title="API Key" description="This is the key used to talk to the Opsgenine API.")<block_end>@apply(counter exclude=["__init__"])@apply(timer exclude=["__init__"])<class_stmt>OpsGenieOncallPlugin(OncallPlugin)<block_start>title="OpsGenie Plugin - Oncall Management"<line_sep>slug="opsgenie-oncall"<line_sep>author="stefanm8"<line_sep>author_url="https://github.com/Netflix/dispatch"<line_sep>description="Uses Opsgenie to resolve and page oncall teams."<line_sep>version=__version__<def_stmt>__init__ self<block_start>self.configuration_schema=OpsgenieConfiguration<block_end><def_stmt>get self service_id:str **kwargs<block_start><return>get_oncall(self.configuration.api_key service_id)<block_end><def_stmt>page self service_id:str incident_name:str incident_title:str incident_description:str **kwargs <block_start><return>page_oncall(self.configuration.api_key service_id incident_name incident_title incident_description )<block_end><block_end>
<import_stmt>pytest<import_stmt>sqlalchemy<as>sa<import_from_stmt>sqlalchemy.orm dynamic_loader<import_from_stmt>sqlalchemy_utils.observer observes<line_sep>@pytest.fixture<def_stmt>Director Base<block_start><class_stmt>Director(Base)<block_start>__tablename__='director'<line_sep>id=sa.Column(sa.Integer primary_key=<true>)<line_sep>name=sa.Column(sa.String)<line_sep>movies=dynamic_loader('Movie' back_populates='director')<block_end><return>Director<block_end>@pytest.fixture<def_stmt>Movie Base Director<block_start><class_stmt>Movie(Base)<block_start>__tablename__='movie'<line_sep>id=sa.Column(sa.Integer primary_key=<true>)<line_sep>name=sa.Column(sa.String)<line_sep>director_id=sa.Column(sa.Integer sa.ForeignKey(Director.id))<line_sep>director=sa.orm.relationship(Director back_populates='movies')<line_sep>director_name=sa.Column(sa.String)<line_sep>@observes('director')<def_stmt>director_observer self director<block_start>self.director_name=director.name<block_end><block_end><return>Movie<block_end>@pytest.fixture<def_stmt>init_models Director Movie<block_start><pass><block_end>@pytest.mark.usefixtures('postgresql_dsn')<class_stmt>TestObservesForDynamicRelationship<block_start><def_stmt>test_add_observed_object self session Director Movie<block_start>steven=Director(name='<NAME>')<line_sep>session.add(steven)<line_sep>jaws=Movie(name='Jaws' director=steven)<line_sep>session.add(jaws)<line_sep>session.commit()<assert_stmt>jaws.director_name<eq>'<NAME>'<block_end><def_stmt>test_add_observed_object_from_backref self session Director Movie<block_start>jaws=Movie(name='Jaws')<line_sep>steven=Director(name='<NAME>' movies=[jaws])<line_sep>session.add(steven)<line_sep>session.add(jaws)<line_sep>session.commit()<assert_stmt>jaws.director_name<eq>'<NAME>'<block_end><block_end>
<import_stmt>click<import_stmt>json<import_stmt>os<import_from_stmt>xdg XDG_CONFIG_HOME<class_stmt>InvalidDataDirException(Exception)<block_start><pass><block_end><def_stmt>get_data_dir <block_start><if_stmt>os.environ.get('NLGEVAL_DATA')<block_start><if_stmt><not>os.path.exists(os.environ.get('NLGEVAL_DATA'))<block_start>click.secho("NLGEVAL_DATA variable is set but points to non-existent path." fg='red' err=<true>)<line_sep><raise>InvalidDataDirException()<block_end><return>os.environ.get('NLGEVAL_DATA')<block_end><else_stmt><block_start><try_stmt><block_start>cfg_file=os.path.join(XDG_CONFIG_HOME 'nlgeval' 'rc.json')<with_stmt>open(cfg_file 'rt')<as>f<block_start>rc=json.load(f)<if_stmt><not>os.path.exists(rc['data_path'])<block_start>click.secho("Data path found in {} does not exist: {}".format(cfg_file rc['data_path']) fg='red' err=<true>)<line_sep>click.secho("Run `nlg-eval --setup DATA_DIR' to download or set $NLGEVAL_DATA to an existing location" fg='red' err=<true>)<line_sep><raise>InvalidDataDirException()<block_end><return>rc['data_path']<block_end><block_end><except_stmt>Exception<block_start>click.secho("Could not determine location of data." fg='red' err=<true>)<line_sep>click.secho("Run `nlg-eval --setup DATA_DIR' to download or set $NLGEVAL_DATA to an existing location" fg='red' err=<true>)<line_sep><raise>InvalidDataDirException()<block_end><block_end><block_end>
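# --- Illustrative usage sketch ---
# The directory below is a hypothetical location; in practice it is wherever
# `nlg-eval --setup DATA_DIR` placed the data, or a path recorded in rc.json.
os.environ["NLGEVAL_DATA"] = "/opt/nlgeval-data"   # hypothetical, must exist on disk
try:
    print(get_data_dir())                          # -> /opt/nlgeval-data
except InvalidDataDirException:
    print("data directory is missing or not configured")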
# # Copyright 2020-2021 Picovoice Inc. # # You may not use this file except in compliance with the license. A copy of the license is located in the "LICENSE" # file accompanying this source. # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. # <import_stmt>logging<import_stmt>os<import_stmt>platform<import_stmt>subprocess<line_sep>log=logging.getLogger('PPN')<line_sep>log.setLevel(logging.WARNING)<def_stmt>_pv_linux_machine machine<block_start><if_stmt>machine<eq>'x86_64'<block_start><return>machine<block_end><elif_stmt>machine<eq>'aarch64'<block_start>arch_info='-'+machine<block_end><elif_stmt>machine<in>['armv7l' 'armv6l']<block_start>arch_info=''<block_end><else_stmt><block_start><raise>NotImplementedError("Unsupported CPU architecture: '%s'"%machine)<block_end>cpu_info=''<try_stmt><block_start>cpu_info=subprocess.check_output(['cat' '/proc/cpuinfo']).decode()<line_sep>cpu_part_list=[x<for>x cpu_info.split('\n')<if>'CPU part'<in>x]<line_sep>cpu_part=cpu_part_list[0].split(' ')[-1].lower()<block_end><except_stmt>Exception<as>error<block_start><raise>RuntimeError("Failed to identify the CPU with '%s'\nCPU info: %s"%(error cpu_info))<block_end><if_stmt>'0xb76'<eq>cpu_part<block_start><return>'arm11'+arch_info<block_end><elif_stmt>'0xc07'<eq>cpu_part<block_start><return>'cortex-a7'+arch_info<block_end><elif_stmt>'0xd03'<eq>cpu_part<block_start><return>'cortex-a53'+arch_info<block_end><elif_stmt>'0xd07'<eq>cpu_part<block_start><return>'cortex-a57'+arch_info<block_end><elif_stmt>'0xd08'<eq>cpu_part<block_start><return>'cortex-a72'+arch_info<block_end><elif_stmt>'0xc08'<eq>cpu_part<block_start><return>'beaglebone'+arch_info<block_end><elif_stmt>machine<eq>'armv7l'<block_start>log.warning('WARNING: Please be advised that this device (CPU part = %s) is not officially supported by Picovoice. '<concat>'Falling back to the armv6-based (Raspberry Pi Zero) library. 
This is not tested nor optimal.'%cpu_part)<line_sep><return>'arm11'<block_end><else_stmt><block_start><raise>NotImplementedError("Unsupported CPU: '%s'."%cpu_part)<block_end><block_end><def_stmt>_pv_platform <block_start>pv_system=platform.system()<if_stmt>pv_system<not><in>{'Darwin' 'Linux' 'Windows'}<block_start><raise>ValueError("Unsupported system '%s'."%pv_system)<block_end><if_stmt>pv_system<eq>'Linux'<block_start>pv_machine=_pv_linux_machine(platform.machine())<block_end><else_stmt><block_start>pv_machine=platform.machine()<block_end><return>pv_system pv_machine<block_end>_PV_SYSTEM,_PV_MACHINE=_pv_platform()<line_sep>_RASPBERRY_PI_MACHINES={'arm11' 'cortex-a7' 'cortex-a53' 'cortex-a72' 'cortex-a53-aarch64' 'cortex-a72-aarch64'}<line_sep>_JETSON_MACHINES={'cortex-a57-aarch64'}<def_stmt>pv_library_path relative<block_start><if_stmt>_PV_SYSTEM<eq>'Darwin'<block_start><if_stmt>_PV_MACHINE<eq>'x86_64'<block_start><return>os.path.join(os.path.dirname(__file__) relative 'lib/mac/x86_64/libpv_porcupine.dylib')<block_end><elif_stmt>_PV_MACHINE<eq>"arm64"<block_start><return>os.path.join(os.path.dirname(__file__) relative 'lib/mac/arm64/libpv_porcupine.dylib')<block_end><block_end><elif_stmt>_PV_SYSTEM<eq>'Linux'<block_start><if_stmt>_PV_MACHINE<eq>'x86_64'<block_start><return>os.path.join(os.path.dirname(__file__) relative 'lib/linux/x86_64/libpv_porcupine.so')<block_end><elif_stmt>_PV_MACHINE<in>_JETSON_MACHINES<block_start><return>os.path.join(os.path.dirname(__file__) relative 'lib/jetson/%s/libpv_porcupine.so'%_PV_MACHINE)<block_end><elif_stmt>_PV_MACHINE<in>_RASPBERRY_PI_MACHINES<block_start><return>os.path.join(os.path.dirname(__file__) relative 'lib/raspberry-pi/%s/libpv_porcupine.so'%_PV_MACHINE)<block_end><elif_stmt>_PV_MACHINE<eq>'beaglebone'<block_start><return>os.path.join(os.path.dirname(__file__) relative 'lib/beaglebone/libpv_porcupine.so')<block_end><block_end><elif_stmt>_PV_SYSTEM<eq>'Windows'<block_start><return>os.path.join(os.path.dirname(__file__) relative 'lib/windows/amd64/libpv_porcupine.dll')<block_end><raise>NotImplementedError('Unsupported platform.')<block_end><def_stmt>pv_model_path relative<block_start><return>os.path.join(os.path.dirname(__file__) relative 'lib/common/porcupine_params.pv')<block_end><def_stmt>pv_keyword_files_subdir <block_start><if_stmt>_PV_SYSTEM<eq>'Darwin'<block_start><return>'mac'<block_end><elif_stmt>_PV_SYSTEM<eq>'Linux'<block_start><if_stmt>_PV_MACHINE<eq>'x86_64'<block_start><return>'linux'<block_end><elif_stmt>_PV_MACHINE<in>_JETSON_MACHINES<block_start><return>'jetson'<block_end><elif_stmt>_PV_MACHINE<in>_RASPBERRY_PI_MACHINES<block_start><return>'raspberry-pi'<block_end><elif_stmt>_PV_MACHINE<eq>'beaglebone'<block_start><return>'beaglebone'<block_end><block_end><elif_stmt>_PV_SYSTEM<eq>'Windows'<block_start><return>'windows'<block_end><raise>NotImplementedError('Unsupported platform')<block_end><def_stmt>pv_keyword_paths relative<block_start>keyword_files_dir=os.path.join(os.path.dirname(__file__) relative 'resources/keyword_files' pv_keyword_files_subdir())<line_sep>res=dict()<for_stmt>x os.listdir(keyword_files_dir)<block_start>res[x.rsplit('_')[0]]=os.path.join(keyword_files_dir x)<block_end><return>res<block_end>
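# --- Illustrative usage sketch ---
# These helpers are pure platform/path lookups, so they can be called directly;
# the printed values depend on the host, and the returned paths only point at
# real files inside an installed pvporcupine package.
if __name__ == '__main__':
    print(_PV_SYSTEM, _PV_MACHINE)      # e.g. "Linux x86_64"
    print(pv_keyword_files_subdir())    # e.g. "linux"
    print(pv_model_path(''))            # .../lib/common/porcupine_params.pv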
<import_stmt>pytest<import_from_stmt>uvicorn.importer ImportFromStringError import_from_string<def_stmt>test_invalid_format <arrow><none><block_start><with_stmt>pytest.raises(ImportFromStringError)<as>exc_info<block_start>import_from_string("example:")<block_end>expected='Import string "example:" must be in format "<module>:<attribute>".'<assert_stmt>expected<in>str(exc_info.value)<block_end><def_stmt>test_invalid_module <arrow><none><block_start><with_stmt>pytest.raises(ImportFromStringError)<as>exc_info<block_start>import_from_string("module_does_not_exist:myattr")<block_end>expected='Could not import module "module_does_not_exist".'<assert_stmt>expected<in>str(exc_info.value)<block_end><def_stmt>test_invalid_attr <arrow><none><block_start><with_stmt>pytest.raises(ImportFromStringError)<as>exc_info<block_start>import_from_string("tempfile:attr_does_not_exist")<block_end>expected='Attribute "attr_does_not_exist" not found in module "tempfile".'<assert_stmt>expected<in>str(exc_info.value)<block_end><def_stmt>test_internal_import_error <arrow><none><block_start><with_stmt>pytest.raises(ImportError)<block_start>import_from_string("tests.importer.raise_import_error:myattr")<block_end><block_end><def_stmt>test_valid_import <arrow><none><block_start>instance=import_from_string("tempfile:TemporaryFile")<import_from_stmt>tempfile TemporaryFile<assert_stmt>instance<eq>TemporaryFile<block_end><def_stmt>test_no_import_needed <arrow><none><block_start><import_from_stmt>tempfile TemporaryFile<line_sep>instance=import_from_string(TemporaryFile)<assert_stmt>instance<eq>TemporaryFile<block_end>
# Copyright 2021 Huawei Technologies Co., Ltd.All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Mapper module."""<import_from_stmt>mindconverter.graph_based_converter.common.utils reset_template_and_exchange_msg<import_from_stmt>mindconverter.graph_based_converter.constant WeightType<import_from_stmt>mindconverter.graph_based_converter.mapper.base AtenToMindSporeMapper<class_stmt>BroadcastToMapper(AtenToMindSporeMapper)<block_start>"""BroadcastTo mapper."""<line_sep>@staticmethod<def_stmt>_operation_name_in_ms *args **kwargs<block_start><return>"broadcast_to"<block_end>@staticmethod<def_stmt>_convert_trained_weights **kwargs<block_start>weights=kwargs.get("weights" list())<line_sep>args_name=["input" "shape" "implicit"]<line_sep>args_name_list=BroadcastToMapper.get_args_name_list(**kwargs args_name=args_name)<line_sep>trainable_params=dict()<for_stmt>weight weights<block_start>trainable_params[args_name_list[weight.location]]={"data":weight.value "location":weight.location "type":WeightType.PARAMETER.value "onnx_name":weight.name}<block_end><return>trainable_params<block_end>@staticmethod<def_stmt>_generate_snippet_template **kwargs<block_start>template,exchange_msg,outputs_list,outputs_mapping=AtenToMindSporeMapper._generate_snippet_template(**kwargs)<line_sep>raw_params=kwargs.get("raw_params")<if_stmt><not>raw_params<block_start><return>template exchange_msg outputs_list outputs_mapping<block_end>op=kwargs.get("operation")<line_sep>trainable_params=kwargs.get("trainable_params" dict())<line_sep>output_shape=raw_params.get("output_shape" tuple())<line_sep>variable_slot="var_0"<line_sep>args_name=["input" "shape" "implicit"]<line_sep>inputs,args,group_inputs=BroadcastToMapper._params_parser(raw_params=raw_params args_name=args_name trainable_params=trainable_params)<line_sep>args=BroadcastToMapper._get_args(variable_slot=variable_slot inputs=inputs args=args output_shape=output_shape)<line_sep>init_template_list=[f"self.{{{variable_slot}}}_{arg_name} = {{{arg_name}}}"<for>arg_name args]<line_sep>parameters_declared=dict()<for_stmt>name,trainable_param trainable_params.copy().items()<block_start>value=trainable_param["data"]<if_stmt>BroadcastToMapper.is_tensor(value)<block_start>variable_slot_param_name=f"{variable_slot}/{name}"<line_sep>init_template_list.append(f"self.{{{variable_slot}}}_{name} = {{{variable_slot_param_name}}}")<line_sep>parameters_declared[name]=""<block_end><else_stmt><block_start>args[name]=value.tolist()<line_sep>init_template_list.append(f"self.{{{variable_slot}}}_{name} = {{{name}}}")<line_sep>trainable_params.pop(name)<block_end><block_end>construct_template=f"opt_{{{variable_slot}}} = ms_np.{op}({inputs[0]}, ({', '.join(inputs[1:-1])}))"<line_sep>template,exchange_msg=reset_template_and_exchange_msg(template exchange_msg variable_slot init_template_list [construct_template] args trainable_params parameters_declared group_inputs)<line_sep><return>template exchange_msg 
outputs_list outputs_mapping<block_end>@staticmethod<def_stmt>_get_args **kwargs<block_start>"""Get args from params_parser."""<line_sep>variable_slot=kwargs.get("variable_slot")<line_sep>inputs=kwargs.get("inputs" list())<line_sep>args=kwargs.get("args" dict())<line_sep>output_shape=kwargs.get("output_shape" tuple())<line_sep>shape_name_list=[ipt.replace(f"self.{{{variable_slot}}}_" "")<for>ipt inputs[1:-1]]<for_stmt>idx,shape_name enumerate(shape_name_list)<block_start><if_stmt>isinstance(args.get(shape_name) int)<and>args.get(shape_name)<eq>-1<block_start>args[shape_name]=output_shape[idx]<block_end><block_end>args.pop("implicit")<line_sep><return>args<block_end><block_end>
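# --- Illustrative sketch of the operator the generated snippet calls ---
# The construct template above emits `opt_var_0 = ms_np.broadcast_to(input, (shape))`.
# A standalone MindSpore example of that call (shapes are made up, and a recent
# MindSpore release with the mindspore.numpy package is assumed):
import mindspore.numpy as ms_np
from mindspore import Tensor

x = Tensor([[1.0], [2.0], [3.0]])   # shape (3, 1)
y = ms_np.broadcast_to(x, (3, 4))   # replicate along the last axis
print(y.shape)                      # (3, 4)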
"""Provides an easy way of generating several geometric objects. CONTAINS -------- vtkArrowSource vtkCylinderSource vtkSphereSource vtkPlaneSource vtkLineSource vtkCubeSource vtkConeSource vtkDiskSource vtkRegularPolygonSource vtkPyramid vtkPlatonicSolidSource vtkSuperquadricSource as well as some pure-python helpers. """<import_stmt>numpy<as>np<import_stmt>pyvista<import_from_stmt>pyvista _vtk<import_from_stmt>pyvista.utilities check_valid_vector<line_sep>NORMALS={'x':[1 0 0] 'y':[0 1 0] 'z':[0 0 1] '-x':[-1 0 0] '-y':[0 -1 0] '-z':[0 0 -1] }<def_stmt>translate surf center=[0. 0. 0.] direction=[1. 0. 0.]<block_start>"""Translate and orient a mesh to a new center and direction. By default, the input mesh is considered centered at the origin and facing in the x direction. """<line_sep>normx=np.array(direction)/np.linalg.norm(direction)<line_sep>normz=np.cross(normx [0 1.0 0.0000001])<line_sep>normz<augdiv>np.linalg.norm(normz)<line_sep>normy=np.cross(normz normx)<line_sep>trans=np.zeros((4 4))<line_sep>trans[:3 0]=normx<line_sep>trans[:3 1]=normy<line_sep>trans[:3 2]=normz<line_sep>trans[3 3]=1<line_sep>surf.transform(trans)<if_stmt><not>np.allclose(center [0. 0. 0.])<block_start>surf.points<augadd>np.array(center)<block_end><block_end><def_stmt>Cylinder center=(0.0 0.0 0.0) direction=(1.0 0.0 0.0) radius=0.5 height=1.0 resolution=100 capping=<true><block_start>"""Create the surface of a cylinder. See also :func:`pyvista.CylinderStructured`. Parameters ---------- center : sequence, optional Location of the centroid in ``[x, y, z]``. direction : sequence, optional Direction cylinder points to in ``[x, y, z]``. radius : float, optional Radius of the cylinder. height : float, optional Height of the cylinder. resolution : int, optional Number of points on the circular face of the cylinder. capping : bool, optional Cap cylinder ends with polygons. Default ``True``. Returns ------- pyvista.PolyData Cylinder surface. Examples -------- >>> import pyvista >>> import numpy as np >>> cylinder = pyvista.Cylinder(center=[1, 2, 3], direction=[1, 1, 1], ... radius=1, height=2) >>> cylinder.plot(show_edges=True, line_width=5, cpos='xy') """<line_sep>cylinderSource=_vtk.vtkCylinderSource()<line_sep>cylinderSource.SetRadius(radius)<line_sep>cylinderSource.SetHeight(height)<line_sep>cylinderSource.SetCapping(capping)<line_sep>cylinderSource.SetResolution(resolution)<line_sep>cylinderSource.Update()<line_sep>surf=pyvista.wrap(cylinderSource.GetOutput())<line_sep>surf.rotate_z(-90 inplace=<true>)<line_sep>translate(surf center direction)<line_sep><return>surf<block_end><def_stmt>CylinderStructured radius=0.5 height=1.0 center=(0. 0. 0.) direction=(1. 0. 0.) theta_resolution=32 z_resolution=10<block_start>"""Create a cylinder mesh as a :class:`pyvista.StructuredGrid`. The end caps are left open. This can create a surface mesh if a single value for the ``radius`` is given or a 3D mesh if multiple radii are given as a list/array in the ``radius`` argument. Parameters ---------- radius : float, sequence, optional Radius of the cylinder. If a sequence, then describes the radial coordinates of the cells as a range of values as specified by the ``radius``. height : float, optional Height of the cylinder along its Z-axis. center : sequence Location of the centroid in ``[x, y, z]``. direction : sequence Direction cylinder Z-axis in ``[x, y, z]``. theta_resolution : int, optional Number of points on the circular face of the cylinder. Ignored if ``radius`` is an iterable. 
z_resolution : int, optional Number of points along the height (Z-axis) of the cylinder. Returns ------- pyvista.StructuredGrid Structured cylinder. Examples -------- Default structured cylinder >>> import pyvista >>> mesh = pyvista.CylinderStructured() >>> mesh.plot(show_edges=True) Structured cylinder with an inner radius of 1, outer of 2, with 5 segments. >>> import numpy as np >>> mesh = pyvista.CylinderStructured(radius=np.linspace(1, 2, 5)) >>> mesh.plot(show_edges=True) """<line_sep># Define grid in polar coordinates r=np.array([radius]).ravel()<line_sep>nr=len(r)<line_sep>theta=np.linspace(0 2<times>np.pi num=theta_resolution)<line_sep>radius_matrix,theta_matrix=np.meshgrid(r theta)<line_sep># Transform to cartesian space X=radius_matrix<times>np.cos(theta_matrix)<line_sep>Y=radius_matrix<times>np.sin(theta_matrix)<line_sep># Make all the nodes in the grid xx=np.array([X]<times>z_resolution).ravel()<line_sep>yy=np.array([Y]<times>z_resolution).ravel()<line_sep>dz=height/(z_resolution-1)<line_sep>zz=np.empty(yy.size)<line_sep>zz=np.full((X.size z_resolution) dz)<line_sep>zz<augmul>np.arange(z_resolution)<line_sep>zz=zz.ravel(order='f')<line_sep># Create the grid grid=pyvista.StructuredGrid()<line_sep>grid.points=np.c_[xx yy zz]<line_sep>grid.dimensions=[nr theta_resolution z_resolution]<line_sep># Orient properly in user direction vx=np.array([0. 0. 1.])<if_stmt><not>np.allclose(vx direction)<block_start>direction<augdiv>np.linalg.norm(direction)<line_sep>vx<augsub>vx.dot(direction)<times>direction<line_sep>vx<augdiv>np.linalg.norm(vx)<line_sep>vy=np.cross(direction vx)<line_sep>rmtx=np.array([vx vy direction])<line_sep>grid.points=grid.points.dot(rmtx)<block_end># Translate to given center grid.points<augsub>np.array(grid.center)<line_sep>grid.points<augadd>np.array(center)<line_sep><return>grid<block_end><def_stmt>Arrow start=(0. 0. 0.) direction=(1. 0. 0.) tip_length=0.25 tip_radius=0.1 tip_resolution=20 shaft_radius=0.05 shaft_resolution=20 scale=<none><block_start>"""Create an arrow. Parameters ---------- start : iterable, optional Start location in ``[x, y, z]``. direction : iterable, optional Direction the arrow points to in ``[x, y, z]``. tip_length : float, optional Length of the tip. tip_radius : float, optional Radius of the tip. tip_resolution : int, optional Number of faces around the tip. shaft_radius : float, optional Radius of the shaft. shaft_resolution : int, optional Number of faces around the shaft. scale : float or str, optional Scale factor of the entire object, default is ``None`` (i.e. scale of 1). ``'auto'`` scales to length of direction array. Returns ------- pyvista.PolyData Arrow mesh. Examples -------- Plot a default arrow. 
>>> import pyvista >>> mesh = pyvista.Arrow() >>> mesh.plot(show_edges=True) """<line_sep># Create arrow object arrow=_vtk.vtkArrowSource()<line_sep>arrow.SetTipLength(tip_length)<line_sep>arrow.SetTipRadius(tip_radius)<line_sep>arrow.SetTipResolution(tip_resolution)<line_sep>arrow.SetShaftRadius(shaft_radius)<line_sep>arrow.SetShaftResolution(shaft_resolution)<line_sep>arrow.Update()<line_sep>surf=pyvista.wrap(arrow.GetOutput())<if_stmt>scale<eq>'auto'<block_start>scale=float(np.linalg.norm(direction))<block_end><if_stmt>isinstance(scale float)<or>isinstance(scale int)<block_start>surf.points<augmul>scale<block_end><elif_stmt>scale<is><not><none><block_start><raise>TypeError("Scale must be either float, int or 'auto'.")<block_end>translate(surf start direction)<line_sep><return>surf<block_end><def_stmt>Sphere radius=0.5 center=(0 0 0) direction=(0 0 1) theta_resolution=30 phi_resolution=30 start_theta=0 end_theta=360 start_phi=0 end_phi=180<block_start>"""Create a vtk Sphere. Parameters ---------- radius : float, optional Sphere radius. center : np.ndarray or list, optional Center in ``[x, y, z]``. direction : list or tuple or np.ndarray, optional Direction the top of the sphere points to in ``[x, y, z]``. theta_resolution : int , optional Set the number of points in the longitude direction (ranging from ``start_theta`` to ``end_theta``). phi_resolution : int, optional Set the number of points in the latitude direction (ranging from ``start_phi`` to ``end_phi``). start_theta : float, optional Starting longitude angle. end_theta : float, optional Ending longitude angle. start_phi : float, optional Starting latitude angle. end_phi : float, optional Ending latitude angle. Returns ------- pyvista.PolyData Sphere mesh. Examples -------- Create a sphere using default parameters. >>> import pyvista >>> sphere = pyvista.Sphere() >>> sphere.plot(show_edges=True) Create a quarter sphere by setting ``end_theta``. >>> sphere = pyvista.Sphere(end_theta=90) >>> out = sphere.plot(show_edges=True) """<line_sep>sphere=_vtk.vtkSphereSource()<line_sep>sphere.SetRadius(radius)<line_sep>sphere.SetThetaResolution(theta_resolution)<line_sep>sphere.SetPhiResolution(phi_resolution)<line_sep>sphere.SetStartTheta(start_theta)<line_sep>sphere.SetEndTheta(end_theta)<line_sep>sphere.SetStartPhi(start_phi)<line_sep>sphere.SetEndPhi(end_phi)<line_sep>sphere.Update()<line_sep>surf=pyvista.wrap(sphere.GetOutput())<line_sep>surf.rotate_y(-90 inplace=<true>)<line_sep>translate(surf center direction)<line_sep><return>surf<block_end><def_stmt>Plane center=(0 0 0) direction=(0 0 1) i_size=1 j_size=1 i_resolution=10 j_resolution=10<block_start>"""Create a plane. Parameters ---------- center : list or tuple or np.ndarray Location of the centroid in ``[x, y, z]``. direction : list or tuple or np.ndarray Direction of the plane's normal in ``[x, y, z]``. i_size : float Size of the plane in the i direction. j_size : float Size of the plane in the j direction. i_resolution : int Number of points on the plane in the i direction. j_resolution : int Number of points on the plane in the j direction. Returns ------- pyvista.PolyData Plane mesh. Examples -------- Create a default plane. 
>>> import pyvista >>> mesh = pyvista.Plane() >>> mesh.point_data.clear() >>> mesh.plot(show_edges=True) """<line_sep>planeSource=_vtk.vtkPlaneSource()<line_sep>planeSource.SetXResolution(i_resolution)<line_sep>planeSource.SetYResolution(j_resolution)<line_sep>planeSource.Update()<line_sep>surf=pyvista.wrap(planeSource.GetOutput())<line_sep>surf.points[: 0]<augmul>i_size<line_sep>surf.points[: 1]<augmul>j_size<line_sep>surf.rotate_y(-90 inplace=<true>)<line_sep>translate(surf center direction)<line_sep><return>surf<block_end><def_stmt>Line pointa=(-0.5 0. 0.) pointb=(0.5 0. 0.) resolution=1<block_start>"""Create a line. Parameters ---------- pointa : np.ndarray or list, optional Location in ``[x, y, z]``. pointb : np.ndarray or list, optional Location in ``[x, y, z]``. resolution : int, optional Number of pieces to divide line into. Returns ------- pyvista.PolyData Line mesh. Examples -------- Create a line between ``(0, 0, 0)`` and ``(0, 0, 1)``. >>> import pyvista >>> mesh = pyvista.Line((0, 0, 0), (0, 0, 1)) >>> mesh.plot(color='k', line_width=10) """<if_stmt>resolution<le>0<block_start><raise>ValueError('Resolution must be positive')<block_end><if_stmt>np.array(pointa).size<ne>3<block_start><raise>TypeError('Point A must be a length three tuple of floats.')<block_end><if_stmt>np.array(pointb).size<ne>3<block_start><raise>TypeError('Point B must be a length three tuple of floats.')<block_end>src=_vtk.vtkLineSource()<line_sep>src.SetPoint1(*pointa)<line_sep>src.SetPoint2(*pointb)<line_sep>src.SetResolution(resolution)<line_sep>src.Update()<line_sep>line=pyvista.wrap(src.GetOutput())<line_sep># Compute distance of every point along line compute=<lambda>p0 p1:np.sqrt(np.sum((p1-p0)<power>2 axis=1))<line_sep>distance=compute(np.array(pointa) line.points)<line_sep>line['Distance']=distance<line_sep><return>line<block_end><def_stmt>Tube pointa=(-0.5 0. 0.) pointb=(0.5 0. 0.) resolution=1 radius=1.0 n_sides=15<block_start>"""Create a tube. Parameters ---------- pointa : np.ndarray or list, optional Location in ``[x, y, z]``. pointb : np.ndarray or list, optional Location in ``[x, y, z]``. resolution : int, optional Number of pieces to divide tube into. radius : float, optional Minimum tube radius (minimum because the tube radius may vary). n_sides : int, optional Number of sides for the tube. Returns ------- pyvista.PolyData Tube mesh. Examples -------- Create a tube between ``(0, 0, 0)`` and ``(0, 0, 1)``. 
>>> import pyvista >>> mesh = pyvista.Tube((0, 0, 0), (0, 0, 1)) >>> mesh.plot() """<if_stmt>resolution<le>0<block_start><raise>ValueError('Resolution must be positive.')<block_end><if_stmt>np.array(pointa).size<ne>3<block_start><raise>TypeError('Point A must be a length three tuple of floats.')<block_end><if_stmt>np.array(pointb).size<ne>3<block_start><raise>TypeError('Point B must be a length three tuple of floats.')<block_end>line_src=_vtk.vtkLineSource()<line_sep>line_src.SetPoint1(*pointa)<line_sep>line_src.SetPoint2(*pointb)<line_sep>line_src.SetResolution(resolution)<line_sep>line_src.Update()<if_stmt>n_sides<l>3<block_start><raise>ValueError('Number of sides `n_sides` must be >= 3')<block_end>tube_filter=_vtk.vtkTubeFilter()<line_sep>tube_filter.SetInputConnection(line_src.GetOutputPort())<line_sep>tube_filter.SetRadius(radius)<line_sep>tube_filter.SetNumberOfSides(n_sides)<line_sep>tube_filter.Update()<line_sep><return>pyvista.wrap(tube_filter.GetOutput())<block_end><def_stmt>Cube center=(0.0 0.0 0.0) x_length=1.0 y_length=1.0 z_length=1.0 bounds=<none> clean=<true><block_start>"""Create a cube. It's possible to specify either the center and side lengths or just the bounds of the cube. If ``bounds`` are given, all other arguments are ignored. .. versionchanged:: 0.33.0 The cube is created using ``vtk.vtkCubeSource``. For compatibility with :func:`pyvista.PlatonicSolid`, face indices are also added as cell data. For full compatibility with :func:`PlatonicSolid() <pyvista.PlatonicSolid>`, one has to use ``x_length = y_length = z_length = 2 * radius / 3**0.5``. The cube points are also cleaned by default now, leaving only the 8 corners and a watertight (manifold) mesh. Parameters ---------- center : sequence, optional Center in ``[x, y, z]``. x_length : float, optional Length of the cube in the x-direction. y_length : float, optional Length of the cube in the y-direction. z_length : float, optional Length of the cube in the z-direction. bounds : sequence, optional Specify the bounding box of the cube. If given, all other size arguments are ignored. ``(xMin, xMax, yMin, yMax, zMin, zMax)``. clean : bool, optional Whether to clean the raw points of the mesh, making the cube manifold. Note that this will degrade the texture coordinates that come with the mesh, so if you plan to map a texture on the cube, consider setting this to ``False``. .. versionadded:: 0.33.0 Returns ------- pyvista.PolyData Mesh of the cube. Examples -------- Create a default cube. >>> import pyvista >>> mesh = pyvista.Cube() >>> mesh.plot(show_edges=True, line_width=5) """<line_sep>src=_vtk.vtkCubeSource()<if_stmt>bounds<is><not><none><block_start><if_stmt>np.array(bounds).size<ne>6<block_start><raise>TypeError('Bounds must be given as length 6 tuple: (xMin, xMax, yMin, yMax, zMin, zMax)')<block_end>src.SetBounds(bounds)<block_end><else_stmt><block_start>src.SetCenter(center)<line_sep>src.SetXLength(x_length)<line_sep>src.SetYLength(y_length)<line_sep>src.SetZLength(z_length)<block_end>src.Update()<line_sep>cube=pyvista.wrap(src.GetOutput())<line_sep># add face index data for compatibility with PlatonicSolid # but make it inactive for backwards compatibility cube.cell_data.set_array([1 4 0 3 5 2] ['FaceIndex'])<line_sep># clean duplicate points <if_stmt>clean<block_start>cube.clean(inplace=<true>)<block_end><return>cube<block_end><def_stmt>Box bounds=(-1. 1. -1. 1. -1. 1.) level=0 quads=<true><block_start>"""Create a box with solid faces for the given bounds. 
Parameters ---------- bounds : iterable, optional Specify the bounding box of the cube. ``(xMin, xMax, yMin, yMax, zMin, zMax)``. level : int, optional Level of subdivision of the faces. quads : bool, optional Flag to tell the source to generate either a quad or two triangle for a set of four points. Default ``True``. Returns ------- pyvista.PolyData Mesh of the box. Examples -------- Create a box with subdivision ``level=2``. >>> import pyvista >>> mesh = pyvista.Box(level=2) >>> mesh.plot(show_edges=True) """<if_stmt>np.array(bounds).size<ne>6<block_start><raise>TypeError('Bounds must be given as length 6 tuple: (xMin, xMax, yMin, yMax, zMin, zMax)')<block_end>src=_vtk.vtkTessellatedBoxSource()<line_sep>src.SetLevel(level)<if_stmt>quads<block_start>src.QuadsOn()<block_end><else_stmt><block_start>src.QuadsOff()<block_end>src.SetBounds(bounds)<line_sep>src.Update()<line_sep><return>pyvista.wrap(src.GetOutput())<block_end><def_stmt>Cone center=(0. 0. 0.) direction=(1. 0. 0.) height=1.0 radius=<none> capping=<true> angle=<none> resolution=6<block_start>"""Create a cone. Parameters ---------- center : iterable, optional Center in ``[x, y, z]``. Axis of the cone passes through this point. direction : iterable, optional Direction vector in ``[x, y, z]``. Orientation vector of the cone. height : float, optional Height along the cone in its specified direction. radius : float, optional Base radius of the cone. capping : bool, optional Enable or disable the capping the base of the cone with a polygon. angle : float, optional The angle in degrees between the axis of the cone and a generatrix. resolution : int, optional Number of facets used to represent the cone. Returns ------- pyvista.PolyData Cone mesh. Examples -------- Create a default Cone. >>> import pyvista >>> mesh = pyvista.Cone() >>> mesh.plot(show_edges=True, line_width=5) """<line_sep>src=_vtk.vtkConeSource()<line_sep>src.SetCapping(capping)<line_sep>src.SetDirection(direction)<line_sep>src.SetCenter(center)<line_sep>src.SetHeight(height)<if_stmt>angle<and>radius<block_start><raise>ValueError("Both radius and angle specified. They are mutually exclusive.")<block_end><elif_stmt>angle<and><not>radius<block_start>src.SetAngle(angle)<block_end><elif_stmt><not>angle<and>radius<block_start>src.SetRadius(radius)<block_end><elif_stmt><not>angle<and><not>radius<block_start>src.SetRadius(0.5)<block_end>src.SetResolution(resolution)<line_sep>src.Update()<line_sep><return>pyvista.wrap(src.GetOutput())<block_end><def_stmt>Polygon center=(0. 0. 0.) radius=1 normal=(0 0 1) n_sides=6<block_start>"""Create a polygon. Parameters ---------- center : iterable, optional Center in ``[x, y, z]``. Central axis of the polygon passes through this point. radius : float, optional The radius of the polygon. normal : iterable, optional Direction vector in ``[x, y, z]``. Orientation vector of the polygon. n_sides : int, optional Number of sides of the polygon. Returns ------- pyvista.PolyData Mesh of the polygon. Examples -------- Create an 8 sided polygon. >>> import pyvista >>> mesh = pyvista.Polygon(n_sides=8) >>> mesh.plot(show_edges=True, line_width=5) """<line_sep>src=_vtk.vtkRegularPolygonSource()<line_sep>src.SetCenter(center)<line_sep>src.SetNumberOfSides(n_sides)<line_sep>src.SetRadius(radius)<line_sep>src.SetNormal(normal)<line_sep>src.Update()<line_sep><return>pyvista.wrap(src.GetOutput())<block_end><def_stmt>Disc center=(0. 0. 0.) inner=0.25 outer=0.5 normal=(0 0 1) r_res=1 c_res=6<block_start>"""Create a polygonal disk with a hole in the center. 
The disk has zero height. The user can specify the inner and outer radius of the disk, and the radial and circumferential resolution of the polygonal representation. Parameters ---------- center : iterable Center in ``[x, y, z]``. Middle of the axis of the disc. inner : float, optional The inner radius. outer : float, optional The outer radius. normal : iterable Direction vector in ``[x, y, z]``. Orientation vector of the disc. r_res : int, optional Number of points in radial direction. c_res : int, optional Number of points in circumferential direction. Returns ------- pyvista.PolyData Disk mesh. Examples -------- Create a disc with 50 points in the circumferential direction. >>> import pyvista >>> mesh = pyvista.Disc(c_res=50) >>> mesh.plot(show_edges=True, line_width=5) """<line_sep>src=_vtk.vtkDiskSource()<line_sep>src.SetInnerRadius(inner)<line_sep>src.SetOuterRadius(outer)<line_sep>src.SetRadialResolution(r_res)<line_sep>src.SetCircumferentialResolution(c_res)<line_sep>src.Update()<line_sep>normal=np.array(normal)<line_sep>center=np.array(center)<line_sep>surf=pyvista.wrap(src.GetOutput())<line_sep>surf.rotate_y(90 inplace=<true>)<line_sep>translate(surf center normal)<line_sep><return>surf<block_end><def_stmt>Text3D string depth=0.5<block_start>"""Create 3D text from a string. Parameters ---------- string : str String to generate 3D text from. depth : float, optional Depth of the text. Defaults to ``0.5``. Returns ------- pyvista.PolyData 3D text mesh. Examples -------- >>> import pyvista >>> text_mesh = pyvista.Text3D('PyVista') >>> text_mesh.plot(cpos='xy') """<line_sep>vec_text=_vtk.vtkVectorText()<line_sep>vec_text.SetText(string)<line_sep>extrude=_vtk.vtkLinearExtrusionFilter()<line_sep>extrude.SetInputConnection(vec_text.GetOutputPort())<line_sep>extrude.SetExtrusionTypeToNormalExtrusion()<line_sep>extrude.SetVector(0 0 1)<line_sep>extrude.SetScaleFactor(depth)<line_sep>tri_filter=_vtk.vtkTriangleFilter()<line_sep>tri_filter.SetInputConnection(extrude.GetOutputPort())<line_sep>tri_filter.Update()<line_sep><return>pyvista.wrap(tri_filter.GetOutput())<block_end><def_stmt>Wavelet extent=(-10 10 -10 10 -10 10) center=(0 0 0) maximum=255 x_freq=60 y_freq=30 z_freq=40 x_mag=10 y_mag=18 z_mag=5 std=0.5 subsample_rate=1<block_start>"""Create a wavelet. Produces images with pixel values determined by ``Maximum*Gaussian*x_mag*sin(x_freq*x)*sin(y_freq*y)*cos(z_freq*z)`` Values are float scalars on point data with name ``"RTData"``. Parameters ---------- extent : sequence, optional Set/Get the extent of the whole output image. Default ``(-10, 10, -10, 10, -10, 10)``. center : list, optional Center of the wavelet. maximum : float, optional Maximum of the wavelet function. x_freq : float, optional Natural frequency in the x direction. y_freq : float, optional Natural frequency in the y direction. z_freq : float, optional Natural frequency in the z direction. x_mag : float, optional Magnitude in the x direction. y_mag : float, optional Magnitude in the y direction. z_mag : float, optional Magnitude in the z direction. std : float, optional Standard deviation. subsample_rate : int, optional The sub-sample rate. Returns ------- pyvista.PolyData Wavelet mesh. Examples -------- >>> import pyvista >>> wavelet = pyvista.Wavelet(extent=(0, 50, 0, 50, 0, 10), x_freq=20, ... y_freq=10, z_freq=1, x_mag=100, y_mag=100, ... z_mag=1000) >>> wavelet.plot(show_scalar_bar=False) Extract lower valued cells of the wavelet and create a surface from it. 
>>> thresh = wavelet.threshold(800).extract_surface() >>> thresh.plot(show_scalar_bar=False) Smooth it to create "waves" >>> waves = thresh.smooth(n_iter=100, relaxation_factor=0.1) >>> waves.plot(color='white', smooth_shading=True, show_edges=True) """<line_sep>wavelet_source=_vtk.vtkRTAnalyticSource()<line_sep>wavelet_source.SetWholeExtent(*extent)<line_sep>wavelet_source.SetCenter(center)<line_sep>wavelet_source.SetMaximum(maximum)<line_sep>wavelet_source.SetXFreq(x_freq)<line_sep>wavelet_source.SetYFreq(y_freq)<line_sep>wavelet_source.SetZFreq(z_freq)<line_sep>wavelet_source.SetXMag(x_mag)<line_sep>wavelet_source.SetYMag(y_mag)<line_sep>wavelet_source.SetZMag(z_mag)<line_sep>wavelet_source.SetStandardDeviation(std)<line_sep>wavelet_source.SetSubsampleRate(subsample_rate)<line_sep>wavelet_source.Update()<line_sep><return>pyvista.wrap(wavelet_source.GetOutput())<block_end><def_stmt>CircularArc pointa pointb center resolution=100 negative=<false><block_start>"""Create a circular arc defined by two endpoints and a center. The number of segments composing the polyline is controlled by setting the object resolution. Parameters ---------- pointa : sequence Position of the first end point. pointb : sequence Position of the other end point. center : sequence Center of the circle that defines the arc. resolution : int, optional The number of segments of the polyline that draws the arc. Resolution of 1 will just create a line. negative : bool, optional By default the arc spans the shortest angular sector between ``pointa`` and ``pointb``. By setting this to ``True``, the longest angular sector is used instead (i.e. the negative coterminal angle to the shortest one). Returns ------- pyvista.PolyData Circular arc mesh. Examples -------- Create a quarter arc centered at the origin in the xy plane. >>> import pyvista >>> arc = pyvista.CircularArc([-1, 0, 0], [0, 1, 0], [0, 0, 0]) >>> pl = pyvista.Plotter() >>> _ = pl.add_mesh(arc, color='k', line_width=10) >>> _ = pl.show_bounds(location='all', font_size=30, use_2d=True) >>> _ = pl.view_xy() >>> pl.show() """<line_sep>check_valid_vector(pointa 'pointa')<line_sep>check_valid_vector(pointb 'pointb')<line_sep>check_valid_vector(center 'center')<if_stmt><not>np.isclose(np.linalg.norm(np.array(pointa)-np.array(center)) np.linalg.norm(np.array(pointb)-np.array(center)) )<block_start><raise>ValueError("pointa and pointb are not equidistant from center")<block_end># fix half-arc bug: if a half arc travels directly through the # center point, it becomes a line pointb=list(pointb)<line_sep>pointb[0]<augsub>1E-10<line_sep>pointb[1]<augsub>1E-10<line_sep>arc=_vtk.vtkArcSource()<line_sep>arc.SetPoint1(*pointa)<line_sep>arc.SetPoint2(*pointb)<line_sep>arc.SetCenter(*center)<line_sep>arc.SetResolution(resolution)<line_sep>arc.SetNegative(negative)<line_sep>arc.Update()<line_sep>angle=np.deg2rad(arc.GetAngle())<line_sep>arc=pyvista.wrap(arc.GetOutput())<line_sep># Compute distance of every point along circular arc center=np.array(center).ravel()<line_sep>radius=np.sqrt(np.sum((arc.points[0]-center)<power>2 axis=0))<line_sep>angles=np.arange(0.0 1.0+1.0/resolution 1.0/resolution)<times>angle<line_sep>arc['Distance']=radius<times>angles<line_sep><return>arc<block_end><def_stmt>CircularArcFromNormal center resolution=100 normal=<none> polar=<none> angle=<none><block_start>"""Create a circular arc defined by normal to the plane of the arc, and an angle. The number of segments composing the polyline is controlled by setting the object resolution. 
Parameters ---------- center : sequence Center of the circle that defines the arc. resolution : int, optional The number of segments of the polyline that draws the arc. Resolution of 1 will just create a line. normal : sequence, optional The normal vector to the plane of the arc. By default it points in the positive Z direction. polar : sequence, optional Starting point of the arc in polar coordinates. By default it is the unit vector in the positive x direction. angle : float, optional Arc length (in degrees) beginning at the polar vector. The direction is counterclockwise. By default it is 90. Returns ------- pyvista.PolyData Circular arc mesh. Examples -------- Quarter arc centered at the origin in the xy plane. >>> import pyvista >>> normal = [0, 0, 1] >>> polar = [-1, 0, 0] >>> arc = pyvista.CircularArcFromNormal([0, 0, 0], normal=normal, polar=polar) >>> pl = pyvista.Plotter() >>> _ = pl.add_mesh(arc, color='k', line_width=10) >>> _ = pl.show_bounds(location='all', font_size=30, use_2d=True) >>> _ = pl.view_xy() >>> pl.show() """<line_sep>check_valid_vector(center 'center')<if_stmt>normal<is><none><block_start>normal=[0 0 1]<block_end><if_stmt>polar<is><none><block_start>polar=[1 0 0]<block_end><if_stmt>angle<is><none><block_start>angle=90.0<block_end>arc=_vtk.vtkArcSource()<line_sep>arc.SetCenter(*center)<line_sep>arc.SetResolution(resolution)<line_sep>arc.UseNormalAndAngleOn()<line_sep>check_valid_vector(normal 'normal')<line_sep>arc.SetNormal(*normal)<line_sep>check_valid_vector(polar 'polar')<line_sep>arc.SetPolarVector(*polar)<line_sep>arc.SetAngle(angle)<line_sep>arc.Update()<line_sep>angle=np.deg2rad(arc.GetAngle())<line_sep>arc=pyvista.wrap(arc.GetOutput())<line_sep># Compute distance of every point along circular arc center=np.array(center)<line_sep>radius=np.sqrt(np.sum((arc.points[0]-center)<power>2 axis=0))<line_sep>angles=np.linspace(0.0 angle resolution+1)<line_sep>arc['Distance']=radius<times>angles<line_sep><return>arc<block_end><def_stmt>Pyramid points=<none><block_start>"""Create a pyramid defined by 5 points. Parameters ---------- points : sequence, optional Points of the pyramid. Points are ordered such that the first four points are the four counterclockwise points on the quadrilateral face, and the last point is the apex. Defaults to pyramid in example. Returns ------- pyvista.UnstructuredGrid Unstructured grid containing a single pyramid cell. 
Examples -------- >>> import pyvista >>> pointa = [1.0, 1.0, 0.0] >>> pointb = [-1.0, 1.0, 0.0] >>> pointc = [-1.0, -1.0, 0.0] >>> pointd = [1.0, -1.0, 0.0] >>> pointe = [0.0, 0.0, 1.608] >>> pyramid = pyvista.Pyramid([pointa, pointb, pointc, pointd, pointe]) >>> pyramid.plot(show_edges=True, line_width=5) """<if_stmt>points<is><none><block_start>points=[[1.0 1.0 0.0] [-1.0 1.0 0.0] [-1.0 -1.0 0.0] [1.0 -1.0 0.0] [0.0 0.0 (4-2<power>0.5)<power>0.5]]<block_end><if_stmt>len(points)<ne>5<block_start><raise>TypeError('Points must be given as length 5 np.ndarray or list')<block_end>check_valid_vector(points[0] 'points[0]')<line_sep>check_valid_vector(points[1] 'points[1]')<line_sep>check_valid_vector(points[2] 'points[2]')<line_sep>check_valid_vector(points[3] 'points[3]')<line_sep>check_valid_vector(points[4] 'points[4]')<line_sep>pyramid=_vtk.vtkPyramid()<line_sep>pyramid.GetPointIds().SetId(0 0)<line_sep>pyramid.GetPointIds().SetId(1 1)<line_sep>pyramid.GetPointIds().SetId(2 2)<line_sep>pyramid.GetPointIds().SetId(3 3)<line_sep>pyramid.GetPointIds().SetId(4 4)<line_sep>ug=_vtk.vtkUnstructuredGrid()<line_sep>ug.SetPoints(pyvista.vtk_points(np.array(points) <false>))<line_sep>ug.InsertNextCell(pyramid.GetCellType() pyramid.GetPointIds())<line_sep><return>pyvista.wrap(ug)<block_end><def_stmt>Triangle points=<none><block_start>"""Create a triangle defined by 3 points. Parameters ---------- points : sequence, optional Points of the triangle. Defaults to a right isosceles triangle (see example). Returns ------- pyvista.PolyData Triangle mesh. Examples -------- >>> import pyvista >>> pointa = [0, 0, 0] >>> pointb = [1, 0, 0] >>> pointc = [0.5, 0.707, 0] >>> triangle = pyvista.Triangle([pointa, pointb, pointc]) >>> triangle.plot(show_edges=True, line_width=5) """<if_stmt>points<is><none><block_start>points=[[0 0 0] [1 0 0] [0.5 0.5<power>0.5 0]]<block_end><if_stmt>len(points)<ne>3<block_start><raise>TypeError('Points must be given as length 3 np.ndarray or list')<block_end>check_valid_vector(points[0] 'points[0]')<line_sep>check_valid_vector(points[1] 'points[1]')<line_sep>check_valid_vector(points[2] 'points[2]')<line_sep>cells=np.array([[3 0 1 2]])<line_sep><return>pyvista.wrap(pyvista.PolyData(points cells))<block_end><def_stmt>Rectangle points=<none><block_start>"""Create a rectangle defined by 4 points. Parameters ---------- points : sequence, optional Points of the rectangle. Defaults to a simple example. Returns ------- pyvista.PolyData Rectangle mesh. Examples -------- >>> import pyvista >>> pointa = [1.0, 0.0, 0.0] >>> pointb = [1.0, 1.0, 0.0] >>> pointc = [0.0, 1.0, 0.0] >>> pointd = [0.0, 0.0, 0.0] >>> rectangle = pyvista.Rectangle([pointa, pointb, pointc, pointd]) >>> rectangle.plot(show_edges=True, line_width=5) """<if_stmt>points<is><none><block_start>points=[[1.0 0.0 0.0] [1.0 1.0 0.0] [0.0 1.0 0.0] [0.0 0.0 0.0]]<block_end><if_stmt>len(points)<ne>4<block_start><raise>TypeError('Points must be given as length 4 np.ndarray or list')<block_end>check_valid_vector(points[0] 'points[0]')<line_sep>check_valid_vector(points[1] 'points[1]')<line_sep>check_valid_vector(points[2] 'points[2]')<line_sep>check_valid_vector(points[3] 'points[3]')<line_sep>cells=np.array([[4 0 1 2 3]])<line_sep><return>pyvista.wrap(pyvista.PolyData(points cells))<block_end><def_stmt>Circle radius=0.5 resolution=100<block_start>"""Create a single PolyData circle defined by radius in the XY plane. Parameters ---------- radius : float, optional Radius of circle. 
resolution : int, optional Number of points on the circle. Returns ------- pyvista.PolyData Circle mesh. Examples -------- >>> import pyvista >>> radius = 0.5 >>> circle = pyvista.Circle(radius) >>> circle.plot(show_edges=True, line_width=5) """<line_sep>points=np.zeros((resolution 3))<line_sep>theta=np.linspace(0.0 2.0<times>np.pi resolution)<line_sep>points[: 0]=radius<times>np.cos(theta)<line_sep>points[: 1]=radius<times>np.sin(theta)<line_sep>cells=np.array([np.append(np.array([resolution]) np.arange(resolution))])<line_sep><return>pyvista.wrap(pyvista.PolyData(points cells))<block_end><def_stmt>Superquadric center=(0. 0. 0.) scale=(1. 1. 1.) size=0.5 theta_roundness=1. phi_roundness=1. theta_resolution=16 phi_resolution=16 toroidal=<false> thickness=1/3<block_start>"""Create a superquadric. Parameters ---------- center : iterable, optional Center of the superquadric in ``[x, y, z]``. scale : iterable, optional Scale factors of the superquadric in ``[x, y, z]``. size : float, optional Superquadric isotropic size. theta_roundness : float, optional Superquadric east/west roundness. Values range from 0 (rectangular) to 1 (circular) to higher orders. phi_roundness : float, optional Superquadric north/south roundness. Values range from 0 (rectangular) to 1 (circular) to higher orders. theta_resolution : int, optional Number of points in the longitude direction. Values are rounded to nearest multiple of 4. phi_resolution : int, optional Number of points in the latitude direction. Values are rounded to nearest multiple of 8. toroidal : bool, optional Whether or not the superquadric is toroidal (``True``) or ellipsoidal (``False``). thickness : float, optional Superquadric ring thickness. Only applies if toroidal is set to ``True``. Returns ------- pyvista.PolyData Superquadric mesh. See Also -------- pyvista.ParametricSuperEllipsoid : Parametric superquadric if toroidal is ``False``. pyvista.ParametricSuperToroid : Parametric superquadric if toroidal is ``True``. Examples -------- >>> import pyvista >>> superquadric = pyvista.Superquadric(scale=(3., 1., 0.5), ... phi_roundness=0.1, ... theta_roundness=0.5) >>> superquadric.plot(show_edges=True) """<line_sep>superquadricSource=_vtk.vtkSuperquadricSource()<line_sep>superquadricSource.SetCenter(center)<line_sep>superquadricSource.SetScale(scale)<line_sep>superquadricSource.SetSize(size)<line_sep>superquadricSource.SetThetaRoundness(theta_roundness)<line_sep>superquadricSource.SetPhiRoundness(phi_roundness)<line_sep>superquadricSource.SetThetaResolution(round(theta_resolution/4)<times>4)<line_sep>superquadricSource.SetPhiResolution(round(phi_resolution/8)<times>8)<line_sep>superquadricSource.SetToroidal(toroidal)<line_sep>superquadricSource.SetThickness(thickness)<line_sep>superquadricSource.Update()<line_sep><return>pyvista.wrap(superquadricSource.GetOutput())<block_end><def_stmt>PlatonicSolid kind='tetrahedron' radius=1.0 center=(0.0 0.0 0.0)<block_start>"""Create a Platonic solid of a given size. Parameters ---------- kind : str or int, optional The kind of Platonic solid to create. Either the name of the polyhedron or an integer index: * ``'tetrahedron'`` or ``0`` * ``'cube'`` or ``1`` * ``'octahedron'`` or ``2`` * ``'icosahedron'`` or ``3`` * ``'dodecahedron'`` or ``4`` radius : float, optional The radius of the circumscribed sphere for the solid to create. center : sequence, optional Three-length sequence defining the center of the solid to create. Returns ------- pyvista.PolyData One of the five Platonic solids. 
Cell scalars are defined that assign integer labels to each face (with array name ``"FaceIndex"``). Examples -------- Create and plot a dodecahedron. >>> import pyvista >>> dodeca = pyvista.PlatonicSolid('dodecahedron') >>> dodeca.plot(categories=True) See :ref:`platonic_example` for more examples using this filter. """<line_sep>kinds={'tetrahedron':0 'cube':1 'octahedron':2 'icosahedron':3 'dodecahedron':4 }<if_stmt>isinstance(kind str)<block_start><if_stmt>kind<not><in>kinds<block_start><raise>ValueError(f'Invalid Platonic solid kind "{kind}".')<block_end>kind=kinds[kind]<block_end><elif_stmt>isinstance(kind int)<and>kind<not><in>range(5)<block_start><raise>ValueError(f'Invalid Platonic solid index "{kind}".')<block_end><elif_stmt><not>isinstance(kind int)<block_start><raise>ValueError('Invalid Platonic solid index type '<concat>f'"{type(kind).__name__}".')<block_end>check_valid_vector(center 'center')<line_sep>solid=_vtk.vtkPlatonicSolidSource()<line_sep>solid.SetSolidType(kind)<line_sep>solid.Update()<line_sep>solid=pyvista.wrap(solid.GetOutput())<line_sep>solid.scale(radius inplace=<true>)<line_sep>solid.points<augadd>np.asanyarray(center)-solid.center<line_sep># rename and activate cell scalars cell_data=solid.get_array(0)<line_sep>solid.clear_data()<line_sep>solid.cell_data['FaceIndex']=cell_data<line_sep><return>solid<block_end><def_stmt>Tetrahedron radius=1.0 center=(0.0 0.0 0.0)<block_start>"""Create a tetrahedron of a given size. A tetrahedron is composed of four congruent equilateral triangles. Parameters ---------- radius : float, optional The radius of the circumscribed sphere for the tetrahedron. center : sequence, optional Three-length sequence defining the center of the tetrahedron. Returns ------- pyvista.PolyData Mesh for the tetrahedron. Cell scalars are defined that assign integer labels to each face (with array name ``"FaceIndex"``). Examples -------- Create and plot a tetrahedron. >>> import pyvista >>> tetra = pyvista.Tetrahedron() >>> tetra.plot(categories=True) See :ref:`platonic_example` for more examples using this filter. """<line_sep><return>PlatonicSolid(kind='tetrahedron' radius=radius center=center)<block_end><def_stmt>Octahedron radius=1.0 center=(0.0 0.0 0.0)<block_start>"""Create an octahedron of a given size. An octahedron is composed of eight congruent equilateral triangles. Parameters ---------- radius : float, optional The radius of the circumscribed sphere for the octahedron. center : sequence, optional Three-length sequence defining the center of the octahedron. Returns ------- pyvista.PolyData Mesh for the octahedron. Cell scalars are defined that assign integer labels to each face (with array name ``"FaceIndex"``). Examples -------- Create and plot an octahedron. >>> import pyvista >>> tetra = pyvista.Octahedron() >>> tetra.plot(categories=True) See :ref:`platonic_example` for more examples using this filter. """<line_sep><return>PlatonicSolid(kind='octahedron' radius=radius center=center)<block_end><def_stmt>Dodecahedron radius=1.0 center=(0.0 0.0 0.0)<block_start>"""Create a dodecahedron of a given size. A dodecahedron is composed of twelve congruent regular pentagons. Parameters ---------- radius : float, optional The radius of the circumscribed sphere for the dodecahedron. center : sequence, optional Three-length sequence defining the center of the dodecahedron. Returns ------- pyvista.PolyData Mesh for the dodecahedron. Cell scalars are defined that assign integer labels to each face (with array name ``"FaceIndex"``). 
Examples -------- Create and plot a dodecahedron. >>> import pyvista >>> tetra = pyvista.Dodecahedron() >>> tetra.plot(categories=True) See :ref:`platonic_example` for more examples using this filter. """<line_sep><return>PlatonicSolid(kind='dodecahedron' radius=radius center=center)<block_end><def_stmt>Icosahedron radius=1.0 center=(0.0 0.0 0.0)<block_start>"""Create an icosahedron of a given size. An icosahedron is composed of twenty congruent equilateral triangles. Parameters ---------- radius : float, optional The radius of the circumscribed sphere for the icosahedron. center : sequence, optional Three-length sequence defining the center of the icosahedron. Returns ------- pyvista.PolyData Mesh for the icosahedron. Cell scalars are defined that assign integer labels to each face (with array name ``"FaceIndex"``). Examples -------- Create and plot an icosahedron. >>> import pyvista >>> tetra = pyvista.Icosahedron() >>> tetra.plot(categories=True) See :ref:`platonic_example` for more examples using this filter. """<line_sep><return>PlatonicSolid(kind='icosahedron' radius=radius center=center)<block_end>
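# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example, assuming pyvista is installed and exposes the
# constructors defined above. It builds a disc, some 3D text, and a Platonic
# solid, merges them into one PolyData, and plots the result; the sizes and
# offsets are arbitrary choices made for this sketch.
import pyvista

disc = pyvista.Disc(inner=0.25, outer=1.0, c_res=50)            # flat annulus in the XY plane
label = pyvista.Text3D("demo", depth=0.2)                       # extruded text mesh
solid = pyvista.PlatonicSolid("icosahedron", radius=0.5,
                              center=(0.0, 0.0, 1.5))           # solid floating above the disc

scene = disc.merge(label).merge(solid)                          # combine into one PolyData
scene.plot(show_edges=True)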
"""Titiler API settings."""<import_stmt>pydantic<class_stmt>ApiSettings(pydantic.BaseSettings)<block_start>"""FASTAPI application settings."""<line_sep>name:str="titiler"<line_sep>cors_origins:str="*"<line_sep>cachecontrol:str="public, max-age=3600"<line_sep>root_path:str=""<line_sep>debug:bool=<false><line_sep>disable_cog:bool=<false><line_sep>disable_stac:bool=<false><line_sep>disable_mosaic:bool=<false><line_sep>lower_case_query_parameters:bool=<false><line_sep>@pydantic.validator("cors_origins")<def_stmt>parse_cors_origin cls v<block_start>"""Parse CORS origins."""<line_sep><return>[origin.strip()<for>origin v.split(",")]<block_end><class_stmt>Config<block_start>"""model config"""<line_sep>env_file=".env"<line_sep>env_prefix="TITILER_API_"<block_end><block_end>
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Finds desktop browsers that can be controlled by telemetry."""<import_stmt>logging<import_stmt>os<import_stmt>sys<import_from_stmt>telemetry.core browser<import_from_stmt>telemetry.core possible_browser<import_from_stmt>telemetry.core util<import_from_stmt>telemetry.core.backends.webdriver webdriver_ie_backend<import_from_stmt>telemetry.core.platform factory<import_from_stmt>telemetry.page cloud_storage<line_sep># Try to import the selenium python lib which may be not available. util.AddDirToPythonPath(util.GetChromiumSrcDir() 'third_party' 'webdriver' 'pylib')<try_stmt><block_start><import_from_stmt>selenium webdriver# pylint: disable=F0401 <block_end><except_stmt>ImportError<block_start>webdriver=<none><block_end>ALL_BROWSER_TYPES=[]<if_stmt>webdriver<block_start>ALL_BROWSER_TYPES=['internet-explorer' 'internet-explorer-x64']<block_end><else_stmt><block_start>logging.warning('Webdriver backend is unsupported without selenium pylib. '<concat>'For installation of selenium pylib, please refer to '<concat>'https://code.google.com/p/selenium/wiki/PythonBindings.')<block_end><class_stmt>PossibleWebDriverBrowser(possible_browser.PossibleBrowser)<block_start>"""A browser that can be controlled through webdriver API."""<def_stmt>__init__ self browser_type finder_options<block_start>target_os=sys.platform.lower()<line_sep>super(PossibleWebDriverBrowser self).__init__(browser_type target_os finder_options)<assert_stmt>browser_type<in>ALL_BROWSER_TYPES 'Please add %s to ALL_BROWSER_TYPES'%browser_type<block_end>@property<def_stmt>_platform_backend self<block_start><return>factory.GetPlatformBackendForCurrentOS()<block_end><def_stmt>CreateWebDriverBackend self platform_backend<block_start><raise>NotImplementedError()<block_end><def_stmt>Create self<block_start>backend=self.CreateWebDriverBackend(self._platform_backend)<line_sep><return>browser.Browser(backend self._platform_backend)<block_end><def_stmt>SupportsOptions self finder_options<block_start><if_stmt>len(finder_options.extensions_to_load)<ne>0<block_start><return><false><block_end><return><true><block_end><def_stmt>UpdateExecutableIfNeeded self<block_start><pass><block_end>@property<def_stmt>last_modification_time self<block_start><return>-1<block_end><block_end><class_stmt>PossibleDesktopIE(PossibleWebDriverBrowser)<block_start><def_stmt>__init__ self browser_type finder_options architecture<block_start>super(PossibleDesktopIE self).__init__(browser_type finder_options)<line_sep>self._architecture=architecture<block_end><def_stmt>CreateWebDriverBackend self platform_backend<block_start><assert_stmt>webdriver<def_stmt>DriverCreator <block_start>ie_driver_exe=os.path.join(util.GetTelemetryDir() 'bin' 'IEDriverServer_%s.exe'%self._architecture)<line_sep>cloud_storage.GetIfChanged(ie_driver_exe cloud_storage.PUBLIC_BUCKET)<line_sep><return>webdriver.Ie(executable_path=ie_driver_exe)<block_end><return>webdriver_ie_backend.WebDriverIEBackend(platform_backend DriverCreator self.finder_options.browser_options)<block_end><block_end><def_stmt>SelectDefaultBrowser _<block_start><return><none><block_end><def_stmt>FindAllAvailableBrowsers finder_options<block_start>"""Finds all the desktop browsers available on this machine."""<line_sep>browsers=[]<if_stmt><not>webdriver<block_start><return>browsers<block_end># Look for the IE browser in the standard location. 
<if_stmt>sys.platform.startswith('win')<block_start>ie_path=os.path.join('Internet Explorer' 'iexplore.exe')<line_sep>win_search_paths={'32':{'path':os.getenv('PROGRAMFILES(X86)') 'type':'internet-explorer'} '64':{'path':os.getenv('PROGRAMFILES') 'type':'internet-explorer-x64'}}<for_stmt>architecture,ie_info win_search_paths.iteritems()<block_start><if_stmt><not>ie_info['path']<block_start><continue><block_end><if_stmt>os.path.exists(os.path.join(ie_info['path'] ie_path))<block_start>browsers.append(PossibleDesktopIE(ie_info['type'] finder_options architecture))<block_end><block_end><block_end><return>browsers<block_end>
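# --- Standalone sketch of the discovery check above (hypothetical helper,
# for illustration only) ---
# It probes the same two Program Files locations for iexplore.exe without
# depending on the telemetry framework; the function name is made up here.
import os
import sys

def find_ie_installs():
    if not sys.platform.startswith('win'):
        return []
    ie_path = os.path.join('Internet Explorer', 'iexplore.exe')
    candidates = {
        'internet-explorer': os.getenv('PROGRAMFILES(X86)'),
        'internet-explorer-x64': os.getenv('PROGRAMFILES'),
    }
    return [browser_type
            for browser_type, base in candidates.items()
            if base and os.path.exists(os.path.join(base, ie_path))]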
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (c) 2020 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## <import_stmt>doctest<import_stmt>re<import_from_stmt>zope.testing renormalizing<import_stmt>zc.buildout.testing<import_from_stmt>zc.buildout.tests easy_install_SetUp<import_from_stmt>zc.buildout.tests normalize_bang<def_stmt>default_cfg <block_start>r""" >>> home = tmpdir('home') >>> mkdir(home, '.buildout') >>> default_cfg = join(home, '.buildout', 'default.cfg') >>> write(default_cfg, ''' ... [debug] ... dec = 1 ... 2 ... inc = 1 ... ''') >>> write('buildout.cfg', ''' ... [buildout] ... ... [debug] ... dec -= 2 ... inc += 2 ... ''') >>> env = dict(HOME=home, USERPROFILE=home) >>> print_(system(buildout+' annotate debug', env=env), end='') <BLANKLINE> Annotated sections ================== <BLANKLINE> [debug] dec= 1 /home/.buildout/default.cfg -= buildout.cfg inc= 1 2 /home/.buildout/default.cfg += buildout.cfg """<block_end><def_stmt>default_cfg_extensions <block_start>r""" Add two extensions as develop eggs >>> mkdir('demo') >>> write('demo', 'demo.py', ''' ... import sys ... def ext(buildout): ... sys.stdout.write('demo %s %s\\n' % ('ext', sorted(buildout))) ... def unload(buildout): ... sys.stdout.write('demo %s %s\\n' % ('unload', sorted(buildout))) ... ''') >>> write('demo', 'setup.py', ''' ... from setuptools import setup ... ... setup( ... name = "demo", ... entry_points = { ... 'zc.buildout.extension': ['ext = demo:ext'], ... 'zc.buildout.unloadextension': ['ext = demo:unload'], ... }, ... ) ... ''') >>> mkdir('demo2') >>> write('demo2', 'demo2.py', ''' ... import sys ... def ext(buildout): ... sys.stdout.write('demo2 %s %s\\n' % ('ext', sorted(buildout))) ... def unload(buildout): ... sys.stdout.write('demo2 %s %s\\n' % ('unload', sorted(buildout))) ... ''') >>> write('demo2', 'setup.py', ''' ... from setuptools import setup ... ... setup( ... name = "demo2", ... entry_points = { ... 'zc.buildout.extension': ['ext = demo2:ext'], ... 'zc.buildout.unloadextension': ['ext = demo2:unload'], ... }, ... ) ... ''') >>> write('buildout.cfg', ''' ... [buildout] ... develop = demo demo2 ... parts = ... ''') Run buildout once without extensions to actually develop the eggs. (Develop happens after loading extensions.) >>> print_(system(buildout), end='') Develop: '/sample-buildout/demo' Develop: '/sample-buildout/demo2' >>> ls("develop-eggs") - demo.egg-link - demo2.egg-link - zc.recipe.egg.egg-link extensions in .buildout/default.cfg incremented in buildout.cfg >>> home = tmpdir('home') >>> mkdir(home, '.buildout') >>> default_cfg = join(home, '.buildout', 'default.cfg') >>> write(default_cfg, ''' ... [buildout] ... extensions = demo ... ''') >>> write('buildout.cfg', ''' ... [buildout] ... develop = demo demo2 ... extensions += demo2 ... parts = ... ''') >>> env = dict(HOME=home, USERPROFILE=home) >>> print_(system(buildout+' annotate buildout', env=env), end='') ... 
# doctest: +ELLIPSIS <BLANKLINE> Annotated sections ================== <BLANKLINE> [buildout] ... extensions= demo demo2 /home/.buildout/default.cfg += buildout.cfg ... versions= versions DEFAULT_VALUE """<block_end><def_stmt>with_extends_increment_in_base <block_start>r""" >>> home = tmpdir('home') >>> mkdir(home, '.buildout') >>> default_cfg = join(home, '.buildout', 'default.cfg') >>> write(default_cfg, ''' ... [buildout] ... extensions = demo ... ''') >>> write('base.cfg', ''' ... [buildout] ... extensions += demo2 ... ''') >>> write('buildout.cfg', ''' ... [buildout] ... extends = base.cfg ... parts = ... ''') >>> env = dict(HOME=home, USERPROFILE=home) >>> print_(system(buildout+' annotate buildout', env=env), end='') ... # doctest: +ELLIPSIS <BLANKLINE> Annotated sections ================== <BLANKLINE> [buildout] ... extensions= demo demo2 /home/.buildout/default.cfg += base.cfg ... versions= versions DEFAULT_VALUE """<block_end><def_stmt>with_extends_increment_in_base2 <block_start>r""" >>> home = tmpdir('home') >>> mkdir(home, '.buildout') >>> default_cfg = join(home, '.buildout', 'default.cfg') >>> write(default_cfg, ''' ... [buildout] ... extensions = demo ... ''') >>> write('base.cfg', ''' ... [buildout] ... ''') >>> write('base2.cfg', ''' ... [buildout] ... extensions += demo2 ... ''') >>> write('buildout.cfg', ''' ... [buildout] ... extends = base.cfg ... base2.cfg ... parts = ... ''') >>> env = dict(HOME=home, USERPROFILE=home) >>> print_(system(buildout+' annotate buildout', env=env), end='') ... # doctest: +ELLIPSIS <BLANKLINE> Annotated sections ================== <BLANKLINE> [buildout] ... extensions= demo demo2 /home/.buildout/default.cfg += base2.cfg ... versions= versions DEFAULT_VALUE """<block_end><def_stmt>with_extends_increment_in_base2_and_base3 <block_start>r""" >>> home = tmpdir('home') >>> mkdir(home, '.buildout') >>> default_cfg = join(home, '.buildout', 'default.cfg') >>> write(default_cfg, ''' ... [buildout] ... extensions = demo ... ''') >>> write('base.cfg', ''' ... [buildout] ... ''') >>> write('base2.cfg', ''' ... [buildout] ... extensions += demo2 ... ''') >>> write('base3.cfg', ''' ... [buildout] ... extensions += demo3 ... ''') >>> write('buildout.cfg', ''' ... [buildout] ... extends = base.cfg ... base2.cfg ... base3.cfg ... parts = ... ''') >>> env = dict(HOME=home, USERPROFILE=home) >>> print_(system(buildout+' annotate buildout', env=env), end='') ... # doctest: +ELLIPSIS <BLANKLINE> Annotated sections ================== <BLANKLINE> [buildout] ... extensions= demo demo2 demo3 /home/.buildout/default.cfg += base2.cfg += base3.cfg ... versions= versions DEFAULT_VALUE """<block_end><def_stmt>with_extends_increment_in_buildout <block_start>r""" >>> home = tmpdir('home') >>> mkdir(home, '.buildout') >>> default_cfg = join(home, '.buildout', 'default.cfg') >>> write(default_cfg, ''' ... [buildout] ... extensions = demo ... ''') >>> write('base.cfg', ''' ... [buildout] ... ''') >>> write('buildout.cfg', ''' ... [buildout] ... extends = base.cfg ... extensions += demo2 ... parts = ... ''') >>> env = dict(HOME=home, USERPROFILE=home) >>> print_(system(buildout+' annotate buildout', env=env), end='') ... # doctest: +ELLIPSIS <BLANKLINE> Annotated sections ================== <BLANKLINE> [buildout] ... extensions= demo demo2 /home/.buildout/default.cfg += buildout.cfg ... 
versions= versions DEFAULT_VALUE """<block_end><def_stmt>with_extends_increment_in_buildout_with_base_and_root <block_start>r""" >>> home = tmpdir('home') >>> mkdir(home, '.buildout') >>> default_cfg = join(home, '.buildout', 'default.cfg') >>> write(default_cfg, ''' ... [buildout] ... extensions = demo ... ''') >>> write('root.cfg', ''' ... [buildout] ... ''') >>> write('base.cfg', ''' ... [buildout] ... extends = root.cfg ... ''') >>> write('buildout.cfg', ''' ... [buildout] ... extends = base.cfg ... extensions += demo2 ... parts = ... ''') >>> env = dict(HOME=home, USERPROFILE=home) >>> print_(system(buildout+' annotate buildout', env=env), end='') ... # doctest: +ELLIPSIS <BLANKLINE> Annotated sections ================== <BLANKLINE> [buildout] ... extensions= demo demo2 /home/.buildout/default.cfg += buildout.cfg ... versions= versions DEFAULT_VALUE """<block_end><def_stmt>no_default_with_extends_increment_in_base2_and_base3 <block_start>r""" >>> write('base.cfg', ''' ... [buildout] ... ''') >>> write('base2.cfg', ''' ... [buildout] ... extensions += demo2 ... ''') >>> write('base3.cfg', ''' ... [buildout] ... extensions += demo3 ... ''') >>> write('buildout.cfg', ''' ... [buildout] ... extends = base.cfg ... base2.cfg ... base3.cfg ... parts = ... ''') >>> print_(system(buildout+' annotate buildout'), end='') ... # doctest: +ELLIPSIS <BLANKLINE> Annotated sections ================== <BLANKLINE> [buildout] ... extensions= demo2 demo3 IMPLICIT_VALUE += base2.cfg += base3.cfg ... versions= versions DEFAULT_VALUE """<block_end><def_stmt>test_suite <block_start><return>doctest.DocTestSuite(setUp=easy_install_SetUp tearDown=zc.buildout.testing.buildoutTearDown checker=renormalizing.RENormalizing([zc.buildout.testing.normalize_path zc.buildout.testing.normalize_endings zc.buildout.testing.normalize_script zc.buildout.testing.normalize_egg_py zc.buildout.testing.normalize___pycache__ zc.buildout.testing.not_found zc.buildout.testing.normalize_exception_type_for_python_2_and_3 zc.buildout.testing.adding_find_link zc.buildout.testing.python27_warning zc.buildout.testing.python27_warning_2 zc.buildout.testing.easyinstall_deprecated zc.buildout.testing.setuptools_deprecated zc.buildout.testing.pkg_resources_deprecated zc.buildout.testing.warnings_warn normalize_bang (re.compile(r'^(\w+\.)*(Missing\w+: )') '\2') (re.compile(r"buildout: Running \S*setup.py") 'buildout: Running setup.py') (re.compile(r'pip-\S+-') 'pip.egg') (re.compile(r'setuptools-\S+-') 'setuptools.egg') (re.compile(r'zc.buildout-\S+-') 'zc.buildout.egg') (re.compile(r'pip = \S+') 'pip = 20.0.0') (re.compile(r'setuptools = \S+') 'setuptools = 0.7.99') (re.compile(r'File "\S+one.py"') 'File "one.py"') (re.compile(r'We have a develop egg: (\S+) (\S+)') r'We have a develop egg: \1 V') (re.compile(r'Picked: setuptools = \S+') 'Picked: setuptools = V') (re.compile('[-d] pip') '- pip') (re.compile('[-d] setuptools') '- setuptools') (re.compile(r'\\[\\]?') '/') (re.compile('-q develop -mxN -d "/sample-buildout/develop-eggs') '-q develop -mxN -d /sample-buildout/develop-eggs') (re.compile(r'^[*]...') '...') # for # bug_92891 # bootstrap_crashes_with_egg_recipe_in_buildout_section (re.compile(r"Unused options for buildout: 'eggs' 'scripts'\.") "Unused options for buildout: 'scripts' 'eggs'.") # Python 3.4 changed the wording of NameErrors (re.compile('NameError: global name') 'NameError: name') # fix for test_distutils_scripts_using_import_are_properly_parsed # and test_distutils_scripts_using_from_are_properly_parsed # win32 apparently 
adds a " around sys.executable (re.compile('#!"python"') '#!python') ]) )<block_end>
""" Classes for interacting with Salesforce Bulk API """<try_stmt><block_start><import_from_stmt>collections OrderedDict<block_end><except_stmt>ImportError# Python < 2.7 <block_start><import_from_stmt>ordereddict OrderedDict<block_end><import_stmt>json<import_stmt>requests<import_from_stmt>time sleep<import_from_stmt>simple_salesforce.util call_salesforce<class_stmt>SFBulkHandler(object)<block_start>""" Bulk API request handler Intermediate class which allows us to use commands, such as 'sf.bulk.Contacts.create(...)' This is really just a middle layer, whose sole purpose is to allow the above syntax """<def_stmt>__init__ self session_id bulk_url proxies=<none> session=<none><block_start>"""Initialize the instance with the given parameters. Arguments: * session_id -- the session ID for authenticating to Salesforce * bulk_url -- API endpoint set in Salesforce instance * proxies -- the optional map of scheme to proxy server * session -- Custom requests session, created in calling code. This enables the use of requests Session features not otherwise exposed by simple_salesforce. """<line_sep>self.session_id=session_id<line_sep>self.session=session<or>requests.Session()<line_sep>self.bulk_url=bulk_url<line_sep># don't wipe out original proxies with None <if_stmt><not>session<and>proxies<is><not><none><block_start>self.session.proxies=proxies<block_end># Define these headers separate from Salesforce class, # as bulk uses a slightly different format self.headers={'Content-Type':'application/json' 'X-SFDC-Session':self.session_id 'X-PrettyPrint':'1'}<block_end><def_stmt>__getattr__ self name<block_start><return>SFBulkType(object_name=name bulk_url=self.bulk_url headers=self.headers session=self.session)<block_end><block_end><class_stmt>SFBulkType(object)<block_start>""" Interface to Bulk/Async API functions"""<def_stmt>__init__ self object_name bulk_url headers session<block_start>"""Initialize the instance with the given parameters. Arguments: * object_name -- the name of the type of SObject this represents, e.g. `Lead` or `Contact` * bulk_url -- API endpoint set in Salesforce instance * headers -- bulk API headers * session -- Custom requests session, created in calling code. This enables the use of requests Session features not otherwise exposed by simple_salesforce. 
"""<line_sep>self.object_name=object_name<line_sep>self.bulk_url=bulk_url<line_sep>self.session=session<line_sep>self.headers=headers<block_end><def_stmt>_create_job self operation object_name external_id_field=<none><block_start>""" Create a bulk job Arguments: * operation -- Bulk operation to be performed by job * object_name -- SF object * external_id_field -- unique identifier field for upsert operations """<line_sep>payload={'operation':operation 'object':object_name 'contentType':'JSON'}<if_stmt>operation<eq>'upsert'<block_start>payload['externalIdFieldName']=external_id_field<block_end>url="{}{}".format(self.bulk_url 'job')<line_sep>result=call_salesforce(url=url method='POST' session=self.session headers=self.headers data=json.dumps(payload))<line_sep><return>result.json(object_pairs_hook=OrderedDict)<block_end><def_stmt>_close_job self job_id<block_start>""" Close a bulk job """<line_sep>payload={'state':'Closed'}<line_sep>url="{}{}{}".format(self.bulk_url 'job/' job_id)<line_sep>result=call_salesforce(url=url method='POST' session=self.session headers=self.headers data=json.dumps(payload))<line_sep><return>result.json(object_pairs_hook=OrderedDict)<block_end><def_stmt>_get_job self job_id<block_start>""" Get an existing job to check the status """<line_sep>url="{}{}{}".format(self.bulk_url 'job/' job_id)<line_sep>result=call_salesforce(url=url method='GET' session=self.session headers=self.headers)<line_sep><return>result.json(object_pairs_hook=OrderedDict)<block_end><def_stmt>_add_batch self job_id data operation<block_start>""" Add a set of data as a batch to an existing job Separating this out in case of later implementations involving multiple batches """<line_sep>url="{}{}{}{}".format(self.bulk_url 'job/' job_id '/batch')<if_stmt>operation<ne>'query'<block_start>data=json.dumps(data)<block_end>result=call_salesforce(url=url method='POST' session=self.session headers=self.headers data=data)<line_sep><return>result.json(object_pairs_hook=OrderedDict)<block_end><def_stmt>_get_batch self job_id batch_id<block_start>""" Get an existing batch to check the status """<line_sep>url="{}{}{}{}{}".format(self.bulk_url 'job/' job_id '/batch/' batch_id)<line_sep>result=call_salesforce(url=url method='GET' session=self.session headers=self.headers)<line_sep><return>result.json(object_pairs_hook=OrderedDict)<block_end><def_stmt>_get_batch_results self job_id batch_id operation<block_start>""" retrieve a set of results from a completed job """<line_sep>url="{}{}{}{}{}{}".format(self.bulk_url 'job/' job_id '/batch/' batch_id '/result')<line_sep>result=call_salesforce(url=url method='GET' session=self.session headers=self.headers)<if_stmt>operation<eq>'query'<block_start>url_query_results="{}{}{}".format(url '/' result.json()[0])<line_sep>query_result=call_salesforce(url=url_query_results method='GET' session=self.session headers=self.headers)<line_sep><return>query_result.json()<block_end><return>result.json()<block_end>#pylint: disable=R0913 <def_stmt>_bulk_operation self object_name operation data external_id_field=<none> wait=5<block_start>""" String together helper functions to create a complete end-to-end bulk API request Arguments: * object_name -- SF object * operation -- Bulk operation to be performed by job * data -- list of dict to be passed as a batch * external_id_field -- unique identifier field for upsert operations * wait -- seconds to sleep between checking batch status """<line_sep>job=self._create_job(object_name=object_name operation=operation 
external_id_field=external_id_field)<line_sep>batch=self._add_batch(job_id=job['id'] data=data operation=operation)<line_sep>self._close_job(job_id=job['id'])<line_sep>batch_status=self._get_batch(job_id=batch['jobId'] batch_id=batch['id'])['state']<while_stmt>batch_status<not><in>['Completed' 'Failed' 'Not Processed']<block_start>sleep(wait)<line_sep>batch_status=self._get_batch(job_id=batch['jobId'] batch_id=batch['id'])['state']<block_end>results=self._get_batch_results(job_id=batch['jobId'] batch_id=batch['id'] operation=operation)<line_sep><return>results<block_end># _bulk_operation wrappers to expose supported Salesforce bulk operations <def_stmt>delete self data<block_start>""" soft delete records """<line_sep>results=self._bulk_operation(object_name=self.object_name operation='delete' data=data)<line_sep><return>results<block_end><def_stmt>insert self data<block_start>""" insert records """<line_sep>results=self._bulk_operation(object_name=self.object_name operation='insert' data=data)<line_sep><return>results<block_end><def_stmt>upsert self data external_id_field<block_start>""" upsert records based on a unique identifier """<line_sep>results=self._bulk_operation(object_name=self.object_name operation='upsert' external_id_field=external_id_field data=data)<line_sep><return>results<block_end><def_stmt>update self data<block_start>""" update records """<line_sep>results=self._bulk_operation(object_name=self.object_name operation='update' data=data)<line_sep><return>results<block_end><def_stmt>hard_delete self data<block_start>""" hard delete records """<line_sep>results=self._bulk_operation(object_name=self.object_name operation='hardDelete' data=data)<line_sep><return>results<block_end><def_stmt>query self data<block_start>""" bulk query """<line_sep>results=self._bulk_operation(object_name=self.object_name operation='query' data=data)<line_sep><return>results<block_end><block_end>
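# --- Usage sketch (assumes an authenticated simple_salesforce.Salesforce
# instance named `sf` whose `bulk` attribute is the SFBulkHandler above;
# the record fields and the external-id field are placeholders) ---
data = [
    {'LastName': 'Doe', 'Email': 'jane.doe@example.com'},
    {'LastName': 'Roe', 'Email': 'richard.roe@example.com'},
]

# Attribute access selects the SObject type; each call runs one
# create-job / add-batch / close-job / poll cycle as implemented above.
results = sf.bulk.Contact.insert(data)

# Upserts additionally need the name of the external-id field used to match
# existing records.
sf.bulk.Contact.upsert(data, external_id_field='Email')

# Bulk queries take a SOQL string instead of a list of dicts.
accounts = sf.bulk.Account.query("SELECT Id, Name FROM Account")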
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_from_stmt>typing TYPE_CHECKING List Optional<import_stmt>attr<import_from_stmt>sqlalchemy.orm.session Session<import_from_stmt>airflow.utils.state State<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>airflow.models.dagrun DagRun<import_from_stmt>airflow.models.taskinstance TaskInstance<block_end>@attr.define<class_stmt>DepContext<block_start>""" A base class for contexts that specifies which dependencies should be evaluated in the context for a task instance to satisfy the requirements of the context. Also stores state related to the context that can be used by dependency classes. For example there could be a SomeRunContext that subclasses this class which has dependencies for: - Making sure there are slots available on the infrastructure to run the task instance - A task-instance's task-specific dependencies are met (e.g. the previous task instance completed successfully) - ... :param deps: The context-specific dependencies that need to be evaluated for a task instance to run in this execution context. :param flag_upstream_failed: This is a hack to generate the upstream_failed state creation while checking to see whether the task instance is runnable. It was the shortest path to add the feature. This is bad since this class should be pure (no side effects). :param ignore_all_deps: Whether or not the context should ignore all ignorable dependencies. Overrides the other ignore_* parameters :param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs (e.g. for Backfills) :param ignore_in_retry_period: Ignore the retry period for task instances :param ignore_in_reschedule_period: Ignore the reschedule period for task instances :param ignore_unmapped_tasks: Ignore errors about mapped tasks not yet being expanded :param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past and trigger rule :param ignore_ti_state: Ignore the task instance's previous failure/success :param finished_tis: A list of all the finished task instances of this run """<line_sep>deps:set=attr.ib(factory=set)<line_sep>flag_upstream_failed:bool=<false><line_sep>ignore_all_deps:bool=<false><line_sep>ignore_depends_on_past:bool=<false><line_sep>ignore_in_retry_period:bool=<false><line_sep>ignore_in_reschedule_period:bool=<false><line_sep>ignore_task_deps:bool=<false><line_sep>ignore_ti_state:bool=<false><line_sep>ignore_unmapped_tasks:bool=<false><line_sep>finished_tis:Optional[List["TaskInstance"]]=<none><def_stmt>ensure_finished_tis self dag_run:"DagRun" session:Session<arrow>List["TaskInstance"]<block_start>""" This method makes sure finished_tis is populated if it's currently None. This is for the strange feature of running tasks without dag_run. 
:param dag_run: The DagRun for which to find finished tasks :return: A list of all the finished tasks of this DAG and execution_date :rtype: list[airflow.models.TaskInstance] """<if_stmt>self.finished_tis<is><none><block_start>finished_tis=dag_run.get_task_instances(state=State.finished session=session)<line_sep>self.finished_tis=finished_tis<block_end><else_stmt><block_start>finished_tis=self.finished_tis<block_end><return>finished_tis<block_end><block_end>
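# --- Illustrative sketch (not part of the original module) ---
# Because DepContext is an attrs class, callers override individual flags by
# keyword; untouched attributes keep the defaults declared above.
ctx = DepContext(
    flag_upstream_failed=True,
    ignore_in_retry_period=True,
)
assert ctx.ignore_all_deps is False
assert ctx.finished_tis is None          # populated lazily by ensure_finished_tis()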
<import_from_stmt>test get_user_session cassette<import_from_stmt>test.resources.documents delete_all_documents create_document assert_core_document assert_bib_document assert_client_document assert_tags_document assert_all_document<def_stmt>test_should_get_document_core_view <block_start>session=get_user_session()<line_sep>delete_all_documents()<with_stmt>cassette('fixtures/resources/trash/get_document/get_document_core_view.yaml')<block_start>created_doc=create_document(session)<line_sep>created_doc.move_to_trash()<line_sep>doc=session.trash.get(created_doc.id)<line_sep>assert_core_document(doc)<block_end><block_end><def_stmt>test_should_get_document_bib_view <block_start>session=get_user_session()<line_sep>delete_all_documents()<with_stmt>cassette('fixtures/resources/trash/get_document/get_document_bib_view.yaml')<block_start>created_doc=create_document(session)<line_sep>created_doc.move_to_trash()<line_sep>doc=session.trash.get(created_doc.id view='bib')<line_sep>assert_core_document(doc)<line_sep>assert_bib_document(doc)<block_end><block_end><def_stmt>test_should_get_document_client_view <block_start>session=get_user_session()<line_sep>delete_all_documents()<with_stmt>cassette('fixtures/resources/trash/get_document/get_document_client_view.yaml')<block_start>created_doc=create_document(session)<line_sep>created_doc.move_to_trash()<line_sep>doc=session.trash.get(created_doc.id view='client')<line_sep>assert_core_document(doc)<line_sep>assert_client_document(doc)<block_end><block_end><def_stmt>test_should_get_document_tags_view <block_start>session=get_user_session()<line_sep>delete_all_documents()<with_stmt>cassette('fixtures/resources/trash/get_document/get_document_tags_view.yaml')<block_start>created_doc=create_document(session)<line_sep>created_doc.move_to_trash()<line_sep>doc=session.trash.get(created_doc.id view='tags')<line_sep>assert_core_document(doc)<line_sep>assert_tags_document(doc)<block_end><block_end><def_stmt>test_should_get_document_all_view <block_start>session=get_user_session()<line_sep>delete_all_documents()<with_stmt>cassette('fixtures/resources/trash/get_document/get_document_all_view.yaml')<block_start>created_doc=create_document(session)<line_sep>created_doc.move_to_trash()<line_sep>doc=session.trash.get(created_doc.id view='all')<line_sep>assert_all_document(doc)<block_end><block_end><def_stmt>test_should_be_able_to_get_profile_for_document <block_start>session=get_user_session()<line_sep>delete_all_documents()<with_stmt>cassette('fixtures/resources/trash/get_document/get_profile_for_document.yaml')<block_start>created_doc=create_document(session)<line_sep>created_doc.move_to_trash()<line_sep>doc=session.trash.get(created_doc.id)<line_sep>profile=session.profiles.me<assert_stmt>doc.profile.display_name<eq>profile.display_name<block_end><block_end>
"""Support for Tasmota fans."""<import_from_future_stmt> annotations<import_from_stmt>typing Any<import_from_stmt>hatasmota const<as>tasmota_const fan<as>tasmota_fan<import_from_stmt>hatasmota.entity TasmotaEntity<as>HATasmotaEntity<import_from_stmt>hatasmota.models DiscoveryHashType<import_from_stmt>homeassistant.components.fan DOMAIN<as>FAN_DOMAIN FanEntity FanEntityFeature <import_from_stmt>homeassistant.config_entries ConfigEntry<import_from_stmt>homeassistant.core HomeAssistant callback<import_from_stmt>homeassistant.helpers.dispatcher async_dispatcher_connect<import_from_stmt>homeassistant.helpers.entity_platform AddEntitiesCallback<import_from_stmt>homeassistant.util.percentage ordered_list_item_to_percentage percentage_to_ordered_list_item <import_from_stmt>.const DATA_REMOVE_DISCOVER_COMPONENT<import_from_stmt>.discovery TASMOTA_DISCOVERY_ENTITY_NEW<import_from_stmt>.mixins TasmotaAvailability TasmotaDiscoveryUpdate<line_sep>ORDERED_NAMED_FAN_SPEEDS=[tasmota_const.FAN_SPEED_LOW tasmota_const.FAN_SPEED_MEDIUM tasmota_const.FAN_SPEED_HIGH ]<line_sep># off is not included <async_keyword><def_stmt>async_setup_entry hass:HomeAssistant config_entry:ConfigEntry async_add_entities:AddEntitiesCallback <arrow><none><block_start>"""Set up Tasmota fan dynamically through discovery."""<line_sep>@callback<def_stmt>async_discover tasmota_entity:HATasmotaEntity discovery_hash:DiscoveryHashType<arrow><none><block_start>"""Discover and add a Tasmota fan."""<line_sep>async_add_entities([TasmotaFan(tasmota_entity=tasmota_entity discovery_hash=discovery_hash)])<block_end>hass.data[DATA_REMOVE_DISCOVER_COMPONENT.format(FAN_DOMAIN)]=async_dispatcher_connect(hass TASMOTA_DISCOVERY_ENTITY_NEW.format(FAN_DOMAIN) async_discover )<block_end><class_stmt>TasmotaFan(TasmotaAvailability TasmotaDiscoveryUpdate FanEntity )<block_start>"""Representation of a Tasmota fan."""<line_sep>_attr_supported_features=FanEntityFeature.SET_SPEED<line_sep>_tasmota_entity:tasmota_fan.TasmotaFan<def_stmt>__init__ self **kwds:Any<arrow><none><block_start>"""Initialize the Tasmota fan."""<line_sep>self._state:int|<none>=<none><line_sep>super().__init__(**kwds )<block_end><async_keyword><def_stmt>async_added_to_hass self<arrow><none><block_start>"""Subscribe to MQTT events."""<line_sep>self._tasmota_entity.set_on_state_callback(self.fan_state_updated)<line_sep><await>super().async_added_to_hass()<block_end>@callback<def_stmt>fan_state_updated self state:int **kwargs:Any<arrow><none><block_start>"""Handle state updates."""<line_sep>self._state=state<line_sep>self.async_write_ha_state()<block_end>@property<def_stmt>speed_count self<arrow>int<block_start>"""Return the number of speeds the fan supports."""<line_sep><return>len(ORDERED_NAMED_FAN_SPEEDS)<block_end>@property<def_stmt>percentage self<arrow>int|<none><block_start>"""Return the current speed percentage."""<if_stmt>self._state<is><none><block_start><return><none><block_end><if_stmt>self._state<eq>0<block_start><return>0<block_end><return>ordered_list_item_to_percentage(ORDERED_NAMED_FAN_SPEEDS self._state)<block_end><async_keyword><def_stmt>async_set_percentage self percentage:int<arrow><none><block_start>"""Set the speed of the fan."""<if_stmt>percentage<eq>0<block_start><await>self.async_turn_off()<block_end><else_stmt><block_start>tasmota_speed=percentage_to_ordered_list_item(ORDERED_NAMED_FAN_SPEEDS percentage)<line_sep><await>self._tasmota_entity.set_speed(tasmota_speed)<block_end><block_end><async_keyword><def_stmt>async_turn_on self percentage:int|<none>=<none> 
preset_mode:str|<none>=<none> **kwargs:Any <arrow><none><block_start>"""Turn the fan on."""<line_sep># Tasmota does not support turning a fan on with implicit speed <await>self.async_set_percentage(percentage<or>ordered_list_item_to_percentage(ORDERED_NAMED_FAN_SPEEDS tasmota_const.FAN_SPEED_MEDIUM))<block_end><async_keyword><def_stmt>async_turn_off self **kwargs:Any<arrow><none><block_start>"""Turn the fan off."""<line_sep><await>self._tasmota_entity.set_speed(tasmota_const.FAN_SPEED_OFF)<block_end><block_end>
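# --- Conceptual sketch of the speed <-> percentage mapping used above ---
# This is a re-implementation for illustration, not the
# homeassistant.util.percentage helpers themselves: the three named Tasmota
# speeds are spread evenly over 1-100%, and 0% means "off".
SPEEDS = ["low", "medium", "high"]

def speed_to_percentage(speed: str) -> int:
    return (SPEEDS.index(speed) + 1) * 100 // len(SPEEDS)

def percentage_to_speed(percentage: int) -> str:
    for position, speed in enumerate(SPEEDS, start=1):
        if percentage <= position * 100 // len(SPEEDS):
            return speed
    return SPEEDS[-1]

assert speed_to_percentage("high") == 100
assert percentage_to_speed(50) == "medium"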
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------------------------- <import_from_stmt>msrest Serializer Deserializer<import_from_stmt>...client Client<import_from_stmt>. models<class_stmt>SearchClient(Client)<block_start>"""Search :param str base_url: Service URL :param Authentication creds: Authenticated credentials. """<def_stmt>__init__ self base_url=<none> creds=<none><block_start>super(SearchClient self).__init__(base_url creds)<line_sep>client_models={k:v<for>k,v models.__dict__.items()<if>isinstance(v type)}<line_sep>self._serialize=Serializer(client_models)<line_sep>self._deserialize=Deserializer(client_models)<block_end>resource_area_identifier='ea48a0a1-269c-42d8-b8ad-ddc8fcdcf578'<def_stmt>fetch_scroll_code_search_results self request project=<none><block_start>"""FetchScrollCodeSearchResults. [Preview API] Provides a set of results for the search text. :param :class:`<ScrollSearchRequest> <azure.devops.v6_0.search.models.ScrollSearchRequest>` request: The Code Search Request. :param str project: Project ID or project name :rtype: :class:`<CodeSearchResponse> <azure.devops.v6_0.search.models.CodeSearchResponse>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end>content=self._serialize.body(request 'ScrollSearchRequest')<line_sep>response=self._send(http_method='POST' location_id='852dac94-e8f7-45a2-9910-927ae35766a2' version='6.0-preview.1' route_values=route_values content=content)<line_sep><return>self._deserialize('CodeSearchResponse' response)<block_end><def_stmt>fetch_code_search_results self request project=<none><block_start>"""FetchCodeSearchResults. [Preview API] Provides a set of results for the search text. :param :class:`<CodeSearchRequest> <azure.devops.v6_0.search.models.CodeSearchRequest>` request: The Code Search Request. :param str project: Project ID or project name :rtype: :class:`<CodeSearchResponse> <azure.devops.v6_0.search.models.CodeSearchResponse>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end>content=self._serialize.body(request 'CodeSearchRequest')<line_sep>response=self._send(http_method='POST' location_id='e7f29993-5b82-4fca-9386-f5cfe683d524' version='6.0-preview.1' route_values=route_values content=content)<line_sep><return>self._deserialize('CodeSearchResponse' response)<block_end><def_stmt>fetch_package_search_results self request<block_start>"""FetchPackageSearchResults. [Preview API] Provides a set of results for the search text. :param :class:`<PackageSearchRequest> <azure.devops.v6_0.search.models.PackageSearchRequest>` request: The Package Search Request. 
:rtype: :class:`<PackageSearchResponse> <azure.devops.v6_0.search.models.PackageSearchResponse>` """<line_sep>content=self._serialize.body(request 'PackageSearchRequest')<line_sep>response=self._send(http_method='POST' location_id='f62ada48-eedc-4c8e-93f0-de870e4ecce0' version='6.0-preview.1' content=content)<line_sep>response_object=models.PackageSearchResponse()<line_sep>response_object.content=self._deserialize('PackageSearchResponseContent' response)<line_sep>response_object.activity_id=response.headers.get('ActivityId')<line_sep><return>response_object<block_end><def_stmt>get_repository_status self project repository<block_start>"""GetRepositoryStatus. [Preview API] Provides status of Repository. :param str project: Project ID or project name :param str repository: Repository ID or repository name. :rtype: :class:`<RepositoryStatusResponse> <azure.devops.v6_0.search.models.RepositoryStatusResponse>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>repository<is><not><none><block_start>route_values['repository']=self._serialize.url('repository' repository 'str')<block_end>response=self._send(http_method='GET' location_id='1f60303c-7261-4387-80f1-742a2ecf2964' version='6.0-preview.1' route_values=route_values)<line_sep><return>self._deserialize('RepositoryStatusResponse' response)<block_end><def_stmt>get_tfvc_repository_status self project<block_start>"""GetTfvcRepositoryStatus. [Preview API] Provides status of TFVC Repository. :param str project: Project ID or project name :rtype: :class:`<TfvcRepositoryStatusResponse> <azure.devops.v6_0.search.models.TfvcRepositoryStatusResponse>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end>response=self._send(http_method='GET' location_id='d5bf4e52-e0af-4626-8c50-8a80b18fa69f' version='6.0-preview.1' route_values=route_values)<line_sep><return>self._deserialize('TfvcRepositoryStatusResponse' response)<block_end><def_stmt>fetch_wiki_search_results self request project=<none><block_start>"""FetchWikiSearchResults. [Preview API] Provides a set of results for the search request. :param :class:`<WikiSearchRequest> <azure.devops.v6_0.search.models.WikiSearchRequest>` request: The Wiki Search Request. :param str project: Project ID or project name :rtype: :class:`<WikiSearchResponse> <azure.devops.v6_0.search.models.WikiSearchResponse>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end>content=self._serialize.body(request 'WikiSearchRequest')<line_sep>response=self._send(http_method='POST' location_id='e90e7664-7049-4100-9a86-66b161d81080' version='6.0-preview.1' route_values=route_values content=content)<line_sep><return>self._deserialize('WikiSearchResponse' response)<block_end><def_stmt>fetch_work_item_search_results self request project=<none><block_start>"""FetchWorkItemSearchResults. [Preview API] Provides a set of results for the search text. :param :class:`<WorkItemSearchRequest> <azure.devops.v6_0.search.models.WorkItemSearchRequest>` request: The Work Item Search Request. 
:param str project: Project ID or project name :rtype: :class:`<WorkItemSearchResponse> <azure.devops.v6_0.search.models.WorkItemSearchResponse>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end>content=self._serialize.body(request 'WorkItemSearchRequest')<line_sep>response=self._send(http_method='POST' location_id='73b2c9e2-ff9e-4447-8cda-5f5b21ff7cae' version='6.0-preview.1' route_values=route_values content=content)<line_sep><return>self._deserialize('WorkItemSearchResponse' response)<block_end><block_end>
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries # SPDX-License-Identifier: MIT <import_stmt>time<import_stmt>board<import_stmt>digitalio<import_stmt>usb_hid<import_from_stmt>adafruit_hid.keyboard Keyboard<import_from_stmt>adafruit_hid.keycode Keycode<line_sep>kbd=Keyboard(usb_hid.devices)<line_sep># define buttons. these can be any physical switches/buttons, but the values # here work out-of-the-box with a CircuitPlayground Express' A and B buttons. swap=digitalio.DigitalInOut(board.D4)<line_sep>swap.direction=digitalio.Direction.INPUT<line_sep>swap.pull=digitalio.Pull.DOWN<line_sep>search=digitalio.DigitalInOut(board.D5)<line_sep>search.direction=digitalio.Direction.INPUT<line_sep>search.pull=digitalio.Pull.DOWN<while_stmt><true># press ALT+TAB to swap windows <block_start><if_stmt>swap.value<block_start>kbd.send(Keycode.ALT Keycode.TAB)<block_end># press CTRL+K, which in a web browser will open the search dialog <elif_stmt>search.value<block_start>kbd.send(Keycode.CONTROL Keycode.K)<block_end>time.sleep(0.1)<block_end>
<import_stmt>os<import_from_stmt>collections Iterable<import_from_stmt>jinja2 Environment<import_from_stmt>..commons utils<import_from_stmt>..datasets EXTRA FILENAMES<import_from_stmt>..globals CurrentConfig NotebookType<import_from_stmt>..types Any Optional<import_from_stmt>.display HTML Javascript<def_stmt>write_utf8_html_file file_name:str html_content:str<block_start><with_stmt>open(file_name "w+" encoding="utf-8")<as>html_file<block_start>html_file.write(html_content)<block_end><block_end><class_stmt>RenderEngine<block_start><def_stmt>__init__ self env:Optional[Environment]=<none><block_start>self.env=env<or>CurrentConfig.GLOBAL_ENV<block_end>@staticmethod<def_stmt>generate_js_link chart:Any<arrow>Any<block_start><if_stmt><not>chart.js_host<block_start>chart.js_host=CurrentConfig.ONLINE_HOST<block_end>links=[]<for_stmt>dep chart.js_dependencies.items# TODO: if? <block_start><if_stmt>dep.startswith("https://api.map.baidu.com")<block_start>links.append(dep)<block_end><if_stmt>dep<in>FILENAMES<block_start>f,ext=FILENAMES[dep]<line_sep>links.append("{}{}.{}".format(chart.js_host f ext))<block_end><else_stmt><block_start><for_stmt>url,files EXTRA.items()<block_start><if_stmt>dep<in>files<block_start>f,ext=files[dep]<line_sep>links.append("{}{}.{}".format(url f ext))<line_sep><break><block_end><block_end><block_end><block_end>chart.dependencies=links<line_sep><return>chart<block_end><def_stmt>render_chart_to_file self template_name:str chart:Any path:str **kwargs<block_start>""" Render a chart or page to local html files. :param chart: A Chart or Page object :param path: The destination file which the html code write to :param template_name: The name of template file. """<line_sep>tpl=self.env.get_template(template_name)<line_sep>html=utils.replace_placeholder(tpl.render(chart=self.generate_js_link(chart) **kwargs))<line_sep>write_utf8_html_file(path html)<block_end><def_stmt>render_chart_to_template self template_name:str chart:Any **kwargs<arrow>str<block_start>tpl=self.env.get_template(template_name)<line_sep><return>utils.replace_placeholder(tpl.render(chart=self.generate_js_link(chart) **kwargs))<block_end><def_stmt>render_chart_to_notebook self template_name:str **kwargs<arrow>str<block_start>tpl=self.env.get_template(template_name)<line_sep><return>utils.replace_placeholder(tpl.render(**kwargs))<block_end><block_end><def_stmt>render chart path:str template_name:str env:Optional[Environment] **kwargs<arrow>str<block_start>RenderEngine(env).render_chart_to_file(template_name=template_name chart=chart path=path **kwargs)<line_sep><return>os.path.abspath(path)<block_end><def_stmt>render_embed chart template_name:str env:Optional[Environment] **kwargs<arrow>str<block_start><return>RenderEngine(env).render_chart_to_template(template_name=template_name chart=chart **kwargs)<block_end><def_stmt>render_notebook self notebook_template lab_template<block_start>instance=self<if>isinstance(self Iterable)<else>(self )<if_stmt>CurrentConfig.NOTEBOOK_TYPE<eq>NotebookType.JUPYTER_NOTEBOOK<block_start>require_config=utils.produce_require_dict(self.js_dependencies self.js_host)<line_sep><return>HTML(RenderEngine().render_chart_to_notebook(template_name=notebook_template charts=instance config_items=require_config["config_items"] libraries=require_config["libraries"] ))<block_end><if_stmt>CurrentConfig.NOTEBOOK_TYPE<eq>NotebookType.JUPYTER_LAB<block_start><return>HTML(RenderEngine().render_chart_to_notebook(template_name=lab_template 
charts=instance))<block_end><if_stmt>CurrentConfig.NOTEBOOK_TYPE<eq>NotebookType.NTERACT<block_start><return>HTML(self.render_embed())<block_end><if_stmt>CurrentConfig.NOTEBOOK_TYPE<eq>NotebookType.ZEPPELIN<block_start>print("%html "+self.render_embed())<block_end><block_end><def_stmt>load_javascript chart<block_start>scripts=[]<for_stmt>dep chart.js_dependencies.items<block_start>f,ext=FILENAMES[dep]<line_sep>scripts.append("{}{}.{}".format(CurrentConfig.ONLINE_HOST f ext))<block_end><return>Javascript(lib=scripts)<block_end>
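A minimal usage sketch for the module-level render helper above; the Bar chart import and the "simple_chart.html" template name are assumptions based on typical pyecharts usage, not guarantees made by this file:

# Sketch: render a chart to a local HTML file via the render() helper above.
from pyecharts.charts import Bar

bar = Bar().add_xaxis(["a", "b"]).add_yaxis("demo", [1, 2])
# writes demo.html in the working directory and returns its absolute path
out_path = render(bar, path="demo.html", template_name="simple_chart.html", env=None)
print(out_path)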
"""Test the activation quantized ops"""<import_stmt>math<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch.nn functional<as>F<import_from_stmt>timeit_v2 py_benchmark<import_from_stmt>actnn QScheme QBNScheme config get_memory_usage compute_tensor_bytes<import_from_stmt>actnn.ops ext_backward_func ext_quantization<import_from_stmt>actnn.ops conv2d<as>quantized_conv2d batch_norm<as>quantized_batch_norm adaptive_avg_pool2d<as>quantized_adaptive_avg_pool2d<def_stmt>test_relu_correctness <block_start>print("========== ReLU Correctness Test ==========")<for_stmt>dtype ['float32' 'float16']<block_start>print(f"test {dtype}...")<line_sep>data_np=np.random.randn(128 56 56 31).astype(dtype)<def_stmt>test_implementation func<block_start>data=torch.tensor(data_np).to("cuda").requires_grad_()<line_sep>output=func(data)<line_sep>output.backward(torch.ones_like(output))<line_sep><return>[x.detach().cpu().numpy()<for>x [output data.grad]]<block_end>output_ref,grad_data_ref=test_implementation(F.relu)<line_sep>output_us,grad_data_us=test_implementation(ext_quantization.act_quantized_relu)<line_sep>np.testing.assert_allclose(output_ref output_us)<line_sep>np.testing.assert_allclose(grad_data_ref grad_data_us)<block_end><block_end><def_stmt>test_relu_memory <block_start>print("========== ReLU Memory Test ==========")<for_stmt>dtype ['float32' 'float16']<block_start>print(f"test {dtype}...")<line_sep>data_np=np.random.randn(128 56 56 32).astype(dtype)<def_stmt>test_implementation func<block_start>data=torch.tensor(data_np).to("cuda").requires_grad_()<line_sep>before=get_memory_usage()<for_stmt>i range(10)<block_start>data=func(data)<block_end>after=get_memory_usage()<line_sep><return>after-before<block_end>usage_ref=test_implementation(F.relu)<line_sep>usage_us=test_implementation(ext_quantization.act_quantized_relu)<line_sep>print("Exact. Usage: %.2f MB"%(usage_ref/2<power>20))<line_sep>print("Quantized. Usage: %.2f MB"%(usage_us/2<power>20))<block_end><block_end><def_stmt>test_relu_speed <block_start>print("========== ReLU Speed Test ==========")<for_stmt>dtype ['float32' 'float16']<block_start>print(f"test {dtype}...")<line_sep>data_np=np.random.randn(256 56 56 32).astype(dtype)<def_stmt>test_implementation func<block_start>data=torch.tensor(data_np).to("cuda").requires_grad_()<line_sep>stmt="func(data)"<line_sep>t_forward=py_benchmark(stmt {**globals() **locals()} setup="torch.cuda.synchronize()" finish="torch.cuda.synchronize()")<line_sep>output=func(data)<line_sep>head=torch.ones_like(output)<line_sep>stmt="output.backward(head, retain_graph=True)"<line_sep>t_backward=py_benchmark(stmt {**globals() **locals()} setup="torch.cuda.synchronize()" finish="torch.cuda.synchronize()")<line_sep><return>t_forward t_backward<block_end>forward_ref,backward_ref=test_implementation(F.relu)<line_sep>forward_us,backward_us=test_implementation(ext_quantization.act_quantized_relu)<line_sep>print("Exact. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms"%(forward_ref<times>1e3 backward_ref<times>1e3 (forward_ref+backward_ref)<times>1e3))<line_sep>print("Quantized. 
forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms"%(forward_us<times>1e3 backward_us<times>1e3 (forward_us+backward_us)<times>1e3))<block_end><block_end><def_stmt>test_dropout_memory <block_start>print("========== Dropout Memory Test ==========")<for_stmt>dtype ['float32' 'float16']<block_start>print(f"test {dtype}...")<line_sep>data_np=np.random.randn(128 56 56 32).astype(dtype)<def_stmt>test_implementation func<block_start>data=torch.tensor(data_np).to("cuda").requires_grad_()<line_sep>before=get_memory_usage()<for_stmt>i range(10)<block_start>data=func(data 0.2)<block_end>after=get_memory_usage()<line_sep><return>after-before<block_end>usage_ref=test_implementation(F.dropout)<line_sep>usage_us=test_implementation(ext_quantization.act_quantized_dropout)<line_sep>print("Exact. Usage: %.2f MB"%(usage_ref/2<power>20))<line_sep>print("Quantized. Usage: %.2f MB"%(usage_us/2<power>20))<block_end><block_end><def_stmt>test_dropout_speed <block_start>print("========== Dropout Speed Test ==========")<for_stmt>dtype ['float32' 'float16']<block_start>print(f"test {dtype}...")<line_sep>data_np=np.random.randn(256 56 56 32).astype(dtype)<def_stmt>test_implementation func<block_start>data=torch.tensor(data_np).to("cuda").requires_grad_()<line_sep>stmt="func(data, 0.2)"<line_sep>t_forward=py_benchmark(stmt {**globals() **locals()} setup="torch.cuda.synchronize()" finish="torch.cuda.synchronize()")<line_sep>output=func(data 0.2)<line_sep>head=torch.ones_like(output)<line_sep>stmt="output.backward(head, retain_graph=True)"<line_sep>t_backward=py_benchmark(stmt {**globals() **locals()} setup="torch.cuda.synchronize()" finish="torch.cuda.synchronize()")<line_sep><return>t_forward t_backward<block_end>forward_ref,backward_ref=test_implementation(F.dropout)<line_sep>forward_us,backward_us=test_implementation(ext_quantization.act_quantized_dropout)<line_sep>print("Exact. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms"%(forward_ref<times>1e3 backward_ref<times>1e3 (forward_ref+backward_ref)<times>1e3))<line_sep>print("Quantized. 
forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms"%(forward_us<times>1e3 backward_us<times>1e3 (forward_us+backward_us)<times>1e3))<block_end><block_end><def_stmt>test_adaptive_avg_pool2d_correctness <block_start>"""Test the correctness of computation results"""<line_sep># arguments and test data N,H,W,CI,CO,kernel_size,stride,padding,dilation,groups=4 28 28 256 256 3 1 1 1 1<line_sep>data_np=np.random.randn(N CI H W).astype('float32')<line_sep>head_np=np.random.randn(N CI 1 1).astype('float32')<line_sep>output_size=1 1<def_stmt>test_implementation func<block_start>torch.manual_seed(0)<line_sep>data=torch.tensor(data_np).to("cuda").requires_grad_()<line_sep>head=torch.tensor(head_np).to("cuda")<line_sep>output=func(data output_size)<line_sep>output.backward(head)<line_sep><return>[x.detach().cpu().numpy()<for>x [output data.grad]]<block_end>output_ref,grad_data_ref=test_implementation(F.adaptive_avg_pool2d)<line_sep>output_us,grad_data_us=test_implementation(quantized_adaptive_avg_pool2d.apply)<line_sep>atol=1e-4<line_sep>rtol=1e-4<line_sep>print("========== AdaptiveAvgPool2d Correctness Test ==========")<line_sep>np.testing.assert_allclose(output_ref output_us atol=atol rtol=rtol)<line_sep>np.testing.assert_allclose(grad_data_ref grad_data_us atol=atol rtol=rtol)<block_end><def_stmt>test_adaptive_avg_pool2d_memory <block_start>"""Test the memory usage"""<line_sep># arguments and test data N,H,W,CI=1024 4 4 1024<line_sep>data_np=np.random.randn(N CI H W).astype('float32')<line_sep>output_size=(1 1)<def_stmt>test_implementation func<block_start>data=torch.tensor(data_np).to("cuda").requires_grad_()<line_sep>output=func(data output_size)<for_stmt>i range(10)<block_start>output=func(output output_size)<block_end><return>get_memory_usage()-compute_tensor_bytes([data output])<block_end>usage_ref=test_implementation(F.adaptive_avg_pool2d)<line_sep>usage_us=test_implementation(quantized_adaptive_avg_pool2d.apply)<line_sep>print("========== AdaptiveAvgPool2d Memory Test ==========")<line_sep>print("Exact. Usage: %.3f MB"%(usage_ref/2<power>20))<line_sep>print("Quantized. 
Usage: %.2f MB"%(usage_us/2<power>20))<block_end><def_stmt>test_max_pool2d_correctness <block_start>"""Test the correctness of computation results"""<line_sep># arguments and test data N,H,W,CI,kernel_size,stride,padding,dilation=4 28 28 8 3 2 1 1<line_sep>ceil_mode,return_indices=<false> <false><line_sep>print("========== MaxPool2d Correctness Test ==========")<for_stmt>dtype ['float32' 'float16']<block_start>print(f"test {dtype}...")<line_sep>data_np=np.random.randn(N CI H W).astype(dtype)<def_stmt>test_implementation func<block_start>data=torch.tensor(data_np).to("cuda").requires_grad_()<line_sep>output=func(data (kernel_size kernel_size) (stride stride) (padding padding) (dilation dilation) ceil_mode return_indices)<line_sep>output.backward(torch.ones_like(output))<line_sep><return>[x.detach().cpu().numpy()<for>x [output data.grad]]<block_end>output_ref,grad_data_ref=test_implementation(F.max_pool2d)<line_sep>output_us,grad_data_us=test_implementation(ext_quantization.act_quantized_max_pool2d)<line_sep>atol=1e-4<line_sep>rtol=1e-4<line_sep>np.testing.assert_allclose(output_ref output_us atol=atol rtol=rtol)<line_sep>np.testing.assert_allclose(grad_data_ref grad_data_us atol=atol rtol=rtol)<block_end><block_end><def_stmt>test_max_pool2d_memory <block_start>"""Test the memory usage"""<line_sep># arguments and test data N,H,W,CI,kernel_size,stride,padding,dilation=128 28 28 8 3 2 1 1<line_sep>ceil_mode,return_indices=<false> <false><line_sep>print("========== MaxPool2d Memory Test ==========")<for_stmt>dtype ['float32' 'float16']<block_start>print(f"test {dtype}...")<line_sep>data_np=np.random.randn(N CI H W).astype(dtype)<def_stmt>test_implementation func<block_start>data=torch.tensor(data_np).to("cuda").requires_grad_()<line_sep>output=func(data (kernel_size kernel_size) (stride stride) (padding padding) (dilation dilation) ceil_mode return_indices)<line_sep><return>get_memory_usage()-compute_tensor_bytes([output data])<block_end>usage_ref=test_implementation(F.max_pool2d)<line_sep>usage_us=test_implementation(ext_quantization.act_quantized_max_pool2d)<line_sep>print("Exact. Usage: %.3f MB"%(usage_ref/2<power>20))<line_sep>print("Quantized. 
Usage: %.3f MB"%(usage_us/2<power>20))<block_end><block_end><def_stmt>test_max_pool2d_speed <block_start>"""Test the correctness of computation results"""<line_sep># arguments and test data N,H,W,CI,kernel_size,stride,padding,dilation=128 28 28 128 3 2 1 1<line_sep>ceil_mode,return_indices=<false> <false><line_sep>print("========== MaxPool2d Speed Test ==========")<for_stmt>dtype ['float32' 'float16']<block_start>print(f"test {dtype}...")<line_sep>data_np=np.random.randn(N CI H W).astype(dtype)<def_stmt>test_implementation func<block_start>data=torch.tensor(data_np).to("cuda").requires_grad_()<line_sep>stmt="func(data, (kernel_size, kernel_size), (stride, stride), (padding, padding),"<concat>"(dilation, dilation), ceil_mode, return_indices)"<line_sep>t_forward=py_benchmark(stmt {**globals() **locals()} setup="torch.cuda.synchronize()" finish="torch.cuda.synchronize()")<line_sep>output=func(data (kernel_size kernel_size) (stride stride) (padding padding) (dilation dilation) ceil_mode return_indices)<line_sep>head=torch.ones_like(output)<line_sep>stmt="output.backward(head, retain_graph=True)"<line_sep>t_backward=py_benchmark(stmt {**globals() **locals()} setup="torch.cuda.synchronize()" finish="torch.cuda.synchronize()")<line_sep><return>t_forward t_backward<block_end>forward_ref,backward_ref=test_implementation(F.max_pool2d)<line_sep>forward_us,backward_us=test_implementation(ext_quantization.act_quantized_max_pool2d)<line_sep>print("Exact. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms"%(forward_ref<times>1e3 backward_ref<times>1e3 (forward_ref+backward_ref)<times>1e3))<line_sep>print("Quantized. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms"%(forward_us<times>1e3 backward_us<times>1e3 (forward_us+backward_us)<times>1e3))<block_end><block_end><def_stmt>test_upsample_memory <block_start>"""Test the memory usage"""<line_sep># arguments and test data N,H,W,CI=128 28 28 8<line_sep>size,scale_factor,mode,align_corners=<none> 2 'bilinear' <false><line_sep>data_np=np.random.randn(N CI H W).astype('float32')<def_stmt>test_implementation func<block_start>data=torch.tensor(data_np).to("cuda").requires_grad_()<line_sep>output=func(data size scale_factor mode align_corners)<line_sep>output=func(output size scale_factor mode align_corners)<line_sep>output=func(output size scale_factor mode align_corners)<line_sep><return>get_memory_usage()-compute_tensor_bytes([output data])<block_end>usage_ref=test_implementation(F.interpolate)<line_sep>print("========== Upsample Memory Test ==========")<line_sep>print("Exact. 
Usage: %.3f MB"%(usage_ref/2<power>20))<block_end><def_stmt>test_bn_correctness # arguments and test data <block_start>N,H,W,CI=16 28 28 256<line_sep>data_np=np.random.randn(N CI H W).astype('float32')<times>0.01<line_sep>running_mean_np=np.random.randn(CI).astype('float32')<line_sep>running_var_np=np.random.randn(CI).astype('float32')<line_sep>bn_weight_np=np.random.randn(CI).astype('float32')<line_sep>bn_bias_np=np.random.randn(CI).astype('float32')<line_sep>training=<false><line_sep>bn_scheme=QBNScheme()<line_sep>config.compress_activation=<false><def_stmt>test_implementation func<block_start>torch.manual_seed(0)<line_sep>data=torch.tensor(data_np).to("cuda").requires_grad_()<line_sep>running_mean=torch.tensor(running_mean_np).to("cuda")<line_sep>running_var=torch.tensor(running_var_np).to("cuda")<line_sep>bn_weight=torch.tensor(bn_weight_np).to("cuda").requires_grad_()<line_sep>bn_bias=torch.tensor(bn_bias_np).to("cuda").requires_grad_()<if_stmt>func<eq>F.batch_norm<block_start>output=func(data running_mean running_var bn_weight bn_bias training 0.1 1e-5)<block_end><else_stmt><block_start>output=func(data running_mean running_var bn_weight bn_bias training 0.1 1e-5 bn_scheme)<block_end>output.backward(torch.ones_like(output))<line_sep><return>[x.detach().cpu().numpy()<for>x [output data.grad bn_weight.grad bn_bias.grad]]<block_end>output_ref,grad_data_ref,grad_weight_ref,grad_bias_ref=test_implementation(F.batch_norm)<line_sep>output_us,grad_data_us,grad_weight_us,grad_bias_us=test_implementation(quantized_batch_norm.apply)<line_sep>atol=1e-3<line_sep>rtol=1e-3<line_sep>print("========== BN Correctness Test ==========")<line_sep>np.testing.assert_allclose(output_ref output_us atol=atol rtol=rtol)<line_sep>np.testing.assert_allclose(grad_data_ref grad_data_us atol=atol rtol=rtol)<line_sep>np.testing.assert_allclose(grad_weight_ref grad_weight_us atol=atol rtol=rtol)<line_sep>np.testing.assert_allclose(grad_bias_ref grad_bias_us atol=atol rtol=rtol)<block_end><def_stmt>test_conv2d_correctness <block_start>"""Test the correctness of computation results"""<line_sep># arguments and test data N,H,W,CI,CO,kernel_size,stride,padding,dilation,groups=4 28 28 256 256 3 1 1 1 1<line_sep>print("========== Conv2d Correctness Test ==========")<for_stmt>dtype ['float32' 'float16']<block_start>print(f"test {dtype}...")<line_sep>data_np=np.random.randn(N CI H W).astype(dtype)<line_sep>weight_np=np.random.randn(CO CI<floordiv>groups kernel_size kernel_size).astype(dtype)<line_sep>bias_np=np.random.randn(CO).astype(dtype)<def_stmt>test_implementation func scheme<block_start>torch.manual_seed(0)<line_sep>data=torch.tensor(data_np).to("cuda").requires_grad_()<line_sep>weight=torch.tensor(weight_np).to("cuda").requires_grad_()<line_sep>bias=torch.tensor(bias_np).to("cuda").requires_grad_()<line_sep>output=func(data weight bias stride padding dilation groups scheme)<line_sep>output.backward(torch.ones_like(output))<line_sep><return>[x.detach().cpu().numpy()<for>x [output data.grad weight.grad bias.grad]]<block_end>config.activation_compression_bits=[16]<line_sep>config.initial_bits=16<line_sep>config.perlayer=<false><line_sep>config.use_gradient=<false><line_sep>scheme=QScheme(<none>)<line_sep>config.simulate=<true><line_sep>output_ref,grad_data_ref,grad_weight_ref,grad_bias_ref=test_implementation(quantized_conv2d.apply scheme)<line_sep>config.simulate=<false><line_sep>output_us,grad_data_us,grad_weight_us,grad_bias_us=test_implementation(quantized_conv2d.apply 
scheme)<line_sep>atol=1e-2<line_sep>rtol=1e-2<assert_stmt>output_ref.dtype<eq>output_us.dtype<line_sep>np.testing.assert_allclose(output_ref output_us atol=atol rtol=rtol)<line_sep>np.testing.assert_allclose(grad_data_ref grad_data_us atol=atol rtol=rtol)<line_sep>np.testing.assert_allclose(grad_weight_ref grad_weight_us atol=atol rtol=rtol)<line_sep>np.testing.assert_allclose(grad_bias_ref grad_bias_us atol=atol rtol=rtol)<block_end><block_end><def_stmt>test_conv2d_correctness_per_group_only <block_start>"""Test the correctness of computation results NOTE: This test will fail on large shapes or low bits. To make this test pass, we should disable stochastic noise. """<line_sep># arguments and test data N,H,W,CI,CO,kernel_size,stride,padding,dilation,groups=2 16 16 4 4 1 1 1 1 1<line_sep>print("========== Conv2d Correctness Test (per group only) ==========")<for_stmt>dtype ['float32' 'float16']<block_start>print(f"test {dtype}...")<line_sep>data_np=np.random.randn(N CI H W).astype(dtype)<line_sep>weight_np=np.random.randn(CO CI<floordiv>groups kernel_size kernel_size).astype(dtype)<line_sep>bias_np=np.random.randn(CO).astype(dtype)<def_stmt>test_implementation func scheme<block_start>torch.manual_seed(0)<line_sep>data=torch.tensor(data_np).to("cuda").requires_grad_()<line_sep>weight=torch.tensor(weight_np).to("cuda").requires_grad_()<line_sep>bias=torch.tensor(bias_np).to("cuda").requires_grad_()<line_sep>output=func(data weight bias stride padding dilation groups scheme)<line_sep>output.backward(torch.ones_like(output))<line_sep><return>[x.detach().cpu().numpy()<for>x [output data.grad weight.grad bias.grad]]<block_end>config.activation_compression_bits=[8]<line_sep>config.perlayer=<false><line_sep>config.use_gradient=<false><line_sep>config.simulate=<true><line_sep>output_ref,grad_data_ref,grad_weight_ref,grad_bias_ref=test_implementation(quantized_conv2d.apply <none>)<line_sep>config.simulate=<false><line_sep>output_us,grad_data_us,grad_weight_us,grad_bias_us=test_implementation(quantized_conv2d.apply <none>)<line_sep>atol=1e-1<line_sep>rtol=1e-1<assert_stmt>output_ref.dtype<eq>output_us.dtype<line_sep>np.testing.assert_allclose(output_ref output_us atol=atol rtol=rtol)<line_sep>np.testing.assert_allclose(grad_data_ref grad_data_us atol=atol rtol=rtol)<line_sep>np.testing.assert_allclose(grad_weight_ref grad_weight_us atol=atol rtol=rtol)<line_sep>np.testing.assert_allclose(grad_bias_ref grad_bias_us atol=atol rtol=rtol)<block_end><block_end><def_stmt>test_conv2d_speed <block_start>"""Test the speed of convolution layer"""<line_sep># arguments and test data N,H,W,CI,CO,kernel_size,stride,padding,dilation,groups=128 28 28 256 256 3 1 1 1 1<line_sep>print("========== Conv2d Speed Test ==========")<for_stmt>dtype ['float32' 'float16']<block_start>print(f"test {dtype}...")<line_sep>data_np=np.random.randn(N CI H W).astype(dtype)<line_sep>weight_np=np.random.randn(CO CI<floordiv>groups kernel_size kernel_size).astype(dtype)<line_sep>bias_np=np.random.randn(CO).astype(dtype)<line_sep>scheme=QScheme(<none>)<def_stmt>test_implementation func scheme<block_start>data=torch.tensor(data_np).to("cuda").requires_grad_()<line_sep>weight=torch.tensor(weight_np).to("cuda").requires_grad_()<line_sep>bias=torch.tensor(bias_np).to("cuda").requires_grad_()<if_stmt>func<eq>quantized_conv2d.apply<block_start>output=func(data weight bias stride padding dilation groups scheme)<line_sep>stmt="func(data, weight, bias, stride, padding, dilation, groups, scheme)"<block_end><else_stmt><block_start>output=func(data 
weight bias stride padding dilation groups)<line_sep>stmt="func(data, weight, bias, stride, padding, dilation, groups)"<block_end>t_forward=py_benchmark(stmt {**globals() **locals()} setup="torch.cuda.synchronize()" finish="torch.cuda.synchronize()")<line_sep>head=torch.ones_like(output)<line_sep>stmt="output.backward(head, retain_graph=True)"<line_sep>t_backward=py_benchmark(stmt {**globals() **locals()} setup="torch.cuda.synchronize()" finish="torch.cuda.synchronize()")<line_sep><return>t_forward t_backward<block_end>config.activation_compression_bits=[16]<line_sep>config.initial_bits=16<line_sep>config.perlayer=<false><line_sep>config.use_gradient=<false><line_sep>config.simulate=<false><line_sep>scheme=QScheme(<none>)<line_sep>forward_ref,backward_ref=test_implementation(F.conv2d <none>)<line_sep>forward_us,backward_us=test_implementation(quantized_conv2d.apply scheme)<line_sep>print("Exact. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms"%(forward_ref<times>1e3 backward_ref<times>1e3 (forward_ref+backward_ref)<times>1e3))<line_sep>print("Quantized. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms"%(forward_us<times>1e3 backward_us<times>1e3 (forward_us+backward_us)<times>1e3))<block_end><block_end><def_stmt>test_conv2d_memory_analytical <block_start>"""Compute the memory of activation analytically"""<line_sep># arguments and test data N,H,W,CI,CO,kernel_size,stride,padding,dilation,groups=256 28 28 256 256 3 1 1 1 1<line_sep>data_np=np.random.randn(N CI H W).astype('float32')<line_sep>weight_np=np.random.randn(CO CI<floordiv>groups kernel_size kernel_size).astype('float32')<line_sep>bias_np=np.random.randn(CO).astype('float32')<line_sep>running_mean=np.zeros((CO ) dtype='float32')<line_sep>running_var=np.ones((CO ) dtype='float32')<line_sep>bn_weight=np.random.randn(CO).astype('float32')<line_sep>bn_bias=np.random.randn(CO).astype('float32')<line_sep>scheme=QScheme(num_locations=kernel_size<power>2)<line_sep>bn_scheme=QBNScheme()<def_stmt>test_implementation conv_func relu_func bn_func n_layers=10<block_start>data=torch.tensor(data_np).to("cuda")<line_sep># allocate input and weights data=torch.tensor(data_np).to("cuda").requires_grad_(<false>)<line_sep>weights=[]<line_sep>running_means=[]<line_sep>running_vars=[]<line_sep>bn_weights=[]<line_sep>bn_biass=[]<for_stmt>i range(n_layers)<block_start>weights.append(torch.tensor(weight_np).to("cuda").requires_grad_())<line_sep>running_means.append(torch.tensor(running_mean).to("cuda"))<line_sep>running_vars.append(torch.tensor(running_var).to("cuda"))<line_sep>bn_weights.append(torch.tensor(bn_weight).to("cuda").requires_grad_())<line_sep>bn_biass.append(torch.tensor(bn_bias).to("cuda").requires_grad_())<block_end>before_size=get_memory_usage(<false>)<line_sep># forward n convolution layers output=data<for_stmt>i range(n_layers)<block_start><if_stmt>conv_func<eq>quantized_conv2d.apply<block_start>output=conv_func(output weights[i] <none> stride padding dilation groups scheme)<line_sep>output=bn_func(output running_means[i] running_vars[i] bn_weights[i] bn_biass[i] <true> 0.1 1e-5 bn_scheme)<block_end><else_stmt><block_start>output=conv_func(output weights[i] <none> stride padding dilation groups)<line_sep>output=bn_func(output running_means[i] running_vars[i] bn_weights[i] bn_biass[i] <true> 0.1 1e-5)<block_end>output=relu_func(output)<block_end>output=output.sum()<line_sep>after_size=get_memory_usage(<false>)<line_sep>output_size=compute_tensor_bytes(output)<line_sep><return>after_size/1024<power>2 
(after_size-before_size-output_size)/1024<power>2<block_end>total_size_ref,act_size_ref=test_implementation(F.conv2d <lambda>x:F.relu(x inplace=<true>) F.batch_norm)<line_sep>config.simulate=<true><line_sep>total_size_sim,act_size_sim=test_implementation(quantized_conv2d.apply ext_quantization.act_quantized_relu quantized_batch_norm.apply)<line_sep>config.simulate=<false><line_sep>total_size_us,act_size_us=test_implementation(quantized_conv2d.apply ext_quantization.act_quantized_relu quantized_batch_norm.apply)<line_sep>print("========== Conv2d Activation Memory Test (bits = %d) =========="%(config.activation_compression_bits))<line_sep>print("Exact. Total: %7.2f MB\tAct: %7.2f MB"%(total_size_ref act_size_ref))<line_sep>print("Simulation. Total: %7.2f MB\tAct: %7.2f MB"%(total_size_sim act_size_sim))<line_sep>print("Quantized. Total: %7.2f MB\tAct: %7.2f MB"%(total_size_us act_size_us))<block_end><def_stmt>test_conv2d_memory_max_batch_size <block_start>"""Find the maximum batch size by gradually increasing the batch size until hitting Out-of-memory error"""<for_stmt>device ["cuda"]<block_start><def_stmt>test_implementation func n_layers batch_sizes<block_start><def_stmt>run_batch_size batch_size<block_start>N,H,W,CI,CO,kernel_size,stride,padding,dilation,groups=batch_size 28 28 256 256 3 1 1 1 1<line_sep>data_np=np.random.uniform(size=(N CI H W)).astype('float32')<line_sep>weight_np=np.random.uniform(size=(CO CI<floordiv>groups kernel_size kernel_size)).astype('float32')<line_sep>bias_np=np.random.uniform(size=(CO )).astype('float32')<line_sep># allocate input and weights data=torch.tensor(data_np).to("cuda").requires_grad_(<false>)<line_sep>weights=[]<for_stmt>i range(n_layers)<block_start>weight=torch.tensor(weight_np).to("cuda").requires_grad_()<line_sep>weights.append(weight)<block_end>before_size=get_memory_usage(<false>)<line_sep># forward n convolution layers output=data<for_stmt>i range(n_layers)<block_start>output=func(output weights[i] <none> stride padding dilation groups)<block_end>output=output.sum()<line_sep>after_size=get_memory_usage(<false>)<line_sep>output_size=compute_tensor_bytes(output)<line_sep><return>after_size/1024<power>2 (after_size-before_size-output_size)/1024<power>2<block_end># try gradually increased batch sizes <try_stmt><block_start><for_stmt>i,batch_size enumerate(batch_sizes)<block_start>total_size_ref,act_size_ref=run_batch_size(batch_size)<line_sep>print("batch_size: %4d\t"%batch_size end="")<line_sep>print("total_memory: %7.2f MB\tact_memory: %7.2f MB"%(total_size_ref act_size_ref))<block_end><block_end><except_stmt>RuntimeError<block_start><pass><block_end><finally_stmt><block_start>print("Maximum batch size: %d"%(batch_sizes[i-1]))<block_end><block_end>print("========== Conv2d Batch Size Test ==========")<line_sep>print("---> Exact")<line_sep>test_implementation(F.conv2d n_layers=50 batch_sizes=[100 200 250 300 350 400 450 500 1000])<line_sep>print("---> Quantized")<line_sep>test_implementation(act_quantized_conv2d.apply n_layers=50 batch_sizes=[100 200 250 500 1000 2200 2300 2400 3000 4000])<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>test_relu_correctness()<line_sep>test_relu_memory()<line_sep>test_relu_speed()<line_sep>#test_dropout_memory() #test_dropout_speed() #test_adaptive_avg_pool2d_correctness() #test_adaptive_avg_pool2d_memory() #test_max_pool2d_correctness() #test_max_pool2d_memory() #test_max_pool2d_speed() #test_upsample_memory() #test_bn_correctness() 
test_conv2d_correctness()<line_sep>#test_conv2d_correctness_per_group_only() #test_conv2d_speed() #config.activation_compression_bits = 2 #test_conv2d_memory_analytical() #config.activation_compression_bits = 2 #test_conv2d_memory_max_batch_size() <block_end>
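The commented-out calls in __main__ sketch a bit-width sweep for the analytical memory test; a compact driver for it, assuming the scalar assignment to config.activation_compression_bits used in those commented lines:

def sweep_conv2d_memory(bit_widths=(2, 4, 8)):
    # mirrors the commented-out driver above; assumes a scalar bit setting is accepted here
    for bits in bit_widths:
        config.activation_compression_bits = bits
        test_conv2d_memory_analytical()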
<import_stmt>logging<import_from_stmt>xml.etree.cElementTree fromstring<import_from_stmt>xmljson yahoo<import_stmt>core<import_from_stmt>core.helpers Url<line_sep>logging=logging.getLogger(__name__)<def_stmt>search imdbid term<block_start>proxy_enabled=core.CONFIG['Server']['Proxy']['enabled']<line_sep>logging.info('Performing backlog search on TorrentDownloads for {}.'.format(imdbid))<line_sep>url='http://www.torrentdownloads.me/rss.xml?type=search&search={}'.format(term)<try_stmt><block_start><if_stmt>proxy_enabled<and>core.proxy.whitelist('http://www.torrentdownloads.me')<is><true><block_start>response=Url.open(url proxy_bypass=<true>).text<block_end><else_stmt><block_start>response=Url.open(url).text<block_end><if_stmt>response<block_start><return>_parse(response imdbid)<block_end><else_stmt><block_start><return>[]<block_end><block_end><except_stmt>(SystemExit KeyboardInterrupt)<block_start><raise><block_end><except_stmt>Exception<as>e<block_start>logging.error('TorrentDownloads search failed.' exc_info=<true>)<line_sep><return>[]<block_end><block_end><def_stmt>get_rss <block_start>proxy_enabled=core.CONFIG['Server']['Proxy']['enabled']<line_sep>logging.info('Fetching latest RSS from TorrentDownloads.')<line_sep>url='http://www.torrentdownloads.me/rss2/last/4'<try_stmt><block_start><if_stmt>proxy_enabled<and>core.proxy.whitelist('http://www.torrentdownloads.me')<is><true><block_start>response=Url.open(url proxy_bypass=<true>).text<block_end><else_stmt><block_start>response=Url.open(url).text<block_end><if_stmt>response<block_start><return>_parse(response <none>)<block_end><else_stmt><block_start><return>[]<block_end><block_end><except_stmt>(SystemExit KeyboardInterrupt)<block_start><raise><block_end><except_stmt>Exception<as>e<block_start>logging.error('TorrentDownloads RSS fetch failed.' exc_info=<true>)<line_sep><return>[]<block_end><block_end><def_stmt>_parse xml imdbid<block_start>logging.info('Parsing TorrentDownloads results.')<try_stmt><block_start>items=yahoo.data(fromstring(xml))['rss']['channel']['item']<block_end><except_stmt>Exception<as>e<block_start>logging.error('Unexpected XML format from TorrentDownloads.' exc_info=<true>)<line_sep><return>[]<block_end>results=[]<for_stmt>i items<block_start>result={}<try_stmt><block_start>result['score']=0<line_sep>result['size']=int(i['size'])<line_sep>result['status']='Available'<line_sep>result['pubdate']=<none><line_sep>result['title']=i['title']<line_sep>result['imdbid']=imdbid<line_sep>result['indexer']='TorrentDownloads'<line_sep>result['info_link']='http://www.torrentdownloads.me{}'.format(i['link'])<line_sep>result['torrentfile']=core.providers.torrent.magnet(i['info_hash'])<line_sep>result['guid']=i['info_hash']<line_sep>result['type']='magnet'<line_sep>result['downloadid']=<none><line_sep>result['freeleech']=0<line_sep>result['download_client']=<none><line_sep>result['seeders']=int(i['seeders'])<line_sep>results.append(result)<block_end><except_stmt>Exception<as>e<block_start>logging.error('Error parsing TorrentDownloads XML.' exc_info=<true>)<line_sep><continue><block_end><block_end>logging.info('Found {} results from TorrentDownloads.'.format(len(results)))<line_sep><return>results<block_end>
# coding=utf-8 <import_from_stmt>tests unittest<import_from_stmt>aliyunsdkcore.auth.credentials StsTokenCredential<import_from_stmt>aliyunsdkcore.auth.signers.sts_token_signer StsTokenSigner<import_from_stmt>aliyunsdkcore.request RpcRequest RoaRequest<class_stmt>TestStsTokenSigner(unittest.TestCase)<block_start><def_stmt>test_sts_token_signer self<block_start>credential=StsTokenCredential('sts_access_key_id' 'sts_access_key_secret' 'sts_token')<line_sep>signer=StsTokenSigner(credential)<line_sep># for rpc request=RpcRequest("product" "version" "action_name")<line_sep>self.assertIsNone(request.get_query_params().get("SecurityToken"))<line_sep>headers,url=signer.sign('cn-hangzhou' request)<line_sep>self.assertDictEqual(request.get_headers() {'x-acs-action':'action_name' 'x-acs-version':'version' 'x-sdk-invoke-type':'normal'})<line_sep>self.assertEqual(request.get_query_params().get("SecurityToken") 'sts_token')<line_sep># self.assertEqual(url, "/?SignatureVersion=1.0&Format=None" # "&Timestamp=2018-12-02T11%3A03%3A01Z&RegionId=cn-hangzhou" # "&AccessKeyId=access_key_id&SignatureMethod=HMAC-SHA1&Version=version" # "&Signature=AmdeJh1ZOW6PgwM3%2BROhEnbKII4%3D&Action=action_name" # "&SignatureNonce=d5e6e832-7f95-4f26-9e28-017f735721f8&SignatureType=') request=RoaRequest("product" "version" "action_name" uri_pattern="/")<line_sep>request.set_method('get')<line_sep>self.assertIsNone(request.get_headers().get("x-acs-security-token"))<line_sep>headers,url=signer.sign('cn-hangzhou' request)<line_sep>self.assertEqual(request.get_headers().get("x-acs-security-token") 'sts_token')<block_end><block_end>
<import_stmt>argparse<import_stmt>numpy<as>np<import_stmt>torch<as>th<import_stmt>torch.optim<as>optim<import_from_stmt>dgl.data PPIDataset<import_from_stmt>dgl.dataloading GraphDataLoader<import_from_stmt>sklearn.metrics f1_score<import_from_stmt>model GeniePath GeniePathLazy<def_stmt>evaluate model loss_fn dataloader device='cpu'<block_start>loss=0<line_sep>f1=0<line_sep>num_blocks=0<for_stmt>subgraph dataloader<block_start>subgraph=subgraph.to(device)<line_sep>label=subgraph.ndata['label'].to(device)<line_sep>feat=subgraph.ndata['feat']<line_sep>logits=model(subgraph feat)<line_sep># compute loss loss<augadd>loss_fn(logits label).item()<line_sep>predict=np.where(logits.data.cpu().numpy()<ge>0. 1 0)<line_sep>f1<augadd>f1_score(label.cpu() predict average='micro')<line_sep>num_blocks<augadd>1<block_end><return>f1/num_blocks loss/num_blocks<block_end><def_stmt>main args# Step 1: Prepare graph data and retrieve train/validation/test index ============================= # # Load dataset <block_start>train_dataset=PPIDataset(mode='train')<line_sep>valid_dataset=PPIDataset(mode='valid')<line_sep>test_dataset=PPIDataset(mode='test')<line_sep>train_dataloader=GraphDataLoader(train_dataset batch_size=args.batch_size)<line_sep>valid_dataloader=GraphDataLoader(valid_dataset batch_size=args.batch_size)<line_sep>test_dataloader=GraphDataLoader(test_dataset batch_size=args.batch_size)<line_sep># check cuda <if_stmt>args.gpu<ge>0<and>th.cuda.is_available()<block_start>device='cuda:{}'.format(args.gpu)<block_end><else_stmt><block_start>device='cpu'<block_end>num_classes=train_dataset.num_labels<line_sep># Extract node features graph=train_dataset[0]<line_sep>feat=graph.ndata['feat']<line_sep># Step 2: Create model =================================================================== # <if_stmt>args.lazy<block_start>model=GeniePathLazy(in_dim=feat.shape[-1] out_dim=num_classes hid_dim=args.hid_dim num_layers=args.num_layers num_heads=args.num_heads residual=args.residual)<block_end><else_stmt><block_start>model=GeniePath(in_dim=feat.shape[-1] out_dim=num_classes hid_dim=args.hid_dim num_layers=args.num_layers num_heads=args.num_heads residual=args.residual)<block_end>model=model.to(device)<line_sep># Step 3: Create training components ===================================================== # loss_fn=th.nn.BCEWithLogitsLoss()<line_sep>optimizer=optim.Adam(model.parameters() lr=args.lr)<line_sep># Step 4: training epochs =============================================================== # <for_stmt>epoch range(args.max_epoch)<block_start>model.train()<line_sep>tr_loss=0<line_sep>tr_f1=0<line_sep>num_blocks=0<for_stmt>subgraph train_dataloader<block_start>subgraph=subgraph.to(device)<line_sep>label=subgraph.ndata['label']<line_sep>feat=subgraph.ndata['feat']<line_sep>logits=model(subgraph feat)<line_sep># compute loss batch_loss=loss_fn(logits label)<line_sep>tr_loss<augadd>batch_loss.item()<line_sep>tr_predict=np.where(logits.data.cpu().numpy()<ge>0. 
1 0)<line_sep>tr_f1<augadd>f1_score(label.cpu() tr_predict average='micro')<line_sep>num_blocks<augadd>1<line_sep># backward optimizer.zero_grad()<line_sep>batch_loss.backward()<line_sep>optimizer.step()<block_end># validation model.eval()<line_sep>val_f1,val_loss=evaluate(model loss_fn valid_dataloader device)<line_sep>print("In epoch {}, Train F1: {:.4f} | Train Loss: {:.4f}; Valid F1: {:.4f} | Valid loss: {:.4f}".format(epoch tr_f1/num_blocks tr_loss/num_blocks val_f1 val_loss))<block_end># Test after all epoch model.eval()<line_sep>test_f1,test_loss=evaluate(model loss_fn test_dataloader device)<line_sep>print("Test F1: {:.4f} | Test loss: {:.4f}".format(test_f1 test_loss))<block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser(description='GeniePath')<line_sep>parser.add_argument("--gpu" type=int default=-1 help="GPU Index. Default: -1, using CPU.")<line_sep>parser.add_argument("--hid_dim" type=int default=256 help="Hidden layer dimension")<line_sep>parser.add_argument("--num_layers" type=int default=3 help="Number of GeniePath layers")<line_sep>parser.add_argument("--max_epoch" type=int default=1000 help="The max number of epochs. Default: 1000")<line_sep>parser.add_argument("--lr" type=float default=0.0004 help="Learning rate. Default: 0.0004")<line_sep>parser.add_argument("--num_heads" type=int default=1 help="Number of head in breadth function. Default: 1")<line_sep>parser.add_argument("--residual" type=bool default=<false> help="Residual in GAT or not")<line_sep>parser.add_argument("--batch_size" type=int default=2 help="Batch size of graph dataloader")<line_sep>parser.add_argument("--lazy" type=bool default=<false> help="Variant GeniePath-Lazy")<line_sep>args=parser.parse_args()<line_sep>print(args)<line_sep>th.manual_seed(16)<line_sep>main(args)<block_end>
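The micro-F1 above is computed by thresholding raw logits at zero; a self-contained illustration of that step with made-up arrays (not PPI data):

import numpy as np
from sklearn.metrics import f1_score

# made-up multi-label logits and targets
logits = np.array([[0.3, -1.2], [2.1, 0.7]])
labels = np.array([[1, 0], [1, 1]])
predict = np.where(logits >= 0., 1, 0)
print(f1_score(labels, predict, average='micro'))  # 1.0 for this toy case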
# date: 2019.04.09 # <import_stmt>tkinter<as>tk<line_sep># --- functions --- <def_stmt>get_text text<block_start>print(text)<block_end><def_stmt>get_widget widget<block_start>print(widget["text"])<line_sep>widget["text"]="DONE"<line_sep>widget["bg"]="green"<block_end><def_stmt>get_event event<block_start>print(event.widget["text"])<line_sep>event.widget["text"]="DONE"<line_sep>event.widget["bg"]="green"<block_end># --- main --- list_words=("One" "Two" "Three")<line_sep>root=tk.Tk()<line_sep># access button's text in function assigned to button <for_stmt>word list_words<block_start>btn=tk.Button(root text=word command=<lambda>txt=word:get_text(txt))<line_sep>btn.pack()<block_end># access button in function assigned to button <for_stmt>word list_words<block_start>btn=tk.Button(root text=word)<line_sep>btn["command"]=<lambda>widget=btn:get_widget(widget)<line_sep>btn.pack()<block_end># access button in function assigned to button <for_stmt>word list_words<block_start>btn=tk.Button(root text=word)<line_sep>btn.bind('<Button-1>' get_event)<line_sep>btn.pack()<block_end>root.mainloop()<line_sep>
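The lambda txt=word default argument in the first loop is what binds the current word to each button's callback; a minimal demonstration of the late-binding behaviour it works around:

# Closures capture the variable, not its value: without the default argument
# every callback would see the final loop value.
words = ("One", "Two", "Three")
wrong = [lambda: w for w in words]
right = [lambda w=w: w for w in words]
print([f() for f in wrong])   # ['Three', 'Three', 'Three']
print([f() for f in right])   # ['One', 'Two', 'Three']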
# Copyright Materialize, Inc. and contributors. All rights reserved. # # Use of this software is governed by the Business Source License # included in the LICENSE file at the root of this repository. # # As of the Change Date specified in that file, in accordance with # the Business Source License, use of this software will be governed # by the Apache License, Version 2.0. <import_stmt>os<import_from_stmt>materialize.mzcompose Kafka Materialized SchemaRegistry Testdrive Workflow Zookeeper <line_sep>materialized=Materialized(options="--persistent-user-tables --persistent-kafka-upsert-source --disable-persistent-system-tables-test")<line_sep>mz_disable_user_indexes=Materialized(name="mz_disable_user_indexes" hostname="materialized" options="--persistent-user-tables --persistent-kafka-upsert-source --disable-persistent-system-tables-test --disable-user-indexes" )<line_sep># This instance of Mz is used for failpoint testing. By using --disable-persistent-system-tables-test # we ensure that only testdrive-initiated actions cause I/O. The --workers 1 is used due to #8739 mz_without_system_tables=Materialized(name="mz_without_system_tables" hostname="materialized" options="--persistent-user-tables --disable-persistent-system-tables-test --workers 1" )<line_sep>prerequisites=[Zookeeper() Kafka() SchemaRegistry()]<line_sep>services=[*prerequisites materialized mz_disable_user_indexes mz_without_system_tables Testdrive(no_reset=<true> seed=1) ]<line_sep>td_test=os.environ.pop("TD_TEST" "*")<def_stmt>workflow_persistence w:Workflow<block_start>workflow_kafka_sources(w)<line_sep>workflow_user_tables(w)<line_sep>workflow_failpoints(w)<line_sep>workflow_disable_user_indexes(w)<block_end><def_stmt>workflow_kafka_sources w:Workflow<block_start>w.start_and_wait_for_tcp(services=prerequisites timeout_secs=240)<line_sep>w.start_services(services=["materialized"])<line_sep>w.wait_for_mz(service="materialized")<line_sep>w.run_service(service="testdrive-svc" command=f"kafka-sources/*{td_test}*-before.td" )<line_sep>w.kill_services(services=["materialized"] signal="SIGKILL")<line_sep>w.start_services(services=["materialized"])<line_sep>w.wait_for_mz(service="materialized")<line_sep># And restart again, for extra stress w.kill_services(services=["materialized"] signal="SIGKILL")<line_sep>w.start_services(services=["materialized"])<line_sep>w.wait_for_mz(service="materialized")<line_sep>w.run_service(service="testdrive-svc" command=f"kafka-sources/*{td_test}*-after.td" )<line_sep># Do one more restart, just in case and just confirm that Mz is able to come up w.kill_services(services=["materialized"] signal="SIGKILL")<line_sep>w.start_services(services=["materialized"])<line_sep>w.wait_for_mz(service="materialized")<line_sep>w.kill_services(services=["materialized"] signal="SIGKILL")<line_sep>w.remove_services(services=["materialized" "testdrive-svc"] destroy_volumes=<true>)<line_sep>w.remove_volumes(volumes=["mzdata"])<block_end><def_stmt>workflow_user_tables w:Workflow<block_start>w.start_services(services=["materialized"])<line_sep>w.wait_for_mz(service="materialized")<line_sep>w.run_service(service="testdrive-svc" command=f"user-tables/table-persistence-before-{td_test}.td" )<line_sep>w.kill_services(services=["materialized"] signal="SIGKILL")<line_sep>w.start_services(services=["materialized"])<line_sep>w.run_service(service="testdrive-svc" command=f"user-tables/table-persistence-after-{td_test}.td" )<line_sep>w.kill_services(services=["materialized"] 
signal="SIGKILL")<line_sep>w.remove_services(services=["materialized" "testdrive-svc"] destroy_volumes=<true>)<line_sep>w.remove_volumes(volumes=["mzdata"])<block_end><def_stmt>workflow_failpoints w:Workflow<block_start>w.start_services(services=["mz_without_system_tables"])<line_sep>w.wait_for_mz(service="mz_without_system_tables")<line_sep>w.run_service(service="testdrive-svc" command=f"failpoints/{td_test}.td")<line_sep>w.kill_services(services=["mz_without_system_tables"] signal="SIGKILL")<line_sep>w.remove_services(services=["mz_without_system_tables" "testdrive-svc"] destroy_volumes=<true>)<line_sep>w.remove_volumes(volumes=["mzdata"])<block_end><def_stmt>workflow_disable_user_indexes w:Workflow<block_start>w.start_and_wait_for_tcp(services=prerequisites)<line_sep>w.start_services(services=["materialized"])<line_sep>w.wait_for_mz(service="materialized")<line_sep>w.run_service(service="testdrive-svc" command="disable-user-indexes/before.td" )<line_sep>w.kill_services(services=["materialized"] signal="SIGKILL")<line_sep>w.start_services(services=["mz_disable_user_indexes"])<line_sep>w.wait_for_mz(service="mz_disable_user_indexes")<line_sep>w.run_service(service="testdrive-svc" command="disable-user-indexes/after.td" )<line_sep>w.kill_services(services=["mz_disable_user_indexes"] signal="SIGKILL")<line_sep>w.remove_services(services=["materialized" "mz_disable_user_indexes" "testdrive-svc"] destroy_volumes=<true> )<line_sep>w.remove_volumes(volumes=["mzdata"])<block_end>
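Each workflow above repeats the same kill/restart/wait sequence; a hypothetical helper (not part of this composition) that would factor it out using only the Workflow methods already called here:

def restart_materialized(w: Workflow, times: int = 1) -> None:
    # hypothetical helper: SIGKILL materialized and bring it back up, repeatedly
    for _ in range(times):
        w.kill_services(services=["materialized"], signal="SIGKILL")
        w.start_services(services=["materialized"])
        w.wait_for_mz(service="materialized")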
# Generated by Django 3.1 on 2020-08-28 01:34 <import_stmt>django.db.models.deletion<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("automation" "0004_auto_20200617_0332") ("agents" "0012_auto_20200810_0544") ("winupdate" "0002_auto_20200715_0445") ]<line_sep>operations=[migrations.AddField(model_name="winupdatepolicy" name="policy" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.CASCADE related_name="winupdatepolicy" to="automation.policy" ) ) migrations.AlterField(model_name="winupdatepolicy" name="agent" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.CASCADE related_name="winupdatepolicy" to="agents.agent" ) ) ]<block_end>
# ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------- <import_stmt>asyncio<import_stmt>pytest<import_from_stmt>_shared.test_case_async KeyVaultTestCase<import_from_stmt>_test_case client_setup get_decorator SecretsTestCase<line_sep>all_api_versions=get_decorator(is_async=<true>)<def_stmt>print *args<block_start><assert_stmt>all(arg<is><not><none><for>arg args)<block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_create_secret_client <block_start>vault_url="vault_url"<line_sep># pylint:disable=unused-variable # [START create_secret_client] <import_from_stmt>azure.identity.aio DefaultAzureCredential<import_from_stmt>azure.keyvault.secrets.aio SecretClient<line_sep># Create a SecretClient using default Azure credentials credential=DefaultAzureCredential()<line_sep>secret_client=SecretClient(vault_url credential)<line_sep># the client and credential should be closed when no longer needed # (both are also async context managers) <await>secret_client.close()<line_sep><await>credential.close()<line_sep># [END create_secret_client] <block_end><class_stmt>TestExamplesKeyVault(SecretsTestCase KeyVaultTestCase)<block_start>@all_api_versions()@client_setup<async_keyword><def_stmt>test_example_secret_crud_operations self client **kwargs<block_start>secret_client=client<line_sep>secret_name=self.get_resource_name("secret-name")<line_sep># [START set_secret] <import_from_stmt>dateutil parser<as>date_parse<line_sep>expires_on=date_parse.parse("2050-02-02T08:00:00.000Z")<line_sep># create a secret, setting optional arguments secret=<await>secret_client.set_secret(secret_name "secret-value" enabled=<true> expires_on=expires_on)<line_sep>print(secret.id)<line_sep>print(secret.name)<line_sep>print(secret.properties.enabled)<line_sep>print(secret.properties.expires_on)<line_sep># [END set_secret] secret_version=secret.properties.version<line_sep># [START get_secret] # get the latest version of a secret secret=<await>secret_client.get_secret(secret_name)<line_sep># alternatively, specify a version secret=<await>secret_client.get_secret(secret_name secret_version)<line_sep>print(secret.id)<line_sep>print(secret.name)<line_sep>print(secret.properties.version)<line_sep>print(secret.properties.vault_url)<line_sep># [END get_secret] # [START update_secret] # update attributes of an existing secret content_type="text/plain"<line_sep>tags={"foo":"updated tag"}<line_sep>updated_secret_properties=<await>secret_client.update_secret_properties(secret_name content_type=content_type tags=tags)<line_sep>print(updated_secret_properties.version)<line_sep>print(updated_secret_properties.updated_on)<line_sep>print(updated_secret_properties.content_type)<line_sep>print(updated_secret_properties.tags)<line_sep># [END update_secret] # [START delete_secret] # delete a secret deleted_secret=<await>secret_client.delete_secret(secret_name)<line_sep>print(deleted_secret.name)<line_sep># if the vault has soft-delete enabled, the secret's deleted_date, # scheduled purge date and recovery id are set print(deleted_secret.deleted_date)<line_sep>print(deleted_secret.scheduled_purge_date)<line_sep>print(deleted_secret.recovery_id)<line_sep># [END delete_secret] <block_end>@all_api_versions()@client_setup<async_keyword><def_stmt>test_example_secret_list_operations self client **kwargs<block_start>secret_client=client<for_stmt>i 
range(7)<block_start>secret_name=self.get_resource_name("secret{}".format(i))<line_sep><await>secret_client.set_secret(secret_name "value{}".format(i))<block_end># [START list_secrets] # gets a list of secrets in the vault secrets=secret_client.list_properties_of_secrets()<async_keyword><for_stmt>secret secrets# the list doesn't include values or versions of the secrets <block_start>print(secret.id)<line_sep>print(secret.name)<line_sep>print(secret.enabled)<block_end># [END list_secrets] # [START list_properties_of_secret_versions] # gets a list of all versions of a secret secret_versions=secret_client.list_properties_of_secret_versions("secret-name")<async_keyword><for_stmt>secret secret_versions# the list doesn't include the versions' values <block_start>print(secret.id)<line_sep>print(secret.enabled)<line_sep>print(secret.updated_on)<block_end># [END list_properties_of_secret_versions] # [START list_deleted_secrets] # gets a list of deleted secrets (requires soft-delete enabled for the vault) deleted_secrets=secret_client.list_deleted_secrets()<async_keyword><for_stmt>secret deleted_secrets# the list doesn't include values or versions of the deleted secrets <block_start>print(secret.id)<line_sep>print(secret.name)<line_sep>print(secret.scheduled_purge_date)<line_sep>print(secret.recovery_id)<line_sep>print(secret.deleted_date)<block_end># [END list_deleted_secrets] <block_end>@all_api_versions()@client_setup<async_keyword><def_stmt>test_example_secrets_backup_restore self client **kwargs<block_start>secret_client=client<line_sep>secret_name=self.get_resource_name("secret-name")<line_sep><await>secret_client.set_secret(secret_name "secret-value")<line_sep># [START backup_secret] # backup secret secret_backup=<await>secret_client.backup_secret(secret_name)<line_sep># returns the raw bytes of the backed up secret print(secret_backup)<line_sep># [END backup_secret] <await>secret_client.delete_secret(secret_name)<line_sep><await>secret_client.purge_deleted_secret(secret_name)<if_stmt>self.is_live<block_start><await>asyncio.sleep(60)<block_end># [START restore_secret_backup] # restores a backed up secret restored_secret=<await>secret_client.restore_secret_backup(secret_backup)<line_sep>print(restored_secret.id)<line_sep>print(restored_secret.version)<line_sep># [END restore_secret_backup] <block_end>@all_api_versions()@client_setup<async_keyword><def_stmt>test_example_secrets_recover self client **kwargs<block_start>secret_client=client<line_sep>secret_name=self.get_resource_name("secret-name")<line_sep><await>secret_client.set_secret(secret_name "secret-value")<line_sep><await>secret_client.delete_secret(secret_name)<line_sep># [START get_deleted_secret] # gets a deleted secret (requires soft-delete enabled for the vault) deleted_secret=<await>secret_client.get_deleted_secret(secret_name)<line_sep>print(deleted_secret.name)<line_sep># [END get_deleted_secret] # [START recover_deleted_secret] # recover deleted secret to the latest version recovered_secret=<await>secret_client.recover_deleted_secret(secret_name)<line_sep>print(recovered_secret.id)<line_sep>print(recovered_secret.name)<line_sep># [END recover_deleted_secret] <block_end><block_end>
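As the comment in test_create_secret_client notes, the async credential and SecretClient are also async context managers, which replaces the explicit close() calls; a short sketch (vault_url and the secret name are placeholders):

from azure.identity.aio import DefaultAzureCredential
from azure.keyvault.secrets.aio import SecretClient

async def read_secret(vault_url: str, name: str) -> str:
    # context managers close the credential and client automatically
    async with DefaultAzureCredential() as credential:
        async with SecretClient(vault_url, credential) as client:
            secret = await client.get_secret(name)
            return secret.value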
<import_stmt>esphome.codegen<as>cg<import_stmt>esphome.config_validation<as>cv<import_from_stmt>esphome.components spi pn532<import_from_stmt>esphome.const CONF_ID<line_sep>AUTO_LOAD=["pn532"]<line_sep>CODEOWNERS=["@OttoWinter" "@jesserockz"]<line_sep>DEPENDENCIES=["spi"]<line_sep>MULTI_CONF=<true><line_sep>pn532_spi_ns=cg.esphome_ns.namespace("pn532_spi")<line_sep>PN532Spi=pn532_spi_ns.class_("PN532Spi" pn532.PN532 spi.SPIDevice)<line_sep>CONFIG_SCHEMA=cv.All(pn532.PN532_SCHEMA.extend({cv.GenerateID():cv.declare_id(PN532Spi) }).extend(spi.spi_device_schema(cs_pin_required=<true>)))<async_keyword><def_stmt>to_code config<block_start>var=cg.new_Pvariable(config[CONF_ID])<line_sep><await>pn532.setup_pn532(var config)<line_sep><await>spi.register_spi_device(var config)<block_end>
<import_from_stmt>flask session<import_from_stmt>cloud_inquisitor.app _import_templates<import_from_stmt>cloud_inquisitor.constants ROLE_ADMIN HTTP<import_from_stmt>cloud_inquisitor.database db<import_from_stmt>cloud_inquisitor.log auditlog<import_from_stmt>cloud_inquisitor.plugins BaseView<import_from_stmt>cloud_inquisitor.schema Role Template<import_from_stmt>cloud_inquisitor.utils MenuItem diff<import_from_stmt>cloud_inquisitor.wrappers check_auth rollback<class_stmt>TemplateList(BaseView)<block_start>URLS=['/api/v1/templates']<line_sep>MENU_ITEMS=[MenuItem('admin' 'Templates' 'template.list' 'template' order=4)]<line_sep>@rollback@check_auth(ROLE_ADMIN)<def_stmt>get self<block_start>templates=db.Template.all()<line_sep><return>self.make_response({'templates':templates 'templateCount':len(templates)})<block_end>@rollback@check_auth(ROLE_ADMIN)<def_stmt>post self<block_start>"""Create a new template"""<line_sep>self.reqparse.add_argument('templateName' type=str required=<true>)<line_sep>self.reqparse.add_argument('template' type=str required=<true>)<line_sep>args=self.reqparse.parse_args()<line_sep>template=db.Template.find_one(template_name=args['templateName'])<if_stmt>template<block_start><return>self.make_response('Template already exists, update the existing template instead' HTTP.CONFLICT)<block_end>template=Template()<line_sep>template.template_name=args['templateName']<line_sep>template.template=args['template']<line_sep>db.session.add(template)<line_sep>db.session.commit()<line_sep>auditlog(event='template.create' actor=session['user'].username data=args)<line_sep><return>self.make_response('Template {} has been created'.format(template.template_name) HTTP.CREATED)<block_end>@rollback@check_auth(ROLE_ADMIN)<def_stmt>put self<block_start>"""Re-import all templates, overwriting any local changes made"""<try_stmt><block_start>_import_templates(force=<true>)<line_sep><return>self.make_response('Imported templates')<block_end><except_stmt><block_start>self.log.exception('Failed importing templates')<line_sep><return>self.make_response('Failed importing templates' HTTP.SERVER_ERROR)<block_end><block_end><block_end><class_stmt>TemplateGet(BaseView)<block_start>URLS=['/api/v1/template/<string:template_name>']<line_sep>@rollback@check_auth(ROLE_ADMIN)<def_stmt>get self template_name<block_start>"""Get a specific template"""<line_sep>template=db.Template.find_one(template_name=template_name)<if_stmt><not>template<block_start><return>self.make_response('No such template found' HTTP.NOT_FOUND)<block_end><return>self.make_response({'template':template})<block_end>@rollback@check_auth(ROLE_ADMIN)<def_stmt>put self template_name<block_start>"""Update a template"""<line_sep>self.reqparse.add_argument('template' type=str required=<true>)<line_sep>args=self.reqparse.parse_args()<line_sep>template=db.Template.find_one(template_name=template_name)<if_stmt><not>template<block_start><return>self.make_response('No such template found' HTTP.NOT_FOUND)<block_end>changes=diff(template.template args['template'])<line_sep>template.template=args['template']<line_sep>template.is_modified=<true><line_sep>db.session.add(template)<line_sep>db.session.commit()<line_sep>auditlog(event='template.update' actor=session['user'].username data={'template_name':template_name 'template_changes':changes})<line_sep><return>self.make_response('Template {} has been updated'.format(template_name))<block_end>@rollback@check_auth(ROLE_ADMIN)<def_stmt>delete self template_name<block_start>"""Delete a 
template"""<line_sep>template=db.Template.find_one(template_name=template_name)<if_stmt><not>template<block_start><return>self.make_response('No such template found' HTTP.NOT_FOUND)<block_end>db.session.delete(template)<line_sep>db.session.commit()<line_sep>auditlog(event='template.delete' actor=session['user'].username data={'template_name':template_name})<line_sep><return>self.make_response({'message':'Template has been deleted' 'templateName':template_name})<block_end><block_end>
# coding: utf-8 <import_stmt>os<import_stmt>shutil<import_stmt>pickle<import_stmt>librosa<import_stmt>argparse<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_from_stmt>glob glob<import_from_stmt>tqdm tqdm<import_from_stmt>PIL Image<import_from_stmt>facenet_pytorch MTCNN InceptionResnetV1<import_stmt>torch<import_from_stmt>transformers BertTokenizer BertModel<import_from_stmt>torch.utils.data Dataset DataLoader<class_stmt>MDataPreLoader(Dataset)<block_start><def_stmt>__init__ self args<block_start>self.working_dir=args.working_dir<line_sep>self.df=args.df<line_sep>self.annotation_dict={"Negative":0 "Neutral":1 "Positive":2}<line_sep># toolkits path self.openface2Path=args.openface2Path<line_sep># bert tokenizer_class=BertTokenizer<if_stmt>args.language<eq>'cn'<block_start>self.pretrainedBertPath='pretrained_model/bert_cn'<line_sep>self.tokenizer=tokenizer_class.from_pretrained('pretrained_model/bert_cn')<block_end><else_stmt><block_start>self.pretrainedBertPath='pretrained_model/bert_en'<line_sep>self.tokenizer=tokenizer_class.from_pretrained('pretrained_model/bert_en' do_lower_case=<true>)<block_end><block_end><def_stmt>__len__ self<block_start><return>len(self.df)<block_end><def_stmt>__getVideoEmbedding self video_path tmp_dir pool_size=3<block_start>faces_feature_dir=os.path.join(tmp_dir 'Faces')<line_sep>os.mkdir(faces_feature_dir)<line_sep>cmd=self.openface2Path+' -f '+video_path+' -out_dir '+faces_feature_dir<line_sep>os.system(cmd)<line_sep># read features features,local_features=[] []<line_sep>df_path=glob(os.path.join(faces_feature_dir '*.csv'))<if_stmt>len(df_path)<g>0<block_start>df_path=df_path[0]<line_sep>df=pd.read_csv(df_path)<for_stmt>i range(len(df))<block_start>local_features.append(np.array(df.loc[i][df.columns[5:]]))<if_stmt>(i+1)%pool_size<eq>0<block_start>features.append(np.array(local_features).mean(axis=0))<line_sep>local_features=[]<block_end><block_end><if_stmt>len(local_features)<ne>0<block_start>features.append(np.array(local_features).mean(axis=0))<block_end><block_end><return>np.array(features)<block_end><def_stmt>__getAudioEmbedding self video_path audio_path# use ffmpeg to extract audio <block_start>cmd='ffmpeg -i '+video_path+' -f wav -vn '+audio_path+' -loglevel quiet'<line_sep>os.system(cmd)<line_sep># get features y,sr=librosa.load(audio_path)<line_sep># using librosa to get audio features (f0, mfcc, cqt) hop_length=512# hop_length smaller, seq_len larger f0=librosa.feature.zero_crossing_rate(y hop_length=hop_length).T# (seq_len, 1) mfcc=librosa.feature.mfcc(y=y sr=sr hop_length=hop_length htk=<true>).T# (seq_len, 20) cqt=librosa.feature.chroma_cqt(y=y sr=sr hop_length=hop_length).T# (seq_len, 12) <return>np.concatenate([f0 mfcc cqt] axis=-1)<block_end><def_stmt>__getTextEmbedding self text# directory is fine <block_start>tokenizer=BertTokenizer.from_pretrained(self.pretrainedBertPath)<line_sep>model=BertModel.from_pretrained(self.pretrainedBertPath)<line_sep># add_special_tokens will add start and end token input_ids=torch.tensor([tokenizer.encode(text add_special_tokens=<true>)])<with_stmt>torch.no_grad()<block_start>last_hidden_states=model(input_ids)[0]# Models outputs are now tuples <block_end><return>last_hidden_states.squeeze().numpy()<block_end><def_stmt>__preTextforBert self text<block_start>tokens_a=self.tokenizer.tokenize(text 
invertable=<true>)<line_sep>tokens=["[CLS]"]+tokens_a+["[SEP]"]<line_sep>segment_ids=[0]<times>len(tokens)<line_sep>input_ids=self.tokenizer.convert_tokens_to_ids(tokens)<line_sep>input_mask=[1]<times>len(input_ids)<line_sep>input_ids=np.expand_dims(input_ids 1)<line_sep>input_mask=np.expand_dims(input_mask 1)<line_sep>segment_ids=np.expand_dims(segment_ids 1)<line_sep>text_bert=np.concatenate([input_ids input_mask segment_ids] axis=1)<line_sep><return>text_bert<block_end><def_stmt>__getitem__ self index<block_start>tmp_dir=os.path.join(self.working_dir f'Processed/tmp-{index}')<if_stmt><not>os.path.exists(tmp_dir)<block_start>os.makedirs(tmp_dir)<block_end>video_id,clip_id,text,label,annotation,mode,_=self.df.loc[index]<line_sep>cur_id=video_id+'$_$'+clip_id<line_sep># video video_path=os.path.join(self.working_dir 'Raw' video_id clip_id+'.mp4')<line_sep>embedding_V=self.__getVideoEmbedding(video_path tmp_dir)<line_sep>seq_V=embedding_V.shape[0]<line_sep># audio audio_path=os.path.join(tmp_dir 'tmp.wav')<line_sep>embedding_A=self.__getAudioEmbedding(video_path audio_path)<line_sep>seq_A=embedding_A.shape[0]<line_sep># text embedding_T=self.__getTextEmbedding(text)<line_sep>text_bert=self.__preTextforBert(text)<line_sep>seq_T=embedding_T.shape[0]<line_sep>ret={'id':cur_id 'audio':embedding_A 'vision':embedding_V 'raw_text':text 'text':embedding_T 'text_bert':text_bert 'audio_lengths':seq_A 'vision_lengths':seq_V 'annotations':annotation 'classification_labels':self.annotation_dict[annotation] 'regression_labels':label 'mode':mode}<line_sep># clear tmp dir to save space shutil.rmtree(tmp_dir)<line_sep><return>ret<block_end><block_end><class_stmt>MDataPre()<block_start><def_stmt>__init__ self args<block_start>self.working_dir=args.working_dir<line_sep># padding self.padding_mode='zeros'<line_sep>self.padding_location='back'<block_end><def_stmt>__padding self feature MAX_LEN<block_start>""" mode: zero: padding with 0 normal: padding with normal distribution location: front / back """<assert_stmt>self.padding_mode<in>['zeros' 'normal']<assert_stmt>self.padding_location<in>['front' 'back']<line_sep>length=feature.shape[0]<if_stmt>length<ge>MAX_LEN<block_start><return>feature[:MAX_LEN :]<block_end><if_stmt>self.padding_mode<eq>"zeros"<block_start>pad=np.zeros([MAX_LEN-length feature.shape[-1]])<block_end><elif_stmt>self.padding_mode<eq>"normal"<block_start>mean,std=feature.mean() feature.std()<line_sep>pad=np.random.normal(mean std (MAX_LEN-length feature.shape[1]))<block_end>feature=np.concatenate([pad feature] axis=0)<if>(self.padding_location<eq>"front")<else>np.concatenate((feature pad) axis=0)<line_sep><return>feature<block_end><def_stmt>__paddingSequence self sequences<block_start><if_stmt>len(sequences)<eq>0<block_start><return>sequences<block_end>feature_dim=sequences[0].shape[-1]<line_sep>lens=[s.shape[0]<for>s sequences]<line_sep># confirm length using (mean + std) final_length=int(np.mean(lens)+3<times>np.std(lens))<line_sep># padding sequences to final_length final_sequence=np.zeros([len(sequences) final_length feature_dim])<for_stmt>i,s enumerate(sequences)<block_start><if_stmt>len(s)<ne>0<block_start>final_sequence[i]=self.__padding(s final_length)<block_end><block_end><return>final_sequence<block_end><def_stmt>__collate_fn self batch<block_start>ret={k:[]<for>k batch[0].keys()}<for_stmt>b batch<block_start><for_stmt>k,v b.items()<block_start>ret[k].append(v)<block_end><block_end><return>ret<block_end><def_stmt>run self<block_start>output_path=os.path.join(self.working_dir 
'Processed/features.pkl')<line_sep># load last point <if_stmt>os.path.exists(output_path)<block_start><with_stmt>open(output_path 'rb')<as>f<block_start>data=pickle.load(f)<block_end>last_row_idx=len(data['id'])<block_end><else_stmt><block_start>data={"id":[] "raw_text":[] "audio":[] "vision":[] "text":[] "text_bert":[] "audio_lengths":[] "vision_lengths":[] "annotations":[] "classification_labels":[] "regression_labels":[] "mode":[]}<line_sep>last_row_idx=0<block_end>args.df=pd.read_csv(os.path.join(self.working_dir 'label.csv') dtype={'clip_id':str 'video_id':str 'text':str})<line_sep>args.df=args.df[last_row_idx:]<line_sep>dataloader=DataLoader(MDataPreLoader(args) batch_size=64 num_workers=8 shuffle=<false> collate_fn=self.__collate_fn)<line_sep>isEnd=<false><try_stmt><block_start><with_stmt>tqdm(dataloader)<as>td<block_start><for_stmt>batch_data td<block_start><for_stmt>k,v batch_data.items()<block_start>data[k].extend(v)<block_end><block_end><block_end>isEnd=<true><block_end><except_stmt>Exception<as>e<block_start>print(e)<block_end><finally_stmt><block_start><try_stmt><block_start><if_stmt>isEnd# padding <block_start><for_stmt>item ['audio' 'vision' 'text' 'text_bert']<block_start>data[item]=self.__paddingSequence(data[item])<block_end># data['mode'] = list(args.df['mode']) # split train, valid, test inx_dict={mode+'_index':[i<for>i,v enumerate(data['mode'])<if>v<eq>mode]<for>mode ['train' 'valid' 'test']}<line_sep>data.pop('mode')<line_sep>final_data={k:{}<for>k ['train' 'valid' 'test']}<for_stmt>mode ['train' 'valid' 'test']<block_start>indexes=inx_dict[mode+'_index']<for_stmt>item data.keys()<block_start><if_stmt>isinstance(data[item] list)<block_start>final_data[mode][item]=[data[item][v]<for>v indexes]<block_end><else_stmt><block_start>final_data[mode][item]=data[item][indexes]<block_end><block_end><block_end>data=final_data<block_end><block_end><except_stmt>Exception<as>e<block_start>print(e)<block_end><finally_stmt><block_start><with_stmt>open(output_path 'wb')<as>wf<block_start>pickle.dump(data wf protocol=4)<block_end><block_end>print('Features are saved in %s!'%output_path)<block_end><block_end><block_end><def_stmt>parse_args <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--working_dir' type=str default='/home/sharing/disk3/dataset/multimodal-sentiment-dataset/StandardDatasets/MOSEI' help='path to datasets')<line_sep>parser.add_argument('--language' type=str default="en" help='en / cn')<line_sep>parser.add_argument('--openface2Path' type=str default="/home/iyuge2/ToolKits/OpenFace/build/bin/FeatureExtraction" help='path to FeatureExtraction tool in openface2')<line_sep><return>parser.parse_args()<block_end><if_stmt>__name__<eq>"__main__"<block_start>args=parse_args()<line_sep>dp=MDataPre(args)<line_sep>dp.run()<block_end>
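A minimal follow-up sketch, not part of the original script: loading the features.pkl written by MDataPre.run() and inspecting the train split. The key layout ('train'/'valid'/'test' mapped to per-item dicts) and the audio feature width (1 zero-crossing + 20 MFCC + 12 chroma-CQT columns = 33) follow the code above; the working-directory path is illustrative.

import os
import pickle

working_dir = '/path/to/MOSEI'  # illustrative; corresponds to the --working_dir argument above
features_path = os.path.join(working_dir, 'Processed/features.pkl')

with open(features_path, 'rb') as f:
    data = pickle.load(f)

train = data['train']
print(len(train['id']))                     # number of training clips
print(train['audio'].shape)                 # (N, seq_len_A, 33): zero-crossing + MFCC + CQT
print(train['vision'].shape)                # (N, seq_len_V, OpenFace feature dim)
print(train['text'].shape)                  # (N, seq_len_T, BERT hidden size)
print(train['classification_labels'][:5])   # integer labels mapped from Negative/Neutral/Positive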
# flake8: noqa <import_from_stmt>. serialization<import_from_stmt>.client InfluxDBClient InfluxDBError InfluxDBWriteError<import_from_stmt>.iterutils iterpoints<import_from_stmt>.serialization.usertype *<line_sep>__version__='0.9.0'<line_sep>
<import_stmt>json<import_stmt>os<import_stmt>random<import_from_stmt>unittest.mock patch<import_stmt>cv2<import_stmt>numpy<as>np<import_stmt>pytest<import_stmt>albumentations<as>A<import_stmt>albumentations.augmentations.functional<as>F<import_from_stmt>albumentations.core.serialization SERIALIZABLE_REGISTRY shorten_class_name<import_from_stmt>albumentations.core.transforms_interface ImageOnlyTransform<import_from_stmt>.conftest skipif_no_torch<import_from_stmt>.utils OpenMock check_all_augs_exists get_dual_transforms get_image_only_transforms get_transforms set_seed <line_sep>TEST_SEEDS=(0 1 42 111 9999)<line_sep>@pytest.mark.parametrize(["augmentation_cls" "params"] get_transforms(custom_arguments={A.Crop:{"y_min":0 "y_max":10 "x_min":0 "x_max":10} A.CenterCrop:{"height":10 "width":10} A.CropNonEmptyMaskIfExists:{"height":10 "width":10} A.RandomCrop:{"height":10 "width":10} A.RandomResizedCrop:{"height":10 "width":10} A.RandomSizedCrop:{"min_max_height":(4 8) "height":10 "width":10} A.CropAndPad:{"px":10} A.Resize:{"height":10 "width":10} } except_augmentations={A.RandomCropNearBBox A.RandomSizedBBoxSafeCrop A.FDA A.HistogramMatching A.PixelDistributionAdaptation A.Lambda A.TemplateTransform } ) )@pytest.mark.parametrize("p" [0.5 1])@pytest.mark.parametrize("seed" TEST_SEEDS)@pytest.mark.parametrize("always_apply" (<false> <true>))<def_stmt>test_augmentations_serialization augmentation_cls params p seed image mask always_apply<block_start>aug=augmentation_cls(p=p always_apply=always_apply **params)<line_sep>serialized_aug=A.to_dict(aug)<line_sep>deserialized_aug=A.from_dict(serialized_aug)<line_sep>set_seed(seed)<line_sep>aug_data=aug(image=image mask=mask)<line_sep>set_seed(seed)<line_sep>deserialized_aug_data=deserialized_aug(image=image mask=mask)<assert_stmt>np.array_equal(aug_data["image"] deserialized_aug_data["image"])<assert_stmt>np.array_equal(aug_data["mask"] deserialized_aug_data["mask"])<block_end>AUGMENTATION_CLS_PARAMS=[[A.ImageCompression {"quality_lower":10 "quality_upper":80 "compression_type":A.ImageCompression.ImageCompressionType.WEBP } ] [A.JpegCompression {"quality_lower":10 "quality_upper":80}] [A.HueSaturationValue {"hue_shift_limit":70 "sat_shift_limit":95 "val_shift_limit":55}] [A.RGBShift {"r_shift_limit":70 "g_shift_limit":80 "b_shift_limit":40}] [A.RandomBrightnessContrast {"brightness_limit":0.5 "contrast_limit":0.8}] [A.Blur {"blur_limit":3}] [A.MotionBlur {"blur_limit":3}] [A.MedianBlur {"blur_limit":3}] [A.GaussianBlur {"blur_limit":3}] [A.GaussNoise {"var_limit":(20 90) "mean":10 "per_channel":<false>}] [A.CLAHE {"clip_limit":2 "tile_grid_size":(12 12)}] [A.RandomGamma {"gamma_limit":(10 90)}] [A.Cutout {"num_holes":4 "max_h_size":4 "max_w_size":4}] [A.CoarseDropout {"max_holes":4 "max_height":4 "max_width":4}] [A.RandomSnow {"snow_point_lower":0.2 "snow_point_upper":0.4 "brightness_coeff":4}] [A.RandomRain {"slant_lower":-5 "slant_upper":5 "drop_length":15 "drop_width":2 "drop_color":(100 100 100) "blur_value":3 "brightness_coefficient":0.5 "rain_type":"heavy" } ] [A.RandomFog {"fog_coef_lower":0.2 "fog_coef_upper":0.8 "alpha_coef":0.11}] [A.RandomSunFlare {"flare_roi":(0.1 0.1 0.9 0.6) "angle_lower":0.1 "angle_upper":0.95 "num_flare_circles_lower":7 "num_flare_circles_upper":11 "src_radius":300 "src_color":(200 200 200) } ] [A.RandomShadow {"shadow_roi":(0.1 0.4 0.9 0.9) "num_shadows_lower":2 "num_shadows_upper":4 "shadow_dimension":8 } ] [A.PadIfNeeded {"min_height":512 "min_width":512 "border_mode":cv2.BORDER_CONSTANT "value":(10 10 10)} ] [A.Rotate 
{"limit":120 "interpolation":cv2.INTER_CUBIC "border_mode":cv2.BORDER_CONSTANT "value":(10 10 10) } ] [A.SafeRotate {"limit":120 "interpolation":cv2.INTER_CUBIC "border_mode":cv2.BORDER_CONSTANT "value":(10 10 10) } ] [A.ShiftScaleRotate {"shift_limit":0.2 "scale_limit":0.2 "rotate_limit":70 "interpolation":cv2.INTER_CUBIC "border_mode":cv2.BORDER_CONSTANT "value":(10 10 10) } ] [A.ShiftScaleRotate {"shift_limit_x":0.3 "shift_limit_y":0.4 "scale_limit":0.2 "rotate_limit":70 "interpolation":cv2.INTER_CUBIC "border_mode":cv2.BORDER_CONSTANT "value":(10 10 10) } ] [A.OpticalDistortion {"distort_limit":0.2 "shift_limit":0.2 "interpolation":cv2.INTER_CUBIC "border_mode":cv2.BORDER_CONSTANT "value":(10 10 10) } ] [A.GridDistortion {"num_steps":10 "distort_limit":0.5 "interpolation":cv2.INTER_CUBIC "border_mode":cv2.BORDER_CONSTANT "value":(10 10 10) } ] [A.ElasticTransform {"alpha":2 "sigma":25 "alpha_affine":40 "interpolation":cv2.INTER_CUBIC "border_mode":cv2.BORDER_CONSTANT "value":(10 10 10) } ] [A.CenterCrop {"height":10 "width":10}] [A.RandomCrop {"height":10 "width":10}] [A.CropNonEmptyMaskIfExists {"height":10 "width":10}] [A.RandomSizedCrop {"min_max_height":(4 8) "height":10 "width":10}] [A.Crop {"x_max":64 "y_max":64}] [A.ToFloat {"max_value":16536}] [A.Normalize {"mean":(0.385 0.356 0.306) "std":(0.129 0.124 0.125) "max_pixel_value":100.0}] [A.RandomBrightness {"limit":0.4}] [A.RandomContrast {"limit":0.4}] [A.RandomScale {"scale_limit":0.2 "interpolation":cv2.INTER_CUBIC}] [A.Resize {"height":64 "width":64}] [A.SmallestMaxSize {"max_size":64 "interpolation":cv2.INTER_CUBIC}] [A.LongestMaxSize {"max_size":128 "interpolation":cv2.INTER_CUBIC}] [A.RandomGridShuffle {"grid":(5 5)}] [A.Solarize {"threshold":32}] [A.Posterize {"num_bits":1}] [A.Equalize {"mode":"pil" "by_channels":<false>}] [A.MultiplicativeNoise {"multiplier":(0.7 2.3) "per_channel":<true> "elementwise":<true>}] [A.ColorJitter {"brightness":[0.2 0.3] "contrast":[0.7 0.9] "saturation":[1.2 1.7] "hue":[-0.2 0.1]} ] [A.Perspective {"scale":0.5 "keep_size":<false> "pad_mode":cv2.BORDER_REFLECT_101 "pad_val":10 "mask_pad_val":100 "fit_output":<true> "interpolation":cv2.INTER_CUBIC } ] [A.Sharpen {"alpha":[0.2 0.5] "lightness":[0.5 1.0]}] [A.Emboss {"alpha":[0.2 0.5] "strength":[0.5 1.0]}] [A.RandomToneCurve {"scale":0.2}] [A.CropAndPad {"px":10 "keep_size":<false> "sample_independently":<false> "interpolation":cv2.INTER_CUBIC "pad_cval_mask":[10 20 30] "pad_cval":[11 12 13] "pad_mode":cv2.BORDER_REFLECT101 } ] [A.Superpixels {"p_replace":(0.5 0.7) "n_segments":(20 30) "max_size":25 "interpolation":cv2.INTER_CUBIC} ] [A.Affine {"scale":0.5 "translate_percent":0.7 "translate_px":<none> "rotate":33 "shear":21 "interpolation":cv2.INTER_CUBIC "cval":25 "cval_mask":1 "mode":cv2.BORDER_REFLECT "fit_output":<true> } ] [A.Affine {"scale":{"x":[0.3 0.5] "y":[0.1 0.2]} "translate_percent":<none> "translate_px":{"x":[10 200] "y":[5 101]} "rotate":[333 360] "shear":{"x":[31 38] "y":[41 48]} "interpolation":3 "cval":[10 20 30] "cval_mask":1 "mode":cv2.BORDER_REFLECT "fit_output":<true> } ] [A.PiecewiseAffine {"scale":0.33 "nb_rows":(10 20) "nb_cols":33 "interpolation":2 "mask_interpolation":1 "cval":10 "cval_mask":20 "mode":"edge" "absolute_scale":<true> "keypoints_threshold":0.1 } ] [A.ChannelDropout dict(channel_drop_range=(1 2) fill_value=1)] [A.ChannelShuffle {}] [A.Downscale dict(scale_min=0.5 scale_max=0.75 interpolation=cv2.INTER_LINEAR)] [A.Flip {}] [A.FromFloat dict(dtype="uint8" max_value=1)] [A.HorizontalFlip {}] [A.ISONoise 
dict(color_shift=(0.2 0.3) intensity=(0.7 0.9))] [A.InvertImg {}] [A.MaskDropout dict(max_objects=2 image_fill_value=10 mask_fill_value=20)] [A.NoOp {}] [A.RandomResizedCrop dict(height=20 width=30 scale=(0.5 0.6) ratio=(0.8 0.9))] [A.FancyPCA dict(alpha=0.3)] [A.RandomRotate90 {}] [A.ToGray {}] [A.ToSepia {}] [A.Transpose {}] [A.VerticalFlip {}] [A.RingingOvershoot dict(blur_limit=(7 15) cutoff=(np.pi/5 np.pi/2))] [A.UnsharpMask {"blur_limit":3 "sigma_limit":0.5 "alpha":0.2 "threshold":15}] [A.AdvancedBlur dict(blur_limit=(3 5) rotate_limit=(60 90))] [A.PixelDropout {"dropout_prob":0.1 "per_channel":<true> "drop_value":<none>}] [A.PixelDropout {"dropout_prob":0.1 "per_channel":<false> "drop_value":<none> "mask_drop_value":15}] ]<line_sep>AUGMENTATION_CLS_EXCEPT={A.FDA A.HistogramMatching A.PixelDistributionAdaptation A.Lambda A.RandomCropNearBBox A.RandomSizedBBoxSafeCrop A.GridDropout A.GlassBlur A.TemplateTransform }<line_sep>@pytest.mark.parametrize(["augmentation_cls" "params"] check_all_augs_exists(AUGMENTATION_CLS_PARAMS AUGMENTATION_CLS_EXCEPT))@pytest.mark.parametrize("p" [0.5 1])@pytest.mark.parametrize("seed" TEST_SEEDS)@pytest.mark.parametrize("always_apply" (<false> <true>))<def_stmt>test_augmentations_serialization_with_custom_parameters augmentation_cls params p seed image mask always_apply<block_start>aug=augmentation_cls(p=p always_apply=always_apply **params)<line_sep>serialized_aug=A.to_dict(aug)<line_sep>deserialized_aug=A.from_dict(serialized_aug)<line_sep>set_seed(seed)<line_sep>aug_data=aug(image=image mask=mask)<line_sep>set_seed(seed)<line_sep>deserialized_aug_data=deserialized_aug(image=image mask=mask)<assert_stmt>np.array_equal(aug_data["image"] deserialized_aug_data["image"])<assert_stmt>np.array_equal(aug_data["mask"] deserialized_aug_data["mask"])<block_end>@pytest.mark.parametrize(["augmentation_cls" "params"] check_all_augs_exists(AUGMENTATION_CLS_PARAMS AUGMENTATION_CLS_EXCEPT))@pytest.mark.parametrize("p" [0.5 1])@pytest.mark.parametrize("seed" TEST_SEEDS)@pytest.mark.parametrize("always_apply" (<false> <true>))@pytest.mark.parametrize("data_format" ("yaml" ))<def_stmt>test_augmentations_serialization_to_file_with_custom_parameters augmentation_cls params p seed image mask always_apply data_format<block_start><with_stmt>patch("builtins.open" OpenMock())<block_start>aug=augmentation_cls(p=p always_apply=always_apply **params)<line_sep>filepath="serialized.{}".format(data_format)<line_sep>A.save(aug filepath data_format=data_format)<line_sep>deserialized_aug=A.load(filepath data_format=data_format)<line_sep>set_seed(seed)<line_sep>aug_data=aug(image=image mask=mask)<line_sep>set_seed(seed)<line_sep>deserialized_aug_data=deserialized_aug(image=image mask=mask)<assert_stmt>np.array_equal(aug_data["image"] deserialized_aug_data["image"])<assert_stmt>np.array_equal(aug_data["mask"] deserialized_aug_data["mask"])<block_end><block_end>@pytest.mark.parametrize(["augmentation_cls" "params"] get_transforms(custom_arguments={A.Crop:{"y_min":0 "y_max":10 "x_min":0 "x_max":10} A.CenterCrop:{"height":10 "width":10} A.CropNonEmptyMaskIfExists:{"height":10 "width":10} A.RandomCrop:{"height":10 "width":10} A.RandomResizedCrop:{"height":10 "width":10} A.RandomSizedCrop:{"min_max_height":(4 8) "height":10 "width":10} A.CropAndPad:{"px":10} A.Resize:{"height":10 "width":10} A.RandomSizedBBoxSafeCrop:{"height":10 "width":10} } except_augmentations={A.RandomCropNearBBox A.FDA A.HistogramMatching A.PixelDistributionAdaptation A.Lambda A.CoarseDropout A.CropNonEmptyMaskIfExists 
A.ElasticTransform A.GridDistortion A.RandomGridShuffle A.GridDropout A.MaskDropout A.OpticalDistortion A.TemplateTransform } ) )@pytest.mark.parametrize("p" [0.5 1])@pytest.mark.parametrize("seed" TEST_SEEDS)@pytest.mark.parametrize("always_apply" (<false> <true>))<def_stmt>test_augmentations_for_bboxes_serialization augmentation_cls params p seed image albumentations_bboxes always_apply<block_start>aug=augmentation_cls(p=p always_apply=always_apply **params)<line_sep>serialized_aug=A.to_dict(aug)<line_sep>deserialized_aug=A.from_dict(serialized_aug)<line_sep>set_seed(seed)<line_sep>aug_data=aug(image=image bboxes=albumentations_bboxes)<line_sep>set_seed(seed)<line_sep>deserialized_aug_data=deserialized_aug(image=image bboxes=albumentations_bboxes)<assert_stmt>np.array_equal(aug_data["image"] deserialized_aug_data["image"])<assert_stmt>np.array_equal(aug_data["bboxes"] deserialized_aug_data["bboxes"])<block_end>@pytest.mark.parametrize(["augmentation_cls" "params"] get_transforms(custom_arguments={A.Crop:{"y_min":0 "y_max":10 "x_min":0 "x_max":10} A.CenterCrop:{"height":10 "width":10} A.CropNonEmptyMaskIfExists:{"height":10 "width":10} A.RandomCrop:{"height":10 "width":10} A.RandomResizedCrop:{"height":10 "width":10} A.RandomSizedCrop:{"min_max_height":(4 8) "height":10 "width":10} A.CropAndPad:{"px":10} A.Resize:{"height":10 "width":10} } except_augmentations={A.RandomCropNearBBox A.FDA A.HistogramMatching A.PixelDistributionAdaptation A.Lambda A.CoarseDropout A.CropNonEmptyMaskIfExists A.ElasticTransform A.GridDistortion A.RandomGridShuffle A.GridDropout A.MaskDropout A.OpticalDistortion A.RandomSizedBBoxSafeCrop A.TemplateTransform } ) )@pytest.mark.parametrize("p" [0.5 1])@pytest.mark.parametrize("seed" TEST_SEEDS)@pytest.mark.parametrize("always_apply" (<false> <true>))<def_stmt>test_augmentations_for_keypoints_serialization augmentation_cls params p seed image keypoints always_apply<block_start>aug=augmentation_cls(p=p always_apply=always_apply **params)<line_sep>serialized_aug=A.to_dict(aug)<line_sep>deserialized_aug=A.from_dict(serialized_aug)<line_sep>set_seed(seed)<line_sep>aug_data=aug(image=image keypoints=keypoints)<line_sep>set_seed(seed)<line_sep>deserialized_aug_data=deserialized_aug(image=image keypoints=keypoints)<assert_stmt>np.array_equal(aug_data["image"] deserialized_aug_data["image"])<assert_stmt>np.array_equal(aug_data["keypoints"] deserialized_aug_data["keypoints"])<block_end>@pytest.mark.parametrize(["augmentation_cls" "params" "call_params"] [[A.RandomCropNearBBox {"max_part_shift":0.15} {"cropping_bbox":[-59 77 177 231]}]] )@pytest.mark.parametrize("p" [0.5 1])@pytest.mark.parametrize("seed" TEST_SEEDS)@pytest.mark.parametrize("always_apply" (<false> <true>))<def_stmt>test_augmentations_serialization_with_call_params augmentation_cls params call_params p seed image always_apply<block_start>aug=augmentation_cls(p=p always_apply=always_apply **params)<line_sep>annotations={"image":image **call_params}<line_sep>serialized_aug=A.to_dict(aug)<line_sep>deserialized_aug=A.from_dict(serialized_aug)<line_sep>set_seed(seed)<line_sep>aug_data=aug(**annotations)<line_sep>set_seed(seed)<line_sep>deserialized_aug_data=deserialized_aug(**annotations)<assert_stmt>np.array_equal(aug_data["image"] deserialized_aug_data["image"])<block_end><def_stmt>test_from_float_serialization float_image<block_start>aug=A.FromFloat(p=1 
dtype="uint8")<line_sep>serialized_aug=A.to_dict(aug)<line_sep>deserialized_aug=A.from_dict(serialized_aug)<line_sep>aug_data=aug(image=float_image)<line_sep>deserialized_aug_data=deserialized_aug(image=float_image)<assert_stmt>np.array_equal(aug_data["image"] deserialized_aug_data["image"])<block_end>@pytest.mark.parametrize("seed" TEST_SEEDS)<def_stmt>test_transform_pipeline_serialization seed image mask<block_start>aug=A.Compose([A.OneOrOther(A.Compose([A.Resize(1024 1024) A.RandomSizedCrop(min_max_height=(256 1024) height=512 width=512 p=1) A.OneOf([A.RandomSizedCrop(min_max_height=(256 512) height=384 width=384 p=0.5) A.RandomSizedCrop(min_max_height=(256 512) height=512 width=512 p=0.5) ]) ]) A.Compose([A.Resize(1024 1024) A.RandomSizedCrop(min_max_height=(256 1025) height=256 width=256 p=1) A.OneOf([A.HueSaturationValue(p=0.5) A.RGBShift(p=0.7)] p=1) ]) ) A.SomeOf([A.HorizontalFlip(p=1) A.Transpose(p=1) A.HueSaturationValue(p=0.5) A.RandomBrightnessContrast(p=0.5) ] 2 replace=<false> ) ])<line_sep>serialized_aug=A.to_dict(aug)<line_sep>deserialized_aug=A.from_dict(serialized_aug)<line_sep>set_seed(seed)<line_sep>aug_data=aug(image=image mask=mask)<line_sep>set_seed(seed)<line_sep>deserialized_aug_data=deserialized_aug(image=image mask=mask)<assert_stmt>np.array_equal(aug_data["image"] deserialized_aug_data["image"])<assert_stmt>np.array_equal(aug_data["mask"] deserialized_aug_data["mask"])<block_end>@pytest.mark.parametrize(["bboxes" "bbox_format" "labels"] [([(20 30 40 50)] "coco" [1]) ([(20 30 40 50 99) (10 40 30 20 9)] "coco" [1 2]) ([(20 30 60 80)] "pascal_voc" [2]) ([(20 30 60 80 99)] "pascal_voc" [1]) ([(0.2 0.3 0.4 0.5)] "yolo" [2]) ([(0.2 0.3 0.4 0.5 99)] "yolo" [1]) ] )@pytest.mark.parametrize("seed" TEST_SEEDS)<def_stmt>test_transform_pipeline_serialization_with_bboxes seed image bboxes bbox_format labels<block_start>aug=A.Compose([A.OneOrOther(A.Compose([A.RandomRotate90() A.OneOf([A.HorizontalFlip(p=0.5) A.VerticalFlip(p=0.5)])]) A.Compose([A.Rotate(p=0.5) A.OneOf([A.HueSaturationValue(p=0.5) A.RGBShift(p=0.7)] p=1)]) ) A.SomeOf([A.HorizontalFlip(p=1) A.Transpose(p=1) A.HueSaturationValue(p=0.5) A.RandomBrightnessContrast(p=0.5) ] n=5 ) ] bbox_params={"format":bbox_format "label_fields":["labels"]} )<line_sep>serialized_aug=A.to_dict(aug)<line_sep>deserialized_aug=A.from_dict(serialized_aug)<line_sep>set_seed(seed)<line_sep>aug_data=aug(image=image bboxes=bboxes labels=labels)<line_sep>set_seed(seed)<line_sep>deserialized_aug_data=deserialized_aug(image=image bboxes=bboxes labels=labels)<assert_stmt>np.array_equal(aug_data["image"] deserialized_aug_data["image"])<assert_stmt>np.array_equal(aug_data["bboxes"] deserialized_aug_data["bboxes"])<block_end>@pytest.mark.parametrize(["keypoints" "keypoint_format" "labels"] [([(20 30 40 50)] "xyas" [1]) ([(20 30 40 50 99) (10 40 30 20 9)] "xy" [1 2]) ([(20 30 60 80)] "yx" [2]) ([(20 30 60 80 99)] "xys" [1]) ] )@pytest.mark.parametrize("seed" TEST_SEEDS)<def_stmt>test_transform_pipeline_serialization_with_keypoints seed image keypoints keypoint_format labels<block_start>aug=A.Compose([A.OneOrOther(A.Compose([A.RandomRotate90() A.OneOf([A.HorizontalFlip(p=0.5) A.VerticalFlip(p=0.5)])]) A.Compose([A.Rotate(p=0.5) A.OneOf([A.HueSaturationValue(p=0.5) A.RGBShift(p=0.7)] p=1)]) ) A.SomeOf(n=2 transforms=[A.HorizontalFlip(p=1) A.Transpose(p=1) A.HueSaturationValue(p=0.5) A.RandomBrightnessContrast(p=0.5) ] replace=<false> ) ] keypoint_params={"format":keypoint_format "label_fields":["labels"]} 
)<line_sep>serialized_aug=A.to_dict(aug)<line_sep>deserialized_aug=A.from_dict(serialized_aug)<line_sep>set_seed(seed)<line_sep>aug_data=aug(image=image keypoints=keypoints labels=labels)<line_sep>set_seed(seed)<line_sep>deserialized_aug_data=deserialized_aug(image=image keypoints=keypoints labels=labels)<assert_stmt>np.array_equal(aug_data["image"] deserialized_aug_data["image"])<assert_stmt>np.array_equal(aug_data["keypoints"] deserialized_aug_data["keypoints"])<block_end>@pytest.mark.parametrize(["augmentation_cls" "params"] get_image_only_transforms(except_augmentations={A.HistogramMatching A.FDA A.PixelDistributionAdaptation A.TemplateTransform} ) )@pytest.mark.parametrize("seed" TEST_SEEDS)<def_stmt>test_additional_targets_for_image_only_serialization augmentation_cls params image seed<block_start>aug=A.Compose([augmentation_cls(always_apply=<true> **params)] additional_targets={"image2":"image"})<line_sep>image2=image.copy()<line_sep>serialized_aug=A.to_dict(aug)<line_sep>deserialized_aug=A.from_dict(serialized_aug)<line_sep>set_seed(seed)<line_sep>aug_data=aug(image=image image2=image2)<line_sep>set_seed(seed)<line_sep>deserialized_aug_data=deserialized_aug(image=image image2=image2)<assert_stmt>np.array_equal(aug_data["image"] deserialized_aug_data["image"])<assert_stmt>np.array_equal(aug_data["image2"] deserialized_aug_data["image2"])<block_end>@pytest.mark.parametrize("seed" TEST_SEEDS)@pytest.mark.parametrize("p" [1])<def_stmt>test_lambda_serialization image mask albumentations_bboxes keypoints seed p<block_start><def_stmt>vflip_image image **kwargs<block_start><return>F.vflip(image)<block_end><def_stmt>vflip_mask mask **kwargs<block_start><return>F.vflip(mask)<block_end><def_stmt>vflip_bbox bbox **kwargs<block_start><return>F.bbox_vflip(bbox **kwargs)<block_end><def_stmt>vflip_keypoint keypoint **kwargs<block_start><return>F.keypoint_vflip(keypoint **kwargs)<block_end>aug=A.Lambda(name="vflip" image=vflip_image mask=vflip_mask bbox=vflip_bbox keypoint=vflip_keypoint p=p)<line_sep>serialized_aug=A.to_dict(aug)<line_sep>deserialized_aug=A.from_dict(serialized_aug lambda_transforms={"vflip":aug})<line_sep>set_seed(seed)<line_sep>aug_data=aug(image=image mask=mask bboxes=albumentations_bboxes keypoints=keypoints)<line_sep>set_seed(seed)<line_sep>deserialized_aug_data=deserialized_aug(image=image mask=mask bboxes=albumentations_bboxes keypoints=keypoints)<assert_stmt>np.array_equal(aug_data["image"] deserialized_aug_data["image"])<assert_stmt>np.array_equal(aug_data["mask"] deserialized_aug_data["mask"])<assert_stmt>np.array_equal(aug_data["bboxes"] deserialized_aug_data["bboxes"])<assert_stmt>np.array_equal(aug_data["keypoints"] deserialized_aug_data["keypoints"])<block_end><def_stmt>test_serialization_v2_conversion_without_totensor <block_start>current_directory=os.path.dirname(os.path.abspath(__file__))<line_sep>files_directory=os.path.join(current_directory "files")<line_sep>transform_1_1_0=A.load(os.path.join(files_directory "transform_v1.1.0_without_totensor.json"))<with_stmt>open(os.path.join(files_directory "output_v1.1.0_without_totensor.json"))<as>f<block_start>output_1_1_0=json.load(f)<block_end>np.random.seed(42)<line_sep>image=np.random.randint(low=0 high=255 size=(256 256 3) dtype=np.uint8)<line_sep>random.seed(42)<line_sep>transformed_image=transform_1_1_0(image=image)["image"]<assert_stmt>transformed_image.tolist()<eq>output_1_1_0<block_end>@skipif_no_torch<def_stmt>test_serialization_v2_conversion_with_totensor 
<block_start>current_directory=os.path.dirname(os.path.abspath(__file__))<line_sep>files_directory=os.path.join(current_directory "files")<line_sep>transform_1_1_0=A.load(os.path.join(files_directory "transform_v1.1.0_with_totensor.json"))<with_stmt>open(os.path.join(files_directory "output_v1.1.0_with_totensor.json"))<as>f<block_start>output_1_1_0=json.load(f)<block_end>np.random.seed(42)<line_sep>image=np.random.randint(low=0 high=255 size=(256 256 3) dtype=np.uint8)<line_sep>random.seed(42)<line_sep>transformed_image=transform_1_1_0(image=image)["image"]<assert_stmt>transformed_image.numpy().tolist()<eq>output_1_1_0<block_end><def_stmt>test_serialization_v2_without_totensor <block_start>current_directory=os.path.dirname(os.path.abspath(__file__))<line_sep>files_directory=os.path.join(current_directory "files")<line_sep>transform=A.load(os.path.join(files_directory "transform_serialization_v2_without_totensor.json"))<with_stmt>open(os.path.join(files_directory "output_v1.1.0_without_totensor.json"))<as>f<block_start>output_1_1_0=json.load(f)<block_end>np.random.seed(42)<line_sep>image=np.random.randint(low=0 high=255 size=(256 256 3) dtype=np.uint8)<line_sep>random.seed(42)<line_sep>transformed_image=transform(image=image)["image"]<assert_stmt>transformed_image.tolist()<eq>output_1_1_0<block_end>@skipif_no_torch<def_stmt>test_serialization_v2_with_totensor <block_start>current_directory=os.path.dirname(os.path.abspath(__file__))<line_sep>files_directory=os.path.join(current_directory "files")<line_sep>transform=A.load(os.path.join(files_directory "transform_serialization_v2_with_totensor.json"))<with_stmt>open(os.path.join(files_directory "output_v1.1.0_with_totensor.json"))<as>f<block_start>output_1_1_0=json.load(f)<block_end>np.random.seed(42)<line_sep>image=np.random.randint(low=0 high=255 size=(256 256 3) dtype=np.uint8)<line_sep>random.seed(42)<line_sep>transformed_image=transform(image=image)["image"]<assert_stmt>transformed_image.numpy().tolist()<eq>output_1_1_0<block_end><def_stmt>test_custom_transform_with_overlapping_name <block_start><class_stmt>HorizontalFlip(ImageOnlyTransform)<block_start><pass><block_end><assert_stmt>SERIALIZABLE_REGISTRY["HorizontalFlip"]<eq>A.HorizontalFlip<assert_stmt>SERIALIZABLE_REGISTRY["tests.test_serialization.HorizontalFlip"]<eq>HorizontalFlip<block_end><def_stmt>test_serialization_v2_to_dict <block_start>transform=A.Compose([A.HorizontalFlip()])<line_sep>transform_dict=A.to_dict(transform)["transform"]<assert_stmt>transform_dict<eq>{"__class_fullname__":"Compose" "p":1.0 "transforms":[{"__class_fullname__":"HorizontalFlip" "always_apply":<false> "p":0.5}] "bbox_params":<none> "keypoint_params":<none> "additional_targets":{} }<block_end>@pytest.mark.parametrize(["class_fullname" "expected_short_class_name"] [["albumentations.augmentations.transforms.HorizontalFlip" "HorizontalFlip"] ["HorizontalFlip" "HorizontalFlip"] ["some_module.HorizontalFlip" "some_module.HorizontalFlip"] ] )<def_stmt>test_shorten_class_name class_fullname expected_short_class_name<block_start><assert_stmt>shorten_class_name(class_fullname)<eq>expected_short_class_name<block_end>@pytest.mark.parametrize("seed" TEST_SEEDS)@pytest.mark.parametrize("p" [1])<def_stmt>test_template_transform_serialization image template seed p<block_start>template_transform=A.TemplateTransform(name="template" templates=template p=p)<line_sep>aug=A.Compose([A.Flip() template_transform A.Blur()])<line_sep>serialized_aug=A.to_dict(aug)<line_sep>deserialized_aug=A.from_dict(serialized_aug 
lambda_transforms={"template":template_transform})<line_sep>set_seed(seed)<line_sep>aug_data=aug(image=image)<line_sep>set_seed(seed)<line_sep>deserialized_aug_data=deserialized_aug(image=image)<assert_stmt>np.array_equal(aug_data["image"] deserialized_aug_data["image"])<block_end>
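A minimal stand-alone sketch of the serialization API these tests exercise (A.to_dict / A.from_dict and A.save / A.load); the pipeline, seed handling, and file path here are illustrative and not taken from the test suite.

import numpy as np
import albumentations as A

transform = A.Compose([A.HorizontalFlip(p=0.5), A.RandomBrightnessContrast(p=0.3)])

# dict round trip: the deserialized pipeline should behave identically under the same seed
restored = A.from_dict(A.to_dict(transform))

# file round trip: JSON is the default data format; YAML is also covered by the tests above
A.save(transform, '/tmp/transform.json')
reloaded = A.load('/tmp/transform.json')

image = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)
out = restored(image=image)['image']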
<import_from_stmt>typing Tuple<import_stmt>numpy<as>np<import_from_stmt>...core ContinuousParameter InformationSourceParameter ParameterSpace<import_from_stmt>...core.loop.user_function MultiSourceFunctionWrapper<def_stmt>multi_fidelity_hartmann_3d <arrow>Tuple[MultiSourceFunctionWrapper ParameterSpace]<block_start>r""" The function is given by: .. math:: f(x, \alpha) = -\sum_{i=1}^{4} \alpha_i \exp \left( -\sum_{j=1}^{3} A_{i,j}\left( x_j - P_{i, j} \right)^2 \right) where .. math:: \mathbf{A} = \begin{bmatrix} 3.0 & 10 & 30 \\ 0.1 & 10 & 35 \\ 3.0 & 10 & 30 \\ 0.1 & 10 & 35 \end{bmatrix} .. math:: \mathbf{P} = 10^{-4} \begin{bmatrix} 3689 & 1170 & 2673 \\ 4699 & 4387 & 7470 \\ 1091 & 8732 & 5547 \\ 381 & 5743 & 8828 \end{bmatrix} The high fidelity function is given by setting: .. math:: \alpha = (1.0, 1.2, 3.0, 3.2)^T The middle fidelity is given by setting: .. math:: \alpha = (1.01, 1.19, 2.9, 3.3)^T The low fidelity is given by setting: .. math:: \alpha = (1.02, 1.18, 2.8, 3.4)^T The domain is given by: .. math:: \mathbf{x}_i \in (0, 1) Reference: https://www.sfu.ca/~ssurjano/hart3.html :return: Tuple of MultiSourceFunctionWrapper and ParameterSpace """<line_sep>A=np.array([[3 10 30] [0.1 10 35] [3 10 30] [0.1 10 35]])<line_sep>P=1e-4<times>np.array([[3689 1170 2673] [4699 4387 7470] [1091 8732 5547] [381 5743 8828]])<line_sep>alpha=np.array([1.0 1.2 3.0 3.2])<line_sep>delta=np.array([0.01 -0.01 -0.1 0.1])<def_stmt>high x<block_start>res=0<for_stmt>i range(4)<block_start>temp=0<for_stmt>j range(3)<block_start>temp<augsub>A[i][j]<times>np.power(x[: j]-P[i][j] 2)<block_end>res<augadd>alpha[i]<times>np.exp(temp)<block_end><return>res[: <none>]<block_end><def_stmt>medium x<block_start>alpha_m=alpha+delta<line_sep>res=0<for_stmt>i range(4)<block_start>temp=0<for_stmt>j range(3)<block_start>temp<augsub>A[i][j]<times>np.power(x[: j]-P[i][j] 2)<block_end>res<augadd>alpha_m[i]<times>np.exp(temp)<block_end><return>res[: <none>]<block_end><def_stmt>low x<block_start>alpha_l=alpha+2<times>delta<line_sep>res=0<for_stmt>i range(4)<block_start>temp=0<for_stmt>j range(3)<block_start>temp<augsub>A[i][j]<times>np.power(x[: j]-P[i][j] 2)<block_end>res<augadd>alpha_l[i]<times>np.exp(temp)<block_end><return>res[: <none>]<block_end>space=ParameterSpace([ContinuousParameter("x1" 0.0 1.0) ContinuousParameter("x2" 0.0 1.0) ContinuousParameter("x3" 0.0 1.0) InformationSourceParameter(3) ])<line_sep>fcn_wrapper=MultiSourceFunctionWrapper([low medium high])<line_sep><return>fcn_wrapper space<block_end>
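A hypothetical usage sketch for the multi-fidelity Hartmann 3D benchmark above. The input layout (x1, x2, x3 in (0, 1) plus a trailing information-source index, with fidelities ordered low=0, medium=1, high=2) follows the code; the query points, and the assumption that the returned MultiSourceFunctionWrapper exposes the usual emukit evaluate() interface, are illustrative.

import numpy as np

fcn, space = multi_fidelity_hartmann_3d()

# two queries at the same location: one at the low fidelity (index 0), one at the high (index 2)
X = np.array([
    [0.1, 0.55, 0.8, 0],
    [0.1, 0.55, 0.8, 2],
])
results = fcn.evaluate(X)        # assumed emukit UserFunction-style interface
print([r.Y for r in results])    # one result object per query row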
<import_from_stmt>.vnnsttd TdApi<import_from_stmt>.nst_constant *<line_sep>
# MktdataPublisher.py <import_from_future_stmt> print_function<import_from_future_stmt> absolute_import<import_stmt>time<import_from_stmt>optparse OptionParser OptionValueError<import_stmt>datetime<import_stmt>threading<import_stmt>os<import_stmt>platform<as>plat<import_stmt>sys<if_stmt>sys.version_info<ge>(3 8)<and>plat.system().lower()<eq>"windows"# pylint: disable=no-member <block_start><with_stmt>os.add_dll_directory(os.getenv('BLPAPI_LIBDIR'))<block_start><import_stmt>blpapi<block_end><block_end><else_stmt><block_start><import_stmt>blpapi<block_end>PERMISSION_REQUEST=blpapi.Name("PermissionRequest")<line_sep>RESOLUTION_SUCCESS=blpapi.Name("ResolutionSuccess")<line_sep>SESSION_TERMINATED=blpapi.Name("SessionTerminated")<line_sep>TOPICS=blpapi.Name("topics")<line_sep>TOPIC_CREATED=blpapi.Name("TopicCreated")<line_sep>TOPIC_SUBSCRIBED=blpapi.Name("TopicSubscribed")<line_sep>TOPIC_UNSUBSCRIBED=blpapi.Name("TopicUnsubscribed")<line_sep>TOPIC_RECAP=blpapi.Name("TopicRecap")<class_stmt>MyStream(object)<block_start><def_stmt>__init__ self sid="" fields=<none><block_start>self.id=sid<line_sep>self.fields=fields<if>fields<else>[]<line_sep>self.lastValue=0<line_sep>self.topic=blpapi.Topic()<line_sep>self.isSubscribed=<false><block_end><def_stmt>fillData self eventFormatter elementDef<block_start><for_stmt>i,f enumerate(self.fields)<block_start><if_stmt><not>elementDef.typeDefinition().hasElementDefinition(f)<block_start>print("Invalid field '%s'"%f)<line_sep><continue><block_end>fieldDef=elementDef.typeDefinition().getElementDefinition(f)<line_sep>fieldType=fieldDef.typeDefinition().datatype()<line_sep>value=<none><if_stmt>fieldType<eq>blpapi.DataType.BOOL<block_start>value=bool((self.lastValue+i)%2<eq>0)<block_end><elif_stmt>fieldType<eq>blpapi.DataType.CHAR<block_start>value=chr((self.lastValue+i)%100+32)<block_end><elif_stmt>fieldType<eq>blpapi.DataType.INT32<or>fieldType<eq>blpapi.DataType.INT64<block_start>value=self.lastValue+i<block_end><elif_stmt>fieldType<eq>blpapi.DataType.FLOAT32<or>fieldType<eq>blpapi.DataType.FLOAT64<block_start>value=(self.lastValue+i)<times>1.1<block_end><elif_stmt>fieldType<eq>blpapi.DataType.STRING<block_start>value="S%d"%(self.lastValue+i)<block_end><elif_stmt>fieldType<eq>blpapi.DataType.DATE<or>fieldType<eq>blpapi.DataType.TIME<or>fieldType<eq>blpapi.DataType.DATETIME<block_start>value=datetime.datetime.today()<line_sep>value.replace(day=(self.lastValue/100)%28+1)<line_sep>value.replace(microsecond=i<times>1000)<block_end>eventFormatter.setElement(f value)<block_end><block_end><def_stmt>fillDataNull self eventFormatter elementDef<block_start><for_stmt>f self.fields<block_start><if_stmt><not>elementDef.typeDefinition().hasElementDefinition(f)<block_start>print("Invalid field '%s'"%f)<line_sep><continue><block_end>fieldDef=elementDef.typeDefinition().getElementDefinition(f)<if_stmt>fieldDef.typeDefinition().isSimpleType()# Publishing NULL value <block_start>eventFormatter.setElementNull(f)<block_end><block_end><block_end><def_stmt>next self<block_start>self.lastValue<augadd>1<block_end><def_stmt>isAvailable self<block_start><return>self.topic.isValid()<and>self.isSubscribed<block_end><block_end><class_stmt>MyEventHandler(object)<block_start><def_stmt>__init__ self serviceName messageType fields eids resolveSubServiceCode mutex stop 
condition<block_start>self.serviceName=serviceName<line_sep>self.messageType=messageType<line_sep>self.fields=fields<line_sep>self.eids=eids<line_sep>self.resolveSubServiceCode=resolveSubServiceCode<line_sep>self.mutex=mutex<line_sep>self.stop=stop<line_sep>self.condition=condition<line_sep>self.streams=dict()<line_sep>self.availableTopicCount=0<block_end><def_stmt>processEvent self event session<block_start><if_stmt>event.eventType()<eq>blpapi.Event.SESSION_STATUS<block_start><for_stmt>msg event<block_start>print(msg)<if_stmt>msg.messageType()<eq>SESSION_TERMINATED<block_start>self.stop.set()<block_end><block_end><block_end><elif_stmt>event.eventType()<eq>blpapi.Event.TOPIC_STATUS<block_start>topicList=blpapi.TopicList()<for_stmt>msg event<block_start>print(msg)<if_stmt>msg.messageType()<eq>TOPIC_SUBSCRIBED<block_start>topicStr=msg.getElementAsString("topic")<with_stmt>self.mutex<block_start><if_stmt>topicStr<not><in>self.streams# TopicList knows how to add an entry based on a # TOPIC_SUBSCRIBED message. <block_start>topicList.add(msg)<line_sep>self.streams[topicStr]=MyStream(topicStr self.fields)<block_end>stream=self.streams[topicStr]<line_sep>stream.isSubscribed=<true><if_stmt>stream.isAvailable()<block_start>self.availableTopicCount<augadd>1<line_sep>self.condition.notifyAll()<block_end><block_end><block_end><elif_stmt>msg.messageType()<eq>TOPIC_UNSUBSCRIBED<block_start>topicStr=msg.getElementAsString("topic")<with_stmt>self.mutex<block_start><if_stmt>topicStr<not><in>self.streams# We should never be coming here. # TOPIC_UNSUBSCRIBED can not come before # a TOPIC_SUBSCRIBED or TOPIC_CREATED <block_start><continue><block_end>stream=self.streams[topicStr]<if_stmt>stream.isAvailable()<block_start>self.availableTopicCount<augsub>1<line_sep>self.condition.notifyAll()<block_end>stream.isSubscribed=<false><block_end><block_end><elif_stmt>msg.messageType()<eq>TOPIC_CREATED<block_start>topicStr=msg.getElementAsString("topic")<with_stmt>self.mutex<block_start><if_stmt>topicStr<not><in>self.streams<block_start>self.streams[topicStr]=MyStream(topicStr self.fields)<block_end>stream=self.streams[topicStr]<try_stmt><block_start>stream.topic=session.getTopic(msg)<block_end><except_stmt>blpapi.Exception<as>e<block_start>print("Exception while processing "<concat>"TOPIC_CREATED: %s"%e)<line_sep><continue><block_end><if_stmt>stream.isAvailable()<block_start>self.availableTopicCount<augadd>1<line_sep>self.condition.notifyAll()<block_end><block_end><block_end><elif_stmt>msg.messageType()<eq>TOPIC_RECAP# Here we send a recap in response to a Recap request. 
<block_start><try_stmt><block_start>topicStr=msg.getElementAsString("topic")<line_sep>recapEvent=<none><with_stmt>self.mutex<block_start><if_stmt>topicStr<not><in>self.streams<block_start><continue><block_end>stream=self.streams[topicStr]<if_stmt><not>stream.isAvailable()<block_start><continue><block_end>topic=session.getTopic(msg)<line_sep>service=topic.service()<line_sep>recapCid=msg.correlationIds()[0]<line_sep>recapEvent=service.createPublishEvent()<line_sep>elementDef=service.getEventDefinition(self.messageType)<line_sep>eventFormatter=blpapi.EventFormatter(recapEvent)<line_sep>eventFormatter.appendRecapMessage(topic recapCid)<line_sep>stream.fillData(eventFormatter elementDef)<block_end>session.publish(recapEvent)<block_end><except_stmt>blpapi.Exception<as>e<block_start>print("Exception while processing TOPIC_RECAP: %s"%e)<line_sep><continue><block_end><block_end><block_end><if_stmt>topicList.size()<g>0# createTopicsAsync will result in RESOLUTION_STATUS, # TOPIC_CREATED events. <block_start>session.createTopicsAsync(topicList)<block_end><block_end><elif_stmt>event.eventType()<eq>blpapi.Event.RESOLUTION_STATUS<block_start><for_stmt>msg event<block_start>print(msg)<block_end><block_end><elif_stmt>event.eventType()<eq>blpapi.Event.REQUEST<block_start>service=session.getService(self.serviceName)<for_stmt>msg event<block_start>print(msg)<if_stmt>msg.messageType()<eq>PERMISSION_REQUEST# Similar to createPublishEvent. We assume just one # service - self.serviceName. A responseEvent can only be # for single request so we can specify the correlationId - # which establishes context - when we create the Event. <block_start>response=service.createResponseEvent(msg.correlationIds()[0])<line_sep>permission=1# ALLOWED: 0, DENIED: 1 ef=blpapi.EventFormatter(response)<if_stmt>msg.hasElement("uuid")<block_start>msg.getElementAsInteger("uuid")<line_sep>permission=0<block_end><if_stmt>msg.hasElement("applicationId")<block_start>msg.getElementAsInteger("applicationId")<line_sep>permission=0<block_end># In appendResponse the string is the name of the # operation, the correlationId indicates which request we # are responding to. ef.appendResponse("PermissionResponse")<line_sep>ef.pushElement("topicPermissions")<line_sep># For each of the topics in the request, add an entry to # the response. 
topicsElement=msg.getElement(TOPICS).values()<for_stmt>topic topicsElement<block_start>ef.appendElement()<line_sep>ef.setElement("topic" topic)<if_stmt>self.resolveSubServiceCode<block_start><try_stmt><block_start>ef.setElement("subServiceCode" self.resolveSubServiceCode)<line_sep>print(("Mapping topic %s to subServiceCode %s"%(topic self.resolveSubServiceCode)))<block_end><except_stmt>blpapi.Exception<block_start>print("subServiceCode could not be set."<concat>" Resolving without subServiceCode")<block_end><block_end>ef.setElement("result" permission)<if_stmt>permission<eq>1# DENIED <block_start>ef.pushElement("reason")<line_sep>ef.setElement("source" "My Publisher Name")<line_sep>ef.setElement("category" "NOT_AUTHORIZED")<line_sep>ef.setElement("subcategory" "Publisher Controlled")<line_sep>ef.setElement("description" "Permission denied by My Publisher Name")<line_sep>ef.popElement()<block_end><elif_stmt>self.eids<block_start>ef.pushElement("permissions")<line_sep>ef.appendElement()<line_sep>ef.setElement("permissionService" "//blp/blpperm")<line_sep>ef.pushElement("eids")<for_stmt>e self.eids<block_start>ef.appendValue(e)<block_end>ef.popElement()<line_sep>ef.popElement()<line_sep>ef.popElement()<block_end>ef.popElement()<block_end>ef.popElement()<line_sep># Service is implicit in the Event. sendResponse has a # second parameter - partialResponse - that defaults to # false. session.sendResponse(response)<block_end><block_end><block_end><else_stmt><block_start><for_stmt>msg event<block_start>print(msg)<block_end><block_end><return><true><block_end><block_end><def_stmt>authOptionCallback _option _opt value parser<block_start>"""Parse authorization options from user input"""<line_sep>vals=value.split('=' 1)<if_stmt>value<eq>"user"<block_start>authUser=blpapi.AuthUser.createWithLogonName()<line_sep>authOptions=blpapi.AuthOptions.createWithUser(authUser)<block_end><elif_stmt>value<eq>"none"<block_start>authOptions=<none><block_end><elif_stmt>vals[0]<eq>"app"<and>len(vals)<eq>2<block_start>appName=vals[1]<line_sep>authOptions=blpapi.AuthOptions.createWithApp(appName)<block_end><elif_stmt>vals[0]<eq>"userapp"<and>len(vals)<eq>2<block_start>appName=vals[1]<line_sep>authUser=blpapi.AuthUser.createWithLogonName()<line_sep>authOptions=blpapi.AuthOptions.createWithUserAndApp(authUser appName)<block_end><elif_stmt>vals[0]<eq>"dir"<and>len(vals)<eq>2<block_start>activeDirectoryProperty=vals[1]<line_sep>authUser=blpapi.AuthUser.createWithActiveDirectoryProperty(activeDirectoryProperty)<line_sep>authOptions=blpapi.AuthOptions.createWithUser(authUser)<block_end><elif_stmt>vals[0]<eq>"manual"<block_start>parts=[]<if_stmt>len(vals)<eq>2<block_start>parts=vals[1].split(',')<block_end><if_stmt>len(parts)<ne>3<block_start><raise>OptionValueError("Invalid auth option {}".format(value))<block_end>appName,ip,userId=parts<line_sep>authUser=blpapi.AuthUser.createWithManualOptions(userId ip)<line_sep>authOptions=blpapi.AuthOptions.createWithUserAndApp(authUser appName)<block_end><else_stmt><block_start><raise>OptionValueError("Invalid auth option '{}'".format(value))<block_end>parser.values.auth={'option':authOptions}<block_end><def_stmt>parseCmdLine <block_start>parser=OptionParser(description="Publish market data.")<line_sep>parser.add_option("-a" "--ip" dest="hosts" help="server name or IP (default: localhost)" metavar="ipAddress" action="append" default=[])<line_sep>parser.add_option("-p" dest="port" type="int" help="server port (default: %default)" metavar="tcpPort" 
default=8194)<line_sep>parser.add_option("-s" dest="service" help="service name (default: %default)" metavar="service" default="//viper/mktdata")<line_sep>parser.add_option("-f" dest="fields" help="field to subscribe to (default: LAST_PRICE)" metavar="field" action="append" default=[])<line_sep>parser.add_option("-m" dest="messageType" help="type of published event (default: %default)" metavar="messageType" default="MarketDataEvents")<line_sep>parser.add_option("-e" dest="eids" help="permission eid for all subscriptions" metavar="EID" action="append" default=[])<line_sep>parser.add_option("-g" dest="groupId" help="publisher groupId (defaults to unique value)" metavar="groupId")<line_sep>parser.add_option("-r" "--pri" type="int" dest="priority" help="set publisher priority level (default: %default)" metavar="priority" default=10)<line_sep>parser.add_option("-c" type="int" dest="clearInterval" help="number of events after which cache will be "<concat>"cleared (default: 0 i.e cache never cleared)" metavar="clearInterval" default=0)<line_sep>parser.add_option("--auth" dest="auth" help="authentication option: "<concat>"user|none|app=<app>|userapp=<app>|dir=<property>"<concat>"|manual=<app,ip,user>"<concat>" (default: user)\n"<concat>"'none' is applicable to Desktop API product "<concat>"that requires Bloomberg Professional service "<concat>"to be installed locally." metavar="option" action="callback" callback=authOptionCallback type="string" default={"option":blpapi.AuthOptions.createWithUser(blpapi.AuthUser.createWithLogonName())})<line_sep>parser.add_option("--ssc" dest="ssc" help="active sub-service code option: "<concat>"<begin>,<end>,<priority>" metavar="ssc" default="")<line_sep>parser.add_option("--rssc" dest="rssc" help="sub-service code to be used in resolves" metavar="rssc" default="")<line_sep>(options _)=parser.parse_args()<if_stmt><not>options.hosts<block_start>options.hosts=["localhost"]<block_end><if_stmt><not>options.fields<block_start>options.fields=["LAST_PRICE"]<block_end><return>options<block_end><def_stmt>activate options session<block_start><if_stmt>options.ssc<block_start>sscBegin,sscEnd,sscPriority=map(int options.ssc.split(","))<line_sep>print(("Activating sub service code range [%s, %s] @ %s"%(sscBegin sscEnd sscPriority)))<line_sep>session.activateSubServiceCodeRange(options.service sscBegin sscEnd sscPriority)<block_end><block_end><def_stmt>deactivate options session<block_start><if_stmt>options.ssc<block_start>sscBegin,sscEnd,sscPriority=map(int options.ssc.split(","))<line_sep>print(("DeActivating sub service code range [%s, %s] @ %s"%(sscBegin sscEnd sscPriority)))<line_sep>session.deactivateSubServiceCodeRange(options.service sscBegin sscEnd)<block_end><block_end><def_stmt>main <block_start>options=parseCmdLine()<line_sep># Fill SessionOptions sessionOptions=blpapi.SessionOptions()<for_stmt>idx,host enumerate(options.hosts)<block_start>sessionOptions.setServerAddress(host options.port idx)<block_end>sessionOptions.setSessionIdentityOptions(options.auth['option'])<line_sep>sessionOptions.setAutoRestartOnDisconnection(<true>)<line_sep># NOTE: If running without a backup server, make many attempts to # connect/reconnect to give that host a chance to come back up (the # larger the number, the longer it will take for SessionStartupFailure # to come on startup, or SessionTerminated due to inability to fail # over). 
We don't have to do that in a redundant configuration - it's # expected at least one server is up and reachable at any given time, # so only try to connect to each server once. sessionOptions.setNumStartAttempts(1<if>len(options.hosts)<g>1<else>1000)<line_sep>print("Connecting to port %d on %s"%(options.port " ".join(options.hosts)))<line_sep>PUBLISH_MESSAGE_TYPE=blpapi.Name(options.messageType)<line_sep>mutex=threading.Lock()<line_sep>stop=threading.Event()<line_sep>condition=threading.Condition(mutex)<line_sep>myEventHandler=MyEventHandler(options.service PUBLISH_MESSAGE_TYPE options.fields options.eids options.rssc mutex stop condition)<line_sep># Create a Session session=blpapi.ProviderSession(sessionOptions myEventHandler.processEvent)<line_sep># Start a Session <if_stmt><not>session.start()<block_start>print("Failed to start session.")<line_sep><return><block_end>serviceOptions=blpapi.ServiceRegistrationOptions()<if_stmt>options.groupId<is><not><none><block_start>serviceOptions.setGroupId(options.groupId)<block_end>serviceOptions.setServicePriority(options.priority)<if_stmt>options.ssc<block_start>sscBegin,sscEnd,sscPriority=map(int options.ssc.split(","))<line_sep>print(("Adding active sub service code range [%s, %s] @ %s"%(sscBegin sscEnd sscPriority)))<try_stmt><block_start>serviceOptions.addActiveSubServiceCodeRange(sscBegin sscEnd sscPriority)<block_end><except_stmt>blpapi.Exception<as>e<block_start>print(("FAILED to add active sub service codes."<concat>" Exception %s"%e.description()))<block_end><block_end><try_stmt><block_start><if_stmt><not>session.registerService(options.service session.getAuthorizedIdentity() serviceOptions)<block_start>print("Failed to register '%s'"%options.service)<line_sep><return><block_end>service=session.getService(options.service)<line_sep>elementDef=service.getEventDefinition(PUBLISH_MESSAGE_TYPE)<line_sep>eventCount=0<line_sep>numPublished=0<while_stmt><not>stop.is_set()<block_start>event=service.createPublishEvent()<with_stmt>condition<block_start><while_stmt>myEventHandler.availableTopicCount<eq>0# Set timeout to 1 - give a chance for CTRL-C <block_start>condition.wait(1)<if_stmt>stop.is_set()<block_start><return><block_end><block_end>publishNull=<false><if_stmt>(options.clearInterval<g>0<and>eventCount<eq>options.clearInterval)<block_start>eventCount=0<line_sep>publishNull=<true><block_end>eventFormatter=blpapi.EventFormatter(event)<for_stmt>_,stream myEventHandler.streams.items()<block_start><if_stmt><not>stream.isAvailable()<block_start><continue><block_end>eventFormatter.appendMessage(PUBLISH_MESSAGE_TYPE stream.topic)<if_stmt>publishNull<block_start>stream.fillDataNull(eventFormatter elementDef)<block_end><else_stmt><block_start>eventCount<augadd>1<line_sep>stream.next()<line_sep>stream.fillData(eventFormatter elementDef)<block_end><block_end><block_end><for_stmt>msg event<block_start>print(msg)<block_end>session.publish(event)<line_sep>time.sleep(1)<line_sep>numPublished<augadd>1<if_stmt>numPublished%10<eq>0<block_start>deactivate(options session)<line_sep>time.sleep(30)<line_sep>activate(options session)<block_end><block_end><block_end><finally_stmt># Stop the session <block_start>session.stop()<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>print("MktdataPublisher")<try_stmt><block_start>main()<block_end><except_stmt>KeyboardInterrupt<block_start>print("Ctrl+C pressed. Stopping...")<block_end><block_end>__copyright__=""" Copyright 2012. Bloomberg Finance L.P. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """<line_sep>
# -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2018-2019 Fetch.AI Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------------ """This module contains the tests of the http protocol package."""<import_stmt>sys<import_from_stmt>typing Type<import_from_stmt>unittest mock<import_stmt>pytest<import_from_stmt>aea.common Address<import_from_stmt>aea.exceptions AEAEnforceError<import_from_stmt>aea.mail.base Envelope<import_from_stmt>aea.protocols.base Message<import_from_stmt>aea.protocols.dialogue.base Dialogue<as>BaseDialogue<import_from_stmt>aea.protocols.dialogue.base DialogueLabel<import_stmt>packages<import_from_stmt>packages.fetchai.protocols.http.dialogues HttpDialogue HttpDialogues<import_from_stmt>packages.fetchai.protocols.http.message HttpMessage<import_from_stmt>packages.fetchai.protocols.http.message _default_logger<as>http_message_logger <import_from_stmt>tests.conftest ROOT_DIR<line_sep>sys.path.append(ROOT_DIR)<def_stmt>test_request_serialization <block_start>"""Test the serialization for 'request' speech-act works."""<line_sep>msg=HttpMessage(performative=HttpMessage.Performative.REQUEST method="some_method" url="url" version="some_version" headers="some_headers" body=b"some_body" )<line_sep>msg.to="receiver"<line_sep>envelope=Envelope(to=msg.to sender="sender" message=msg )<line_sep>envelope_bytes=envelope.encode()<line_sep>actual_envelope=Envelope.decode(envelope_bytes)<line_sep>expected_envelope=envelope<assert_stmt>expected_envelope.to<eq>actual_envelope.to<assert_stmt>expected_envelope.sender<eq>actual_envelope.sender<assert_stmt>(expected_envelope.protocol_specification_id<eq>actual_envelope.protocol_specification_id)<assert_stmt>expected_envelope.message<ne>actual_envelope.message<line_sep>actual_msg=HttpMessage.serializer.decode(actual_envelope.message)<line_sep>actual_msg.to=actual_envelope.to<line_sep>actual_msg.sender=actual_envelope.sender<line_sep>expected_msg=msg<assert_stmt>expected_msg<eq>actual_msg<block_end><def_stmt>test_response_serialization <block_start>"""Test the serialization for 'response' speech-act works."""<line_sep>msg=HttpMessage(message_id=2 target=1 performative=HttpMessage.Performative.RESPONSE version="some_version" status_code=1 status_text="some_status_text" headers="some_headers" body=b"some_body" )<line_sep>msg.to="receiver"<line_sep>envelope=Envelope(to=msg.to sender="sender" message=msg 
)<line_sep>envelope_bytes=envelope.encode()<line_sep>actual_envelope=Envelope.decode(envelope_bytes)<line_sep>expected_envelope=envelope<assert_stmt>expected_envelope.to<eq>actual_envelope.to<assert_stmt>expected_envelope.sender<eq>actual_envelope.sender<assert_stmt>(expected_envelope.protocol_specification_id<eq>actual_envelope.protocol_specification_id)<assert_stmt>expected_envelope.message<ne>actual_envelope.message<line_sep>actual_msg=HttpMessage.serializer.decode(actual_envelope.message)<line_sep>actual_msg.to=actual_envelope.to<line_sep>actual_msg.sender=actual_envelope.sender<line_sep>expected_msg=msg<assert_stmt>expected_msg<eq>actual_msg<block_end><def_stmt>test_performative_string_value <block_start>"""Test the string value of the performatives."""<assert_stmt>(str(HttpMessage.Performative.REQUEST)<eq>"request") "The str value must be request"<assert_stmt>(str(HttpMessage.Performative.RESPONSE)<eq>"response") "The str value must be response"<block_end><def_stmt>test_encoding_unknown_performative <block_start>"""Test that we raise an exception when the performative is unknown during encoding."""<line_sep>msg=HttpMessage(performative=HttpMessage.Performative.REQUEST method="some_method" url="url" version="some_version" headers="some_headers" body=b"some_body" )<with_stmt>pytest.raises(ValueError match="Performative not valid:")<block_start><with_stmt>mock.patch.object(HttpMessage.Performative "__eq__" return_value=<false>)<block_start>HttpMessage.serializer.encode(msg)<block_end><block_end><block_end><def_stmt>test_decoding_unknown_performative <block_start>"""Test that we raise an exception when the performative is unknown during decoding."""<line_sep>msg=HttpMessage(performative=HttpMessage.Performative.REQUEST method="some_method" url="url" version="some_version" headers="some_headers" body=b"some_body" )<line_sep>encoded_msg=HttpMessage.serializer.encode(msg)<with_stmt>pytest.raises(ValueError match="Performative not valid:")<block_start><with_stmt>mock.patch.object(HttpMessage.Performative "__eq__" return_value=<false>)<block_start>HttpMessage.serializer.decode(encoded_msg)<block_end><block_end><block_end>@mock.patch.object(packages.fetchai.protocols.http.message "enforce" side_effect=AEAEnforceError("some error") )<def_stmt>test_incorrect_message mocked_enforce<block_start>"""Test that we raise an exception when the message is incorrect."""<with_stmt>mock.patch.object(http_message_logger "error")<as>mock_logger<block_start>HttpMessage(performative=HttpMessage.Performative.REQUEST method="some_method" url="url" version="some_version" headers="some_headers" body=b"some_body" )<line_sep>mock_logger.assert_any_call("some error")<block_end><block_end><class_stmt>TestDialogues<block_start>"""Tests http dialogues."""<line_sep>@classmethod<def_stmt>setup_class cls<block_start>"""Set up the test."""<line_sep>cls.agent_addr="agent address"<line_sep>cls.server_addr="server address"<line_sep>cls.agent_dialogues=AgentDialogues(cls.agent_addr)<line_sep>cls.server_dialogues=ServerDialogues(cls.server_addr)<block_end><def_stmt>test_create_self_initiated self<block_start>"""Test the self initialisation of a dialogue."""<line_sep>result=self.agent_dialogues._create_self_initiated(dialogue_opponent_addr=self.server_addr dialogue_reference=(str(0) "") role=HttpDialogue.Role.CLIENT )<assert_stmt>isinstance(result HttpDialogue)<assert_stmt>result.role<eq>HttpDialogue.Role.CLIENT "The role must be client."<block_end><def_stmt>test_create_opponent_initiated self<block_start>"""Test the opponent 
initialisation of a dialogue."""<line_sep>result=self.agent_dialogues._create_opponent_initiated(dialogue_opponent_addr=self.server_addr dialogue_reference=(str(0) "") role=HttpDialogue.Role.CLIENT )<assert_stmt>isinstance(result HttpDialogue)<assert_stmt>result.role<eq>HttpDialogue.Role.CLIENT "The role must be client."<block_end><block_end><class_stmt>AgentDialogue(HttpDialogue)<block_start>"""The dialogue class maintains state of a dialogue and manages it."""<def_stmt>__init__ self dialogue_label:DialogueLabel self_address:Address role:BaseDialogue.Role message_class:Type[HttpMessage] <arrow><none><block_start>""" Initialize a dialogue. :param dialogue_label: the identifier of the dialogue :param self_address: the address of the entity for whom this dialogue is maintained :param role: the role of the agent this dialogue is maintained for :return: None """<line_sep>HttpDialogue.__init__(self dialogue_label=dialogue_label self_address=self_address role=role message_class=message_class )<block_end><block_end><class_stmt>AgentDialogues(HttpDialogues)<block_start>"""The dialogues class keeps track of all dialogues."""<def_stmt>__init__ self self_address:Address<arrow><none><block_start>""" Initialize dialogues. :return: None """<def_stmt>role_from_first_message # pylint: disable=unused-argument message:Message receiver_address:Address<arrow>BaseDialogue.Role<block_start>"""Infer the role of the agent from an incoming/outgoing first message :param message: an incoming/outgoing first message :param receiver_address: the address of the receiving agent :return: The role of the agent """<line_sep><return>HttpDialogue.Role.CLIENT<block_end>HttpDialogues.__init__(self self_address=self_address role_from_first_message=role_from_first_message dialogue_class=AgentDialogue )<block_end><block_end><class_stmt>ServerDialogue(HttpDialogue)<block_start>"""The dialogue class maintains state of a dialogue and manages it."""<def_stmt>__init__ self dialogue_label:DialogueLabel self_address:Address role:BaseDialogue.Role message_class:Type[HttpMessage] <arrow><none><block_start>""" Initialize a dialogue. :param dialogue_label: the identifier of the dialogue :param self_address: the address of the entity for whom this dialogue is maintained :param role: the role of the agent this dialogue is maintained for :return: None """<line_sep>HttpDialogue.__init__(self dialogue_label=dialogue_label self_address=self_address role=role message_class=message_class )<block_end><block_end><class_stmt>ServerDialogues(HttpDialogues)<block_start>"""The dialogues class keeps track of all dialogues."""<def_stmt>__init__ self self_address:Address<arrow><none><block_start>""" Initialize dialogues. :return: None """<def_stmt>role_from_first_message # pylint: disable=unused-argument message:Message receiver_address:Address<arrow>BaseDialogue.Role<block_start>"""Infer the role of the agent from an incoming/outgoing first message :param message: an incoming/outgoing first message :param receiver_address: the address of the receiving agent :return: The role of the agent """<line_sep><return>HttpDialogue.Role.SERVER<block_end>HttpDialogues.__init__(self self_address=self_address role_from_first_message=role_from_first_message dialogue_class=ServerDialogue )<block_end><block_end>
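A short usage sketch for the helper dialogue classes above (not part of the original test module; the addresses and payload values are made up, and it assumes the standard Dialogues.create API from aea.protocols.dialogue.base):

# Hypothetical usage of the AgentDialogues helper defined above.
client_dialogues = AgentDialogues("client_address")
request, dialogue = client_dialogues.create(
    counterparty="server_address",
    performative=HttpMessage.Performative.REQUEST,
    method="GET",
    url="http://example.com",
    version="1.1",
    headers="",
    body=b"",
)
# 'request' can now be wrapped in an Envelope and sent; 'dialogue' keeps the
# dialogue reference so the matching RESPONSE message can be validated later.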
<import_stmt>os<import_stmt>apprise<line_sep>valid_tokens={'base_url':'' 'watch_url':'' 'watch_uuid':'' 'watch_title':'' 'watch_tag':'' 'diff_url':'' 'preview_url':'' 'current_snapshot':''}<def_stmt>process_notification n_object datastore<block_start><import_stmt>logging<line_sep>log=logging.getLogger('apprise')<line_sep>log.setLevel('TRACE')<line_sep>apobj=apprise.Apprise(debug=<true>)<for_stmt>url n_object['notification_urls']<block_start>url=url.strip()<line_sep>print(">> Process Notification: AppRise notifying {}".format(url))<line_sep>apobj.add(url)<block_end># Get the notification body and title from the notification object n_body=n_object['notification_body']<line_sep>n_title=n_object['notification_title']<line_sep># Insert variables into the notification content notification_parameters=create_notification_parameters(n_object datastore)<for_stmt>n_k notification_parameters<block_start>token='{'+n_k+'}'<line_sep>val=notification_parameters[n_k]<line_sep>n_title=n_title.replace(token val)<line_sep>n_body=n_body.replace(token val)<block_end>apobj.notify(body=n_body title=n_title)<block_end># Notification title + body content parameters get created here. <def_stmt>create_notification_parameters n_object datastore<block_start><import_from_stmt>copy deepcopy<line_sep># in the case we send a test notification from the main settings, there is no UUID. uuid=n_object['uuid']<if>'uuid'<in>n_object<else>''<if_stmt>uuid<ne>''<block_start>watch_title=datastore.data['watching'][uuid]['title']<line_sep>watch_tag=datastore.data['watching'][uuid]['tag']<block_end><else_stmt><block_start>watch_title='Change Detection'<line_sep>watch_tag=''<block_end># Create URLs to customise the notification with base_url=datastore.data['settings']['application']['base_url']<line_sep>watch_url=n_object['watch_url']<line_sep># Re #148 - Some people have just {base_url} in the body or title, but this may break some notification services # like 'Join', so it's always best to at least set something obvious so that they are not broken. <if_stmt>base_url<eq>''<block_start>base_url="<base-url-env-var-not-set>"<block_end>diff_url="{}/diff/{}".format(base_url uuid)<line_sep>preview_url="{}/preview/{}".format(base_url uuid)<line_sep># Not sure deepcopy is needed here, but why not tokens=deepcopy(valid_tokens)<line_sep># Valid_tokens also used as a field validator tokens.update({'base_url':base_url<if>base_url<is><not><none><else>'' 'watch_url':watch_url 'watch_uuid':uuid 'watch_title':watch_title<if>watch_title<is><not><none><else>'' 'watch_tag':watch_tag<if>watch_tag<is><not><none><else>'' 'diff_url':diff_url 'preview_url':preview_url 'current_snapshot':n_object['current_snapshot']<if>'current_snapshot'<in>n_object<else>''})<line_sep><return>tokens<block_end>
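The replacement loop in process_notification above is a plain find-and-replace over the fixed {token} placeholders listed in valid_tokens. A self-contained sketch of that behaviour with made-up values (the real code builds them from n_object and the datastore):

# Illustrative values only; create_notification_parameters() produces the real ones.
tokens = {
    'watch_title': 'Example pricing page',
    'diff_url': 'https://changedetection.example/diff/abc-123',
}
n_title = "Change detected for {watch_title}"
n_body = "See what changed: {diff_url}"
for name, value in tokens.items():
    placeholder = '{' + name + '}'   # tokens are written as {name} in the templates
    n_title = n_title.replace(placeholder, value)
    n_body = n_body.replace(placeholder, value)
print(n_title)  # Change detected for Example pricing page
print(n_body)   # See what changed: https://changedetection.example/diff/abc-123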
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, unused-variable, unused-argument """1D convolution operators."""<import_from_stmt>.conv2d conv<def_stmt>conv1d data kernel strides=1 padding="VALID" dilation=1 layout="NCW" out_dtype=<none><block_start>"""1D convolution forward operator. Parameters ---------- data : tvm.te.Tensor 3-D input shape [batch, in_channel, in_width] for layout == 'NCW' and [batch, in_width, in_channel] for layout == 'NWC' kernel : tvm.te.Tensor 3-D kernel with shape [num_filter, in_channel, filter_size] for layout == 'NCW' and [filter_size, in_channel, num_filter] for layout == 'NWC' strides : int or tuple The spatial stride along width padding : int or str Padding size, or ['VALID', 'SAME'] dilation : int or tuple Dilation rate if convolution should be dilated. layout : str How input data is laid out, must be one of ['NCW', 'NWC'] out_dtype : str The output data type. If None then output is same type as input. """<line_sep><return>conv(data kernel strides padding dilation 1 layout out_dtype)<block_end><def_stmt>conv1d_nwc data kernel strides=1 padding="VALID" dilation=1 out_dtype=<none><block_start>"""1D convolution in NWC layout. See :py:func:`conv` for details on parameters"""<line_sep><return>conv(data kernel strides padding dilation 1 "NWC" out_dtype=out_dtype)<block_end><def_stmt>conv1d_ncw data kernel strides=1 padding="VALID" dilation=1 out_dtype=<none><block_start>"""1D convolution in NCW layout. See :py:func:`conv` for details on parameters"""<line_sep><return>conv(data kernel strides padding dilation 1 "NCW" out_dtype=out_dtype)<block_end><def_stmt>group_conv1d_nwc data kernel strides=1 padding="VALID" dilation=1 groups=1 out_dtype=<none><block_start>"""1D convolution forward operator for NWC layout. Parameters ---------- data : tvm.te.Tensor 3-D with shape [batch, in_width, in_channel] kernel : tvm.te.Tensor 3-D with shape [filter_size, in_channel, num_filter] strides : int or tuple The spatial stride along width padding : int, tuple, or str Padding size can be an integer for equal padding, a tuple of (left, right) or a string in ['VALID', 'SAME']. dilation : int or tuple Dilation rate if convolution should be dilated. groups : int Number of groups out_dtype : str The output data type. If None then output is same type as input. """<line_sep><return>conv(data kernel strides padding dilation groups "NWC" out_dtype=out_dtype)<block_end><def_stmt>group_conv1d_ncw data kernel strides=1 padding="VALID" dilation=1 groups=1 out_dtype=<none><block_start>"""1D convolution forward operator for NCW layout. 
Parameters ---------- data : tvm.te.Tensor 3-D with shape [batch, in_channel, in_width] kernel : tvm.te.Tensor 3-D with shape [num_filter, in_channel, filter_size] strides : int or tuple The spatial stride along width padding : int, tuple, or str Padding size can be an integer for equal padding, a tuple of (left, right) or a string in ['VALID', 'SAME']. dilation : int or tuple Dilation rate if convolution should be dilated. groups : int Number of groups out_dtype : str The output data type. If None then output is same type as input. """<line_sep><return>conv(data kernel strides padding dilation groups "NCW" out_dtype=out_dtype)<block_end>
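A minimal usage sketch for the NCW wrapper above, assuming these helpers are exposed under tvm.topi.nn as in mainline TVM; the tensor shapes are illustrative:

import tvm
from tvm import te, topi

batch, in_channel, in_width = 1, 16, 64
num_filter, kernel_size = 32, 3

# Symbolic inputs in NCW layout, matching conv1d_ncw's documented shapes.
data = te.placeholder((batch, in_channel, in_width), name="data")
kernel = te.placeholder((num_filter, in_channel, kernel_size), name="kernel")

# conv1d_ncw is a thin wrapper around the generic conv() helper above.
out = topi.nn.conv1d_ncw(data, kernel, strides=1, padding="SAME", dilation=1)

# Inspect the lowered IR with a naive (unoptimized) schedule.
s = te.create_schedule(out.op)
print(tvm.lower(s, [data, kernel, out], simple_mode=True))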
########################################################## # # pinout tests # # Use a user-defined temporary directory if # you have problems with multiple harddrives (like I do): # # >>> pytest --basetemp=temp # ########################################################## <import_stmt>filecmp<import_stmt>pytest<import_stmt>re<import_stmt>shutil<import_stmt>uuid<import_from_stmt>pathlib Path<import_from_stmt>importlib reload<import_from_stmt>pinout manager<import_from_stmt>pinout config<def_stmt>re_sub_ids re_m<block_start>id=re_m.group(0).split("_")<line_sep>id="unique_id_replaced-for-testing_"+id[-1]<line_sep><return>id<block_end><def_stmt>mk_test_file src dest<block_start>shutil.copyfile(src dest)<with_stmt>src.open()<as>f<block_start>data=f.read()<line_sep># sub ids id=re.compile(r"(?<=id=\").+(?=\")")<line_sep>data=re.sub(id re_sub_ids data)<line_sep># sub hrefs id=re.compile(r"(?<=href=\"#).+(?=\")")<line_sep>data=re.sub(id re_sub_ids data)<line_sep># sub clip-path urls id=re.compile(r"(?<=clip-path=\"url\(#).+(?=\")")<line_sep>data=re.sub(id re_sub_ids data)<line_sep># write modified file data to testfile dest.write_text(data)<block_end><return>dest<block_end>@pytest.mark.parametrize("module_path, ref_path" [("../samples/arduino/arduino/uno/arduino_uno.py" "../samples/arduino/pinout_arduino_uno_rev3.svg" ) ("../samples/arduino/arduino/rp2040/arduino_nano_rp2040_connect.py" "../samples/arduino/pinout_arduino_nano_rp2040_connect.svg" ) ("../samples/attiny85/attiny85.py" "../samples/attiny85/pinout_attiny85.svg" ) ("../samples/clip_path/pinout_diagram.py" "../samples/clip_path/diagram.svg" ) ("../samples/full_sample/pinout_diagram.py" "../samples/full_sample/pinout_diagram.svg" ) ("../samples/panel_layout/panel_layout.py" "../samples/panel_layout/output/panel_layout.svg" ) ("../samples/panel_layout/populated_layout.py" "../samples/panel_layout/output/populated_layout.svg" ) ("../samples/pci-express/pinout_x1.py" "../samples/pci-express/pinout_x1.svg" ) ("../samples/section_pullout/pinout_diagram.py" "../samples/section_pullout/diagram.svg" ) ("../samples/teensy_4.0/pinout_diagram.py" "../samples/teensy_4.0/teensy_4.0_front_pinout_diagram.svg" ) ] )<def_stmt>test_output_against_reference tmp_path module_path ref_path# Config requires reloading between tests to to ensure # is in default state. <block_start>reload(config)<line_sep>module_path=Path(module_path)<line_sep>ref_path=Path(ref_path)<line_sep># Export a temp file in same location as reference: # Required for relative links to be identical. tempsvg=ref_path.parent/f"temp_pytest_{str(uuid.uuid4())}.svg"<line_sep>manager.export_diagram(module_path tempsvg overwrite=<true> )<line_sep># Create files for comparison. Unique ids are converted to match file1=mk_test_file(tempsvg tmp_path/f"test_file.svg")<line_sep>file2=mk_test_file(ref_path tmp_path/f"ref_file.svg")<line_sep># Remove temp file tempsvg.unlink()<line_sep># Test files are identical <assert_stmt>filecmp.cmp(file1 file2 shallow=<false>)<block_end>
# Copyright (C) 2018-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 <import_from_stmt>openvino.tools.mo.graph.graph Node Graph<import_from_stmt>openvino.tools.mo.ops.op Op<class_stmt>Exit(Op)<block_start>op="Exit"<def_stmt>__init__ self graph:Graph attrs:dict<block_start>mandatory_props={'type':<none> 'op':__class__.op 'infer':Exit.exit_infer 'in_ports_count':1 }<line_sep>super().__init__(graph mandatory_props attrs)<block_end>@staticmethod<def_stmt>exit_infer node:Node<block_start>output_shape=node.in_port(0).data.get_shape()<line_sep>output_value=node.in_port(0).data.get_value()<for_stmt>port node.out_ports()<block_start><if_stmt><not>node.out_port(port).disconnected()<block_start>node.out_port(port).data.set_shape(output_shape)<if_stmt>output_value<is><not><none><block_start>node.out_port(port).data.set_value(output_value)<block_end><block_end><block_end><block_end><block_end>
<import_stmt>os<import_from_stmt>unittest TestCase skipIf<import_from_stmt>app.main ALLOWED_TASKS<import_from_stmt>app.validation ffmpeg_read<import_from_stmt>starlette.testclient TestClient<import_from_stmt>tests.test_api TESTABLE_MODELS<line_sep>@skipIf("text-to-speech"<not><in>ALLOWED_TASKS "text-to-speech not implemented" )<class_stmt>TextToSpeechTestCase(TestCase)<block_start><def_stmt>setUp self<block_start>model_id=TESTABLE_MODELS["text-to-speech"]<line_sep>self.old_model_id=os.getenv("MODEL_ID")<line_sep>self.old_task=os.getenv("TASK")<line_sep>os.environ["MODEL_ID"]=model_id<line_sep>os.environ["TASK"]="text-to-speech"<import_from_stmt>app.main app<line_sep>self.app=app<block_end><def_stmt>tearDown self<block_start>os.environ["MODEL_ID"]=self.old_model_id<line_sep>os.environ["TASK"]=self.old_task<block_end><def_stmt>test_simple self<block_start><with_stmt>TestClient(self.app)<as>client<block_start>response=client.post("/" json={"inputs":"This is some text"})<block_end>self.assertEqual(response.status_code 200 )<line_sep>self.assertEqual(response.headers["content-type"] "audio/wav")<line_sep>audio=ffmpeg_read(response.content)<line_sep>self.assertEqual(audio.shape (10 ))<block_end><def_stmt>test_malformed_input self<block_start><with_stmt>TestClient(self.app)<as>client<block_start>response=client.post("/" data=b"This is some test")<block_end>self.assertEqual(response.status_code 400 )<line_sep>self.assertEqual(response.content b'{"error":"Malformed soundfile"}')<block_end><block_end>
r""" Finite dimensional graded commutative algebras AUTHORS: - <NAME> (2021): initial version """<line_sep>#***************************************************************************** # Copyright (C) 2021 <NAME> <m.jung at vu.nl> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # http://www.gnu.org/licenses/ #***************************************************************************** <import_from_stmt>sage.combinat.free_module CombinatorialFreeModule<import_from_stmt>sage.categories.algebras Algebras<import_from_stmt>sage.misc.cachefunc cached_method<import_from_stmt>sage.combinat.integer_vector_weighted WeightedIntegerVectors<import_from_stmt>sage.rings.ring Algebra<import_from_stmt>sage.misc.functional is_odd is_even<import_from_stmt>sage.sets.disjoint_union_enumerated_sets DisjointUnionEnumeratedSets<import_from_stmt>sage.sets.condition_set ConditionSet<import_from_stmt>sage.rings.integer_ring ZZ<class_stmt>FiniteGCAlgebra(CombinatorialFreeModule Algebra)<block_start>r""" Finite dimensional graded commutative algebras. A finite dimensional graded commutative algebra `A` is an integer-graded algebra satisfying the super-algebra relation w.r.t. the degree modulo 2. More precisely, `A` has a graded ring structure .. MATH:: A = \bigoplus_{i=0}^n A_i, where `n \in \NN` is the finite maximal degree, and the multiplication satisfies .. MATH:: A_i \cdot A_j \subset \begin{cases}A_{i+j} & \text{if $i+j\leq n$}, \\ 0 & \text{if $i+j > n$},\end{cases} as well as the super-algebra relation .. MATH:: x y = (-1)^{ij} y x for all homogeneous elements `x \in A_i` and `y \in A_j`. Such an algebra is multiplicatively generated by a set of single monomials `\{ x_1, \ldots, x_k \}`, where each `x_i` is given a certain degree `\mathrm{deg}(x_i)`. To that end, this algebra can be given a vector space basis, and the basis vectors are of the form `x_1^{w_1} \cdots x_n^{ w_k}`, where `\sum_{i=1}^k \mathrm{deg}(x_i) \, w_i \leq n` and .. MATH:: w_i \in \begin{cases} \ZZ_2 & \text{if $\mathrm{deg}(x_i)$ is odd}, \\ \NN & \text{if $\mathrm{deg}(x_i)$ is even}. \end{cases} Typical examples of finite dimensional graded commutative algebras are cohomology rings over finite dimensional CW-complexes. INPUT: - ``base`` -- the base field - ``names`` -- (optional) names of the generators: a list of strings or a single string with the names separated by commas. If not specified, the generators are named "x0", "x1",... - ``degrees`` -- (optional) a tuple or list specifying the degrees of the generators; if omitted, each generator is given degree 1, and if both ``names`` and ``degrees`` are omitted, an error is raised. - ``max_degree`` -- the maximal degree of the graded algebra. - ``mul_symbol`` -- (optional) symbol used for multiplication. If omitted, the string "*" is used. - ``mul_latex_symbol`` -- (optional) latex symbol used for multiplication. If omitted, the empty string is used. 
EXAMPLES:: sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(1,2,2,3), max_degree=6) sage: A Graded commutative algebra with generators ('x', 'y', 'z', 't') in degrees (1, 2, 2, 3) with maximal degree 6 sage: t*x + x*t 0 sage: x^2 0 sage: x*t^2 0 sage: x*y^2+z*t x*y^2 + z*t The generators can be returned with :meth:`algebra_generators`:: sage: F = A.algebra_generators(); F Family (x, y, z, t) sage: [g.degree() for g in F] [1, 2, 2, 3] We can also return the basis:: sage: list(A.basis()) [1, x, z, y, t, x*z, x*y, x*t, z^2, y*z, y^2, z*t, y*t, x*z^2, x*y*z, x*y^2] Depending on the context, the multiplication can be given a different symbol:: sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(1,2,6,6), max_degree=10, mul_symbol='⌣', mul_latex_symbol=r'\smile') sage: x*y^2 + x*t x⌣y^2 + x⌣t sage: latex(x*y^2 - z*x) x\smile y^{2} - x\smile z .. NOTE:: Notice, when the argument ``max_degree`` in the global namespace is omitted, an instance of the class :class:`sage.algebras.commutative_dga.GCAlgebra` is created instead:: sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(1,2,6,6)) sage: type(A) <class 'sage.algebras.commutative_dga.GCAlgebra_with_category'> """<line_sep>@staticmethod<def_stmt>__classcall_private__ cls base names=<none> degrees=<none> max_degree=<none> category=<none> **kwargs<block_start>r""" Normalize the input for the :meth:`__init__` method and the unique representation. INPUT: - ``base`` -- the base ring of the algebra - ``max_degree`` -- the maximal degree of the algebra - ``names`` -- the names of the variables; by default, set to ``x1``, ``x2``, etc. - ``degrees`` -- the degrees of the generators; by default, set to 1 TESTS:: sage: A1 = GradedCommutativeAlgebra(GF(2), 'x,y', (3, 6), max_degree=12) sage: A2 = GradedCommutativeAlgebra(GF(2), ['x', 'y'], [3, 6], max_degree=12) sage: A1 is A2 True """<if_stmt>max_degree<is><none><block_start><raise>TypeError("max_degree must be specified")<block_end><if_stmt>names<is><none><block_start><if_stmt>degrees<is><none><block_start><raise>ValueError("You must specify names or degrees")<block_end><else_stmt><block_start>n=len(degrees)<block_end>names=tuple('x{}'.format(i)<for>i range(n))<block_end><elif_stmt>isinstance(names str)<block_start>names=tuple(names.split(','))<line_sep>n=len(names)<block_end><else_stmt><block_start>n=len(names)<line_sep>names=tuple(names)<block_end><if_stmt>degrees<is><none><block_start>degrees=tuple([1<for>_ range(n)])<block_end><else_stmt><block_start>degrees=tuple(degrees)<block_end><return>super().__classcall__(cls base=base names=names degrees=degrees max_degree=max_degree category=category **kwargs)<block_end><def_stmt>__init__ self base names degrees max_degree category=<none> **kwargs<block_start>r""" Construct a commutative graded algebra with finite degree. 
TESTS:: sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, max_degree=6) sage: TestSuite(A).run() sage: A = GradedCommutativeAlgebra(QQ, ('x','y','z'), [2,3,4], max_degree=8) sage: TestSuite(A).run() sage: A = GradedCommutativeAlgebra(QQ, ('x','y','z','t'), [1,2,3,4], max_degree=10) sage: TestSuite(A).run() """<import_from_stmt>sage.arith.misc gcd<if_stmt>max_degree<not><in>ZZ<block_start><raise>TypeError('max_degree must be an integer')<block_end><if_stmt>max_degree<l>max(degrees)<block_start><raise>ValueError(f'max_degree must not deceed {max(degrees)}')<block_end>self._names=names<line_sep>self.__ngens=len(self._names)<line_sep>self._degrees=degrees<line_sep>self._max_deg=max_degree<line_sep>self._weighted_vectors=WeightedIntegerVectors(degrees)<line_sep>self._mul_symbol=kwargs.pop('mul_symbol' '*')<line_sep>self._mul_latex_symbol=kwargs.pop('mul_latex_symbol' '')<line_sep>step=gcd(degrees)<line_sep>universe=DisjointUnionEnumeratedSets(self._weighted_vectors.subset(k)<for>k range(0 max_degree step))<line_sep>base_cat=Algebras(base).WithBasis().Super().Supercommutative().FiniteDimensional()<line_sep>category=base_cat.or_subcategory(category join=<true>)<line_sep>indices=ConditionSet(universe self._valid_index)<line_sep>sorting_key=self._weighted_vectors.grading<line_sep>CombinatorialFreeModule.__init__(self base indices sorting_key=sorting_key category=category)<block_end><def_stmt>_valid_index self w<block_start>r""" Return whether ``w`` is a valid index; no multiple powers in odd degrees. TESTS:: sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=8) sage: w1 = A._weighted_vectors([1,2,1]) sage: w2 = A._weighted_vectors([1,2,2]) sage: A._valid_index(w1) True sage: A._valid_index(w2) False """<line_sep><return><not>any(i<g>1<for>i,d zip(w self._degrees)<if>is_odd(d))<block_end><def_stmt>_repr_ self<block_start>""" Return the string representation of ``self``. TESTS:: sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=8) sage: A._repr_() "Graded commutative algebra with generators ('x', 'y', 'z') in degrees (1, 2, 3) with maximal degree 8" sage: A # indirect doctest Graded commutative algebra with generators ('x', 'y', 'z') in degrees (1, 2, 3) with maximal degree 8 """<line_sep>desc=f'Graded commutative algebra with generators {self._names} in '<line_sep>desc<augadd>f'degrees {self._degrees} with maximal degree {self._max_deg}'<line_sep><return>desc<block_end><def_stmt>ngens self<block_start>r""" Return the number of generators of ``self``. EXAMPLES:: sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(4,8,2), max_degree=10) sage: A.ngens() 3 """<line_sep><return>self.__ngens<block_end>@cached_method<def_stmt>product_on_basis self w1 w2<block_start>r""" Return the product of two indices within the algebra. 
EXAMPLES:: sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(4,8,2), max_degree=10) sage: z*x x*z sage: x^3 0 sage: 5*z + 4*z*x 5*z + 4*x*z :: sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=5) sage: 2*x*y 2*x*y sage: x^2 0 sage: x*z x*z sage: z*x -x*z sage: x*y*z 0 TESTS:: sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(4,8,2), max_degree=10) sage: weighted_vectors = A._weighted_vectors sage: w1 = A._weighted_vectors([1,0,1]) sage: w2 = A._weighted_vectors([0,0,0]) sage: A.product_on_basis(w1, w2) x*z :: sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=5) sage: weighted_vectors = A._weighted_vectors sage: w1 = A._weighted_vectors([1,0,0]) sage: w2 = A._weighted_vectors([0,0,1]) sage: A.product_on_basis(w1, w2) x*z sage: A.product_on_basis(w2, w1) -x*z :: sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=10) sage: weighted_vectors = A._weighted_vectors sage: w1 = A._weighted_vectors([1,1,0]) sage: w2 = A._weighted_vectors([0,1,1]) sage: A.product_on_basis(w1, w2) x*y^2*z sage: A.product_on_basis(w2, w1) -x*y^2*z """<line_sep>grading=self._weighted_vectors.grading<line_sep>deg_left=grading(w1)<line_sep>deg_right=grading(w2)<line_sep>deg_tot=deg_left+deg_right<if_stmt>deg_tot<g>self._max_deg<block_start><return>self.zero()<block_end>w_tot=self._weighted_vectors([sum(w)<for>w zip(w1 w2)])<if_stmt><not>self._valid_index(w_tot)<block_start><return>self.zero()<block_end># determine sign n=self.__ngens<line_sep>c=0<for_stmt>p,i,d zip(reversed(range(n)) reversed(w1) reversed(self._degrees))<block_start><if_stmt>is_even(d)<or>i<eq>0<block_start><continue><block_end><for_stmt>q,j,b zip(range(n) w2 self._degrees)<block_start><if_stmt>q<eq>p<block_start><break><block_end><if_stmt>j<eq>0<or>is_even(b)<block_start><continue><block_end>c<augadd>1<block_end><block_end><return>(-1)<power>c<times>self.monomial(w_tot)<block_end><def_stmt>degree_on_basis self i<block_start>r""" Return the degree of a homogeneous element with index `i`. EXAMPLES:: sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=(2,4,6), max_degree=7) sage: a.degree() 2 sage: (2*a*b).degree() 6 sage: (a+b).degree() Traceback (most recent call last): ... ValueError: element is not homogeneous TESTS:: sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=(2,4,6), max_degree=7) sage: weighted_vectors = A._weighted_vectors sage: i = A._weighted_vectors([1,1,0]) sage: A.degree_on_basis(i) 6 """<line_sep><return>self._weighted_vectors.grading(i)<block_end><def_stmt>_repr_term self w<block_start>r""" Return the string representation of basis with index ``w``. 
TESTS:: sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=8) sage: w = A._weighted_vectors([1,2,1]) sage: A._repr_term(w) 'x*y^2*z' sage: x*y^2*z # indirect doctest x*y^2*z :: sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=8, mul_symbol='⌣') sage: w = A._weighted_vectors([1,2,1]) sage: A._repr_term(w) 'x⌣y^2⌣z' sage: x*y^2*z # indirect doctest x⌣y^2⌣z """<line_sep># Trivial case: <if_stmt>sum(w)<eq>0<block_start><return>'1'<block_end># Non-trivial case: terms=[]<for_stmt>i range(len(w))<block_start><if_stmt>w[i]<eq>0<block_start><continue><block_end><elif_stmt>w[i]<eq>1<block_start>terms.append(self._names[i])<block_end><else_stmt><block_start>terms.append(self._names[i]+f'^{w[i]}')<block_end><block_end><return>self._mul_symbol.join(terms)<block_end><def_stmt>_latex_term self w<block_start>r""" Return the LaTeX representation of basis with index ``w``. TESTS:: sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=8) sage: w = A._weighted_vectors([1,2,1]) sage: A._latex_term(w) 'x y^{2} z' sage: latex(x*y^2*z) # indirect doctest x y^{2} z :: sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=8, mul_latex_symbol=r'\smile') sage: A._latex_term(w) 'x\\smile y^{2}\\smile z' sage: latex(x*y^2*z) # indirect doctest x\smile y^{2}\smile z """<line_sep># Trivial case: <if_stmt>sum(w)<eq>0<block_start><return>'1'<block_end># Non-trivial case: terms=[]<for_stmt>i range(len(w))<block_start><if_stmt>w[i]<eq>0<block_start><continue><block_end><elif_stmt>w[i]<eq>1<block_start>terms.append(self._names[i])<block_end><else_stmt><block_start>terms.append(self._names[i]+'^{'+str(w[i])+'}')<block_end><block_end>latex_mul=self._mul_latex_symbol+' '# add whitespace <return>latex_mul.join(terms)<block_end><def_stmt>algebra_generators self<block_start>r""" Return the generators of ``self`` as a :class:`sage.sets.family.TrivialFamily`. EXAMPLES:: sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(4,8,2), max_degree=10) sage: A.algebra_generators() Family (x, y, z) """<import_from_stmt>sage.sets.family Family<line_sep><return>Family(self.gens())<block_end>@cached_method<def_stmt>one_basis self<block_start>r""" Return the index of the one element of ``self``. EXAMPLES:: sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(4,8,2), max_degree=10) sage: ind = A.one_basis(); ind [0, 0, 0] sage: A.monomial(ind) 1 sage: A.one() # indirect doctest 1 """<line_sep>n=len(self._degrees)<line_sep><return>self._weighted_vectors([0<for>_ range(n)])<block_end><def_stmt>gens self<block_start>r""" Return the generators of ``self`` as a list. EXAMPLES:: sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(4,8,2), max_degree=10) sage: A.gens() [x, y, z] """<line_sep>n=len(self._degrees)<line_sep>zero=[0<for>_ range(n)]<line_sep>indices=[]<for_stmt>k range(n)<block_start>ind=list(zero)<line_sep>ind[k]=1<line_sep>indices.append(self._weighted_vectors(ind))<block_end><return>[self.monomial(ind)<for>ind indices]<block_end>@cached_method<def_stmt>gen self i<block_start>r""" Return the `i`-th generator of ``self``. EXAMPLES:: sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(4,8,2), max_degree=10) sage: A.gen(0) x sage: A.gen(1) y sage: A.gen(2) z """<line_sep><return>self.gens()[i]<block_end><def_stmt>maximal_degree self<block_start>r""" Return the maximal degree of ``self``. 
EXAMPLES:: sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=8) sage: A.maximal_degree() 8 """<line_sep><return>self._max_deg<block_end>max_degree=maximal_degree<block_end>
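The sign bookkeeping in product_on_basis above is the usual Koszul rule: each odd-degree generator contributed by the left factor picks up a factor of -1 for every odd-degree generator of the right factor it has to move past. A self-contained sketch of that counting (plain Python, independent of Sage; names and inputs are illustrative):

# Standalone sketch of the Koszul sign used in product_on_basis above.
# w1, w2 are exponent vectors of two monomials, degrees[i] is the degree
# of the i-th generator.
def koszul_sign(w1, w2, degrees):
    sign = 1
    for p in reversed(range(len(degrees))):
        # only odd-degree generators appearing in the left factor matter
        if degrees[p] % 2 == 0 or w1[p] == 0:
            continue
        for q in range(p):
            # count the odd-degree generators of the right factor they jump over
            if w2[q] != 0 and degrees[q] % 2 == 1:
                sign = -sign
    return sign

# x (deg 1), y (deg 2), z (deg 3): z*x = -x*z, matching the docstring example
print(koszul_sign((0, 0, 1), (1, 0, 0), (1, 2, 3)))   # -1
print(koszul_sign((1, 0, 0), (0, 0, 1), (1, 2, 3)))   #  1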
_base_=['../_base_/models/mask_rcnn_r50_fpn.py' '../_base_/datasets/cityscapes_instance.py' '../_base_/default_runtime.py']<line_sep>model=dict(pretrained=<none> roi_head=dict(bbox_head=dict(type='Shared2FCBBoxHead' in_channels=256 fc_out_channels=1024 roi_feat_size=7 num_classes=8 bbox_coder=dict(type='DeltaXYWHBBoxCoder' target_means=[0. 0. 0. 0.] target_stds=[0.1 0.1 0.2 0.2]) reg_class_agnostic=<false> loss_cls=dict(type='CrossEntropyLoss' use_sigmoid=<false> loss_weight=1.0) loss_bbox=dict(type='SmoothL1Loss' beta=1.0 loss_weight=1.0)) mask_head=dict(type='FCNMaskHead' num_convs=4 in_channels=256 conv_out_channels=256 num_classes=8 loss_mask=dict(type='CrossEntropyLoss' use_mask=<true> loss_weight=1.0))))<line_sep># optimizer # lr is set for a batch size of 8 optimizer=dict(type='SGD' lr=0.01 momentum=0.9 weight_decay=0.0001)<line_sep>optimizer_config=dict(grad_clip=<none>)<line_sep># learning policy lr_config=dict(policy='step' warmup='linear' warmup_iters=500 warmup_ratio=0.001 # [7] yields higher performance than [6] step=[7])<line_sep>runner=dict(type='EpochBasedRunner' max_epochs=8)<line_sep># actual epoch = 8 * 8 = 64 log_config=dict(interval=100)<line_sep># For better, more stable performance initialize from COCO load_from='https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth'# noqa
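For context on the "lr is set for a batch size of 8" comment above: mmdetection configs conventionally follow the linear scaling rule, so a different total batch size would scale the learning rate proportionally. A tiny illustrative calculation (the batch size of 16 is hypothetical):

base_lr, base_batch = 0.01, 8   # values taken from the config above
my_batch = 16                   # e.g. 8 GPUs x 2 images per GPU
scaled_lr = base_lr * my_batch / base_batch
print(scaled_lr)                # 0.02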
<import_from_stmt>social.backends.dropbox DropboxOAuth<as>DropboxBackend<line_sep>
# ================================================================================================== # Copyright 2014 Twitter, Inc. # -------------------------------------------------------------------------------------------------- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this work except in compliance with the License. # You may obtain a copy of the License in the LICENSE file, or at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ================================================================================================== <def_stmt>sizeof_fmt num<block_start><for_stmt>x ('' 'KB' 'MB' 'GB')<block_start><if_stmt>num<l>1024.0<block_start><if_stmt>x<eq>''<block_start><return>"%d%s"%(num x)<block_end><else_stmt><block_start><return>"%3.1f%s"%(num x)<block_end><block_end>num<augdiv>1024.0<block_end><return>"%3.1f%s"%(num 'TB')<block_end><class_stmt>Counters(object)<block_start>ALL=-1<line_sep>WRITES=0<line_sep>READS=1<line_sep>CREATE=2<line_sep>SET_DATA=3<line_sep>GET_DATA=4<line_sep>DELETE=5<line_sep>GET_CHILDREN=6<line_sep>EXISTS=7<line_sep>CREATE_BYTES=8<line_sep>SET_DATA_BYTES=9<line_sep>GET_DATA_BYTES=10<line_sep>DELETE_BYTES=11<line_sep>GET_CHILDREN_BYTES=12<line_sep>EXISTS_BYTES=13<block_end>CountersByName={"all":Counters.ALL "writes":Counters.WRITES "reads":Counters.READS "create":Counters.CREATE "getdata":Counters.GET_DATA "setdata":Counters.SET_DATA "delete":Counters.DELETE "getchildren":Counters.GET_CHILDREN "getchildren_bytes":Counters.GET_CHILDREN_BYTES "create_bytes":Counters.CREATE_BYTES "getdata_bytes":Counters.GET_DATA_BYTES "setdata_bytes":Counters.SET_DATA_BYTES "delete_bytes":Counters.DELETE_BYTES }<def_stmt>counter_to_str counter<block_start><for_stmt>name,c CountersByName.items()<block_start><if_stmt>counter<eq>c<block_start><return>name<block_end><block_end><return>""<block_end>
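A few illustrative calls to the sizeof_fmt helper defined above, showing how the unit suffix is chosen:

print(sizeof_fmt(512))           # 512     (values below 1 KB keep the integer format, no suffix)
print(sizeof_fmt(2048))          # 2.0KB
print(sizeof_fmt(5 * 1024**2))   # 5.0MB
print(sizeof_fmt(3 * 1024**4))   # 3.0TB   (anything past GB falls through to the final TB return)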
# Generated by Django 3.1.13 on 2021-11-18 04:49 <import_stmt>django.db.models.deletion<import_from_stmt>django.db migrations<import_from_stmt>django.db models<import_from_stmt>koku.database set_partition_mode<import_from_stmt>koku.database unset_partition_mode<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("api" "0050_exchangerates") ("reporting" "0209_gcp_partables")]<line_sep>operations=[migrations.RunPython(code=set_partition_mode reverse_code=unset_partition_mode) migrations.CreateModel(name="OCPAWSStorageSummaryP" fields=[("id" models.UUIDField(primary_key=<true> serialize=<false>)) ("usage_start" models.DateField()) ("usage_end" models.DateField()) ("cluster_id" models.CharField(max_length=50 null=<true>)) ("cluster_alias" models.CharField(max_length=256 null=<true>)) ("usage_account_id" models.CharField(max_length=50)) ("product_family" models.CharField(max_length=150 null=<true>)) ("usage_amount" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("unit" models.CharField(max_length=63 null=<true>)) ("unblended_cost" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("markup_cost" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("currency_code" models.CharField(max_length=10)) ("account_alias" models.ForeignKey(null=<true> on_delete=django.db.models.deletion.DO_NOTHING to="reporting.awsaccountalias") ) ("source_uuid" models.ForeignKey(db_column="source_uuid" null=<true> on_delete=django.db.models.deletion.CASCADE to="api.provider" ) ) ] options={"db_table":"reporting_ocpaws_storage_summary_p"} ) migrations.RunSQL(sql="ALTER TABLE reporting_ocpaws_storage_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()" reverse_sql="select 1" ) migrations.CreateModel(name="OCPAWSNetworkSummaryP" fields=[("id" models.UUIDField(primary_key=<true> serialize=<false>)) ("usage_start" models.DateField()) ("usage_end" models.DateField()) ("cluster_id" models.CharField(max_length=50 null=<true>)) ("cluster_alias" models.CharField(max_length=256 null=<true>)) ("usage_account_id" models.CharField(max_length=50)) ("product_code" models.CharField(max_length=50)) ("usage_amount" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("unit" models.CharField(max_length=63 null=<true>)) ("unblended_cost" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("markup_cost" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("currency_code" models.CharField(max_length=10)) ("account_alias" models.ForeignKey(null=<true> on_delete=django.db.models.deletion.DO_NOTHING to="reporting.awsaccountalias") ) ("source_uuid" models.ForeignKey(db_column="source_uuid" null=<true> on_delete=django.db.models.deletion.CASCADE to="api.provider" ) ) ] options={"db_table":"reporting_ocpaws_network_summary_p"} ) migrations.RunSQL(sql="ALTER TABLE reporting_ocpaws_network_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()" reverse_sql="select 1" ) migrations.CreateModel(name="OCPAWSDatabaseSummaryP" fields=[("id" models.UUIDField(primary_key=<true> serialize=<false>)) ("usage_start" models.DateField()) ("usage_end" models.DateField()) ("cluster_id" models.CharField(max_length=50 null=<true>)) ("cluster_alias" models.CharField(max_length=256 null=<true>)) ("usage_account_id" models.CharField(max_length=50)) ("product_code" models.CharField(max_length=50)) ("usage_amount" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("unit" models.CharField(max_length=63 null=<true>)) ("unblended_cost" 
models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("markup_cost" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("currency_code" models.CharField(max_length=10)) ("account_alias" models.ForeignKey(null=<true> on_delete=django.db.models.deletion.DO_NOTHING to="reporting.awsaccountalias") ) ("source_uuid" models.ForeignKey(db_column="source_uuid" null=<true> on_delete=django.db.models.deletion.CASCADE to="api.provider" ) ) ] options={"db_table":"reporting_ocpaws_database_summary_p"} ) migrations.RunSQL(sql="ALTER TABLE reporting_ocpaws_database_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()" reverse_sql="select 1" ) migrations.CreateModel(name="OCPAWSCostSummaryP" fields=[("id" models.UUIDField(primary_key=<true> serialize=<false>)) ("usage_start" models.DateField()) ("usage_end" models.DateField()) ("cluster_id" models.CharField(max_length=50 null=<true>)) ("cluster_alias" models.CharField(max_length=256 null=<true>)) ("unblended_cost" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("markup_cost" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("currency_code" models.CharField(max_length=10)) ("source_uuid" models.ForeignKey(db_column="source_uuid" null=<true> on_delete=django.db.models.deletion.CASCADE to="api.provider" ) ) ] options={"db_table":"reporting_ocpaws_cost_summary_p"} ) migrations.RunSQL(sql="ALTER TABLE reporting_ocpaws_cost_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()" reverse_sql="select 1" ) migrations.CreateModel(name="OCPAWSCostSummaryByServiceP" fields=[("id" models.UUIDField(primary_key=<true> serialize=<false>)) ("usage_start" models.DateField()) ("usage_end" models.DateField()) ("cluster_id" models.CharField(max_length=50 null=<true>)) ("cluster_alias" models.CharField(max_length=256 null=<true>)) ("usage_account_id" models.CharField(max_length=50)) ("product_code" models.CharField(max_length=50)) ("product_family" models.CharField(max_length=150 null=<true>)) ("unblended_cost" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("markup_cost" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("currency_code" models.CharField(max_length=10)) ("account_alias" models.ForeignKey(null=<true> on_delete=django.db.models.deletion.DO_NOTHING to="reporting.awsaccountalias") ) ("source_uuid" models.ForeignKey(db_column="source_uuid" null=<true> on_delete=django.db.models.deletion.CASCADE to="api.provider" ) ) ] options={"db_table":"reporting_ocpaws_cost_summary_by_service_p"} ) migrations.RunSQL(sql="ALTER TABLE reporting_ocpaws_cost_summary_by_service_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()" reverse_sql="select 1" ) migrations.CreateModel(name="OCPAWSCostSummaryByRegionP" fields=[("id" models.UUIDField(primary_key=<true> serialize=<false>)) ("usage_start" models.DateField()) ("usage_end" models.DateField()) ("cluster_id" models.CharField(max_length=50 null=<true>)) ("cluster_alias" models.CharField(max_length=256 null=<true>)) ("usage_account_id" models.CharField(max_length=50)) ("region" models.CharField(max_length=50 null=<true>)) ("availability_zone" models.CharField(max_length=50 null=<true>)) ("unblended_cost" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("markup_cost" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("currency_code" models.CharField(max_length=10)) ("account_alias" models.ForeignKey(null=<true> on_delete=django.db.models.deletion.DO_NOTHING to="reporting.awsaccountalias") ) 
("source_uuid" models.ForeignKey(db_column="source_uuid" null=<true> on_delete=django.db.models.deletion.CASCADE to="api.provider" ) ) ] options={"db_table":"reporting_ocpaws_cost_summary_by_region_p"} ) migrations.RunSQL(sql="ALTER TABLE reporting_ocpaws_cost_summary_by_region_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()" reverse_sql="select 1" ) migrations.CreateModel(name="OCPAWSCostSummaryByAccountP" fields=[("id" models.UUIDField(primary_key=<true> serialize=<false>)) ("usage_start" models.DateField()) ("usage_end" models.DateField()) ("cluster_id" models.CharField(max_length=50 null=<true>)) ("cluster_alias" models.CharField(max_length=256 null=<true>)) ("usage_account_id" models.CharField(max_length=50)) ("unblended_cost" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("markup_cost" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("currency_code" models.CharField(max_length=10)) ("account_alias" models.ForeignKey(null=<true> on_delete=django.db.models.deletion.DO_NOTHING to="reporting.awsaccountalias") ) ("source_uuid" models.ForeignKey(db_column="source_uuid" null=<true> on_delete=django.db.models.deletion.CASCADE to="api.provider" ) ) ] options={"db_table":"reporting_ocpaws_cost_summary_by_account_p"} ) migrations.RunSQL(sql="ALTER TABLE reporting_ocpaws_cost_summary_by_account_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()" reverse_sql="select 1" ) migrations.CreateModel(name="OCPAWSComputeSummaryP" fields=[("id" models.UUIDField(primary_key=<true> serialize=<false>)) ("usage_start" models.DateField()) ("usage_end" models.DateField()) ("cluster_id" models.CharField(max_length=50 null=<true>)) ("cluster_alias" models.CharField(max_length=256 null=<true>)) ("usage_account_id" models.CharField(max_length=50)) ("instance_type" models.CharField(max_length=50 null=<true>)) ("resource_id" models.CharField(max_length=253 null=<true>)) ("usage_amount" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("unit" models.CharField(max_length=63 null=<true>)) ("unblended_cost" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("markup_cost" models.DecimalField(decimal_places=15 max_digits=33 null=<true>)) ("currency_code" models.CharField(max_length=10)) ("account_alias" models.ForeignKey(null=<true> on_delete=django.db.models.deletion.DO_NOTHING to="reporting.awsaccountalias") ) ("source_uuid" models.ForeignKey(db_column="source_uuid" null=<true> on_delete=django.db.models.deletion.CASCADE to="api.provider" ) ) ] options={"db_table":"reporting_ocpaws_compute_summary_p"} ) migrations.RunSQL(sql="ALTER TABLE reporting_ocpaws_compute_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()" reverse_sql="select 1" ) migrations.AddIndex(model_name="ocpawsstoragesummaryp" index=models.Index(fields=["usage_start"] name="ocpawsstorsumm_usst")) migrations.AddIndex(model_name="ocpawsstoragesummaryp" index=models.Index(fields=["product_family"] name="ocpawsstorsumm_product_fam") ) migrations.AddIndex(model_name="ocpawsnetworksummaryp" index=models.Index(fields=["usage_start"] name="ocpawsnetsumm_usst")) migrations.AddIndex(model_name="ocpawsnetworksummaryp" index=models.Index(fields=["product_code"] name="ocpawsnetsumm_product_cd") ) migrations.AddIndex(model_name="ocpawsdatabasesummaryp" index=models.Index(fields=["usage_start"] name="ocpawsdbsumm_usst")) migrations.AddIndex(model_name="ocpawsdatabasesummaryp" index=models.Index(fields=["product_code"] name="ocpawsdbsumm_product_cd") ) 
migrations.AddIndex(model_name="ocpawscostsummaryp" index=models.Index(fields=["usage_start"] name="ocpawscostsumm_usst")) migrations.AddIndex(model_name="ocpawscostsummarybyservicep" index=models.Index(fields=["usage_start"] name="ocpawscostsumm_svc_usst") ) migrations.AddIndex(model_name="ocpawscostsummarybyservicep" index=models.Index(fields=["product_code"] name="ocpawscostsumm_svc_prod_cd") ) migrations.AddIndex(model_name="ocpawscostsummarybyregionp" index=models.Index(fields=["usage_start"] name="ocpawscostsumm_reg_usst") ) migrations.AddIndex(model_name="ocpawscostsummarybyregionp" index=models.Index(fields=["region"] name="ocpawscostsumm_reg_region") ) migrations.AddIndex(model_name="ocpawscostsummarybyregionp" index=models.Index(fields=["availability_zone"] name="ocpawscostsumm_reg_zone") ) migrations.AddIndex(model_name="ocpawscostsummarybyaccountp" index=models.Index(fields=["usage_start"] name="ocpawscostsumm_acct_usst") ) migrations.AddIndex(model_name="ocpawscomputesummaryp" index=models.Index(fields=["usage_start"] name="ocpawscompsumm_usst")) migrations.AddIndex(model_name="ocpawscomputesummaryp" index=models.Index(fields=["instance_type"] name="ocpawscompsumm_insttyp") ) migrations.RunPython(code=unset_partition_mode reverse_code=set_partition_mode) ]<block_end>
""" defines: - get_solid_skin_faces(model) """<import_from_stmt>collections defaultdict<import_from_stmt>copy deepcopy<import_from_stmt>pyNastran.bdf.field_writer_8 print_card_8<import_from_stmt>pyNastran.bdf.field_writer_16 print_card_16<def_stmt>write_skin_solid_faces model skin_filename write_solids=<false> write_shells=<true> size=8 is_double=<false> encoding=<none><block_start>""" Writes the skinned elements Parameters ---------- model : BDF() the BDF object skin_filename : str the file to write write_solids : bool; default=False write solid elements that have skinned faces write_shells : bool; default=False write newly created shell elements if there are shells in the model, doesn't write these size : int; default=8 the field width is_double : bool; default=False double precision flag encoding : str; default=None -> system default the string encoding """<if_stmt>(len(model.element_ids)<eq>0<or>len(model.material_ids)<eq>0<or>len(model.property_ids)<eq>0)<block_start><return><block_end>eid_set,face_map=get_solid_skin_faces(model)<if_stmt>len(eid_set)<eq>0<block_start><return><block_end>eid_set_to_write=set()<line_sep>nid_set_to_write=set()<line_sep>mid_set_to_write=set()<if_stmt>write_solids<block_start><for_stmt>face,eids eid_set.items()<block_start>eid_set_to_write.update(eids)<for_stmt>eid eids<block_start>elem=model.elements[eid]<line_sep>pid=elem.Pid()<line_sep>prop=model.properties[pid]# PSOLID mid=prop.Mid()<line_sep>#print(prop) nid_set_to_write.update(elem.node_ids)<line_sep>mid_set_to_write.add(mid)<line_sep>#print('added_mid (a) =', mid) <block_end><block_end><block_end><elif_stmt>write_shells<block_start><for_stmt>face,eids eid_set.items()<block_start>eid_set_to_write.update(eids)<line_sep>nid_set_to_write.update(face)<for_stmt>eid eids<block_start>elem=model.elements[eid]<line_sep>pid=elem.Pid()<line_sep>prop=model.properties[pid]# PSOLID <if_stmt>prop.type<in>['PSOLID' 'PLSOLID']<block_start>mid=prop.Mid()<block_end><elif_stmt>prop.type<in>['PCOMPS' 'PCOMPLS' 'PCOMP' 'PCOMPG']<block_start>mid=prop.mids[0]<block_end><else_stmt><block_start><raise>NotImplementedError(prop)<block_end>#except TypeError: #model.log.warning('TypeError: skipping:%s' % prop) #raise #except AttributeError: #model.log.warning('skipping:%s' % prop) #continue mid_set_to_write.add(mid)<line_sep>#print('added eid=%s pid=%s mid=%s (b)' % (eid, pid, mid)) <block_end><block_end><block_end><else_stmt><block_start><raise>RuntimeError('write_solids=False write_shells=False')<block_end>eids_to_write=list(eid_set_to_write)<line_sep>nids_to_write=list(nid_set_to_write)<line_sep>mids_to_write=list(mid_set_to_write)<line_sep>#element_ids_to_delete = set(model.element_ids) - eids_to_write eid_shell=max(model.elements)+1<line_sep>pid_shell=max(model.properties)+1<line_sep>mid_shell=max(model.materials)+1<line_sep>_write_skin_solid_faces(model skin_filename face_map nids_to_write eids_to_write mids_to_write eid_set eid_shell pid_shell mid_shell write_solids=write_solids write_shells=write_shells size=size is_double=is_double encoding=encoding)<block_end><def_stmt>get_solid_skin_faces model<block_start>""" Gets the elements and faces that are skinned from solid elements. This doesn't include internal faces or existing shells. Parameters ---------- model : BDF() the BDF object Returns ------- eid_set : Dict[sorted_face] = eids sorted_face : tuple(int, int, ...) the face nids in sorted order eids : List[int] list of element ids with that face face_map : Dict[sorted_face] = face sorted_face : tuple(int, int, ...) 
the face nids in sorted order face : List(int, int, ...) the face nids """<line_sep>eid_faces=model.get_element_faces()<line_sep>face_set=defaultdict(int)<line_sep>eid_set=defaultdict(list)<line_sep>face_map={}<for_stmt>eid,face eid_faces#print(eid, face) <block_start>raw_face=deepcopy(face)<try_stmt><block_start>face.sort()<block_end><except_stmt>Exception<block_start>print('face = %s'%str(face))<line_sep><raise><block_end>tface=tuple(face)<line_sep>#print(tface) face_set[tface]<augadd>1<line_sep>eid_set[tface].append(eid)<line_sep>face_map[tface]=raw_face<block_end>#print('eid_set:') #for tface, eidset in eid_set.items(): #print(tface, eidset) #print('face_set:') #for tface, faceset in face_set.items(): #print(tface, faceset) #print('face_map:') #for tface, facemap in face_map.items(): #print(tface, facemap) del_faces=[]<for_stmt>face,face_count face_set.items()<block_start><if_stmt>face_count<eq>2<block_start>del_faces.append(face)<block_end><block_end><for_stmt>face del_faces<block_start><del_stmt>face_set[face]<del_stmt>eid_set[face]<block_end><return>eid_set face_map<block_end><def_stmt>_write_skin_solid_faces model skin_filename face_map nids_to_write eids_to_write mids_to_write eid_set eid_shell pid_shell mid_shell write_solids=<false> write_shells=<true> size=8 is_double=<false> encoding=<none><block_start>""" helper method for ``write_skin_solid_faces`` Parameters ---------- model : BDF() the BDF object skin_filename : str the file to write face_map : dict[sorted_face] : face sorted_face : List[int, int, int] / List[int, int, int, int] face : List[int, int, int] / List[int, int, int, int] nids_to_write : List[int, int, ...] list of node ids to write eids_to_write : List[int, int, ...] list of element ids to write mids_to_write : List[int, int, ...] list of material ids to write eid_set : dict[face] : eids ??? 
eid_shell : int the next id to use for the shell id pid_shell : int the next id to use for the shell property mid_shell : int the next id to use for the shell material write_solids : bool; default=False write solid elements that have skinned faces write_shells : bool; default=True write shell elements size : int; default=8 the field width is_double : bool; default=False double precision flag encoding : str; default=None -> system default the string encoding """<line_sep>encoding=model.get_encoding(encoding)<with_stmt>open(skin_filename 'w' encoding=encoding)<as>bdf_file<block_start>bdf_file.write('$ pyNastran: punch=True\n')<for_stmt>nid sorted(nids_to_write)<block_start><if_stmt>nid<is><none><block_start><continue><block_end>node=model.nodes[nid]<line_sep>bdf_file.write(node.write_card(size=size is_double=is_double))<block_end><for_stmt>cid,coord model.coords.items()<block_start><if_stmt>cid<eq>0<block_start><continue><block_end>bdf_file.write(coord.write_card(size=size is_double=is_double))<block_end><if_stmt>write_solids<block_start><for_stmt>eid sorted(eids_to_write)<block_start>elem=model.elements[eid]<line_sep>bdf_file.write(elem.write_card(size=size))<block_end><for_stmt>pid,prop model.properties.items()<block_start>bdf_file.write(prop.write_card(size=size is_double=is_double))<block_end><for_stmt>mid sorted(mids_to_write)<block_start>material=model.materials[mid]<line_sep>bdf_file.write(material.write_card(size=size is_double=is_double))<block_end><del_stmt>eid pid mid<block_end><if_stmt>write_shells<block_start>mids_to_write.sort()<for_stmt>imid,mid enumerate(mids_to_write)<block_start>card=['PSHELL' pid_shell+imid mid_shell+imid 0.1]<try_stmt><block_start>msg=print_card_8(card)<block_end><except_stmt>RuntimeError<block_start>msg=print_card_16(card)<block_end>bdf_file.write(msg)<line_sep>card=['MAT1' mid_shell+imid 3.e7 <none> 0.3]<line_sep>#bdf_file.write(model.materials[mid].comment) <try_stmt><block_start>msg=print_card_8(card)<block_end><except_stmt>RuntimeError<block_start>msg=print_card_16(card)<block_end>bdf_file.write(msg)<block_end><for_stmt>face,eids eid_set.items()<block_start>face_raw=face_map[face]<line_sep>nface=len(face)<line_sep>#print("eids =", eids) #assert len(eids) == 1, eids #for eid in sorted(eids): #elem = model.elements[eid] #print(elem) #break <assert_stmt>len(eids)<eq>1 eids<line_sep>elem=model.elements[eids[0]]<line_sep>#pid = next(model.properties.keys()) pid=elem.Pid()<line_sep>prop=model.properties[pid]<if_stmt>prop.type<in>['PSOLID']# 'PSHELL', <block_start>mid=prop.Mid()<block_end><elif_stmt>prop.type<in>['PCOMPS' 'PCOMPLS']# 'PSHELL', #print(prop.get_stats()) <block_start>mid=prop.Mid()<block_end>#elif prop.type in ['PCOMP', 'PCOMPG']: #mid = prop.mids[0] <else_stmt><block_start><raise>NotImplementedError(prop)<block_end>#print('mids_to_write = %s' % mids_to_write) #print('mids = ', model.materials.keys()) imid=mids_to_write.index(mid)<if_stmt>nface<eq>3<block_start>card=['CTRIA3' eid_shell pid_shell+imid]+list(face_raw)<block_end><elif_stmt>nface<eq>4<block_start>card=['CQUAD4' eid_shell pid_shell+imid]+list(face_raw)<block_end><elif_stmt>nface<eq>4<block_start>card=['CQUAD4' eid_shell pid_shell+imid]+list(face_raw)<block_end><elif_stmt>nface<eq>6<block_start>card=['CTRIA6' eid_shell pid_shell+imid]+list(face_raw)<block_end><elif_stmt>nface<eq>8<block_start>card=['CQUAD8' eid_shell pid_shell+imid]+list(face_raw)<block_end><else_stmt><block_start><raise>NotImplementedError('face=%s len(face)=%s'%(face 
nface))<block_end><try_stmt><block_start>msg=print_card_8(card)<block_end><except_stmt>RuntimeError<block_start>msg=print_card_16(card)<block_end>bdf_file.write(msg)<line_sep>eid_shell<augadd>1<line_sep>#elem = model.elements[eid] #bdf_file.write(elem.write_card(size=size)) <block_end>#for pid, prop in model.properties.items(): #bdf_file.write(prop.write_card(size=size, is_double=is_double)) <block_end>bdf_file.write('ENDDATA\n')<block_end>#if 0: #model = model.__class__.__init__() #model.read_bdf(skin_filename) <block_end>
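The core idea in get_solid_skin_faces above: a face shared by exactly two solid elements is interior and gets dropped, while a face seen only once lies on the skin. A self-contained sketch of that counting on hypothetical face data:

# Self-contained sketch of the skin-face counting used in
# get_solid_skin_faces(); the (eid, face) pairs here are hypothetical.
from collections import defaultdict

# two tetrahedra sharing the face (2, 3, 4)
eid_faces = [
    (1, [1, 2, 3]), (1, [1, 2, 4]), (1, [1, 3, 4]), (1, [2, 3, 4]),
    (2, [2, 3, 4]), (2, [2, 3, 5]), (2, [2, 4, 5]), (2, [3, 4, 5]),
]

face_count = defaultdict(int)
face_eids = defaultdict(list)
for eid, face in eid_faces:
    key = tuple(sorted(face))      # sort so the two orientations hash the same
    face_count[key] += 1
    face_eids[key].append(eid)

# faces counted twice are interior; everything else is on the skin
skin = {face: eids for face, eids in face_eids.items() if face_count[face] == 1}
print(sorted(skin))  # (2, 3, 4) is missing: it is the shared interior face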
<import_from_future_stmt> print_function<def_stmt>printMat M Ncol=7 title=<none><block_start><if_stmt>title<block_start>print(title+'\n')<block_end><for_stmt>row range(M.shape[0])<block_start>tab=0<for_stmt>col range(M.shape[1])<block_start>tab<augadd>1<line_sep>print(" %10.6f"%M[row col] end='')<if_stmt>tab<eq>Ncol<and>col<ne>(M.shape[1]-1)<block_start>print()<line_sep>tab=0<block_end><block_end>print()<block_end><return><block_end><def_stmt>printMatString M Ncol=7 title=<none><block_start><if_stmt>title<block_start>print(title+'\n')<block_end>s=''<for_stmt>row range(M.shape[0])<block_start>tab=0<for_stmt>col range(M.shape[1])<block_start>tab<augadd>1<line_sep>s<augadd>" %10.6f"%M[row col]<if_stmt>tab<eq>Ncol<and>col<ne>(M.shape[1]-1)<block_start>s<augadd>'\n'<line_sep>tab=0<block_end><block_end>s<augadd>'\n'<block_end><return>s<block_end><def_stmt>printArray M Ncol=7 title=<none><block_start><if_stmt>title<block_start>print(title+'\n')<block_end>tab=0<for_stmt>col,entry enumerate(M)<block_start>tab<augadd>1<line_sep>print(" %10.6f"%M[col] end='')<if_stmt>tab<eq>Ncol<and>col<ne>(len(M)-1)<block_start>print()<line_sep>tab=0<block_end><block_end>print()<line_sep><return><block_end><def_stmt>printArrayString M Ncol=7 title=<none><block_start><if_stmt>title<block_start>print(title+'\n')<block_end>tab=0<line_sep>s=''<for_stmt>i,entry enumerate(M)<block_start>tab<augadd>1<line_sep>s<augadd>" %10.6f"%entry<if_stmt>tab<eq>Ncol<and>i<ne>(len(M)-1)<block_start>s<augadd>'\n'<line_sep>tab=0<block_end><block_end>s<augadd>'\n'<line_sep><return>s<block_end><def_stmt>printGeomGrad geom grad<block_start>print("\tGeometry and Gradient\n")<line_sep>Natom=geom.shape[0]<for_stmt>i range(Natom)<block_start>print("\t%20.10f%20.10f%20.10f\n"%(geom[i 0] geom[i 1] geom[i 2]))<block_end>print("\n")<for_stmt>i range(Natom)<block_start>print("\t%20.10f%20.10f%20.10f\n"%(grad[3<times>i+0] grad[3<times>i+1] grad[3<times>i+2]))<block_end><block_end>
<import_stmt>unittest<import_from_stmt>kinto.core.testing get_user_headers<import_from_stmt>.support MINIMALIST_BUCKET MINIMALIST_COLLECTION MINIMALIST_GROUP MINIMALIST_RECORD BaseWebTest <class_stmt>PermissionsTest(BaseWebTest unittest.TestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>super().setUpClass()<line_sep>cls.alice_headers={**cls.headers **get_user_headers("alice")}<line_sep>cls.bob_headers={**cls.headers **get_user_headers("bob")}<line_sep>cls.alice_principal=("basicauth:<PASSWORD>")<line_sep>cls.bob_principal=("basicauth:<PASSWORD>")<block_end><block_end><class_stmt>BucketPermissionsTest(PermissionsTest)<block_start><def_stmt>setUp self<block_start>bucket={**MINIMALIST_BUCKET "permissions":{"read":[self.alice_principal]}}<line_sep>self.app.put_json("/buckets/sodas" bucket headers=self.headers)<block_end><def_stmt>test_creation_is_allowed_to_authenticated_by_default self<block_start>self.app.put_json("/buckets/beer" MINIMALIST_BUCKET headers=self.headers)<block_end><def_stmt>test_current_user_receives_write_permission_on_creation self<block_start>resp=self.app.put_json("/buckets/beer" MINIMALIST_BUCKET headers=self.headers)<line_sep>permissions=resp.json["permissions"]<line_sep>self.assertIn(self.principal permissions["write"])<block_end><def_stmt>test_can_read_if_allowed self<block_start>self.app.get("/buckets/sodas" headers=self.alice_headers)<block_end><def_stmt>test_cannot_write_if_not_allowed self<block_start>self.app.put_json("/buckets/sodas" MINIMALIST_BUCKET headers=self.alice_headers status=403)<block_end><def_stmt>test_permissions_are_not_returned_if_can_only_read self<block_start>resp=self.app.get("/buckets/sodas" headers=self.alice_headers)<line_sep>self.assertEqual(resp.json["permissions"] {})<block_end><def_stmt>test_permissions_are_returned_if_can_write self<block_start>resp=self.app.get("/buckets/sodas" headers=self.headers)<line_sep>self.assertIn("write" resp.json["permissions"])<block_end><def_stmt>test_cannot_post_existing_id_if_cannot_read self<block_start>self.app.get("/buckets/sodas" headers=self.bob_headers status=403)<line_sep>self.app.post_json("/buckets" {"data":{"id":"sodas"}} headers=self.bob_headers status=403)<block_end><def_stmt>test_can_post_existing_id_if_can_read self<block_start>self.app.patch_json("/buckets/sodas" {"data":{"marker":<true>} "permissions":{"read":["system.Authenticated"]}} headers=self.headers )<line_sep>resp=self.app.post_json("/buckets" {"data":{"id":"sodas"}} headers=self.bob_headers)<assert_stmt>resp.json["data"]["marker"]<block_end><block_end><class_stmt>CollectionPermissionsTest(PermissionsTest)<block_start><def_stmt>setUp self<block_start>bucket={**MINIMALIST_BUCKET "permissions":{"read":[self.alice_principal] "write":[self.bob_principal]} }<line_sep>self.app.put_json("/buckets/beer" bucket headers=self.headers)<line_sep>self.app.put_json("/buckets/beer/collections/barley" MINIMALIST_COLLECTION headers=self.headers )<block_end><def_stmt>test_passing_unicode_on_parent_id_is_supported self<block_start>self.app.get("/buckets/block%C2%93%C2%96sts/collections/barley" headers=self.alice_headers status=403 )<block_end><def_stmt>test_read_is_allowed_if_read_on_bucket self<block_start>self.app.get("/buckets/beer/collections/barley" headers=self.alice_headers)<block_end><def_stmt>test_read_is_allowed_if_write_on_bucket self<block_start>self.app.get("/buckets/beer/collections/barley" headers=self.bob_headers)<block_end><def_stmt>test_cannot_read_if_not_allowed self<block_start>headers={**self.headers 
**get_user_headers("jean-louis")}<line_sep>self.app.get("/buckets/beer/collections/barley" headers=headers status=403)<block_end><def_stmt>test_cannot_write_if_not_allowed self<block_start>self.app.put_json("/buckets/beer/collections/barley" MINIMALIST_COLLECTION headers=self.alice_headers status=403 )<block_end><def_stmt>test_permission_backend_prevent_sql_injections self<block_start>self.app.get("/buckets/beer'" headers=self.headers status=403)<line_sep>self.app.get("/buckets/beer'/collections/barley" headers=self.headers status=403)<line_sep>self.app.get("/buckets/beer'/groups/barley" headers=self.headers status=403)<line_sep>self.app.get("/buckets/beer/collections/barley'" headers=self.headers status=400)<line_sep># XXX: We should validate the collection ID on the records collection endpoint. #1077 self.app.get("/buckets/beer/collections/barley'/records" headers=self.headers status=404 )<line_sep>self.app.get("/buckets/beer/groups/barley'" headers=self.headers status=400)<block_end><block_end><class_stmt>GroupPermissionsTest(PermissionsTest)<block_start><def_stmt>setUp self<block_start>bucket={**MINIMALIST_BUCKET "permissions":{"read":[self.alice_principal] "write":[self.bob_principal]} }<line_sep>self.app.put_json("/buckets/beer" bucket headers=self.headers)<line_sep>self.app.put_json("/buckets/beer/groups/moderators" MINIMALIST_GROUP headers=self.headers)<block_end><def_stmt>test_creation_is_allowed_if_write_on_bucket self<block_start>self.app.post_json("/buckets/beer/groups" MINIMALIST_GROUP headers=self.headers)<block_end><def_stmt>test_read_is_allowed_if_read_on_bucket self<block_start>self.app.get("/buckets/beer/groups/moderators" headers=self.alice_headers)<block_end><def_stmt>test_read_is_allowed_if_write_on_bucket self<block_start>self.app.get("/buckets/beer/groups/moderators" headers=self.bob_headers)<block_end><def_stmt>test_cannot_read_if_not_allowed self<block_start>headers={**self.headers **get_user_headers("jean-louis")}<line_sep>self.app.get("/buckets/beer/groups/moderators" headers=headers status=403)<block_end><def_stmt>test_cannot_write_if_not_allowed self<block_start>self.app.put_json("/buckets/beer/groups/moderators" MINIMALIST_GROUP headers=self.alice_headers status=403 )<block_end><def_stmt>test_creation_is_forbidden_is_no_write_on_bucket self<block_start>self.app.post_json("/buckets/beer/groups" MINIMALIST_GROUP headers=self.alice_headers status=403 )<block_end><block_end><class_stmt>RecordPermissionsTest(PermissionsTest)<block_start><def_stmt>setUp self<block_start>bucket={**MINIMALIST_BUCKET "permissions":{"write":[self.alice_principal]}}<line_sep>self.app.put_json("/buckets/beer" bucket headers=self.headers)<line_sep>collection={**MINIMALIST_COLLECTION "permissions":{"write":[self.bob_principal]} }<line_sep>self.app.put_json("/buckets/beer/collections/barley" collection headers=self.headers)<block_end><def_stmt>test_creation_is_allowed_if_write_on_bucket self<block_start>self.app.post_json("/buckets/beer/collections/barley/records" MINIMALIST_RECORD headers=self.alice_headers )<block_end><def_stmt>test_creation_is_allowed_if_write_on_collection self<block_start>self.app.post_json("/buckets/beer/collections/barley/records" MINIMALIST_RECORD headers=self.bob_headers )<block_end><def_stmt>test_creation_is_forbidden_is_no_write_on_bucket_nor_collection self<block_start>headers={**self.headers **get_user_headers("jean-louis")}<line_sep>self.app.post_json("/buckets/beer/collections/barley/records" MINIMALIST_RECORD headers=headers status=403 
)<block_end><def_stmt>test_record_permissions_are_modified_by_patch self<block_start>collection_url="/buckets/beer/collections/barley/records"<line_sep>resp=self.app.post_json(collection_url MINIMALIST_RECORD headers=self.headers)<line_sep>record=resp.json["data"]<line_sep>resp=self.app.patch_json("{}/{}".format(collection_url record["id"]) {"permissions":{"read":["fxa:user"]}} headers=self.headers )<line_sep>self.assertIn("fxa:user" resp.json["permissions"]["read"])<block_end><block_end><class_stmt>ChildrenCreationTest(PermissionsTest)<block_start><def_stmt>setUp self<block_start>self.app.put_json("/buckets/create" {"permissions":{"group:create":["system.Authenticated"]}} headers=self.alice_headers )<line_sep>self.app.put_json("/buckets/write" {"permissions":{"write":["system.Authenticated"]}} headers=self.alice_headers )<line_sep>self.app.put_json("/buckets/read" {"permissions":{"read":["system.Authenticated"]}} headers=self.alice_headers )<for_stmt>parent ("create" "write" "read")<block_start>self.app.put_json("/buckets/{}/groups/child".format(parent) MINIMALIST_GROUP headers=self.alice_headers )<block_end>self.bob_headers_safe_creation=dict({"If-None-Match":"*"} **self.bob_headers)<block_end><def_stmt>test_cannot_read_others_objects_if_only_allowed_to_create self<block_start>self.app.get("/buckets/create/groups/child" headers=self.bob_headers status=403)<block_end><def_stmt>test_safe_creation_with_put_returns_412_if_allowed_to_create self<block_start>self.app.put_json("/buckets/create/groups/child" MINIMALIST_GROUP headers=self.bob_headers_safe_creation status=412 )<block_end><def_stmt>test_safe_creation_with_post_returns_412_if_allowed_to_create_and_read self<block_start>self.app.patch_json("/buckets/create/groups/child" {"permissions":{"read":["system.Authenticated"]}} headers=self.alice_headers )<line_sep>self.app.post_json("/buckets/create/groups" {"data":{"id":"child" "members":[]}} headers=self.bob_headers_safe_creation status=412 )<block_end><def_stmt>test_safe_creation_with_put_returns_412_if_allowed_to_write self<block_start>self.app.put_json("/buckets/write/groups/child" MINIMALIST_GROUP headers=self.bob_headers_safe_creation status=412 )<block_end><def_stmt>test_safe_creation_with_post_returns_412_if_allowed_to_write self<block_start>self.app.post_json("/buckets/write/groups" {"data":{"id":"child" "members":[]}} headers=self.bob_headers_safe_creation status=412 )<block_end><def_stmt>test_safe_creation_with_put_returns_403_if_only_allowed_to_read self<block_start>self.app.put_json("/buckets/read/groups/child" MINIMALIST_GROUP headers=self.bob_headers_safe_creation status=403 )<block_end><def_stmt>test_safe_creation_with_post_returns_403_if_not_allowed_to_read self<block_start>self.app.post_json("/buckets/create/groups" {"data":{"id":"child" "members":[]}} headers=self.bob_headers_safe_creation status=403 )<block_end><def_stmt>test_safe_creation_with_post_returns_412_if_only_allowed_to_read self<block_start>self.app.post_json("/buckets/read/groups" {"data":{"id":"child" "members":[]}} headers=self.bob_headers_safe_creation status=412 )<block_end><def_stmt>test_delete_returns_404_on_unknown_if_only_allowed_to_read self<block_start>self.app.delete("/buckets/read/groups/g1" headers=self.bob_headers status=404)<block_end><def_stmt>test_patch_returns_404_on_unknown_if_only_allowed_to_read self<block_start>self.app.patch_json("/buckets/read/groups/g1" {"data":{"members":[]}} headers=self.bob_headers status=404 
)<block_end><block_end><class_stmt>ParentMetadataTest(PermissionsTest)<block_start><def_stmt>setUp self<block_start>self.app.put_json("/buckets/beer" {"permissions":{"collection:create":[self.bob_principal]}} headers=self.headers )<line_sep>self.app.put_json("/buckets/beer/collections/wheat" headers=self.headers)<line_sep>self.app.put_json("/buckets/beer/collections/root" headers=self.headers)<line_sep>self.app.put_json("/buckets/beer/collections/barley" {"permissions":{"record:create":[self.alice_principal]}} headers=self.bob_headers )<block_end><def_stmt>test_parent_metadata_can_be_read_if_allowed_to_create_child self<block_start>self.app.get("/buckets/beer" headers=self.bob_headers)<line_sep>self.app.get("/buckets/beer/collections/barley" headers=self.alice_headers)<block_end><def_stmt>test_parent_metadata_cannot_be_read_if_not_allowed_to_create_child self<block_start>self.app.get("/buckets/beer" headers=get_user_headers("jean:paul") status=403)<line_sep>self.app.get("/buckets/beer/collections/barley" headers=get_user_headers("mahmud:hatim") status=403 )<block_end><def_stmt>test_list_can_be_obtained_if_allowed_to_create self<block_start>resp=self.app.get("/buckets/beer/collections" headers=self.bob_headers)<line_sep>self.assertEqual(len(resp.json["data"]) 1)<line_sep>self.assertEqual(resp.json["data"][0]["id"] "barley")<line_sep>resp=self.app.get("/buckets/beer/collections/barley/records" headers=self.alice_headers)<line_sep>self.assertEqual(resp.json["data"] [])<block_end><def_stmt>test_list_is_denied_if_not_allowed_to_create self<block_start>self.app.get("/buckets/beer/collections" headers=get_user_headers("jean:paul") status=403 )<line_sep>self.app.get("/buckets/beer/collections/barley/records" headers=get_user_headers("mahmud:hatim") status=403 )<block_end><block_end><class_stmt>DisabledExplicitPermissionsTest(BaseWebTest unittest.TestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>super().setUpClass()<line_sep>cls.alice_headers={**cls.headers **get_user_headers("alice")}<line_sep>cls.alice_principal=("basicauth:d5b0026601f1b251974e09548d44155e16812e3c64ff7ae053fe3542e2ca1570")<block_end>@classmethod<def_stmt>get_app_settings cls extras=<none><block_start>settings=super().get_app_settings(extras)<line_sep>settings["explicit_permissions"]="false"<line_sep>settings["experimental_permissions_endpoint"]="true"<line_sep><return>settings<block_end><def_stmt>setUp self<block_start>self.app.put_json("/buckets/write" {"permissions":{"write":["system.Authenticated"]}} headers=self.headers )<line_sep>self.app.put_json("/buckets/write/collections/test" {"permissions":{"write":["system.Authenticated"]}} headers=self.alice_headers )<block_end><def_stmt>test_can_create_and_access_child_object self<block_start>self.app.put("/buckets/write/collections/test/records/1" headers=self.alice_headers )<line_sep>self.app.get("/buckets/write/collections/test/records/1" headers=self.alice_headers )<block_end><def_stmt>test_current_user_is_not_added_to_object_permissions self<block_start>resp=self.app.put_json("/buckets/write/collections/test/records/1" {"permissions":{"write":["system.Authenticated"] "read":["ldap:chantal"]}} headers=self.alice_headers )<line_sep>self.assertEqual(resp.json["permissions"] {"write":["system.Authenticated"] "read":["ldap:chantal"]} )<block_end><def_stmt>test_child_objects_are_not_listed_in_permission_endpoint self<block_start>self.app.put("/buckets/write/collections/test/records/1" headers=self.alice_headers )<line_sep>resp=self.app.get("/permissions" 
headers=self.alice_headers)<line_sep>perms=resp.json["data"]<line_sep>self.assertEqual(sorted(p["uri"]<for>p perms) ["/" "/buckets/write" "/buckets/write/collections/test"] )<block_end><def_stmt>test_write_via_groups self<block_start>self.app.put_json("/buckets/viagroup" {"permissions":{"write":[self.principal]}} headers=self.headers )<line_sep>self.app.put_json("/buckets/viagroup/collections/c" {"permissions":{"write":["/buckets/viagroup/groups/editors"]}} headers=self.headers )<line_sep>self.app.put_json("/buckets/viagroup/groups/editors" {"data":{"members":[self.alice_principal]}} headers=self.headers )<line_sep>self.app.post_json("/buckets/viagroup/collections/c/records" {} headers=self.alice_headers )<block_end><block_end>
name='causalml'<line_sep>__version__='0.11.1'<line_sep>__all__=['dataset' 'features' 'feature_selection' 'inference' 'match' 'metrics' 'optimize' 'propensity']<line_sep>
""" extract indexes for alignment. """<import_stmt>argparse<import_stmt>glob<import_stmt>multiprocessing<import_from_stmt>functools partial<import_from_stmt>pathlib Path<import_from_stmt>pprint pprint<import_from_stmt>typing Tuple<import_stmt>librosa<import_stmt>numpy<import_stmt>tqdm<import_from_stmt>yukarin.acoustic_feature AcousticFeature<import_from_stmt>yukarin.align_indexes AlignIndexes<import_from_stmt>yukarin.param AcousticParam<import_from_stmt>yukarin.utility.json_utility save_arguments<line_sep>base_acoustic_param=AcousticParam()<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--input_feature_glob1' '-if1')<line_sep>parser.add_argument('--input_feature_glob2' '-if2')<line_sep>parser.add_argument('--input_indexes' '-ii')<line_sep>parser.add_argument('--output' '-o' type=Path)<line_sep>parser.add_argument('--sampling_rate' type=int default=base_acoustic_param.sampling_rate)<line_sep>parser.add_argument('--frame_period' type=float default=base_acoustic_param.frame_period)<line_sep>parser.add_argument('--alpha' type=float default=base_acoustic_param.alpha)<line_sep>parser.add_argument('--disable_overwrite' action='store_true')<line_sep>arguments=parser.parse_args()<def_stmt>generate_aligned_wave pair_path:Tuple[Path Path Path] sampling_rate:int frame_period:float alpha:float <block_start>path_feature1,path_feature2,path_indexes=pair_path<if_stmt>path_feature1.stem<ne>path_feature2.stem<block_start>print('warning: the file names are different' path_feature1 path_feature2)<block_end><if_stmt>path_feature1.stem<ne>path_indexes.stem<block_start>print('warning: the file names are different' path_feature1 path_indexes)<block_end>out=Path(arguments.output path_indexes.stem+'.wav')<if_stmt>arguments.disable_overwrite<block_start><return><block_end>feature1=AcousticFeature.load(path=path_feature1)<line_sep>feature2=AcousticFeature.load(path=path_feature2)<line_sep>feature1.sp=AcousticFeature.mc2sp(feature1.mc sampling_rate=sampling_rate alpha=alpha)<line_sep>feature2.sp=AcousticFeature.mc2sp(feature2.mc sampling_rate=sampling_rate alpha=alpha)<line_sep>feature1.ap=AcousticFeature.decode_ap(feature1.coded_ap sampling_rate=sampling_rate)<line_sep>feature2.ap=AcousticFeature.decode_ap(feature2.coded_ap sampling_rate=sampling_rate)<line_sep>align_indexes=AlignIndexes.load(path=path_indexes)<line_sep>align_indexes.feature1=feature1<line_sep>align_indexes.feature2=feature2<line_sep>wave1=align_indexes.get_aligned_feature1().decode(sampling_rate=sampling_rate frame_period=frame_period)<line_sep>wave2=align_indexes.get_aligned_feature2().decode(sampling_rate=sampling_rate frame_period=frame_period)<line_sep># save y=numpy.vstack([wave1.wave wave2.wave])<line_sep>librosa.output.write_wav(str(out) y sr=sampling_rate)<block_end><def_stmt>main <block_start>pprint(vars(arguments))<line_sep>arguments.output.mkdir(exist_ok=<true>)<line_sep>save_arguments(arguments arguments.output/'arguments.json')<line_sep>path_feature1={Path(p).stem:Path(p)<for>p glob.glob(arguments.input_feature_glob1)}<line_sep>path_feature2={Path(p).stem:Path(p)<for>p glob.glob(arguments.input_feature_glob2)}<line_sep>path_indexes={Path(p).stem:Path(p)<for>p glob.glob(arguments.input_indexes)}<line_sep>fn_both_list=set(path_feature1.keys())&set(path_indexes.keys())<line_sep>pool=multiprocessing.Pool()<line_sep>generate=partial(generate_aligned_wave sampling_rate=arguments.sampling_rate frame_period=arguments.frame_period alpha=arguments.alpha )<line_sep>it=pool.imap(generate ((path_feature1[fn] 
path_feature2[fn] path_indexes[fn])<for>fn fn_both_list))<line_sep>list(tqdm.tqdm(it total=len(fn_both_list)))<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
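# An illustrative invocation of the extraction script above; the script filename and
# data paths are placeholders, while the flags mirror its argparse definitions.
#
#   python extract_align_indexes.py \
#       -if1 'features1/*.npy' -if2 'features2/*.npy' -ii 'indexes/*.npy' \
#       -o aligned_waves/ --sampling_rate 24000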
# Graphics / Primitives # Use the #graphics module to render and update shapes. # --- <import_from_stmt>h2o_wave site ui graphics<as>g<line_sep># Create some shapes arc=g.arc(r1=25 r2=50 a1=90 a2=180)<line_sep>circle=g.circle(cx=25 cy=25 r=25)<line_sep>ellipse=g.ellipse(cx=25 cy=25 rx=25 ry=20)<line_sep>image=g.image(width=50 height=50 href='https://www.python.org/static/community_logos/python-powered-h-140x182.png')<line_sep>line=g.line(x1=0 y1=0 x2=50 y2=50)<line_sep>path=g.path(d='M 0,0 L 50,50 L 50,0 L 0,50 z')<line_sep>path2=g.path(d=g.p().M(0 0).L(50 50).L(50 0).L(0 50).z().d())# same effect as above, but programmable. path3=g.p().M(0 0).L(50 50).L(50 0).L(0 50).z().path()# same effect as above, but a tad more concise. polygon=g.polygon(points='0,0 50,50 50,0 0,50')<line_sep>polyline=g.polyline(points='0,0 50,50 50,0 0,50')<line_sep>rect=g.rect(x=0 y=0 width=50 height=50)<line_sep>rounded_rect=g.rect(x=0 y=0 width=50 height=50 rx=10)<line_sep>text=g.text(x=0 y=48 text='Z' font_size='4em')<line_sep># Collect 'em all shapes=[arc circle ellipse image line path path2 path3 polygon polyline rect rounded_rect text]<line_sep># Apply fill/stroke for each shape <for_stmt>shape shapes<block_start>shape.fill='none'<if>g.type_of(shape)<eq>'polyline'<else>'crimson'<line_sep>shape.stroke='darkred'<line_sep>shape.stroke_width=5<block_end># Lay out shapes vertically y=10<for_stmt>shape shapes<block_start>shape.transform=f'translate(10,{y})'<line_sep>y<augadd>60<block_end># Add shapes to the graphics card page=site['/demo']<line_sep>page['example']=ui.graphics_card(box='1 1 1 10' view_box='0 0 70 800' width='100%' height='100%' stage=g.stage(arc=arc circle=circle ellipse=ellipse image=image line=line path=path path2=path2 path3=path3 polygon=polygon polyline=polyline rect=rect rounded_rect=rounded_rect text=text ) )<line_sep>page.save()<line_sep>
<import_stmt>demistomock<as>demisto<import_from_stmt>CommonServerPython *<import_stmt>urllib3<import_from_stmt>typing Any Dict<line_sep># Disable insecure warnings urllib3.disable_warnings()<class_stmt>Client(BaseClient)<block_start>@logger<def_stmt>__init__ self headers verify=<false> proxy=<false><block_start>url='https://api.cloudconvert.com/v2'<line_sep>super().__init__(url headers=headers verify=verify proxy=proxy)<block_end>@logger<def_stmt>upload_url self arguments:Dict[str Any]<arrow>Dict[str Any]<block_start>""" Upload the file given as url to the API's server, for later conversion. Note - this operation is called 'import' by the API. Args: arguments: dict containing the request arguments, should contain the field 'url' Returns: dict containing the results of the upload action as returned from the API (status, task ID, etc.) ``Dict[str, Any]`` """<line_sep><return>self._http_request(method='POST' url_suffix='import/url' data=arguments ok_codes=(200 201 422) )<block_end>@logger<def_stmt>upload_entry_id self file_path:str file_name:str<arrow>Dict[str Any]<block_start>""" Upload the file given as a war room entry id to the API's server, for later conversion Note - this operation is called 'import' by the API. Args: file_path: path to given file, derived from the entry id file_name: name of file, including format suffix Returns: dict containing the results of the upload action as returned from the API (status, task ID, etc.) ``Dict[str, Any]`` """<line_sep>response_get_form=self._http_request(method='POST' url_suffix='import/upload')<line_sep>form=dict_safe_get(response_get_form ('data' 'result' 'form') default_return_value={})<line_sep>port_url=form.get('url')<line_sep>params=form.get('parameters')<if_stmt>port_url<is><none><or>params<is><none><block_start><raise>ValueError('Failed to initiate an upload operation')<block_end>file_dict={'file':(file_name open(file_path 'rb'))}<line_sep>self._http_request(method='POST' url_suffix=<none> full_url=port_url files=file_dict empty_valid_codes=[201 204] return_empty_response=<true> data=params)<line_sep># As shown, this operation has two requests # The data about the operation is within the first request's response, # So in order to keep the operation's data, we should return the first request's response, # But first we should remove fields that are no longer true, such as ones that indicates that # The second request has not been done yet <if_stmt>response_get_form.get('data')<block_start>response_get_form.get('data').pop('message' <none>)<line_sep>response_get_form.get('data').pop('result' <none>)<block_end><return>response_get_form<block_end>@logger<def_stmt>convert self arguments:Dict[str Any]<arrow>Dict[str Any]<block_start>""" Convert a file to desired format, given the file was priorly uploaded to the API's server Args: arguments: dict containing the request arguments, should contain the fields 'task_id' and 'output_format' Returns: dict containing the results of the convert action as returned from the API (status, task ID, etc.) 
``Dict[str, Any]`` """<line_sep>arguments['input']=arguments.pop('task_id')<line_sep><return>self._http_request(method='POST' url_suffix='convert' data=arguments ok_codes=(200 201 422) )<block_end><def_stmt>check_status self arguments:Dict[str Any]<arrow>Dict[str Any]<block_start>""" Check the status of a request sent to the API's server Args: arguments: dict containing the request arguments, should contain the field 'task_id' Returns: dict containing the results of the check status action as returned from the API (status, task ID, etc.) ``Dict[str, Any]`` """<line_sep>task_id=arguments.get('task_id')<line_sep><return>self._http_request(method='GET' url_suffix=f'/tasks/{task_id}' ok_codes=(200 201 422) )<block_end>@logger<def_stmt>download_url self arguments:Dict[str Any]<arrow>Dict[str Any]<block_start>""" Download a converted file to a url Note - this operation is called 'export' by the API. Args: arguments: dict containing the request arguments, should contain the field 'task_id' of the desired file Returns: dict containing the results of the download action as returned from the API (status, task ID, etc.) if the action was complete, the result url will be a part of this dict. If the request is pending, one should retrieve the url via the 'check_status' command ``Dict[str, Any]`` """<line_sep>arguments['input']=arguments.pop('task_id')<line_sep><return>self._http_request(method='POST' url_suffix='/export/url' data=arguments ok_codes=(200 201 422) )<block_end>@logger<def_stmt>get_file_from_url self url:str<block_start>""" Call a GET http request in order to get the file data given as url Args: url: url containing a file Returns: request response, containing the data of the file """<line_sep># Saving the headers of this client instance # The HTTP request that gets the file data needs to have no headers # Passing an empty dictionary to _http_request cause it to use this client's headers by default session_headers=self._headers<line_sep>self._headers={}<try_stmt><block_start>results=self._http_request(method='GET' url_suffix=<none> full_url=url headers={} resp_type='response' )<line_sep><return>results.content<block_end><finally_stmt><block_start>self._headers=session_headers<block_end><block_end><block_end>@logger<def_stmt>raise_error_if_no_data results:Dict[str Any]<block_start>""" This function checks if No 'data' field was returned from the request, meaning the input was invalid Args: results: a dict containing the request's results Returns: raises error if there is no 'data' field, with the matching error message returned from the server if no error message was given from the server, suggests the other optional errors """<if_stmt>results.get('data')<is><none><block_start><if_stmt>results.get('message')<block_start><raise>ValueError(results.get('message'))<block_end><else_stmt><block_start><raise>ValueError('No response from server, the server could be temporary unavailable or it is handling too '<concat>'many requests. Please try again later.')<block_end><block_end><block_end>@logger<def_stmt>upload_command client:Client arguments:Dict[str Any]<block_start>""" Upload a file to the API for later conversion Args: client: CloudConvert client to use arguments: All command arguments - either 'url' or 'entry_id'. 
Returns: CommandResults object containing the results of the upload action as returned from the API and its readable output """<if_stmt>arguments.get('url')<block_start><if_stmt>arguments.get('entry_id')<block_start><raise>ValueError('Both url and entry id were inserted - please insert only one.')<block_end>results=client.upload_url(arguments)<block_end><elif_stmt>arguments.get('entry_id')<block_start>demisto.debug('getting the path of the file from its entry id')<line_sep>result=demisto.getFilePath(arguments.get('entry_id'))<if_stmt><not>result<block_start><raise>ValueError('No file was found for given entry id')<block_end>file_path,file_name=result['path'] result['name']<line_sep>results=client.upload_entry_id(file_path file_name)<block_end><else_stmt><block_start><raise>ValueError('No url or entry id specified.')<block_end>raise_error_if_no_data(results)<line_sep>format_operation_title(results)<line_sep>results_data=results.get('data')<line_sep>readable_output=tableToMarkdown('Upload Results' remove_empty_elements(results_data) headers=('id' 'operation' 'created_at' 'status') headerTransform=string_to_table_header )<line_sep><return>CommandResults(readable_output=readable_output outputs_prefix='CloudConvert.Task' outputs_key_field='id' raw_response=results outputs=remove_empty_elements(results_data) )<block_end>@logger<def_stmt>convert_command client:Client arguments:Dict[str Any]<block_start>""" Convert a file that was priorly uploaded Args: client: CloudConvert client to use arguments: All command arguments, the fields 'task_id' and 'output_format' Returns: CommandResults object containing the results of the convert action as returned from the API and its readable output """<line_sep>results=client.convert(arguments)<line_sep>raise_error_if_no_data(results)<line_sep>results_data=results.get('data')<line_sep>readable_output=tableToMarkdown('Convert Results' remove_empty_elements(results_data) headers=('id' 'operation' 'created_at' 'status' 'depends_on_task_ids') headerTransform=string_to_table_header )<line_sep><return>CommandResults(readable_output=readable_output outputs_prefix='CloudConvert.Task' outputs_key_field='id' raw_response=results outputs=remove_empty_elements(results_data) )<block_end>@logger<def_stmt>check_status_command client:Client arguments:Dict[str Any]<block_start>""" Check status of an existing operation using it's task id Args: client: CloudConvert client to use arguments: All command arguments, the field 'task_id' Note: When the checked operation is 'download', the field 'create_war_room_entry' should be set according to the chosen download method, true if downloading as war room entry and false if not. This way a war room entry containing the file will be created if needed. Returns: CommandResults object containing the results of the check status action as returned from the API and its readable output OR if the argument create_war_room_entry is set to True, then a war room entry is also being created. 
"""<line_sep>results=client.check_status(arguments)<line_sep>raise_error_if_no_data(results)<line_sep>format_operation_title(results)<line_sep>results_data=results.get('data' {})<line_sep># If checking on an download to entry operation, manually change the operation name # This is because the 'download as entry' operation is our variation on the export to url operation, # hence not distinguished as a different operation by the API <if_stmt>argToBoolean(arguments.get('create_war_room_entry' <false>))<and>results_data.get('operation')<eq>'download/url'<block_start>results['data']['operation']='download/entry'<block_end># Check if an download to war room entry operation is finished # If it did - create the entry <if_stmt>results_data.get('status')<eq>'finished'<and>argToBoolean(arguments.get('create_war_room_entry' 'False'))<and>results_data.get('operation')<eq>'download/entry'<block_start>modify_results_dict(results_data)<line_sep>url=results_data.get('url')<line_sep>file_name=results_data.get('file_name')<line_sep>file_data=client.get_file_from_url(url)<line_sep>war_room_file=fileResult(filename=file_name data=file_data file_type=entryTypes['entryInfoFile'])<line_sep>readable_output=tableToMarkdown('Check Status Results' remove_empty_elements(results_data) headers=('id' 'operation' 'created_at' 'status' 'depends_on_task_ids' 'file_name' 'url') headerTransform=string_to_table_header )<line_sep>return_results(CommandResults(outputs_prefix='CloudConvert.Task' outputs_key_field='id' raw_response=results readable_output=readable_output outputs=remove_empty_elements(results_data)))<line_sep><return>war_room_file<block_end><else_stmt><block_start>modify_results_dict(results_data)<line_sep>readable_output=tableToMarkdown('Check Status Results' remove_empty_elements(results_data) headers=('id' 'operation' 'created_at' 'status' 'depends_on_task_ids' 'file_name' 'url') headerTransform=string_to_table_header )<line_sep><return>CommandResults(readable_output=readable_output outputs_prefix='CloudConvert.Task' outputs_key_field='id' raw_response=results outputs=remove_empty_elements(results_data) )<block_end><block_end><def_stmt>modify_results_dict results_data:Dict[str Any]<block_start>""" The results of the specific file converted/uploaded/downloaded are sub-values of some keys, so parse the results field to the outer scope of the dict Args: results_data: the dict under the 'data' field in the response's results """<if_stmt>results_data.get('result')<block_start>results_info=results_data.get('result' {}).get('files')<if_stmt>results_info<block_start>results_data['file_name']=results_info[0].get('filename')<line_sep>results_data['url']=results_info[0].get('url')<line_sep>results_data['size']=results_info[0].get('size')<block_end><block_end><block_end>@logger<def_stmt>download_command client:Client arguments:Dict[str Any]<block_start>""" Download a converted file back to the user, either as a url or directly as a war room entry Note: in order to get the resulted url/entry of the file you need to use a check-status command as well, since the response of the download command is usually responded before the file is fully downloaded (hence the 'status' field is 'waiting', and not 'finished') Args: client: CloudConvert client to use arguments: All command arguments, the fields 'task_id', and 'download_as' (url/war_room_entry) Returns: CommandResults object containing the results of the download action as returned from the API, and its readable """<line_sep># Call download as url request # In both url and war 
room entry we still first get a url results=client.download_url(arguments)<line_sep>raise_error_if_no_data(results)<line_sep># If downloading as war room entry, manually change the operation name # This is because the 'download as entry' operation is our variation on the export to url operation, # hence not distinguished as a different operation by the API <if_stmt>arguments['download_as']<eq>'war_room_entry'<block_start>results['data']['operation']='download/entry'<block_end><else_stmt><block_start>format_operation_title(results)<block_end>results_data=results.get('data')<line_sep>readable_output=tableToMarkdown('Download Results' remove_empty_elements(results_data) headers=('id' 'operation' 'created_at' 'status' 'depends_on_task_ids') headerTransform=string_to_table_header )<line_sep><return>CommandResults(readable_output=readable_output outputs_prefix='CloudConvert.Task' outputs_key_field='id' raw_response=results outputs=remove_empty_elements(results_data) )<block_end><def_stmt>test_module client:Client<block_start>""" Returning 'ok' indicates that the integration works like it suppose to. Connection to the service is successful. Args: client: CloudConvert client Returns: 'ok' if test passed, anything else will fail the test """<line_sep>dummy_url='https://raw.githubusercontent.com/demisto/content/master/TestData/pdfworking.pdf'<line_sep>result=client.upload_url({'url':dummy_url})<if_stmt>result.get('data')<block_start><return>'ok'<block_end><elif_stmt>result.get('message')<eq>"Unauthenticated."<block_start><return>'Authorization Error: make sure API Key is correctly set'<block_end><elif_stmt>result.get('message')<block_start><return>result.get('message')<block_end><else_stmt><block_start><return>'No response from server, the server could be temporary unavailable or it is handling too '<concat>'many requests. Please try again later.'<block_end><block_end><def_stmt>format_operation_title results:Dict[str Any]<block_start>""" This function is being used in order to change the titles of the operations that are done by the API and are returned in the response to titles that makes more sense for the users actions, and matches the API's use in our system. 
Args: results: The response from the http request """<line_sep>title_exchange_dict={'import/url':'upload/url' 'import/upload':'upload/entry' 'export/url':'download/url'}<line_sep>operation=results['data']['operation']<line_sep>results['data']['operation']=title_exchange_dict[operation]<if>operation<in>title_exchange_dict.keys()<else>operation<block_end><def_stmt>main <arrow><none><block_start><try_stmt><block_start>command=demisto.command()<line_sep>params=demisto.params()<line_sep>api_key=params.get('apikey')<line_sep>verify=<not>params.get('insecure' <false>)<line_sep>proxy=params.get('proxy' <false>)<line_sep>headers={'Authorization':f'Bearer {api_key}'}<line_sep>client=Client(headers verify proxy)<if_stmt>command<eq>'cloudconvert-upload'<block_start>return_results(upload_command(client demisto.args()))<block_end><elif_stmt>command<eq>'cloudconvert-convert'<block_start>return_results(convert_command(client demisto.args()))<block_end><elif_stmt>command<eq>'cloudconvert-check-status'<block_start>return_results(check_status_command(client demisto.args()))<block_end><elif_stmt>command<eq>'cloudconvert-download'<block_start>return_results(download_command(client demisto.args()))<block_end><elif_stmt>command<eq>'test-module'<block_start>return_results(test_module(client))<block_end><block_end><except_stmt>Exception<as>e<block_start>err_msg='Task id not found or expired'<if>'No query results for model'<in>str(e)<else>('No more conversion minutes for today for this user'<if>'Payment Required'<in>str(e)<else>str(e))<line_sep>return_error(f'Failed to execute {command} command. Error: {err_msg}' error=traceback.format_exc())<block_end><block_end><if_stmt>__name__<in>('__main__' '__builtin__' 'builtins')<block_start>main()<block_end>
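# A minimal flow sketch for the commands above, assuming the integration runs inside an
# XSOAR/Demisto environment (demistomock available); the API key and the URL below are
# placeholders. The flow uploads a remote file, converts it, then requests a download URL.
headers = {'Authorization': 'Bearer <API_KEY>'}
client = Client(headers, verify=True, proxy=False)

upload = upload_command(client, {'url': 'https://example.com/report.pdf'})
convert = convert_command(client, {'task_id': upload.outputs['id'], 'output_format': 'png'})
download = download_command(client, {'task_id': convert.outputs['id'], 'download_as': 'url'})
status = check_status_command(client, {'task_id': download.outputs['id']})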
# Generated by Django 1.11 on 2018-04-25 18:29 <import_from_stmt>django.db migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('server' '0069_remove_machine_install_log') ]<line_sep>operations=[migrations.RemoveField(model_name='machine' name='install_log_hash' ) ]<block_end>
<import_stmt>hashlib<import_stmt>os<import_stmt>shutil<import_stmt>tempfile<import_from_stmt>unittest TestCase<import_from_stmt>unittest.mock patch<import_from_stmt>samcli.lib.utils.hash dir_checksum str_checksum<class_stmt>TestHash(TestCase)<block_start><def_stmt>setUp self<block_start>self.temp_dir=tempfile.mkdtemp()<block_end><def_stmt>tearDown self<block_start>shutil.rmtree(self.temp_dir ignore_errors=<true>)<block_end><def_stmt>test_dir_hash_independent_of_location self<block_start>temp_dir1=os.path.join(self.temp_dir "temp-dir-1")<line_sep>os.mkdir(temp_dir1)<with_stmt>open(os.path.join(temp_dir1 "test-file") "w+")<as>f<block_start>f.write("Testfile")<block_end>checksum1=dir_checksum(temp_dir1)<line_sep>temp_dir2=shutil.move(temp_dir1 os.path.join(self.temp_dir "temp-dir-2"))<line_sep>checksum2=dir_checksum(temp_dir2)<line_sep>self.assertEqual(checksum1 checksum2)<block_end><def_stmt>test_dir_hash_independent_of_file_order self<block_start>file1=tempfile.NamedTemporaryFile(delete=<false> dir=self.temp_dir)<line_sep>file1.write(b"Testfile")<line_sep>file1.close()<line_sep>file2=tempfile.NamedTemporaryFile(delete=<false> dir=self.temp_dir)<line_sep>file2.write(b"Testfile")<line_sep>file2.close()<line_sep>dir_checksums={}<with_stmt>patch("os.walk")<as>mockwalk<block_start>mockwalk.return_value=[(self.temp_dir [] [file1.name file2.name ] ) ]<line_sep>dir_checksums["first"]=dir_checksum(self.temp_dir)<block_end><with_stmt>patch("os.walk")<as>mockwalk<block_start>mockwalk.return_value=[(self.temp_dir [] [file2.name file1.name ] ) ]<line_sep>dir_checksums["second"]=dir_checksum(self.temp_dir)<block_end>self.assertEqual(dir_checksums["first"] dir_checksums["second"])<block_end><def_stmt>test_dir_hash_same_contents_diff_file_per_directory self<block_start>_file=tempfile.NamedTemporaryFile(delete=<false> dir=self.temp_dir)<line_sep>_file.write(b"Testfile")<line_sep>_file.close()<line_sep>checksum_before=dir_checksum(os.path.dirname(_file.name))<line_sep>shutil.move(os.path.abspath(_file.name) os.path.join(os.path.dirname(_file.name) "different_name"))<line_sep>checksum_after=dir_checksum(os.path.dirname(_file.name))<line_sep>self.assertNotEqual(checksum_before checksum_after)<block_end><def_stmt>test_dir_hash_with_ignore_list self<block_start>_file=tempfile.NamedTemporaryFile(delete=<false> dir=self.temp_dir)<line_sep>_file.write(b"Testfile")<line_sep>_file.close()<line_sep>dir_path=os.path.dirname(_file.name)<line_sep>checksum_before=dir_checksum(dir_path)<line_sep># add a file to .aws-sam/ aws_sam_dir_path=os.path.join(dir_path ".aws-sam")<line_sep>os.mkdir(aws_sam_dir_path)<line_sep>_new_file=tempfile.NamedTemporaryFile(delete=<false> dir=aws_sam_dir_path)<line_sep>_new_file.write(b"dummy")<line_sep>_new_file.close()<line_sep>checksum_after=dir_checksum(os.path.dirname(_file.name))<line_sep>self.assertNotEqual(checksum_before checksum_after)<line_sep>checksum_after_with_ignore_list=dir_checksum(os.path.dirname(_file.name) ignore_list=[".aws-sam"])<line_sep>self.assertEqual(checksum_before checksum_after_with_ignore_list)<block_end><def_stmt>test_hashing_method self<block_start>_file=tempfile.NamedTemporaryFile(delete=<false> dir=self.temp_dir)<line_sep>_file.write(b"Testfile")<line_sep>_file.close()<line_sep>checksum_sha256=dir_checksum(os.path.dirname(_file.name) hash_generator=hashlib.sha256())<line_sep>checksum_md5=dir_checksum(os.path.dirname(_file.name) hashlib.md5())<line_sep>checksum_default=dir_checksum(os.path.dirname(_file.name))<line_sep>self.assertEqual(checksum_default 
checksum_md5)<line_sep>self.assertNotEqual(checksum_md5 checksum_sha256)<block_end><def_stmt>test_dir_cyclic_links self<block_start>_file=tempfile.NamedTemporaryFile(delete=<false> dir=self.temp_dir)<line_sep>_file.write(b"Testfile")<line_sep>_file.close()<line_sep>os.symlink(os.path.abspath(_file.name) os.path.join(os.path.dirname(_file.name) "symlink"))<line_sep>os.symlink(os.path.join(os.path.dirname(_file.name) "symlink") os.path.join(os.path.dirname(_file.name) "symlink2"))<line_sep>os.unlink(os.path.abspath(_file.name))<line_sep>os.symlink(os.path.join(os.path.dirname(_file.name) "symlink2") os.path.abspath(_file.name))<with_stmt>self.assertRaises(OSError)<as>ex<block_start>dir_checksum(os.path.dirname(_file.name))<line_sep>self.assertIn("Too many levels of symbolic links" ex.message)<block_end><block_end><def_stmt>test_str_checksum self<block_start>checksum=str_checksum("Hello, World!")<line_sep>self.assertEqual(checksum "65a8e27d8879283831b664bd8b7f0ad4")<block_end><block_end>
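# A minimal usage sketch of the helpers exercised by the tests above; the import path is
# the same one the test module uses, and the directory argument is illustrative.
from samcli.lib.utils.hash import dir_checksum, str_checksum

print(str_checksum("Hello, World!"))                   # md5 hex digest of the string
print(dir_checksum(".", ignore_list=[".aws-sam"]))     # md5 digest of the directory tree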
# filter wizard # no-frills tool for quickly filtering docked compounds, etc. <import_stmt>os sys<import_from_stmt>pymol.wizard Wizard<import_from_stmt>pymol cmd<import_stmt>traceback<line_sep># global dictionary for saving result on a per-object basis static_dict={}<line_sep># last/current object being filtered default_object=<none><line_sep># browing mode default_browse=1<line_sep>accept_str="Accept"<line_sep>defer_str="Defer"<line_sep>reject_str="Reject"<line_sep># class definition (class name must match wizard name with cap) <class_stmt>Filter(Wizard)<block_start><def_stmt>migrate_session self version<block_start><if_stmt>version<ge>self.cmd.get_version()[2]<block_start><return><block_end># remap old "title" state identifiers to new identifiers # (changed in 1.7.0.0) <for_stmt>object,sdo self.dict.items()<block_start><if_stmt><not>sdo<block_start><continue><block_end># build title -> ident mapping tota=self.cmd.count_states(object)<line_sep>title2ident=dict((self.cmd.get_title(object state) self.get_ident(object state))<for>state range(1 tota+1))<line_sep># build remapped sdo <try_stmt><block_start>new_sdo={}<for_stmt>title,value sdo.items()<block_start>new_sdo[title2ident[title]]=value<block_end>sdo.clear()<line_sep>sdo.update(new_sdo)<block_end><except_stmt>KeyError# lookup by title failed, we can assume that this instance # already uses new identifiers <block_start><return><block_end><block_end>self.load_state_dict()<block_end><def_stmt>__init__ self _self=cmd# initialize parent class <block_start>Wizard.__init__(self _self)<line_sep>self.update_object_menu()<line_sep># restore previous state from global storage self.dict=static_dict<line_sep>self.object=default_object<if>default_object<in>self.avail_objects<else><none><line_sep>self.browse=default_browse<line_sep>self.state_dict={}<line_sep># if we don't have a current object, choose the first multi-state object <if_stmt><not>self.object<and>self.avail_objects<block_start>self.object=self.avail_objects[0]<block_end># menu for self.menu['browse']=[[2 'Browse Mode' ''] [1 'Browse All' 'cmd.get_wizard().set_browse(1)'] [1 'Browse Accepted' 'cmd.get_wizard().set_browse(2)'] [1 'Browse Rejected' 'cmd.get_wizard().set_browse(3)'] [1 'Browse Deferred' 'cmd.get_wizard().set_browse(4)'] [1 'Browse Remaining' 'cmd.get_wizard().set_browse(5)'] ]<line_sep>self.menu['create']=[[2 'Create Filtered Object' ''] [1 'Accepted' 'cmd.get_wizard().create_object("Accept")'] [1 'Rejected' 'cmd.get_wizard().create_object("Reject")'] [1 'Deferred' 'cmd.get_wizard().create_object("Defer")'] ]<line_sep>self.count_object()<line_sep>self.load_state_dict()<line_sep>self.update_object_menu()<line_sep>cmd.set_key('F1' <lambda>s=self:s.accept())<line_sep>cmd.set_key('F2' <lambda>s=self:s.reject())<line_sep>cmd.set_key('F3' <lambda>s=self:s.defer())<line_sep>cmd.set_key('right' <lambda>s=self:s.forward())<line_sep>cmd.set_key('left' <lambda>s=self:s.backward())<block_end><def_stmt>do_select self name<block_start><try_stmt><block_start>obj_name=cmd.index('first ?'+name)[0][0]<line_sep>self.set_object(obj_name)<block_end><except_stmt><block_start><pass><block_end>self.cmd.deselect()<block_end><def_stmt>do_pick self bondFlag<block_start>self.do_select('pk1')<line_sep>self.cmd.unpick()<block_end><def_stmt>do_state self state<block_start>cmd.refresh_wizard()<block_end><def_stmt>get_event_mask self<block_start><return>Wizard.event_mask_pick+Wizard.event_mask_select+Wizard.event_mask_state<block_end><def_stmt>update_object_menu self# find objects with > 1 state 
<block_start>self.avail_objects=[]<for_stmt>a cmd.get_names('objects')<block_start><if_stmt>cmd.get_type(a)<eq>'object:molecule'<block_start><if_stmt>cmd.count_states(a)<g>1<block_start>self.avail_objects.append(a)<block_end><block_end><block_end># now create the object menu self.menu['object']=[[2 'Select Object' '']]<for_stmt>a self.avail_objects<block_start>self.menu['object'].append([1 a 'cmd.get_wizard().set_object("%s")'%(a)])<block_end>self.menu['object'].append([1 'None' 'cmd.get_wizard().set_object(None)'])<block_end><def_stmt>set_browse self browse# allow user to focus on only a subset of the compounds <block_start>self.browse=browse<if_stmt>self.browse<eq>1<block_start>print(" Filter: Browsing all compounds.")<line_sep>cmd.mset()# all states visible <block_end><elif_stmt>self.object<is><none><block_start>print(" Filter-Error: please choose an object first")<block_end><else_stmt><block_start>self.check_object_dict()<if_stmt>self.browse<eq>2<block_start>print(" Filter: Browsing accepted compounds.")<line_sep>target=accept_str<block_end><elif_stmt>self.browse<eq>3<block_start>print(" Filter: Browsing rejected compounds.")<line_sep>target=reject_str<block_end><elif_stmt>self.browse<eq>4<block_start>print(" Filter: Browsing deferred compounds.")<line_sep>target=defer_str<block_end>lst=[]<line_sep>sd=self.state_dict<line_sep>sdo=self.dict[self.object]<if_stmt>self.browse<l>5<block_start><for_stmt>a list(sdo.keys())<block_start><if_stmt>sdo[a]<eq>target<block_start>lst.append(sd[a])<block_end><block_end><block_end><else_stmt><block_start>print(" Filter: Browsing remaining compounds")<for_stmt>a sd.keys()<block_start><if_stmt>a<not><in>sdo<block_start>lst.append(sd[a])<block_end><block_end><block_end>lst.sort()<if_stmt>len(lst)<eq>0<block_start>print(" Filter-Error: No matching compounds.")<block_end>cmd.mset(' '.join(map(str lst)))<line_sep>cmd.rewind()<block_end>cmd.refresh_wizard()<block_end><def_stmt>check_object_dict self# make sure we have a valid entry for this object in our dictionary <block_start><if_stmt>self.object<not><in>self.dict<block_start>self.dict[self.object]={}<block_end><block_end># create dictionary to store results <def_stmt>adjust self decision inc# utility routine to increment/decrement counters <block_start><if_stmt>decision<eq>accept_str<block_start>self.acce=self.acce+inc<block_end><elif_stmt>decision<eq>reject_str<block_start>self.reje=self.reje+inc<block_end><elif_stmt>decision<eq>defer_str<block_start>self.defe=self.defe+inc<block_end><block_end><def_stmt>load_state_dict self# establish relationship between names and states # ASSUMPTION: identifiers will be unique <block_start>self.state_dict={}<line_sep>sd=self.state_dict<line_sep>so=self.object<if_stmt>so<is><not><none><block_start>cnt=cmd.count_states(so)<for_stmt>a range(1 cnt+1)<block_start>sd[self.get_ident(so a)]=a<block_end><block_end><block_end><def_stmt>count_object self# record how many molecular are in an object, etc. 
<block_start>self.check_object_dict()<if_stmt>self.object<is><not><none><block_start>self.acce=0<line_sep>self.reje=0<line_sep>self.defe=0<line_sep>self.togo=0<line_sep>self.tota=cmd.count_states(self.object)<line_sep>sdo=self.dict[self.object]<line_sep>self.togo=self.tota-len(sdo)<for_stmt>a list(sdo.keys())<block_start>dec=sdo[a]<line_sep>self.adjust(dec 1)<block_end><block_end><block_end><def_stmt>set_object self obj_name<block_start>self.object=obj_name<line_sep>self.count_object()<line_sep>self.load_state_dict()<line_sep>cmd.refresh_wizard()<block_end><def_stmt>get_panel self# returns Wizard panel for PyMOL to display # 1 = title/text # 2 = button # 3 = pop-up menu <block_start>self.update_object_menu()<if_stmt>self.object<is><not><none><block_start>save_str='Save %s.txt'%self.object<block_end><else_stmt><block_start>save_str=""<block_end><return>[[1 'Filtering Wizard' ''] [3 self.menu['browse'][self.browse][1] 'browse'] [3 'Object: %s'%(self.object) 'object'] [2 'Accept (F1)' 'cmd.get_wizard().accept()'] [2 'Reject (F2)' 'cmd.get_wizard().reject()'] [2 'Defer (F3)' 'cmd.get_wizard().defer()'] [2 'Forward (->)' 'cmd.get_wizard().forward()'] [2 'Back (<-)' 'cmd.get_wizard().backward()'] [3 'Create Filtered Object' 'create'] [2 save_str 'cmd.get_wizard().save()'] [2 'Refresh' 'cmd.refresh_wizard()'] [2 'Done' 'cmd.set_wizard()'] ]<block_end><def_stmt>get_ident self object state<block_start><return>'%d/%d %s'%(state self.tota self.cmd.get_title(self.object state))<block_end><def_stmt>get_prompt self# returns text prompt <block_start>self.prompt=<none><if_stmt>self.object<is><none><block_start>self.prompt=['Please select a multi-state object...']<block_end><else_stmt><block_start>self.prompt=['%s: %d accepted, %d rejected, %d deferred, %d remaining'%(self.object self.acce self.reje self.defe self.togo)]<line_sep>state=cmd.get_object_state(self.object)<line_sep>ident=self.get_ident(self.object state)<line_sep>sdo=self.dict[self.object]<if_stmt>ident<in>sdo<block_start>self.prompt.append('%s: %s'%(ident sdo[ident]))<block_end><else_stmt><block_start>self.prompt.append('%s?'%(ident))<block_end><block_end><return>self.prompt<block_end><def_stmt>count self entry str# keep track of how many compounds are in which category <block_start>self.check_object_dict()<line_sep>sdo=self.dict[self.object]<if_stmt>entry<in>sdo<block_start>self.adjust(sdo[entry] -1)<block_end><else_stmt><block_start>self.togo=self.togo-1<block_end>sdo[entry]=str<line_sep>self.adjust(sdo[entry] 1)<block_end><def_stmt>accept self# accept compound and advance <block_start><if_stmt>self.object<is><none><block_start>print(" Filter-Error: Please choose an object first")<block_end><else_stmt><block_start>state=cmd.get_object_state(self.object)<line_sep>ident=self.get_ident(self.object state)<line_sep>print(" Filter: Accepting '%s'"%ident)<line_sep>self.count(ident accept_str)<block_end>cmd.forward()<line_sep>cmd.refresh_wizard()<block_end><def_stmt>reject self# reject compound and advance <block_start><if_stmt>self.object<is><none><block_start>print(" Filter-Error: Please choose an object first")<block_end><else_stmt><block_start>state=cmd.get_object_state(self.object)<line_sep>ident=self.get_ident(self.object state)<line_sep>print(" Filter: Rejecting '%s'"%ident)<line_sep>self.check_object_dict()<line_sep>self.count(ident reject_str)<block_end>cmd.forward()<line_sep>cmd.refresh_wizard()<block_end><def_stmt>defer self# defer compound and advance <block_start><if_stmt>self.object<is><none><block_start>print(" Filter-Error: Please 
choose an object first")<block_end><else_stmt><block_start>state=cmd.get_object_state(self.object)<line_sep>ident=self.get_ident(self.object state)<line_sep>print(" Filter: Deferring '%s'"%ident)<line_sep>self.check_object_dict()<line_sep>self.count(ident defer_str)<block_end>cmd.forward()<line_sep>cmd.refresh_wizard()<block_end><def_stmt>forward self# go forward and update information <block_start>cmd.forward()<line_sep>cmd.refresh_wizard()<block_end><def_stmt>backward self# go backward and update information <block_start>cmd.backward()<line_sep>cmd.refresh_wizard()<block_end><def_stmt>create_object self what='Accept'<block_start><if_stmt><not>self.object<block_start>print(" Filter-Error: Please choose an object first")<line_sep><return><block_end>name=self.cmd.get_unused_name(self.object+'_'+what 0)<line_sep>sdo=self.dict[self.object]<line_sep>lst=[self.state_dict[ident]<for>(ident w) sdo.items()<if>w<eq>what]<for_stmt>state sorted(lst)<block_start>self.cmd.create(name self.object state -1)<block_end><block_end><def_stmt>save self# write compounds to a file <block_start><if_stmt>self.object<is><none><block_start>print(" Filter-Error: please choose an object first")<block_end><else_stmt><block_start>self.check_object_dict()<line_sep>fname=self.object+".txt"<try_stmt><block_start>f=open(fname 'w')<line_sep>f.close()<block_end><except_stmt><block_start>print(" Filter-Warning: '"+fname+"' in current directory is not writable.")<line_sep>print(" Filter-Warning: attempting to write in home directory.")<line_sep>fname=cmd.exp_path(os.path.join('~' fname))<block_end><try_stmt><block_start>f=open(fname 'w')<line_sep>sd=self.state_dict<line_sep>sdo=self.dict[self.object]<line_sep>f.write('Object\t"%s"\n'%(self.object))<line_sep>f.write('Total\t%d\nAccepted\t%d\nRejected\t%d\nDeferred\t%d\nRemaining\t%d\n\n'%(self.tota self.acce self.reje self.defe self.togo))<line_sep># sort output in order of states lst=[]<for_stmt>a sd.keys()<block_start>lst.append((sd[a] a))<block_end>lst.sort()<line_sep># write list with decisions <for_stmt>a lst<block_start><if_stmt>a[1]<in>sdo<block_start>f.write('%d\t"%s"\t"%s"\n'%(a[0] a[1] sdo[a[1]]))<block_end><else_stmt><block_start>f.write('%d\t"%s"\t"?"\n'%(a[0] a[1]))<block_end><block_end>f.close()<line_sep>print(" Filter: Wrote '%s'."%fname)<block_end><except_stmt><block_start>traceback.print_exc()<line_sep>print(" Filter-Error: Unable to write '%s'."%fname)<block_end><block_end><block_end><def_stmt>cleanup self# save current state in global vars... <block_start><global>default_object default_browse static_dict<line_sep>default_object=self.object<line_sep>default_browse=self.browse<line_sep>static_dict=self.dict<line_sep># restore key actions cmd.set_key('F1' <none>)<line_sep>cmd.set_key('F2' <none>)<line_sep>cmd.set_key('F3' <none>)<line_sep>cmd.set_key('right' cmd.forward)<line_sep>cmd.set_key('left' cmd.backward)<block_end><block_end>
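# An illustrative way to launch this wizard from the PyMOL prompt, assuming the module is
# installed under the wizard name "filter" (the class name must match, as noted above);
# the example object name is a placeholder.
#
#   PyMOL> load docked_poses.sdf, poses
#   PyMOL> wizard filter
#
# F1/F2/F3 then accept/reject/defer the current state; left/right arrows step through states.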
<import_from_stmt>.. haven_utils<import_from_stmt>.. haven_results<as>hr<import_from_stmt>.. haven_utils<as>hu<import_from_stmt>.. haven_share<as>hd<import_stmt>os<import_stmt>pprint<import_stmt>json<import_stmt>copy<import_stmt>pprint<import_stmt>pandas<as>pd<import_from_stmt>. widgets<as>wdg<try_stmt><block_start><import_stmt>ast<import_from_stmt>ipywidgets Button HBox VBox<import_from_stmt>ipywidgets widgets<import_from_stmt>IPython.display display<import_from_stmt>IPython.core.display Javascript display HTML<import_from_stmt>IPython.display FileLink FileLinks<import_from_stmt>ipywidgets.widgets.interaction show_inline_matplotlib_plots<block_end><except_stmt>Exception<block_start>print("widgets not available...")<block_end><def_stmt>images_tab self output<block_start>db=self<if_stmt>db.vars.get("legend_list")<is><none><block_start>db.vars["legend_list"]=hu.get_diff_hparam(db.rm.exp_list)<block_end>w_legend=wdg.SelectMultiple(header="Legend:" options=db.rm.exp_params db_vars=db.vars var="legend_list")<line_sep>w_n_exps=wdg.Text("n_exps:" default="3" type="int" db_vars=db.vars var="n_exps")<line_sep>w_n_images=wdg.Text("n_images:" default="5" type="int" db_vars=db.vars var="n_images")<line_sep>w_figsize=wdg.Text("figsize:" default="(10,5)" type="tuple" db_vars=db.vars var="figsize")<line_sep>w_dirname=wdg.Text("dirname:" default="images" type="str" db_vars=db.vars var="dirname")<line_sep>bdownload=widgets.Button(description="Download Images" layout=self.layout_button)<line_sep>bdownload_out=widgets.Output(layout=self.layout_button)<line_sep>bdownload_zip=widgets.Button(description="Download Images zipped" layout=self.layout_button)<line_sep>bdownload_zip_out=widgets.Output(layout=self.layout_button)<line_sep>brefresh=widgets.Button(description="Display Images")<line_sep>button=widgets.VBox([widgets.HBox([w_legend.get_widget() w_n_exps.get_widget() w_n_images.get_widget() w_figsize.get_widget() w_dirname.get_widget() ]) widgets.HBox([brefresh bdownload bdownload_out bdownload_zip bdownload_zip_out]) ])<line_sep>output_plot=widgets.Output()<with_stmt>output<block_start>display(button)<line_sep>display(output_plot)<block_end><def_stmt>on_clicked b<block_start>output_plot.clear_output()<with_stmt>output_plot<block_start>self.update_rm()<line_sep>self.rm_original.fig_image_list=self.rm.get_images(legend_list=w_legend.update() n_images=w_n_images.update() n_exps=w_n_exps.update() figsize=w_figsize.update() dirname=w_dirname.update() )<line_sep>show_inline_matplotlib_plots()<block_end><block_end>brefresh.on_click(on_clicked)<def_stmt>on_download_clicked b<block_start>fname="images"<import_from_stmt>matplotlib.backends.backend_pdf PdfPages<import_stmt>matplotlib.pyplot<as>plt<line_sep>pp=PdfPages(fname)<for_stmt>fig self.rm_original.fig_image_list<block_start>fig.savefig(pp format="pdf")<block_end>pp.close()<line_sep>bdownload_out.clear_output()<with_stmt>bdownload_out<block_start>display(FileLink(fname result_html_prefix="Download: "))<block_end><block_end><def_stmt>on_download_clicked_zip b<block_start>fname="results.zip"<line_sep>bdownload_zip_out.clear_output()<with_stmt>bdownload_zip_out<block_start><import_stmt>zipfile glob<line_sep>exp_id_list=[hu.hash_dict(exp_dict)<for>exp_dict self.rm.exp_list]<line_sep>zipf=zipfile.ZipFile(fname "w" zipfile.ZIP_DEFLATED)<for_stmt>exp_id exp_id_list<block_start>abs_path_list=glob.glob(os.path.join(self.rm.savedir_base exp_id "images" "*"))<for_stmt>abs_path abs_path_list# weq 
<block_start>iname=os.path.split(abs_path)[-1]<line_sep>rel_path=f"{exp_id}_{iname}"<line_sep>zipf.write(abs_path rel_path)<block_end><block_end>zipf.close()<line_sep># self.rm.to_zip(savedir_base="", fname=fname, fname_list=self.vars["fname_list"]) <block_end>bdownload_zip_out.clear_output()<with_stmt>bdownload_zip_out<block_start>display("%d exps zipped."%len(self.rm.exp_list))<block_end>display(FileLink(fname result_html_prefix="Download: "))<block_end>bdownload.on_click(on_download_clicked)<line_sep>bdownload_zip.on_click(on_download_clicked_zip)<block_end>
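The two download handlers above share one pattern: write an artifact to disk, then surface it through an IPython FileLink inside an Output widget. A stripped-down sketch of that pattern outside the dashboard follows; the directory path and widget wiring are illustrative assumptions, not part of the original module.

# Sketch of the "zip a folder, then offer a download link" pattern used by
# on_download_clicked_zip above (src_dir and the Output widget are illustrative).
import glob
import os
import zipfile

from ipywidgets import Output
from IPython.display import FileLink, display

out = Output()

def zip_and_link(src_dir, fname="results.zip"):
    # bundle every file in src_dir into a single deflated archive
    with zipfile.ZipFile(fname, "w", zipfile.ZIP_DEFLATED) as zipf:
        for abs_path in glob.glob(os.path.join(src_dir, "*")):
            zipf.write(abs_path, os.path.basename(abs_path))
    # render a clickable download link inside the notebook output area
    with out:
        display(FileLink(fname, result_html_prefix="Download: "))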
"""Cutting utilities working with audio."""<import_stmt>numpy<as>np<def_stmt>find_audio_period clip min_time=0.1 max_time=2 time_resolution=0.01<block_start>"""Finds the period, in seconds of an audioclip. Parameters ---------- min_time : float, optional Minimum bound for the returned value. max_time : float, optional Maximum bound for the returned value. time_resolution : float, optional Numerical precision. """<line_sep>chunksize=int(time_resolution<times>clip.fps)<line_sep>chunk_duration=1.0<times>chunksize/clip.fps<line_sep># v denotes the list of volumes v=np.array([(chunk<power>2).sum()<for>chunk clip.iter_chunks(chunksize)])<line_sep>v=v-v.mean()<line_sep>corrs=np.correlate(v v mode="full")[-len(v):]<line_sep>corrs[:int(min_time/chunk_duration)]=0<line_sep>corrs[int(max_time/chunk_duration):]=0<line_sep><return>chunk_duration<times>np.argmax(corrs)<block_end>
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) <import_stmt>os<import_from_stmt>spack *<class_stmt>Kadath(CMakePackage)<block_start>"""KADATH SPECTRAL SOLVER. The Frankfurt University/Kadath (FUKA) Initial Data solver branch is a collection of ID solvers aimed at delivering consistent initial data (ID) solutions to the eXtended Conformal Thin-Sandwich (XCTS) formulation of Einstein's field equations for a variety of compact object configurations to include extremely compact, asymmetric, and mixed spin binaries. """<line_sep>homepage="https://kadath.obspm.fr/fuka/"<line_sep>git="https://gitlab.obspm.fr/grandcle/Kadath.git"<line_sep>maintainers=['eschnett']<line_sep>version('fuka' branch='fuka')<line_sep>variant('mpi' default=<true> description='Enable MPI support')<line_sep>variant('codes' multi=<true> description="Codes to enable" values=('none' 'BBH' 'BH' 'BHNS' 'BNS' 'NS') default='none')<line_sep>depends_on('blas')<line_sep>depends_on('boost cxxstd=17')# kadath uses std=C++17 depends_on('cmake @2.8:' type='build')<line_sep>depends_on('fftw-api @3:')<line_sep>depends_on('gsl')<line_sep>depends_on('lapack')<line_sep>depends_on('mpi' when='+mpi')<line_sep>depends_on('pgplot')<line_sep>depends_on('scalapack')<line_sep>root_cmakelists_dir='build_release'<def_stmt>patch self<block_start><for_stmt>code self.spec.variants['codes'].value<block_start><if_stmt>code<ne>'none'# Disable unwanted explicit include directory settings <block_start>filter_file(r"include_directories\(/usr" "# include_directories(/usr" join_path("codes" code "CMakeLists.txt"))<block_end><block_end><block_end><def_stmt>setup_build_environment self env<block_start>env.set('HOME_KADATH' self.stage.source_path)<block_end><def_stmt>cmake_args self<block_start><return>[# kadath uses a non-standard option to enable MPI self.define_from_variant('PAR_VERSION' 'mpi') ]<block_end><def_stmt>cmake self spec prefix<block_start>options=self.std_cmake_args<line_sep>options<augadd>self.cmake_args()<line_sep>options.append(os.path.abspath(self.root_cmakelists_dir))<with_stmt>working_dir(self.build_directory create=<true>)<block_start>cmake(*options)<block_end><for_stmt>code self.spec.variants['codes'].value<block_start><if_stmt>code<ne>'none'<block_start><with_stmt>working_dir(join_path("codes" code))<block_start>cmake(*options)<block_end><block_end><block_end><block_end><def_stmt>build self spec prefix<block_start><with_stmt>working_dir(self.build_directory)<block_start>make(*self.build_targets)<block_end><for_stmt>code self.spec.variants['codes'].value<block_start><if_stmt>code<ne>'none'<block_start><with_stmt>working_dir(join_path("codes" code))<block_start>make(*self.build_targets)<block_end><block_end><block_end><block_end><def_stmt>install self spec prefix<block_start>mkdirp(prefix.include)<line_sep>install_tree('include' prefix.include)<line_sep>mkdirp(prefix.lib)<line_sep>install_tree('lib' prefix.lib)<block_end><block_end>
# test_refs.py -- tests for refs.py # encoding: utf-8 # Copyright (C) 2013 <NAME> <<EMAIL>> # # Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU # General Public License as public by the Free Software Foundation; version 2.0 # or (at your option) any later version. You can redistribute it and/or # modify it under the terms of either of these two licenses. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # You should have received a copy of the licenses; if not, see # <http://www.gnu.org/licenses/> for a copy of the GNU General Public License # and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache # License, Version 2.0. # """Tests for dulwich.refs."""<import_from_stmt>io BytesIO<import_stmt>os<import_stmt>sys<import_stmt>tempfile<import_from_stmt>dulwich errors<import_from_stmt>dulwich.file GitFile <import_from_stmt>dulwich.objects ZERO_SHA<import_from_stmt>dulwich.refs DictRefsContainer InfoRefsContainer check_ref_format _split_ref_line parse_symref_value read_packed_refs_with_peeled read_packed_refs strip_peeled_refs write_packed_refs <import_from_stmt>dulwich.repo Repo<import_from_stmt>dulwich.tests SkipTest TestCase <import_from_stmt>dulwich.tests.utils open_repo tear_down_repo <class_stmt>CheckRefFormatTests(TestCase)<block_start>"""Tests for the check_ref_format function. These are the same tests as in the git test suite. """<def_stmt>test_valid self<block_start>self.assertTrue(check_ref_format(b"heads/foo"))<line_sep>self.assertTrue(check_ref_format(b"foo/bar/baz"))<line_sep>self.assertTrue(check_ref_format(b"refs///heads/foo"))<line_sep>self.assertTrue(check_ref_format(b"foo./bar"))<line_sep>self.assertTrue(check_ref_format(b"heads/foo@bar"))<line_sep>self.assertTrue(check_ref_format(b"heads/fix.lock.error"))<block_end><def_stmt>test_invalid self<block_start>self.assertFalse(check_ref_format(b"foo"))<line_sep>self.assertFalse(check_ref_format(b"heads/foo/"))<line_sep>self.assertFalse(check_ref_format(b"./foo"))<line_sep>self.assertFalse(check_ref_format(b".refs/foo"))<line_sep>self.assertFalse(check_ref_format(b"heads/foo..bar"))<line_sep>self.assertFalse(check_ref_format(b"heads/foo?bar"))<line_sep>self.assertFalse(check_ref_format(b"heads/foo.lock"))<line_sep>self.assertFalse(check_ref_format(b"heads/v@{ation"))<line_sep>self.assertFalse(check_ref_format(b"heads/foo\bar"))<block_end><block_end>ONES=b"1"<times>40<line_sep>TWOS=b"2"<times>40<line_sep>THREES=b"3"<times>40<line_sep>FOURS=b"4"<times>40<class_stmt>PackedRefsFileTests(TestCase)<block_start><def_stmt>test_split_ref_line_errors self<block_start>self.assertRaises(errors.PackedRefsException _split_ref_line b"singlefield")<line_sep>self.assertRaises(errors.PackedRefsException _split_ref_line b"badsha name")<line_sep>self.assertRaises(errors.PackedRefsException _split_ref_line ONES+b" bad/../refname" )<block_end><def_stmt>test_read_without_peeled self<block_start>f=BytesIO(b"\n".join([b"# comment" ONES+b" ref/1" TWOS+b" ref/2"]))<line_sep>self.assertEqual([(ONES b"ref/1") (TWOS b"ref/2")] list(read_packed_refs(f)))<block_end><def_stmt>test_read_without_peeled_errors self<block_start>f=BytesIO(b"\n".join([ONES+b" ref/1" b"^"+TWOS]))<line_sep>self.assertRaises(errors.PackedRefsException list 
read_packed_refs(f))<block_end><def_stmt>test_read_with_peeled self<block_start>f=BytesIO(b"\n".join([ONES+b" ref/1" TWOS+b" ref/2" b"^"+THREES FOURS+b" ref/4" ]))<line_sep>self.assertEqual([(ONES b"ref/1" <none>) (TWOS b"ref/2" THREES) (FOURS b"ref/4" <none>) ] list(read_packed_refs_with_peeled(f)) )<block_end><def_stmt>test_read_with_peeled_errors self<block_start>f=BytesIO(b"\n".join([b"^"+TWOS ONES+b" ref/1"]))<line_sep>self.assertRaises(errors.PackedRefsException list read_packed_refs(f))<line_sep>f=BytesIO(b"\n".join([ONES+b" ref/1" b"^"+TWOS b"^"+THREES]))<line_sep>self.assertRaises(errors.PackedRefsException list read_packed_refs(f))<block_end><def_stmt>test_write_with_peeled self<block_start>f=BytesIO()<line_sep>write_packed_refs(f {b"ref/1":ONES b"ref/2":TWOS} {b"ref/1":THREES})<line_sep>self.assertEqual(b"\n".join([b"# pack-refs with: peeled" ONES+b" ref/1" b"^"+THREES TWOS+b" ref/2" ])+b"\n" f.getvalue() )<block_end><def_stmt>test_write_without_peeled self<block_start>f=BytesIO()<line_sep>write_packed_refs(f {b"ref/1":ONES b"ref/2":TWOS})<line_sep>self.assertEqual(b"\n".join([ONES+b" ref/1" TWOS+b" ref/2"])+b"\n" f.getvalue() )<block_end><block_end># Dict of refs that we expect all RefsContainerTests subclasses to define. _TEST_REFS={b"HEAD":b"<PASSWORD>493e<PASSWORD>ec" b"refs/heads/40-char-ref-aaaaaaaaaaaaaaaaaa":b"<PASSWORD>f02ec" b"refs/heads/master":b"<PASSWORD>" b"refs/heads/packed":b"<PASSWORD>" b"refs/tags/refs-0.1":b"df6800012397fb85c56e7418dd4eb9405dee075c" b"refs/tags/refs-0.2":b"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8" b"refs/heads/loop":b"ref: refs/heads/loop" }<class_stmt>RefsContainerTests(object)<block_start><def_stmt>test_keys self<block_start>actual_keys=set(self._refs.keys())<line_sep>self.assertEqual(set(self._refs.allkeys()) actual_keys)<line_sep>self.assertEqual(set(_TEST_REFS.keys()) actual_keys)<line_sep>actual_keys=self._refs.keys(b"refs/heads")<line_sep>actual_keys.discard(b"loop")<line_sep>self.assertEqual([b"40-char-ref-aaaaaaaaaaaaaaaaaa" b"master" b"packed"] sorted(actual_keys) )<line_sep>self.assertEqual([b"refs-0.1" b"refs-0.2"] sorted(self._refs.keys(b"refs/tags")))<block_end><def_stmt>test_iter self<block_start>actual_keys=set(self._refs.keys())<line_sep>self.assertEqual(set(self._refs) actual_keys)<line_sep>self.assertEqual(set(_TEST_REFS.keys()) actual_keys)<block_end><def_stmt>test_as_dict self# refs/heads/loop does not show up even if it exists <block_start>expected_refs=dict(_TEST_REFS)<del_stmt>expected_refs[b"refs/heads/loop"]<line_sep>self.assertEqual(expected_refs self._refs.as_dict())<block_end><def_stmt>test_get_symrefs self<block_start>self._refs.set_symbolic_ref(b"refs/heads/src" b"refs/heads/dst")<line_sep>symrefs=self._refs.get_symrefs()<if_stmt>b"HEAD"<in>symrefs<block_start>symrefs.pop(b"HEAD")<block_end>self.assertEqual({b"refs/heads/src":b"refs/heads/dst" b"refs/heads/loop":b"refs/heads/loop" } symrefs )<block_end><def_stmt>test_setitem self<block_start>self._refs[b"refs/some/ref"]=b"42d06bd4b77fed026b154d16493e5deab78f02ec"<line_sep>self.assertEqual(b"42d06bd4b77fed026b154d16493e5deab78f02ec" self._refs[b"refs/some/ref"] )<line_sep>self.assertRaises(errors.RefFormatError self._refs.__setitem__ b"notrefs/foo" b"42d06bd4b77fed026b154d16493e5deab78f02ec" )<block_end><def_stmt>test_set_if_equals self<block_start>nines=b"9"<times>40<line_sep>self.assertFalse(self._refs.set_if_equals(b"HEAD" b"c0ffee" nines))<line_sep>self.assertEqual(b"42d06bd4b77fed026b154d16493e5deab78f02ec" 
self._refs[b"HEAD"])<line_sep>self.assertTrue(self._refs.set_if_equals(b"HEAD" b"42d06bd4b77fed026b154d16493e5deab78f02ec" nines))<line_sep>self.assertEqual(nines self._refs[b"HEAD"])<line_sep># Setting the ref again is a no-op, but will return True. self.assertTrue(self._refs.set_if_equals(b"HEAD" nines nines))<line_sep>self.assertEqual(nines self._refs[b"HEAD"])<line_sep>self.assertTrue(self._refs.set_if_equals(b"refs/heads/master" <none> nines))<line_sep>self.assertEqual(nines self._refs[b"refs/heads/master"])<line_sep>self.assertTrue(self._refs.set_if_equals(b"refs/heads/nonexistant" ZERO_SHA nines))<line_sep>self.assertEqual(nines self._refs[b"refs/heads/nonexistant"])<block_end><def_stmt>test_add_if_new self<block_start>nines=b"9"<times>40<line_sep>self.assertFalse(self._refs.add_if_new(b"refs/heads/master" nines))<line_sep>self.assertEqual(b"42d06bd4b77fed026b154d16493e5deab78f02ec" self._refs[b"refs/heads/master"] )<line_sep>self.assertTrue(self._refs.add_if_new(b"refs/some/ref" nines))<line_sep>self.assertEqual(nines self._refs[b"refs/some/ref"])<block_end><def_stmt>test_set_symbolic_ref self<block_start>self._refs.set_symbolic_ref(b"refs/heads/symbolic" b"refs/heads/master")<line_sep>self.assertEqual(b"ref: refs/heads/master" self._refs.read_loose_ref(b"refs/heads/symbolic") )<line_sep>self.assertEqual(b"42d06bd4b77fed026b154d16493e5deab78f02ec" self._refs[b"refs/heads/symbolic"] )<block_end><def_stmt>test_set_symbolic_ref_overwrite self<block_start>nines=b"9"<times>40<line_sep>self.assertNotIn(b"refs/heads/symbolic" self._refs)<line_sep>self._refs[b"refs/heads/symbolic"]=nines<line_sep>self.assertEqual(nines self._refs.read_loose_ref(b"refs/heads/symbolic"))<line_sep>self._refs.set_symbolic_ref(b"refs/heads/symbolic" b"refs/heads/master")<line_sep>self.assertEqual(b"ref: refs/heads/master" self._refs.read_loose_ref(b"refs/heads/symbolic") )<line_sep>self.assertEqual(b"42d06bd4b77fed026b154d16493e5deab78f02ec" self._refs[b"refs/heads/symbolic"] )<block_end><def_stmt>test_check_refname self<block_start>self._refs._check_refname(b"HEAD")<line_sep>self._refs._check_refname(b"refs/stash")<line_sep>self._refs._check_refname(b"refs/heads/foo")<line_sep>self.assertRaises(errors.RefFormatError self._refs._check_refname b"refs")<line_sep>self.assertRaises(errors.RefFormatError self._refs._check_refname b"notrefs/foo")<block_end><def_stmt>test_contains self<block_start>self.assertIn(b"refs/heads/master" self._refs)<line_sep>self.assertNotIn(b"refs/heads/bar" self._refs)<block_end><def_stmt>test_delitem self<block_start>self.assertEqual(b"42d06bd4b77fed026b154d16493e5deab78f02ec" self._refs[b"refs/heads/master"] )<del_stmt>self._refs[b"refs/heads/master"]<line_sep>self.assertRaises(KeyError <lambda>:self._refs[b"refs/heads/master"])<block_end><def_stmt>test_remove_if_equals self<block_start>self.assertFalse(self._refs.remove_if_equals(b"HEAD" b"c0ffee"))<line_sep>self.assertEqual(b"42d06bd4b77fed026b154d16493e5deab78f02ec" self._refs[b"HEAD"])<line_sep>self.assertTrue(self._refs.remove_if_equals(b"refs/tags/refs-0.2" b"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8" ))<line_sep>self.assertTrue(self._refs.remove_if_equals(b"refs/tags/refs-0.2" ZERO_SHA))<line_sep>self.assertNotIn(b"refs/tags/refs-0.2" self._refs)<block_end><def_stmt>test_import_refs_name self<block_start>self._refs[b"refs/remotes/origin/other"]=b"48d01bd4b77fed026b154d16493e5deab78f02ec"<line_sep>self._refs.import_refs(b"refs/remotes/origin" {b"master":b"4<PASSWORD>6b154d16493e5deab78f02ec"} 
)<line_sep>self.assertEqual(b"42d06bd4b77fed026b154d16493e5deab78f02ec" self._refs[b"refs/remotes/origin/master"] )<line_sep>self.assertEqual(b"48d01bd4b77fed026b154d16493e5deab78f02ec" self._refs[b"refs/remotes/origin/other"] )<block_end><def_stmt>test_import_refs_name_prune self<block_start>self._refs[b"refs/remotes/origin/other"]=b"48d01bd4b77fed026b154d16493e5deab78f02ec"<line_sep>self._refs.import_refs(b"refs/remotes/origin" {b"master":b"<PASSWORD>1<PASSWORD>3e5deab78f02ec"} prune=<true> )<line_sep>self.assertEqual(b"42d06bd4b77fed026b154d16493e5deab78f02ec" self._refs[b"refs/remotes/origin/master"] )<line_sep>self.assertNotIn(b"refs/remotes/origin/other" self._refs)<block_end><def_stmt>test_watch self<block_start><try_stmt><block_start>watcher=self._refs.watch()<block_end><except_stmt>(NotImplementedError ImportError)<block_start>self.skipTest("watching not supported")<block_end><with_stmt>watcher<block_start>self._refs[b"refs/remotes/origin/other"]=b"48d01bd4b77fed026b154d16493e5deab78f02ec"<line_sep>change=next(watcher)<line_sep>self.assertEqual((b"refs/remotes/origin/other" b"48d01bd4b77fed026b154d16493e5deab78f02ec" ) change )<line_sep>self._refs[b"refs/remotes/origin/other"]=b"48d01bd4b77fed026b154d16493e5deab78f02ed"<line_sep>change=next(watcher)<line_sep>self.assertEqual((b"refs/remotes/origin/other" b"48d01bd4b77fed026b154d16493e5deab78f02ed" ) change )<del_stmt>self._refs[b"refs/remotes/origin/other"]<line_sep>change=next(watcher)<line_sep>self.assertEqual((b"refs/remotes/origin/other" <none>) change)<block_end><block_end><block_end><class_stmt>DictRefsContainerTests(RefsContainerTests TestCase)<block_start><def_stmt>setUp self<block_start>TestCase.setUp(self)<line_sep>self._refs=DictRefsContainer(dict(_TEST_REFS))<block_end><def_stmt>test_invalid_refname self# FIXME: Move this test into RefsContainerTests, but requires # some way of injecting invalid refs. 
<block_start>self._refs._refs[b"refs/stash"]=b"00"<times>20<line_sep>expected_refs=dict(_TEST_REFS)<del_stmt>expected_refs[b"refs/heads/loop"]<line_sep>expected_refs[b"refs/stash"]=b"00"<times>20<line_sep>self.assertEqual(expected_refs self._refs.as_dict())<block_end><block_end><class_stmt>DiskRefsContainerTests(RefsContainerTests TestCase)<block_start><def_stmt>setUp self<block_start>TestCase.setUp(self)<line_sep>self._repo=open_repo("refs.git")<line_sep>self.addCleanup(tear_down_repo self._repo)<line_sep>self._refs=self._repo.refs<block_end><def_stmt>test_get_packed_refs self<block_start>self.assertEqual({b"refs/heads/packed":b"42d06bd4b77fed026b154d16493e5deab78f02ec" b"refs/tags/refs-0.1":b"df6800012397fb85c56e7418dd4eb9405dee075c" } self._refs.get_packed_refs() )<block_end><def_stmt>test_get_peeled_not_packed self# not packed <block_start>self.assertEqual(<none> self._refs.get_peeled(b"refs/tags/refs-0.2"))<line_sep>self.assertEqual(b"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8" self._refs[b"refs/tags/refs-0.2"] )<line_sep># packed, known not peelable self.assertEqual(self._refs[b"refs/heads/packed"] self._refs.get_peeled(b"refs/heads/packed") )<line_sep># packed, peeled self.assertEqual(b"42d06bd4b77fed026b154d16493e5deab78f02ec" self._refs.get_peeled(b"refs/tags/refs-0.1") )<block_end><def_stmt>test_setitem self<block_start>RefsContainerTests.test_setitem(self)<line_sep>path=os.path.join(self._refs.path b"refs" b"some" b"ref")<with_stmt>open(path "rb")<as>f<block_start>self.assertEqual(b"42d06bd4b77fed026b154d16493e5deab78f02ec" f.read()[:40])<block_end>self.assertRaises(OSError self._refs.__setitem__ b"refs/some/ref/sub" b"42d06bd4b77fed026b154d16493e5deab78f02ec" )<block_end><def_stmt>test_delete_refs_container self# We shouldn't delete the refs directory <block_start>self._refs[b'refs/heads/blah']=b"42d06bd4b77fed026b154d16493e5deab78f02ec"<for_stmt>ref self._refs.allkeys()<block_start><del_stmt>self._refs[ref]<block_end>self.assertTrue(os.path.exists(os.path.join(self._refs.path b'refs')))<block_end><def_stmt>test_setitem_packed self<block_start><with_stmt>open(os.path.join(self._refs.path b"packed-refs") "w")<as>f<block_start>f.write("# pack-refs with: peeled fully-peeled sorted \n")<line_sep>f.write("42d06bd4b77fed026b154d16493e5deab78f02ec refs/heads/packed\n")<block_end># It's allowed to set a new ref on a packed ref, the new ref will be # placed outside on refs/ self._refs[b"refs/heads/packed"]=b"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8"<line_sep>packed_ref_path=os.path.join(self._refs.path b"refs" b"heads" b"packed")<with_stmt>open(packed_ref_path "rb")<as>f<block_start>self.assertEqual(b"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8" f.read()[:40])<block_end>self.assertRaises(OSError self._refs.__setitem__ b"refs/heads/packed/sub" b"42d06bd4b77fed026b154d16493e5deab78f02ec" )<block_end><def_stmt>test_setitem_symbolic self<block_start>ones=b"1"<times>40<line_sep>self._refs[b"HEAD"]=ones<line_sep>self.assertEqual(ones self._refs[b"HEAD"])<line_sep># ensure HEAD was not modified f=open(os.path.join(self._refs.path b"HEAD") "rb")<line_sep>v=next(iter(f)).rstrip(b"\n\r")<line_sep>f.close()<line_sep>self.assertEqual(b"ref: refs/heads/master" v)<line_sep># ensure the symbolic link was written through f=open(os.path.join(self._refs.path b"refs" b"heads" b"master") "rb")<line_sep>self.assertEqual(ones f.read()[:40])<line_sep>f.close()<block_end><def_stmt>test_set_if_equals self<block_start>RefsContainerTests.test_set_if_equals(self)<line_sep># ensure symref was followed 
self.assertEqual(b"9"<times>40 self._refs[b"refs/heads/master"])<line_sep># ensure lockfile was deleted self.assertFalse(os.path.exists(os.path.join(self._refs.path b"refs" b"heads" b"master.lock")))<line_sep>self.assertFalse(os.path.exists(os.path.join(self._refs.path b"HEAD.lock")))<block_end><def_stmt>test_add_if_new_packed self# don't overwrite packed ref <block_start>self.assertFalse(self._refs.add_if_new(b"refs/tags/refs-0.1" b"9"<times>40))<line_sep>self.assertEqual(b"df6800012397fb85c56e7418dd4eb9405dee075c" self._refs[b"refs/tags/refs-0.1"] )<block_end><def_stmt>test_add_if_new_symbolic self# Use an empty repo instead of the default. <block_start>repo_dir=os.path.join(tempfile.mkdtemp() "test")<line_sep>os.makedirs(repo_dir)<line_sep>repo=Repo.init(repo_dir)<line_sep>self.addCleanup(tear_down_repo repo)<line_sep>refs=repo.refs<line_sep>nines=b"9"<times>40<line_sep>self.assertEqual(b"ref: refs/heads/master" refs.read_ref(b"HEAD"))<line_sep>self.assertNotIn(b"refs/heads/master" refs)<line_sep>self.assertTrue(refs.add_if_new(b"HEAD" nines))<line_sep>self.assertEqual(b"ref: refs/heads/master" refs.read_ref(b"HEAD"))<line_sep>self.assertEqual(nines refs[b"HEAD"])<line_sep>self.assertEqual(nines refs[b"refs/heads/master"])<line_sep>self.assertFalse(refs.add_if_new(b"HEAD" b"1"<times>40))<line_sep>self.assertEqual(nines refs[b"HEAD"])<line_sep>self.assertEqual(nines refs[b"refs/heads/master"])<block_end><def_stmt>test_follow self<block_start>self.assertEqual(([b"HEAD" b"refs/heads/master"] b"42d06bd4b77fed026b154d16493e5deab78f02ec" ) self._refs.follow(b"HEAD") )<line_sep>self.assertEqual(([b"refs/heads/master"] b"42d06bd4b77fed026b154d16493e5deab78f02ec" ) self._refs.follow(b"refs/heads/master") )<line_sep>self.assertRaises(KeyError self._refs.follow b"refs/heads/loop")<block_end><def_stmt>test_delitem self<block_start>RefsContainerTests.test_delitem(self)<line_sep>ref_file=os.path.join(self._refs.path b"refs" b"heads" b"master")<line_sep>self.assertFalse(os.path.exists(ref_file))<line_sep>self.assertNotIn(b"refs/heads/master" self._refs.get_packed_refs())<block_end><def_stmt>test_delitem_symbolic self<block_start>self.assertEqual(b"ref: refs/heads/master" self._refs.read_loose_ref(b"HEAD"))<del_stmt>self._refs[b"HEAD"]<line_sep>self.assertRaises(KeyError <lambda>:self._refs[b"HEAD"])<line_sep>self.assertEqual(b"42d06bd4b77fed026b154d16493e5deab78f02ec" self._refs[b"refs/heads/master"] )<line_sep>self.assertFalse(os.path.exists(os.path.join(self._refs.path b"HEAD")))<block_end><def_stmt>test_remove_if_equals_symref self# HEAD is a symref, so shouldn't equal its dereferenced value <block_start>self.assertFalse(self._refs.remove_if_equals(b"HEAD" b"42d06bd4b77fed026b154d16493e5deab78f02ec"))<line_sep>self.assertTrue(self._refs.remove_if_equals(b"refs/heads/master" b"42d06bd4b77fed026b154d16493e5deab78f02ec" ))<line_sep>self.assertRaises(KeyError <lambda>:self._refs[b"refs/heads/master"])<line_sep># HEAD is now a broken symref self.assertRaises(KeyError <lambda>:self._refs[b"HEAD"])<line_sep>self.assertEqual(b"ref: refs/heads/master" self._refs.read_loose_ref(b"HEAD"))<line_sep>self.assertFalse(os.path.exists(os.path.join(self._refs.path b"refs" b"heads" b"master.lock")))<line_sep>self.assertFalse(os.path.exists(os.path.join(self._refs.path b"HEAD.lock")))<block_end><def_stmt>test_remove_packed_without_peeled self<block_start>refs_file=os.path.join(self._repo.path "packed-refs")<line_sep>f=GitFile(refs_file)<line_sep>refs_data=f.read()<line_sep>f.close()<line_sep>f=GitFile(refs_file 
"wb")<line_sep>f.write(b"\n".join(line<for>line refs_data.split(b"\n")<if><not>line<or>line[0]<not><in>b"#^"))<line_sep>f.close()<line_sep>self._repo=Repo(self._repo.path)<line_sep>refs=self._repo.refs<line_sep>self.assertTrue(refs.remove_if_equals(b"refs/heads/packed" b"42d06bd4b77fed026b154d16493e5deab78f02ec" ))<block_end><def_stmt>test_remove_if_equals_packed self# test removing ref that is only packed <block_start>self.assertEqual(b"df6800012397fb85c56e7418dd4eb9405dee075c" self._refs[b"refs/tags/refs-0.1"] )<line_sep>self.assertTrue(self._refs.remove_if_equals(b"refs/tags/refs-0.1" b"df6800012397fb85c56e7418dd4eb9405dee075c" ))<line_sep>self.assertRaises(KeyError <lambda>:self._refs[b"refs/tags/refs-0.1"])<block_end><def_stmt>test_remove_parent self<block_start>self._refs[b"refs/heads/foo/bar"]=b"df6800012397fb85c56e7418dd4eb9405dee075c"<del_stmt>self._refs[b"refs/heads/foo/bar"]<line_sep>ref_file=os.path.join(self._refs.path b"refs" b"heads" b"foo" b"bar" )<line_sep>self.assertFalse(os.path.exists(ref_file))<line_sep>ref_file=os.path.join(self._refs.path b"refs" b"heads" b"foo")<line_sep>self.assertFalse(os.path.exists(ref_file))<line_sep>ref_file=os.path.join(self._refs.path b"refs" b"heads")<line_sep>self.assertTrue(os.path.exists(ref_file))<line_sep>self._refs[b"refs/heads/foo"]=b"df6800012397fb85c56e7418dd4eb9405dee075c"<block_end><def_stmt>test_read_ref self<block_start>self.assertEqual(b"ref: refs/heads/master" self._refs.read_ref(b"HEAD"))<line_sep>self.assertEqual(b"42d06bd4b77fed026b154d16493e5deab78f02ec" self._refs.read_ref(b"refs/heads/packed") )<line_sep>self.assertEqual(<none> self._refs.read_ref(b"nonexistant"))<block_end><def_stmt>test_read_loose_ref self<block_start>self._refs[b"refs/heads/foo"]=b"df6800012397fb85c56e7418dd4eb9405dee075c"<line_sep>self.assertEqual(<none> self._refs.read_ref(b"refs/heads/foo/bar"))<block_end><def_stmt>test_non_ascii self<block_start><try_stmt><block_start>encoded_ref=os.fsencode(u"refs/tags/schön")<block_end><except_stmt>UnicodeEncodeError<block_start><raise>SkipTest("filesystem encoding doesn't support special character")<block_end>p=os.path.join(os.fsencode(self._repo.path) encoded_ref)<with_stmt>open(p "w")<as>f<block_start>f.write("00"<times>20)<block_end>expected_refs=dict(_TEST_REFS)<line_sep>expected_refs[encoded_ref]=b"00"<times>20<del_stmt>expected_refs[b"refs/heads/loop"]<line_sep>self.assertEqual(expected_refs self._repo.get_refs())<block_end><def_stmt>test_cyrillic self<block_start><if_stmt>sys.platform<in>("darwin" "win32")<block_start><raise>SkipTest("filesystem encoding doesn't support arbitrary bytes")<block_end># reported in https://github.com/dulwich/dulwich/issues/608 name=b"\xcd\xee\xe2\xe0\xff\xe2\xe5\xf2\xea\xe01"<line_sep>encoded_ref=b"refs/heads/"+name<with_stmt>open(os.path.join(os.fsencode(self._repo.path) encoded_ref) "w")<as>f<block_start>f.write("00"<times>20)<block_end>expected_refs=set(_TEST_REFS.keys())<line_sep>expected_refs.add(encoded_ref)<line_sep>self.assertEqual(expected_refs set(self._repo.refs.allkeys()))<line_sep>self.assertEqual({r[len(b"refs/"):]<for>r expected_refs<if>r.startswith(b"refs/")} set(self._repo.refs.subkeys(b"refs/")) )<line_sep>expected_refs.remove(b"refs/heads/loop")<line_sep>expected_refs.add(b"HEAD")<line_sep>self.assertEqual(expected_refs 
set(self._repo.get_refs().keys()))<block_end><block_end>_TEST_REFS_SERIALIZED=(b"42d06bd4b77fed026b154d16493e5deab78f02ec\t"<concat>b"refs/heads/40-char-ref-aaaaaaaaaaaaaaaaaa\n"<concat>b"42d06bd4b77fed026b154d16493e5deab78f02ec\trefs/heads/master\n"<concat>b"42d06bd4b77fed026b154d16493e5deab78f02ec\trefs/heads/packed\n"<concat>b"df6800012397fb85c56e7418dd4eb9405dee075c\trefs/tags/refs-0.1\n"<concat>b"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8\trefs/tags/refs-0.2\n")<class_stmt>InfoRefsContainerTests(TestCase)<block_start><def_stmt>test_invalid_refname self<block_start>text=_TEST_REFS_SERIALIZED+b"00"<times>20+b"\trefs/stash\n"<line_sep>refs=InfoRefsContainer(BytesIO(text))<line_sep>expected_refs=dict(_TEST_REFS)<del_stmt>expected_refs[b"HEAD"]<line_sep>expected_refs[b"refs/stash"]=b"00"<times>20<del_stmt>expected_refs[b"refs/heads/loop"]<line_sep>self.assertEqual(expected_refs refs.as_dict())<block_end><def_stmt>test_keys self<block_start>refs=InfoRefsContainer(BytesIO(_TEST_REFS_SERIALIZED))<line_sep>actual_keys=set(refs.keys())<line_sep>self.assertEqual(set(refs.allkeys()) actual_keys)<line_sep>expected_refs=dict(_TEST_REFS)<del_stmt>expected_refs[b"HEAD"]<del_stmt>expected_refs[b"refs/heads/loop"]<line_sep>self.assertEqual(set(expected_refs.keys()) actual_keys)<line_sep>actual_keys=refs.keys(b"refs/heads")<line_sep>actual_keys.discard(b"loop")<line_sep>self.assertEqual([b"40-char-ref-aaaaaaaaaaaaaaaaaa" b"master" b"packed"] sorted(actual_keys) )<line_sep>self.assertEqual([b"refs-0.1" b"refs-0.2"] sorted(refs.keys(b"refs/tags")))<block_end><def_stmt>test_as_dict self<block_start>refs=InfoRefsContainer(BytesIO(_TEST_REFS_SERIALIZED))<line_sep># refs/heads/loop does not show up even if it exists expected_refs=dict(_TEST_REFS)<del_stmt>expected_refs[b"HEAD"]<del_stmt>expected_refs[b"refs/heads/loop"]<line_sep>self.assertEqual(expected_refs refs.as_dict())<block_end><def_stmt>test_contains self<block_start>refs=InfoRefsContainer(BytesIO(_TEST_REFS_SERIALIZED))<line_sep>self.assertIn(b"refs/heads/master" refs)<line_sep>self.assertNotIn(b"refs/heads/bar" refs)<block_end><def_stmt>test_get_peeled self<block_start>refs=InfoRefsContainer(BytesIO(_TEST_REFS_SERIALIZED))<line_sep># refs/heads/loop does not show up even if it exists self.assertEqual(_TEST_REFS[b"refs/heads/master"] refs.get_peeled(b"refs/heads/master") )<block_end><block_end><class_stmt>ParseSymrefValueTests(TestCase)<block_start><def_stmt>test_valid self<block_start>self.assertEqual(b"refs/heads/foo" parse_symref_value(b"ref: refs/heads/foo"))<block_end><def_stmt>test_invalid self<block_start>self.assertRaises(ValueError parse_symref_value b"foobar")<block_end><block_end><class_stmt>StripPeeledRefsTests(TestCase)<block_start>all_refs={b"refs/heads/master":b"8843d7f92416211de9ebb963ff4ce28125932878" b"refs/heads/testing":b"186a005b134d8639a58b6731c7c1ea821a6eedba" b"refs/tags/1.0.0":b"a93db4b0360cc635a2b93675010bac8d101f73f0" b"refs/tags/1.0.0^{}":b"a93db4b0360cc635a2b93675010bac8d101f73f0" b"refs/tags/2.0.0":b"0749936d0956c661ac8f8d3483774509c165f89e" b"refs/tags/2.0.0^{}":b"0749936d0956c661ac8f8d3483774509c165f89e" }<line_sep>non_peeled_refs={b"refs/heads/master":b"8843d7f92416211de9ebb963ff4ce28125932878" b"refs/heads/testing":b"186a005b134d8639a58b6731c7c1ea821a6eedba" b"refs/tags/1.0.0":b"a93db4b0360cc635a2b93675010bac8d101f73f0" b"refs/tags/2.0.0":b"0749936d0956c661ac8f8d3483774509c165f89e" }<def_stmt>test_strip_peeled_refs self# Simple check of two dicts <block_start>self.assertEqual(strip_peeled_refs(self.all_refs) 
self.non_peeled_refs)<block_end><block_end>
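The tests above double as usage documentation for the refs containers; for quick orientation, here is a minimal sketch of the in-memory container they exercise (the 40-character SHA is a dummy placeholder):

# Minimal sketch of DictRefsContainer usage (the SHA is a dummy placeholder).
from dulwich.refs import DictRefsContainer

refs = DictRefsContainer({b"HEAD": b"ref: refs/heads/master"})
refs[b"refs/heads/master"] = b"1" * 40
print(refs[b"HEAD"])          # the HEAD symref is followed to the master SHA
print(sorted(refs.keys()))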
# -*- coding: utf-8 -*- # # Copyright 2012 <NAME> (http://jamesthornton.com) # BSD License (see LICENSE for details) # """ An interface for interacting with indices on Rexster. """<import_from_stmt>bulbs.utils initialize_element initialize_elements get_one_result<class_stmt>IndexProxy(object)<block_start>"""Abstract base class the index proxies."""<def_stmt>__init__ self index_class client# The index class for this proxy, e.g. ManualIndex. <block_start>self.index_class=index_class<line_sep># The Client object for the database. self.client=client<block_end><block_end><class_stmt>VertexIndexProxy(IndexProxy)<block_start>""" Manage vertex indices on Rexster. :param index_class: The index class for this proxy, e.g. ManualIndex. :type index_class: Index :param client: The Client object for the database. :type client: bulbs.rexster.client.RexsterClient :ivar index_class: Index class. :ivar client: RexsterClient object. """<def_stmt>create self index_name<block_start>""" Creates an Vertex index and returns it. :param index_name: Index name. :type index_name: str :rtype: bulbs.rexster.index.Index """<line_sep><raise>NotImplementedError<block_end><def_stmt>get self index_name="vertex"<block_start>""" Returns the Index object with the specified name or None if not found. :param index_name: Index name. :type index_name: str :rtype: bulbs.rexster.index.Index """<line_sep>index=self.index_class(self.client <none>)<line_sep>index.base_type="vertex"<line_sep>index._index_name=index_name<line_sep>self.client.registry.add_index(index_name index)<line_sep><return>index<block_end><def_stmt>get_or_create self index_name="vertex" index_params=<none><block_start>""" Get a Vertex Index or create it if it doesn't exist. :param index_name: Index name. :type index_name: str :rtype: bulbs.rexster.index.Index """<line_sep><return>self.get(index_name)<block_end><def_stmt>delete self index_name<block_start>""" Deletes an index and returns the Response. :param index_name: Index name. :type index_name: str :rtype: bulbs.rexster.client.RexsterResponse """<line_sep><raise>NotImplementedError<block_end><block_end><class_stmt>EdgeIndexProxy(IndexProxy)<block_start>""" Manage edge indices on Rexster. :param index_class: The index class for this proxy, e.g. ManualIndex. :type index_class: Index :param client: The Client object for the database. :type client: bulbs.rexster.client.RexsterClient :ivar index_class: Index class. :ivar client: RexsterClient object. """<def_stmt>create self index_name *args **kwds<block_start>""" Adds an index to the database and returns it. index_keys must be a string in this format: '[k1,k2]' Don't pass actual list b/c keys get double quoted. :param index_name: The name of the index to create. :param index_class: The class of the elements stored in the index. Either vertex or edge. """<line_sep><raise>NotImplementedError<block_end><def_stmt>get self index_name="edge"<block_start>""" Returns the Index object with the specified name or None if not found. :param index_name: Index name. :type index_name: str :rtype: bulbs.rexster.index.Index """<line_sep>index=self.index_class(self.client <none>)<line_sep>index.base_type="edge"<line_sep>index._index_name=index_name<line_sep>self.client.registry.add_index(index_name index)<line_sep><return>index<block_end><def_stmt>get_or_create self index_name="edge" index_params=<none><block_start>""" Get an Edge Index or create it if it doesn't exist. :param index_name: Index name. 
:type index_name: str :rtype: bulbs.rexster.index.Index """<line_sep><return>self.get(index_name)<block_end><def_stmt>delete self index_name<block_start>""" Deletes an index and returns the Response. :param index_name: Index name. :type index_name: str :rtype: bulbs.rexster.client.RexsterResponse """<line_sep><raise>NotImplementedError<block_end><block_end># # Index Containers (Titan only supports KeyIndex so far) # <class_stmt>Index(object)<block_start>"""Abstract base class for an index."""<def_stmt>__init__ self client result<block_start>self.client=client<line_sep>self.result=result<line_sep>self.base_type=<none># set by Factory.get_index self._index_name=<none><block_end># ditto # the index_name is actually ignored with Titan, # but setting it like normal to make tests pass @classmethod<def_stmt>get_proxy_class cls base_type<block_start>""" Returns the IndexProxy class. :param base_type: Index base type, either vertex or edge. :type base_type: str :rtype: class """<line_sep>class_map=dict(vertex=VertexIndexProxy edge=EdgeIndexProxy)<line_sep><return>class_map[base_type]<block_end>@property<def_stmt>index_name self<block_start>""" Returns the index name. :rtype: str """<line_sep># faking the index name as "vertex" <return>self._index_name<block_end>@property<def_stmt>index_class self<block_start>""" Returns the index class, either vertex or edge. :rtype: class """<line_sep><return>self.base_type<block_end>@property<def_stmt>index_type self<block_start>""" Returns the index type, which will either be automatic or manual. :rtype: str """<line_sep><return>"automatic"<block_end><def_stmt>count self key=<none> value=<none> **pair<block_start>""" Return a count of all elements with 'key' equal to 'value' in the index. :param key: The index key. This is optional because you can instead supply a key/value pair such as name="James". :param value: The index key's value. This is optional because you can instead supply a key/value pair such as name="James". :param pair: Optional keyword param. Instead of supplying key=name and value = 'James', you can supply a key/value pair in the form of name='James'. """<line_sep><raise>NotImplementedError<block_end><def_stmt>_get_key_value self key value pair<block_start>"""Return the key and value, regardless of how it was entered."""<if_stmt>pair<block_start>key,value=pair.popitem()<block_end><return>key value<block_end><def_stmt>_get_method self **method_map<block_start>method_name=method_map[self.index_class]<line_sep>method=getattr(self.client method_name)<line_sep><return>method<block_end><def_stmt>lookup self key=<none> value=<none> **pair<block_start>""" Return a generator containing all the elements with key property equal to value in the index. :param key: The index key. This is optional because you can instead supply a key/value pair such as name="James". :param value: The index key's value. This is optional because you can instead supply a key/value pair such as name="James". :param raw: Optional keyword param. If set to True, it won't try to initialize the results. Defaults to False. :param pair: Optional keyword param. Instead of supplying key=name and value = 'James', you can supply a key/value pair in the form of name='James'. 
"""<line_sep>key,value=self._get_key_value(key value pair)<line_sep>resp=self.client.lookup_vertex(self.index_name key value)<line_sep><return>initialize_elements(self.client resp)<block_end><def_stmt>get_unique self key=<none> value=<none> **pair<block_start>""" Returns a max of 1 elements matching the key/value pair in the index. :param key: The index key. This is optional because you can instead supply a key/value pair such as name="James". :param value: The index key's value. This is optional because you can instead supply a key/value pair such as name="James". :param pair: Optional keyword param. Instead of supplying key=name and value = 'James', you can supply a key/value pair in the form of name='James'. """<line_sep>key,value=self._get_key_value(key value pair)<line_sep>resp=self.client.lookup_vertex(self.index_name key value)<if_stmt>resp.total_size<g>0<block_start>result=get_one_result(resp)<line_sep><return>initialize_element(self.client result)<block_end><block_end><block_end><class_stmt>KeyIndex(Index)<block_start><def_stmt>keys self<block_start>"""Return the index's keys."""<line_sep># Titan does not support edge indices. resp=self.client.get_vertex_keys()<line_sep><return>[result.raw<for>result resp.results]<block_end><def_stmt>create_key self key# TODO: You can't create a key if prop already exists - workaround? <block_start><if_stmt>self.base_type<is>"edge"<block_start><return>self.create_edge_key(key)<block_end><return>self.create_vertex_key(key)<block_end><def_stmt>create_vertex_key self key<block_start><return>self.client.create_vertex_key_index(key)<block_end><def_stmt>create_edge_key self key<block_start><return>self.client.create_vertex_key_index(key)<block_end><def_stmt>rebuild self<block_start><raise>NotImplementedError# (for now) # need class_map b/c the Blueprints need capitalized class names, # but Rexster returns lower-case class names for index_class method_map=dict(vertex=self.client.rebuild_vertex_index edge=self.client.rebuild_edge_index)<line_sep>rebuild_method=method_map.get(self.index_class)<line_sep>resp=rebuild_method(self.index_name)<line_sep><return>list(resp.results)<block_end><block_end>
x=y=1<line_sep>print(x y)<line_sep>x=y=z=1<line_sep>print(x y z)<line_sep>
""" This module contains the class Objective Author: <NAME>, <NAME> """<import_from_stmt>zoopt.solution Solution<import_from_stmt>zoopt.utils.zoo_global pos_inf<import_from_stmt>zoopt.utils.tool_function ToolFunction<import_stmt>numpy<as>np<class_stmt>Objective<block_start>""" This class represents the objective function and its associated variables """<def_stmt>__init__ self func=<none> dim=<none> constraint=<none> resample_func=<none><block_start>""" Initialization. :param func: objective function defined by the user :param dim: a Dimension object, which describes the search space. :param constraint: constraint function for POSS :param resample_func: resample function for SSRacos :param reducedim: whether to use sequential random embedding """<line_sep>self.__func=func<line_sep>self.__dim=dim<line_sep># the function for inheriting solution attachment self.__inherit=self.default_inherit<line_sep>self.__post_inherit=self.default_post_inherit<line_sep># the constraint function self.__constraint=constraint<line_sep># the history of optimization self.__history=[]<line_sep>self.__resample_times=1<line_sep>self.__resample_func=self.resample_func<if>resample_func<is><none><else>resample_func<line_sep>self.__balance_rate=1<line_sep># for sequential random embedding self.__reducedim=<false><line_sep>self.__A=<none><line_sep>self.__last_x=<none><block_end><def_stmt>parameter_set self parameter<block_start>""" Use a Parameter object to set attributes in Objective object. :param parameter: a Parameter object :return: no return """<if_stmt>parameter.get_noise_handling()<is><true><and>parameter.get_suppression()<is><true><block_start>self.__balance_rate=parameter.get_balance_rate()<block_end><if_stmt>parameter.get_noise_handling()<is><true><and>parameter.get_resampling()<is><true><block_start>self.__resample_times=parameter.get_resample_times()<block_end><if_stmt>parameter.get_high_dim_handling()<is><true><and>parameter.get_reducedim()<is><true><block_start>self.__reducedim=<true><block_end><block_end><def_stmt>construct_solution self x parent=<none><block_start>""" Construct a solution from x :param x: a list :param parent: the attached structure :return: solution """<line_sep>new_solution=Solution()<line_sep>new_solution.set_x(x)<line_sep>new_solution.set_attach(self.__inherit(parent))<line_sep><return>new_solution<block_end><def_stmt>eval self solution<block_start>""" Use the objective function to evaluate a solution. :param solution: :return: value of fx(evaluation result) will be returned """<line_sep>res=[]<for_stmt>i range(self.__resample_times)<block_start><if_stmt>self.__reducedim<is><false><block_start>val=self.__func(solution)<block_end><else_stmt><block_start>x=solution.get_x()<line_sep>x_origin=x[0]<times>np.array(self.__last_x.get_x())+np.dot(self.__A np.array(x[1:]))<line_sep>val=self.__func(Solution(x=x_origin))<block_end>res.append(val)<line_sep>self.__history.append(val)<block_end>value=sum(res)/float(len(res))<line_sep>solution.set_value(value)<line_sep>solution.set_post_attach(self.__post_inherit())<line_sep><return>value<block_end><def_stmt>resample self solution repeat_times<block_start>""" Resample function for value suppression. 
:param solution: a Solution object :param repeat_times: repeat times :return: repeat times """<if_stmt>solution.get_resample_value()<is><none><block_start>solution.set_resample_value(self.__resample_func(solution repeat_times))<line_sep>solution.set_value((1-self.__balance_rate)<times>solution.get_value()+self.__balance_rate<times>solution.get_resample_value())<line_sep>solution.set_post_attach(self.__post_inherit())<line_sep><return>repeat_times<block_end><else_stmt><block_start><return>0<block_end><block_end><def_stmt>resample_func self solution iteration_num<block_start>result=[]<for_stmt>i range(iteration_num)<block_start>result.append(self.eval(solution))<block_end><return>sum(result)<times>1.0/len(result)<block_end><def_stmt>eval_constraint self solution<block_start>solution.set_value([self.eval(solution) self.__constraint(solution)])<line_sep>solution.set_post_attach(self.__post_inherit())<block_end><def_stmt>set_func self func<block_start>""" Set the objective function :param func: the objective function :return: no return value """<line_sep>self.__func=func<block_end><def_stmt>get_func self<block_start><return>self.__func<block_end><def_stmt>set_dim self dim<block_start>self.__dim=dim<block_end><def_stmt>get_dim self<block_start><return>self.__dim<block_end><def_stmt>set_inherit_func self inherit_func<block_start>self.__inherit=inherit_func<block_end><def_stmt>set_post_inherit_func self inherit_func<block_start>self.__post_inherit=inherit_func<block_end><def_stmt>get_post_inherit_func self<block_start><return>self.__post_inherit<block_end><def_stmt>get_inherit_func self<block_start><return>self.__inherit<block_end><def_stmt>set_constraint self constraint<block_start>self.__constraint=constraint<line_sep><return><block_end><def_stmt>get_constraint self<block_start><return>self.__constraint<block_end><def_stmt>set_history self history<block_start>self.__history=history<block_end><def_stmt>get_history self<block_start><return>self.__history<block_end><def_stmt>get_history_bestsofar self<block_start>""" Get the best-so-far history. """<line_sep>history_bestsofar=[]<line_sep>bestsofar=pos_inf<for_stmt>i range(len(self.__history))<block_start><if_stmt>self.__history[i]<l>bestsofar<block_start>bestsofar=self.__history[i]<block_end>history_bestsofar.append(bestsofar)<block_end><return>history_bestsofar<block_end><def_stmt>get_reducedim self<block_start><return>self.__reducedim<block_end><def_stmt>get_last_x self<block_start><return>self.__last_x<block_end><def_stmt>get_A self<block_start><return>self.__A<block_end><def_stmt>set_A self A<block_start>self.__A=A<block_end><def_stmt>set_last_x self x<block_start>self.__last_x=x<block_end><def_stmt>clean_history self<block_start>""" clean the optimization history """<line_sep>self.__history=[]<block_end>@staticmethod<def_stmt>default_inherit parent=<none><block_start>""" Default inherited function. :param parent: the parent structure :return: None """<line_sep><return><none><block_end>@staticmethod<def_stmt>default_post_inherit parent=<none><block_start>""" Default post inherited function. :param parent: the parent structure :return: None """<line_sep><return><none><block_end><block_end>
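An Objective is normally built around a user function that receives a Solution, then handed to the optimizer together with a Dimension and a Parameter. A minimal sketch, assuming the usual ZOOpt entry points (Dimension, Parameter, Opt), which live outside this file:

# Minimal sketch; Dimension, Parameter and Opt are the standard zoopt entry
# points and are assumed here, not defined in this module.
from zoopt import Dimension, Objective, Parameter, Opt

def sphere(solution):
    # objective functions receive a Solution and read its decision vector
    x = solution.get_x()
    return sum(v * v for v in x)

dim = Dimension(10, [[-1, 1]] * 10, [True] * 10)   # 10 continuous dims in [-1, 1]
obj = Objective(sphere, dim)
best = Opt.min(obj, Parameter(budget=1000))
print(best.get_x(), best.get_value())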
#! /usr/bin/env python3 <import_stmt>argparse<import_stmt>shutil<import_stmt>socket<import_from_stmt>concurrent.futures ThreadPoolExecutor<import_from_stmt>tempfile TemporaryDirectory<import_from_stmt>typing NamedTuple<import_stmt>zmq<import_from_stmt>test_zmq.zstack ZStack<line_sep>SEED=b'E21bEA7DeaE981cBabCECd9FAeF4e340'<line_sep>EXPECTED_ZMQ_REPLY='{"type": "DIAGNOSE", "text": "ZMQ connection is possible"}'<line_sep>EXPECTED_TCP_REPLY='{"type": "DIAGNOSE", "text": "TCP connection is possible"}'<line_sep>QUIT=<false><line_sep>HA=NamedTuple("HA" [("host" str) ("port" int)])<def_stmt>msg_handler zstack msg<block_start>_,frm=msg<line_sep>print(msg)<line_sep>zstack.send(EXPECTED_ZMQ_REPLY frm)<block_end><def_stmt>loop <block_start>loop=zmq.asyncio.ZMQEventLoop()<line_sep><return>loop<block_end><class_stmt>SafeTemporaryDirectory(TemporaryDirectory)<block_start>@classmethod<def_stmt>_cleanup cls name warn_message<block_start>shutil.rmtree(name ignore_errors=<true>)<block_end><def_stmt>cleanup self<block_start><if_stmt>self._finalizer.detach()<block_start>shutil.rmtree(self.name ignore_errors=<true>)<block_end><block_end><block_end><def_stmt>up_zmq_server server_ha<block_start>print("ZMQ_SERVER: Seed is {}".format(SEED))<line_sep>server=ZStack(name='Test_zmq' ha=server_ha basedirpath=base_dir msgHandler=msg_handler seed=SEED onlyListener=<true>)<line_sep>server.start()<line_sep><return>server<block_end><def_stmt>up_tcp_server server_ha<block_start><with_stmt>socket.socket(socket.AF_INET socket.SOCK_STREAM)<as>s<block_start>s.bind(server_ha)<line_sep>s.listen()<line_sep>print("TCP_SERVER: Listen clients on {}".format(server_ha))<while_stmt><true><block_start>conn,addr=s.accept()<with_stmt>conn<block_start>print('TCP_SERVER: Connected by' addr)<while_stmt><true><block_start>data=conn.recv(1024)<if_stmt>data<block_start>print("TCP_SERVER: Received {} from client through tcp".format(data))<line_sep>conn.sendall(EXPECTED_TCP_REPLY.encode())<line_sep><break><block_end><block_end><block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--zmq_port' help="Port which will be used for ZMQ client's connections")<line_sep>parser.add_argument('--tcp_port' help="Port which will be used for TCP client's connections")<line_sep>parser.add_argument('--addr' help="Address which will used for incoming client's connection. 0.0.0.0 by default" default='0.0.0.0' required=<false>)<line_sep>args=parser.parse_args()<line_sep>zmq_server_ha=HA(args.addr int(args.zmq_port)<if>args.zmq_port<else>'9999')<line_sep>tcp_server_ha=HA(args.addr int(args.tcp_port)<if>args.tcp_port<else>10000)<with_stmt>SafeTemporaryDirectory()<as>base_dir<block_start>zmq_server=up_zmq_server(zmq_server_ha)<line_sep>tpe=ThreadPoolExecutor(max_workers=4)<line_sep>tpe.submit(up_tcp_server tcp_server_ha)<async_keyword><def_stmt>wrapper <block_start><while_stmt><true><block_start><await>zmq_server.service()<block_end><block_end>looper=loop()<try_stmt><block_start>looper.run_until_complete(wrapper())<block_end><except_stmt>KeyboardInterrupt<block_start>zmq_server.stop()<line_sep>tpe.shutdown(wait=<false>)<line_sep>print("Server was stopped")<line_sep>exit(0)<block_end><block_end><block_end>
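The script above only implements the server side of the diagnosis; a client-side TCP probe against it can be as small as the sketch below (host and port are placeholders matching the server defaults):

# Hypothetical client-side TCP probe for the diagnostic server above.
import socket

EXPECTED_TCP_REPLY = '{"type": "DIAGNOSE", "text": "TCP connection is possible"}'

def check_tcp(host="127.0.0.1", port=10000):
    # connect, send any payload, and verify the server's canned reply
    with socket.create_connection((host, port), timeout=5) as conn:
        conn.sendall(b'{"type": "DIAGNOSE"}')
        reply = conn.recv(1024).decode()
    return reply == EXPECTED_TCP_REPLY

if __name__ == "__main__":
    print("TCP OK" if check_tcp() else "TCP check failed")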
#/u/GoldenSights <import_stmt>sys<import_stmt>traceback<import_stmt>time<import_stmt>datetime<import_stmt>sqlite3<import_stmt>json<import_stmt>praw<line_sep>'''USER CONFIGURATION'''<line_sep>"""GENERAL"""<line_sep>APP_ID=""<line_sep>APP_SECRET=""<line_sep>APP_URI=""<line_sep>APP_REFRESH=""<line_sep># https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/ USERAGENT="/r/Excel Clippy Office Assistant all-in-one moderator."<line_sep># This is a short description of what the bot does. # For example "/u/GoldenSights' Newsletter bot" SUBREDDIT="Goldtesting"<line_sep># This is the sub or list of subs to scan for new posts. # For a single sub, use "sub1". # For multiple subreddits, use "sub1+sub2+sub3+..." PLAY_BOOT_SOUND=<true><line_sep>#Play boot.wav MAXPOSTS=100<line_sep># How many posts to get from the /new queue at once WAIT=30<line_sep># The number of seconds between cycles. The bot is completely inactive during # this time """**************"""<line_sep>"""CLIPPYPOINTS™ """<line_sep>"""**************"""<line_sep>POINT_STRING_USR=["Solution Verified"]<line_sep># OP can use this string to award points in his thread. POINT_STRING_MOD=["+1 Point"]<line_sep># Moderators can use this to give points at any time. POINT_FLAIR_CSS="points"<line_sep># The CSS class associated with point flair # Set to "" for none POINT_REPLY="You have awarded one point to _parent_"<line_sep># This is the phrase that User will receive # _parent_ will be replaced by the username of the Parent. POINT_EXEMPT=[]<line_sep># Any usernames in this list will not receive points. # Perhaps they have special flair. POINT_OP_ONLY=<true><line_sep># Is OP the only person who can give points? # I recommend setting this to False. Other users might have the same question # and would like to reward a good answer. POINT_PER_THREAD=200<line_sep># How many points can be distributed in a single thread? POINT_DO_EXPLAIN=<true><line_sep># If the max-per-thread is reached and someone tries to give a point, reply to # them saying that the max has already been reached POINT_EXPLAIN=""" Sorry, but %d point(s) have already been distributed in this thread. This is the maximum allowed at this time. """%POINT_PER_THREAD<line_sep># If EXPLAINMAX is True, this will be said to someone who tries to give a # point after max is reached POINT_EXPLAIN_OP_ONLY=""" Hi! It looks like you are trying to award a point and you are not the OP! I am here to assist you! What would you like help with? [ClippyPoints^(TM)?](/r/excel/wiki/clippy) [Flair Descriptions](http://www.reddit.com/r/excel/wiki/index) """<line_sep>"""**************"""<line_sep>"""FLAIR REMINDER"""<line_sep>"""**************"""<line_sep>FLAIR_WARN_DELAY=86400<line_sep># This is the time, IN SECONDS, the user has to reply to the first comment. # If he does not respond by this time, post is removed NCDELAY=172800<line_sep>FLAIR_WARN_MESSAGE=""" Hi! It looks like you are trying to ask a question! Since you have not responded in the last 24 hours, I am here to assist you! If your questions has been solved, please be sure to update the flair. Would you like help? [Help Changing Your Flair?](https://www.reddit.com/r/excel/wiki/flair) [Asking Question and Sharing Data](https://www.reddit.com/r/excel/wiki/sharingquestions) """<line_sep># This is what the bot tells you when you dont meet the DELAY. Uses reddit's # usual Markdown formatting FLAIR_IGNORE_MODS=<false><line_sep># Do you want the bot to ignore posts made by moderators? # Use True or False (With capitals! No quotations!) 
FLAIR_IGNORE_SELF=<false><line_sep>#Do you want the bot to ignore selfposts? FLAIR_SOLVED="solved"<line_sep>FLAIR_UNSOLVED="unsolved"<line_sep>FLAIR_CHAL="challenge"<line_sep>FLAIR_MANN="Mod Announcement"<line_sep>FLAIR_MNEWS="Mod News"<line_sep>FLAIR_WAITING="Waiting on OP"<line_sep>FLAIR_DISCUSS="discussion"<line_sep>FLAIR_ADVERTISEMENT="advertisement"<line_sep>FLAIR_TEMPLATE="User Template"<line_sep>FLAIR_PROTIP="pro tip"<line_sep>FLAIR_TRIGGERS=["that works" "perfect" "thank you so much" "huge help" "figured it out" "got it" "thanks for your help"]<line_sep>#These encourage OP to change flair / award point FLAIR_REMINDER=""" Hi! It looks like you received an answer to your question! Since the top is still marked as unsolved, I am here to assist you! If your questions has been solved, please be sure to update the flair. Would you like help? [Help Changing Your Flair?](http://www.reddit.com/r/excel/wiki/index) [Flair Descriptions](http://www.reddit.com/r/excel/wiki/index) """<line_sep>"""******************"""<line_sep>"""FUNCTION REFERENCE"""<line_sep>"""******************"""<line_sep>DICT_TRIGGER="clippy: "<line_sep># The trigger phrase for perfoming a lookup DICT_FILE='reference.txt'<line_sep># The file with the Keys/Values DICT_RESULT_FORM="_value_"<line_sep># This is the form that the result will take # You may use _key_ and _value_ to inject the key/value from the dict. # You may delete one or both of these injectors. DICT_LEVENSHTEIN=<false><line_sep># If this is True it will use a function that is slow but can find # misspelled keys # If this is False it will use a simple function that is very fast but can # only find keys which are spelled exactly DICT_FAIL=""" Hi! It looks like you're looking for help with an Excel function! Unfortunately I have not learned that function yet. If you'd like to change that, [message the moderators](http://www.reddit.com/message/compose?to=%2Fr%2Fexcel)! """<line_sep># The comment which is created when a function is requested # but not in the file """***************"""<line_sep>"""WELCOME MESSAGE"""<line_sep>"""***************"""<line_sep>WELCOME_SUBJECT="""Welcome to /r/Excel, I am here to help!"""<line_sep>WELCOME_MESSAGE=""" Hi %s! It looks like you are new to posting in /r/Excel. Did you know we have a few ways to help you receive better help? How can I help you? [How to Share Your Questions](/r/excel/wiki/sharingquestions) [Changing Link Flair](/r/excel/wiki/flair) [ClippyPoints^TM](/r/excel/wiki/clippy) ^This ^message ^is ^auto-generated ^and ^is ^not ^monitored ^on ^a ^regular ^basis, ^replies ^to ^this ^message ^may ^not ^go ^answered. 
^Remember ^to [^contact ^the ^moderators](http://www.reddit.com/message/compose?to=%2Fr%2Fexcel) ^to ^guarantee ^a ^response """<line_sep># Sent to the user if he has created his first post in the subreddit '''All done!'''<class_stmt>ClippyPoints<block_start><def_stmt>incrementflair self subreddit username#Returns True if the operation was successful <block_start><if_stmt>isinstance(subreddit str)<block_start>subreddit=r.get_subreddit(subreddit)<block_end>success=<false><line_sep>print('\t\tChecking flair for '+username)<line_sep>flairs=subreddit.get_flair(username)<line_sep>flairs=flairs['flair_text']<if_stmt>flairs<is><not><none><and>flairs<ne>''<block_start>print('\t\t:'+flairs)<try_stmt><block_start>flairs=int(flairs)<line_sep>flairs<augadd>1<line_sep>flairs=str(flairs)<line_sep>success=<true><block_end><except_stmt>ValueError<block_start>print('\t\tCould not convert flair to a number.')<block_end><block_end><else_stmt><block_start>print('\t\tNo current flair. 1 point')<line_sep>flairs='1'<line_sep>success=<true><block_end><if_stmt>success<block_start>print('\t\tAssigning Flair: '+flairs)<line_sep>subreddit.set_flair(username flair_text=flairs flair_css_class=POINT_FLAIR_CSS)<block_end><return>success<block_end><def_stmt>receive self comments<block_start>print('\tClippyPoints received comments.')<line_sep>subreddit=r.get_subreddit(SUBREDDIT)<for_stmt>comment comments<block_start>cid=comment.id<line_sep>cur.execute('SELECT * FROM clippy_points WHERE ID=?' [cid])<if_stmt><not>cur.fetchone()<block_start>print(cid)<line_sep>cbody=comment.body.lower()<try_stmt><block_start><if_stmt><not>comment.is_root<block_start>cauthor=comment.author.name<line_sep>print('\tChecking subreddit moderators')<line_sep>moderators=[user.name<for>user subreddit.get_moderators()]<line_sep>byuser=<false><if_stmt>cauthor<not><in>moderators<and>any(flag.lower()<in>cbody<for>flag POINT_STRING_USR)<block_start>byuser=<true><block_end><if_stmt>byuser<or>((cauthor<in>moderators<and>any(flag.lower()<in>cbody<for>flag POINT_STRING_MOD)))<block_start>print('\tFlagged %s.'%cid)<line_sep>print('\t\tFetching parent and Submission data.')<line_sep>parentcom=r.get_info(thing_id=comment.parent_id)<line_sep>pauthor=parentcom.author.name<line_sep>op=comment.submission.author.name<line_sep>opid=comment.submission.id<if_stmt>pauthor<ne>cauthor<block_start><if_stmt><not>any(exempt.lower()<eq>pauthor.lower()<for>exempt POINT_EXEMPT)<block_start><if_stmt>POINT_OP_ONLY<is><false><or>cauthor<eq>op<or>cauthor<in>moderators<block_start>cur.execute('SELECT * FROM clippy_points_s WHERE ID=?' [opid])<line_sep>fetched=cur.fetchone()<if_stmt><not>fetched<block_start>cur.execute('INSERT INTO clippy_points_s VALUES(?, ?)' [opid 0])<line_sep>fetched=0<block_end><else_stmt><block_start>fetched=fetched[1]<block_end><if_stmt>fetched<l>POINT_PER_THREAD<block_start><if_stmt>self.incrementflair(subreddit pauthor)<block_start>print('\t\tWriting reply')<line_sep>comment_confirm=comment.reply(POINT_REPLY.replace('_parent_' pauthor))<line_sep>comment_confirm.distinguish()<line_sep>cur.execute('UPDATE clippy_points_s SET count=? WHERE ID=?' 
[fetched+1 opid])<block_end><if_stmt>byuser<block_start>comment.submission.set_flair(flair_text=FLAIR_SOLVED flair_css_class="solvedcase")<block_end><block_end><else_stmt><block_start>print('\t\tMaxPerThread has been reached')<if_stmt>EXPLAINMAX<is><true><block_start>print('\t\tWriting reply')<line_sep>comment.reply(POINT_EXPLAIN)<block_end><block_end><block_end><else_stmt><block_start>print('\tOther users cannot give points.')<line_sep>#comment_confirm = comment.reply(EXPLAINOPONLY) #comment_confirm.distinguish() <block_end><block_end><else_stmt><block_start>print('\t\tParent is on the exempt list.')<block_end><block_end><else_stmt><block_start>print('\t\tCannot give points to self.')<block_end><block_end><block_end><else_stmt><block_start>print('\t\tRoot comment. Ignoring.')<block_end><block_end><except_stmt>AttributeError<block_start>print('\t\tCould not fetch usernames. Cannot proceed.')<block_end>cur.execute('INSERT INTO clippy_points VALUES(?)' [cid])<block_end>sql.commit()<block_end>print('\tClippyPoints finished')<block_end><block_end><class_stmt>ClippyFlairReminder<block_start><def_stmt>receive self posts<block_start>print('\tClippyFlair received submissions')<line_sep>now=datetime.datetime.now()<line_sep>subreddit=r.get_subreddit(SUBREDDIT)<line_sep>print('\tChecking subreddit moderators')<line_sep>moderators=[user.name<for>user subreddit.get_moderators()]<for_stmt>post posts<block_start>found=<false><line_sep>ctimes=[]<line_sep>pid=post.id<try_stmt><block_start>pauthor=post.author.name<block_end><except_stmt>AttributeError<block_start>pauthor='[deleted]'<block_end>ptime=post.created_utc<line_sep>curtime=getTime(<true>)<line_sep>ctime=curtime<line_sep>cur.execute('SELECT * FROM clippy_flair WHERE id=?' [pid])<if_stmt><not>cur.fetchone()<block_start><if_stmt>post.is_self<is><false><or>FLAIR_IGNORE_SELF<is><false><block_start><if_stmt>pauthor<not><in>moderators<or>FLAIR_IGNORE_MODS<is><false><block_start>comments=praw.helpers.flatten_tree(post.comments)<try_stmt><block_start>flair=post.link_flair_text.lower()<block_end><except_stmt>AttributeError<block_start>flair=''<block_end><if_stmt>flair<eq>FLAIR_UNSOLVED.lower()<block_start>print(pid+': Unsolved')<for_stmt>comment comments<block_start><try_stmt><block_start>cauthor=comment.author.name<block_end><except_stmt>AttributeError<block_start>cauthor='[deleted]'<block_end><if_stmt>cauthor<ne>pauthor<block_start>found=<true><line_sep><break><block_end><block_end><if_stmt><not>found<block_start>print('\tNo comments by another user. No action taken.')<block_end><else_stmt><block_start>print('\tFound comment by other user. Marking as Waiting.')<line_sep>post.set_flair(flair_text=FLAIR_WAITING flair_css_class="waitingonop")<block_end><block_end><elif_stmt>flair<eq>FLAIR_WAITING.lower()<block_start>print(pid+': Waiting')<for_stmt>comment comments<block_start><try_stmt><block_start>cauthor=comment.author.name<block_end><except_stmt>AttributeError<block_start>cauthor='[deleted]'<block_end><if_stmt>cauthor<eq>pauthor<block_start>found=<true><line_sep>pbody=comment.body.lower()<block_end><else_stmt><block_start>ctimes.append(comment.created_utc)<block_end><block_end><if_stmt>found<is><true><block_start><if_stmt><not>any(trigger<in>pbody<for>trigger POINT_STRING_USR)<block_start>print('\tFound comment by OP. 
All clear, changing flair back to unsolved.')<line_sep>post.set_flair(flair_text=FLAIR_UNSOLVED flair_css_class="notsolvedcase")<line_sep>#print('\tUpvoting comment..') #post.upvote() cur.execute('INSERT INTO clippy_flair VALUES(?)' [pid])<if_stmt>any(key.lower()<in>pbody<for>key FLAIR_TRIGGERS)<block_start>print('Replying to '+pid+' by '+pauthor)<line_sep>newcomment=comment.reply(FLAIR_REMINDER)<line_sep>newcomment.distinguish()<block_end><block_end><block_end><elif_stmt>found<is><false><and>len(ctimes)<g>0<block_start>print('\tNo comments by OP. Checking time limit.')<line_sep>ctime=min(ctimes)<line_sep>difference=curtime-ctime<if_stmt>difference<g>FLAIR_WARN_DELAY<block_start>print('\tTime is up.')<line_sep>print('\tLeaving Comment')<line_sep>newcomment=post.add_comment(FLAIR_WARN_MESSAGE)<line_sep>print('\tDistinguishing Comment')<line_sep>newcomment.distinguish()<line_sep>cur.execute('INSERT INTO clippy_flair VALUES(?)' [pid])<block_end><else_stmt><block_start>differences=str('%.0f'%(FLAIR_WARN_DELAY-difference))<line_sep>print('\tStill has '+differences+'s.')<block_end><block_end><elif_stmt>found<is><false><and>len(ctimes)<eq>0<block_start>print('\tNo comments by OP, but no other comments are available.')<block_end><block_end><else_stmt><block_start>print(pid+': Neither flair')<if_stmt>flair<eq>FLAIR_DISCUSS.lower()<block_start>print(pid+': is a discussion post, adding to ignore list...')<line_sep>cur.execute('INSERT INTO clippy_flair VALUES(?)' [pid])<block_end><if_stmt>flair<eq>FLAIR_ADVERTISEMENT.lower()<block_start>print(pid+': is an advertisement post, adding to ignore list...')<line_sep>cur.execute('INSERT INTO clippy_flair VALUES(?)' [pid])<block_end><if_stmt>flair<eq>FLAIR_TEMPLATE.lower()<block_start>print(pid+': is a User Template post, adding to ignore list...')<line_sep>cur.execute('INSERT INTO clippy_flair VALUES(?)' [pid])<block_end><if_stmt>flair<eq>FLAIR_PROTIP.lower()<block_start>print(pid+': is a ProTip post, adding to ignore list...')<line_sep>cur.execute('INSERT INTO clippy_flair VALUES(?)' [pid])<block_end><if_stmt>flair<eq>FLAIR_SOLVED.lower()<block_start>print(pid+': is a SOLVED post, adding to ignore list...')<line_sep>cur.execute('INSERT INTO clippy_flair VALUES(?)' [pid])<block_end><if_stmt>flair<eq>FLAIR_MANN.lower()<block_start>print(pid+': is a Mod Announcement post, adding to ignore list...')<line_sep>cur.execute('INSERT INTO clippy_flair VALUES(?)' [pid])<block_end><if_stmt>flair<eq>FLAIR_MNEWS.lower()<block_start>print(pid+': is a Mod News post, adding to ignore list...')<line_sep>cur.execute('INSERT INTO clippy_flair VALUES(?)' [pid])<block_end><else_stmt><block_start>cur.execute('SELECT * FROM clippy_flair WHERE id=?' 
[pid])<if_stmt><not>cur.fetchone()<block_start>print('\tAssigning Flair')<line_sep>post.set_flair(flair_text=FLAIR_UNSOLVED flair_css_class="notsolvedcase")<block_end><else_stmt>#cur.execute('INSERT INTO flair VALUES("%s")' % pid) <block_start><if_stmt>pauthor<in>moderators<and>FLAIR_IGNORE_MODS<is><true><block_start>print(pid+', '+pauthor+': Ignoring Moderator')<line_sep>cur.execute('INSERT INTO clippy_flair VALUES(?)' [pid])<block_end><block_end><block_end><block_end><block_end><block_end><block_end><if_stmt>post.is_self<is><true><and>FLAIR_IGNORE_SELF<is><true><block_start>print(pid+', '+pauthor+': Ignoring Selfpost')<line_sep>cur.execute('INSERT INTO clippy_flair VALUES(?)' [pid])<block_end>sql.commit()<block_end>print('\tClippyFlair finished')<block_end><block_end><class_stmt>ClippyReference<block_start><def_stmt>__init__ self<block_start><with_stmt>open(DICT_FILE 'r')<as>f<block_start>self.DICT=json.loads(f.read())<block_end><block_end><def_stmt>levenshtein self s1 s2#Levenshtein algorithm to figure out how close two strings are to each other #Courtesy http://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python <block_start><if_stmt>len(s1)<l>len(s2)<block_start><return>self.levenshtein(s2 s1)<block_end># len(s1) >= len(s2) <if_stmt>len(s2)<eq>0<block_start><return>len(s1)<block_end>previous_row=range(len(s2)+1)<for_stmt>i,c1 enumerate(s1)<block_start>current_row=[i+1]<for_stmt>j,c2 enumerate(s2)<block_start>insertions=previous_row[j+1]+1# j+1 instead of j since previous_row and current_row are one character longer deletions=current_row[j]+1# than s2 substitutions=previous_row[j]+(c1<ne>c2)<line_sep>current_row.append(min(insertions deletions substitutions))<block_end>previous_row=current_row<block_end><return>previous_row[-1]<block_end><def_stmt>findsuper self comment tolerance=1<block_start>results=[]<line_sep>used=[]<for_stmt>itemname self.DICT<block_start>itemlength=len(itemname.split())<line_sep>pos=0<line_sep>commentsplit=comment.split()<line_sep>end=<false><while_stmt><not>end<block_start><try_stmt><block_start>gram=commentsplit[pos:pos+itemlength]<line_sep>gramjoin=' '.join(gram)<line_sep>lev=self.levenshtein(itemname gramjoin)<if_stmt>lev<le>tolerance<block_start><if_stmt>itemname<not><in>used<block_start>used.append(itemname)<line_sep>result=DICT_RESULT_FORM<line_sep>result=result.replace('_key_' itemname)<line_sep>result=result.replace('_value_' self.DICT[itemname])<line_sep>results.append(result)<block_end><block_end>pos<augadd>1<if_stmt>pos<g>len(commentsplit)<block_start>end=<true><block_end><block_end><except_stmt>IndexError<block_start>end=<true><block_end><block_end><block_end><return>results<block_end><def_stmt>findsimple self comment<block_start>results=[]<for_stmt>itemname self.DICT<block_start><if_stmt>itemname.lower()<in>comment.lower()<block_start>result=DICT_RESULT_FORM<line_sep>result=result.replace('_key_' itemname)<line_sep>result=result.replace('_value_' self.DICT[itemname])<line_sep>results.append(result)<block_end><block_end><return>results<block_end><def_stmt>receive self comments<block_start>lev="True"<if>DICT_LEVENSHTEIN<else>"False"<line_sep>print('\tClippyReference received comments (Lev: %s)'%lev)<for_stmt>comment comments<block_start>results=[]<line_sep>cid=comment.id<try_stmt><block_start>cauthor=comment.author.name<line_sep>cur.execute('SELECT * FROM clippy_reference WHERE ID=?' 
[cid])<if_stmt><not>cur.fetchone()<block_start>print('\t'+cid)<if_stmt>cauthor.lower()<ne>r.user.name.lower()<block_start>cbody=comment.body.lower()<if_stmt>DICT_LEVENSHTEIN<is><true><block_start>results=self.findsuper(cbody)<block_end><else_stmt><block_start>results=self.findsimple(cbody)<block_end><if_stmt>DICT_TRIGGER.lower()<in>cbody.lower()<and>(len(results)<eq>0)#They made a request, but we didn't find anything <block_start>results.append(DICT_FAIL)<block_end><if_stmt>len(results)<g>0<block_start>newcomment='\n\n'.join(results)<line_sep>print('\t\tReplying to %s with %d items...'%(cauthor len(results)) end="")<line_sep>sys.stdout.flush()<line_sep>comment.reply(newcomment)<line_sep>print('done.')<block_end><block_end><else_stmt>#Will not reply to self <block_start><pass><block_end>cur.execute('INSERT INTO clippy_reference VALUES(?)' [cid])<block_end>sql.commit()<block_end><except_stmt>AttributeError# Comment Author is deleted <block_start><pass><block_end><block_end>print('\tClippyReference finished')<block_end><block_end><class_stmt>ClippyWelcome<block_start><def_stmt>receive self posts<block_start>print('\tClippyWelcome received submissions')<for_stmt>post posts<block_start><try_stmt><block_start>pauthor=post.author.name<line_sep>pid=post.id<line_sep>cur.execute('SELECT * FROM clippy_welcome WHERE NAME=?' [pauthor])<if_stmt><not>cur.fetchone()<block_start>print('\t'+pid)<line_sep>print('\t\tFound new user: '+pauthor)<line_sep>print('\t\tSending message...' end="")<line_sep>sys.stdout.flush()<line_sep>#r.send_message(pauthor, WELCOME_SUBJECT, WELCOME_MESSAGE%pauthor, captcha=None) cur.execute('INSERT INTO clippy_welcome VALUES(?, ?)' (pauthor pid))<line_sep>print('done.')<block_end>sql.commit()<block_end><except_stmt>AttributeError#Post author is deleted <block_start><pass><block_end><block_end>print('\tClippyWelcome finished')<block_end><block_end><def_stmt>getTime bool<block_start>timeNow=datetime.datetime.now(datetime.timezone.utc)<line_sep>timeUnix=timeNow.timestamp()<if_stmt>bool<is><false><block_start><return>timeNow<block_end><else_stmt><block_start><return>timeUnix<block_end><block_end><def_stmt>clippy_manager <block_start><try_stmt><block_start>subreddit=r.get_subreddit(SUBREDDIT)<line_sep>print('Getting new comments')<line_sep>newcomments=list(subreddit.get_comments(limit=MAXPOSTS))<line_sep>clippyreference.receive(newcomments)<line_sep>clippypoints.receive(newcomments)<line_sep>print('Getting new submissions')<line_sep>newposts=list(subreddit.get_new(limit=MAXPOSTS))<line_sep>clippywelcome.receive(newposts)<line_sep>clippyflair.receive(newposts)<block_end><except_stmt>Exception<block_start>traceback.print_exc()<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>sql=sqlite3.connect('superclippy.db')<line_sep>cur=sql.cursor()<line_sep>cur.execute('CREATE TABLE IF NOT EXISTS clippy_welcome(NAME TEXT, ID TEXT)')<line_sep>cur.execute('CREATE TABLE IF NOT EXISTS clippy_reference(ID TEXT)')<line_sep>cur.execute('CREATE TABLE IF NOT EXISTS clippy_points(ID TEXT)')<line_sep>cur.execute('CREATE TABLE IF NOT EXISTS clippy_points_s(ID TEXT, count INT)')<line_sep>cur.execute('CREATE TABLE IF NOT EXISTS clippy_flair(id TEXT)')<line_sep>print('Loaded SQL Database')<line_sep>sql.commit()<if_stmt>PLAY_BOOT_SOUND<block_start><try_stmt><block_start><import_stmt>winsound<import_stmt>threading<def_stmt>bootsound <block_start>winsound.PlaySound('boot.wav' 
winsound.SND_FILENAME)<block_end>soundthread=threading.Thread(target=bootsound)<line_sep>soundthread.daemon=<true><line_sep>soundthread.start()<block_end><except_stmt>Exception<block_start><pass><block_end><block_end>print('Logging in...' end="")<try_stmt><block_start><import_stmt>bot<line_sep>USERAGENT=bot.aG<block_end><except_stmt>ImportError<block_start><pass><block_end>sys.stdout.flush()<line_sep>r=praw.Reddit(USERAGENT)<line_sep>r.set_oauth_app_info(APP_ID APP_SECRET APP_URI)<line_sep>r.refresh_access_information(APP_REFRESH)<line_sep>print('done.')<line_sep>print('Starting Points...' end="")<line_sep>clippypoints=ClippyPoints()<line_sep>print('done.')<line_sep>print('Starting Welcome...' end="")<line_sep>clippywelcome=ClippyWelcome()<line_sep>print('done.')<line_sep>print('Starting Flair...' end="")<line_sep>clippyflair=ClippyFlairReminder()<line_sep>print('done.')<line_sep>print('Starting Reference...' end="")<line_sep>clippyreference=ClippyReference()<line_sep>print('done.')<while_stmt><true><block_start>clippy_manager()<line_sep>print('Sleeping %d seconds.\n\n'%WAIT)<line_sep>time.sleep(WAIT)<block_end><block_end>
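# --- Hedged usage sketch (added for illustration; not part of the original bot). It shows how the ClippyReference lookup behaves, assuming reference.txt is a JSON object that contains a hypothetical "vlookup" entry:
#   ref = ClippyReference()                                    # loads DICT_FILE (reference.txt) via json.loads
#   print(ref.findsimple("clippy: how does vlookup work?"))    # exact substring match against the keys
#   assert ref.levenshtein("vlookup", "vlokup") <= 1           # a one-character misspelling stays within findsuper's default tolerance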