from django.urls import path
from django.views.generic import TemplateView

app_name = 'menu'

urlpatterns = [
    path('', TemplateView.as_view(template_name='menu/main_menu.html'), name='main'),
    path('page1', TemplateView.as_view(template_name='menu/main_menu.html'), name='page1'),
    path('page2', TemplateView.as_view(template_name='menu/main_menu.html'), name='page2'),
    path('page3', TemplateView.as_view(template_name='menu/main_menu.html'), name='page3'),
]
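# Illustrative sketch (not part of the original file): because app_name is set and
# every route is named, the pages can be resolved by name elsewhere in the project;
# the final URL prefix depends on how the project-level urls.py includes this module.
#
#     from django.urls import reverse
#     reverse('menu:main')    # e.g. '/'      under a root include
#     reverse('menu:page2')   # e.g. '/page2'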
|
import boost_histogram as bh


def test_subclass():
    NEW_FAMILY = object()

    class MyHist(bh.Histogram, family=NEW_FAMILY):
        pass

    class MyRegular(bh.axis.Regular, family=NEW_FAMILY):
        __slots__ = ()

    class MyIntStorage(bh.storage.Int64, family=NEW_FAMILY):
        pass

    class MyPowTransform(bh.axis.transform.Pow, family=NEW_FAMILY):
        pass

    h = MyHist(MyRegular(10, 0, 2, transform=MyPowTransform(2)), storage=MyIntStorage())

    assert type(h) == MyHist
    assert h._storage_type == MyIntStorage
    assert type(h.axes[0]) == MyRegular
    assert type(h.axes[0].transform) == MyPowTransform


def test_subclass_hist_only():
    class MyHist(bh.Histogram):
        pass

    h = MyHist(bh.axis.Regular(10, 0, 2))

    assert type(h) == MyHist
    assert type(h.axes[0]) == bh.axis.Regular
|
from __future__ import absolute_import

from falcon import HTTPBadRequest

from iris.webhooks.webhook import webhook


class rackspace(webhook):
    def validate_post(self, body):
        if not all(k in body for k in ("event_id", "details")):
            raise HTTPBadRequest('missing event_id and/or details attributes')

    def on_post(self, req, resp):
        '''
        This endpoint is compatible with webhook POSTs from Rackspace.
        Configure a Rackspace notification to post to a URL with the following
        parameters:

        "http://iris:16649/v0/webhooks/rackspace?application=test-app&key=abc&plan=teamA"

        where application points to an application and key in Iris.
        For every POST from Rackspace, a new incident will be created if the plan label
        is attached to an alert.
        '''
        plan = req.get_param('plan', required=False)
        if plan is None:
            raise HTTPBadRequest('missing plan in rackspace webhook url parameters')

        super().on_post(req, resp, plan)
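# Illustrative sketch (not part of the original handler): validate_post only checks
# that both "event_id" and "details" are present in the posted body. Given a handler
# instance `hook` (hypothetical), the first call below would pass and the second
# would raise HTTPBadRequest:
#
#     hook.validate_post({"event_id": "evt-1", "details": {"state": "CRITICAL"}})  # ok
#     hook.validate_post({"event_id": "evt-1"})  # raises HTTPBadRequest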
|
"""
This is the main entry point for Voltron from the debugger host's perspective.
This file is loaded into the debugger through whatever means the given host
supports.
LLDB:
(lldb) command script import /path/to/voltron/entry.py
GDB:
(gdb) source /path/to/voltron/entry.py
VDB:
(vdb) script /path/to/voltron/entry.py
WinDbg/CDB (via PyKD):
> .load pykd.pyd
> !py --global C:\path\to\voltron\entry.py
"""<line_sep>log=<none><try_stmt># fix path if it's clobbered by brew
<block_start><import_stmt>sys<if_stmt>sys.platform<eq>'darwin'<block_start>py_base='/System/Library/Frameworks/Python.framework/Versions/2.7/'<line_sep>new_path=['lib/python27.zip' 'lib/python2.7' 'lib/python2.7/plat-darwin' 'lib/python2.7/plat-mac' 'lib/python2.7/plat-mac/lib-scriptpackages' 'Extras/lib/python' 'lib/python2.7/lib-tk' 'lib/python2.7/lib-old' 'lib/python2.7/lib-dynload']<line_sep>sys.path=[p<for>p sys.path<if>'Cellar'<not><in>p]+[py_base+p<for>p new_path]<block_end><block_end><except_stmt><block_start><pass><block_end><try_stmt><block_start><import_stmt>logging<import_stmt>os<import_stmt>sys<line_sep>blessed=<none><import_stmt>blessed<line_sep># add vtrace to the path so that dbg_vdb.py can import from vdb/vtrace.
<if_stmt>"vtrace"<in>locals()<block_start><def_stmt>parent_directory the_path<block_start><return>os.path.abspath(os.path.join(the_path os.pardir))<block_end><def_stmt>add_vdb_to_path vtrace<block_start>sys.path.append(parent_directory(parent_directory(vtrace.__file__)))<block_end>add_vdb_to_path(vtrace)<block_end><else_stmt><block_start><pass><block_end><import_stmt>voltron<import_from_stmt>voltron.plugin pm<import_from_stmt>voltron.core Server<line_sep>log=voltron.setup_logging('debugger')<line_sep># figure out in which debugger host we are running
args=[]<line_sep>host=<none><try_stmt><block_start><import_stmt>lldb<line_sep>host="lldb"<def_stmt>invoke *args<block_start>voltron.command._invoke(*args)<block_end><block_end><except_stmt>ImportError<block_start><pass><block_end><try_stmt><block_start><import_stmt>gdb<line_sep>host="gdb"<block_end><except_stmt>ImportError<block_start><pass><block_end><try_stmt><block_start><import_stmt>pykd<line_sep>host="windbg"<block_end><except_stmt><block_start><pass><block_end><if_stmt>"vtrace"<in>locals()<block_start>host="vdb"<line_sep>args=[db]<block_end><if_stmt><not>host<block_start><raise>Exception("No debugger host is present")<block_end># register any plugins that were loaded
pm.register_plugins()<line_sep># get the debugger plugin for the host we're in
plugin=pm.debugger_plugin_for_host(host)<if_stmt><not>voltron.server# set up command and adaptor instances
<block_start>voltron.debugger=plugin.adaptor_class(*args)<line_sep>voltron.command=plugin.command_class(*args)<line_sep># register command plugins now that we have a debugger host loaded
pm.register_command_plugins()<line_sep># create and start the voltron server
voltron.server=Server()<line_sep>voltron.server.start()<line_sep>print(blessed.Terminal().bold_red("Voltron loaded."))<if_stmt>host<eq>'lldb'<and><not>voltron.command.registered<block_start>print("Run `voltron init` after you load a target.")<block_end><block_end><block_end><except_stmt>Exception<as>e<block_start><import_stmt>traceback<line_sep>msg=("An error occurred while loading Voltron:\n\n{}"<concat>"\nPlease ensure Voltron is installed correctly per the documentation: "<concat>"https://github.com/snare/voltron/wiki/Installation").format(traceback.format_exc())<if_stmt>blessed<block_start>msg=blessed.Terminal().bold_red(msg)<block_end><if_stmt>log<block_start>log.exception("Exception raised while loading Voltron")<block_end>print(msg)<block_end>
|
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>clstm ConvLSTMCell<import_stmt>argparse<import_stmt>torch.nn.functional<as>f<import_from_stmt>torch.autograd Variable<import_from_stmt>torchvision transforms models<import_stmt>torch.nn<as>nn<import_stmt>math<import_from_stmt>vision VGG16 ResNet34 ResNet50 ResNet101<import_stmt>sys<line_sep>sys.path.append("..")<import_from_stmt>utils.utils get_skip_dims<class_stmt>FeatureExtractor(nn.Module)<block_start>'''
Returns base network to extract visual features from image
'''<def_stmt>__init__ self args<block_start>super(FeatureExtractor self).__init__()<line_sep>skip_dims_in=get_skip_dims(args.base_model)<if_stmt>args.base_model<eq>'resnet34'<block_start>self.base=ResNet34()<line_sep>self.base.load_state_dict(models.resnet34(pretrained=<true>).state_dict())<block_end><elif_stmt>args.base_model<eq>'resnet50'<block_start>self.base=ResNet50()<line_sep>self.base.load_state_dict(models.resnet50(pretrained=<true>).state_dict())<block_end><elif_stmt>args.base_model<eq>'resnet101'<block_start>self.base=ResNet101()<line_sep>self.base.load_state_dict(models.resnet101(pretrained=<true>).state_dict())<block_end><elif_stmt>args.base_model<eq>'vgg16'<block_start>self.base=VGG16()<line_sep>self.base.load_state_dict(models.vgg16(pretrained=<true>).state_dict())<block_end><else_stmt><block_start><raise>Exception("The base model you chose is not supported !")<block_end>self.hidden_size=args.hidden_size<line_sep>self.kernel_size=args.kernel_size<line_sep>self.padding=0<if>self.kernel_size<eq>1<else>1<line_sep>self.sk5=nn.Conv2d(skip_dims_in[0] self.hidden_size self.kernel_size padding=self.padding)<line_sep>self.sk4=nn.Conv2d(skip_dims_in[1] self.hidden_size self.kernel_size padding=self.padding)<line_sep>self.sk3=nn.Conv2d(skip_dims_in[2] self.hidden_size/2 self.kernel_size padding=self.padding)<line_sep>self.sk2=nn.Conv2d(skip_dims_in[3] self.hidden_size/4 self.kernel_size padding=self.padding)<line_sep>self.sk1=nn.Conv2d(skip_dims_in[4] self.hidden_size/8 self.kernel_size padding=self.padding)<line_sep>self.bn5=nn.BatchNorm2d(self.hidden_size)<line_sep>self.bn4=nn.BatchNorm2d(self.hidden_size)<line_sep>self.bn3=nn.BatchNorm2d(self.hidden_size/2)<line_sep>self.bn2=nn.BatchNorm2d(self.hidden_size/4)<line_sep>self.bn1=nn.BatchNorm2d(self.hidden_size/8)<block_end><def_stmt>forward self x semseg=<false> raw=<false><block_start>x5,x4,x3,x2,x1=self.base(x)<line_sep>x5_skip=self.bn5(self.sk5(x5))<line_sep>x4_skip=self.bn4(self.sk4(x4))<line_sep>x3_skip=self.bn3(self.sk3(x3))<line_sep>x2_skip=self.bn2(self.sk2(x2))<line_sep>x1_skip=self.bn1(self.sk1(x1))<if_stmt>semseg<block_start><return>x5<block_end><elif_stmt>raw<block_start><return>x5 x4 x3 x2 x1<block_end><else_stmt><block_start><return>x5_skip x4_skip x3_skip x2_skip x1_skip<block_end><block_end><block_end><class_stmt>RSIS(nn.Module)<block_start>"""
The recurrent decoder
"""<def_stmt>__init__ self args<block_start>super(RSIS self).__init__()<line_sep>skip_dims_in=get_skip_dims(args.base_model)<line_sep>self.hidden_size=args.hidden_size<line_sep>self.num_classes=args.num_classes<line_sep>self.kernel_size=args.kernel_size<line_sep>padding=0<if>self.kernel_size<eq>1<else>1<line_sep>self.dropout=args.dropout<line_sep>self.dropout_stop=args.dropout_stop<line_sep>self.dropout_cls=args.dropout_cls<line_sep>self.skip_mode=args.skip_mode<line_sep># convlstms have decreasing dimension as width and height increase
skip_dims_out=[self.hidden_size self.hidden_size/2 self.hidden_size/4 self.hidden_size/8 self.hidden_size/16]<line_sep># initialize layers for each deconv stage
self.clstm_list=nn.ModuleList()<line_sep># 5 is the number of deconv steps that we need to reach image size in the output
<for_stmt>i range(len(skip_dims_out))<block_start><if_stmt>i<eq>0<block_start>clstm_in_dim=self.hidden_size<block_end><else_stmt><block_start>clstm_in_dim=skip_dims_out[i-1]<if_stmt>self.skip_mode<eq>'concat'<block_start>clstm_in_dim<augmul>2<block_end><block_end>clstm_i=ConvLSTMCell(args clstm_in_dim skip_dims_out[i] self.kernel_size padding=padding)<line_sep>self.clstm_list.append(clstm_i)<block_end>self.conv_out=nn.Conv2d(skip_dims_out[-1] 1 self.kernel_size padding=padding)<line_sep># calculate the dimensionality of classification vector
# side class activations are taken from the output of the convlstm
# therefore we need to compute the sum of the dimensionality of outputs
# from all convlstm layers
fc_dim=0<for_stmt>sk skip_dims_out<block_start>fc_dim<augadd>sk<block_end>self.fc_class=nn.Linear(fc_dim self.num_classes)<line_sep>self.fc_stop=nn.Linear(fc_dim 1)<block_end><def_stmt>forward self skip_feats prev_hidden_list<block_start>clstm_in=skip_feats[0]<line_sep>skip_feats=skip_feats[1:]<line_sep>side_feats=[]<line_sep>hidden_list=[]<for_stmt>i range(len(skip_feats)+1)# hidden states will be initialized the first time forward is called
<block_start><if_stmt>prev_hidden_list<is><none><block_start>state=self.clstm_list[i](clstm_in <none>)<block_end><else_stmt># else we take the ones from the previous step for the forward pass
<block_start>state=self.clstm_list[i](clstm_in prev_hidden_list[i])<block_end>hidden_list.append(state)<line_sep>hidden=state[0]<if_stmt>self.dropout<g>0<block_start>hidden=nn.Dropout2d(self.dropout)(hidden)<block_end>side_feats.append(nn.MaxPool2d(clstm_in.size()[2:])(hidden))<line_sep># apply skip connection
<if_stmt>i<l>len(skip_feats)<block_start>skip_vec=skip_feats[i]<line_sep>upsample=nn.UpsamplingBilinear2d(size=(skip_vec.size()[-2] skip_vec.size()[-1]))<line_sep>hidden=upsample(hidden)<line_sep># skip connection
<if_stmt>self.skip_mode<eq>'concat'<block_start>clstm_in=torch.cat([hidden skip_vec] 1)<block_end><elif_stmt>self.skip_mode<eq>'sum'<block_start>clstm_in=hidden+skip_vec<block_end><elif_stmt>self.skip_mode<eq>'mul'<block_start>clstm_in=hidden<times>skip_vec<block_end><elif_stmt>self.skip_mode<eq>'none'<block_start>clstm_in=hidden<block_end><else_stmt><block_start><raise>Exception('Skip connection mode not supported !')<block_end><block_end><else_stmt><block_start>self.upsample=nn.UpsamplingBilinear2d(size=(hidden.size()[-2]<times>2 hidden.size()[-1]<times>2))<line_sep>hidden=self.upsample(hidden)<line_sep>clstm_in=hidden<block_end><block_end>out_mask=self.conv_out(clstm_in)<line_sep># classification branch
side_feats=torch.cat(side_feats 1).squeeze()<if_stmt>self.dropout_cls<g>0<block_start>class_feats=nn.Dropout(self.dropout_cls)(side_feats)<block_end><else_stmt><block_start>class_feats=side_feats<block_end>class_feats=self.fc_class(class_feats)<if_stmt>self.dropout_stop<g>0<block_start>stop_feats=nn.Dropout(self.dropout_stop)(side_feats)<block_end><else_stmt><block_start>stop_feats=side_feats<block_end>stop_probs=self.fc_stop(stop_feats)<line_sep># the log is computed in the objective function
class_probs=nn.Softmax()(class_feats)<line_sep><return>out_mask class_probs stop_probs hidden_list<block_end><block_end>
|
<import_stmt>unittest<import_stmt>Mariana.layers<as>ML<import_stmt>Mariana.layers<as>ML<import_stmt>Mariana.decorators<as>dec<import_stmt>Mariana.costs<as>MC<import_stmt>Mariana.regularizations<as>MR<import_stmt>Mariana.scenari<as>MS<import_stmt>Mariana.activations<as>MA<import_stmt>Mariana.training.datasetmaps<as>MD<import_stmt>theano.tensor<as>tt<import_stmt>numpy<class_stmt>DastasetMapsTests(unittest.TestCase)<block_start><def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_classSets self<block_start><def_stmt>sample cls<block_start>o=cls.getAll("onehot")<line_sep>n=cls.getAll("classNumber")<line_sep>p=cls.getAll("input")<line_sep><return>o n p<block_end>l1=numpy.arange(100)<line_sep>l2=numpy.arange(10)+10<line_sep>cls=MD.ClassSets(sets=[("l1" l1) ("l2" l2)] sampleSize=len(l1))<line_sep>o,n,p=sample(cls)<for_stmt>i xrange(len(o))<block_start><if_stmt>n[i]<eq>0.<block_start>self.assertEquals(o[i][1] 0.)<line_sep>self.assertEquals(o[i][0] 1.)<block_end><else_stmt><block_start>self.assertEquals(o[i][0] 0.)<line_sep>self.assertEquals(o[i][1] 1.)<block_end><block_end>nbTrials=10000<line_sep>nb2=0.<for_stmt>i xrange(nbTrials)<block_start>o,n,p=sample(cls)<for_stmt>j xrange(len(p))<block_start><if_stmt>p[j]<g>10<block_start>nb2<augadd>1<block_end><block_end><block_end>f=nb2/float(len(p)<times>nbTrials)<line_sep>r=abs(f-0.5)<line_sep>self.assertTrue(r<l>2)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>Mariana.settings<as>MSET<line_sep>MSET.VERBOSE=<false><line_sep>unittest.main()<block_end>
|
# This file must not be empty so that I can easily upload it to GitHub
|
import torch
from torch.nn.functional import conv2d


def _laplacian(y, h):
    """Laplacian operator"""
    operator = h ** (-2) * torch.tensor([[[[0.0, 1.0, 0.0],
                                           [1.0, -4.0, 1.0],
                                           [0.0, 1.0, 0.0]]]])
    y = y.unsqueeze(1)
    # y = pad(y, pad=(0, 0, 1, 1), mode='circular')
    # y = pad(y, pad=(1, 1, 0, 0), mode='circular')
    return conv2d(y, operator, padding=1).squeeze(1)
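# Illustrative usage (not part of the original module): apply the five-point
# Laplacian stencil to a batch of 2-D fields with grid spacing h; shapes and
# values below are arbitrary demo inputs.
if __name__ == "__main__":
    y = torch.rand(4, 32, 32)       # batch of 4 fields on a 32x32 grid
    lap = _laplacian(y, h=0.1)      # returns a tensor of the same shape
    print(lap.shape)                # torch.Size([4, 32, 32])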
|
<import_stmt>os<import_stmt>KratosMultiphysics<import_from_stmt>KratosMultiphysics Logger<line_sep>Logger.GetDefaultOutput().SetSeverity(Logger.Severity.WARNING)<import_stmt>KratosMultiphysics.DEMApplication<as>DEM<import_stmt>KratosMultiphysics.KratosUnittest<as>KratosUnittest<import_stmt>KratosMultiphysics.DEMApplication.DEM_analysis_stage<import_stmt>auxiliary_functions_for_tests<line_sep>this_working_dir_backup=os.getcwd()<def_stmt>GetFilePath fileName<block_start><return>os.path.join(os.path.dirname(os.path.realpath(__file__)) fileName)<block_end><class_stmt>DEM3D_ContactTestSolution(KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage KratosUnittest.TestCase)<block_start>@classmethod<def_stmt>GetMainPath self<block_start><return>os.path.join(os.path.dirname(os.path.realpath(__file__)) "DEM3D_contact_tests_files")<block_end><def_stmt>GetProblemNameWithPath self<block_start><return>os.path.join(self.main_path self.DEM_parameters["problem_name"].GetString())<block_end><def_stmt>FinalizeSolutionStep self<block_start>super().FinalizeSolutionStep()<line_sep>tolerance=1.001<for_stmt>node self.rigid_face_model_part.Nodes<block_start>dem_pressure=node.GetSolutionStepValue(DEM.DEM_PRESSURE)<line_sep>contact_force=node.GetSolutionStepValue(DEM.CONTACT_FORCES_Z)<if_stmt>node.Id<eq>9<block_start><if_stmt>self.time<g>0.35<block_start>self.assertAlmostEqual(dem_pressure 1621 delta=tolerance)<line_sep>self.assertAlmostEqual(contact_force -6484 delta=tolerance)<block_end><block_end><if_stmt>node.Id<eq>13<block_start><if_stmt>self.time<g>0.35<block_start>self.assertAlmostEqual(dem_pressure 841 delta=tolerance)<line_sep>self.assertAlmostEqual(contact_force -3366 delta=tolerance)<block_end><block_end><block_end><block_end><def_stmt>Finalize self<block_start>self.procedures.RemoveFoldersWithResults(str(self.main_path) str(self.problem_name) '')<line_sep>super().Finalize()<block_end><block_end><class_stmt>TestDEM3DContact(KratosUnittest.TestCase)<block_start><def_stmt>setUp self<block_start><pass><block_end>@classmethod<def_stmt>test_DEM3D_contact self<block_start>path=os.path.join(os.path.dirname(os.path.realpath(__file__)) "DEM3D_contact_tests_files")<line_sep>parameters_file_name=os.path.join(path "ProjectParametersDEM.json")<line_sep>model=KratosMultiphysics.Model()<line_sep># Test parallel computation.
<with_stmt>open(parameters_file_name 'r')<as>parameter_file<block_start>project_parameters=KratosMultiphysics.Parameters(parameter_file.read())<block_end>DEM3D_ContactTestSolution(model project_parameters).Run()<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>Logger.GetDefaultOutput().SetSeverity(Logger.Severity.WARNING)<line_sep>KratosUnittest.main()<block_end>
|
import collections
from typing import Callable, Optional

from rx import from_, from_future, operators as ops
from rx.core import Observable
from rx.core.typing import Mapper, MapperIndexed
from rx.internal.utils import is_future


def _flat_map_internal(source, mapper=None, mapper_indexed=None):
    def projection(x, i):
        mapper_result = mapper(x) if mapper else mapper_indexed(x, i)
        if is_future(mapper_result):
            result = from_future(mapper_result)
        elif isinstance(mapper_result, collections.abc.Iterable):
            result = from_(mapper_result)
        else:
            result = mapper_result
        return result

    return source.pipe(
        ops.map_indexed(projection),
        ops.merge_all()
    )


def _flat_map(mapper: Optional[Mapper] = None) -> Callable[[Observable], Observable]:
    def flat_map(source: Observable) -> Observable:
        """One of the Following:
Projects each element of an observable sequence to an observable
sequence and merges the resulting observable sequences into one
observable sequence.
Example:
>>> flat_map(source)
Args:
source: Source observable to flat map.
Returns:
An operator function that takes a source observable and returns
an observable sequence whose elements are the result of invoking
the one-to-many transform function on each element of the
input sequence.
"""<if_stmt>callable(mapper)<block_start>ret=_flat_map_internal(source mapper=mapper)<block_end><else_stmt><block_start>ret=_flat_map_internal(source mapper=<lambda>_:mapper)<block_end><return>ret<block_end><return>flat_map<block_end><def_stmt>_flat_map_indexed mapper_indexed:Optional[MapperIndexed]=<none><arrow>Callable[[Observable] Observable]<block_start><def_stmt>flat_map_indexed source:Observable<arrow>Observable<block_start>"""One of the Following:
Projects each element of an observable sequence to an observable
sequence and merges the resulting observable sequences into one
observable sequence.
Example:
>>> flat_map_indexed(source)
Args:
source: Source observable to flat map.
Returns:
An observable sequence whose elements are the result of invoking
the one-to-many transform function on each element of the input
sequence.
"""<if_stmt>callable(mapper_indexed)<block_start>ret=_flat_map_internal(source mapper_indexed=mapper_indexed)<block_end><else_stmt><block_start>ret=_flat_map_internal(source mapper=<lambda>_:mapper_indexed)<block_end><return>ret<block_end><return>flat_map_indexed<block_end><def_stmt>_flat_map_latest mapper:Mapper<arrow>Callable[[Observable] Observable]<block_start><def_stmt>flat_map_latest source:Observable<arrow>Observable<block_start>"""Projects each element of an observable sequence into a new
sequence of observable sequences by incorporating the element's
index and then transforms an observable sequence of observable
sequences into an observable sequence producing values only
from the most recent observable sequence.
Args:
source: Source observable to flat map latest.
Returns:
An observable sequence whose elements are the result of
invoking the transform function on each element of source
producing an observable of Observable sequences and that at
any point in time produces the elements of the most recent
inner observable sequence that has been received.
"""<line_sep><return>source.pipe(ops.map(mapper) ops.switch_latest())<block_end><return>flat_map_latest<block_end>
|
import os

from tabulate import tabulate

from textclf.data.dictionary import Dictionary, LabelDictionary
from textclf.config import PreprocessConfig
from textclf.utils.raw_data import (
    tokenize_file,
    create_tokenizer,
    get_label_prob,
    build_label2id
)


class TextClfRawData(object):
    """Preprocess the data: tokenize, build the dictionary, and save it in binary form for easy loading."""

    def __init__(self, config: PreprocessConfig):
        """
        :param config: the preprocessing settings
        :type config: PreprocessConfig
        """
        self.config = config
        self.tokenizer = create_tokenizer(config.tokenizer)
        self.train_pairs = tokenize_file(
            os.path.join(config.datadir, config.train_file), self.tokenizer)
        self.valid_pairs = tokenize_file(
            os.path.join(config.datadir, config.valid_file), self.tokenizer)
        self.test_pairs = tokenize_file(
            os.path.join(config.datadir, config.test_file), self.tokenizer)
        self.dictionary = self._build_dictionary()
        self.label2id = build_label2id([label for _, label in self.train_pairs])

    def _build_dictionary(self):
        dictionary = Dictionary()
        for text, _ in self.train_pairs:
            dictionary.add_sentence(text)  # build dict
        dictionary.finalize(nwords=self.config.nwords, threshold=self.config.min_word_count)
        return dictionary

    def describe(self):
        """Print information about the data: label distribution and dictionary size."""
        headers = ["", self.config.train_file, self.config.valid_file, self.config.test_file]
        train_label_prob = get_label_prob([label for _, label in self.train_pairs])
        valid_label_prob = get_label_prob([label for _, label in self.valid_pairs])
        test_label_prob = get_label_prob([label for _, label in self.test_pairs])
        label_table = []
        for label in train_label_prob:
            label_table.append([
                label,
                train_label_prob[label],
                valid_label_prob[label],
                test_label_prob[label]
            ])
        label_table.append(
            ["Sum", len(self.train_pairs), len(self.valid_pairs), len(self.test_pairs)])
        print("Label Prob:")
        print(tabulate(label_table, headers, tablefmt="grid", floatfmt=".4f"))
        print(f"Dictionary Size: {len(self.dictionary)}")
|
###############################################################################
# GDB Script to improve introspection of array types when debugging software
# using Enoki. Copy this file to "~/.gdb" (creating the directory, if not
# present) and then append the following line to the file "~/.gdbinit"
# (again, creating it if not already present):
###############################################################################
# set print pretty
# source ~/.gdb/enoki_gdb.py
###############################################################################
<import_stmt>gdb<line_sep>simple_types={'bool' 'char' 'unsigned char' 'short' 'unsigned short' 'int' 'unsigned int' 'long' 'unsigned long' 'long long' 'unsigned long long' 'float' 'double'}<class_stmt>EnokiIterator<block_start><def_stmt>__init__ self instance size<block_start>self.instance=instance<line_sep>self.size=size<line_sep>self.index=0<block_end><def_stmt>__iter__ self<block_start><return>self<block_end><def_stmt>__next__ self<block_start><if_stmt>self.index<ge>self.size<block_start><raise>StopIteration<block_end>result=('[%i]'%self.index self.instance[self.index])<line_sep>self.index<augadd>1<line_sep><return>result<block_end><def_stmt>next self<block_start><return>self.__next__()<block_end><block_end><class_stmt>EnokiStaticArrayPrinter<block_start><def_stmt>__init__ self instance<block_start>self.instance=instance<line_sep>itype=self.instance.type.strip_typedefs()<line_sep># Extract derived type
<if_stmt>'StaticArrayImpl'<in>str(itype)<block_start>itype=itype.template_argument(3)<block_end><try_stmt><block_start>data=self.instance['m_data']['_M_elems']<line_sep>self.data_type=data.type.strip_typedefs().target()<block_end><except_stmt>Exception<block_start>self.data_type=itype.template_argument(0)<block_end># Determine the size and data type
self.size=int(str(itype.template_argument(1)))<line_sep>self.is_simple=str(self.data_type)<in>simple_types<line_sep>self.type_size=self.data_type.sizeof<line_sep>self.is_mask='Mask'<in>str(itype)<try_stmt><block_start>_=instance['k']<line_sep>self.kmask=<true><block_end><except_stmt>Exception<block_start>self.kmask=<false><block_end><block_end><def_stmt>entry self i<block_start><if_stmt>i<l>0<or>i<ge>self.size<block_start><return><none><block_end>addr=int(self.instance.address)+self.type_size<times>i<line_sep>cmd='*((%s *) 0x%x)'%(str(self.data_type) addr)<line_sep><return>str(gdb.parse_and_eval(cmd))<block_end><def_stmt>children self<block_start><if_stmt>self.is_simple<block_start><return>[]<block_end><else_stmt><block_start><return>EnokiIterator(self.instance['m_data']['_M_elems'] self.size)<block_end><block_end><def_stmt>to_string self<block_start><if_stmt>self.is_simple<block_start><if_stmt><not>self.is_mask<block_start>result=[self.entry(i)<for>i range(self.size)]<block_end><else_stmt><block_start><if_stmt>self.kmask# AVX512 mask register
<block_start>result=list(reversed(format(int(self.instance['k']) '0%ib'%self.size)))<block_end><else_stmt><block_start>result=[<none>]<times>self.size<for_stmt>i range(self.size)<block_start>value=self.entry(i)<line_sep>result[i]='0'<if>(value<eq>'0'<or>value<eq>'false')<else>'1'<block_end><block_end><block_end><return>'['+', '.join(result)+']'<block_end><else_stmt><block_start><return>''<block_end><block_end><block_end><class_stmt>EnokiDynamicArrayPrinter<block_start><def_stmt>__init__ self instance<block_start>self.instance=instance<line_sep>itype=self.instance.type.strip_typedefs()<line_sep>self.size=int(str(self.instance['m_size']))<line_sep>self.packet_count=int(str(self.instance['m_packets_allocated']))<line_sep>self.packet_type=itype.template_argument(0)<line_sep>self.packet_size=self.packet_type.sizeof<line_sep>self.data=int(str(instance['m_packets']['_M_t']['_M_t']['_M_head_impl']) 0)<line_sep>self.limit=20<block_end><def_stmt>to_string self<block_start>values=[]<for_stmt>i range(self.packet_count)<block_start>addr=int(self.data)+self.packet_size<times>i<line_sep>cmd='*((%s *) 0x%x)'%(str(self.packet_type) addr)<line_sep>value=str(gdb.parse_and_eval(cmd))<assert_stmt>value[-1]<eq>']'<line_sep>values<augadd>value[value.rfind('[')+1:-1].split(', ')<if_stmt>len(values)<g>self.size<block_start>values=values[0:self.size]<line_sep><break><block_end><if_stmt>len(values)<g>self.limit<block_start><break><block_end><block_end><if_stmt>len(values)<g>self.limit<block_start>values=values[0:self.limit]<line_sep>values.append(".. %i skipped .."%(self.size-self.limit))<block_end><return>'['+', '.join(values)+']'<block_end><block_end># Static Enoki arrays
regexp_1=r'(enoki::)?(Array|Packet|Complex|Matrix|'<concat>'Quaternion|StaticArrayImpl)(Mask)?<.+>'<line_sep># Mitsuba 2 is one of the main users of Enoki. For convenience, also
# declare its custom array types here
regexp_2=r'(mitsuba::)?(Vector|Point|Normal|Spectrum|Color)<.+>'<line_sep>regexp_combined=r'^(%s)|(%s)$'%(regexp_1 regexp_2)<line_sep>p=gdb.printing.RegexpCollectionPrettyPrinter("enoki")<line_sep>p.add_printer("static" regexp_combined EnokiStaticArrayPrinter)<line_sep>p.add_printer("dynamic" r'^(enoki::)?DynamicArray(Impl)?<.+>$' EnokiDynamicArrayPrinter)<line_sep>o=gdb.current_objfile()<line_sep>gdb.printing.register_pretty_printer(o p)<line_sep>
|
class AuthenticationError(Exception):
    pass


class MarketClosedError(Exception):
    pass


class MarketEmptyError(Exception):
    pass
|
import logging

from gdmix.drivers.fixed_effect_driver import FixedEffectDriver
from gdmix.drivers.random_effect_driver import RandomEffectDriver
from gdmix.factory.model_factory import ModelFactory
from gdmix.util import constants

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


class DriverFactory:
    """
Provider class for creating driver and dependencies
NOTE - for now, only Estimator-based linear models are supported. In the future, the factory will also
accept model type as an input parameter
"""<line_sep>@staticmethod<def_stmt>get_driver base_training_params raw_model_params<block_start>"""
Create driver and associated dependencies, based on type. Only linear, estimator-based models supported
for now
:param base_training_params: Parsed base training parameters common to all models. This could include the
path to training data, validation data, metadata file path, learning rate etc.
:param raw_model_params: Raw model parameters, representing model-specific requirements. For example, a
CNN might expose filter_size as a parameter, a text-based model might expose the size of its word embedding matrix
as a parameter
:return: Fixed or Random effect driver
"""<line_sep>driver=DriverFactory.drivers[base_training_params.stage]<line_sep>model=ModelFactory.get_model(base_training_params raw_model_params)<line_sep>logger.info(f"Instantiating model {model} and driver {driver}")<line_sep><return>driver(base_training_params=base_training_params model=model)<block_end>drivers={constants.FIXED_EFFECT:FixedEffectDriver constants.RANDOM_EFFECT:RandomEffectDriver}<block_end>
|
# coding=utf-8
# Copyright 2021 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic recurrent models for testing simple tasks."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>tensor2tensor.layers common_video<import_from_stmt>tensor2tensor.models.video basic_stochastic<import_from_stmt>tensor2tensor.utils registry<line_sep>@registry.register_model<class_stmt>NextFrameBasicRecurrent(basic_stochastic.NextFrameBasicStochasticDiscrete)<block_start>"""Basic next-frame recurrent model."""<line_sep>@property<def_stmt>is_recurrent_model self<block_start><return><true><block_end><def_stmt>middle_network self layer internal_states<block_start>lstm_func=common_video.conv_lstm_2d<line_sep>hp=self.hparams<line_sep>lstm_states=internal_states<if_stmt>lstm_states<is><none><block_start>lstm_states=[<none>]<times>hp.num_lstm_layers<block_end># LSTM layers
x=layer<for_stmt>j range(hp.num_lstm_layers)<block_start>x,lstm_states[j]=lstm_func(x lstm_states[j] hp.num_lstm_filters)<block_end><return>x lstm_states<block_end><block_end>@registry.register_hparams<def_stmt>next_frame_basic_recurrent <block_start>"""Basic 2-frame recurrent model with stochastic tower."""<line_sep>hparams=basic_stochastic.next_frame_basic_stochastic_discrete()<line_sep>hparams.filter_double_steps=2<line_sep>hparams.hidden_size=64<line_sep>hparams.video_num_input_frames=4<line_sep>hparams.video_num_target_frames=4<line_sep>hparams.concat_internal_states=<false><line_sep>hparams.add_hparam("num_lstm_layers" 2)<line_sep>hparams.add_hparam("num_lstm_filters" 256)<line_sep><return>hparams<block_end>
|
# Copyright Least Authority Enterprises.
# See LICENSE for details.
"""
Adapter from IServiceMaker-like interface to setuptools console-entrypoint
interface.
Premise
=======
Given:
* twist is the focus of efforts to make a good client-oriented command-line
driver for Twisted-based applications.
* kubetop is a client-y, command-line, Twisted-based application.
* Accounting for custom scripts in setup.py with setuptools is a lot harder
than just using the ``console_script`` feature.
Therefore:
* Implement application code to the twist interface.
* Build a single utility for adapting that interface to the ``console_script``
interface.
Theory of Operation
===================
#. Applications provide ``Options`` and ``makeService``, the main pieces of
``IServiceMaker``.
#. We provide an object which can be called as a ``console_script``
entrypoint.
#. That object hooks ``Options`` and ``makeService`` up to the internals of
``twist`` (which are *totally* private, sigh).
"""<import_from_stmt>sys stdout argv<import_from_stmt>os.path expanduser<import_stmt>attr<import_from_stmt>twisted.application.twist _options<import_from_stmt>twisted.application.twist._twist Twist<line_sep>@attr.s(frozen=<true>)<class_stmt>MainService(object)<block_start>tapname="kubetop"<line_sep>description="kubetop"<line_sep>options=attr.ib()<line_sep>makeService=attr.ib()<block_end>@attr.s<class_stmt>TwistMain(object)<block_start>options=attr.ib()<line_sep>make_service=attr.ib()<line_sep>exit_status=0<line_sep>exit_message=<none><def_stmt>exit self reason=<none><block_start><if_stmt>reason<is><not><none><block_start>self.exit_status=1<line_sep>self.exit_message=reason.getTraceback()<block_end><import_from_stmt>twisted.internet reactor<line_sep>reactor.stop()<block_end><def_stmt>__call__ self<block_start>_options.getPlugins=<lambda>iface:[MainService(self.options self._make_service) ]<line_sep>t=Twist()<line_sep>log_flag=u"--log-file"<line_sep>log_file=u"~/.kubetop.log"<line_sep>app_name=u"kubetop"<if_stmt>str<is>bytes# sys.argv must be bytes Python 2
<block_start>log_flag=log_flag.encode("ascii")<line_sep>log_file=log_file.encode("ascii")<line_sep>app_name=app_name.encode("ascii")<block_end>t.main([argv[0] log_flag expanduser(log_file) app_name ]+argv[1:])<if_stmt>self.exit_message<block_start>stdout.write(self.exit_message)<block_end><raise>SystemExit(self.exit_status)<block_end><def_stmt>_make_service self options<block_start><return>self.make_service(self options)<block_end><block_end>
|
from __future__ import print_function
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.utils.typechecks import assert_is_type
from random import randrange
import numpy as np
from h2o.frame import H2OFrame


def h2o_H2OFrame_stats():
    """
    Python API test: h2o.frame.H2OFrame.max(), h2o.frame.H2OFrame.mean(),
    h2o.frame.H2OFrame.median(), and h2o.frame.H2OFrame.min().
    """
    row_num = randrange(1, 10)
    col_num = randrange(1, 10)
    python_lists = np.random.randint(-5, 5, (row_num, col_num))
    h2oframe = h2o.H2OFrame(python_obj=python_lists)

    assert abs(h2oframe.max() - np.ndarray.max(python_lists)) < 1e-12, \
        "h2o.H2OFrame.max() command is not working."
    assert abs(h2oframe.min() - np.ndarray.min(python_lists)) < 1e-12, \
        "h2o.H2OFrame.min() command is not working."

    h2oMean = h2oframe.mean(skipna=False, axis=0)
    assert_is_type(h2oMean, H2OFrame)
    numpmean = list(np.mean(python_lists, axis=0))
    h2omean = h2oMean.as_data_frame(use_pandas=True, header=False)
    assert pyunit_utils.equal_two_arrays(numpmean, h2omean.values.tolist()[0], 1e-12, 1e-6), \
        "h2o.H2OFrame.mean() command is not working."

    h2oMedian = h2oframe.median(na_rm=True)
    assert_is_type(h2oMedian, list)
    numpmedian = list(np.median(python_lists, axis=0))
    assert pyunit_utils.equal_two_arrays(numpmedian, h2oMedian, 1e-12, 1e-6), \
        "h2o.H2OFrame.median() command is not working."


pyunit_utils.standalone_test(h2o_H2OFrame_stats)
|
# 9th Solutions
#--------------------------
n = int(input())
d = {}
for i in range(n):
    x = input().split()
    d[x[0]] = x[1]

while True:
    try:
        name = input()
        if name in d:
            print(name, '=', d[name], sep='')
        else:
            print('Not found')
    except:
        break
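# Illustrative sample run (hypothetical input, consistent with the code above):
#   input : 3, then "sam 99912222", "tom 11122222", "harry 12299933",
#           followed by the queries "sam" and "edward"
#   output: sam=99912222
#           Not found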
|
#!/usr/bin/env python3
# coding=utf-8

import asyncio

from proxybroker import Broker
from requests import get

from libs.utils import print_success
from libs.utils import print_error
from libs.utils import ask_question
from libs.utils import print_status


async def show(proxies, proxy_list):
    while len(proxy_list) < 50:
        proxy = await proxies.get()
        if proxy is None:
            break
        print_success("[" + str(len(proxy_list) + 1) + "/50]",
                      "Proxy found:",
                      proxy.as_json()["host"] + ":" + str(proxy.as_json()["port"]))
        proxy_list.append(proxy.as_json()["host"] + ":" + str(proxy.as_json()["port"]))


def find_proxies():
    proxy_list = []
    proxies = asyncio.Queue()
    broker = Broker(proxies)
    tasks = asyncio.gather(
        broker.find(types=['HTTPS'], limit=50),
        show(proxies, proxy_list))
    loop = asyncio.get_event_loop()
    loop.run_until_complete(tasks)

    if len(proxy_list) % 5 != 0 and len(proxy_list) > 5:
        proxy_list = proxy_list[:len(proxy_list) - (len(proxy_list) % 5)]
    return proxy_list
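# Illustrative usage sketch (not part of the original module):
#
#     proxy_list = find_proxies()   # blocks until up to 50 HTTPS proxies are gathered
#     for hostport in proxy_list:
#         print(hostport)           # e.g. "203.0.113.7:8080" (placeholder value)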
|
import pytest

from mock import patch

from data.runmigration import run_alembic_migration
from alembic.script import ScriptDirectory
from test.fixtures import *


@pytest.mark.parametrize(
    "db_uri, is_valid",
    [
        ("postgresql://devtable:password@quay-postgres/registry_database", True),
        ("postgresql://devtable:password%25@quay-postgres/registry_database", False),
        ("postgresql://devtable:password%%25@quay-postgres/registry_database", True),
        ("postgresql://devtable@db:password@quay-postgres/registry_database", True),
    ],
)
def test_alembic_db_uri(db_uri, is_valid):
    """
    Test if the given URI is escaped for string interpolation (Python's configparser).
    """
    with patch("alembic.script.ScriptDirectory.run_env") as m:
        if is_valid:
            run_alembic_migration(db_uri)
        else:
            with pytest.raises(ValueError):
                run_alembic_migration(db_uri)
|
# Generated by Django 2.2.9 on 2020-01-27 20:30
<import_from_stmt>django.db migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('capdb' '0089_auto_20200127_1957') ]<line_sep>operations=[migrations.RemoveField(model_name='casetext' name='metadata' ) migrations.RemoveIndex(model_name='casemetadata' name='idx_in_scope' ) migrations.RemoveIndex(model_name='casemetadata' name='idx_in_scope_reporter' ) migrations.RemoveIndex(model_name='casemetadata' name='idx_in_scope_jurisdiction' ) migrations.RemoveIndex(model_name='casemetadata' name='idx_in_scope_court' ) migrations.RemoveField(model_name='casemetadata' name='court_name' ) migrations.RemoveField(model_name='casemetadata' name='court_name_abbreviation' ) migrations.RemoveField(model_name='casemetadata' name='court_slug' ) migrations.RemoveField(model_name='casemetadata' name='jurisdiction_name' ) migrations.RemoveField(model_name='casemetadata' name='jurisdiction_name_long' ) migrations.RemoveField(model_name='casemetadata' name='jurisdiction_slug' ) migrations.RemoveField(model_name='casemetadata' name='jurisdiction_whitelisted' ) migrations.RemoveField(model_name='historicalcasemetadata' name='court_name' ) migrations.RemoveField(model_name='historicalcasemetadata' name='court_name_abbreviation' ) migrations.RemoveField(model_name='historicalcasemetadata' name='court_slug' ) migrations.RemoveField(model_name='historicalcasemetadata' name='jurisdiction_name' ) migrations.RemoveField(model_name='historicalcasemetadata' name='jurisdiction_name_long' ) migrations.RemoveField(model_name='historicalcasemetadata' name='jurisdiction_slug' ) migrations.RemoveField(model_name='historicalcasemetadata' name='jurisdiction_whitelisted' ) migrations.DeleteModel(name='CaseText' ) ]<block_end>
|
<import_stmt>logging<import_from_stmt>collections namedtuple<import_from_stmt>typing List Optional<import_from_stmt>. vlrlist<import_from_stmt>.known GeoAsciiParamsVlr GeoDoubleParamsVlr GeoKeyDirectoryVlr<line_sep>GeoTiffKey=namedtuple("GeoTiffKey" ("id" "value"))<line_sep>logger=logging.getLogger(__name__)<line_sep>GTModelTypeGeoKey=1024<line_sep>GTRasterTypeGeoKey=1025<line_sep>GTCitationGeoKey=1026<line_sep>GeogCitationGeoKey=2049<line_sep>GeogAngularUnitsGeoKey=2054<line_sep>ProjectedCSTypeGeoKey=3072<line_sep>ProjLinearUnitsGeoKey=3076<def_stmt>parse_geo_tiff_keys_from_vlrs vlr_list:vlrlist.VLRList<arrow>List[GeoTiffKey]<block_start>"""Gets the 3 GeoTiff vlrs from the vlr_list and parse them into
a nicer structure
Parameters
----------
vlr_list: laspy.vrls.vlrslist.VLRList list of vlrs from a las file
Raises
------
IndexError if any of the needed GeoTiffVLR is not found in the list
Returns
-------
List of GeoTiff keys parsed from the VLRs
"""<line_sep>geo_key_dir=vlr_list.get_by_id(GeoKeyDirectoryVlr.official_user_id() GeoKeyDirectoryVlr.official_record_ids())[0]<try_stmt><block_start>geo_doubles=vlr_list.get_by_id(GeoDoubleParamsVlr.official_user_id() GeoDoubleParamsVlr.official_record_ids() )[0]<block_end><except_stmt>IndexError<block_start>geo_doubles=<none><block_end><try_stmt><block_start>geo_ascii=vlr_list.get_by_id(GeoAsciiParamsVlr.official_user_id() GeoAsciiParamsVlr.official_record_ids() )[0]<block_end><except_stmt>IndexError<block_start>geo_ascii=<none><block_end><return>parse_geo_tiff(geo_key_dir geo_doubles geo_ascii)<block_end><def_stmt>parse_geo_tiff key_dir_vlr:GeoKeyDirectoryVlr double_vlr:Optional[GeoDoubleParamsVlr] ascii_vlr:Optional[GeoAsciiParamsVlr] <arrow>List[GeoTiffKey]<block_start>"""Parses the GeoTiff VLRs information into nicer structs"""<line_sep>geotiff_keys=[]<for_stmt>k key_dir_vlr.geo_keys<block_start><if_stmt>k.tiff_tag_location<eq>0<block_start>value=k.value_offset<block_end><elif_stmt>k.tiff_tag_location<eq>34736<block_start><if_stmt>double_vlr<is><none><block_start><raise>RuntimeError("Geotiff tag location points to GeoDoubleParams, "<concat>"but it does not exists")<block_end>value=double_vlr.doubles[k.value_offset]<block_end><elif_stmt>k.tiff_tag_location<eq>34737<block_start><if_stmt>ascii_vlr<is><none><block_start><raise>RuntimeError("Geotiff tag location points to GeoAsciiParams, "<concat>"but it does not exists")<block_end>value=ascii_vlr.string(k.value_offset k.count)<block_end><else_stmt><block_start>logger.warning("GeoTiffKey with unknown tiff tag location ({})".format(k.tiff_tag_location))<line_sep><continue><block_end>geotiff_keys.append(GeoTiffKey(k.id value))<block_end><return>geotiff_keys<block_end>
|
<import_stmt>ConfigParser<import_stmt>io<import_stmt>os<line_sep>base_dir=os.getenv("XDG_CONFIG_DIR" os.path.join(os.path.expanduser("~") ".config"))<line_sep>config_dir=os.path.join(base_dir "systemd/user")<def_stmt>is_available <block_start><return>any(os.access(os.path.join(path "systemctl") os.X_OK)<for>path os.getenv("PATH").split(os.pathsep))<block_end><def_stmt>get_timer_path job_id<block_start><return>os.path.join(config_dir job_id+".timer")<block_end><def_stmt>get_service_path job_id<block_start><return>os.path.join(config_dir job_id+".service")<block_end><def_stmt>new_config <block_start>config=ConfigParser.RawConfigParser()<line_sep>config.optionxform=str<line_sep><return>config<block_end><def_stmt>parse_config timer_cfg<block_start>config=new_config()<line_sep>config.readfp(io.BytesIO(timer_cfg))<line_sep>period=config.get("Timer" "OnCalendar").split('/')[1]<line_sep><return>{"period":period "delay":"0"}<block_end><def_stmt>get_job job_id<block_start><with_stmt>open(get_timer_path(job_id) "r")<as>f<block_start>cfg=parse_config(f.read())<line_sep>cfg["id"]=job_id<line_sep><return>cfg<block_end><block_end><def_stmt>write_config config file_path<block_start><with_stmt>open(file_path "w")<as>f<block_start>config.write(f)<block_end><block_end><def_stmt>update_job job<block_start>job_id=job["id"]<line_sep>period=job["period"]<line_sep>command=job["command"]<line_sep># Timer
config=new_config()<line_sep>config.add_section("Unit")<line_sep>config.set("Unit" "Description" "Bups backup manager timer")<line_sep>config.add_section("Timer")<line_sep>config.set("Timer" "OnCalendar" "*-*-1/%d"%period)<line_sep>config.set("Timer" "Persistent" "true")<line_sep>config.add_section("Install")<line_sep>config.set("Install" "WantedBy" "timers.target")<line_sep>write_config(config get_timer_path(job_id))<line_sep># Create service
config=new_config()<line_sep>config.add_section("Unit")<line_sep>config.set("Unit" "Description" "Bups backup manager service")<line_sep>config.add_section("Service")<line_sep>config.set("Service" "Type" "simple")<line_sep>config.set("Service" "ExecStart" command)<line_sep>write_config(config get_service_path(job_id))<line_sep># Notify systemd
call_systemctl(["daemon-reload"])<line_sep>call_systemctl(["enable" get_timer_path(job_id)])<line_sep>call_systemctl(["start" job_id])<block_end><def_stmt>remove_job job_id<block_start>timer_path=get_timer_path(job_id)<line_sep>service_path=get_service_path(job_id)<line_sep>timer_basename=os.path.basename(timer_path)<line_sep>call_systemctl(["stop" timer_basename])<line_sep>call_systemctl(["disable" timer_basename])<line_sep>os.remove(timer_path)<line_sep>os.remove(service_path)<block_end><def_stmt>call_systemctl args<block_start>cmd="systemctl --user %s"%" ".join(args)<if_stmt>os.system(cmd)<ne>0<block_start><raise>IOError("Failed to run command: %"%cmd)<block_end><block_end>
|
"""
Secure - file ``/var/log/secure``
==================================
"""<import_from_stmt>.. Syslog parser<import_from_stmt>insights.specs Specs<line_sep>@parser(Specs.secure)<class_stmt>Secure(Syslog)<block_start>"""Class for parsing the ``/var/log/secure`` file.
Sample log text::
Aug 24 09:31:39 localhost polkitd[822]: Finished loading, compiling and executing 6 rules
Aug 24 09:31:39 localhost polkitd[822]: Acquired the name org.freedesktop.PolicyKit1 on the system bus
Aug 25 13:52:54 localhost sshd[23085]: pam_unix(sshd:session): session opened for user zjj by (uid=0)
Aug 25 13:52:54 localhost sshd[23085]: error: openpty: No such file or directory
.. note::
Please refer to its super-class :class:`insights.core.Syslog`
.. note::
Because timestamps in the secure log by default have no year,
the year of the logs will be inferred from the year in your
timestamp. This will also work around December/January crossovers.
Examples:
>>> secure = shared[Secure]
>>> secure.get('session opened')
[{'timestamp':'Aug 25 13:52:54',
'hostname':'localhost',
'procname': 'sshd[23085]',
'message': 'pam_unix(sshd:session): session opened for user zjj by (uid=0)',
'raw_message': 'Aug 25 13:52:54 localhost sshd[23085]: pam_unix(sshd:session): session opened for user zjj by (uid=0)'
}]
>>> len(list(secure.get_after(datetime(2017, 8, 25, 0, 0, 0))))
2
"""<line_sep>time_format='%b %d %H:%M:%S'<block_end>
|
import pybullet as p

p.connect(p.GUI)
plane = p.loadURDF("plane.urdf")
visualData = p.getVisualShapeData(plane, p.VISUAL_SHAPE_DATA_TEXTURE_UNIQUE_IDS)
print(visualData)

curTexUid = visualData[0][8]
print(curTexUid)

texUid = p.loadTexture("tex256.png")
print("texUid=", texUid)
p.changeVisualShape(plane, -1, textureUniqueId=texUid)

for i in range(100):
    p.getCameraImage(320, 200)

p.changeVisualShape(plane, -1, textureUniqueId=curTexUid)

for i in range(100):
    p.getCameraImage(320, 200)
|
import random

import numpy as np
from torch.utils.data.dataset import Dataset

from config import cfg


class MultipleDatasets(Dataset):
    def __init__(self, dbs, make_same_len=True):
        self.dbs = dbs
        self.db_num = len(self.dbs)
        self.max_db_data_num = max([len(db) for db in dbs])
        self.db_len_cumsum = np.cumsum([len(db) for db in dbs])
        self.make_same_len = make_same_len

    def __len__(self):
        if self.make_same_len:
            # all dbs have the same length
            return self.max_db_data_num * self.db_num
        else:
            # each db has a different length
            return sum([len(db) for db in self.dbs])

    def __getitem__(self, index):
        if self.make_same_len:
            db_idx = index // self.max_db_data_num
            data_idx = index % self.max_db_data_num
            if data_idx >= len(self.dbs[db_idx]) * (self.max_db_data_num // len(self.dbs[db_idx])):
                # last batch: random sampling
                data_idx = random.randint(0, len(self.dbs[db_idx]) - 1)
            else:
                # before last batch: use modular
                data_idx = data_idx % len(self.dbs[db_idx])
        else:
            for i in range(self.db_num):
                if index < self.db_len_cumsum[i]:
                    db_idx = i
                    break
            if db_idx == 0:
                data_idx = index
            else:
                data_idx = index - self.db_len_cumsum[db_idx - 1]

        return self.dbs[db_idx][data_idx]
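# Illustrative usage sketch (not part of the original module): wrap several datasets
# so each epoch draws the same number of samples from every one, then feed the
# combined dataset to a regular DataLoader (dataset_a / dataset_b are placeholders).
#
#     from torch.utils.data import DataLoader
#     combined = MultipleDatasets([dataset_a, dataset_b], make_same_len=True)
#     loader = DataLoader(combined, batch_size=32, shuffle=True)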
|
[a, *b, *d, a, c]
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for models.definitions.us_model_definitions."""<import_stmt>unittest<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>covid_epidemiology.src constants<import_from_stmt>covid_epidemiology.src.models.definitions us_model_definitions<class_stmt>TestStateModelDefinition(unittest.TestCase)<block_start><def_stmt>test_get_ts_features self<block_start>expected_ts_features={constants.DEATH:constants.JHU_DEATH_FEATURE_KEY constants.CONFIRMED:constants.JHU_CONFIRMED_FEATURE_KEY constants.RECOVERED_DOC:constants.RECOVERED_FEATURE_KEY constants.HOSPITALIZED:constants.HOSPITALIZED_FEATURE_KEY constants.HOSPITALIZED_INCREASE:constants.HOSPITALIZED_INCREASE_FEATURE_KEY constants.ICU:constants.ICU_FEATURE_KEY constants.VENTILATOR:constants.VENTILATOR_FEATURE_KEY constants.MOBILITY_INDEX:constants.MOBILITY_INDEX constants.MOBILITY_SAMPLES:constants.MOBILITY_SAMPLES constants.TOTAL_TESTS:constants.TOTAL_TESTS constants.AMP_RESTAURANTS:constants.AMP_RESTAURANTS constants.AMP_NON_ESSENTIAL_BUSINESS:constants.AMP_NON_ESSENTIAL_BUSINESS constants.AMP_STAY_AT_HOME:constants.AMP_STAY_AT_HOME constants.AMP_SCHOOLS_SECONDARY_EDUCATION:constants.AMP_SCHOOLS_SECONDARY_EDUCATION constants.AMP_EMERGENCY_DECLARATION:constants.AMP_EMERGENCY_DECLARATION constants.AMP_GATHERINGS:constants.AMP_GATHERINGS constants.AMP_FACE_MASKS:constants.AMP_FACE_MASKS constants.DOW_WINDOW:constants.DOW_WINDOW constants.AVERAGE_TEMPERATURE:constants.AVERAGE_TEMPERATURE constants.MAX_TEMPERATURE:constants.MAX_TEMPERATURE constants.MIN_TEMPERATURE:constants.MIN_TEMPERATURE constants.RAINFALL:constants.RAINFALL constants.SNOWFALL:constants.SNOWFALL constants.COMMERCIAL_SCORE:constants.COMMERCIAL_SCORE constants.ANTIGEN_POSITIVE:constants.ANTIGEN_POSITIVE constants.ANTIGEN_TOTAL:constants.ANTIGEN_TOTAL constants.ANTIBODY_NEGATIVE:constants.ANTIBODY_NEGATIVE constants.ANTIBODY_TOTAL:constants.ANTIBODY_TOTAL constants.SYMPTOM_COUGH:constants.SYMPTOM_COUGH constants.SYMPTOM_CHILLS:constants.SYMPTOM_CHILLS constants.SYMPTOM_ANOSMIA:constants.SYMPTOM_ANOSMIA constants.SYMPTOM_INFECTION:constants.SYMPTOM_INFECTION constants.SYMPTOM_CHEST_PAIN:constants.SYMPTOM_CHEST_PAIN constants.SYMPTOM_FEVER:constants.SYMPTOM_FEVER constants.SYMPTOM_SHORTNESSBREATH:constants.SYMPTOM_SHORTNESSBREATH constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL:constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL:constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL }<line_sep>state_model=us_model_definitions.StateModelDefinition(gt_source=constants.GT_SOURCE_JHU)<line_sep>actual_ts_features=state_model.get_ts_features()<line_sep>np.testing.assert_equal(expected_ts_features actual_ts_features)<block_end><def_stmt>test_get_ts_features_to_preprocess self<block_start>expected_ts_features={constants.MOBILITY_INDEX constants.MOBILITY_SAMPLES constants.AMP_RESTAURANTS constants.AMP_NON_ESSENTIAL_BUSINESS constants.AMP_STAY_AT_HOME constants.AMP_SCHOOLS_SECONDARY_EDUCATION constants.AMP_EMERGENCY_DECLARATION constants.AMP_GATHERINGS constants.AMP_FACE_MASKS constants.CONFIRMED_PER_TESTS constants.DEATH_PREPROCESSED constants.CONFIRMED_PREPROCESSED constants.DOW_WINDOW constants.TOTAL_TESTS_PER_CAPITA constants.TOTAL_TESTS constants.AVERAGE_TEMPERATURE constants.MAX_TEMPERATURE constants.MIN_TEMPERATURE constants.RAINFALL constants.SNOWFALL constants.COMMERCIAL_SCORE constants.ANTIGEN_POSITIVE_RATIO constants.ANTIBODY_NEGATIVE_RATIO constants.SYMPTOM_COUGH constants.SYMPTOM_CHILLS constants.SYMPTOM_ANOSMIA constants.SYMPTOM_INFECTION 
constants.SYMPTOM_CHEST_PAIN constants.SYMPTOM_FEVER constants.SYMPTOM_SHORTNESSBREATH constants.VACCINATED_RATIO_FIRST_DOSE_PER_DAY_PREPROCESSED constants.VACCINATED_RATIO_SECOND_DOSE_PER_DAY_PREPROCESSED }<line_sep>state_model=us_model_definitions.StateModelDefinition(gt_source=constants.GT_SOURCE_JHU)<line_sep>actual_ts_features=state_model.get_ts_features_to_preprocess()<line_sep>np.testing.assert_equal(expected_ts_features actual_ts_features)<block_end><def_stmt>test_extract_ts_state_features self<block_start>ts_data=pd.DataFrame([{"feature_name":constants.JHU_CONFIRMED_FEATURE_KEY "feature_value":100 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.JHU_CONFIRMED_FEATURE_KEY "feature_value":200 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.JHU_DEATH_FEATURE_KEY "feature_value":10 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.JHU_DEATH_FEATURE_KEY "feature_value":float("nan") # Not populated should ffill to 10.
"dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.HOSPITALIZED_FEATURE_KEY "feature_value":100 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.HOSPITALIZED_FEATURE_KEY "feature_value":200 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.ICU_FEATURE_KEY "feature_value":2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.ICU_FEATURE_KEY "feature_value":5 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.VENTILATOR_FEATURE_KEY "feature_value":50 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.VENTILATOR_FEATURE_KEY "feature_value":100 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.MOBILITY_INDEX "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.MOBILITY_INDEX "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.MOBILITY_SAMPLES "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.MOBILITY_SAMPLES "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.TOTAL_TESTS "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.TOTAL_TESTS "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.AMP_GATHERINGS "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.AMP_GATHERINGS "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.AMP_EMERGENCY_DECLARATION "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.AMP_EMERGENCY_DECLARATION "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.AMP_SCHOOLS_SECONDARY_EDUCATION "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.AMP_SCHOOLS_SECONDARY_EDUCATION "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.AMP_RESTAURANTS "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.AMP_RESTAURANTS "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.AMP_NON_ESSENTIAL_BUSINESS "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.AMP_NON_ESSENTIAL_BUSINESS "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.AMP_STAY_AT_HOME "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.AMP_STAY_AT_HOME "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.AMP_FACE_MASKS "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.AMP_FACE_MASKS "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.AVERAGE_TEMPERATURE "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.AVERAGE_TEMPERATURE "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.MAX_TEMPERATURE "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.MAX_TEMPERATURE "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.MIN_TEMPERATURE 
"feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.MIN_TEMPERATURE "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.RAINFALL "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.RAINFALL "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.SNOWFALL "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.SNOWFALL "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.COMMERCIAL_SCORE "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.COMMERCIAL_SCORE "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.ANTIGEN_POSITIVE "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.ANTIGEN_POSITIVE "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.ANTIGEN_TOTAL "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.ANTIGEN_TOTAL "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.ANTIBODY_NEGATIVE "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.ANTIBODY_NEGATIVE "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.ANTIBODY_TOTAL "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.ANTIBODY_TOTAL "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.RECOVERED_FEATURE_KEY "feature_value":12 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.RECOVERED_FEATURE_KEY "feature_value":11 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.HOSPITALIZED_INCREASE_FEATURE_KEY "feature_value":16 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.HOSPITALIZED_INCREASE_FEATURE_KEY "feature_value":14 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.SYMPTOM_COUGH "feature_value":0.6 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.SYMPTOM_COUGH "feature_value":0.7 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.SYMPTOM_CHILLS "feature_value":0.6 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.SYMPTOM_CHILLS "feature_value":0.7 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.SYMPTOM_ANOSMIA "feature_value":0.6 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.SYMPTOM_ANOSMIA "feature_value":0.7 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.SYMPTOM_INFECTION "feature_value":0.6 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.SYMPTOM_INFECTION "feature_value":0.7 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.SYMPTOM_CHEST_PAIN "feature_value":0.6 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.SYMPTOM_CHEST_PAIN "feature_value":0.7 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.SYMPTOM_FEVER "feature_value":0.6 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.SYMPTOM_FEVER "feature_value":0.7 "dt":np.datetime64("2020-01-22") 
"geo_id":"4059"} {"feature_name":constants.SYMPTOM_SHORTNESSBREATH "feature_value":0.6 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.SYMPTOM_SHORTNESSBREATH "feature_value":0.7 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL "feature_value":10 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL "feature_value":20 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL "feature_value":5 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL "feature_value":10 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} ])<line_sep>static_data=pd.DataFrame([{"feature_name":constants.AQI_MEAN "feature_value":105 "geo_id":"4059"} {"feature_name":constants.AREA "feature_value":10 "geo_id":"4058"} {"feature_name":constants.AREA "feature_value":10 "geo_id":"4059"} {"feature_name":constants.INCOME_PER_CAPITA "feature_value":120 "geo_id":"4058"} {"feature_name":constants.INCOME_PER_CAPITA "feature_value":100 "geo_id":"4059"} {"feature_name":constants.POPULATION "feature_value":70 "geo_id":"4059"} {"feature_name":constants.POPULATION "feature_value":50 "geo_id":"4058"} {"feature_name":constants.POPULATION "feature_value":10 "geo_id":"4057"}])<line_sep>state_model=us_model_definitions.StateModelDefinition(gt_source="JHU")<line_sep>static_features,_=state_model._extract_static_features(static_data=static_data locations=["4059"])<line_sep>actual,_=state_model._extract_ts_features(ts_data=ts_data static_features=static_features locations=["4059"] training_window_size=2)<line_sep>expected={constants.CONFIRMED:{"4059":np.array([100 200] dtype="float32")} constants.DEATH:{"4059":[10 np.nan]} constants.DEATH_PREPROCESSED:{"4059":[0 0]} constants.ICU:{"4059":np.array([2 5] dtype="float32")} constants.INFECTED:<none> constants.HOSPITALIZED:{"4059":np.array([100 200] dtype="float32")} constants.MOBILITY_INDEX:{"4059":np.array([1 0] dtype="float32")} constants.VENTILATOR:{"4059":np.array([50 100] dtype="float32")} constants.RECOVERED_DOC:{"4059":np.array([11 12] dtype="float32")} constants.HOSPITALIZED_INCREASE:{"4059":np.array([14 16] dtype="float32")} constants.HOSPITALIZED_CUMULATIVE:{"4059":np.array([14 30] dtype="float32")} constants.TOTAL_TESTS_PER_CAPITA:{"4059":np.array([1 0] dtype="float32")} }<for_stmt>ts_feature_name expected<block_start>self.assertIn(ts_feature_name actual)<line_sep>np.testing.assert_equal(actual[ts_feature_name] expected[ts_feature_name] "Feature name {} is not aligned.".format(ts_feature_name))<block_end><block_end><def_stmt>test_get_static_features self<block_start>expected_static_features={constants.POPULATION:constants.POPULATION constants.INCOME_PER_CAPITA:constants.INCOME_PER_CAPITA constants.POPULATION_DENSITY_PER_SQKM:constants.POPULATION_DENSITY_PER_SQKM constants.HOUSEHOLD_FOOD_STAMP:constants.HOUSEHOLD_FOOD_STAMP constants.KAISER_POPULATION:constants.KAISER_POPULATION constants.KAISER_60P_POPULATION:constants.KAISER_60P_POPULATION constants.ICU_BEDS:constants.ICU_BEDS constants.HOUSEHOLDS:constants.HOUSEHOLDS constants.HOSPITAL_RATING1:constants.HOSPITAL_RATING1 constants.HOSPITAL_RATING2:constants.HOSPITAL_RATING2 constants.HOSPITAL_RATING3:constants.HOSPITAL_RATING3 constants.HOSPITAL_RATING4:constants.HOSPITAL_RATING4 constants.HOSPITAL_RATING5:constants.HOSPITAL_RATING5 constants.AQI_MEAN:constants.AQI_MEAN 
constants.NON_EMERGENCY_SERVICES:constants.NON_EMERGENCY_SERVICES constants.EMERGENCY_SERVICES:constants.EMERGENCY_SERVICES constants.HOSPITAL_ACUTE_CARE:constants.HOSPITAL_ACUTE_CARE constants.CRITICAL_ACCESS_HOSPITAL:constants.CRITICAL_ACCESS_HOSPITAL constants.PATIENCE_EXPERIENCE_SAME:constants.PATIENCE_EXPERIENCE_SAME constants.PATIENCE_EXPERIENCE_BELOW:constants.PATIENCE_EXPERIENCE_BELOW constants.PATIENCE_EXPERIENCE_ABOVE:constants.PATIENCE_EXPERIENCE_ABOVE }<line_sep>state_model=us_model_definitions.StateModelDefinition(gt_source=constants.GT_SOURCE_JHU)<line_sep>actual_static_features=state_model.get_static_features()<line_sep>np.testing.assert_equal(expected_static_features actual_static_features)<block_end><def_stmt>test_extract_state_static_features self<block_start>static_data=pd.DataFrame([{"feature_name":constants.AQI_MEAN "feature_value":105 "geo_id":"4059"} {"feature_name":constants.AREA "feature_value":10 "geo_id":"4058"} {"feature_name":constants.AREA "feature_value":10 "geo_id":"4059"} {"feature_name":constants.INCOME_PER_CAPITA "feature_value":120 "geo_id":"4058"} {"feature_name":constants.INCOME_PER_CAPITA "feature_value":100 "geo_id":"4059"} {"feature_name":constants.POPULATION "feature_value":70 "geo_id":"4059"} {"feature_name":constants.POPULATION "feature_value":50 "geo_id":"4058"} {"feature_name":constants.POPULATION "feature_value":10 "geo_id":"4057"}])<line_sep>state_model=us_model_definitions.StateModelDefinition(gt_source="JHU")<line_sep>actual,_=state_model._extract_static_features(static_data=static_data locations=["4059" "4058"])<line_sep>expected={constants.AQI_MEAN:{"4059":0 "4058":0} constants.INCOME_PER_CAPITA:{"4059":0 "4058":1} constants.POPULATION:{"4059":70 "4058":50} constants.POPULATION_DENSITY_PER_SQKM:{"4059":0 "4058":0} }<for_stmt>static_feature_name expected<block_start>self.assertEqual(actual[static_feature_name] expected[static_feature_name])<block_end><block_end><block_end><class_stmt>TestCountyModelDefinition(unittest.TestCase)<block_start><def_stmt>test_get_ts_features self<block_start>expected_ts_features={constants.DEATH:constants.JHU_COUNTY_DEATH_FEATURE_KEY constants.CONFIRMED:constants.JHU_COUNTY_CONFIRMED_FEATURE_KEY constants.RECOVERED_DOC:constants.CSRP_RECOVERED_FEATURE_KEY constants.HOSPITALIZED:constants.CHA_HOSPITALIZED_FEATURE_KEY constants.HOSPITALIZED_CUMULATIVE:constants.CHA_HOSPITALIZED_CUMULATIVE_FEATURE_KEY constants.ICU:constants.CSRP_ICU_FEATURE_KEY constants.MOBILITY_INDEX:constants.MOBILITY_INDEX constants.MOBILITY_SAMPLES:constants.MOBILITY_SAMPLES constants.CSRP_TESTS:constants.CSRP_TESTS constants.AMP_RESTAURANTS:constants.AMP_RESTAURANTS constants.AMP_NON_ESSENTIAL_BUSINESS:constants.AMP_NON_ESSENTIAL_BUSINESS constants.AMP_STAY_AT_HOME:constants.AMP_STAY_AT_HOME constants.AMP_SCHOOLS_SECONDARY_EDUCATION:constants.AMP_SCHOOLS_SECONDARY_EDUCATION constants.AMP_EMERGENCY_DECLARATION:constants.AMP_EMERGENCY_DECLARATION constants.AMP_GATHERINGS:constants.AMP_GATHERINGS constants.AMP_FACE_MASKS:constants.AMP_FACE_MASKS constants.DOW_WINDOW:constants.DOW_WINDOW constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL:constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL:constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL }<line_sep>county_model=us_model_definitions.CountyModelDefinition(gt_source=constants.GT_SOURCE_JHU)<line_sep>actual_ts_features=county_model.get_ts_features()<line_sep>np.testing.assert_equal(expected_ts_features actual_ts_features)<block_end><def_stmt>test_get_ts_features_to_preprocess 
self<block_start>expected_ts_features={constants.MOBILITY_INDEX constants.MOBILITY_SAMPLES constants.CSRP_TESTS constants.CONFIRMED_PER_CSRP_TESTS constants.TOTAL_TESTS_PER_CAPITA constants.AMP_RESTAURANTS constants.AMP_NON_ESSENTIAL_BUSINESS constants.AMP_STAY_AT_HOME constants.AMP_SCHOOLS_SECONDARY_EDUCATION constants.AMP_EMERGENCY_DECLARATION constants.AMP_GATHERINGS constants.AMP_FACE_MASKS constants.DEATH_PREPROCESSED constants.CONFIRMED_PREPROCESSED constants.DOW_WINDOW constants.TOTAL_TESTS_PER_CAPITA constants.VACCINATED_RATIO_FIRST_DOSE_PER_DAY_PREPROCESSED constants.VACCINATED_RATIO_SECOND_DOSE_PER_DAY_PREPROCESSED }<line_sep>county_model=us_model_definitions.CountyModelDefinition(gt_source=constants.GT_SOURCE_JHU)<line_sep>actual_ts_features=county_model.get_ts_features_to_preprocess()<line_sep>np.testing.assert_equal(expected_ts_features actual_ts_features)<block_end><def_stmt>test_extract_ts_county_features self<block_start>ts_data=pd.DataFrame([{"feature_name":"confirmed_cases" "feature_value":100 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":"confirmed_cases" "feature_value":200 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":"deaths" "feature_value":10 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":"deaths" "feature_value":13 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.MOBILITY_INDEX "feature_value":0.0 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.MOBILITY_INDEX "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.MOBILITY_SAMPLES "feature_value":10 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.MOBILITY_SAMPLES "feature_value":12 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.CSRP_TESTS "feature_value":70 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.CSRP_TESTS "feature_value":140 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.AMP_GATHERINGS "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.AMP_GATHERINGS "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.AMP_EMERGENCY_DECLARATION "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.AMP_EMERGENCY_DECLARATION "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.AMP_SCHOOLS_SECONDARY_EDUCATION "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.AMP_SCHOOLS_SECONDARY_EDUCATION "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.AMP_RESTAURANTS "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.AMP_RESTAURANTS "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.AMP_NON_ESSENTIAL_BUSINESS "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.AMP_NON_ESSENTIAL_BUSINESS "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.AMP_STAY_AT_HOME "feature_value":1.0 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.AMP_STAY_AT_HOME "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.AMP_FACE_MASKS "feature_value":1.0 "dt":np.datetime64("2020-01-23") 
"geo_id":"4059"} {"feature_name":constants.AMP_FACE_MASKS "feature_value":1.2 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.CSRP_RECOVERED_FEATURE_KEY "feature_value":12 "dt":np.datetime64("2020-01-23") "geo_id":"4059" } {"feature_name":constants.CSRP_RECOVERED_FEATURE_KEY "feature_value":11 "dt":np.datetime64("2020-01-22") "geo_id":"4059" } {"feature_name":constants.CHA_HOSPITALIZED_FEATURE_KEY "feature_value":100 "dt":np.datetime64("2020-01-22") "geo_id":"4059" } {"feature_name":constants.CHA_HOSPITALIZED_FEATURE_KEY "feature_value":200 "dt":np.datetime64("2020-01-23") "geo_id":"4059" } {"feature_name":constants.CHA_HOSPITALIZED_CUMULATIVE_FEATURE_KEY "feature_value":200 "dt":np.datetime64("2020-01-22") "geo_id":"4059" } {"feature_name":constants.CHA_HOSPITALIZED_CUMULATIVE_FEATURE_KEY "feature_value":300 "dt":np.datetime64("2020-01-23") "geo_id":"4059" } {"feature_name":constants.CSRP_ICU_FEATURE_KEY "feature_value":20 "dt":np.datetime64("2020-01-22") "geo_id":"4059" } {"feature_name":constants.CSRP_ICU_FEATURE_KEY "feature_value":30 "dt":np.datetime64("2020-01-23") "geo_id":"4059" } {"feature_name":constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL "feature_value":10 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.VACCINES_GOVEX_FIRST_DOSE_TOTAL "feature_value":20 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} {"feature_name":constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL "feature_value":5 "dt":np.datetime64("2020-01-22") "geo_id":"4059"} {"feature_name":constants.VACCINES_GOVEX_SECOND_DOSE_TOTAL "feature_value":10 "dt":np.datetime64("2020-01-23") "geo_id":"4059"} ])<line_sep>static_data=pd.DataFrame([{"feature_name":constants.AREA "feature_value":10 "geo_id":"4059"} {"feature_name":constants.AREA "feature_value":10 "geo_id":"4058"} {"feature_name":constants.INCOME_PER_CAPITA "feature_value":120 "geo_id":"4058"} {"feature_name":constants.INCOME_PER_CAPITA "feature_value":100 "geo_id":"4059"} {"feature_name":constants.COUNTY_POPULATION "feature_value":70 "geo_id":"4059"} {"feature_name":constants.COUNTY_POPULATION "feature_value":50 "geo_id":"4058"} {"feature_name":constants.COUNTY_POPULATION "feature_value":10 "geo_id":"4057"}])<line_sep>state_model=us_model_definitions.CountyModelDefinition(gt_source="USAFACTS")<line_sep>static_features,_=state_model._extract_static_features(static_data=static_data locations=["4059"])<line_sep>actual,_=state_model._extract_ts_features(ts_data=ts_data static_features=static_features locations=["4059"] training_window_size=2)<line_sep>expected={constants.DEATH:{"4059":np.array([10 13] dtype="float32")} constants.CONFIRMED:{"4059":np.array([100 200] dtype="float32")} constants.MOBILITY_SAMPLES:{"4059":np.array([0 1] dtype="float32")} constants.MOBILITY_INDEX:{"4059":np.array([0 1] dtype="float32")} constants.CSRP_TESTS:{"4059":np.array([0 1] dtype="float32")} constants.RECOVERED_DOC:{"4059":np.array([11 12] dtype="float32") } constants.HOSPITALIZED:{"4059":np.array([100 200] dtype="float32") } constants.HOSPITALIZED_CUMULATIVE:{"4059":np.array([200 300] dtype="float32") } constants.ICU:{"4059":np.array([20 30] dtype="float32") } constants.TOTAL_TESTS_PER_CAPITA:{"4059":np.array([0 0] dtype="float32") } }<for_stmt>ts_feature_name expected<block_start>self.assertIn(ts_feature_name actual)<line_sep>np.testing.assert_equal(actual[ts_feature_name] expected[ts_feature_name] "Unexpected value for feature %s"%ts_feature_name)<block_end><block_end><def_stmt>test_get_static_features 
self<block_start>county_model=us_model_definitions.CountyModelDefinition(gt_source=constants.GT_SOURCE_JHU)<line_sep>actual_static_features=county_model.get_static_features()<line_sep>self.assertEqual(len(actual_static_features) 51)<block_end><def_stmt>test_get_all_locations self<block_start>input_df=pd.DataFrame({constants.GEO_ID_COLUMN:["4059" "4060" "4061" "4062"]})<line_sep># Exclude FIPS 15005 (Kalawao County, no longer exists)
expected_locations={"4059" "4060" "4061" "4062"}<line_sep>county_model=us_model_definitions.CountyModelDefinition(gt_source=constants.GT_SOURCE_JHU)<line_sep>actual_locations=county_model.get_all_locations(input_df)<line_sep>np.testing.assert_equal(expected_locations actual_locations)<block_end><def_stmt>test_extract_county_static_features self<block_start>static_data=pd.DataFrame([{"feature_name":constants.AREA "feature_value":10 "geo_id":"4059"} {"feature_name":constants.AREA "feature_value":10 "geo_id":"4058"} {"feature_name":constants.INCOME_PER_CAPITA "feature_value":120 "geo_id":"4058"} {"feature_name":constants.INCOME_PER_CAPITA "feature_value":100 "geo_id":"4059"} {"feature_name":constants.COUNTY_POPULATION "feature_value":70 "geo_id":"4059"} {"feature_name":constants.COUNTY_POPULATION "feature_value":50 "geo_id":"4058"} {"feature_name":constants.COUNTY_POPULATION "feature_value":10 "geo_id":"4057"}])<line_sep>county_model=us_model_definitions.CountyModelDefinition(gt_source="JHU")<line_sep>actual,_=county_model._extract_static_features(static_data=static_data locations=["4059" "4058"])<line_sep>expected={constants.INCOME_PER_CAPITA:{"4059":0 "4058":1} constants.POPULATION:{"4059":70 "4058":50}}<for_stmt>static_feature_name expected<block_start>self.assertEqual(actual[static_feature_name] expected[static_feature_name] "Unexpected value for feature %s"%static_feature_name)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 - 2019, doudoudzj
# All rights reserved.
#
# InPanel is distributed under the terms of the New BSD License.
# The full license can be found in 'LICENSE'.
"""Module for Lighttpd configuration management."""<def_stmt>web_response self<block_start>action=self.get_argument('action' '')<if_stmt>action<eq>'getsettings'<block_start>self.write({'code':0 'msg':'Lighttpd 配置信息获取成功!' 'data':get_config()})<block_end><elif_stmt>action<eq>'savesettings'<block_start>self.write({'code':0 'msg':'Lighttpd 服务配置保存成功!' 'data':set_config(self)})<block_end><return><block_end><def_stmt>get_config <block_start><return>dict()<block_end><def_stmt>set_config self<block_start><return>dict()<block_end>
|
<import_from_stmt>typing Dict Any<import_from_stmt>platypush.message.event Event<class_stmt>FoursquareCheckinEvent(Event)<block_start>"""
Event triggered when a new check-in occurs.
"""<def_stmt>__init__ self checkin:Dict[str Any] *args **kwargs<block_start>super().__init__(*args checkin=checkin **kwargs)<block_end><block_end># vim:sw=4:ts=4:et:
|
_base_="base.py"<line_sep>fold=1<line_sep>percent=1<line_sep>data=dict(samples_per_gpu=1 workers_per_gpu=1 train=dict(ann_file="data/coco/annotations/semi_supervised/instances_train2017.${fold}@${percent}.json" img_prefix="data/coco/train2017/" ) )<line_sep>work_dir="work_dirs/${cfg_name}/${percent}/${fold}"<line_sep>log_config=dict(interval=50 hooks=[dict(type="TextLoggerHook") dict(type="WandbLoggerHook" init_kwargs=dict(project="pre_release" name="${cfg_name}" config=dict(fold="${fold}" percent="${percent}" work_dirs="${work_dir}" total_step="${runner.max_iters}" ) ) by_epoch=<false> ) ] )<line_sep>
|
<import_stmt>os<import_stmt>cupy<import_stmt>numpy<as>np<import_stmt>pytest<import_stmt>skimage.data<import_from_stmt>PIL Image<import_stmt>cucim.core.operations.spatial<as>spt<def_stmt>get_input_arr <block_start>img=skimage.data.astronaut()<line_sep>arr=np.asarray(img)<line_sep>arr=np.transpose(arr)<line_sep><return>arr<block_end><def_stmt>get_rotated_data <block_start>dirname=os.path.dirname(__file__)<line_sep>img1=Image.open(os.path.join(os.path.abspath(dirname) "rotated.png"))<line_sep>arr_o=np.asarray(img1)<line_sep>arr_o=np.transpose(arr_o)<line_sep><return>arr_o<block_end><def_stmt>test_rotate90_param <block_start>arr=get_input_arr()<with_stmt>pytest.raises(TypeError)<block_start>img=Image.fromarray(arr.T 'RGB')<line_sep>spt.image_rotate_90(img 1 [1 2])<block_end><block_end><def_stmt>test_rotate90_numpy_input <block_start>arr=get_input_arr()<line_sep>rotate90_arr=get_rotated_data()<line_sep>output=spt.image_rotate_90(arr 1 [1 2])<assert_stmt>np.allclose(output rotate90_arr)<block_end><def_stmt>test_rotate90_cupy_input <block_start>arr=get_input_arr()<line_sep>rotate90_arr=get_rotated_data()<line_sep>cupy_arr=cupy.asarray(arr)<line_sep>cupy_output=spt.image_rotate_90(cupy_arr 1 [1 2])<line_sep>np_output=cupy.asnumpy(cupy_output)<assert_stmt>np.allclose(np_output rotate90_arr)<block_end><def_stmt>test_rotate90_batchinput <block_start>arr=get_input_arr()<line_sep>rotate90_arr=get_rotated_data()<line_sep>arr_batch=np.stack((arr )<times>8 axis=0)<line_sep>np_output=spt.image_rotate_90(arr_batch 1 [2 3])<assert_stmt>np_output.shape[0]<eq>8<for_stmt>i range(np_output.shape[0])<block_start><assert_stmt>np.allclose(np_output[i] rotate90_arr)<block_end><block_end>
|
<import_from_stmt>pycalphad.core.constants INTERNAL_CONSTRAINT_SCALING<import_from_stmt>pycalphad.codegen.sympydiff_utils build_constraint_functions<import_from_stmt>collections namedtuple<line_sep>ConstraintTuple=namedtuple('ConstraintTuple' ['internal_cons_func' 'internal_cons_jac' 'internal_cons_hess' 'num_internal_cons'])<def_stmt>build_constraints mod variables parameters=<none><block_start>internal_constraints=mod.get_internal_constraints()<line_sep>internal_constraints=[INTERNAL_CONSTRAINT_SCALING<times>x<for>x internal_constraints]<line_sep>cf_output=build_constraint_functions(variables internal_constraints parameters=parameters)<line_sep>internal_cons_func=cf_output.cons_func<line_sep>internal_cons_jac=cf_output.cons_jac<line_sep>internal_cons_hess=cf_output.cons_hess<line_sep><return>ConstraintTuple(internal_cons_func=internal_cons_func internal_cons_jac=internal_cons_jac internal_cons_hess=internal_cons_hess num_internal_cons=len(internal_constraints))<block_end>
|
<import_from_stmt>functools update_wrapper partial<import_stmt>json<import_stmt>logging<import_from_stmt>cachebrowser.models Host<import_stmt>click<import_from_stmt>cachebrowser.api.core APIManager APIRequest<import_from_stmt>cachebrowser.bootstrap BootstrapError<line_sep>main_commands=['hostcli' 'cdncli' 'bootstrap']<line_sep>api=APIManager()<line_sep>logger=logging.getLogger(__name__)<def_stmt>forward_to_api route params=<none><block_start><def_stmt>wrapper func<block_start>@click.pass_obj<def_stmt>inner context **kwargs<block_start>request_params=params.copy()<if>params<else>{}<line_sep>request_params.update(kwargs)<line_sep>request=APIRequest(route request_params)<line_sep>request.reply=partial(func context)<line_sep>api.handle_api_request(context request)<block_end><return>update_wrapper(inner func)<block_end><return>wrapper<block_end>@click.group('host')<def_stmt>hostcli <block_start><pass><block_end>@hostcli.command('add')@forward_to_api('/hosts/add')@click.argument('hostname')@click.argument('cdn')@click.option('--ssl/--no-ssl' 'ssl' default=<true>)<def_stmt>addhost context<block_start>click.echo("New host added")<block_end>@hostcli.command('list')@forward_to_api('/hosts' {'page':0 'num_per_page':0})<def_stmt>listhost context hosts<block_start>click.echo('\n'.join([host['hostname']<for>host hosts]))<block_end>@click.group('cdn')<def_stmt>cdncli <block_start><pass><block_end>@cdncli.command('add')@forward_to_api('/cdns/add')@click.argument('id')@click.option('--name')@click.option('--edge-server')<def_stmt>addcdn context<block_start>click.echo("New CDN added")<block_end>@cdncli.command('list')@forward_to_api('/cdns' {'page':0 'num_per_page':0})<def_stmt>listhost context cdns<block_start>click.echo('\n'.join([cdn['id']<for>cdn cdns]))<block_end>@click.command('bootstrap')@click.option('--save/--no-save' is_flag=<true> default=<false> help="Save bootstrap information to database (default --no-save)")@click.argument('hostname')@click.pass_obj<def_stmt>bootstrap context save hostname<block_start><try_stmt><block_start>host_data=context.bootstrapper.lookup_host(hostname)<block_end><except_stmt>BootstrapError<block_start>logger.warning("No bootstrap information found for host '{}'".format(hostname))<line_sep><return><block_end>logger.info(json.dumps(host_data indent=4))<if_stmt>save<block_start>host=Host(**host_data)<line_sep>host.save()<block_end><block_end>
|
<import_stmt>numpy<as>np<import_from_stmt>rllab.envs.mujoco.hill.hill_env HillEnv<import_from_stmt>rllab.envs.mujoco.walker2d_env Walker2DEnv<import_from_stmt>rllab.misc.overrides overrides<import_stmt>rllab.envs.mujoco.hill.terrain<as>terrain<import_from_stmt>rllab.spaces Box<class_stmt>Walker2DHillEnv(HillEnv)<block_start>MODEL_CLASS=Walker2DEnv<line_sep>@overrides<def_stmt>_mod_hfield self hfield# clear a flat patch for the robot to start off from
<block_start><return>terrain.clear_patch(hfield Box(np.array([-2.0 -2.0]) np.array([-0.5 -0.5])))<block_end><block_end>
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
<import_stmt>os<import_from_stmt>twitter.common.contextutil pushd temporary_dir<def_stmt>test_simple_pushd <block_start>pre_cwd=os.getcwd()<with_stmt>temporary_dir()<as>tempdir<block_start><with_stmt>pushd(tempdir)<as>path<block_start><assert_stmt>path<eq>tempdir<assert_stmt>os.getcwd()<eq>os.path.realpath(tempdir)<block_end><assert_stmt>os.getcwd()<eq>pre_cwd<block_end><assert_stmt>os.getcwd()<eq>pre_cwd<block_end><def_stmt>test_nested_pushd <block_start>pre_cwd=os.getcwd()<with_stmt>temporary_dir()<as>tempdir1<block_start><with_stmt>pushd(tempdir1)<as>path1<block_start><assert_stmt>os.getcwd()<eq>os.path.realpath(tempdir1)<with_stmt>temporary_dir(root_dir=tempdir1)<as>tempdir2<block_start><with_stmt>pushd(tempdir2)<as>path2<block_start><assert_stmt>os.getcwd()<eq>os.path.realpath(tempdir2)<block_end><assert_stmt>os.getcwd()<eq>os.path.realpath(tempdir1)<block_end><assert_stmt>os.getcwd()<eq>os.path.realpath(tempdir1)<block_end><assert_stmt>os.getcwd()<eq>pre_cwd<block_end><assert_stmt>os.getcwd()<eq>pre_cwd<block_end>
|
"""An example of embedding a RichJupyterWidget with an in-process kernel.
We recommend using a kernel in a separate process as the normal option - see
embed_qtconsole.py for more information. In-process kernels are not well
supported.
To run this example:
python3 inprocess_qtconsole.py
"""<import_from_stmt>qtpy QtWidgets<import_from_stmt>qtconsole.rich_jupyter_widget RichJupyterWidget<import_from_stmt>qtconsole.inprocess QtInProcessKernelManager<def_stmt>show <block_start><global>ipython_widget# Prevent from being garbage collected
# Create an in-process kernel
kernel_manager=QtInProcessKernelManager()<line_sep>kernel_manager.start_kernel(show_banner=<false>)<line_sep>kernel=kernel_manager.kernel<line_sep>kernel.gui='qt4'<line_sep>kernel_client=kernel_manager.client()<line_sep>kernel_client.start_channels()<line_sep>ipython_widget=RichJupyterWidget()<line_sep>ipython_widget.kernel_manager=kernel_manager<line_sep>ipython_widget.kernel_client=kernel_client<line_sep>ipython_widget.show()<block_end><if_stmt>__name__<eq>"__main__"<block_start>app=QtWidgets.QApplication([])<line_sep>show()<line_sep>app.exec_()<block_end>
|
"""This is the receiver for the tokenizer model."""<import_from_stmt>typing NamedTuple Optional<import_from_stmt>lexos.receivers.base_receiver BaseReceiver<class_stmt>TokenizerOption(NamedTuple)<block_start>"""The typed tuple to hold tokenizer front end option."""<line_sep>start:Optional[int]<line_sep>length:Optional[int]<line_sep>search:Optional[str]<line_sep>sort_column:Optional[int]<line_sep>sort_method:Optional[bool]<line_sep>csv_documents_as_rows:Optional[bool]<block_end><class_stmt>TokenizerReceiver(BaseReceiver)<block_start>"""Get the tokenizer table orientation from front end."""<def_stmt>__init__ self<block_start>"""Initialize the class."""<line_sep>super().__init__()<block_end><def_stmt>options_from_front_end self<arrow>TokenizerOption<block_start>"""Get the tokenizer orientation from front end.
:return: a TokenizerOption object that holds the front-end options.
"""<line_sep># This exception is here because when header is requested, values
# above related to data table drawing are not passed in.
<try_stmt><block_start>start=int(self._front_end_data["tokenizer_table_page_number"])<line_sep>search=self._front_end_data["tokenizer_table_search_input"]<line_sep>length=int(self._front_end_data["tokenizer_table_row_count"])<line_sep>sort_method=bool(self._front_end_data["tokenizer_table_sort_mode"]<eq>"Ascending")<line_sep>sort_column=int(self._front_end_data["tokenizer_table_selected_column"])<line_sep>csv_documents_as_rows=bool(self._front_end_data["csv_orientation"]<eq>"Documents as Rows"<if>"csv_orientation"<in>self._front_end_data<else><true>)<block_end><except_stmt>KeyError<block_start>start=<none><line_sep>search=<none><line_sep>length=<none><line_sep>sort_method=<none><line_sep>sort_column=<none><line_sep>csv_documents_as_rows=<none><block_end># Pack everything and returns it as a NamedTuple.
<return>TokenizerOption(start=start length=length search=search sort_column=sort_column sort_method=sort_method csv_documents_as_rows=csv_documents_as_rows)<block_end><block_end>
|
<import_from_stmt>testing.test_interpreter BaseTestInterpreter<class_stmt>TestStandardModule(BaseTestInterpreter)<block_start><def_stmt>test_escapeshellarg self<block_start>output=self.run('''
echo escapeshellarg("xyz");
echo escapeshellarg('$X');
echo escapeshellarg("'");
echo escapeshellarg("x'y\\"z");
echo escapeshellarg("\\\\");
''')<assert_stmt>self.unwrap(output[0])<eq>"'xyz'"<assert_stmt>self.unwrap(output[1])<eq>"'$X'"<assert_stmt>self.unwrap(output[2])<eq>"''\\'''"<assert_stmt>self.unwrap(output[3])<eq>"'x'\\''y\"z'"<assert_stmt>self.unwrap(output[4])<eq>"'\\'"<block_end><def_stmt>test_shell_exec self<block_start>output=self.run('''
echo shell_exec('doesnotexist');
echo shell_exec('echo 0');
''')<assert_stmt>output[0]<eq>self.space.w_Null<assert_stmt>self.space.str_w(output[1])<eq>"0\n"<block_end><def_stmt>test_exec self<block_start>output=self.run('''
echo exec('doesnotexist');
echo exec('echo a && echo b');
''')<assert_stmt>output[0]<eq>self.space.wrap('')<assert_stmt>self.space.str_w(output[1])<eq>"b"<block_end><def_stmt>test_exec_error self<block_start><with_stmt>self.warnings(['Warning: exec(): Cannot execute a blank command'])<block_start>output=self.run('''
echo exec('');
echo exec(123);
''')<block_end><assert_stmt>output<eq>[self.space.w_False self.space.wrap('')]<block_end><def_stmt>test_exec_2 self<block_start>output=self.run('''
$arr = array('foo');
echo exec('echo a && echo b', $arr);
echo $arr;
''')<assert_stmt>map(self.space.str_w output[1].as_list_w())<eq>['foo' 'a' 'b']<block_end><block_end>
|
<import_from_stmt>django.test SimpleTestCase override_settings<import_from_stmt>corehq.apps.hqwebapp.login_utils get_custom_login_page<class_stmt>TestCustomLogin(SimpleTestCase)<block_start>@override_settings(CUSTOM_LANDING_TEMPLATE=<none>)<def_stmt>test_nothing_configured self<block_start>self.assertEqual(<none> get_custom_login_page('example.com'))<block_end>@override_settings(CUSTOM_LANDING_TEMPLATE='custom/login.html')<def_stmt>test_string_configured self<block_start>self.assertEqual('custom/login.html' get_custom_login_page('example.com'))<block_end>@override_settings(CUSTOM_LANDING_TEMPLATE={'example.com':'custom/login.html'})<def_stmt>test_dict_match self<block_start>self.assertEqual('custom/login.html' get_custom_login_page('example.com'))<block_end>@override_settings(CUSTOM_LANDING_TEMPLATE={'example.com':'custom/login.html'})<def_stmt>test_dict_mismatch self<block_start>self.assertEqual(<none> get_custom_login_page('commcarehq.org'))<block_end>@override_settings(CUSTOM_LANDING_TEMPLATE={'example.com':'custom/login.html' 'default':'normal/login.html'})<def_stmt>test_dict_default self<block_start>self.assertEqual('custom/login.html' get_custom_login_page('example.com'))<line_sep>self.assertEqual('normal/login.html' get_custom_login_page('commcarehq.org'))<block_end><block_end>
|
<import_from_stmt>cape_privacy.coordinator.auth.api_token create_api_token<def_stmt>test_api_token <block_start>token_id="imatokenid"<line_sep>secret="<KEY>"<line_sep>token=create_api_token(token_id secret)<assert_stmt>token.token_id<eq>token_id<assert_stmt>token.secret<eq>bytes(secret "utf-8")<assert_stmt>token.version<eq>1<block_end>
|
<import_stmt>tkinter<as>tk<def_stmt>hello <block_start>print("hello!")<block_end><def_stmt>popup event<block_start>menu.post(event.x_root event.y_root)<line_sep>menu.focus()<block_end><def_stmt>popup_close event<block_start>menu.unpost()<block_end>root=tk.Tk()<line_sep># frame
frame=tk.Frame(root width=512 height=512)<line_sep>frame.pack()<line_sep># popup menu
menu=tk.Menu(root tearoff=0)<line_sep>menu.add_command(label="Undo" command=hello)<line_sep>menu.add_command(label="Redo" command=hello)<line_sep># events
frame.bind("<Button-3>" popup)<line_sep>frame.bind("<Button-1>" popup_close)<line_sep>menu.bind("<Escape>" popup_close)<line_sep>root.mainloop()<line_sep>
|
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("CSC HLT DQM")<line_sep>#-------------------------------------------------
# DQM Module Configuration
#-------------------------------------------------
process.load("DQM.CSCMonitorModule.csc_hlt_dqm_sourceclient_cfi")<line_sep>#----------------------------
# Event Source
#-----------------------------
process.load("DQM.Integration.test.inputsource_live_cfi")<line_sep>#process.EventStreamHttpReader.consumerName = 'CSC HLT DQM Consumer'
#process.EventStreamHttpReader.sourceURL = "http://localhost:50082/urn:xdaq-application:lid=29"
#----------------------------
# DQM Environment
#-----------------------------
process.load("DQMServices.Core.DQM_cfg")<line_sep>process.load("DQMServices.Components.DQMEnvironment_cfi")<line_sep>#----------------------------
# DQM Playback Environment
#-----------------------------
process.load("DQM.Integration.test.environment_playback_cfi")<line_sep>process.dqmEnv.subSystemFolder="CSC"<line_sep>process.DQM.collectorHost='pccmsdqm02.cern.ch'<line_sep>#process.DQM.collectorHost = 'localhost'
process.dqmSaver.dirName='.'<line_sep>#--------------------------
# Message Logger
#--------------------------
MessageLogger=cms.Service("MessageLogger" suppressInfo=cms.untracked.vstring('source') suppressDebug=cms.untracked.vstring('source') suppressWarning=cms.untracked.vstring('source') cout=cms.untracked.PSet(threshold=cms.untracked.string('INFO') WARNING=cms.untracked.PSet(limit=cms.untracked.int32(0)) noLineBreaks=cms.untracked.bool(<false>)) detailedInfo=cms.untracked.PSet(threshold=cms.untracked.string('INFO')) critical=cms.untracked.PSet(threshold=cms.untracked.string('ERROR')) debug=cms.untracked.PSet(threshold=cms.untracked.string('DEBUG')) debugModules=cms.untracked.vstring('CSCHLTMonitormodule') destinations=cms.untracked.vstring(# 'debug',
# 'detailedInfo',
# 'critical',
# 'cout'
))<line_sep>#--------------------------
# Sequences
#--------------------------
process.p=cms.Path(process.dqmCSCClient+process.dqmEnv+process.dqmSaver)<line_sep>
|
# -*- coding: utf-8 -*-
# @ Time : 2021/4/6 14:46
# @ Author : Redtree
# @ File : db_manager
# @ Desc : Split the flask_migrate functionality into its own entry point so it does not conflict with debugging flask_app in the local IDE.
<import_from_stmt>__init__ manager<line_sep>manager.run()<line_sep>'''
In the project root directory, run
python db_migrate_manager.py db init
python db_migrate_manager.py db migrate
python db_migrate_manager.py db upgrade
python db_migrate_manager.py db --help
'''<line_sep>
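The `manager` object comes from the package `__init__`, which is not shown here. A minimal sketch of the wiring it usually implies, assuming Flask-Script plus an older Flask-Migrate release that still ships `MigrateCommand` (the app, database URI and module layout are placeholders):

# Hypothetical __init__.py wiring for the manager imported above.
from flask import Flask
from flask_script import Manager
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'   # placeholder URI

db = SQLAlchemy(app)
migrate = Migrate(app, db)

manager = Manager(app)
manager.add_command('db', MigrateCommand)   # enables "python db_migrate_manager.py db ..."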
|
"""Common functionalities for OSI-450 dataset cmorization."""<import_stmt>logging<import_stmt>os<import_stmt>glob<import_from_stmt>datetime datetime timedelta<import_from_stmt>calendar monthrange isleap<import_stmt>numpy<as>np<import_stmt>iris<import_stmt>iris.exceptions<import_from_stmt>iris.cube Cube CubeList<import_from_stmt>iris.coords AuxCoord<import_from_stmt>iris.coord_categorisation add_day_of_year<import_from_stmt>esmvalcore.preprocessor monthly_statistics<import_from_stmt>.utilities set_global_atts convert_timeunits fix_var_metadata save_variable <line_sep>logger=logging.getLogger(__name__)<class_stmt>OSICmorizer()<block_start>"""Cmorizer for OSI-450 datasets."""<def_stmt>__init__ self in_dir out_dir cfg hemisphere<block_start>self.in_dir=in_dir<line_sep>self.out_dir=out_dir<line_sep>self.cfg=cfg<line_sep>self.hemisphere=hemisphere<line_sep>self.min_days=self.cfg['custom'].get('min_days' 50)<block_end><def_stmt>cmorize self<block_start>"""Cmorize OSI-450 or OSI-409 dataset."""<line_sep>logger.info("Starting cmorization for Tier%s OBS files: %s" self.cfg['attributes']['tier'] self.cfg['attributes']['dataset_id'])<line_sep>logger.info("Input data from: %s" self.in_dir)<line_sep>logger.info("Output will be written to: %s" self.out_dir)<line_sep># run the cmorization
first_run=<true><for_stmt>var,vals self.cfg['variables'].items()<block_start>var_info={}<for_stmt>mip vals['mip']<block_start>var_info[mip]=self.cfg['cmor_table'].get_variable(mip var)<block_end>file_pattern='{0}_{1}_{2}_*.nc'.format(vals['raw'] self.hemisphere vals['grid'])<for_stmt>year os.listdir(self.in_dir)<block_start>year=int(year)<line_sep>logger.info("CMORizing var %s for year %s" var year)<line_sep>raw_info={'name':vals['raw'] 'file':os.path.join(self.in_dir str(year) '??' file_pattern)}<line_sep>self._extract_variable(var_info raw_info year vals['mip'])<if_stmt>first_run<block_start>sample_file=glob.glob(os.path.join(self.in_dir str(year) '01' file_pattern))[0]<line_sep>cube=iris.load_cube(sample_file iris.Constraint(# pylint: disable=cell-var-from-loop
cube_func=<lambda>c:c.var_name<eq>raw_info['name']))<line_sep>self._create_areacello(cube)<line_sep>first_run=<false><block_end><block_end><block_end><block_end><def_stmt>_extract_variable self var_infos raw_info year mips<block_start>"""Extract to all vars."""<line_sep>cubes=iris.load(raw_info['file'] iris.Constraint(cube_func=<lambda>c:c.var_name<eq>raw_info['name']))<line_sep>tracking_ids=self._unify_attributes(cubes)<line_sep>cube=cubes.concatenate_cube()<del_stmt>cubes<if_stmt>tracking_ids<block_start>cube.attributes['tracking_ids']=tracking_ids<block_end>cube.coord('projection_x_coordinate').var_name='x'<line_sep>cube.coord('projection_y_coordinate').var_name='y'<line_sep>lon_coord=cube.coord('longitude')<line_sep>lon_coord.points[lon_coord.points<l>0]<augadd>360<line_sep>source_cube=cube<line_sep>attrs=self.cfg['attributes']<for_stmt>mip mips<block_start>var_info=var_infos[mip]<line_sep>attrs['mip']=mip<if_stmt>var_info.frequency<eq>'mon'<block_start>cube=monthly_statistics(source_cube)<line_sep>cube=self._fill_months(cube)<block_end><elif_stmt>var_info.frequency<eq>'day'<block_start>cube=self._fill_days(source_cube year)<block_end><if_stmt><not>cube<block_start><continue><block_end>logger.debug(cube)<line_sep>fix_var_metadata(cube var_info)<line_sep>convert_timeunits(cube year)<line_sep>set_global_atts(cube attrs)<line_sep>self._try_remove_coord(cube 'year')<line_sep>self._try_remove_coord(cube 'day_of_year')<line_sep>self._try_remove_coord(cube 'month_number')<line_sep>self._try_remove_coord(cube 'day_of_month')<line_sep>save_variable(cube var_info.short_name self.out_dir attrs)<block_end><return>cube<block_end>@staticmethod<def_stmt>_try_remove_coord cube coord<block_start><try_stmt><block_start>cube.remove_coord(coord)<block_end><except_stmt>iris.exceptions.CoordinateNotFoundError<block_start><pass><block_end><block_end>@staticmethod<def_stmt>_fill_months cube<block_start><if_stmt>cube.coord('time').shape[0]<eq>12<block_start><return>cube<block_end>cubes=CubeList(cube.slices_over('time'))<line_sep>model_cube=cubes[0].copy()<for_stmt>month range(1 13)<block_start>month_constraint=iris.Constraint(# pylint: disable=cell-var-from-loop
time=<lambda>cell:cell.point.month<eq>month)<if_stmt>cubes.extract(month_constraint)<block_start><continue><block_end>cubes.append(OSICmorizer._create_nan_cube(model_cube month month=<true>))<block_end>cube=cubes.merge_cube()<line_sep><return>cube<block_end><def_stmt>_fill_days self cube year<block_start><if_stmt>cube.coord('time').shape[0]<l>self.min_days<block_start>logger.warning('Only %s days available. Skip generation of daily files' cube.coord('time').shape[0])<line_sep><return><none><block_end>total_days=366<if>isleap(year)<else>365<if_stmt>cube.coord('time').shape[0]<l>total_days<block_start>cubes=OSICmorizer._add_nan_timesteps(cube total_days)<line_sep>cube=cubes.merge_cube()<line_sep>cube.remove_coord('day_of_year')<del_stmt>cubes<block_end><return>cube<block_end>@staticmethod<def_stmt>_add_nan_timesteps cube total_days<block_start>add_day_of_year(cube 'time')<line_sep>cubes=CubeList(cube.slices_over('time'))<line_sep>model_cube=cubes[0].copy()<line_sep>model_cube.remove_coord('day_of_year')<for_stmt>day_of_year range(total_days)<block_start>day_constraint=iris.Constraint(day_of_year=day_of_year+1)<if_stmt>cubes.extract(day_constraint)<block_start><continue><block_end>nan_cube=OSICmorizer._create_nan_cube(model_cube day_of_year month=<false>)<line_sep>add_day_of_year(nan_cube 'time')<line_sep>cubes.append(nan_cube)<block_end><del_stmt>model_cube<line_sep><return>cubes<block_end>@staticmethod<def_stmt>_create_nan_cube model_cube num month<block_start>nan_cube=model_cube.copy(np.ma.masked_all(model_cube.shape dtype=model_cube.dtype))<line_sep>time_coord=nan_cube.coord('time')<line_sep>nan_cube.remove_coord(time_coord)<line_sep>date=time_coord.cell(0).point<if_stmt>month<block_start>date=datetime(date.year num date.day)<line_sep>bounds=(datetime(date.year num 1) datetime(date.year num monthrange(date.year num)[1]))<block_end><else_stmt><block_start>date=datetime(date.year 1 1 12)+timedelta(days=num)<line_sep>bounds=(datetime(date.year 1 1)+timedelta(days=num) datetime(date.year 1 1 23 59)+timedelta(days=num))<block_end>date=time_coord.units.date2num(date)<line_sep>bounds=(time_coord.units.date2num(bounds[0]) time_coord.units.date2num(bounds[1]) )<line_sep>nan_cube.add_aux_coord(AuxCoord([date] standard_name=time_coord.standard_name var_name=time_coord.var_name long_name=time_coord.long_name units=time_coord.units attributes=time_coord.attributes bounds=[bounds] ))<line_sep><return>nan_cube<block_end>@staticmethod<def_stmt>_unify_attributes cubes<block_start>tracking_ids=[]<for_stmt>cube cubes# OSI-409 and OSI-450 do not have the same attributes
<block_start><try_stmt><block_start>tracking_ids.append(cube.attributes['tracking_id'])<block_end><except_stmt>KeyError<block_start><pass><block_end>to_remove=['time_coverage_start' 'time_coverage_end' 'history' 'tracking_id' 'start_date' 'stop_date' ]<for_stmt>attr to_remove<block_start><try_stmt><block_start><del_stmt>cube.attributes[attr]<block_end><except_stmt>KeyError<block_start><pass><block_end><block_end><block_end><return>tracking_ids<block_end><def_stmt>_create_areacello self sample_cube<block_start><if_stmt><not>self.cfg['custom'].get('create_areacello' <false>)<block_start><return><block_end>var_info=self.cfg['cmor_table'].get_variable('fx' 'areacello')<line_sep>lat_coord=sample_cube.coord('latitude')<line_sep>self.cfg['attributes']['mip']='fx'<line_sep>cube=Cube(np.full(lat_coord.shape self.cfg['custom']['grid_cell_size'] np.float32) standard_name=var_info.standard_name long_name=var_info.long_name var_name=var_info.short_name units='m2' )<line_sep>cube.add_aux_coord(lat_coord (0 1))<line_sep>cube.add_aux_coord(sample_cube.coord('longitude') (0 1))<line_sep>cube.add_dim_coord(sample_cube.coord('projection_y_coordinate') 0)<line_sep>cube.add_dim_coord(sample_cube.coord('projection_x_coordinate') 1)<line_sep>cube.coord('projection_x_coordinate').var_name='x'<line_sep>cube.coord('projection_y_coordinate').var_name='y'<line_sep>fix_var_metadata(cube var_info)<line_sep>set_global_atts(cube self.cfg['attributes'])<line_sep>save_variable(cube var_info.short_name self.out_dir self.cfg['attributes'] zlib=<true>)<block_end><block_end>
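A rough sketch of how this cmorizer class is typically driven; only the constructor and `cmorize()` signatures come from the class above, while the entry-point name and the 'nh' hemisphere tag are assumptions:

# Hypothetical driver function: in_dir/out_dir/cfg are supplied by the calling
# framework, and 'nh' selects the northern-hemisphere file pattern.
def cmorize_osi(in_dir, out_dir, cfg):
    OSICmorizer(in_dir, out_dir, cfg, 'nh').cmorize()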
|
<import_from_stmt>.fixtures elasticsearch<def_stmt>test_base_os host<block_start><assert_stmt>host.system_info.distribution<eq>'centos'<assert_stmt>host.system_info.release<eq>'7'<block_end><def_stmt>test_no_core_files_exist_in_root host<block_start>core_file_check_cmdline='ls -l /core*'<assert_stmt>host.run(core_file_check_cmdline).exit_status<ne>0<block_end><def_stmt>test_all_elasticsearch_files_are_gid_0 host<block_start>check_for_files_with_gid_0_command=("cd /usr/share && "<concat>"find ./elasticsearch ! -gid 0 | "<concat>"egrep '.*'")<assert_stmt>host.run(check_for_files_with_gid_0_command).exit_status<ne>0<block_end>
|
r"""
Disallows using incorrect magic comments.
This is what a basic ``comment`` type token looks like:
.. code:: python
TokenInfo(
type=57 (COMMENT),
string='# noqa: WPS100',
start=(1, 4),
end=(1, 16),
line="u'' # noqa: WPS100\n",
)
All comments have the same type.
"""<import_stmt>re<import_stmt>tokenize<import_from_stmt>token ENDMARKER<import_from_stmt>typing ClassVar<import_from_stmt>typing.re Pattern<import_from_stmt>typing_extensions Final final<import_from_stmt>wemake_python_styleguide.constants MAX_NO_COVER_COMMENTS STDIN<import_from_stmt>wemake_python_styleguide.logic.system is_executable_file is_windows<import_from_stmt>wemake_python_styleguide.logic.tokens.constants NEWLINES<import_from_stmt>wemake_python_styleguide.logic.tokens.strings get_comment_text<import_from_stmt>wemake_python_styleguide.violations.best_practices EmptyCommentViolation ForbiddenInlineIgnoreViolation OveruseOfNoCoverCommentViolation OveruseOfNoqaCommentViolation ShebangViolation WrongDocCommentViolation WrongMagicCommentViolation <import_from_stmt>wemake_python_styleguide.visitors.base BaseTokenVisitor<line_sep>EMPTY_STRING:Final=''<line_sep>SENTINEL_TOKEN:Final=tokenize.TokenInfo(type=ENDMARKER string=EMPTY_STRING start=(0 0) end=(0 0) line=EMPTY_STRING )<line_sep>@final<class_stmt>WrongCommentVisitor(BaseTokenVisitor)<block_start>"""Checks comment tokens."""<line_sep>_no_cover:ClassVar[Pattern]=re.compile(r'^pragma:\s+no\s+cover')<line_sep>_type_check:ClassVar[Pattern]=re.compile(r'^type:\s?([\w\d\[\]\'\"\.]+)$' )<def_stmt>__init__ self *args **kwargs<arrow><none><block_start>"""Initializes a counter."""<line_sep>super().__init__(*args **kwargs)<line_sep>self._no_cover_count=0<block_end><def_stmt>visit_comment self token:tokenize.TokenInfo<arrow><none><block_start>"""Performs comment checks."""<line_sep>self._check_typed_ast(token)<line_sep>self._check_empty_doc_comment(token)<line_sep>self._check_cover_comments(token)<block_end><def_stmt>_check_typed_ast self token:tokenize.TokenInfo<arrow><none><block_start>comment_text=get_comment_text(token)<line_sep>match=self._type_check.match(comment_text)<if_stmt><not>match<block_start><return><block_end>declared_type=match.groups()[0].strip()<if_stmt><not>declared_type.startswith('ignore')<block_start>self.add_violation(WrongMagicCommentViolation(token text=comment_text) )<block_end><block_end><def_stmt>_check_empty_doc_comment self token:tokenize.TokenInfo<arrow><none><block_start><if_stmt>get_comment_text(token)<eq>':'<block_start>self.add_violation(WrongDocCommentViolation(token))<block_end><block_end><def_stmt>_check_cover_comments self token:tokenize.TokenInfo<arrow><none><block_start>comment_text=get_comment_text(token)<line_sep>match=self._no_cover.match(comment_text)<if_stmt><not>match<block_start><return><block_end>self._no_cover_count<augadd>1<block_end><def_stmt>_post_visit self<arrow><none><block_start><if_stmt>self._no_cover_count<g>MAX_NO_COVER_COMMENTS<block_start>self.add_violation(OveruseOfNoCoverCommentViolation(text=str(self._no_cover_count) baseline=MAX_NO_COVER_COMMENTS ) )<block_end><block_end><block_end>@final<class_stmt>EmptyCommentVisitor(BaseTokenVisitor)<block_start>"""Checks empty comment tokens."""<def_stmt>__init__ self *args **kwargs<arrow><none><block_start>"""Initializes fields to track empty comments."""<line_sep>super().__init__(*args **kwargs)<line_sep>self._line_num=-1<line_sep>self._prev_comment_line_num=-1<line_sep>self._prev_non_empty=-1<line_sep>self._in_same_block=<true><line_sep>self._block_alerted=<false><line_sep>self._reserved_token=SENTINEL_TOKEN<block_end><def_stmt>visit_comment self token:tokenize.TokenInfo<arrow><none><block_start>"""Performs comment checks."""<line_sep>self._check_empty_comment(token)<block_end><def_stmt>_check_empty_comment self 
token:tokenize.TokenInfo<arrow><none><block_start>self._line_num=token.start[0]<line_sep>self._check_same_block(token)<line_sep># Triggering reserved token to be added
<if_stmt><not>self._in_same_block<and>self._has_reserved_token()<block_start>self.add_violation(EmptyCommentViolation(self._reserved_token))<line_sep>self._block_alerted=<true><line_sep>self._reserved_token=SENTINEL_TOKEN<block_end><if_stmt>get_comment_text(token)<eq>EMPTY_STRING<block_start><if_stmt><not>self._in_same_block# Stand alone empty comment or first empty comment in a block
<block_start>self.add_violation(EmptyCommentViolation(token))<line_sep>self._block_alerted=<true><line_sep>self._in_same_block=<true><block_end>to_reserve=(# Empty comment right after non-empty, block not yet alerted
self._is_consecutive(self._prev_non_empty)<and>self._in_same_block<and><not>self._block_alerted)<if_stmt>to_reserve<block_start>self._reserved_token=token<block_end><block_end><else_stmt><block_start>self._prev_non_empty=self._line_num<if_stmt>self._in_same_block<block_start>self._reserved_token=SENTINEL_TOKEN<block_end><block_end>self._prev_comment_line_num=token.start[0]<block_end><def_stmt>_check_same_block self token:tokenize.TokenInfo<arrow><none><block_start>self._in_same_block=(self._is_consecutive(self._prev_comment_line_num)<and>token.line.lstrip()[0]<eq>'#'# is inline comment
)<if_stmt><not>self._in_same_block<block_start>self._block_alerted=<false><block_end><block_end><def_stmt>_is_consecutive self prev_line_num:int<arrow>bool<block_start><return>(self._line_num-prev_line_num<eq>1)<block_end><def_stmt>_has_reserved_token self<arrow>bool<block_start><return>(self._reserved_token<ne>SENTINEL_TOKEN)<block_end><def_stmt>_post_visit self<arrow><none><block_start><if_stmt>self._has_reserved_token()<and><not>self._block_alerted<block_start>self.add_violation(EmptyCommentViolation(self._reserved_token))<block_end><block_end><block_end>@final<class_stmt>ShebangVisitor(BaseTokenVisitor)<block_start>"""
Checks the first shebang in the file.
Code is inspired by https://github.com/xuhdev/flake8-executable
"""<line_sep>_shebang:ClassVar[Pattern]=re.compile(r'(\s*)#!')<line_sep>_python_executable:ClassVar[str]='python'<def_stmt>visit_comment self token:tokenize.TokenInfo<arrow><none><block_start>"""Checks if there is an executable mismatch."""<if_stmt><not>self._is_first_comment(token)<block_start><return><block_end># this is a regular comment, not a shebang
is_shebang=self._is_valid_shebang_line(token)<line_sep>self._check_executable_mismatch(token is_shebang=is_shebang)<if_stmt>is_shebang<block_start>self._check_valid_shebang(token)<block_end><block_end><def_stmt>_check_executable_mismatch self token:tokenize.TokenInfo * is_shebang:bool <arrow><none><block_start><if_stmt>is_windows()<or>self.filename<eq>STDIN# Windows does not have this concept of "executable" file.
# The same for STDIN inputs.
<block_start><return><block_end>is_executable=is_executable_file(self.filename)<if_stmt>is_executable<and><not>is_shebang<block_start>self.add_violation(ShebangViolation(text='file is executable but no shebang is present' ) )<block_end><elif_stmt><not>is_executable<and>is_shebang<block_start>self.add_violation(ShebangViolation(text='shebang is present but the file is not executable' ) )<block_end><block_end><def_stmt>_check_valid_shebang self token:tokenize.TokenInfo<arrow><none><block_start><if_stmt>self._python_executable<not><in>token.line<block_start>self.add_violation(ShebangViolation(text='shebang is present but does not contain `python`' ) )<block_end><if_stmt>token.start[1]<ne>0<block_start>self.add_violation(ShebangViolation(text='there is a whitespace before shebang' ) )<block_end><if_stmt>token.start[0]<ne>1<block_start>self.add_violation(ShebangViolation(text='there are blank or comment lines before shebang' ) )<block_end><block_end><def_stmt>_is_first_comment self token:tokenize.TokenInfo<arrow>bool<block_start>all_tokens=iter(self.file_tokens)<line_sep>current_token=next(all_tokens)<while_stmt><true><block_start><if_stmt>current_token<eq>token<block_start><return><true><block_end><elif_stmt>current_token.exact_type<not><in>NEWLINES<block_start><break><block_end>current_token=next(all_tokens)<block_end><return><false><block_end><def_stmt>_is_valid_shebang_line self token:tokenize.TokenInfo<arrow>bool<block_start><return>self._shebang.match(token.line)<is><not><none><block_end><block_end>@final<class_stmt>NoqaVisitor(BaseTokenVisitor)<block_start>"""Checks noqa comment tokens."""<line_sep>_noqa_check:ClassVar[Pattern]=re.compile(r'^(noqa:?)($|[A-Z\d\,\s]+)')<def_stmt>__init__ self *args **kwargs<arrow><none><block_start>"""Initializes a counter."""<line_sep>super().__init__(*args **kwargs)<line_sep>self._noqa_count=0<block_end><def_stmt>visit_comment self token:tokenize.TokenInfo<arrow><none><block_start>"""Performs comment checks."""<line_sep>self._check_noqa(token)<block_end><def_stmt>_check_noqa self token:tokenize.TokenInfo<arrow><none><block_start>comment_text=get_comment_text(token)<line_sep>match=self._noqa_check.match(comment_text)<if_stmt><not>match<block_start><return><block_end>self._noqa_count<augadd>1<line_sep>excludes=match.groups()[1].strip()<line_sep>prefix=match.groups()[0].strip()<if_stmt><not>excludes<or>prefix[-1]<ne>':'# We cannot pass the actual line here,
# since it will be ignored due to `# noqa` comment:
<block_start>self.add_violation(WrongMagicCommentViolation(text=comment_text))<line_sep><return><block_end>self._check_forbidden_noqa(excludes)<block_end><def_stmt>_check_forbidden_noqa self noqa_excludes<arrow><none><block_start>excludes_list=[ex.strip()<for>ex noqa_excludes.split(',')]<line_sep>forbidden_noqa=EMPTY_STRING.join(self.options.forbidden_inline_ignore)<for_stmt>noqa_code forbidden_noqa.split(',')<block_start>noqa_code=noqa_code.strip()<if_stmt>noqa_code<in>excludes_list<block_start>self.add_violation(ForbiddenInlineIgnoreViolation(text=str(noqa_excludes)) )<line_sep><return><block_end><if_stmt><not>noqa_code.isalpha()<block_start><continue><block_end><for_stmt>excluded excludes_list<block_start><if_stmt>re.fullmatch(r'{0}($|\d+)'.format(noqa_code) excluded)<block_start>self.add_violation(ForbiddenInlineIgnoreViolation(text=str(noqa_excludes)) )<line_sep><return><block_end><block_end><block_end><block_end><def_stmt>_post_visit self<arrow><none><block_start><if_stmt>self._noqa_count<g>self.options.max_noqa_comments<block_start>self.add_violation(OveruseOfNoqaCommentViolation(text=str(self._noqa_count)) )<block_end><block_end><block_end>
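As a standalone illustration of the `_noqa_check` pattern used by `NoqaVisitor` above (the regex is copied verbatim; the sample strings stand for comment text after the leading `# ` has been stripped):

import re

noqa_check = re.compile(r'^(noqa:?)($|[A-Z\d\,\s]+)')

print(noqa_check.match('noqa: WPS100').groups())   # ('noqa:', ' WPS100') -> codes listed, allowed
print(noqa_check.match('noqa').groups())           # ('noqa', '') -> bare noqa, reported as WrongMagicComment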
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
<import_stmt>pytest<import_from_stmt>datetime datetime timedelta<import_from_stmt>azure.core.exceptions HttpResponseError<import_from_stmt>azure.storage.blob BlobServiceClient BlobType BlobBlock BlobSasPermissions ContainerEncryptionScope generate_blob_sas generate_account_sas ResourceTypes AccountSasPermissions generate_container_sas ContainerSasPermissions <import_from_stmt>settings.testcase BlobPreparer<import_from_stmt>devtools_testutils.storage StorageTestCase<line_sep># ------------------------------------------------------------------------------
# The encryption scopes are pre-created using the management plane tool ArmClient,
# so we can use them directly in the tests.
TEST_ENCRYPTION_KEY_SCOPE="antjoscope1"<line_sep>TEST_CONTAINER_ENCRYPTION_KEY_SCOPE=ContainerEncryptionScope(default_encryption_scope="containerscope")<line_sep>TEST_CONTAINER_ENCRYPTION_KEY_SCOPE_DENY_OVERRIDE={"default_encryption_scope":"containerscope" "prevent_encryption_scope_override":<true>}<line_sep>TEST_SAS_ENCRYPTION_SCOPE="testscope1"<line_sep>TEST_SAS_ENCRYPTION_SCOPE_2="testscope2"<line_sep># ------------------------------------------------------------------------------
<class_stmt>StorageCPKNTest(StorageTestCase)<block_start><def_stmt>_setup self bsc<block_start>self.config=bsc._config<line_sep>self.container_name=self.get_resource_name('utcontainer')<line_sep># prep some test data so that they can be used in upload tests
self.byte_data=self.get_random_bytes(64<times>1024)<if_stmt>self.is_live<block_start><try_stmt><block_start>bsc.create_container(self.container_name)<block_end><except_stmt><block_start><pass><block_end><block_end><block_end><def_stmt>_teardown self bsc<block_start><if_stmt>self.is_live<block_start><try_stmt><block_start>bsc.delete_container(self.container_name)<block_end><except_stmt><block_start><pass><block_end><block_end><return>super(StorageCPKNTest self).tearDown()<block_end># --Helpers-----------------------------------------------------------------
<def_stmt>_get_blob_reference self<block_start><return>self.get_resource_name("cpk")<block_end><def_stmt>_create_block_blob self bsc blob_name=<none> data=<none> encryption_scope=<none> max_concurrency=1 overwrite=<false><block_start>blob_name=blob_name<if>blob_name<else>self._get_blob_reference()<line_sep>blob_client=bsc.get_blob_client(self.container_name blob_name)<line_sep>data=data<if>data<else>b''<line_sep>resp=blob_client.upload_blob(data encryption_scope=encryption_scope max_concurrency=max_concurrency overwrite=overwrite)<line_sep><return>blob_client resp<block_end><def_stmt>_create_append_blob self bsc encryption_scope=<none><block_start>blob_name=self._get_blob_reference()<line_sep>blob=bsc.get_blob_client(self.container_name blob_name)<line_sep>blob.create_append_blob(encryption_scope=encryption_scope)<line_sep><return>blob<block_end><def_stmt>_create_page_blob self bsc encryption_scope=<none><block_start>blob_name=self._get_blob_reference()<line_sep>blob=bsc.get_blob_client(self.container_name blob_name)<line_sep>blob.create_page_blob(1024<times>1024 encryption_scope=encryption_scope)<line_sep><return>blob<block_end># -- Test cases for APIs supporting CPK ----------------------------------------------
@pytest.mark.playback_test_only@BlobPreparer()<def_stmt>test_put_block_and_put_block_list self storage_account_name storage_account_key# Arrange
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc)<line_sep>blob_client,_=self._create_block_blob(bsc)<line_sep>blob_client.stage_block('1' b'AAA' encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep>blob_client.stage_block('2' b'BBB' encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep>blob_client.stage_block('3' b'CCC' encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act
block_list=[BlobBlock(block_id='1') BlobBlock(block_id='2') BlobBlock(block_id='3')]<line_sep>put_block_list_resp=blob_client.commit_block_list(block_list encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Assert
self.assertIsNotNone(put_block_list_resp['etag'])<line_sep>self.assertIsNotNone(put_block_list_resp['last_modified'])<line_sep>self.assertTrue(put_block_list_resp['request_server_encrypted'])<line_sep>self.assertEqual(put_block_list_resp['encryption_scope'] TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act get the blob content
blob=blob_client.download_blob()<line_sep># Assert content was retrieved with the cpk
self.assertEqual(blob.readall() b'AAABBBCCC')<line_sep>self.assertEqual(blob.properties.etag put_block_list_resp['etag'])<line_sep>self.assertEqual(blob.properties.last_modified put_block_list_resp['last_modified'])<line_sep>self.assertEqual(blob.properties.encryption_scope TEST_ENCRYPTION_KEY_SCOPE)<line_sep>self._teardown(bsc)<block_end>@pytest.mark.live_test_only@BlobPreparer()<def_stmt>test_put_block_and_put_block_list_with_blob_sas self storage_account_name storage_account_key# Arrange
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc)<line_sep>blob_name=self._get_blob_reference()<line_sep>token1=generate_blob_sas(storage_account_name self.container_name blob_name account_key=storage_account_key permission=BlobSasPermissions(read=<true> write=<true> delete=<true>) expiry=datetime.utcnow()+timedelta(hours=1) encryption_scope=TEST_SAS_ENCRYPTION_SCOPE )<line_sep>blob_client=BlobServiceClient(self.account_url(storage_account_name "blob") token1).get_blob_client(self.container_name blob_name)<line_sep>blob_client.stage_block('1' b'AAA')<line_sep>blob_client.stage_block('2' b'BBB')<line_sep>blob_client.stage_block('3' b'CCC')<line_sep># Act
block_list=[BlobBlock(block_id='1') BlobBlock(block_id='2') BlobBlock(block_id='3')]<line_sep>put_block_list_resp=blob_client.commit_block_list(block_list)<line_sep># Assert
self.assertIsNotNone(put_block_list_resp['etag'])<line_sep>self.assertIsNotNone(put_block_list_resp['last_modified'])<line_sep>self.assertTrue(put_block_list_resp['request_server_encrypted'])<line_sep>self.assertEqual(put_block_list_resp['encryption_scope'] TEST_SAS_ENCRYPTION_SCOPE)<line_sep># Act get the blob content
blob=blob_client.download_blob()<line_sep># Assert content was retrieved with the cpk
self.assertEqual(blob.readall() b'AAABBBCCC')<line_sep>self.assertEqual(blob.properties.etag put_block_list_resp['etag'])<line_sep>self.assertEqual(blob.properties.last_modified put_block_list_resp['last_modified'])<line_sep>self.assertEqual(blob.properties.encryption_scope TEST_SAS_ENCRYPTION_SCOPE)<line_sep>self._teardown(bsc)<block_end>@pytest.mark.live_test_only@BlobPreparer()<def_stmt>test_put_block_and_put_block_list_with_blob_sas_fails self storage_account_name storage_account_key# Arrange
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc)<line_sep>blob_name=self._get_blob_reference()<line_sep>token1=generate_blob_sas(storage_account_name self.container_name blob_name account_key=storage_account_key permission=BlobSasPermissions(read=<true> write=<true> delete=<true>) expiry=datetime.utcnow()+timedelta(hours=1) encryption_scope=TEST_SAS_ENCRYPTION_SCOPE )<line_sep>blob_client=BlobServiceClient(self.account_url(storage_account_name "blob") token1).get_blob_client(self.container_name blob_name)<line_sep># both ses in SAS and encryption_scopes are both set and have DIFFERENT values will throw exception
<with_stmt>self.assertRaises(HttpResponseError)<block_start>blob_client.stage_block('1' b'AAA' encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<block_end># both ses in SAS and encryption_scopes are both set and have SAME values will succeed
blob_client.stage_block('1' b'AAA' encryption_scope=TEST_SAS_ENCRYPTION_SCOPE)<line_sep># Act
block_list=[BlobBlock(block_id='1')]<line_sep># both ses in SAS and encryption_scopes are both set and have DIFFERENT values will throw exception
<with_stmt>self.assertRaises(HttpResponseError)<block_start>blob_client.commit_block_list(block_list encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<block_end># both ses in SAS and encryption_scopes are both set and have SAME values will succeed
put_block_list_resp=blob_client.commit_block_list(block_list encryption_scope=TEST_SAS_ENCRYPTION_SCOPE)<line_sep># Assert
self.assertIsNotNone(put_block_list_resp['etag'])<line_sep>self.assertIsNotNone(put_block_list_resp['last_modified'])<line_sep>self.assertTrue(put_block_list_resp['request_server_encrypted'])<line_sep>self.assertEqual(put_block_list_resp['encryption_scope'] TEST_SAS_ENCRYPTION_SCOPE)<line_sep># generate a sas with a different encryption scope
token2=generate_blob_sas(storage_account_name self.container_name blob_name account_key=storage_account_key permission=BlobSasPermissions(read=<true> write=<true> delete=<true>) expiry=datetime.utcnow()+timedelta(hours=1) encryption_scope=TEST_ENCRYPTION_KEY_SCOPE )<line_sep>blob_client_diff_encryption_scope_sas=BlobServiceClient(self.account_url(storage_account_name "blob") token2).get_blob_client(self.container_name blob_name)<line_sep># blob can be downloaded successfully no matter which encryption scope is used on the blob actually
# the encryption scope on the blob is TEST_SAS_ENCRYPTION_SCOPE while the ses in the SAS token is TEST_ENCRYPTION_KEY_SCOPE,
# yet the download still succeeds
blob=blob_client_diff_encryption_scope_sas.download_blob()<line_sep># Assert content was retrieved with the cpk
self.assertEqual(blob.readall() b'AAA')<line_sep>self.assertEqual(blob.properties.etag put_block_list_resp['etag'])<line_sep>self.assertEqual(blob.properties.last_modified put_block_list_resp['last_modified'])<line_sep>self.assertEqual(blob.properties.encryption_scope TEST_SAS_ENCRYPTION_SCOPE)<line_sep>self._teardown(bsc)<block_end>@[email protected]_test_only@BlobPreparer()<def_stmt>test_create_block_blob_with_chunks self storage_account_name storage_account_key# parallel operation
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc)<line_sep># Arrange
# to force the in-memory chunks to be used
self.config.use_byte_buffer=<true><line_sep># Act
# create_blob_from_bytes forces the in-memory chunks to be used
blob_client,upload_response=self._create_block_blob(bsc data=self.byte_data encryption_scope=TEST_ENCRYPTION_KEY_SCOPE max_concurrency=2)<line_sep># Assert
self.assertIsNotNone(upload_response['etag'])<line_sep>self.assertIsNotNone(upload_response['last_modified'])<line_sep>self.assertTrue(upload_response['request_server_encrypted'])<line_sep>self.assertEqual(upload_response['encryption_scope'] TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act get the blob content
blob=blob_client.download_blob()<line_sep># Assert content was retrieved with the cpk
self.assertEqual(blob.readall() self.byte_data)<line_sep>self.assertEqual(blob.properties.etag upload_response['etag'])<line_sep>self.assertEqual(blob.properties.last_modified upload_response['last_modified'])<line_sep>self._teardown(bsc)<block_end>@[email protected]_test_only@BlobPreparer()<def_stmt>test_create_block_blob_with_sub_streams self storage_account_name storage_account_key# problem with the recording framework can only run live
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc)<line_sep># Act
# create_blob_from_bytes forces the in-memory chunks to be used
blob_client,upload_response=self._create_block_blob(bsc data=self.byte_data encryption_scope=TEST_ENCRYPTION_KEY_SCOPE max_concurrency=2)<line_sep># Assert
self.assertIsNotNone(upload_response['etag'])<line_sep>self.assertIsNotNone(upload_response['last_modified'])<line_sep>self.assertTrue(upload_response['request_server_encrypted'])<line_sep>self.assertEqual(upload_response['encryption_scope'] TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act get the blob content
blob=blob_client.download_blob()<line_sep># Assert content was retrieved with the cpk
self.assertEqual(blob.readall() self.byte_data)<line_sep>self.assertEqual(blob.properties.etag upload_response['etag'])<line_sep>self.assertEqual(blob.properties.last_modified upload_response['last_modified'])<line_sep>self._teardown(bsc)<block_end>@pytest.mark.playback_test_only@BlobPreparer()<def_stmt>test_create_block_blob_with_single_chunk self storage_account_name storage_account_key# Act
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc)<line_sep>data=b'AAABBBCCC'<line_sep># create_blob_from_bytes forces the in-memory chunks to be used
blob_client,upload_response=self._create_block_blob(bsc data=data encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Assert
self.assertIsNotNone(upload_response['etag'])<line_sep>self.assertIsNotNone(upload_response['last_modified'])<line_sep>self.assertTrue(upload_response['request_server_encrypted'])<line_sep># Act get the blob content
blob=blob_client.download_blob()<line_sep># Assert content was retrieved with the cpk
self.assertEqual(blob.readall() data)<line_sep>self.assertEqual(blob.properties.etag upload_response['etag'])<line_sep>self.assertEqual(blob.properties.last_modified upload_response['last_modified'])<line_sep>self._teardown(bsc)<block_end>@pytest.mark.playback_test_only@BlobPreparer()<def_stmt>test_put_block_from_url_and_commit_with_cpk self storage_account_name storage_account_key# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc)<line_sep># create source blob and get source blob url
source_blob_name=self.get_resource_name("sourceblob")<line_sep>self.config.use_byte_buffer=<true># Make sure using chunk upload, then we can record the request
source_blob_client,_=self._create_block_blob(bsc blob_name=source_blob_name data=self.byte_data)<line_sep>source_blob_sas=generate_blob_sas(source_blob_client.account_name source_blob_client.container_name source_blob_client.blob_name snapshot=source_blob_client.snapshot account_key=source_blob_client.credential.account_key permission=BlobSasPermissions(read=<true>) expiry=datetime.utcnow()+timedelta(hours=1))<line_sep>source_blob_url=source_blob_client.url+"?"+source_blob_sas<line_sep># create destination blob
self.config.use_byte_buffer=<false><line_sep>destination_blob_client,_=self._create_block_blob(bsc encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act part 1: make put block from url calls
destination_blob_client.stage_block_from_url(block_id=1 source_url=source_blob_url source_offset=0 source_length=4<times>1024 encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep>destination_blob_client.stage_block_from_url(block_id=2 source_url=source_blob_url source_offset=4<times>1024 source_length=4<times>1024 encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Assert blocks
committed,uncommitted=destination_blob_client.get_block_list('all')<line_sep>self.assertEqual(len(uncommitted) 2)<line_sep>self.assertEqual(len(committed) 0)<line_sep># commit the blocks without cpk should fail
block_list=[BlobBlock(block_id='1') BlobBlock(block_id='2')]<with_stmt>self.assertRaises(HttpResponseError)<block_start>destination_blob_client.commit_block_list(block_list)<block_end># Act commit the blocks with cpk should succeed
put_block_list_resp=destination_blob_client.commit_block_list(block_list encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Assert
self.assertIsNotNone(put_block_list_resp['etag'])<line_sep>self.assertIsNotNone(put_block_list_resp['last_modified'])<line_sep>self.assertTrue(put_block_list_resp['request_server_encrypted'])<line_sep># Act get the blob content
blob=destination_blob_client.download_blob()<line_sep># Assert content was retrieved with the cpk
self.assertEqual(blob.readall() self.byte_data[0:8<times>1024])<line_sep>self.assertEqual(blob.properties.etag put_block_list_resp['etag'])<line_sep>self.assertEqual(blob.properties.last_modified put_block_list_resp['last_modified'])<line_sep>self._teardown(bsc)<block_end>@pytest.mark.playback_test_only@BlobPreparer()<def_stmt>test_append_block self storage_account_name storage_account_key# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc)<line_sep>blob_client=self._create_append_blob(bsc encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act
<for_stmt>content [b'AAA' b'BBB' b'CCC']<block_start>append_blob_prop=blob_client.append_block(content encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Assert
self.assertIsNotNone(append_blob_prop['etag'])<line_sep>self.assertIsNotNone(append_blob_prop['last_modified'])<line_sep>self.assertTrue(append_blob_prop['request_server_encrypted'])<block_end># Act get the blob content
blob=blob_client.download_blob()<line_sep># Assert content was retrieved with the cpk
self.assertEqual(blob.readall() b'AAABBBCCC')<line_sep>self._teardown(bsc)<block_end>@pytest.mark.playback_test_only@BlobPreparer()<def_stmt>test_append_block_from_url self storage_account_name storage_account_key# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc)<line_sep>source_blob_name=self.get_resource_name("sourceblob")<line_sep>self.config.use_byte_buffer=<true># chunk upload
source_blob_client,_=self._create_block_blob(bsc blob_name=source_blob_name data=self.byte_data)<line_sep>source_blob_sas=generate_blob_sas(source_blob_client.account_name source_blob_client.container_name source_blob_client.blob_name snapshot=source_blob_client.snapshot account_key=source_blob_client.credential.account_key permission=BlobSasPermissions(read=<true>) expiry=datetime.utcnow()+timedelta(hours=1))<line_sep>source_blob_url=source_blob_client.url+"?"+source_blob_sas<line_sep>self.config.use_byte_buffer=<false><line_sep>destination_blob_client=self._create_append_blob(bsc encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act
append_blob_prop=destination_blob_client.append_block_from_url(source_blob_url source_offset=0 source_length=4<times>1024 encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Assert
self.assertIsNotNone(append_blob_prop['etag'])<line_sep>self.assertIsNotNone(append_blob_prop['last_modified'])<line_sep>self.assertTrue(append_blob_prop['request_server_encrypted'])<line_sep>self.assertEqual(append_blob_prop['encryption_scope'] TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act get the blob content
blob=destination_blob_client.download_blob()<line_sep># Assert content was retrieved with the cpk
self.assertEqual(blob.readall() self.byte_data[0:4<times>1024])<line_sep>self.assertEqual(blob.properties.encryption_scope TEST_ENCRYPTION_KEY_SCOPE)<line_sep>self._teardown(bsc)<block_end>@pytest.mark.playback_test_only@BlobPreparer()<def_stmt>test_create_append_blob_with_chunks self storage_account_name storage_account_key# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc)<line_sep>blob_client=self._create_append_blob(bsc encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act
append_blob_prop=blob_client.upload_blob(self.byte_data blob_type=BlobType.AppendBlob encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Assert
self.assertIsNotNone(append_blob_prop['etag'])<line_sep>self.assertIsNotNone(append_blob_prop['last_modified'])<line_sep>self.assertTrue(append_blob_prop['request_server_encrypted'])<line_sep>self.assertEqual(append_blob_prop['encryption_scope'] TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act get the blob content
blob=blob_client.download_blob()<line_sep># Assert content was retrieved with the cpk
self.assertEqual(blob.readall() self.byte_data)<line_sep>self.assertEqual(blob.properties.encryption_scope TEST_ENCRYPTION_KEY_SCOPE)<line_sep>self._teardown(bsc)<block_end>@pytest.mark.playback_test_only@BlobPreparer()<def_stmt>test_update_page self storage_account_name storage_account_key# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc)<line_sep>blob_client=self._create_page_blob(bsc encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act
page_blob_prop=blob_client.upload_page(self.byte_data offset=0 length=len(self.byte_data) encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Assert
self.assertIsNotNone(page_blob_prop['etag'])<line_sep>self.assertIsNotNone(page_blob_prop['last_modified'])<line_sep>self.assertTrue(page_blob_prop['request_server_encrypted'])<line_sep>self.assertEqual(page_blob_prop['encryption_scope'] TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act get the blob content
blob=blob_client.download_blob(offset=0 length=len(self.byte_data))<line_sep># Assert content was retrieved with the cpk
self.assertEqual(blob.readall() self.byte_data)<line_sep>self.assertEqual(blob.properties.encryption_scope TEST_ENCRYPTION_KEY_SCOPE)<line_sep>self._teardown(bsc)<block_end>@pytest.mark.playback_test_only@BlobPreparer()<def_stmt>test_update_page_from_url self storage_account_name storage_account_key# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc)<line_sep>source_blob_name=self.get_resource_name("sourceblob")<line_sep>self.config.use_byte_buffer=<true># Make sure using chunk upload, then we can record the request
source_blob_client,_=self._create_block_blob(bsc blob_name=source_blob_name data=self.byte_data)<line_sep>source_blob_sas=generate_blob_sas(source_blob_client.account_name source_blob_client.container_name source_blob_client.blob_name snapshot=source_blob_client.snapshot account_key=source_blob_client.credential.account_key permission=BlobSasPermissions(read=<true>) expiry=datetime.utcnow()+timedelta(hours=1))<line_sep>source_blob_url=source_blob_client.url+"?"+source_blob_sas<line_sep>self.config.use_byte_buffer=<false><line_sep>blob_client=self._create_page_blob(bsc encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act
page_blob_prop=blob_client.upload_pages_from_url(source_blob_url offset=0 length=len(self.byte_data) source_offset=0 encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Assert
self.assertIsNotNone(page_blob_prop['etag'])<line_sep>self.assertIsNotNone(page_blob_prop['last_modified'])<line_sep>self.assertTrue(page_blob_prop['request_server_encrypted'])<line_sep>self.assertEqual(page_blob_prop['encryption_scope'] TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act get the blob content
blob=blob_client.download_blob(offset=0 length=len(self.byte_data))<line_sep># Assert content was retrieved with the cpk
self.assertEqual(blob.readall() self.byte_data)<line_sep>self.assertEqual(blob.properties.encryption_scope TEST_ENCRYPTION_KEY_SCOPE)<line_sep>self._teardown(bsc)<block_end>@[email protected]_test_only@BlobPreparer()<def_stmt>test_create_page_blob_with_chunks self storage_account_name storage_account_key# Act
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc)<line_sep>blob_client=bsc.get_blob_client(self.container_name self._get_blob_reference())<line_sep>page_blob_prop=blob_client.upload_blob(self.byte_data blob_type=BlobType.PageBlob max_concurrency=2 encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Assert
self.assertIsNotNone(page_blob_prop['etag'])<line_sep>self.assertIsNotNone(page_blob_prop['last_modified'])<line_sep>self.assertTrue(page_blob_prop['request_server_encrypted'])<line_sep>self.assertEqual(page_blob_prop['encryption_scope'] TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act get the blob content
blob=blob_client.download_blob()<line_sep># Assert content was retrieved with the cpk
self.assertEqual(blob.readall() self.byte_data)<line_sep>self.assertEqual(blob.properties.encryption_scope TEST_ENCRYPTION_KEY_SCOPE)<line_sep>self._teardown(bsc)<block_end>@pytest.mark.playback_test_only@BlobPreparer()<def_stmt>test_get_set_blob_metadata self storage_account_name storage_account_key# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc)<line_sep>blob_client,_=self._create_block_blob(bsc data=b'AAABBBCCC' encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act
blob_props=blob_client.get_blob_properties()<line_sep># Assert
self.assertTrue(blob_props.server_encrypted)<line_sep>self.assertEqual(blob_props['encryption_scope'] TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act set blob properties
metadata={'hello':'world' 'number':'42' 'up':'upval'}<with_stmt>self.assertRaises(HttpResponseError)<block_start>blob_client.set_blob_metadata(metadata=metadata )<block_end>blob_client.set_blob_metadata(metadata=metadata encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Assert
blob_props=blob_client.get_blob_properties()<line_sep>md=blob_props.metadata<line_sep>self.assertEqual(3 len(md))<line_sep>self.assertEqual(md['hello'] 'world')<line_sep>self.assertEqual(md['number'] '42')<line_sep>self.assertEqual(md['up'] 'upval')<line_sep>self.assertFalse('Up'<in>md)<line_sep>self._teardown(bsc)<block_end>@pytest.mark.playback_test_only@BlobPreparer()<def_stmt>test_snapshot_blob self storage_account_name storage_account_key# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc)<line_sep>blob_client,_=self._create_block_blob(bsc data=b'AAABBBCCC' encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Act without cpk should not work
<with_stmt>self.assertRaises(HttpResponseError)<block_start>blob_client.create_snapshot()<block_end># Act with cpk should work
blob_snapshot=blob_client.create_snapshot(encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Assert
self.assertIsNotNone(blob_snapshot)<line_sep>self._teardown(bsc)<block_end>@pytest.mark.playback_test_only@BlobPreparer()<def_stmt>test_list_blobs self storage_account_name storage_account_key# Arrange
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc)<line_sep>blob_client,_=self._create_block_blob(bsc blob_name="blockblob" data=b'AAABBBCCC' encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep>self._create_append_blob(bsc encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep>container_client=bsc.get_container_client(self.container_name)<line_sep>generator=container_client.list_blobs(include="metadata")<for_stmt>blob generator<block_start>self.assertIsNotNone(blob)<line_sep># Assert: every listed blob has encryption_scope
self.assertEqual(blob.encryption_scope TEST_ENCRYPTION_KEY_SCOPE)<block_end>self._teardown(bsc)<block_end>@pytest.mark.live_test_only@BlobPreparer()<def_stmt>test_list_blobs_using_container_encryption_scope_sas self storage_account_name storage_account_key# Arrange
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc)<line_sep>token=generate_container_sas(storage_account_name self.container_name storage_account_key permission=ContainerSasPermissions(read=<true> write=<true> list=<true> delete=<true>) expiry=datetime.utcnow()+timedelta(hours=1) encryption_scope=TEST_SAS_ENCRYPTION_SCOPE)<line_sep>bsc_with_sas_credential=BlobServiceClient(self.account_url(storage_account_name "blob") credential=token connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep># blob is encrypted using TEST_SAS_ENCRYPTION_SCOPE
blob_client,_=self._create_block_blob(bsc_with_sas_credential blob_name="blockblob" data=b'AAABBBCCC' overwrite=<true>)<line_sep>self._create_append_blob(bsc_with_sas_credential)<line_sep># generate a token with TEST_ENCRYPTION_KEY_SCOPE
token2=generate_container_sas(storage_account_name self.container_name storage_account_key permission=ContainerSasPermissions(read=<true> write=<true> list=<true> delete=<true>) expiry=datetime.utcnow()+timedelta(hours=1) encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep>bsc_with_diff_sas_credential=BlobServiceClient(self.account_url(storage_account_name "blob") credential=token2 connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>container_client=bsc_with_diff_sas_credential.get_container_client(self.container_name)<line_sep># The ses field in SAS token when list blobs is different from the encryption scope used on creating blob, while
# list blobs should also succeed
generator=container_client.list_blobs(include="metadata")<for_stmt>blob generator<block_start>self.assertIsNotNone(blob)<line_sep># Assert: every listed blob has encryption_scope
# and the encryption scope is the same as the one on blob creation
self.assertEqual(blob.encryption_scope TEST_SAS_ENCRYPTION_SCOPE)<block_end>self._teardown(bsc)<block_end>@pytest.mark.live_test_only@BlobPreparer()<def_stmt>test_copy_with_account_encryption_scope_sas self storage_account_name storage_account_key# Arrange
<block_start>sas_token=generate_account_sas(storage_account_name account_key=storage_account_key resource_types=ResourceTypes(object=<true> container=<true>) permission=AccountSasPermissions(read=<true> write=<true> delete=<true> list=<true>) expiry=datetime.utcnow()+timedelta(hours=1) encryption_scope=TEST_SAS_ENCRYPTION_SCOPE_2)<line_sep>bsc_with_sas_credential=BlobServiceClient(self.account_url(storage_account_name "blob") credential=sas_token connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc_with_sas_credential)<line_sep># blob is encrypted using TEST_SAS_ENCRYPTION_SCOPE_2
blob_client,_=self._create_block_blob(bsc_with_sas_credential blob_name="blockblob" data=b'AAABBBCCC' overwrite=<true>)<line_sep>#
sas_token2=generate_account_sas(storage_account_name account_key=storage_account_key resource_types=ResourceTypes(object=<true> container=<true>) permission=AccountSasPermissions(read=<true> write=<true> delete=<true> list=<true>) expiry=datetime.utcnow()+timedelta(hours=1) encryption_scope=TEST_SAS_ENCRYPTION_SCOPE)<line_sep>bsc_with_account_key_credential=BlobServiceClient(self.account_url(storage_account_name "blob") credential=sas_token2 connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>copied_blob=self.get_resource_name('copiedblob')<line_sep>copied_blob_client=bsc_with_account_key_credential.get_blob_client(self.container_name copied_blob)<line_sep># TODO: to confirm with Sean/Heidi ses in SAS cannot be set for async copy.
# The test failed for async copy (without requires_sync=True)
copied_blob_client.start_copy_from_url(blob_client.url requires_sync=<true>)<line_sep>props=copied_blob_client.get_blob_properties()<line_sep>self.assertEqual(props.encryption_scope TEST_SAS_ENCRYPTION_SCOPE)<line_sep>self._teardown(bsc_with_sas_credential)<block_end>@pytest.mark.live_test_only@BlobPreparer()<def_stmt>test_copy_blob_from_url_with_encryption_scope self storage_account_name storage_account_key# Arrange
# create sas for source blob
<block_start>sas_token=generate_account_sas(storage_account_name account_key=storage_account_key resource_types=ResourceTypes(object=<true> container=<true>) permission=AccountSasPermissions(read=<true> write=<true> delete=<true> list=<true>) expiry=datetime.utcnow()+timedelta(hours=1) )<line_sep>bsc_with_sas_credential=BlobServiceClient(self.account_url(storage_account_name "blob") credential=sas_token connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>self._setup(bsc_with_sas_credential)<line_sep>blob_client,_=self._create_block_blob(bsc_with_sas_credential blob_name="blockblob" data=b'AAABBBCCC' overwrite=<true>)<line_sep>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>copied_blob=self.get_resource_name('copiedblob')<line_sep>copied_blob_client=bsc.get_blob_client(self.container_name copied_blob)<line_sep>copied_blob_client.start_copy_from_url(blob_client.url requires_sync=<true> encryption_scope=TEST_SAS_ENCRYPTION_SCOPE)<line_sep>props=copied_blob_client.get_blob_properties()<line_sep>self.assertEqual(props.encryption_scope TEST_SAS_ENCRYPTION_SCOPE)<line_sep>self._teardown(bsc_with_sas_credential)<block_end>@pytest.mark.live_test_only@BlobPreparer()<def_stmt>test_copy_with_user_delegation_encryption_scope_sas self storage_account_name storage_account_key# Arrange
# to get user delegation key
<block_start>oauth_token_credential=self.generate_oauth_token()<line_sep>service_client=BlobServiceClient(self.account_url(storage_account_name "blob") credential=oauth_token_credential connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>user_delegation_key=service_client.get_user_delegation_key(datetime.utcnow() datetime.utcnow()+timedelta(hours=1))<line_sep>self._setup(service_client)<line_sep>blob_name=self.get_resource_name('blob')<line_sep>sas_token=generate_blob_sas(storage_account_name self.container_name blob_name account_key=user_delegation_key permission=BlobSasPermissions(read=<true> write=<true> create=<true> delete=<true>) expiry=datetime.utcnow()+timedelta(hours=1) encryption_scope=TEST_SAS_ENCRYPTION_SCOPE)<line_sep>bsc_with_delegation_sas=BlobServiceClient(self.account_url(storage_account_name "blob") credential=sas_token connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep># blob is encrypted using TEST_SAS_ENCRYPTION_SCOPE
blob_client,_=self._create_block_blob(bsc_with_delegation_sas blob_name=blob_name data=b'AAABBBCCC' overwrite=<true>)<line_sep>props=blob_client.get_blob_properties()<line_sep>self.assertEqual(props.encryption_scope TEST_SAS_ENCRYPTION_SCOPE)<line_sep>self._teardown(service_client)<block_end>@pytest.mark.playback_test_only@BlobPreparer()<def_stmt>test_create_container_with_default_cpk_n self storage_account_name storage_account_key# Arrange
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>container_client=bsc.create_container('cpkcontainer' container_encryption_scope=TEST_CONTAINER_ENCRYPTION_KEY_SCOPE)<line_sep>container_props=container_client.get_container_properties()<line_sep>self.assertEqual(container_props.encryption_scope.default_encryption_scope TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)<line_sep>self.assertEqual(container_props.encryption_scope.prevent_encryption_scope_override <false>)<for_stmt>container bsc.list_containers(name_starts_with='cpkcontainer')<block_start>self.assertEqual(container_props.encryption_scope.default_encryption_scope TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)<line_sep>self.assertEqual(container_props.encryption_scope.prevent_encryption_scope_override <false>)<block_end>blob_client=container_client.get_blob_client("appendblob")<line_sep># providing encryption scope when upload the blob
resp=blob_client.upload_blob(b'aaaa' BlobType.AppendBlob encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<line_sep># Use the provided encryption scope on the blob
self.assertEqual(resp['encryption_scope'] TEST_ENCRYPTION_KEY_SCOPE)<line_sep>container_client.delete_container()<block_end>@pytest.mark.playback_test_only@BlobPreparer()<def_stmt>test_create_container_with_default_cpk_n_deny_override self storage_account_name storage_account_key# Arrange
<block_start>bsc=BlobServiceClient(self.account_url(storage_account_name "blob") credential=storage_account_key connection_data_block_size=1024 max_single_put_size=1024 min_large_block_upload_threshold=1024 max_block_size=1024 max_page_size=1024)<line_sep>container_client=bsc.create_container('denyoverridecpkcontainer' container_encryption_scope=TEST_CONTAINER_ENCRYPTION_KEY_SCOPE_DENY_OVERRIDE)<line_sep>container_props=container_client.get_container_properties()<line_sep>self.assertEqual(container_props.encryption_scope.default_encryption_scope TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)<line_sep>self.assertEqual(container_props.encryption_scope.prevent_encryption_scope_override <true>)<for_stmt>container bsc.list_containers(name_starts_with='denyoverridecpkcontainer')<block_start>self.assertEqual(container_props.encryption_scope.default_encryption_scope TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)<line_sep>self.assertEqual(container_props.encryption_scope.prevent_encryption_scope_override <true>)<block_end>blob_client=container_client.get_blob_client("appendblob")<line_sep># It's not allowed to set encryption scope on the blob when the container denies encryption scope override.
<with_stmt>self.assertRaises(HttpResponseError)<block_start>blob_client.upload_blob(b'aaaa' BlobType.AppendBlob encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)<block_end>resp=blob_client.upload_blob(b'aaaa' BlobType.AppendBlob)<line_sep>self.assertEqual(resp['encryption_scope'] TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)<line_sep>container_client.delete_container()<block_end><block_end># ------------------------------------------------------------------------------
|
<import_stmt>pyglet<line_sep>pyglet.options['shadow_window']=<false><line_sep>pyglet.options['debug_gl']=<false><class_stmt>Window(pyglet.window.Window)<block_start><def_stmt>__init__ self width height<block_start>self.time=0.0<line_sep>self.alive=<true><line_sep>self.mouse=(0 0)<line_sep>config=pyglet.gl.Config(major_version=3 minor_version=3 forward_compatible=<true> double_buffer=<false> depth_size=0 samples=0 )<line_sep>super().__init__(width=width height=height config=config vsync=<true>)<line_sep>width,height=self.get_framebuffer_size()<line_sep>self.size=(width height)<line_sep>self.aspect=width/height<block_end><def_stmt>on_resize self width height<block_start><pass><block_end><def_stmt>on_draw self<block_start><pass><block_end><def_stmt>on_mouse_motion self x y dx dy<block_start>self.mouse=(x y)<block_end><def_stmt>on_close self<block_start>self.alive=<false><block_end><def_stmt>update self<block_start>self.flip()<line_sep>self.dispatch_events()<line_sep>self.time<augadd>1.0/60.0<line_sep><return>self.alive<block_end>@staticmethod<def_stmt>run <block_start>pyglet.app.run()<block_end><block_end>
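# A hedged usage sketch, not part of the original module: construct the window and drive the manual loop through update(); the 1280x720 size is an assumption
<if_stmt>__name__<eq>'__main__'<block_start>wnd=Window(1280 720)<while_stmt>wnd.update()<block_start><pass><block_end><block_end>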
|
# Generated by Django 3.1.13 on 2021-09-23 00:06
<import_from_stmt>django.db migrations<import_stmt>timezone_field.fields<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('rolodex' '0020_auto_20210922_2337') ]<line_sep>operations=[migrations.AddField(model_name='project' name='timezone' field=timezone_field.fields.TimeZoneField(default='America/Los_Angeles' help_text='Timezone of the project / working hours' verbose_name='Project Timezone') ) ]<block_end>
|
<import_from_stmt>web3 Web3<import_from_stmt>populus.utils.wait wait_for_transaction_receipt<import_from_stmt>eth_utils keccak is_0x_prefixed decode_hex<import_from_stmt>web3.utils.threads Timeout <def_stmt>pack *args<arrow>bytes<block_start>"""
Simulates Solidity's sha3 packing. Integers can be passed as tuples where the second tuple
element specifies the variable's size in bits, e.g.:
sha3((5, 32))
would be equivalent to Solidity's
sha3(uint32(5))
Default size is 256.
"""<def_stmt>format_int value size<block_start><assert_stmt>isinstance(value int)<assert_stmt>isinstance(size int)<if_stmt>value<ge>0<block_start><return>decode_hex('{:x}'.format(value).zfill(size<floordiv>4))<block_end><else_stmt><block_start><return>decode_hex('{:x}'.format((1<lshift>size)+value))<block_end><block_end>msg=b''<for_stmt>arg args<block_start><assert_stmt>arg<if_stmt>isinstance(arg bytes)<block_start>msg<augadd>arg<block_end><elif_stmt>isinstance(arg str)<block_start><if_stmt>is_0x_prefixed(arg)<block_start>msg<augadd>decode_hex(arg)<block_end><else_stmt><block_start>msg<augadd>arg.encode()<block_end><block_end><elif_stmt>isinstance(arg int)<block_start>msg<augadd>format_int(arg 256)<block_end><elif_stmt>isinstance(arg tuple)<block_start>msg<augadd>format_int(arg[0] arg[1])<block_end><else_stmt><block_start><raise>ValueError('Unsupported type: {}.'.format(type(arg)))<block_end><block_end><return>msg<block_end><def_stmt>sol_sha3 *args<arrow>bytes<block_start><return>keccak(pack(*args))<block_end><def_stmt>check_succesful_tx web3:Web3 txid:str timeout=180<arrow>dict<block_start>'''See if transaction went through (Solidity code did not throw).
:return: Transaction receipt
'''<line_sep>receipt=wait_for_transaction_receipt(web3 txid timeout=timeout)<line_sep>txinfo=web3.eth.getTransaction(txid)<assert_stmt>txinfo['gas']<ne>receipt['gasUsed']<line_sep><return>receipt<block_end><def_stmt>wait transfer_filter timeout=30<block_start><with_stmt>Timeout(timeout)<as>timeout<block_start><while_stmt><not>transfer_filter.get(<false>)<block_start>timeout.sleep(2)<block_end><block_end><block_end>
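# A hedged usage sketch, not part of the original module: sol_sha3((5 32) 'foo') mirrors Solidity's sha3(uint32(5), 'foo'); the example values are assumptions
<if_stmt>__name__<eq>'__main__'<block_start>digest=sol_sha3((5 32) 'foo')<line_sep>print(digest.hex())<block_end>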
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
<import_stmt>logging<import_from_stmt>volttron.platform.agent.known_identities VOLTTRON_CENTRAL <import_from_stmt>volttron.platform.vip.agent Agent RPC <line_sep>_log=logging.getLogger(__name__)<class_stmt>VCConnection(Agent)<block_start>"""
This agent connects to an instance that has a volttron.central agent attached
to it. The volttron.central agent uses this agent to communicate with
the platform.agent (vcp) running on the current platform instance.
"""<def_stmt>__init__ self **kwargs<block_start>self._log=logging.getLogger(self.__class__.__name__)<line_sep>super(VCConnection self).__init__(**kwargs)<line_sep>self._main_agent=<none><block_end><def_stmt>set_main_agent self main_agent<block_start>"""
The main agent is the VCP that is using this agent to connect to the
remote volttron instance.
:param main_agent: the agent that instantiated this one.
:type VolttronCentralPlatform:
"""<line_sep>self._main_agent=main_agent<block_end><def_stmt>publish_to_vc self topic message=<none> headers={}<block_start>"""
This method allows the main_agent to publish a message up to the
volttron.central instance.
:param topic:
:param message:
:param headers:
"""<line_sep>self.vip.pubsub.publish('pubsub' topic headers message).get(timeout=5)<block_end>@RPC.export<def_stmt>start_bacnet_scan self iam_topic proxy_identity low_device_id=<none> high_device_id=<none> target_address=<none> scan_length=5<block_start>"""
Starts a bacnet scan using the named proxy_identity as the callee.
:param iam_topic:
:param proxy_identity:
:param low_device_id:
:param high_device_id:
:param target_address:
:param scan_length:
:return:
"""<line_sep>self._main_agent.start_bacnet_scan(iam_vc_response_topic=iam_topic proxy_identity=proxy_identity low_device_id=low_device_id high_device_id=high_device_id target_address=target_address scan_length=scan_length)<block_end>@RPC.export<def_stmt>get_instance_uuid self<block_start>"""
Retrieve the instance uuid for the vcp agent's instance.
:return:
"""<line_sep><return>self._main_agent.get_instance_uuid()<block_end>@RPC.export<def_stmt>get_health self<block_start>"""
Retrieve the health of the vcp agent.
:return:
"""<line_sep><return>self._main_agent.vip.health.get_status()<block_end>@RPC.export<def_stmt>start_agent self agent_uuid<block_start>"""
Start an agent that is already present on the vcp instance.
:param agent_uuid:
:return:
"""<line_sep><return>self._main_agent.start_agent(agent_uuid)<block_end>@RPC.export<def_stmt>stop_agent self agent_uuid<block_start>"""
Stop an agent already running on the vcp instance.
:param agent_uuid:
:return:
"""<line_sep><return>self._main_agent.start_agent(agent_uuid)<block_end>@RPC.export<def_stmt>restart self agent_uuid<block_start>"""
Performs the stop and start operations on the vcp instance for an agent.
:param agent_uuid:
:return:
"""<line_sep>stop_result=self.stop_agent(agent_uuid)<line_sep>start_result=self.start_agent(agent_uuid)<line_sep><return>stop_result start_result<block_end>@RPC.export<def_stmt>agent_status self agent_uuid<block_start>"""
Retrieves the status of a particular agent executing on the vcp
instance. The agent does not have to be executing in order to receive
its status.
:param agent_uuid:
:return:
"""<line_sep><return>self._main_agent.agent_status(agent_uuid)<block_end>@RPC.export<def_stmt>status_agents self<block_start>"""
Return all of the installed agents' statuses for the vcp instance.
:return:
"""<line_sep><return>self._main_agent.status_agents()<block_end>@RPC.export<def_stmt>get_devices self<block_start>"""
Retrieves configuration entries from the config store that begin with
'devices'.
:return: dictionary of devices.
"""<line_sep>self._log.debug("Getting devices in vcconnection.py")<line_sep><return>self._main_agent.get_devices()<block_end>@RPC.export<def_stmt>publish_bacnet_props self proxy_identity publish_topic address device_id filter=[]<block_start>self._log.debug('Publishing bacnet props to topic: {}'.format(publish_topic))<line_sep>self._main_agent.publish_bacnet_props(proxy_identity publish_topic address device_id filter=[])<block_end>@RPC.export<def_stmt>store_agent_config self agent_identity config_name raw_contents config_type='raw'<block_start>"""
Store an agent configuration on the volttron instance associated with
this agent.
:param agent_identity:
:param config_name:
:param raw_contents:
:param config_type:
:return: None
"""<line_sep><return>self._main_agent.store_agent_config(agent_identity config_name raw_contents config_type)<block_end>@RPC.export<def_stmt>list_agent_configs self agent_identity<block_start>"""
List the agent configuration files stored on the volttron instance
associated with this agent.
:param agent_identity: Agent identity to retrieve configuration from.
:return: A list of the configuration names.
"""<line_sep><return>self._main_agent.list_agent_configs(agent_identity)<block_end>@RPC.export<def_stmt>get_agent_config self agent_identity config_name raw=<true><block_start>"""
Retrieve the configuration from the config store of the passed agent
identity.
:param agent_identity:
:param config_name:
:param raw:
:return: The stored configuration.
"""<line_sep><return>self._main_agent.get_agent_config(agent_identity config_name raw)<block_end>@RPC.export<def_stmt>delete_agent_config self agent_identity config_name<block_start>"""
Deletes the configuration from the config store of the passed agent
identity.
:param agent_identity:
:param config_name:
:return: The stored configuration.
"""<line_sep><return>self._main_agent.delete_agent_config(agent_identity config_name)<block_end>@RPC.export<def_stmt>subscribe_to_vcp self prefix prefix_on_vc<block_start>"""
Allows volttron.central to listen to the message bus on the vcp instance.
:param prefix: The prefix to listen for.
:param prefix_on_vc:
The prefix to publish to on the volttron central instance.
"""<line_sep>self._log.info("VC subscribing to prefix: {}".format(prefix))<line_sep>self._log.info("VCP will publish to {} on VC".format(prefix_on_vc))<def_stmt>subscription_wrapper peer sender bus topic headers message# We only publish up to vc for things that aren't forwarded.
<block_start><if_stmt>'X-Forwarded'<in>headers<block_start><return><block_end>self._log.debug("publishing to VC topic: {}".format(prefix_on_vc+topic))<line_sep># Prepend the specified prefix to the topic that was passed
# to the method
self.publish_to_vc(prefix_on_vc+topic message headers)<block_end># Use the main agent to do the subscription on.
self._main_agent.vip.pubsub.subscribe('pubsub' prefix subscription_wrapper)<block_end>@RPC.export<def_stmt>call self platform_method *args **kwargs<block_start><return>self._main_agent.call(platform_method *args **kwargs)<block_end><def_stmt>is_connected self<block_start>connected=self.vip.hello().get(timeout=5)<is><not><none><line_sep>self._log.debug("is_connected returning {}".format(connected))<line_sep><return>connected<block_end><def_stmt>is_peer_connected self peer=VOLTTRON_CENTRAL<block_start>connected=peer<in>self.vip.peerlist().get(timeout=5)<line_sep>self._log.debug("is_connected returning {}".format(connected))<line_sep><return>connected<block_end>@RPC.export<def_stmt>route_to_agent_method self id agent_method params<block_start>"""
Calls a method on an installed agent running on the platform.
.. note::
This method only valid for installed agents not dynamic agents.
:param id:
:param agent_method:
:param params:
:return:
"""<line_sep>self._log.debug("Routing method: {}".format(agent_method))<line_sep><return>self._main_agent.route_request(id agent_method params)<block_end>@RPC.export<def_stmt>get_vip_addresses self<block_start>"""
Retrieves the vip addresses that were specified in the configuration
file or via command line.
:return:
"""<line_sep><return>self._main_agent.get_external_vip_addresses()<block_end>@RPC.export<def_stmt>get_instance_name self<block_start><return>self._main_agent.get_instance_name()<block_end>@RPC.export<def_stmt>start_agent self agent_uuid<block_start>"""
Calls start_agent method on the vcp main agent instance.
.. note::
This method only valid for installed agents not dynamic agents.
:param agent_uuid:
:return:
"""<line_sep>self._main_agent.start_agent(agent_uuid)<block_end>@RPC.export<def_stmt>stop_agent self agent_uuid<block_start>"""
Calls stop_agent method on the vcp main agent instance.
.. note::
This method only valid for installed agents not dynamic agents.
:param agent_uuid:
:return:
"""<line_sep>proc_result=self._main_agent.stop_agent(agent_uuid)<line_sep><return>proc_result<block_end>@RPC.export<def_stmt>restart_agent self agent_uuid<block_start>"""
Calls restart method on the vcp main agent instance.
.. note::
This method only valid for installed agents not dynamic agents.
:param agent_uuid:
:return:
"""<line_sep><return>self._main_agent.restart(agent_uuid)<block_end>@RPC.export<def_stmt>agent_status self agent_uuid<block_start>"""
Calls agent_status method on the vcp main agent instance.
.. note::
This method only valid for installed agents not dynamic agents.
:param agent_uuid:
:return:
"""<line_sep><return>self._main_agent.agent_status(agent_uuid)<block_end>@RPC.export<def_stmt>status_agents self<block_start>"""
Calls status_agents method on the vcp main agent instance.
.. note::
This method is only valid for installed agents, not dynamic agents.
:return:
"""<line_sep><return>self._main_agent.status_agents()<block_end>@RPC.export<def_stmt>list_agents self<block_start>"""
Calls list_agents method on the vcp main agent instance.
.. note::
This method is only valid for installed agents, not dynamic agents.
:return:
"""<line_sep><return>self._main_agent.list_agents()<block_end>@RPC.export<def_stmt>install_agent self local_wheel_file<block_start>"""
Installs an agent on the platform from a local wheel file.
:param local_wheel_file: path to the agent wheel file to install
:return:
"""<line_sep><return>self._main_agent.install_agent(local_wheel_file)<block_end><block_end>
|
# encoding: utf-8
<import_from_stmt>.baseline Baseline<def_stmt>build_model cfg num_classes<block_start>model=Baseline(num_classes cfg.MODEL.LAST_STRIDE cfg.MODEL.PRETRAIN_PATH cfg.MODEL.NAME cfg.MODEL.GENERALIZED_MEAN_POOL cfg.MODEL.PRETRAIN_CHOICE)<line_sep><return>model<block_end>
|
# CamJam EduKit 3 - Robotics
# Worksheet 7 - Controlling the motors with PWM
<import_stmt>RPi.GPIO<as>GPIO# Import the GPIO Library
<import_stmt>time# Import the Time library
# Set the GPIO modes
GPIO.setmode(GPIO.BCM)<line_sep>GPIO.setwarnings(<false>)<line_sep># Set variables for the GPIO motor pins
pinMotorAForwards=10<line_sep>pinMotorABackwards=9<line_sep>pinMotorBForwards=8<line_sep>pinMotorBBackwards=7<line_sep># How many times to turn the pin on and off each second
Frequency=20<line_sep># How long the pin stays on each cycle, as a percent (here, it's 30%)
DutyCycle=30<line_sep># Setting the duty cycle to 0 means the motors will not turn
Stop=0<line_sep># Set the GPIO Pin mode to be Output
GPIO.setup(pinMotorAForwards GPIO.OUT)<line_sep>GPIO.setup(pinMotorABackwards GPIO.OUT)<line_sep>GPIO.setup(pinMotorBForwards GPIO.OUT)<line_sep>GPIO.setup(pinMotorBBackwards GPIO.OUT)<line_sep># Set the GPIO to software PWM at 'Frequency' Hertz
pwmMotorAForwards=GPIO.PWM(pinMotorAForwards Frequency)<line_sep>pwmMotorABackwards=GPIO.PWM(pinMotorABackwards Frequency)<line_sep>pwmMotorBForwards=GPIO.PWM(pinMotorBForwards Frequency)<line_sep>pwmMotorBBackwards=GPIO.PWM(pinMotorBBackwards Frequency)<line_sep># Start the software PWM with a duty cycle of 0 (i.e. not moving)
pwmMotorAForwards.start(Stop)<line_sep>pwmMotorABackwards.start(Stop)<line_sep>pwmMotorBForwards.start(Stop)<line_sep>pwmMotorBBackwards.start(Stop)<line_sep># Turn all motors off
<def_stmt>stopmotors <block_start>pwmMotorAForwards.ChangeDutyCycle(Stop)<line_sep>pwmMotorABackwards.ChangeDutyCycle(Stop)<line_sep>pwmMotorBForwards.ChangeDutyCycle(Stop)<line_sep>pwmMotorBBackwards.ChangeDutyCycle(Stop)<block_end># Turn both motors forwards
<def_stmt>forwards <block_start>pwmMotorAForwards.ChangeDutyCycle(DutyCycle)<line_sep>pwmMotorABackwards.ChangeDutyCycle(Stop)<line_sep>pwmMotorBForwards.ChangeDutyCycle(DutyCycle)<line_sep>pwmMotorBBackwards.ChangeDutyCycle(Stop)<block_end># Turn both motors backwards
<def_stmt>backwards <block_start>pwmMotorAForwards.ChangeDutyCycle(Stop)<line_sep>pwmMotorABackwards.ChangeDutyCycle(DutyCycle)<line_sep>pwmMotorBForwards.ChangeDutyCycle(Stop)<line_sep>pwmMotorBBackwards.ChangeDutyCycle(DutyCycle)<block_end># Turn left
<def_stmt>left <block_start>pwmMotorAForwards.ChangeDutyCycle(Stop)<line_sep>pwmMotorABackwards.ChangeDutyCycle(DutyCycle)<line_sep>pwmMotorBForwards.ChangeDutyCycle(DutyCycle)<line_sep>pwmMotorBBackwards.ChangeDutyCycle(Stop)<block_end># Turn Right
<def_stmt>right <block_start>pwmMotorAForwards.ChangeDutyCycle(DutyCycle)<line_sep>pwmMotorABackwards.ChangeDutyCycle(Stop)<line_sep>pwmMotorBForwards.ChangeDutyCycle(Stop)<line_sep>pwmMotorBBackwards.ChangeDutyCycle(DutyCycle)<block_end># Your code to control the robot goes below this line
forwards()<line_sep>time.sleep(1)# Pause for 1 second
left()<line_sep>time.sleep(0.5)# Pause for half a second
forwards()<line_sep>time.sleep(1)<line_sep>right()<line_sep>time.sleep(0.5)<line_sep>backwards()<line_sep>time.sleep(0.5)<line_sep>stopmotors()<line_sep>GPIO.cleanup()<line_sep>
|
<import_stmt>time<import_stmt>async<class_stmt>Disconnect<block_start><pass><block_end>server=async.server('10.211.55.3' 20019)<line_sep>async.register(transport=server protocol=Disconnect)<line_sep>async.run()<line_sep>
|
# Import from third library
<import_from_stmt>torch.nn.modules.loss _Loss<def_stmt>_reduce loss reduction **kwargs<block_start><if_stmt>reduction<eq>'none'<block_start>ret=loss<block_end><elif_stmt>reduction<eq>'mean'<block_start>normalizer=loss.numel()<if_stmt>kwargs.get('normalizer' <none>)<block_start>normalizer=kwargs['normalizer']<block_end>ret=loss.sum()/normalizer<block_end><elif_stmt>reduction<eq>'sum'<block_start>ret=loss.sum()<block_end><else_stmt><block_start><raise>ValueError(reduction+' is not valid')<block_end><return>ret<block_end><class_stmt>BaseLoss(_Loss)# do not use syntax like `super(xxx, self).__init__,
# which will cause infinite recursion when using a class decorator`
<block_start><def_stmt>__init__ self name='base' reduction='none' loss_weight=1.0<block_start>r"""
Arguments:
- name (:obj:`str`): name of the loss function
- reduction (:obj:`str`): reduction type, choice of mean, none, sum
- loss_weight (:obj:`float`): loss weight
"""<line_sep>_Loss.__init__(self reduction=reduction)<line_sep>self.loss_weight=loss_weight<line_sep>self.name=name<block_end><def_stmt>__call__ self input target reduction_override=<none> normalizer_override=<none> **kwargs<block_start>r"""
Arguments:
- input (:obj:`Tensor`)
- target (:obj:`Tensor`)
- reduction_override (:obj:`str`): choice of 'none', 'mean', 'sum', override the reduction type
defined in __init__ function
- normalizer_override (:obj:`float`): override the normalizer when reduction is 'mean'
"""<line_sep>reduction=reduction_override<if>reduction_override<else>self.reduction<assert_stmt>(normalizer_override<is><none><or>reduction<eq>'mean') f'normalizer is not allowed when reduction is {reduction}'<line_sep>loss=_Loss.__call__(self input target reduction normalizer=normalizer_override **kwargs)<line_sep><return>loss<times>self.loss_weight<block_end><def_stmt>forward self input target reduction normalizer=<none> **kwargs<block_start><raise>NotImplementedError<block_end><block_end>
|
<import_from_stmt>mmcv.utils Registry<line_sep>DATA_CONVERTERS=Registry('data_converters')<def_stmt>build_data_converter cfg<block_start>"""Build data converter."""<line_sep><return>DATA_CONVERTERS.build(cfg)<block_end>
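# Usage sketch (assumed, not from the original file): registering a converter class
# and building it from a config dict. 'MyConverter' and the 'scale' key are
# hypothetical; mmcv's Registry looks up the 'type' key and passes the remaining
# keys to the class constructor.
# @DATA_CONVERTERS.register_module()
# class MyConverter:
#     def __init__(self, scale=1.0):
#         self.scale = scale
#
# converter = build_data_converter(dict(type='MyConverter', scale=2.0))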
|
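# Presumably a SikuliX/Jython automation snippet (the type/paste/sleep/Key/KeyModifier
# globals are assumed to be provided by the Sikuli runtime): it reads a file path from
# argv, presses Ctrl+F (presumably to focus a search box), pastes the first line of the
# file, presses Enter, and exits.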
<import_stmt>sys<line_sep>path2file=sys.argv[1]<line_sep>file=open(path2file 'r')<while_stmt><true><block_start>line=file.readline()<if_stmt><not>line<block_start><break><block_end>stroka=unicode(line 'utf-8')<line_sep>type('f' KeyModifier.CTRL)<line_sep>sleep(1)<line_sep>paste(stroka)<line_sep>sleep(1)<line_sep>type(Key.ENTER)<line_sep>sleep(1)<line_sep><break><block_end>exit(0)<line_sep>
|
<import_from_stmt>django.core.management.base BaseCommand<import_from_stmt>dimagi.utils.couch.database iter_docs<import_from_stmt>corehq.apps.locations.models SQLLocation<import_from_stmt>corehq.apps.users.models CommCareUser<class_stmt>Command(BaseCommand)<block_start>help="Re-syncs location user data for all mobile workers in the domain."<def_stmt>add_arguments self parser<block_start>parser.add_argument('domain')<block_end><def_stmt>process_user self user<block_start><if_stmt>user.location_id<block_start>user.set_location(SQLLocation.objects.get(location_id=user.location_id))<block_end><else_stmt><block_start>user.unset_location()<block_end><block_end><def_stmt>handle self domain **options<block_start>ids=(CommCareUser.ids_by_domain(domain is_active=<true>)+CommCareUser.ids_by_domain(domain is_active=<false>))<for_stmt>doc iter_docs(CommCareUser.get_db() ids)<block_start>user=CommCareUser.wrap(doc)<try_stmt><block_start>self.process_user(user)<block_end><except_stmt>Exception<as>e<block_start>print("Error processing user %s: %s"%(user._id e))<block_end><block_end><block_end><block_end>
|
<import_stmt>spacy<line_sep>nlp=spacy.load("zh_core_web_sm")<line_sep>text="写入历史了:苹果是美国第一家市值超过一万亿美元的上市公司。"<line_sep># Process the text
doc=nlp(text)<for_stmt>token doc# Get the token text, part-of-speech tag and dependency label
<block_start>token_text=token.text<line_sep>token_pos=token.pos_<line_sep>token_dep=token.dep_<line_sep># Print in an aligned format
print(f"{token_text:<12}{token_pos:<10}{token_dep:<10}")<block_end>
|
<import_from_stmt>zerver.lib.data_types DictType EnumType Equals ListType NumberType OptionalType StringDictType TupleType UnionType UrlType schema <import_from_stmt>zerver.lib.test_classes ZulipTestCase<class_stmt>MiscTest(ZulipTestCase)<block_start><def_stmt>test_data_type_schema self<arrow><none><block_start>"""
We really only test this to get test coverage. The
code covered here is really only used in testing tools.
"""<line_sep>test_schema=DictType([("type" Equals("realm")) ("maybe_n" OptionalType(int)) ("s" str) ("timestamp" NumberType()) ("flag" bool) ("tup" TupleType([int str])) ("level" EnumType([1 2 3])) ("lst" ListType(int)) ("config" StringDictType(str)) ("value" UnionType([int str])) ("url" UrlType()) ])<line_sep>expected="""
test (dict):
config (string_dict):
value: str
flag: bool
level in [1, 2, 3]
lst (list):
type: int
maybe_n: int
s: str
timestamp: number
tup (tuple):
0: int
1: str
type in ['realm']
url: str
value (union):
type: int
type: str
"""<line_sep>self.assertEqual(schema("test" test_schema).strip() expected.strip())<block_end><block_end>
|
<import_stmt>sys<line_sep>sys.path.append('core')<import_stmt>argparse<import_stmt>glob<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>tqdm tqdm<import_from_stmt>pathlib Path<import_from_stmt>raft_stereo RAFTStereo<import_from_stmt>utils.utils InputPadder<import_from_stmt>PIL Image<import_from_stmt>matplotlib pyplot<as>plt<line_sep>DEVICE='cuda'<def_stmt>load_image imfile<block_start>img=np.array(Image.open(imfile)).astype(np.uint8)<line_sep>img=torch.from_numpy(img).permute(2 0 1).float()<line_sep><return>img[<none>].to(DEVICE)<block_end><def_stmt>demo args<block_start>model=torch.nn.DataParallel(RAFTStereo(args) device_ids=[0])<line_sep>model.load_state_dict(torch.load(args.restore_ckpt))<line_sep>model=model.module<line_sep>model.to(DEVICE)<line_sep>model.eval()<line_sep>output_directory=Path(args.output_directory)<line_sep>output_directory.mkdir(exist_ok=<true>)<with_stmt>torch.no_grad()<block_start>left_images=sorted(glob.glob(args.left_imgs recursive=<true>))<line_sep>right_images=sorted(glob.glob(args.right_imgs recursive=<true>))<line_sep>print(f"Found {len(left_images)} images. Saving files to {output_directory}/")<for_stmt>(imfile1 imfile2) tqdm(list(zip(left_images right_images)))<block_start>image1=load_image(imfile1)<line_sep>image2=load_image(imfile2)<line_sep>padder=InputPadder(image1.shape divis_by=32)<line_sep>image1,image2=padder.pad(image1 image2)<line_sep>_,flow_up=model(image1 image2 iters=args.valid_iters test_mode=<true>)<line_sep>file_stem=imfile1.split('/')[-2]<if_stmt>args.save_numpy<block_start>np.save(output_directory/f"{file_stem}.npy" flow_up.cpu().numpy().squeeze())<block_end>plt.imsave(output_directory/f"{file_stem}.png" -flow_up.cpu().numpy().squeeze() cmap='jet')<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--restore_ckpt' help="restore checkpoint" required=<true>)<line_sep>parser.add_argument('--save_numpy' action='store_true' help='save output as numpy arrays')<line_sep>parser.add_argument('-l' '--left_imgs' help="path to all first (left) frames" default="datasets/Middlebury/MiddEval3/testH/*/im0.png")<line_sep>parser.add_argument('-r' '--right_imgs' help="path to all second (right) frames" default="datasets/Middlebury/MiddEval3/testH/*/im1.png")<line_sep>parser.add_argument('--output_directory' help="directory to save output" default="demo_output")<line_sep>parser.add_argument('--mixed_precision' action='store_true' help='use mixed precision')<line_sep>parser.add_argument('--valid_iters' type=int default=32 help='number of flow-field updates during forward pass')<line_sep># Architecture choices
parser.add_argument('--hidden_dims' nargs='+' type=int default=[128]<times>3 help="hidden state and context dimensions")<line_sep>parser.add_argument('--corr_implementation' choices=["reg" "alt" "reg_cuda" "alt_cuda"] default="reg" help="correlation volume implementation")<line_sep>parser.add_argument('--shared_backbone' action='store_true' help="use a single backbone for the context and feature encoders")<line_sep>parser.add_argument('--corr_levels' type=int default=4 help="number of levels in the correlation pyramid")<line_sep>parser.add_argument('--corr_radius' type=int default=4 help="width of the correlation pyramid")<line_sep>parser.add_argument('--n_downsample' type=int default=2 help="resolution of the disparity field (1/2^K)")<line_sep>parser.add_argument('--slow_fast_gru' action='store_true' help="iterate the low-res GRUs more frequently")<line_sep>parser.add_argument('--n_gru_layers' type=int default=3 help="number of hidden GRU levels")<line_sep>args=parser.parse_args()<line_sep>demo(args)<block_end>
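# Hypothetical invocation (the script name and checkpoint path are assumptions, not
# taken from the original repository); the image globs shown are just the defaults
# declared above:
#   python demo.py --restore_ckpt models/raftstereo-middlebury.pth \
#       -l "datasets/Middlebury/MiddEval3/testH/*/im0.png" \
#       -r "datasets/Middlebury/MiddEval3/testH/*/im1.png" \
#       --output_directory demo_output --save_numpy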
|
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>#--------------------------------------------------------------------------------
# select collection of "good" collision vertices
selectedVerticesForPFMEtCorrType0=cms.EDFilter("VertexSelector" src=cms.InputTag('offlinePrimaryVertices') cut=cms.string("isValid & ndof >= 4 & chi2 > 0 & tracksSize > 0 & abs(z) < 24 & abs(position.Rho) < 2.") filter=cms.bool(<false>))<line_sep>selectedPrimaryVertexHighestPtTrackSumForPFMEtCorrType0=cms.EDFilter("PATSingleVertexSelector" mode=cms.string('firstVertex') vertices=cms.InputTag('selectedVerticesForPFMEtCorrType0') filter=cms.bool(<false>))<line_sep>#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# association of PFCandidates to vertices
<import_from_stmt>RecoParticleFlow.PFTracking.particleFlowDisplacedVertex_cfi particleFlowDisplacedVertex<import_from_stmt>TrackingTools.TransientTrack.TransientTrackBuilder_cfi *<import_from_stmt>CommonTools.RecoUtils.pfcand_assomap_cfi PFCandAssoMap<as>_PFCandAssoMap<line_sep>pfCandidateToVertexAssociation=_PFCandAssoMap.clone(PFCandidateCollection=cms.InputTag('particleFlow') UseBeamSpotCompatibility=cms.untracked.bool(<true>) ignoreMissingCollection=cms.bool(<true>))<line_sep>#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# produce Type 0 MET corrections
pfMETcorrType0=cms.EDProducer("Type0PFMETcorrInputProducer" srcPFCandidateToVertexAssociations=cms.InputTag('pfCandidateToVertexAssociation') srcHardScatterVertex=cms.InputTag('selectedPrimaryVertexHighestPtTrackSumForPFMEtCorrType0') correction=cms.PSet(# RunI correction
# formula = cms.string("-([0] + [1]*x)*(1.0 + TMath::Erf(-[2]*TMath::Power(x, [3])))"),
# par0 = cms.double(0.),
# par1 = cms.double(-0.703151),
# par2 = cms.double(0.0303531),
# par3 = cms.double(0.909209)
formula=cms.string("(x<35)?(-( [0]+x*[1]+pow(x, 2)*[2]+pow(x, 3)*[3] )):(-( [0]+35*[1]+pow(35, 2)*[2]+pow(35, 3)*[3] ))") par0=cms.double(-1.81414e-01) par1=cms.double(-4.76934e-01) par2=cms.double(8.63564e-03) par3=cms.double(-4.94181e-05)) minDz=cms.double(0.2)# [cm], minimum distance required between pile-up vertices and "hard scatter" vertex
)<line_sep>#--------------------------------------------------------------------------------
type0PFMEtCorrectionPFCandToVertexAssociationTask=cms.Task(selectedVerticesForPFMEtCorrType0 selectedPrimaryVertexHighestPtTrackSumForPFMEtCorrType0 particleFlowDisplacedVertex pfCandidateToVertexAssociation)<line_sep>type0PFMEtCorrectionPFCandToVertexAssociation=cms.Sequence(type0PFMEtCorrectionPFCandToVertexAssociationTask)<line_sep>type0PFMEtCorrectionPFCandToVertexAssociationForValidation=cms.Sequence(type0PFMEtCorrectionPFCandToVertexAssociationTask)<line_sep>type0PFMEtCorrectionPFCandToVertexAssociationForValidationMiniAOD=cms.Sequence(type0PFMEtCorrectionPFCandToVertexAssociationTask)<line_sep>type0PFMEtCorrectionTask=cms.Task(type0PFMEtCorrectionPFCandToVertexAssociationTask pfMETcorrType0)<line_sep>type0PFMEtCorrection=cms.Sequence(type0PFMEtCorrectionTask)<line_sep>
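# Standalone sketch (not part of the CMSSW configuration) of the piecewise Type-0
# correction polynomial configured above: below x = 35 the cubic is evaluated
# directly, above 35 it is clamped at its value at x = 35. Here x is presumably the
# per-vertex charged sum pT in GeV; the coefficients are copied from pfMETcorrType0.
# def type0_correction(x, p=(-1.81414e-01, -4.76934e-01, 8.63564e-03, -4.94181e-05)):
#     x = min(x, 35.0)
#     return -(p[0] + p[1] * x + p[2] * x ** 2 + p[3] * x ** 3)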
|
# the __all__ is generated
__all__=[]<line_sep># __init__.py structure:
# common code of the package
# export interface in __all__, which aggregates the __all__ of each submodule
# import all from submodule sina_block_money_flow_recorder
<import_from_stmt>.sina_block_money_flow_recorder *<import_from_stmt>.sina_block_money_flow_recorder __all__<as>_sina_block_money_flow_recorder_all<line_sep>__all__<augadd>_sina_block_money_flow_recorder_all<line_sep># import all from submodule sina_stock_money_flow_recorder
<import_from_stmt>.sina_stock_money_flow_recorder *<import_from_stmt>.sina_stock_money_flow_recorder __all__<as>_sina_stock_money_flow_recorder_all<line_sep>__all__<augadd>_sina_stock_money_flow_recorder_all<line_sep>
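# Hypothetical continuation of the same pattern (the submodule name below does not
# exist in this package; it only illustrates how a further recorder module would be
# re-exported):
# from .some_new_money_flow_recorder import *
# from .some_new_money_flow_recorder import __all__ as _some_new_money_flow_recorder_all
# __all__ += _some_new_money_flow_recorder_all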
|
<import_from_stmt>flask Flask<import_from_stmt>requests JSONRPCRequest<import_from_stmt>views json_echo echo<import_from_stmt>method_views JSONEchoView<line_sep>Flask.request_class=JSONRPCRequest<def_stmt>create_app <block_start>app=Flask("FlaskEchoApp")<line_sep>app.config.from_pyfile("celeryconfig.py")<line_sep># Register the blueprint version of the echoer
app.register_blueprint(json_echo url_prefix="/json_echo")<line_sep># Register the modelview version of the echoer
app.add_url_rule("/json_echo_class" view_func=JSONEchoView.as_view("json_echo_class"))<line_sep><return>app<block_end>
|
<def_stmt>cipher_text plain_text<block_start><pass><block_end>
|
<import_from_stmt>lib.actions YammerAction<line_sep>__all__=['ListUsersAction']<class_stmt>ListUsersAction(YammerAction)<block_start><def_stmt>run self page=<none> letter=<none> sort_by=<none> reverse=<none><block_start>yammer=self.authenticate()<line_sep>users=yammer.users.all(page=page letter=letter sort_by=sort_by reverse=reverse)<line_sep><return>users<block_end><block_end>
|
"""
Setup of SD Mask RCNN codebase
Author: <NAME>
"""<import_stmt>os<import_from_stmt>setuptools setup<line_sep>root_dir=os.path.dirname(os.path.realpath(__file__))<line_sep># load __version__
version_file='sd_maskrcnn/version.py'<line_sep>exec(open(version_file).read())<line_sep># load README.md as long_description
long_description=''<if_stmt>os.path.exists('README.md')<block_start><with_stmt>open('README.md' 'r')<as>f<block_start>long_description=f.read()<block_end><block_end>setup_requirements=['Cython' 'numpy']<line_sep>requirements=['pycocotools>=2.0' # For benchmarking
'scikit-image>=0.14.2' # For image loading
'keras>=2.2<2.3' # For training
'tqdm' # For pretty progress bars
'matplotlib' # For visualization of results
'h5py<3.0.0' # Loading pretrained models
'autolab_core>=1.1.0' # For core utilities
'nvidia-tensorflow' # For training - need TF 1.15 so use nvidia
f'mask-rcnn @ file://localhost{root_dir}/maskrcnn'# Underlying Mask RCNN model
]<line_sep>generation_requirements=['gym>=0.11' # For sampling heaps
'pyglet==1.4.0b1' # For pyrender
'pyrender>=0.1.23' # For rendering images
'pybullet' # For dynamic sim
'trimesh[easy]' # For mesh loading/exporting
'scipy'# For random vars
]<line_sep>setup(name='sd_maskrcnn' version=__version__ description='SD Mask RCNN project code' long_description=long_description long_description_content_type='text/markdown' author='<NAME>' author_email='<EMAIL>' license='MIT' url='http://github.com/BerkeleyAutomation/sd-maskrcnn' classifiers=["Programming Language :: Python :: 3" "License :: OSI Approved :: MIT License" 'Natural Language :: English' 'Topic :: Scientific/Engineering'] packages=['sd_maskrcnn' 'sd_maskrcnn.envs'] package_data={'sd_maskrcnn':['data/plane/*' 'data/bin/*']} setup_requires=setup_requirements install_requires=requirements extras_require={'generation':generation_requirements})<line_sep>
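# Typical installation sketch (commands are assumptions based on the extras_require
# declared above, not taken from the project documentation):
#   pip install -e .                  # core package
#   pip install -e ".[generation]"    # also pull in the heap-generation extras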
|
<import_stmt>pytest<import_from_stmt>dbt.tests.util run_dbt update_config_file<import_from_stmt>dbt.exceptions CompilationException<line_sep>model_sql="""
select 1 as id
"""<line_sep>bad_generate_macros__generate_names_sql="""
{% macro generate_schema_name(custom_schema_name, node) -%}
{% do var('somevar') %}
{% do return(dbt.generate_schema_name(custom_schema_name, node)) %}
{%- endmacro %}
"""<class_stmt>TestMissingVarGenerateNameMacro<block_start>@pytest.fixture(scope="class")<def_stmt>macros self<block_start><return>{"generate_names.sql":bad_generate_macros__generate_names_sql}<block_end>@pytest.fixture(scope="class")<def_stmt>models self<block_start><return>{"model.sql":model_sql}<block_end><def_stmt>test_generate_schema_name_var self project# var isn't set, so generate_name macro fails
<block_start><with_stmt>pytest.raises(CompilationException)<as>excinfo<block_start>run_dbt(["compile"])<block_end><assert_stmt>"Required var 'somevar' not found in config"<in>str(excinfo.value)<line_sep># globally scoped -- var is set at top-level
update_config_file({"vars":{"somevar":1}} project.project_root "dbt_project.yml")<line_sep>run_dbt(["compile"])<line_sep># locally scoped -- var is set in 'test' scope
update_config_file({"vars":{"test":{"somevar":1}}} project.project_root "dbt_project.yml")<line_sep>run_dbt(["compile"])<block_end><block_end>
|
<import_from_future_stmt> absolute_import<import_stmt>os<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>experiments.scripts.pickle_wrapper save_pkl load_pkl<import_from_stmt>.ops simple_linear select_action_tf clipped_error<import_from_stmt>.alpha_vector AlphaVector<import_from_stmt>.base_tf_solver BaseTFSolver<class_stmt>LinearAlphaNet(BaseTFSolver)<block_start>"""
Linear Alpha Network
- linear FA for alpha vectors
- 6 inputs (r(s,a))
- 6 outputs (1 hyperplane per action)
"""<def_stmt>__init__ self agent sess<block_start>super(LinearAlphaNet self).__init__(agent sess)<line_sep>self.ops={}<line_sep>self.w={}<line_sep>self.summary_ops={}<line_sep>self.summary_placeholders={}<line_sep>self.w_input={}<line_sep>self.w_assign_op={}<line_sep>self.build_linear_network()<with_stmt>tf.variable_scope('step')<block_start>self.step_op=tf.Variable(0 trainable=<false> name='step')<line_sep>self.step_input=tf.placeholder('int32' <none> name='step_input')<line_sep>self.step_assign_op=self.step_op.assign(self.step_input)<block_end>tf.global_variables_initializer().run()<block_end>@staticmethod<def_stmt>reset agent sess<block_start><return>LinearAlphaNet(agent sess)<block_end><def_stmt>train self epoch<block_start>start_step=self.step_assign_op.eval({self.step_input:epoch<times>self.model.max_steps})<line_sep>total_reward,avg_reward_per_step,total_loss,total_v,total_delta=0. 0. 0. 0. 0.<line_sep>actions=[]<line_sep># Reset for new run
belief=self.model.get_initial_belief_state()<for_stmt>step range(start_step start_step+self.model.max_steps)# 1. predict
<block_start>action,pred_v=self.e_greedy_predict(belief step)<line_sep># 2. act
step_result=self.model.generate_step(action)<if_stmt>step_result.is_terminal<block_start>v_b_next=np.array([0.])<block_end><else_stmt><block_start>next_belief=self.model.belief_update(belief action step_result.observation)<line_sep># optionally clip reward
# generate target
_,v_b_next=self.greedy_predict(next_belief)<block_end>target_v=self.model.discount<times>(step_result.reward+v_b_next)<line_sep># compute gradient and do weight update
_,loss,delta=self.gradients(target_v belief step)<line_sep>total_loss<augadd>loss<line_sep>total_reward<augadd>step_result.reward<line_sep>total_v<augadd>pred_v[0]<line_sep>total_delta<augadd>delta[0]<if_stmt>step_result.is_terminal# Reset for new run
<block_start>belief=self.model.get_initial_belief_state()<block_end>actions.append(action)<line_sep>avg_reward_per_step=total_reward/(step+1.)<line_sep>avg_loss=loss/(step+1.)<line_sep>avg_v=total_v/(step+1.)<line_sep>avg_delta=total_delta/(step+1.)<line_sep>self.step_assign_op.eval({self.step_input:step+1})<line_sep>self.inject_summary({'average.reward':avg_reward_per_step 'average.loss':avg_loss 'average.v':avg_v 'average.delta':avg_delta 'training.weights':self.sess.run(self.w['l1_w'] feed_dict={self.ops['l0_in']:np.reshape(self.model.get_reward_matrix().flatten() [1 6]) self.ops['belief']:belief}) 'training.learning_rate':self.ops['learning_rate_op'].eval({self.ops['learning_rate_step']:step+1}) 'training.epsilon':self.ops['epsilon_op'].eval({self.ops['epsilon_step']:step+1})} step+1)<block_end><block_end><def_stmt>e_greedy_predict self belief epsilon_step# try hard-coding input of linear net to be rewards (can try random as well)
<block_start>action,v_b,epsilon=self.sess.run([self.ops['a'] self.ops['v_b'] self.ops['epsilon_op']] feed_dict={self.ops['l0_in']:np.reshape(self.model.get_reward_matrix().flatten() [1 6]) self.ops['belief']:belief self.ops['epsilon_step']:epsilon_step})<line_sep># e-greedy action selection
<if_stmt>np.random.uniform(0 1)<l>epsilon<block_start>action=np.random.randint(self.model.num_actions)<block_end><return>action v_b<block_end><def_stmt>greedy_predict self belief# try hard-coding input of linear net to be rewards (can try random as well)
<block_start>action,v_b=self.sess.run([self.ops['a'] self.ops['v_b']] feed_dict={self.ops['l0_in']:np.reshape(self.model.get_reward_matrix().flatten() [1 6]) self.ops['belief']:belief})<line_sep><return>action v_b<block_end><def_stmt>gradients self target_v belief learning_rate_step<block_start><return>self.sess.run([self.ops['optim'] self.ops['loss'] self.ops['delta']] feed_dict={self.ops['target_v']:target_v self.ops['l0_in']:np.reshape(self.model.get_reward_matrix().flatten() [1 6]) self.ops['belief']:belief self.ops['learning_rate_step']:learning_rate_step})<block_end><def_stmt>alpha_vectors self<block_start>gamma=self.sess.run(self.ops['l1_out'] feed_dict={self.ops['l0_in']:np.reshape(self.model.get_reward_matrix().flatten() [1 6]) self.ops['belief']:self.model.get_initial_belief_state()})<line_sep>gamma=np.reshape(gamma [self.model.num_actions self.model.num_states])<line_sep>vector_set=set()<for_stmt>i range(self.model.num_actions)<block_start>vector_set.add(AlphaVector(a=i v=gamma[i]))<block_end><return>vector_set<block_end><def_stmt>build_linear_network self<block_start><with_stmt>tf.variable_scope('linear_fa_prediction')<block_start>self.ops['belief']=tf.placeholder('float32' [self.model.num_states] name='belief_input')<with_stmt>tf.name_scope('linear_layer')<block_start>self.ops['l0_in']=tf.placeholder('float32' [1 self.model.num_states<times>self.model.num_actions] name='input')<line_sep>self.ops['l1_out'],self.w['l1_w'],self.w['l1_b']=simple_linear(self.ops['l0_in'] activation_fn=<none> name='weights')<line_sep>self.ops['l1_out']=tf.reshape(self.ops['l1_out'] [self.model.num_actions self.model.num_states] name='output')<block_end><with_stmt>tf.variable_scope('action_selection')<block_start>vector_set=set()<for_stmt>i range(self.model.num_actions)<block_start>vector_set.add(AlphaVector(a=i v=self.ops['l1_out'][i :]))<block_end>self.ops['a'],self.ops['v_b']=select_action_tf(self.ops['belief'] vector_set)<block_end><with_stmt>tf.variable_scope('epsilon_greedy')<block_start>self.ops['epsilon_step']=tf.placeholder('int64' <none> name='epsilon_step')<line_sep>self.ops['epsilon_op']=tf.maximum(self.model.epsilon_minimum tf.train.exponential_decay(self.model.epsilon_start self.ops['epsilon_step'] self.model.epsilon_decay_step self.model.epsilon_decay staircase=<true>))<block_end><block_end><with_stmt>tf.variable_scope('linear_optimizer')# MSE loss function
<block_start>self.ops['target_v']=tf.placeholder('float32' [<none>] name='target_v')<line_sep>self.ops['delta']=self.ops['target_v']-self.ops['v_b']<line_sep># self.ops['clipped_delta'] = tf.clip_by_value(self.ops['delta'], -1, 1, name='clipped_delta')
# L2 regularization
self.ops['loss']=tf.reduce_mean(clipped_error(self.ops['delta'])+self.model.beta<times>tf.nn.l2_loss(self.w['l1_w'])+self.model.beta<times>tf.nn.l2_loss(self.w['l1_b']) name='loss')<line_sep>self.ops['learning_rate_step']=tf.placeholder('int64' <none> name='learning_rate_step')<line_sep>self.ops['learning_rate_op']=tf.maximum(self.model.learning_rate_minimum tf.train.exponential_decay(self.model.learning_rate self.ops['learning_rate_step'] self.model.learning_rate_decay_step self.model.learning_rate_decay staircase=<true>))<line_sep>self.ops['optim']=tf.train.MomentumOptimizer(self.ops['learning_rate_op'] momentum=0.8 name='Optimizer').minimize(self.ops['loss'])<block_end><with_stmt>tf.variable_scope('linear_fa_summary')<block_start>scalar_summary_tags=['average.reward' 'average.loss' 'average.v' 'average.delta' 'training.learning_rate' 'training.epsilon']<for_stmt>tag scalar_summary_tags<block_start>self.summary_placeholders[tag]=tf.placeholder('float32' <none> name=tag.replace(' ' '_'))<line_sep>self.summary_ops['{}'.format(tag)]=tf.summary.scalar('{}'.format(tag) self.summary_placeholders[tag])<block_end>self.summary_placeholders['training.weights']=tf.placeholder('float32' [1 6] name='training_weights')<line_sep>self.summary_ops['training.weights']=tf.summary.histogram('weights' self.summary_placeholders['training.weights'])<line_sep>self.summary_ops['writer']=tf.summary.FileWriter(self.model.logs self.sess.graph)<block_end>self.summary_ops['saver']=tf.train.Saver(self.w max_to_keep=30)<line_sep>self.load_model()<block_end><def_stmt>inject_summary self tag_dict step<block_start>summary_str_lists=self.sess.run([self.summary_ops['{}'.format(tag)]<for>tag tag_dict.keys()] feed_dict={self.summary_placeholders[tag]:value<for>tag,value tag_dict.items()})<for_stmt>summary_str summary_str_lists<block_start>self.summary_ops['writer'].add_summary(summary_str step)<block_end><block_end><def_stmt>save_weight_to_pkl self<block_start><if_stmt><not>os.path.exists(self.model.weight_dir)<block_start>os.makedirs(self.model.weight_dir)<block_end><for_stmt>name self.w.keys()<block_start>save_pkl(self.w[name].eval() os.path.join(self.model.weight_dir "%s.pkl"%name))<block_end><block_end><def_stmt>load_weight_from_pkl self<block_start><with_stmt>tf.variable_scope('load_pred_from_pkl')<block_start><for_stmt>name self.w.keys()<block_start>self.w_input[name]=tf.placeholder('float32' self.w[name].get_shape().as_list() name=name)<line_sep>self.w_assign_op[name]=self.w[name].assign(self.w_input[name])<block_end><block_end><for_stmt>name self.w.keys()<block_start>self.w_assign_op[name].eval({self.w_input[name]:load_pkl(os.path.join(self.model.weight_dir "%s.pkl"%name))})<block_end><block_end><def_stmt>save_alpha_vectors self<block_start><if_stmt><not>os.path.exists(self.model.weight_dir)<block_start>os.makedirs(self.model.weight_dir)<block_end>av=self.alpha_vectors()<line_sep>save_pkl(av os.path.join(self.model.weight_dir "linear_alpha_net_vectors.pkl"))<block_end><block_end>
|
<import_from_stmt>django.core.management BaseCommand<import_from_stmt>plans tasks<class_stmt>Command(BaseCommand)<block_start>help='Autorenew accounts with recurring payments'<def_stmt>handle self *args **options# pragma: no cover
<block_start>self.stdout.write("Starting renewal")<line_sep>renewed_accounts=tasks.autorenew_account()<if_stmt>renewed_accounts<block_start>self.stdout.write("Accounts autorenewed: "+", ".join(str(s)<for>s renewed_accounts))<block_end><else_stmt><block_start>self.stdout.write("No accounts autorenewed")<block_end><block_end><block_end>
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer encoder."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>paddle.fluid.layers<as>layers<import_from_stmt>models.neural_modules positionwise_feed_forward pre_process_layer post_process_layer<import_from_stmt>models.attention multi_head_attention multi_head_pooling multi_head_structure_attention<def_stmt>transformer_encoder_layer query_input key_input attn_bias n_head d_key d_value d_model d_inner_hid prepostprocess_dropout attention_dropout relu_dropout hidden_act preprocess_cmd="n" postprocess_cmd="da" param_initializer=<none> name=''<block_start>"""The encoder layers that can be stacked to form a deep encoder.
This module consists of a multi-head (self) attention followed by
position-wise feed-forward networks, with both components wrapped by
the post_process_layer to add residual connection, layer normalization
and dropout.
"""<line_sep>key_input=pre_process_layer(key_input preprocess_cmd prepostprocess_dropout name=name+'_pre_att')<if>key_input<else><none><line_sep>value_input=key_input<if>key_input<else><none><line_sep>attn_output=multi_head_attention(pre_process_layer(query_input preprocess_cmd prepostprocess_dropout name=name+'_pre_att') key_input value_input attn_bias d_key d_value d_model n_head attention_dropout param_initializer=param_initializer name=name+'_multi_head_att')<line_sep>attn_output=post_process_layer(query_input attn_output postprocess_cmd prepostprocess_dropout name=name+'_post_att')<line_sep>ffd_output=positionwise_feed_forward(pre_process_layer(attn_output preprocess_cmd prepostprocess_dropout name=name+'_pre_ffn') d_inner_hid d_model relu_dropout hidden_act param_initializer=param_initializer name=name+'_ffn')<line_sep><return>post_process_layer(attn_output ffd_output postprocess_cmd prepostprocess_dropout name=name+'_post_ffn')<block_end><def_stmt>transformer_encoder enc_input attn_bias n_layer n_head d_key d_value d_model d_inner_hid prepostprocess_dropout attention_dropout relu_dropout hidden_act preprocess_cmd="n" postprocess_cmd="da" param_initializer=<none> name='transformer_encoder' with_post_process=<true><block_start>"""
The encoder is composed of a stack of identical layers returned by calling
encoder_layer.
"""<for_stmt>i range(n_layer)<block_start>enc_output=transformer_encoder_layer(enc_input <none> attn_bias n_head d_key d_value d_model d_inner_hid prepostprocess_dropout attention_dropout relu_dropout hidden_act preprocess_cmd postprocess_cmd param_initializer=param_initializer name=name+'_layer_'+str(i))<line_sep>enc_input=enc_output<block_end><if_stmt>with_post_process<block_start>enc_output=pre_process_layer(enc_output preprocess_cmd prepostprocess_dropout name="post_encoder")<block_end><return>enc_output<block_end><def_stmt>self_attention_pooling_layer enc_input attn_bias n_head d_key d_value d_model d_inner_hid prepostprocess_dropout attention_dropout relu_dropout n_block preprocess_cmd="n" postprocess_cmd="da" name='self_attention_pooling'<block_start>"""
enc_input: # (batch_size, n_tokens, emb_dim)
attn_bias: # (batch_size, n_head, n_tokens, n_tokens)
"""<line_sep>attn_output=multi_head_pooling(keys=pre_process_layer(enc_input preprocess_cmd prepostprocess_dropout name=name+'_pre') # add layer normalization
values=<none> attn_bias=attn_bias # (batch_size, n_head, n_tokens, n_tokens)
d_value=d_value d_model=d_model n_head=n_head dropout_rate=attention_dropout name=name)<line_sep># (batch_size, d_model)
# print("n_block = %s" % n_block)
# print("attn_output.shape = %s" % str(attn_output.shape))
attn_output=layers.reshape(attn_output shape=[-1 n_block d_model])<line_sep># print("attn_output.shape = %s" % str(attn_output.shape))
pooling_output=layers.dropout(attn_output dropout_prob=attention_dropout dropout_implementation="upscale_in_train" is_test=<false>)<line_sep><return>pooling_output<block_end><def_stmt>graph_encoder_layer enc_input # (batch_size, n_block, emb_dim)
attn_bias # (batch_size, n_head, n_block, n_block)
graph_attn_bias # (batch_size, n_head, n_block, n_block)
pos_win n_head d_key d_value d_model d_inner_hid prepostprocess_dropout attention_dropout relu_dropout hidden_act preprocess_cmd="n" postprocess_cmd="da" param_initializer=<none> name=''<block_start>"""
:param enc_input: (batch_size, n_blocks, emb_dim)
:param attn_bias: (batch_size, n_head, n_blocks, n_blocks)
:param graph_attn_bias: (batch_size, n_head, n_blocks, n_blocks)
"""<line_sep>#layers.Print(enc_input, message="enc_intput ", summarize=-1)
#layers.Print(attn_bias, message="attn_bias", summarize=-1)
#layers.Print(graph_attn_bias, message="graph_attn_bias", summarize=-1)
# (batch_size, n_block, d_model)
attn_output=multi_head_structure_attention(queries=pre_process_layer(out=enc_input # add layer normalization
process_cmd=preprocess_cmd dropout_rate=prepostprocess_dropout name=name+'_pre_attn') keys=<none> values=<none> attn_bias=attn_bias graph_attn_bias=graph_attn_bias pos_win=pos_win d_key=d_key d_value=d_value d_model=d_model n_head=n_head dropout_rate=attention_dropout name=name+'_graph_attn')<line_sep># add dropout and residual connection
attn_output=post_process_layer(prev_out=enc_input out=attn_output process_cmd=postprocess_cmd dropout_rate=prepostprocess_dropout name=name+'_post_attn')<line_sep>ffd_output=positionwise_feed_forward(x=pre_process_layer(out=attn_output # add layer normalization
process_cmd=preprocess_cmd dropout_rate=prepostprocess_dropout name=name+'_pre_ffn') d_inner_hid=d_inner_hid d_hid=d_model dropout_rate=relu_dropout hidden_act=hidden_act param_initializer=param_initializer name=name+'_ffn')<line_sep><return>post_process_layer(prev_out=attn_output # add dropout and residual connection
out=ffd_output process_cmd=postprocess_cmd dropout_rate=prepostprocess_dropout name=name+'_post_ffn')<block_end><def_stmt>graph_encoder enc_words_output src_words_slf_attn_bias src_sents_slf_attn_bias graph_attn_bias cls_ids pos_win graph_layers n_head d_key d_value d_model d_inner_hid prepostprocess_dropout attention_dropout relu_dropout hidden_act preprocess_cmd="n" postprocess_cmd="da" param_initializer=<none> name='graph_encoder'<block_start>"""
:param enc_words_output: # (batch_size, n_tokens, emb_dim)
:param src_words_slf_attn_bias: (batch_size, n_head, n_tokens, n_tokens)
:param src_sents_slf_attn_bias: (batch_size, n_head, n_block, n_block)
:param graph_attn_bias: (batch_size, n_head, n_block, n_block)
:param cls_ids: (batch_size, n_block, 2)
:return:
"""<line_sep>sents_vec=layers.gather_nd(enc_words_output cls_ids)<line_sep>enc_input=sents_vec# (batch_size, n_block, d_model)
<for_stmt>i range(graph_layers)# (batch_size, n_block, emb_dim)
<block_start>enc_output=graph_encoder_layer(enc_input=enc_input # (batch_size, n_block, emb_dim)
attn_bias=src_sents_slf_attn_bias # (batch_size, n_head, n_block, n_block)
graph_attn_bias=graph_attn_bias # (batch_size, n_head, n_block, n_block)
pos_win=pos_win n_head=n_head d_key=d_key d_value=d_value d_model=d_model d_inner_hid=d_inner_hid prepostprocess_dropout=prepostprocess_dropout attention_dropout=attention_dropout relu_dropout=relu_dropout hidden_act=hidden_act preprocess_cmd=preprocess_cmd postprocess_cmd=postprocess_cmd param_initializer=param_initializer name=name+'_layer_'+str(i))<line_sep>enc_input=enc_output<block_end># (batch_size, n_block, emb_dim)
# add layer normalization
enc_output=pre_process_layer(out=enc_output process_cmd=preprocess_cmd dropout_rate=prepostprocess_dropout name=name+'_post')<line_sep><return>enc_output<block_end># (batch_size, n_block, emb_dim)
<def_stmt>pretrained_graph_encoder sents_vec src_sents_slf_attn_bias graph_attn_bias pos_win graph_layers n_head d_key d_value d_model d_inner_hid prepostprocess_dropout attention_dropout relu_dropout hidden_act preprocess_cmd="n" postprocess_cmd="da" param_initializer=<none> name='pretrained_graph_encoder'<block_start>"""
:param sents_vec: # (batch_size, n_blocks, emb_dim)
:param src_sents_slf_attn_bias: (batch_size, n_head, n_block, n_block)
:param graph_attn_bias: (batch_size, n_head, n_block, n_block)
:return:
"""<line_sep>enc_input=sents_vec# (batch_size, n_block, d_model)
<for_stmt>i range(graph_layers)# (batch_size, n_block, emb_dim)
<block_start>enc_output=graph_encoder_layer(enc_input=enc_input # (batch_size, n_block, emb_dim)
attn_bias=src_sents_slf_attn_bias # (batch_size, n_head, n_block, n_block)
graph_attn_bias=graph_attn_bias # (batch_size, n_head, n_block, n_block)
pos_win=pos_win n_head=n_head d_key=d_key d_value=d_value d_model=d_model d_inner_hid=d_inner_hid prepostprocess_dropout=prepostprocess_dropout attention_dropout=attention_dropout relu_dropout=relu_dropout hidden_act=hidden_act preprocess_cmd=preprocess_cmd postprocess_cmd=postprocess_cmd param_initializer=param_initializer name=name+'_layer_'+str(i))<line_sep>enc_input=enc_output<block_end># (batch_size, n_block, emb_dim)
# add layer normalization
enc_output=pre_process_layer(out=enc_output process_cmd=preprocess_cmd dropout_rate=prepostprocess_dropout name=name+'_post')<line_sep><return>enc_output<block_end># (batch_size, n_block, emb_dim)
|
<import_stmt>os<import_stmt>unittest<import_from_stmt>tempfile TemporaryDirectory<import_from_stmt>sklearn.datasets fetch_california_housing<import_from_stmt>flaml AutoML<import_from_stmt>flaml.training_log training_log_reader<class_stmt>TestTrainingLog(unittest.TestCase)<block_start><def_stmt>test_training_log self path="test_training_log.log" estimator_list="auto"<block_start><with_stmt>TemporaryDirectory()<as>d<block_start>filename=os.path.join(d path)<line_sep># Run a simple job.
automl=AutoML()<line_sep>automl_settings={"time_budget":1 "metric":"mse" "task":"regression" "log_file_name":filename "log_training_metric":<true> "mem_thres":1024<times>1024 "n_jobs":1 "model_history":<true> "train_time_limit":0.1 "verbose":3 # "ensemble": True,
"keep_search_state":<true> "estimator_list":estimator_list }<line_sep>X_train,y_train=fetch_california_housing(return_X_y=<true>)<line_sep>automl.fit(X_train=X_train y_train=y_train **automl_settings)<line_sep># Check if the training log file is populated.
self.assertTrue(os.path.exists(filename))<if_stmt>automl.best_estimator<block_start>estimator,config=automl.best_estimator automl.best_config<line_sep>model0=automl.best_model_for_estimator(estimator)<line_sep>print(model0.params["n_estimators"] config)<line_sep># train on full data with no time limit
automl._state.time_budget=<none><line_sep>model,_=automl._state._train_with_config(estimator config)<line_sep># assuming estimator & config are saved and loaded as follows
automl=AutoML()<line_sep>automl.fit(X_train=X_train y_train=y_train max_iter=1 task="regression" estimator_list=[estimator] n_jobs=1 starting_points={estimator:config} )<line_sep>print(automl.best_config)<line_sep># then the fitted model should be equivalent to model
<assert_stmt>(str(model.estimator)<eq>str(automl.model.estimator)<or>estimator<eq>"xgboost"<and>str(model.estimator.get_dump())<eq>str(automl.model.estimator.get_dump())<or>estimator<eq>"catboost"<and>str(model.estimator.get_all_params())<eq>str(automl.model.estimator.get_all_params()))<with_stmt>training_log_reader(filename)<as>reader<block_start>count=0<for_stmt>record reader.records()<block_start>print(record)<line_sep>count<augadd>1<block_end>self.assertGreater(count 0)<block_end><block_end>automl_settings["log_file_name"]=<none><line_sep>automl.fit(X_train=X_train y_train=y_train **automl_settings)<line_sep>automl._selected.update(<none> 0)<line_sep>automl=AutoML()<line_sep>automl.fit(X_train=X_train y_train=y_train max_iter=0 task="regression")<block_end><block_end><def_stmt>test_illfilename self<block_start><try_stmt><block_start>self.test_training_log("/")<block_end><except_stmt>IsADirectoryError<block_start>print("IsADirectoryError happens as expected in linux.")<block_end><except_stmt>PermissionError<block_start>print("PermissionError happens as expected in windows.")<block_end><block_end><def_stmt>test_each_estimator self<block_start>self.test_training_log(estimator_list=["xgboost"])<line_sep>self.test_training_log(estimator_list=["catboost"])<line_sep>self.test_training_log(estimator_list=["extra_tree"])<line_sep>self.test_training_log(estimator_list=["rf"])<line_sep>self.test_training_log(estimator_list=["lgbm"])<block_end><block_end>
|
"""Utilities for with-statement contexts. See PEP 343.
Original source code: https://hg.python.org/cpython/file/3.4/Lib/contextlib.py
Not implemented:
- redirect_stdout;
"""<import_stmt>sys<import_from_stmt>collections deque<import_from_stmt>ucontextlib *<class_stmt>closing(object)<block_start>"""Context to automatically close something at the end of a block.
Code like this:
with closing(<module>.open(<arguments>)) as f:
<block>
is equivalent to this:
f = <module>.open(<arguments>)
try:
<block>
finally:
f.close()
"""<def_stmt>__init__ self thing<block_start>self.thing=thing<block_end><def_stmt>__enter__ self<block_start><return>self.thing<block_end><def_stmt>__exit__ self *exc_info<block_start>self.thing.close()<block_end><block_end><class_stmt>suppress<block_start>"""Context manager to suppress specified exceptions
After the exception is suppressed, execution proceeds with the next
statement following the with statement.
with suppress(FileNotFoundError):
os.remove(somefile)
# Execution still resumes here if the file was already removed
"""<def_stmt>__init__ self *exceptions<block_start>self._exceptions=exceptions<block_end><def_stmt>__enter__ self<block_start><pass><block_end><def_stmt>__exit__ self exctype excinst exctb# Unlike isinstance and issubclass, CPython exception handling
# currently only looks at the concrete type hierarchy (ignoring
# the instance and subclass checking hooks). While Guido considers
# that a bug rather than a feature, it's a fairly hard one to fix
# due to various internal implementation details. suppress provides
# the simpler issubclass based semantics, rather than trying to
# exactly reproduce the limitations of the CPython interpreter.
#
# See http://bugs.python.org/issue12029 for more details
<block_start><return>exctype<is><not><none><and>issubclass(exctype self._exceptions)<block_end><block_end># Inspired by discussions on http://bugs.python.org/issue13585
<class_stmt>ExitStack(object)<block_start>"""Context manager for dynamic management of a stack of exit callbacks
For example:
with ExitStack() as stack:
files = [stack.enter_context(open(fname)) for fname in filenames]
# All opened files will automatically be closed at the end of
# the with statement, even if attempts to open files later
# in the list raise an exception
"""<def_stmt>__init__ self<block_start>self._exit_callbacks=deque()<block_end><def_stmt>pop_all self<block_start>"""Preserve the context stack by transferring it to a new instance"""<line_sep>new_stack=type(self)()<line_sep>new_stack._exit_callbacks=self._exit_callbacks<line_sep>self._exit_callbacks=deque()<line_sep><return>new_stack<block_end><def_stmt>_push_cm_exit self cm cm_exit<block_start>"""Helper to correctly register callbacks to __exit__ methods"""<def_stmt>_exit_wrapper *exc_details<block_start><return>cm_exit(cm *exc_details)<block_end>self.push(_exit_wrapper)<block_end><def_stmt>push self exit<block_start>"""Registers a callback with the standard __exit__ method signature
Can suppress exceptions the same way __exit__ methods can.
Also accepts any object with an __exit__ method (registering a call
to the method instead of the object itself)
"""<line_sep># We use an unbound method rather than a bound method to follow
# the standard lookup behaviour for special methods
_cb_type=type(exit)<try_stmt><block_start>exit_method=_cb_type.__exit__<block_end><except_stmt>AttributeError# Not a context manager, so assume it's a callable
<block_start>self._exit_callbacks.append(exit)<block_end><else_stmt><block_start>self._push_cm_exit(exit exit_method)<block_end><return>exit<block_end># Allow use as a decorator
<def_stmt>callback self callback *args **kwds<block_start>"""Registers an arbitrary callback and arguments.
Cannot suppress exceptions.
"""<def_stmt>_exit_wrapper exc_type exc tb<block_start>callback(*args **kwds)<block_end>self.push(_exit_wrapper)<line_sep><return>callback<block_end># Allow use as a decorator
<def_stmt>enter_context self cm<block_start>"""Enters the supplied context manager
If successful, also pushes its __exit__ method as a callback and
returns the result of the __enter__ method.
"""<line_sep># We look up the special methods on the type to match the with statement
_cm_type=type(cm)<line_sep>_exit=_cm_type.__exit__<line_sep>result=_cm_type.__enter__(cm)<line_sep>self._push_cm_exit(cm _exit)<line_sep><return>result<block_end><def_stmt>close self<block_start>"""Immediately unwind the context stack"""<line_sep>self.__exit__(<none> <none> <none>)<block_end><def_stmt>__enter__ self<block_start><return>self<block_end><def_stmt>__exit__ self *exc_details<block_start>received_exc=exc_details[0]<is><not><none><line_sep># Callbacks are invoked in LIFO order to match the behaviour of
# nested context managers
suppressed_exc=<false><line_sep>pending_raise=<false><while_stmt>self._exit_callbacks<block_start>cb=self._exit_callbacks.pop()<try_stmt><block_start><if_stmt>cb(*exc_details)<block_start>suppressed_exc=<true><line_sep>pending_raise=<false><line_sep>exc_details=(<none> <none> <none>)<block_end><block_end><except_stmt><block_start>exc_details=sys.exc_info()<line_sep>pending_raise=<true><block_end><block_end><if_stmt>pending_raise<block_start><raise>exc_details[1]<block_end><return>received_exc<and>suppressed_exc<block_end><block_end>
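# Usage sketch (an assumed example mirroring the class docstring): mixing
# enter_context() for context managers with callback() for plain functions; the
# file name is hypothetical.
# with ExitStack() as stack:
#     f = stack.enter_context(open('data.txt'))
#     stack.callback(print, 'all cleanups ran')
#     # f is closed and the callback fires when the with-block exits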
|
example_string='我是字符串'<line_sep>example_list=['我' '是' '列' '表']<line_sep>example_tuple=('我' '是' '元' '组')<line_sep>print('1. Take the first element >' example_string[0] example_list[0] example_tuple[0])<line_sep>print('2. Take the element at index 2 (the third element) >' example_string[2] example_list[2] example_tuple[2])<line_sep>print('3. Take the last element >' example_string[-1] example_list[-1] example_tuple[-1])<line_sep>print('4. Take the second-to-last element >' example_string[-2] example_list[-2] example_tuple[-2])<line_sep>print('5. Slice 0:1 >' example_string[0:1] example_list[0:1] example_tuple[0:1])<line_sep>print('6. Slice 0:2 >' example_string[0:2] example_list[0:2] example_tuple[0:2])<line_sep>print('7. Slice 2:4 >' example_string[2:4] example_list[2:4] example_tuple[2:4])<line_sep>print('8. Slice from the first element up to index 1 >' example_string[:2] example_list[:2] example_tuple[:2])<line_sep>print('9. Slice from index 1 to the end >' example_string[1:] example_list[1:] example_tuple[1:])<line_sep>print('10. Slice off the last element >' example_string[:-1] example_list[:-1] example_tuple[:-1])<line_sep>print('11. Slice off the last two elements >' example_string[:-2] example_list[:-2] example_tuple[:-2])<line_sep>print('12. Take every second element >' example_string[::2] example_list[::2] example_tuple[::2])<line_sep>print('13. Reverse the string, list and tuple >' example_string[::-1] example_list[::-1] example_tuple[::-1])<line_sep># string_1 = '你好'
# string_2 = '世界'
# string_3 = string_1 + string_2
# print(string_3)
#
#
# list_1 = ['abc', 'xyz']
# list_2 = ['哈哈哈哈', '嘿嘿嘿黑']
# list_3 = list_1 + list_2
# print(list_3)
#
# existed_list = [1, 2, 3]
# existed_list[1] = '新的值'
# print(existed_list)
#
# list_4 = ['Python', '爬虫']
# print(list_4)
# list_4.append('一')
# print(list_4)
# list_4.append('酷')
# print(list_4)
|
<import_stmt>torch<import_stmt>numpy<as>np<import_from_stmt>torch.autograd Variable<import_stmt>torch.optim<as>optim<import_stmt>argparse<import_stmt>random<import_stmt>os<import_stmt>models<import_stmt>torchvision.utils<as>vutils<import_stmt>utils<import_stmt>nyuDataLoader<as>dataLoader_nyu<import_stmt>dataLoader<as>dataLoader_ours<import_stmt>torch.nn<as>nn<import_from_stmt>torch.utils.data DataLoader<import_stmt>torch.nn.functional<as>F<import_stmt>wrapperBRDF<as>wcg<import_stmt>wrapperNYU<as>wnyu<import_stmt>scipy.io<as>io<import_stmt>os.path<as>osp<line_sep>parser=argparse.ArgumentParser()<line_sep># The location of the training set
parser.add_argument('--dataRoot' default=<none> help='path to input images')<line_sep>parser.add_argument('--NYURoot' default=<none> help='path to the NYU dataset')<line_sep>parser.add_argument('--experimentBRDF' default=<none> help='path to the model for BRDF prediction')<line_sep>parser.add_argument('--experiment' default=<none> help='the path to store samples and models')<line_sep># The basic training setting
parser.add_argument('--nepochBRDF' type=int default=14 help='the number of epochs for BRDF prediction')<line_sep>parser.add_argument('--nepoch' type=int default=2 help='the number of epochs for training')<line_sep>parser.add_argument('--batchSize' type=int default=8 help='input batch size')<line_sep>parser.add_argument('--imHeight' type=int default=240 help='the height / width of the input image to network')<line_sep>parser.add_argument('--imWidth' type=int default=320 help='the height / width of the input image to network')<line_sep>parser.add_argument('--cuda' action='store_true' help='enables cuda')<line_sep>parser.add_argument('--deviceIds' type=int nargs='+' default=[0 1] help='the gpus used for training network')<line_sep># The training weight
parser.add_argument('--albedoWeight' type=float default=0.75 help='the weight for the diffuse component')<line_sep>parser.add_argument('--normalWeight' type=float default=0.5 help='the weight for the normal component')<line_sep>parser.add_argument('--roughWeight' type=float default=0.25 help='the weight for the roughness component')<line_sep>parser.add_argument('--depthWeight' type=float default=0.25 help='the weight for depth component')<line_sep># The training weight on NYU
parser.add_argument('--normalNYUWeight' type=float default=4.5 help='the weight for the normal component')<line_sep>parser.add_argument('--depthNYUWeight' type=float default=4.5 help='the weight for depth component')<line_sep># Cascade Level
parser.add_argument('--cascadeLevel' type=int default=0 help='the cascade level')<line_sep># The detail network setting
opt=parser.parse_args()<line_sep>print(opt)<line_sep>opt.gpuId=opt.deviceIds[0]<line_sep>torch.multiprocessing.set_sharing_strategy('file_system')<if_stmt>opt.experiment<is><none><block_start>opt.experiment='check_cascadeNYU%d'%opt.cascadeLevel<block_end>os.system('mkdir {0}'.format(opt.experiment))<line_sep>os.system('cp *.py %s'%opt.experiment)<if_stmt>opt.experimentBRDF<is><none><block_start>opt.experimentBRDF='check_cascade0_w%d_h%d'%(opt.imWidth opt.imHeight)<block_end>albeW,normW=opt.albedoWeight opt.normalWeight<line_sep>rougW=opt.roughWeight<line_sep>deptW=opt.depthWeight<line_sep>normNYUW=opt.normalNYUWeight<line_sep>depthNYUW=opt.depthNYUWeight<line_sep>opt.seed=0<line_sep>print("Random Seed: " opt.seed)<line_sep>random.seed(opt.seed)<line_sep>torch.manual_seed(opt.seed)<if_stmt>torch.cuda.is_available()<and><not>opt.cuda<block_start>print("WARNING: You have a CUDA device, so you should probably run with --cuda")<block_end>####################################
# Initial Network
encoder=models.encoder0(cascadeLevel=opt.cascadeLevel)<line_sep>albedoDecoder=models.decoder0(mode=0)<line_sep>normalDecoder=models.decoder0(mode=1)<line_sep>roughDecoder=models.decoder0(mode=2)<line_sep>depthDecoder=models.decoder0(mode=4)<line_sep>####################################################################
#########################################
encoder.load_state_dict(torch.load('{0}/encoder{1}_{2}.pth'.format(opt.experimentBRDF opt.cascadeLevel opt.nepochBRDF-1)).state_dict())<line_sep>albedoDecoder.load_state_dict(torch.load('{0}/albedo{1}_{2}.pth'.format(opt.experimentBRDF opt.cascadeLevel opt.nepochBRDF-1)).state_dict())<line_sep>normalDecoder.load_state_dict(torch.load('{0}/normal{1}_{2}.pth'.format(opt.experimentBRDF opt.cascadeLevel opt.nepochBRDF-1)).state_dict())<line_sep>roughDecoder.load_state_dict(torch.load('{0}/rough{1}_{2}.pth'.format(opt.experimentBRDF opt.cascadeLevel opt.nepochBRDF-1)).state_dict())<line_sep>depthDecoder.load_state_dict(torch.load('{0}/depth{1}_{2}.pth'.format(opt.experimentBRDF opt.cascadeLevel opt.nepochBRDF-1)).state_dict())<line_sep>lr_scale=0.5<line_sep>#########################################
encoder=nn.DataParallel(encoder device_ids=opt.deviceIds)<line_sep>albedoDecoder=nn.DataParallel(albedoDecoder device_ids=opt.deviceIds)<line_sep>normalDecoder=nn.DataParallel(normalDecoder device_ids=opt.deviceIds)<line_sep>roughDecoder=nn.DataParallel(roughDecoder device_ids=opt.deviceIds)<line_sep>depthDecoder=nn.DataParallel(depthDecoder device_ids=opt.deviceIds)<line_sep>############## ######################
# Send things into GPU
<if_stmt>opt.cuda<block_start>encoder=encoder.cuda(opt.gpuId)<line_sep>albedoDecoder=albedoDecoder.cuda(opt.gpuId)<line_sep>normalDecoder=normalDecoder.cuda(opt.gpuId)<line_sep>roughDecoder=roughDecoder.cuda(opt.gpuId)<line_sep>depthDecoder=depthDecoder.cuda(opt.gpuId)<block_end>####################################
####################################
# Optimizer
opEncoder=optim.Adam(encoder.parameters() lr=1e-4<times>lr_scale betas=(0.5 0.999))<line_sep>opAlbedo=optim.Adam(albedoDecoder.parameters() lr=1e-4<times>lr_scale betas=(0.5 0.999))<line_sep>opNormal=optim.Adam(normalDecoder.parameters() lr=1e-4<times>lr_scale betas=(0.5 0.999))<line_sep>opRough=optim.Adam(roughDecoder.parameters() lr=1e-4<times>lr_scale betas=(0.5 0.999))<line_sep>opDepth=optim.Adam(depthDecoder.parameters() lr=1e-4<times>lr_scale betas=(0.5 0.999))<line_sep>#####################################
####################################
brdfDataset=dataLoader_ours.BatchLoader(opt.dataRoot imWidth=opt.imWidth imHeight=opt.imHeight cascadeLevel=0 isLight=<false>)<line_sep>NYUDataset=dataLoader_nyu.NYULoader(imRoot=osp.join(opt.NYURoot 'images') normalRoot=osp.join(opt.NYURoot 'normals') depthRoot=osp.join(opt.NYURoot 'depths') segRoot=osp.join(opt.NYURoot 'masks') imHeight=opt.imHeight imWidth=opt.imWidth phase='TRAIN')<line_sep>trainDataset=dataLoader_nyu.ConcatDataset(brdfDataset NYUDataset)<line_sep>brdfLoader=DataLoader(trainDataset batch_size=opt.batchSize num_workers=6 shuffle=<true>)<line_sep>j=0<line_sep># BRDF loss records
albedoErrsNpList=np.ones([1 1] dtype=np.float32)<line_sep>normalErrsNpList=np.ones([1 1] dtype=np.float32)<line_sep>roughErrsNpList=np.ones([1 1] dtype=np.float32)<line_sep>depthErrsNpList=np.ones([1 1] dtype=np.float32)<line_sep>normalNYUErrsNpList=np.ones([1 1] dtype=np.float32)<line_sep>angleNYUErrsNpList=np.ones([1 1] dtype=np.float32)<line_sep>depthNYUErrsNpList=np.ones([1 1] dtype=np.float32)<for_stmt>epoch list(range(0 opt.nepoch))<block_start>trainingLog=open('{0}/trainingLog_{1}.txt'.format(opt.experiment epoch) 'w')<for_stmt>i,trainBatch enumerate(brdfLoader)<block_start>j<augadd>1<line_sep>dataBatch=trainBatch[0]<line_sep>NYUBatch=trainBatch[1]<line_sep>#####################################################################################################################
############################################# Train with CGBRDF dataset #############################################
#####################################################################################################################
# Clear the gradient in optimizer
opEncoder.zero_grad()<line_sep>opAlbedo.zero_grad()<line_sep>opNormal.zero_grad()<line_sep>opRough.zero_grad()<line_sep>opDepth.zero_grad()<line_sep>albedoPair,normalPair,roughPair,depthPair=wcg.wrapperBRDF(dataBatch opt encoder albedoDecoder normalDecoder roughDecoder depthDecoder)<line_sep>albedoPred,albedoErr=albedoPair[0] albedoPair[1]<line_sep>normalPred,normalErr=normalPair[0] normalPair[1]<line_sep>roughPred,roughErr=roughPair[0] roughPair[1]<line_sep>depthPred,depthErr=depthPair[0] depthPair[1]<line_sep># Back propagate the gradients
totalErr=4<times>albeW<times>albedoErr+normW<times>normalErr+rougW<times>roughErr+deptW<times>depthErr<line_sep>totalErr.backward()<line_sep># Update the network parameter
opEncoder.step()<line_sep>opAlbedo.step()<line_sep>opNormal.step()<line_sep>opRough.step()<line_sep>opDepth.step()<line_sep># Output training error
utils.writeErrToScreen('albedo' [albedoErr] epoch j)<line_sep>utils.writeErrToScreen('normal' [normalErr] epoch j)<line_sep>utils.writeErrToScreen('rough' [roughErr] epoch j)<line_sep>utils.writeErrToScreen('depth' [depthErr] epoch j)<line_sep>utils.writeErrToFile('albedo' [albedoErr] trainingLog epoch j)<line_sep>utils.writeErrToFile('normal' [normalErr] trainingLog epoch j)<line_sep>utils.writeErrToFile('rough' [roughErr] trainingLog epoch j)<line_sep>utils.writeErrToFile('depth' [depthErr] trainingLog epoch j)<line_sep>albedoErrsNpList=np.concatenate([albedoErrsNpList utils.turnErrorIntoNumpy([albedoErr])] axis=0)<line_sep>normalErrsNpList=np.concatenate([normalErrsNpList utils.turnErrorIntoNumpy([normalErr])] axis=0)<line_sep>roughErrsNpList=np.concatenate([roughErrsNpList utils.turnErrorIntoNumpy([roughErr])] axis=0)<line_sep>depthErrsNpList=np.concatenate([depthErrsNpList utils.turnErrorIntoNumpy([depthErr])] axis=0)<if_stmt>j<l>1000<block_start>utils.writeNpErrToScreen('albedoAccu' np.mean(albedoErrsNpList[1:j+1 :] axis=0) epoch j)<line_sep>utils.writeNpErrToScreen('normalAccu' np.mean(normalErrsNpList[1:j+1 :] axis=0) epoch j)<line_sep>utils.writeNpErrToScreen('roughAccu' np.mean(roughErrsNpList[1:j+1 :] axis=0) epoch j)<line_sep>utils.writeNpErrToScreen('depthAccu' np.mean(depthErrsNpList[1:j+1 :] axis=0) epoch j)<line_sep>utils.writeNpErrToFile('albedoAccu' np.mean(albedoErrsNpList[1:j+1 :] axis=0) trainingLog epoch j)<line_sep>utils.writeNpErrToFile('normalAccu' np.mean(normalErrsNpList[1:j+1 :] axis=0) trainingLog epoch j)<line_sep>utils.writeNpErrToFile('roughAccu' np.mean(roughErrsNpList[1:j+1 :] axis=0) trainingLog epoch j)<line_sep>utils.writeNpErrToFile('depthAccu' np.mean(depthErrsNpList[1:j+1 :] axis=0) trainingLog epoch j)<block_end><else_stmt><block_start>utils.writeNpErrToScreen('albedoAccu' np.mean(albedoErrsNpList[j-999:j+1 :] axis=0) epoch j)<line_sep>utils.writeNpErrToScreen('normalAccu' np.mean(normalErrsNpList[j-999:j+1 :] axis=0) epoch j)<line_sep>utils.writeNpErrToScreen('roughAccu' np.mean(roughErrsNpList[j-999:j+1 :] axis=0) epoch j)<line_sep>utils.writeNpErrToScreen('depthAccu' np.mean(depthErrsNpList[j-999:j+1 :] axis=0) epoch j)<line_sep>utils.writeNpErrToFile('albedoAccu' np.mean(albedoErrsNpList[j-999:j+1 :] axis=0) trainingLog epoch j)<line_sep>utils.writeNpErrToFile('normalAccu' np.mean(normalErrsNpList[j-999:j+1 :] axis=0) trainingLog epoch j)<line_sep>utils.writeNpErrToFile('roughAccu' np.mean(roughErrsNpList[j-999:j+1 :] axis=0) trainingLog epoch j)<line_sep>utils.writeNpErrToFile('depthAccu' np.mean(depthErrsNpList[j-999:j+1 :] axis=0) trainingLog epoch j)<block_end><if_stmt>j<eq>1<or>j%2000<eq>0# Save the predicted results
<block_start>vutils.save_image(((albedoPred)<power>(1.0/2.2)).data '{0}/{1}_albedoPred_{2}.png'.format(opt.experiment j 0))<line_sep>vutils.save_image((0.5<times>(normalPred+1)).data '{0}/{1}_normalPred_{2}.png'.format(opt.experiment j 0))<line_sep>vutils.save_image((0.5<times>(roughPred+1)).data '{0}/{1}_roughPred_{2}.png'.format(opt.experiment j 0))<line_sep>depthOut=1/torch.clamp(depthPred+1 1e-6 10)<line_sep>vutils.save_image((depthOut).data '{0}/{1}_depthPred_{2}.png'.format(opt.experiment j 0))<block_end>##############################################################################################################
######################################## Train with NYU dataset ##############################################
##############################################################################################################
# Clear the gradient in optimizer
opEncoder.zero_grad()<line_sep>opAlbedo.zero_grad()<line_sep>opNormal.zero_grad()<line_sep>opRough.zero_grad()<line_sep>opDepth.zero_grad()<line_sep>albedoPair,normalPair,roughPair,depthPair=wnyu.wrapperNYU(NYUBatch opt encoder albedoDecoder normalDecoder roughDecoder depthDecoder)<line_sep>albedoPred=albedoPair[0]<line_sep>normalPred,normalErr,angleErr=normalPair[0] normalPair[1] normalPair[2]<line_sep>roughPred=roughPair[0]<line_sep>depthPred,depthErr=depthPair[0] depthPair[1]<line_sep>totalErr=normNYUW<times>normalErr+depthNYUW<times>depthErr<line_sep>totalErr.backward()<line_sep># Update the network parameter
opEncoder.step()<line_sep>opAlbedo.step()<line_sep>opNormal.step()<line_sep>opRough.step()<line_sep>opDepth.step()<line_sep># Output training error
utils.writeErrToScreen('normalNYU' [normalErr] epoch j)<line_sep>utils.writeErrToScreen('angleNYU' [angleErr] epoch j)<line_sep>utils.writeErrToScreen('depthNYU' [depthErr] epoch j)<line_sep>utils.writeErrToFile('normalNYU' [normalErr] trainingLog epoch j)<line_sep>utils.writeErrToFile('angleNYU' [angleErr] trainingLog epoch j)<line_sep>utils.writeErrToFile('depthNYU' [depthErr] trainingLog epoch j)<line_sep>normalNYUErrsNpList=np.concatenate([normalNYUErrsNpList utils.turnErrorIntoNumpy([normalErr])] axis=0)<line_sep>angleNYUErrsNpList=np.concatenate([angleNYUErrsNpList utils.turnErrorIntoNumpy([angleErr])] axis=0)<line_sep>depthNYUErrsNpList=np.concatenate([depthNYUErrsNpList utils.turnErrorIntoNumpy([depthErr])] axis=0)<if_stmt>j<l>1000<block_start>utils.writeNpErrToScreen('normalAccuNYU' np.mean(normalNYUErrsNpList[1:j+1 :] axis=0) epoch j)<line_sep>utils.writeNpErrToScreen('angleAccuNYU' np.mean(angleNYUErrsNpList[1:j+1 :] axis=0) epoch j)<line_sep>utils.writeNpErrToScreen('depthAccuNYU' np.mean(depthNYUErrsNpList[1:j+1 :] axis=0) epoch j)<line_sep>utils.writeNpErrToFile('normalAccuNYU' np.mean(normalNYUErrsNpList[1:j+1 :] axis=0) trainingLog epoch j)<line_sep>utils.writeNpErrToFile('angleAccuNYU' np.mean(angleNYUErrsNpList[1:j+1 :] axis=0) trainingLog epoch j)<line_sep>utils.writeNpErrToFile('depthAccuNYU' np.mean(depthNYUErrsNpList[1:j+1 :] axis=0) trainingLog epoch j)<block_end><else_stmt><block_start>utils.writeNpErrToScreen('normalAccuNYU' np.mean(normalNYUErrsNpList[j-999:j+1 :] axis=0) epoch j)<line_sep>utils.writeNpErrToScreen('angleAccuNYU' np.mean(angleNYUErrsNpList[j-999:j+1 :] axis=0) epoch j)<line_sep>utils.writeNpErrToScreen('depthAccuNYU' np.mean(depthNYUErrsNpList[j-999:j+1 :] axis=0) epoch j)<line_sep>utils.writeNpErrToFile('normalAccuNYU' np.mean(normalNYUErrsNpList[j-999:j+1 :] axis=0) trainingLog epoch j)<line_sep>utils.writeNpErrToFile('angleAccuNYU' np.mean(angleNYUErrsNpList[j-999:j+1 :] axis=0) trainingLog epoch j)<line_sep>utils.writeNpErrToFile('depthAccuNYU' np.mean(depthNYUErrsNpList[j-999:j+1 :] axis=0) trainingLog epoch j)<block_end><if_stmt>j<eq>1<or>j%500<eq>0# Save the predicted results
<block_start>vutils.save_image(((albedoPred)<power>(1.0/2.2)).data '{0}/{1}_albedoPredNYU_{2}.png'.format(opt.experiment j 0))<line_sep>vutils.save_image((0.5<times>(normalPred+1)).data '{0}/{1}_normalPredNYU_{2}.png'.format(opt.experiment j 0))<line_sep>vutils.save_image((0.5<times>(roughPred+1)).data '{0}/{1}_roughPredNYU_{2}.png'.format(opt.experiment j 0))<line_sep>depthOut=1/torch.clamp(depthPred+1 1e-6 10)<line_sep>vutils.save_image((depthOut).data '{0}/{1}_depthPredNYU_{2}.png'.format(opt.experiment j 0))<block_end><if_stmt>j%2000<eq>0# save the models
<block_start>torch.save(encoder.module '{0}/encoder{1}_{2}_{3}.pth'.format(opt.experiment opt.cascadeLevel epoch j))<line_sep>torch.save(albedoDecoder.module '{0}/albedo{1}_{2}_{3}.pth'.format(opt.experiment opt.cascadeLevel epoch j))<line_sep>torch.save(normalDecoder.module '{0}/normal{1}_{2}_{3}.pth'.format(opt.experiment opt.cascadeLevel epoch j))<line_sep>torch.save(roughDecoder.module '{0}/rough{1}_{2}_{3}.pth'.format(opt.experiment opt.cascadeLevel epoch j))<line_sep>torch.save(depthDecoder.module '{0}/depth{1}_{2}_{3}.pth'.format(opt.experiment opt.cascadeLevel epoch j))<block_end>######################################################################################################################
<block_end>trainingLog.close()<line_sep># Decay the learning rate
<if_stmt>(epoch+1)%10<eq>0<block_start><for_stmt>param_group opEncoder.param_groups<block_start>param_group['lr']<augdiv>2<block_end><for_stmt>param_group opAlbedo.param_groups<block_start>param_group['lr']<augdiv>2<block_end><for_stmt>param_group opNormal.param_groups<block_start>param_group['lr']<augdiv>2<block_end><for_stmt>param_group opRough.param_groups<block_start>param_group['lr']<augdiv>2<block_end><for_stmt>param_group opDepth.param_groups<block_start>param_group['lr']<augdiv>2<block_end><block_end># Save the error record
np.save('{0}/albedoError_{1}.npy'.format(opt.experiment epoch) albedoErrsNpList)<line_sep>np.save('{0}/normalError_{1}.npy'.format(opt.experiment epoch) normalErrsNpList)<line_sep>np.save('{0}/roughError_{1}.npy'.format(opt.experiment epoch) roughErrsNpList)<line_sep>np.save('{0}/depthError_{1}.npy'.format(opt.experiment epoch) depthErrsNpList)<line_sep>np.save('{0}/normalNYUError_{1}.npy'.format(opt.experiment epoch) normalNYUErrsNpList)<line_sep>np.save('{0}/angleNYUError_{1}.npy'.format(opt.experiment epoch) angleNYUErrsNpList)<line_sep># save the models
torch.save(encoder.module '{0}/encoder{1}_{2}.pth'.format(opt.experiment opt.cascadeLevel epoch))<line_sep>torch.save(albedoDecoder.module '{0}/albedo{1}_{2}.pth'.format(opt.experiment opt.cascadeLevel epoch))<line_sep>torch.save(normalDecoder.module '{0}/normal{1}_{2}.pth'.format(opt.experiment opt.cascadeLevel epoch))<line_sep>torch.save(roughDecoder.module '{0}/rough{1}_{2}.pth'.format(opt.experiment opt.cascadeLevel epoch))<line_sep>torch.save(depthDecoder.module '{0}/depth{1}_{2}.pth'.format(opt.experiment opt.cascadeLevel epoch))<block_end>
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
<import_stmt>pickle<import_stmt>numpy<as>np<import_from_stmt>astropy.time Time<class_stmt>TestPickle<block_start>"""Basic pickle test of time"""<def_stmt>test_pickle self<block_start>times=['1999-01-01 00:00:00.123456789' '2010-01-01 00:00:00']<line_sep>t1=Time(times scale='utc')<for_stmt>prot range(pickle.HIGHEST_PROTOCOL)<block_start>t1d=pickle.dumps(t1 prot)<line_sep>t1l=pickle.loads(t1d)<assert_stmt>np.all(t1l<eq>t1)<block_end>t2=Time('2012-06-30 12:00:00' scale='utc')<for_stmt>prot range(pickle.HIGHEST_PROTOCOL)<block_start>t2d=pickle.dumps(t2 prot)<line_sep>t2l=pickle.loads(t2d)<assert_stmt>t2l<eq>t2<block_end><block_end><block_end>
|
<import_stmt>basic_container<class_stmt>Java9Container(basic_container.BasicContainer)<block_start><def_stmt>__init__ self<block_start>super(self.__class__ self).__init__()<line_sep>self.image="java:9"<line_sep>self.command='sh -c "javac main.java && java main"'<line_sep>self.file_extension=".java"<block_end><block_end>
|
#! /usr/bin/env python
<import_from_stmt>datetime datetime<import_stmt>inspect<import_stmt>logging<line_sep>logger=logging.getLogger(__name__)<line_sep>do_print=<false><def_stmt>log_init name<block_start><global>logger<line_sep>logger=logging.getLogger(name)<block_end><def_stmt>log_msg lgr_fn m<block_start>tstr=datetime.now().strftime("%H:%M:%S")<line_sep>msg="{}:{}:{} {}".format(inspect.stack()[2][1] inspect.stack()[2][2] tstr m)<line_sep>lgr_fn(msg)<if_stmt>do_print<block_start>print(msg)<block_end><block_end><def_stmt>log_error m<block_start>log_msg(logger.error m)<block_end><def_stmt>log_info m<block_start>log_msg(logger.info m)<block_end><def_stmt>log_warn m<block_start>log_msg(logger.warning m)<block_end><def_stmt>log_debug m<block_start>log_msg(logger.debug m)<block_end><def_stmt>set_print <block_start><global>do_print<line_sep>do_print=<true><block_end>
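# Illustrative usage sketch (an assumption, not part of the original module): the
# logger name 'demo' and the message text are placeholders; guarded so importing
# the module stays side-effect free.
<if_stmt>__name__<eq>'__main__'<block_start>log_init('demo')<line_sep>set_print()<line_sep>log_info('logging helpers initialized')<block_end>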
|
# -*- coding: utf-8 -*-
<import_from_stmt>django.conf.urls url<import_from_stmt>ralph.reports views<line_sep>urlpatterns=[url(r'^category_model_report/?$' views.CategoryModelReport.as_view() name='category_model_report') url(r'^category_model__status_report/?$' views.CategoryModelStatusReport.as_view() name='category_model__status_report') url(r'^manufactured_category_model_report/?$' views.ManufacturerCategoryModelReport.as_view() name='manufactured_category_model_report') url(r'^status_model_report/?$' views.StatusModelReport.as_view() name='status_model_report') url(r'^asset_relations/?$' views.AssetRelationsReport.as_view() name='asset-relations') url(r'^licence_relations/?$' views.LicenceRelationsReport.as_view() name='licence-relations') url(r'^failures_report/?$' views.FailureReport.as_view() name='failures-report') url(r'^supports_report/?$' views.AssetSupportsReport.as_view() name='assets-supports') ]<line_sep>
|
"""base class for user transforms"""<class_stmt>UserTransform<block_start>"""base class for user transforms, should express taking a set of k inputs to k outputs independently"""<def_stmt>__init__ self treatment<block_start>self.y_aware_=<true><line_sep>self.treatment_=treatment<line_sep>self.incoming_vars_=[]<line_sep>self.derived_vars_=[]<block_end># noinspection PyPep8Naming
<def_stmt>fit self X y<block_start>"""
sklearn API
:param X: explanatory values
:param y: dependent values
:return: self for method chaining
"""<line_sep><raise>NotImplementedError("base method called")<block_end># noinspection PyPep8Naming
<def_stmt>transform self X<block_start>"""
:param X: explanatory values
:return: transformed data
"""<line_sep><raise>NotImplementedError("base method called")<block_end># noinspection PyPep8Naming
<def_stmt>fit_transform self X y<block_start>"""
:param X: explanatory values
:param y: dependent values
:return: transformed data
"""<line_sep>self.fit(X y)<line_sep><return>self.transform(X)<block_end><def_stmt>__repr__ self<block_start><return>("vtreat.transform.UserTransform("+"treatment="+self.treatment_.__repr__()+") {"+"'y_aware_': "+str(self.y_aware_)+", "+"'treatment_': "+str(self.treatment_)+", "+"'incoming_vars_': "+str(self.incoming_vars_)+"}")<block_end><def_stmt>__str__ self<block_start><return>self.__repr__()<block_end><block_end>
|
<import_from_stmt>dataclasses dataclass field<import_stmt>dataclasses<import_from_stmt>typing Dict Mapping Sequence Union<import_from_stmt>rpcq.messages ParameterAref<line_sep>ParameterValue=Union[int float Sequence[int] Sequence[float]]<line_sep>@dataclass<class_stmt>Memory<block_start>"""
Memory encapsulates the values to be sent as parameters alongside a program at time of
execution, and read back afterwards.
"""<line_sep>values:Dict[ParameterAref Union[int float]]=field(default_factory=dict)<def_stmt>copy self<arrow>"Memory"<block_start>"""
Return a deep copy of this Memory object.
"""<line_sep><return>Memory(values={dataclasses.replace(k):v<for>k,v self.values.items()})<block_end><def_stmt>write self parameter_values:Mapping[Union[str ParameterAref] ParameterValue]<arrow>"Memory"<block_start>"""
Set the given values for the given parameters.
"""<for_stmt>parameter,parameter_value parameter_values.items()<block_start>self._write_value(parameter=parameter value=parameter_value)<block_end><return>self<block_end><def_stmt>_write_value self * parameter:Union[ParameterAref str] value:ParameterValue <arrow>"Memory"<block_start>"""
Mutate the program to set the given parameter value.
:param parameter: Name of the memory region, or parameter reference with offset.
:param value: the value or values to set for this parameter. If a list
is provided, parameter must be a ``str`` or ``parameter.index == 0``.
"""<if_stmt>isinstance(parameter str)<block_start>parameter=ParameterAref(name=parameter index=0)<block_end><import_stmt>numpy<as>np<if_stmt>isinstance(value (int float))<block_start>self.values[parameter]=value<block_end><elif_stmt>isinstance(value (Sequence np.ndarray))<block_start><if_stmt>parameter.index<ne>0<block_start><raise>ValueError("Parameter may not have a non-zero index when its value is a sequence")<block_end><for_stmt>index,v enumerate(value)<block_start><if_stmt><not>isinstance(v (int float))<block_start><raise>TypeError(f"Parameter must be numeric, not {type(value)}")<block_end>aref=ParameterAref(name=parameter.name index=index)<line_sep>self.values[aref]=v<block_end><block_end><else_stmt><block_start><raise>TypeError(f"Parameter must be numeric or an iterable of numeric values, not {type(value)}")<block_end><return>self<block_end><block_end>
|
<import_stmt>unittest<import_stmt>tensorflow<as>tf<if_stmt>tf.__version__<ge>'2.0'<block_start>tf=tf.compat.v1<block_end><import_from_stmt>akdl.runner.config BatchTaskConfig StreamTaskConfig TrainTaskConfig<def_stmt>print_dataset dataset:tf.data.Dataset<block_start>next_record=dataset.make_one_shot_iterator().get_next()<line_sep>counter=0<with_stmt>tf.Session()<as>sess<block_start><while_stmt><true><block_start><try_stmt><block_start>record=sess.run(next_record)<line_sep>example=tf.train.Example.FromString(record)<if_stmt>counter<l>10<block_start>print(example)<block_end>counter<augadd>1<block_end><except_stmt>tf.errors.OutOfRangeError<block_start><break><block_end><block_end><block_end>print("total examples: "+str(counter))<block_end><def_stmt>batch_main args:BatchTaskConfig<block_start>print_dataset(args.dataset)<block_end><def_stmt>stream_main args:StreamTaskConfig<block_start>print_dataset(args.dataset_fn())<block_end><def_stmt>train_main args:TrainTaskConfig<block_start>print_dataset(args.dataset)<block_end><class_stmt>TestConfig(unittest.TestCase)<block_start><def_stmt>test_batch_task_config self<block_start>tfrecords_path="dataset.tfrecords"<line_sep>batch_main(BatchTaskConfig(tf_context=<none> cluster=<none> dataset_length=<none> dataset=tf.data.TFRecordDataset(tfrecords_path) task_type='chief' task_index=0 num_workers=1 work_dir=<none> dataset_file=tfrecords_path user_params={} output_writer=<none>))<block_end><def_stmt>test_stream_task_config self<block_start>tfrecords_path="dataset.tfrecords"<line_sep>stream_main(StreamTaskConfig(tf_context=<none> cluster=<none> dataset_length=<none> dataset_fn=<lambda>:tf.data.TFRecordDataset(tfrecords_path) task_type='chief' task_index=0 num_workers=1 work_dir=<none> user_params={} output_writer=<none>))<block_end><def_stmt>test_train_task_config self<block_start>tfrecords_path="dataset.tfrecords"<line_sep>train_main(TrainTaskConfig(tf_context=<none> cluster=<none> dataset_length=<none> dataset=tf.data.TFRecordDataset(tfrecords_path) task_type='chief' task_index=0 num_workers=1 work_dir=<none> dataset_file=tfrecords_path user_params={} saved_model_dir=<none>))<block_end><block_end>
|
# coding: utf8
<import_from_future_stmt> unicode_literals<import_from_stmt>flask_api.settings APISettings<import_stmt>unittest<class_stmt>SettingsTests(unittest.TestCase)<block_start><def_stmt>test_bad_import self<block_start>settings=APISettings({'DEFAULT_PARSERS':'foobarz.FailedImport'})<with_stmt>self.assertRaises(ImportError)<as>context<block_start>settings.DEFAULT_PARSERS<block_end>msg=str(context.exception)<line_sep>excepted_py2=("Could not import 'foobarz.FailedImport' for API setting "<concat>"'DEFAULT_PARSERS'. No module named foobarz.")<line_sep>excepted_py3=("Could not import 'foobarz.FailedImport' for API setting "<concat>"'DEFAULT_PARSERS'. No module named 'foobarz'.")<line_sep>self.assertIn(msg (excepted_py2 excepted_py3))<block_end><block_end>
|
# Copyright (c) <NAME>, 2015
# See LICENSE for details.
"""
Responsible for getting the version and name from a project.
"""<import_stmt>sys<import_from_stmt>importlib import_module<import_from_stmt>incremental Version<def_stmt>_get_package package_dir package<block_start><try_stmt><block_start>module=import_module(package)<block_end><except_stmt>ImportError# Package is not already available / installed.
# Force importing it based on the source files.
<block_start>sys.path.insert(0 package_dir)<try_stmt><block_start>module=import_module(package)<block_end><except_stmt>ImportError<as>e<block_start>err=f"tried to import {package}, but ran into this error: {e}"<line_sep># NOTE: this might be redirected via "towncrier --draft > …".
print(f"ERROR: {err}")<line_sep><raise><block_end><finally_stmt><block_start>sys.path.pop(0)<block_end><block_end><return>module<block_end><def_stmt>get_version package_dir package<block_start>module=_get_package(package_dir package)<line_sep>version=getattr(module "__version__" <none>)<if_stmt><not>version<block_start><raise>Exception("No __version__, I don't know how else to look")<block_end><if_stmt>isinstance(version str)<block_start><return>version.strip()<block_end><if_stmt>isinstance(version Version)<block_start><return>version.base().strip()<block_end><if_stmt>isinstance(version tuple)<block_start><return>".".join(map(str version)).strip()<block_end><raise>Exception("I only know how to look at a __version__ that is a str, "<concat>"an Increment Version, or a tuple. If you can't provide "<concat>"that, use the --version argument and specify one.")<block_end><def_stmt>get_project_name package_dir package<block_start>module=_get_package(package_dir package)<line_sep>version=getattr(module "__version__" <none>)<if_stmt><not>version# welp idk
<block_start><return>package.title()<block_end><if_stmt>isinstance(version str)<block_start><return>package.title()<block_end><if_stmt>isinstance(version Version)# Incremental has support for package names
<block_start><return>version.package<block_end><block_end>
|
<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch.autograd Variable<class_stmt>CW(object)<block_start><def_stmt>__init__ self model device norm IsTargeted kappa lr init_const max_iter binary_search_steps data_name<block_start>self.net=model<line_sep>self.device=device<line_sep>self.IsTargeted=IsTargeted<line_sep>self.kappa=kappa#0
self.learning_rate=lr#0.2
self.init_const=init_const#0.01
self.lower_bound=0.0#0.0
self.upper_bound=1.0#1.0
self.max_iter=max_iter#200
self.norm=norm<line_sep>self.binary_search_steps=binary_search_steps#4
self.data_name=data_name<if_stmt>self.data_name<eq>'imagenet'<block_start>self.class_type_number=1000<block_end><else_stmt><block_start>self.class_type_number=10<block_end><if_stmt>self.data_name<eq>"cifar10"<and>self.IsTargeted<block_start><raise>AssertionError('cifar10 does not support targeted attacks')<block_end><if_stmt>self.norm<eq>np.inf<block_start><raise>AssertionError('the current CW implementation does not support Linf')<block_end><assert_stmt>self.norm<eq>2<block_end><def_stmt>atanh self x<block_start><return>0.5<times>torch.log((1+x)/(1-x))<block_end><def_stmt>forward self xs=<none> ys=<none> ytarget=<none><block_start>device=self.device<line_sep>targeted=self.IsTargeted<line_sep>copy_xs=xs.clone()<line_sep>copy_ys=ys.clone()<if_stmt>ytarget<is><not><none><block_start>copy_ytarget=ytarget.clone()<block_end><else_stmt><block_start>copy_ytarget=copy_ys<block_end># not actually used; just a placeholder value
batch_size=xs.shape[0]#10
mid_point=(self.upper_bound+self.lower_bound)<times>0.5#0.5
half_range=(self.upper_bound-self.lower_bound)<times>0.5#0.5
arctanh_xs=self.atanh((copy_xs-mid_point)/half_range<times>0.9999)#(10,3,32,32)
# var_xs = Variable(torch.from_numpy(arctanh_xs).to(device), requires_grad=True) #torch.Size([10, 3, 32, 32])
var_xs=arctanh_xs.clone()<line_sep>var_xs.requires_grad=<true><line_sep>const_origin=torch.ones(batch_size device=self.device)<times>self.init_const# tensor filled with init_const (0.01)
c_upper_bound=[1e10]<times>batch_size<line_sep>c_lower_bound=torch.zeros(batch_size device=self.device)<line_sep>targets_in_one_hot=[]<line_sep>targeteg_class_in_one_hot=[]<line_sep>temp_one_hot_matrix=torch.eye(int(self.class_type_number) device=self.device)<if_stmt>targeted<block_start><for_stmt>i range(batch_size)<block_start>current_target1=temp_one_hot_matrix[copy_ytarget[i]]<line_sep>targeteg_class_in_one_hot.append(current_target1)<block_end>targeteg_class_in_one_hot=torch.stack(targeteg_class_in_one_hot).clone().type_as(xs).to(self.device)#torch.Size([10, 10])
<block_end><else_stmt><block_start><for_stmt>i range(batch_size)<block_start>current_target=temp_one_hot_matrix[copy_ys[i]]<line_sep>targets_in_one_hot.append(current_target)<block_end>targets_in_one_hot=torch.stack(targets_in_one_hot).clone().type_as(xs).to(self.device)<block_end>#torch.Size([10, 10])
best_l2=[1e10]<times>batch_size<line_sep>best_perturbation=torch.zeros(var_xs.size())#(10, 3, 32, 32)
current_prediction_class=[-1]<times>batch_size<def_stmt>attack_achieved pre_softmax true_class target_class<block_start>targeted=self.IsTargeted<if_stmt>targeted<block_start>pre_softmax[target_class]<augsub>self.kappa<line_sep><return>torch.argmax(pre_softmax).item()<eq>target_class<block_end><else_stmt><block_start>pre_softmax[true_class]<augsub>self.kappa<line_sep><return>torch.argmax(pre_softmax).item()<ne>true_class<block_end><block_end><for_stmt>search_for_c range(self.binary_search_steps)<block_start>modifier=torch.zeros(var_xs.shape).float()<line_sep>modifier=Variable(modifier.to(device) requires_grad=<true>)<line_sep>optimizer=torch.optim.Adam([modifier] lr=self.learning_rate)<line_sep>var_const=const_origin.clone().to(device)<line_sep>print("\tbinary search step {}:".format(search_for_c))<for_stmt>iteration_times range(self.max_iter)# inverse the transform tanh -> [0, 1]
<block_start>perturbed_images=(torch.tanh(var_xs+modifier)<times>half_range+mid_point)<line_sep>prediction=self.net(perturbed_images)<line_sep>l2dist=torch.sum((perturbed_images-(torch.tanh(var_xs)<times>half_range+mid_point))<power>2 [1 2 3] )<if_stmt>targeted<block_start>constraint_loss=torch.max((prediction-1e10<times>targeteg_class_in_one_hot).max(1)[0]-(prediction<times>targeteg_class_in_one_hot).sum(1) torch.ones(batch_size device=device)<times>self.kappa<times>-1 )<block_end><else_stmt><block_start>constraint_loss=torch.max((prediction<times>targets_in_one_hot).sum(1)-(prediction-1e10<times>targets_in_one_hot).max(1)[0] torch.ones(batch_size device=device)<times>self.kappa<times>-1 )<block_end>loss_f=var_const<times>constraint_loss<line_sep>loss=l2dist.sum()+loss_f.sum()# minimize |r| + c * loss_f(x+r,l)
optimizer.zero_grad()<line_sep>loss.backward(retain_graph=<true>)<line_sep>optimizer.step()<line_sep># update the best l2 distance, current predication class as well as the corresponding adversarial example
# for i, (dist, score, img) in enumerate(
# zip(
# l2dist.data.cpu().numpy(),
# prediction.data.cpu().numpy(),
# perturbed_images.data.cpu().numpy(),
# )
# ):
<for_stmt>i range(prediction.shape[0])<block_start>dist=l2dist[i]<line_sep>score=prediction[i]<line_sep>img=perturbed_images[i]<if_stmt>dist.item()<l>best_l2[i]<and>attack_achieved(score copy_ys[i] copy_ytarget[i])<block_start>best_l2[i]=dist<line_sep>current_prediction_class[i]=torch.argmax(score)<line_sep>best_perturbation[i]=img<block_end><block_end><block_end># update the best constant c for each sample in the batch
<for_stmt>i range(batch_size)<block_start><if_stmt>(current_prediction_class[i]<eq>copy_ys[i].item()<and>current_prediction_class[i]<ne>-1)<block_start>c_upper_bound[i]=min(c_upper_bound[i] const_origin[i].item())<if_stmt>c_upper_bound[i]<l>1e10<block_start>const_origin[i]=(c_lower_bound[i].item()+c_upper_bound[i])/2.0<block_end><block_end><else_stmt><block_start>c_lower_bound[i]=max(c_lower_bound[i].item() const_origin[i].item())<if_stmt>c_upper_bound[i]<l>1e10<block_start>const_origin=(c_lower_bound[i].item()+c_upper_bound[i])/2.0<block_end><else_stmt><block_start>const_origin[i]<augmul>10<block_end><block_end><block_end><block_end>adv_xs=best_perturbation.to(device)<line_sep><return>adv_xs<block_end><block_end>
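# Illustrative usage sketch (an assumption, not part of the original class): an
# untargeted L2 attack on one batch; the hyper-parameter values mirror the
# defaults noted in the inline comments above, and model/images/labels are
# placeholders supplied by the caller.
<def_stmt>_example_cw_attack model images labels<block_start>attacker=CW(model torch.device('cuda') 2 <false> 0 0.2 0.01 200 4 'cifar10')<line_sep><return>attacker.forward(images labels)<block_end>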
|
# Copyright (c) 2014-2015, Ericsson AB. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
<import_from_future_stmt> print_function<import_stmt>xml.etree.ElementTree<as>ET<import_stmt>itertools<import_from_stmt>standard_types VoidType IntType LongPtrType GParamSpecType JObjectWrapperType<import_from_stmt>standard_types ClassCallbackMetaType GObjectMetaType CallbackMetaType<import_from_stmt>standard_types EnumMetaType BitfieldMetaType GWeakRefType JDestroyType<import_from_stmt>copy copy<line_sep>NS='{http://www.gtk.org/introspection/core/1.0}'<line_sep>C_NS='{http://www.gtk.org/introspection/c/1.0}'<line_sep>GLIB_NS='{http://www.gtk.org/introspection/glib/1.0}'<line_sep>TAG_CLASS=NS+'class'<line_sep>TAG_NAMESPACE=NS+'namespace'<line_sep>TAG_INCLUDE=NS+'include'<line_sep>TAG_CONSTRUCTOR=NS+'constructor'<line_sep>TAG_RETURN_VALUE=NS+'return-value'<line_sep>TAG_TYPE=NS+'type'<line_sep>TAG_ARRAY=NS+'array'<line_sep>TAG_PARAMETERS=NS+'parameters'<line_sep>TAG_VIRTUAL_METHOD=NS+'virtual-method'<line_sep>TAG_PARAMETER=NS+'parameter'<line_sep>TAG_PROPERTY=NS+'property'<line_sep>TAG_RECORD=NS+'record'<line_sep>TAG_FIELD=NS+'field'<line_sep>TAG_ENUMERATION=NS+'enumeration'<line_sep>TAG_MEMBER=NS+'member'<line_sep>TAG_DOC=NS+'doc'<line_sep>TAG_CALLBACK=NS+'callback'<line_sep>TAG_INSTANCE_PARAMETER=NS+'instance-parameter'<line_sep>TAG_METHOD=NS+'method'<line_sep>TAG_BITFIELD=NS+'bitfield'<line_sep>TAG_FUNCTION=NS+'function'<line_sep>TAG_SIGNAL=GLIB_NS+'signal'<line_sep>TAG_INTERFACE=NS+'interface'<line_sep>TAG_IMPLEMENTS=NS+'implements'<line_sep>ATTR_NAME='name'<line_sep>ATTR_WHEN='when'<line_sep>ATTR_VALUE='value'<line_sep>ATTR_SCOPE='scope'<line_sep>ATTR_LENGTH='length'<line_sep>ATTR_PARENT='parent'<line_sep>ATTR_CLOSURE='closure'<line_sep>ATTR_DESTORY='destroy'<line_sep>ATTR_READABLE='readable'<line_sep>ATTR_WRITABLE='writable'<line_sep>ATTR_ALLOW_NONE='allow-none'<line_sep>ATTR_INTROSPECTABLE='introspectable'<line_sep>ATTR_CONSTRUCT_ONLY='construct-only'<line_sep>ATTR_SHARED_LIBRARY='shared-library'<line_sep>ATTR_ZERO_TERMINATED='zero-terminated'<line_sep>ATTR_TRANSFER_ONWERSHIP='transfer-ownership'<line_sep>ATTR_C_IDENTIFIER_PREFIXES=C_NS+'identifier-prefixes'<line_sep>ATTR_C_IDENTIFIER=C_NS+'identifier'<line_sep>ATTR_C_SYMBOL_PREFIXES=C_NS+'symbol-prefixes'<line_sep>ATTR_C_SYMBOL_PREFIX=C_NS+'symbol-prefix'<line_sep>ATTR_C_TYPE=C_NS+'type'<line_sep>ATTR_GLIB_NICK=GLIB_NS+'nick'<line_sep>ATTR_GLIB_TYPE_NAME=GLIB_NS+'type-name'<line_sep>ATTR_GLIB_GET_TYPE=GLIB_NS+'get-type'<line_sep>ATTR_GLIB_TYPE_STRUCT=GLIB_NS+'type-struct'<def_stmt>printable cls<block_start>cls.__repr__=<lambda>self:str(self.__dict__)<line_sep><return>cls<block_end><def_stmt>partition pred iterable<block_start>t1,t2=itertools.tee(iterable)<line_sep><return>filter(pred t1) filter(<lambda>x:<not>pred(x) t2)<block_end><def_stmt>by_name elements<block_start><return>{e.name:e<for>e elements}<block_end><def_stmt>title_case st<block_start><return>''.join(c<for>c st.title()<if>c.isalpha())<block_end><def_stmt>parse_doc tag<block_start>text=tag.findtext(TAG_DOC)<if_stmt>text<block_start>text=text.replace('\n' ' ')<block_end><return>text<block_end><def_stmt>camel_case st<block_start>st=title_case(st)<line_sep><return>st[0].lower()+st[1:]<block_end><def_stmt>parse_tag_value type_registry tag name=<none><block_start><def_stmt>lookup_type tag<block_start><if_stmt>tag.tag<eq>TAG_ARRAY<block_start>inner_tag=tag.find(TAG_TYPE)<line_sep>gir_type=inner_tag.get(ATTR_NAME)<line_sep>c_type=inner_tag.get(ATTR_C_TYPE)<line_sep><return>type_registry.lookup(gir_type c_type 
is_array=<true>)<block_end><else_stmt><block_start>gir_type=tag.get(ATTR_NAME)<line_sep>c_type=tag.get(ATTR_C_TYPE)<line_sep><return>type_registry.lookup(gir_type c_type)<block_end><block_end>transfer=tag.get(ATTR_TRANSFER_ONWERSHIP)<line_sep>type_tag=tag.find(TAG_TYPE)<if_stmt>type_tag<is><none><block_start>type_tag=tag.find(TAG_ARRAY)<block_end>scope=tag.get(ATTR_SCOPE)<line_sep>allow_none=tag.get(ATTR_ALLOW_NONE)<eq>'1'<line_sep>inner_type_tags=type_tag.findall(TAG_TYPE)<if_stmt>name<is><none><block_start>name=tag.get(ATTR_NAME)<block_end><assert_stmt>name<line_sep>typ=lookup_type(type_tag)<line_sep>value=<none><if_stmt>typ.is_container<block_start><assert_stmt>inner_type_tags<line_sep>types=enumerate(map(lookup_type inner_type_tags))<line_sep>type_params=[c(name+'_'+str(i) transfer<eq>'full')<for>i,c types]<line_sep>value=typ(name transfer<ne>'none' allow_none *type_params)<block_end><else_stmt><block_start><assert_stmt>transfer<ne>'container'<if_stmt>typ.is_array<block_start>c_array_type=type_tag.get(ATTR_C_TYPE)<line_sep>value=typ(name transfer<eq>'full' allow_none c_array_type)<block_end><else_stmt><block_start><if_stmt>scope<is><not><none><block_start>value=typ(name transfer<eq>'full' allow_none scope)<block_end><else_stmt><block_start>value=typ(name transfer<eq>'full' allow_none)<block_end><block_end><block_end>value.doc=parse_doc(tag)<line_sep><return>value<block_end>@printable<class_stmt>Parameters(object)<block_start><def_stmt>__init__ self return_value instance_param params=<none> java_params=<none><block_start>params=params<or>[]<line_sep>self.instance_param=instance_param<if_stmt>return_value<is><none><block_start>return_value=VoidType()<block_end>self.return_value=return_value<line_sep>self.params=params<if_stmt>instance_param<is><not><none><block_start>self.all_params=[instance_param]+params<block_end><else_stmt><block_start>self.all_params=params<block_end><def_stmt>is_closure_param param<block_start><return>isinstance(param JObjectWrapperType)<block_end>self.closure_params,self.java_params=partition(is_closure_param params)<def_stmt>is_length_param param<block_start><return>param.is_length_param<block_end>self.length_params,self.java_params=partition(is_length_param self.java_params)<if_stmt>java_params<block_start>self.java_params=java_params<block_end><def_stmt>set_parent param<block_start><if_stmt>param<is><not><none><block_start>param.parent=self<block_end><block_end>map(set_parent [return_value instance_param]+params)<block_end><def_stmt>__iter__ self<block_start><return>iter(self.all_params)<block_end>@classmethod<def_stmt>from_tag cls type_registry tag<block_start>return_value=parse_tag_value(type_registry tag.find(TAG_RETURN_VALUE) 'result')<line_sep>params_tag=tag.find(TAG_PARAMETERS)<if_stmt>params_tag<is><none><block_start><return>cls(return_value <none>)<block_end>closure_refs={}<line_sep>destroy_refs={}<line_sep>array_refs={}<for_stmt>tag_index,tag enumerate(params_tag.findall(TAG_PARAMETER))<block_start>closure=tag.get(ATTR_CLOSURE)<if_stmt>closure<is><not><none><block_start>closure_refs[int(closure)]=tag_index<block_end>destroy=tag.get(ATTR_DESTORY)<if_stmt>destroy<is><not><none><block_start>destroy_refs[int(destroy)]=tag_index<block_end>array_tag=tag.find(TAG_ARRAY)<if_stmt>array_tag<is><not><none><block_start>length=array_tag.get(ATTR_LENGTH)<if_stmt>length<is><not><none><block_start>array_refs[int(length)]=tag_index<block_end><block_end><block_end>params=[]<line_sep>instance_param=<none><line_sep>real_tag_index=0<for_stmt>tag 
params_tag<block_start><if_stmt>tag.tag<eq>TAG_INSTANCE_PARAMETER<block_start><assert_stmt>real_tag_index<eq>0<line_sep>instance_param=parse_tag_value(type_registry tag)<block_end><else_stmt><block_start><if_stmt>closure_refs.get(real_tag_index)<is><not><none><block_start>name=tag.get(ATTR_NAME)<line_sep>closure_index=closure_refs.get(real_tag_index)<line_sep>closure=<none><if_stmt>closure_index<eq>real_tag_index-1<block_start>closure=params[-1]<block_end><else_stmt><block_start><assert_stmt>closure_index<eq>real_tag_index<block_end>params.append(JObjectWrapperType(name closure transfer_ownership=<true>))<block_end><elif_stmt>destroy_refs.get(real_tag_index)<is><not><none><block_start>name=tag.get(ATTR_NAME)<line_sep>destroy_index=destroy_refs.get(real_tag_index)<assert_stmt>destroy_index<eq>real_tag_index-2<line_sep>params[-2].scope<eq>'notified'<line_sep>params.append(JDestroyType(name))<block_end><elif_stmt>array_refs.get(real_tag_index)<is><not><none><block_start>array_index=array_refs.get(real_tag_index)<assert_stmt>array_index<eq>real_tag_index-1<line_sep>array=params[-1]<line_sep>value=parse_tag_value(type_registry tag)<line_sep>value.is_length_param=<true><line_sep>value.array=array<line_sep>array.length=value<line_sep>params.append(value)<block_end><else_stmt><block_start>params.append(parse_tag_value(type_registry tag))<block_end>real_tag_index<augadd>1<block_end><block_end><return>cls(return_value instance_param params)<block_end><block_end>@printable<class_stmt>Property(object)<block_start><def_stmt>__init__ self name value class_value readable writable construct_only<block_start>self.name=name<line_sep>self.value=value<line_sep>self.readable=readable<line_sep>self.writable=writable<line_sep>self.construct_only=construct_only<if_stmt>readable<block_start>get_value=copy(value)<line_sep>get_value.transfer_ownership=<not>get_value.transfer_ownership<line_sep>self.getter=Method(c_name=<none> name='get'+title_case(name) params=Parameters(get_value class_value) )<line_sep>self.signal=Signal(name='on'+title_case(name)+'Changed' params=Parameters(<none> class_value [GParamSpecType('pspec' transfer_ownership=<false>) JObjectWrapperType('listener' <none> transfer_ownership=<false>) ] java_params=[value]) signal_name='notify::'+name interface_name=title_case(name)+'ChangeListener' class_value=class_value when='first' )<block_end><if_stmt>writable<block_start>self.setter=Method(c_name=<none> name='set'+title_case(name) params=Parameters(<none> class_value [value]) )<block_end><block_end>@classmethod<def_stmt>from_tag cls type_registry class_value tag<block_start>name=tag.get(ATTR_NAME)<line_sep><return>cls(name=name value=parse_tag_value(type_registry tag camel_case(name)) class_value=class_value readable=str(tag.get(ATTR_READABLE))<ne>'0' writable=str(tag.get(ATTR_WRITABLE))<eq>'1'<and>str(tag.get(ATTR_CONSTRUCT_ONLY))<ne>'1' construct_only=bool(tag.get(ATTR_CONSTRUCT_ONLY)) )<block_end><block_end>@printable<class_stmt>BaseFunction(object)<block_start><def_stmt>__init__ self name params c_name=<none> doc=<none><block_start>self.name=name<line_sep>self.c_name=c_name<line_sep>self.params=params<line_sep>self.doc=doc<block_end>@property<def_stmt>method_signature self<block_start>arg_signature=''.join((p.java_signature<for>p self.params.java_params<if>p.java_signature<is><not><none>))<line_sep><return>'('+arg_signature+')'+self.params.return_value.java_signature<block_end>@classmethod<def_stmt>from_tag cls type_registry tag<block_start><return>cls(doc=parse_doc(tag) 
name=camel_case(tag.get(ATTR_NAME)) c_name=tag.get(ATTR_C_IDENTIFIER) params=Parameters.from_tag(type_registry tag) )<block_end><block_end><class_stmt>Function(BaseFunction)<block_start><pass><block_end><class_stmt>Method(BaseFunction)<block_start><pass><block_end><class_stmt>Constructor(BaseFunction)<block_start><def_stmt>__init__ self **kwargs<block_start>super(Constructor self).__init__(**kwargs)<line_sep>p=self.params<line_sep>self.params=Parameters(GWeakRefType('instance_pointer') p.instance_param p.params)<line_sep>self.name='nativeConstructor'<block_end><block_end><class_stmt>Callback(BaseFunction)<block_start><def_stmt>__init__ self value **kwargs<block_start>super(Callback self).__init__(**kwargs)<line_sep>self.value=value<block_end>@classmethod<def_stmt>from_tag cls type_registry tag<block_start>callback_name=tag.get(ATTR_NAME)<line_sep>callback_value=type_registry.lookup(callback_name <none>)('listener' <false>)<line_sep><return>cls(doc=parse_doc(tag) name='on'+callback_name value=callback_value params=Parameters.from_tag(type_registry tag) )<block_end><block_end><class_stmt>Signal(BaseFunction)<block_start><def_stmt>__init__ self signal_name interface_name class_value when **kwargs<block_start>BaseFunction.__init__(self **kwargs)<line_sep>self.signal_name=signal_name<line_sep>self.when=when<line_sep>listener_value=ClassCallbackMetaType(java_type=interface_name outer=class_value )('listener')<line_sep>handle_value=IntType('handle' transfer_ownership=<false>)<line_sep>closure_value=JObjectWrapperType('user_data' listener_value transfer_ownership=<false>)<line_sep>self.add_listener=Method(c_name=<none> name='connect'+listener_value.java_type params=Parameters(handle_value class_value [listener_value closure_value]) )<line_sep>self.remove_listener=Method(c_name=<none> name='disconnect'+listener_value.java_type params=Parameters(<none> class_value [handle_value]) )<line_sep>self.public_add_listener=Method(c_name=<none> name='add'+listener_value.java_type params=Parameters(<none> <none> [listener_value]) )<line_sep>self.public_remove_listener=Method(c_name=<none> name='remove'+listener_value.java_type params=Parameters(<none> <none> [listener_value]) )<line_sep>self.value=listener_value<block_end>@classmethod<def_stmt>from_tag cls type_registry class_value tag<block_start>signal_name=tag.get(ATTR_NAME)<line_sep>parsed_params=Parameters.from_tag(type_registry tag)<line_sep>return_value=parsed_params.return_value<line_sep>params=parsed_params.all_params<if>parsed_params<is><not><none><else>[]<line_sep>params=[return_value class_value]+[params+[JObjectWrapperType('listener' <none> transfer_ownership=<false>)]]<line_sep><return>cls(name=camel_case(signal_name) signal_name=signal_name interface_name=title_case(signal_name)+'Listener' class_value=class_value when=tag.get(ATTR_WHEN) params=Parameters(*params) )<block_end><block_end>@printable<class_stmt>Class(object)<block_start><def_stmt>__init__ self **kwargs<block_start>self.__dict__.update(**kwargs)<block_end>@classmethod<def_stmt>from_tag cls type_registry tag interfaces=<none><block_start>parent=tag.get(ATTR_PARENT)<if_stmt>parent<eq>'GObject.Object'<block_start>parent=<none><block_end>name=tag.get(ATTR_NAME)<line_sep>value=type_registry.lookup(name <none>)('self')<line_sep><return>cls(name=name parent=parent c_type=tag.get(ATTR_C_TYPE) value=value c_symbol_prefix=tag.get(ATTR_C_SYMBOL_PREFIX) glib_type_name=tag.get(ATTR_GLIB_TYPE_NAME) glib_get_type=tag.get(ATTR_GLIB_GET_TYPE) glib_type_struct=tag.get(ATTR_GLIB_TYPE_STRUCT) 
constructors=[Constructor.from_tag(type_registry t)<for>t tag.findall(TAG_CONSTRUCTOR)<if>t.get(ATTR_INTROSPECTABLE)<ne>'0'] properties=[Property.from_tag(type_registry value t)<for>t tag.findall(TAG_PROPERTY)<if>t.get(ATTR_INTROSPECTABLE)<ne>'0'] methods=[Method.from_tag(type_registry t)<for>t tag.findall(TAG_METHOD)<if>t.get(ATTR_INTROSPECTABLE)<ne>'0'] functions=[Function.from_tag(type_registry t)<for>t tag.findall(TAG_FUNCTION)<if>t.get(ATTR_INTROSPECTABLE)<ne>'0'] signals=[Signal.from_tag(type_registry value t)<for>t tag.findall(TAG_SIGNAL)<if>t.get(ATTR_INTROSPECTABLE)<ne>'0'] interfaces=[interfaces[t.get(ATTR_NAME)]<for>t tag.findall(TAG_IMPLEMENTS)] )<block_end><block_end>@printable<class_stmt>EnumMember(object)<block_start><def_stmt>__init__ self value name c_name nick=<none> description=<none><block_start>self.value=value<line_sep>self.name=name<line_sep>self.c_name=c_name<line_sep>self.nick=nick<line_sep>self.description=description<block_end>@classmethod<def_stmt>from_tag cls tag glib_tag=<none><block_start>value=tag.get(ATTR_VALUE)<if_stmt>glib_tag<is><not><none><block_start><assert_stmt>value<eq>glib_tag.get(ATTR_VALUE)<line_sep><return>cls(value=value name=tag.get(ATTR_NAME).upper() c_name=tag.get(ATTR_C_IDENTIFIER) nick=glib_tag.get(ATTR_GLIB_NICK) description=glib_tag.get(ATTR_C_IDENTIFIER) )<block_end><else_stmt><block_start><return>cls(value=value name=tag.get(ATTR_NAME).upper() c_name=tag.get(ATTR_C_IDENTIFIER) )<block_end><block_end><block_end>@printable<class_stmt>Enum(object)<block_start><def_stmt>__init__ self name c_name type is_bitfield members has_nick=<false> has_description=<false><block_start>self.name=name<line_sep>self.c_name=c_name<line_sep>self.type=type<line_sep>self.is_bitfield=is_bitfield<line_sep>self.members=members<line_sep>self.has_nick=has_nick<line_sep>self.has_description=has_description<block_end>@classmethod<def_stmt>from_tag cls type_registry tag glib_tag=<none><block_start>members=tag.findall(TAG_MEMBER)<line_sep>name=tag.get(ATTR_NAME)<line_sep>c_name=tag.get(ATTR_C_TYPE)<line_sep>type=type_registry.lookup(name c_name)<if_stmt>glib_tag<is><not><none><block_start>glib_members=glib_tag.findall(TAG_MEMBER)<line_sep><return>cls(name=name c_name=c_name type=type is_bitfield=tag.tag<eq>TAG_BITFIELD members=[EnumMember.from_tag(*tags)<for>tags zip(members glib_members)] has_nick=<true> has_description=<true> )<block_end><else_stmt><block_start><return>cls(name=name c_name=c_name type=type is_bitfield=tag.tag<eq>TAG_BITFIELD members=[EnumMember.from_tag(tag)<for>tag members] )<block_end><block_end><block_end>@printable<class_stmt>Namespace(object)<block_start><def_stmt>__init__ self type_registry tag<block_start><def_stmt>find_enum_pairs <block_start>enum_tags=tag.findall(TAG_ENUMERATION)+tag.findall(TAG_BITFIELD)<line_sep>c_enums,glib_enums=partition(<lambda>top:top.get(ATTR_GLIB_TYPE_NAME)<is><none> enum_tags)<line_sep>glib_enum_dict={enum.get(ATTR_NAME):enum<for>enum glib_enums}<def_stmt>glib_from_c c_enum<block_start>glib_enum=glib_enum_dict.get(c_enum.get(ATTR_NAME)+'s')<if_stmt>glib_enum<is><not><none><block_start><return>[c_enum glib_enum]<block_end><else_stmt><block_start><return>[c_enum]<block_end><block_end><return>map(glib_from_c c_enums)<block_end>interfaces=[Class.from_tag(type_registry t)<for>t tag.findall(TAG_INTERFACE)]<line_sep>interface_map={interface.name:interface<for>interface 
interfaces}<line_sep>self.name=tag.get(ATTR_NAME)<line_sep>self.symbol_prefix=tag.get(ATTR_C_SYMBOL_PREFIXES)<line_sep>self.identifier_prefix=tag.get(ATTR_C_IDENTIFIER_PREFIXES)<line_sep>self.shared_library=tag.get(ATTR_SHARED_LIBRARY)<line_sep>self.interfaces=interfaces<line_sep>self.enums=[Enum.from_tag(type_registry *tags)<for>tags find_enum_pairs()]<line_sep>self.callbacks=[Callback.from_tag(type_registry t)<for>t tag.findall(TAG_CALLBACK)]<line_sep>self.classes=[Class.from_tag(type_registry t interface_map)<for>t tag.findall(TAG_CLASS)]<line_sep>self.functions=[Function.from_tag(type_registry t)<for>t tag.findall(TAG_FUNCTION)]<block_end><block_end><class_stmt>GirParser(object)<block_start><def_stmt>__init__ self xml_root<block_start>self.xml_root=xml_root<block_end><def_stmt>parse_types self<block_start>types=[]<for_stmt>namespace self.xml_root.findall(TAG_NAMESPACE)<block_start>prefix=namespace.get(ATTR_C_SYMBOL_PREFIXES)<line_sep>tag_types={TAG_CLASS:GObjectMetaType TAG_INTERFACE:GObjectMetaType TAG_CALLBACK:CallbackMetaType TAG_ENUMERATION:EnumMetaType TAG_BITFIELD:BitfieldMetaType }<line_sep>tags=sum(map(namespace.findall tag_types.keys()) [])<for_stmt>tag tags<block_start>gir_type=tag.get(ATTR_NAME)<line_sep>c_type=tag.get(ATTR_C_TYPE)<line_sep>MetaType=tag_types[tag.tag]<if_stmt>MetaType<eq>EnumMetaType<or>MetaType<eq>BitfieldMetaType<block_start><if_stmt>tag.get(ATTR_GLIB_TYPE_NAME)<is><not><none><block_start><continue><block_end><block_end>types.append(MetaType(gir_type=gir_type c_type=c_type prefix=prefix ))<block_end><block_end><return>types<block_end><def_stmt>parse_enum_aliases self<block_start>aliases={}<for_stmt>namespace self.xml_root.findall(TAG_NAMESPACE)<block_start>enum_tags=namespace.findall(TAG_ENUMERATION)+namespace.findall(TAG_BITFIELD)<for_stmt>tag enum_tags<block_start><if_stmt>tag.get(ATTR_GLIB_TYPE_NAME)<is><not><none><block_start>alias=tag.get(ATTR_NAME)<line_sep>name=alias[:-1]<line_sep>aliases[alias]=name<block_end><block_end><block_end><return>aliases<block_end><def_stmt>parse_full self type_registry<block_start><return>[Namespace(type_registry tag)<for>tag self.xml_root.findall(TAG_NAMESPACE)]<block_end><block_end>
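# Illustrative helper sketch (an assumption; 'path' is any .gir file on disk):
# wiring the parser above to an ElementTree root and collecting the declared
# meta types.
<def_stmt>_example_parse_gir path<block_start>root=ET.parse(path).getroot()<line_sep>parser=GirParser(root)<line_sep><return>parser.parse_types()<block_end>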
|
<import_from_stmt>openfda.tests.api_test_helpers *<import_from_stmt>nose.tools *<def_stmt>test_consumer_merge <block_start>meta,results=fetch('/food/event.json?search=report_number:65420')<line_sep>eq_(len(results) 1)<line_sep>event=results[0]<line_sep>eq_(event["consumer"]["gender"] "M")<line_sep>eq_(event["consumer"]["age"] "33")<line_sep>eq_(event["consumer"]["age_unit"] "year(s)")<block_end><def_stmt>test_consumer_merge_with_missing_data <block_start>meta,results=fetch('/food/event.json?search=report_number:65619')<line_sep>eq_(len(results) 1)<line_sep>event=results[0]<line_sep>eq_(event["consumer"]["gender"] "M")<line_sep>eq_(event["consumer"]["age"] "70")<line_sep>eq_(event["consumer"]["age_unit"] "year(s)")<block_end><def_stmt>test_full_record <block_start>meta,results=fetch('/food/event.json?search=report_number:65619')<line_sep>eq_(len(results) 1)<line_sep>event=results[0]<line_sep>eq_(event["date_created"] "20040112")<line_sep>eq_(event["date_started"] "20031222")<line_sep>eq_(event["outcomes"] ["Disability"])<line_sep>products=sorted(event["products"] key=<lambda>k:k['name_brand'])<line_sep>eq_(len(products) 5)<line_sep>eq_(products[0]["industry_code"] "54")<line_sep>eq_(products[0]["industry_name"] "Vit/Min/Prot/Unconv Diet(Human/Animal)")<line_sep>eq_(products[0]["name_brand"] "ACEYTL-L-CARNITINE")<line_sep>eq_(products[0]["role"] "SUSPECT")<line_sep>eq_(products[1]["industry_code"] "54")<line_sep>eq_(products[1]["industry_name"] "Vit/Min/Prot/Unconv Diet(Human/Animal)")<line_sep>eq_(products[1]["name_brand"] "ALPHA LIPOIC")<line_sep>eq_(products[1]["role"] "SUSPECT")<line_sep>eq_(products[2]["industry_code"] "54")<line_sep>eq_(products[2]["industry_name"] "Vit/Min/Prot/Unconv Diet(Human/Animal)")<line_sep>eq_(products[2]["name_brand"] "CALCIUM CALTRATE")<line_sep>eq_(products[2]["role"] "SUSPECT")<line_sep>eq_(products[3]["industry_code"] "54")<line_sep>eq_(products[3]["industry_name"] "Vit/Min/Prot/Unconv Diet(Human/Animal)")<line_sep>eq_(products[3]["name_brand"] "MULTIVITAMIN")<line_sep>eq_(products[3]["role"] "SUSPECT")<line_sep>eq_(products[4]["industry_code"] "54")<line_sep>eq_(products[4]["industry_name"] "Vit/Min/Prot/Unconv Diet(Human/Animal)")<line_sep>eq_(products[4]["name_brand"] "VITAMIN E")<line_sep>eq_(products[4]["role"] "SUSPECT")<line_sep>eq_(sorted(event["reactions"] key=<lambda>k:k) [u'ASTHENIA' u'DEPRESSED MOOD' u'DIZZINESS' u'IMPAIRED DRIVING ABILITY' u'LETHARGY' u'PHYSICAL EXAMINATION'])<line_sep>eq_(event["report_number"] "65619")<line_sep>eq_(event["consumer"]["gender"] "M")<line_sep>eq_(event["consumer"]["age"] "70")<line_sep>eq_(event["consumer"]["age_unit"] "year(s)")<block_end><def_stmt>test_sort_by_date_created <block_start>meta,results=fetch('/food/event.json?search=date_created:[20170220+TO+20170225]+AND+reactions:OVARIAN+CANCER&sort=date_created:asc')<line_sep>eq_(len(results) 1)<line_sep>event=results[0]<line_sep>eq_(event["date_created"] "20170221")<line_sep>meta,results=fetch('/food/event.json?search=date_created:[20170220+TO+20170225]+AND+reactions:OVARIAN+CANCER&sort=date_created:desc')<line_sep>eq_(len(results) 1)<line_sep>event=results[0]<line_sep>eq_(event["date_created"] "20170224")<block_end>
|
<import_from_stmt>paste.deploy.converters asbool<import_from_stmt>paste.wsgilib catch_errors<import_from_stmt>paste.util import_string<import_stmt>sqlobject<import_stmt>threading<def_stmt>make_middleware app global_conf database=<none> use_transaction=<false> hub=<none><block_start>"""
WSGI middleware that sets the connection for the request (using
the database URI or connection object) and the given hub (or
``sqlobject.sqlhub`` if not given).
If ``use_transaction`` is true, then the request will be run in a
transaction.
Applications can use the keys (which are all no-argument functions):
``sqlobject.get_connection()``:
Returns the connection object
``sqlobject.abort()``:
Aborts the transaction. Does not raise an error, but at the *end*
of the request there will be a rollback.
``sqlobject.begin()``:
Starts a transaction. First commits (or rolls back if aborted) if
this is run in a transaction.
``sqlobject.in_transaction()``:
Returns true or false, depending if we are currently in a
transaction.
"""<line_sep>use_transaction=asbool(use_transaction)<if_stmt>database<is><none><block_start>database=global_conf.get('database')<block_end><if_stmt><not>database<block_start><raise>ValueError("You must provide a 'database' configuration value")<block_end><if_stmt>isinstance(hub basestring)<block_start>hub=import_string.eval_import(hub)<block_end><if_stmt><not>hub<block_start>hub=sqlobject.sqlhub<block_end><if_stmt>isinstance(database basestring)<block_start>database=sqlobject.connectionForURI(database)<block_end><return>SQLObjectMiddleware(app database use_transaction hub)<block_end><class_stmt>SQLObjectMiddleware(object)<block_start><def_stmt>__init__ self app conn use_transaction hub<block_start>self.app=app<line_sep>self.conn=conn<line_sep>self.use_transaction=use_transaction<line_sep>self.hub=hub<block_end><def_stmt>__call__ self environ start_response<block_start>conn=[self.conn]<if_stmt>self.use_transaction<block_start>conn[0]=conn[0].transaction()<block_end>any_errors=[]<line_sep>use_transaction=[self.use_transaction]<line_sep>self.hub.threadConnection=conn[0]<def_stmt>abort <block_start><assert_stmt>use_transaction[0] ("You cannot abort, because a transaction is not being used")<line_sep>any_errors.append(<none>)<block_end><def_stmt>begin <block_start><if_stmt>use_transaction[0]<block_start><if_stmt>any_errors<block_start>conn[0].rollback()<block_end><else_stmt><block_start>conn[0].commit()<block_end><block_end>any_errors[:]=[]<line_sep>use_transaction[0]=<true><line_sep>conn[0]=self.conn.transaction()<line_sep>self.hub.threadConnection=conn[0]<block_end><def_stmt>error exc_info=<none><block_start>any_errors.append(<none>)<line_sep>ok()<block_end><def_stmt>ok <block_start><if_stmt>use_transaction[0]<block_start><if_stmt>any_errors<block_start>conn[0].rollback()<block_end><else_stmt><block_start>conn[0].commit(close=<true>)<block_end><block_end>self.hub.threadConnection=<none><block_end><def_stmt>in_transaction <block_start><return>use_transaction[0]<block_end><def_stmt>get_connection <block_start><return>conn[0]<block_end>environ['sqlobject.get_connection']=get_connection<line_sep>environ['sqlobject.abort']=abort<line_sep>environ['sqlobject.begin']=begin<line_sep>environ['sqlobject.in_transaction']=in_transaction<line_sep><return>catch_errors(self.app environ start_response error_callback=error ok_callback=ok)<block_end><block_end>
|
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
"""<import_from_stmt>abc abstractmethod ABCMeta<class_stmt>PickleSerializable()<block_start>__metaclass__=ABCMeta<line_sep>@abstractmethod<def_stmt>serialize self<block_start><pass><block_end>@abstractmethod<def_stmt>deserialize self<block_start><pass><block_end><block_end>
|
<import_from_stmt>.augmentation SemsegAugmentation ObjdetAugmentation<line_sep>
|
<import_from_stmt>argparse Action<import_from_stmt>flexget.options ArgumentParser<def_stmt>test_subparser_nested_namespace <block_start>p=ArgumentParser()<line_sep>p.add_argument('--outer')<line_sep>p.add_subparsers(nested_namespaces=<true>)<line_sep>sub=p.add_subparser('sub')<line_sep>sub.add_argument('--inner')<line_sep>sub.add_subparsers()<line_sep>subsub=sub.add_subparser('subsub')<line_sep>subsub.add_argument('--innerinner')<line_sep>result=p.parse_args(['--outer' 'a' 'sub' '--inner' 'b' 'subsub' '--innerinner' 'c'])<assert_stmt>result.outer<eq>'a'<line_sep># First subparser values should be nested under subparser name
<assert_stmt>result.sub.inner<eq>'b'<assert_stmt><not>hasattr(result 'inner')<line_sep># The second layer did not define nested_namespaces, results should be in first subparser namespace
<assert_stmt>result.sub.innerinner<eq>'c'<assert_stmt><not>hasattr(result 'innerinner')<block_end><def_stmt>test_subparser_parent_defaults <block_start>p=ArgumentParser()<line_sep>p.add_argument('--a')<line_sep>p.set_post_defaults(a='default')<line_sep>p.add_subparsers()<line_sep>p.add_subparser('sub')<line_sep>p.add_subparser('sub_with_default' parent_defaults={'a':'sub_default'})<line_sep># Make sure normal default works
result=p.parse_args(['sub'])<assert_stmt>result.a<eq>'default'<line_sep># Test subparser default
result=p.parse_args(['sub_with_default'])<assert_stmt>result.a<eq>'sub_default'<line_sep># Subparser default should not override explicit one
result=p.parse_args(['--a' 'manual' 'sub_with_default'])<assert_stmt>result.a<eq>'manual'<block_end><def_stmt>test_post_defaults <block_start><class_stmt>CustomAction(Action)<block_start><def_stmt>__call__ self parser namespace values option_string=<none><block_start><if_stmt><not>hasattr(namespace 'post_set')<block_start>namespace.post_set='custom'<block_end><block_end><block_end>p=ArgumentParser()<line_sep>p.add_argument('--post-set')<line_sep>p.add_argument('--custom' action=CustomAction nargs=0)<line_sep>p.set_post_defaults(post_set='default')<line_sep># Explicitly specified, no defaults should be set
result=p.parse_args(['--post-set' 'manual'])<assert_stmt>result.post_set<eq>'manual'<line_sep># Nothing supplied, should get the post set default
result=p.parse_args([])<assert_stmt>result.post_set<eq>'default'<line_sep># Custom action should be allowed to set default
result=p.parse_args(['--custom'])<assert_stmt>result.post_set<eq>'custom'<block_end>
|
"""Python class representation of a Raster Foundry Image"""<import_from_stmt>.base BaseModel<import_from_stmt>.band Band<class_stmt>Image(BaseModel)<block_start>URL_PATH="/api/images/"<def_stmt>__init__ self rawDataBytes visibility filename sourceuri bands imageMetadata resolutionMeters metadataFiles scene=<none> owner=<none> <block_start>"""Create a new Image
Args:
rawDataBytes (int): size of image
visibility (str): accessibility level for object
filename (str): filename for image (displayed to users)
sourceuri (str): source of image
bands (List[Band]): list of bands in image
imageMetadata (dict): extra information about the image
resolutionMeters (float): resolution of image
owner (str): optional owner of image
"""<line_sep>self.rawDataBytes=rawDataBytes<line_sep>self.visibility=visibility<line_sep>self.filename=filename<line_sep>self.sourceUri=sourceuri<line_sep>self.scene=scene<line_sep>self.imageMetadata=imageMetadata<line_sep>self.resolutionMeters=resolutionMeters<line_sep>self.metadataFiles=metadataFiles<line_sep>self.bands=bands<line_sep>self.owner=owner<block_end><def_stmt>__repr__ self<block_start><return>"<Image: {}>".format(self.filename)<block_end>@classmethod<def_stmt>from_dict cls d<block_start>bands=[Band.from_dict(band)<for>band d.get("bands")]<line_sep><return>cls(d.get("rawDataBytes") d.get("visibility") d.get("filename") d.get("sourceUri") bands d.get("imageMetadata") d.get("resolutionMeters") d.get("metadataFiles") d.get("scene") d.get("owner") )<block_end><def_stmt>to_dict self<block_start>image_dict=dict(rawDataBytes=self.rawDataBytes visibility=self.visibility filename=self.filename sourceUri=self.sourceUri bands=[band.to_dict()<for>band self.bands] imageMetadata=self.imageMetadata metadataFiles=self.metadataFiles resolutionMeters=self.resolutionMeters owner=self.owner )<if_stmt>self.scene<block_start>image_dict["scene"]=self.scene<block_end><return>image_dict<block_end><def_stmt>create self<block_start><assert_stmt>self.scene "Scene is required to create an Image"<line_sep><return>super(Image self).create()<block_end><block_end>
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Array related ops."""<import_stmt>tensorflow<as>tf<def_stmt>increment_last_dim input_tensor:tf.Tensor default_value:float<arrow>tf.Tensor<block_start>"""Grows the size of last dimension of given `input_tensor` by one.
Examples:
- [[1, 2], [3, 4]] -> [[1, 2, 1], [3, 4, 1]] (default_value = 1).
- [1, 2, 3] -> [1, 2, 3, 4] (default_value = 4).
Args:
input_tensor: a float tf.Tensor whose last dimension is to be incremented.
default_value: a float value denoting the default value for the increased
part.
Returns:
A new `tf.Tensor` with increased last dimension size.
"""<line_sep>input_tensor=tf.dtypes.cast(input_tensor tf.float32)<line_sep>inc_tensor=tf.ones(tf.shape(input_tensor)[:-1])<line_sep>inc_tensor=tf.expand_dims(inc_tensor -1)<times>default_value<line_sep><return>tf.concat([input_tensor inc_tensor] axis=-1)<block_end>
|
<import_from_stmt>pylab *<import_stmt>sys<import_stmt>re<if_stmt>__name__<eq>'__main__'<block_start><if_stmt>len(sys.argv)<ne>3<block_start>print("usage: python binary_hist.py <input filename> <time of histogram>")<line_sep>sys.exit(1)<block_end><else_stmt><block_start>fname=sys.argv[1]<line_sep>time=float(sys.argv[2])<line_sep>f=open(fname "r")<line_sep>inblock=<false><line_sep>EkTs=[]<for_stmt>line f<block_start><if_stmt>re.search("%%% time= (\d+\.\d*)" line)<block_start><if_stmt>float(re.search("%%% time= (\d+\.\d*)" line).group(1))<eq>time<block_start>inblock=<true><block_end><block_end><if_stmt>inblock<and>re.search("%%% .*E/kT=(-?\d+\.\d+)" line)<block_start>EkTs.append(float(re.search("%%%.*E/kT=(-?\d+\.\d+)" line).group(1)))<block_end><if_stmt>inblock<and>re.search("%%% Emul/E" line)<block_start>inblock=<false><block_end><block_end>f.close()<if_stmt>len(EkTs)<g>0<block_start>hist(EkTs)<line_sep>show()<block_end><else_stmt><block_start>print("No binaries found at time = %f."%time)<block_end><block_end><block_end>
|
"""
From original at https://github.com/aimagelab/meshed-memory-transformer/blob/master/models/transformer/utils.py
Original copyright of AImageLab code below, modifications by <NAME>, Copyright 2021.
"""<line_sep># Copyright (c) 2019, AImageLab
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<line_sep>__all__=["PositionWiseFeedForward"]<class_stmt>PositionWiseFeedForward(nn.Module)<block_start>'''
Position-wise feed forward layer
'''<def_stmt>__init__ self * d_model:int d_ff:int dropout:float<block_start>super(PositionWiseFeedForward self).__init__()<line_sep>#self.identity_map_reordering = identity_map_reordering
self.fc1=nn.Linear(d_model d_ff)<line_sep>self.fc2=nn.Linear(d_ff d_model)<line_sep>self.dropout=nn.Dropout(p=dropout)<if>dropout<g>0.<else><none><line_sep>self.dropout_2=nn.Dropout(p=dropout)<if>dropout<g>0.<else><none><line_sep>self.layer_norm=nn.LayerNorm(d_model)<block_end><def_stmt>forward self inputs# if self.identity_map_reordering:
# out = self.layer_norm(input)
# out = self.fc2(self.dropout_2(F.relu(self.fc1(out))))
# out = input + self.dropout(torch.relu(out))
#else:
<block_start>out=F.relu(self.fc1(inputs))<if_stmt>self.dropout_2<block_start>out=self.dropout_2(out)<block_end>out=self.fc2(out)<if_stmt>self.dropout<block_start>out=self.dropout(out)<block_end>out=self.layer_norm(inputs+out)<line_sep><return>out<block_end><block_end>
|
"""
ulid/api/microsecond
~~~~~~~~~~~~~~~~~~~~
Contains the public API of the `ulid` package using the microsecond provider.
"""<import_from_stmt>.. consts providers ulid<import_from_stmt>. api<line_sep>API=api.Api(providers.MICROSECOND)<line_sep>create=API.create<line_sep>from_bytes=API.from_bytes<line_sep>from_int=API.from_int<line_sep>from_randomness=API.from_randomness<line_sep>from_str=API.from_str<line_sep>from_timestamp=API.from_timestamp<line_sep>from_uuid=API.from_uuid<line_sep>new=API.new<line_sep>parse=API.parse<line_sep>MIN_TIMESTAMP=consts.MIN_TIMESTAMP<line_sep>MAX_TIMESTAMP=consts.MAX_TIMESTAMP<line_sep>MIN_RANDOMNESS=consts.MIN_RANDOMNESS<line_sep>MAX_RANDOMNESS=consts.MAX_RANDOMNESS<line_sep>MIN_ULID=consts.MIN_ULID<line_sep>MAX_ULID=consts.MAX_ULID<line_sep>Timestamp=ulid.Timestamp<line_sep>Randomness=ulid.Randomness<line_sep>ULID=ulid.ULID<line_sep>__all__=api.ALL<line_sep>
|
<import_from_stmt>oie_readers.oieReader OieReader<import_from_stmt>oie_readers.extraction Extraction<class_stmt>PropSReader(OieReader)<block_start><def_stmt>__init__ self<block_start>self.name='PropS'<block_end><def_stmt>read self fn<block_start>d={}<with_stmt>open(fn)<as>fin<block_start><for_stmt>line fin<block_start><if_stmt><not>line.strip()<block_start><continue><block_end>data=line.strip().split('\t')<line_sep>confidence,text,rel=data[:3]<line_sep>curExtraction=Extraction(pred=rel sent=text confidence=float(confidence))<for_stmt>arg data[4::2]<block_start>curExtraction.addArg(arg)<block_end>d[text]=d.get(text [])+[curExtraction]<block_end><block_end>self.oie=d<line_sep>self.normalizeConfidence()<block_end><def_stmt>normalizeConfidence self<block_start>''' Normalize confidence to resemble probabilities '''<line_sep>EPSILON=1e-3<line_sep>self.confidences=[extraction.confidence<for>sent self.oie<for>extraction self.oie[sent]]<line_sep>maxConfidence=max(self.confidences)<line_sep>minConfidence=min(self.confidences)<line_sep>denom=maxConfidence-minConfidence+(2<times>EPSILON)<for_stmt>sent,extractions self.oie.items()<block_start><for_stmt>extraction extractions<block_start>extraction.confidence=((extraction.confidence-minConfidence)+EPSILON)/denom<block_end><block_end><block_end><block_end>
|
#! /usr/bin/env python
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
# scapy.contrib.description = Unified Diagnostic Service (UDS)
# scapy.contrib.status = loads
<import_stmt>struct<import_from_stmt>scapy.fields ByteEnumField StrField ConditionalField BitEnumField BitField XByteField FieldListField XShortField X3BytesField XIntField ByteField ShortField ObservableDict XShortEnumField XByteEnumField<import_from_stmt>scapy.packet Packet bind_layers<import_from_stmt>scapy.config conf<import_from_stmt>scapy.error log_loading<import_from_stmt>scapy.utils PeriodicSenderThread<line_sep>"""
UDS
"""<try_stmt><block_start><if_stmt>conf.contribs['UDS']['treat-response-pending-as-answer']<block_start><pass><block_end><block_end><except_stmt>KeyError<block_start>log_loading.info("Specify \"conf.contribs['UDS'] = "<concat>"{'treat-response-pending-as-answer': True}\" to treat "<concat>"a negative response 'requestCorrectlyReceived-"<concat>"ResponsePending' as answer of a request. \n"<concat>"The default value is False.")<line_sep>conf.contribs['UDS']={'treat-response-pending-as-answer':<false>}<block_end><class_stmt>UDS(Packet)<block_start>services=ObservableDict({0x10:'DiagnosticSessionControl' 0x11:'ECUReset' 0x14:'ClearDiagnosticInformation' 0x19:'ReadDTCInformation' 0x22:'ReadDataByIdentifier' 0x23:'ReadMemoryByAddress' 0x24:'ReadScalingDataByIdentifier' 0x27:'SecurityAccess' 0x28:'CommunicationControl' 0x2A:'ReadDataPeriodicIdentifier' 0x2C:'DynamicallyDefineDataIdentifier' 0x2E:'WriteDataByIdentifier' 0x2F:'InputOutputControlByIdentifier' 0x31:'RoutineControl' 0x34:'RequestDownload' 0x35:'RequestUpload' 0x36:'TransferData' 0x37:'RequestTransferExit' 0x3D:'WriteMemoryByAddress' 0x3E:'TesterPresent' 0x50:'DiagnosticSessionControlPositiveResponse' 0x51:'ECUResetPositiveResponse' 0x54:'ClearDiagnosticInformationPositiveResponse' 0x59:'ReadDTCInformationPositiveResponse' 0x62:'ReadDataByIdentifierPositiveResponse' 0x63:'ReadMemoryByAddressPositiveResponse' 0x64:'ReadScalingDataByIdentifierPositiveResponse' 0x67:'SecurityAccessPositiveResponse' 0x68:'CommunicationControlPositiveResponse' 0x6A:'ReadDataPeriodicIdentifierPositiveResponse' 0x6C:'DynamicallyDefineDataIdentifierPositiveResponse' 0x6E:'WriteDataByIdentifierPositiveResponse' 0x6F:'InputOutputControlByIdentifierPositiveResponse' 0x71:'RoutineControlPositiveResponse' 0x74:'RequestDownloadPositiveResponse' 0x75:'RequestUploadPositiveResponse' 0x76:'TransferDataPositiveResponse' 0x77:'RequestTransferExitPositiveResponse' 0x7D:'WriteMemoryByAddressPositiveResponse' 0x7E:'TesterPresentPositiveResponse' 0x83:'AccessTimingParameter' 0x84:'SecuredDataTransmission' 0x85:'ControlDTCSetting' 0x86:'ResponseOnEvent' 0x87:'LinkControl' 0xC3:'AccessTimingParameterPositiveResponse' 0xC4:'SecuredDataTransmissionPositiveResponse' 0xC5:'ControlDTCSettingPositiveResponse' 0xC6:'ResponseOnEventPositiveResponse' 0xC7:'LinkControlPositiveResponse' 0x7f:'NegativeResponse'})<line_sep>name='UDS'<line_sep>fields_desc=[XByteEnumField('service' 0 services)]<def_stmt>answers self other<block_start>"""DEV: true if self is an answer from other"""<if_stmt>other.__class__<eq>self.__class__<block_start><return>(other.service+0x40)<eq>self.service<or>(self.service<eq>0x7f<and>self.requestServiceId<eq>other.service<and>(self.negativeResponseCode<ne>0x78<or>conf.contribs['UDS']['treat-response-pending-as-answer']))<block_end><return>0<block_end><def_stmt>hashret self<block_start><if_stmt>self.service<eq>0x7f<block_start><return>struct.pack('B' self.requestServiceId)<block_end><return>struct.pack('B' self.service&~0x40)<block_end><block_end># ########################DSC###################################
<class_stmt>UDS_DSC(Packet)<block_start>diagnosticSessionTypes={0x00:'ISOSAEReserved' 0x01:'defaultSession' 0x02:'programmingSession' 0x03:'extendedDiagnosticSession' 0x04:'safetySystemDiagnosticSession' 0x7F:'ISOSAEReserved'}<line_sep>name='DiagnosticSessionControl'<line_sep>fields_desc=[ByteEnumField('diagnosticSessionType' 0 diagnosticSessionTypes)]<block_end>bind_layers(UDS UDS_DSC service=0x10)<class_stmt>UDS_DSCPR(Packet)<block_start>name='DiagnosticSessionControlPositiveResponse'<line_sep>fields_desc=[ByteEnumField('diagnosticSessionType' 0 UDS_DSC.diagnosticSessionTypes) StrField('sessionParameterRecord' B"")]<block_end>bind_layers(UDS UDS_DSCPR service=0x50)<line_sep># #########################ER###################################
<class_stmt>UDS_ER(Packet)<block_start>resetTypes={0x00:'ISOSAEReserved' 0x01:'hardReset' 0x02:'keyOffOnReset' 0x03:'softReset' 0x04:'enableRapidPowerShutDown' 0x05:'disableRapidPowerShutDown' 0x7F:'ISOSAEReserved'}<line_sep>name='ECUReset'<line_sep>fields_desc=[ByteEnumField('resetType' 0 resetTypes)]<block_end>bind_layers(UDS UDS_ER service=0x11)<class_stmt>UDS_ERPR(Packet)<block_start>name='ECUResetPositiveResponse'<line_sep>fields_desc=[ByteEnumField('resetType' 0 UDS_ER.resetTypes) ConditionalField(ByteField('powerDownTime' 0) <lambda>pkt:pkt.resetType<eq>0x04)]<block_end>bind_layers(UDS UDS_ERPR service=0x51)<line_sep># #########################SA###################################
<class_stmt>UDS_SA(Packet)<block_start>name='SecurityAccess'<line_sep>fields_desc=[ByteField('securityAccessType' 0) ConditionalField(StrField('securityAccessDataRecord' B"") <lambda>pkt:pkt.securityAccessType%2<eq>1) ConditionalField(StrField('securityKey' B"") <lambda>pkt:pkt.securityAccessType%2<eq>0)]<block_end>bind_layers(UDS UDS_SA service=0x27)<class_stmt>UDS_SAPR(Packet)<block_start>name='SecurityAccessPositiveResponse'<line_sep>fields_desc=[ByteField('securityAccessType' 0) ConditionalField(StrField('securitySeed' B"") <lambda>pkt:pkt.securityAccessType%2<eq>1) ]<block_end>bind_layers(UDS UDS_SAPR service=0x67)<line_sep># #########################CC###################################
<class_stmt>UDS_CC(Packet)<block_start>controlTypes={0x00:'enableRxAndTx' 0x01:'enableRxAndDisableTx' 0x02:'disableRxAndEnableTx' 0x03:'disableRxAndTx'}<line_sep>name='CommunicationControl'<line_sep>fields_desc=[ByteEnumField('controlType' 0 controlTypes) BitEnumField('communicationType0' 0 2 {0:'ISOSAEReserved' 1:'normalCommunicationMessages' 2:'networkManagmentCommunicationMessages' 3:'networkManagmentCommunicationMessages and '<concat>'normalCommunicationMessages'}) BitField('communicationType1' 0 2) BitEnumField('communicationType2' 0 4 {0:'Disable/Enable specified communication Type' 1:'Disable/Enable specific subnet' 2:'Disable/Enable specific subnet' 3:'Disable/Enable specific subnet' 4:'Disable/Enable specific subnet' 5:'Disable/Enable specific subnet' 6:'Disable/Enable specific subnet' 7:'Disable/Enable specific subnet' 8:'Disable/Enable specific subnet' 9:'Disable/Enable specific subnet' 10:'Disable/Enable specific subnet' 11:'Disable/Enable specific subnet' 12:'Disable/Enable specific subnet' 13:'Disable/Enable specific subnet' 14:'Disable/Enable specific subnet' 15:'Disable/Enable network'})]<block_end>bind_layers(UDS UDS_CC service=0x28)<class_stmt>UDS_CCPR(Packet)<block_start>name='CommunicationControlPositiveResponse'<line_sep>fields_desc=[ByteEnumField('controlType' 0 UDS_CC.controlTypes)]<block_end>bind_layers(UDS UDS_CCPR service=0x68)<line_sep># #########################TP###################################
<class_stmt>UDS_TP(Packet)<block_start>name='TesterPresent'<line_sep>fields_desc=[ByteField('subFunction' 0)]<block_end>bind_layers(UDS UDS_TP service=0x3E)<class_stmt>UDS_TPPR(Packet)<block_start>name='TesterPresentPositiveResponse'<line_sep>fields_desc=[ByteField('zeroSubFunction' 0)]<block_end>bind_layers(UDS UDS_TPPR service=0x7E)<line_sep># #########################ATP###################################
<class_stmt>UDS_ATP(Packet)<block_start>timingParameterAccessTypes={0:'ISOSAEReserved' 1:'readExtendedTimingParameterSet' 2:'setTimingParametersToDefaultValues' 3:'readCurrentlyActiveTimingParameters' 4:'setTimingParametersToGivenValues'}<line_sep>name='AccessTimingParameter'<line_sep>fields_desc=[ByteEnumField('timingParameterAccessType' 0 timingParameterAccessTypes) ConditionalField(StrField('timingParameterRequestRecord' B"") <lambda>pkt:pkt.timingParameterAccessType<eq>0x4)]<block_end>bind_layers(UDS UDS_ATP service=0x83)<class_stmt>UDS_ATPPR(Packet)<block_start>name='AccessTimingParameterPositiveResponse'<line_sep>fields_desc=[ByteEnumField('timingParameterAccessType' 0 UDS_ATP.timingParameterAccessTypes) ConditionalField(StrField('timingParameterResponseRecord' B"") <lambda>pkt:pkt.timingParameterAccessType<eq>0x3)]<block_end>bind_layers(UDS UDS_ATPPR service=0xC3)<line_sep># #########################SDT###################################
<class_stmt>UDS_SDT(Packet)<block_start>name='SecuredDataTransmission'<line_sep>fields_desc=[StrField('securityDataRequestRecord' B"")]<block_end>bind_layers(UDS UDS_SDT service=0x84)<class_stmt>UDS_SDTPR(Packet)<block_start>name='SecuredDataTransmissionPositiveResponse'<line_sep>fields_desc=[StrField('securityDataResponseRecord' B"")]<block_end>bind_layers(UDS UDS_SDTPR service=0xC4)<line_sep># #########################CDTCS###################################
<class_stmt>UDS_CDTCS(Packet)<block_start>DTCSettingTypes={0:'ISOSAEReserved' 1:'on' 2:'off'}<line_sep>name='ControlDTCSetting'<line_sep>fields_desc=[ByteEnumField('DTCSettingType' 0 DTCSettingTypes) StrField('DTCSettingControlOptionRecord' B"")]<block_end>bind_layers(UDS UDS_CDTCS service=0x85)<class_stmt>UDS_CDTCSPR(Packet)<block_start>name='ControlDTCSettingPositiveResponse'<line_sep>fields_desc=[ByteEnumField('DTCSettingType' 0 UDS_CDTCS.DTCSettingTypes)]<block_end>bind_layers(UDS UDS_CDTCSPR service=0xC5)<line_sep># #########################ROE###################################
# TODO: improve this protocol implementation
<class_stmt>UDS_ROE(Packet)<block_start>eventTypes={0:'doNotStoreEvent' 1:'storeEvent'}<line_sep>name='ResponseOnEvent'<line_sep>fields_desc=[ByteEnumField('eventType' 0 eventTypes) ByteField('eventWindowTime' 0) StrField('eventTypeRecord' B"")]<block_end>bind_layers(UDS UDS_ROE service=0x86)<class_stmt>UDS_ROEPR(Packet)<block_start>name='ResponseOnEventPositiveResponse'<line_sep>fields_desc=[ByteEnumField('eventType' 0 UDS_ROE.eventTypes) ByteField('numberOfIdentifiedEvents' 0) ByteField('eventWindowTime' 0) StrField('eventTypeRecord' B"")]<block_end>bind_layers(UDS UDS_ROEPR service=0xC6)<line_sep># #########################LC###################################
<class_stmt>UDS_LC(Packet)<block_start>linkControlTypes={0:'ISOSAEReserved' 1:'verifyBaudrateTransitionWithFixedBaudrate' 2:'verifyBaudrateTransitionWithSpecificBaudrate' 3:'transitionBaudrate'}<line_sep>name='LinkControl'<line_sep>fields_desc=[ByteEnumField('linkControlType' 0 linkControlTypes) ConditionalField(ByteField('baudrateIdentifier' 0) <lambda>pkt:pkt.linkControlType<eq>0x1) ConditionalField(ByteField('baudrateHighByte' 0) <lambda>pkt:pkt.linkControlType<eq>0x2) ConditionalField(ByteField('baudrateMiddleByte' 0) <lambda>pkt:pkt.linkControlType<eq>0x2) ConditionalField(ByteField('baudrateLowByte' 0) <lambda>pkt:pkt.linkControlType<eq>0x2)]<block_end>bind_layers(UDS UDS_LC service=0x87)<class_stmt>UDS_LCPR(Packet)<block_start>name='LinkControlPositiveResponse'<line_sep>fields_desc=[ByteEnumField('linkControlType' 0 UDS_LC.linkControlTypes)]<block_end>bind_layers(UDS UDS_LCPR service=0xC7)<line_sep># #########################RDBI###################################
<class_stmt>UDS_RDBI(Packet)<block_start>dataIdentifiers=ObservableDict()<line_sep>name='ReadDataByIdentifier'<line_sep>fields_desc=[FieldListField("identifiers" [] XShortEnumField('dataIdentifier' 0 dataIdentifiers))]<block_end>bind_layers(UDS UDS_RDBI service=0x22)<class_stmt>UDS_RDBIPR(Packet)<block_start>name='ReadDataByIdentifierPositiveResponse'<line_sep>fields_desc=[XShortEnumField('dataIdentifier' 0 UDS_RDBI.dataIdentifiers) ]<block_end>bind_layers(UDS UDS_RDBIPR service=0x62)<line_sep># #########################RMBA###################################
<class_stmt>UDS_RMBA(Packet)<block_start>name='ReadMemoryByAddress'<line_sep>fields_desc=[BitField('memorySizeLen' 0 4) BitField('memoryAddressLen' 0 4) ConditionalField(XByteField('memoryAddress1' 0) <lambda>pkt:pkt.memoryAddressLen<eq>1) ConditionalField(XShortField('memoryAddress2' 0) <lambda>pkt:pkt.memoryAddressLen<eq>2) ConditionalField(X3BytesField('memoryAddress3' 0) <lambda>pkt:pkt.memoryAddressLen<eq>3) ConditionalField(XIntField('memoryAddress4' 0) <lambda>pkt:pkt.memoryAddressLen<eq>4) ConditionalField(XByteField('memorySize1' 0) <lambda>pkt:pkt.memorySizeLen<eq>1) ConditionalField(XShortField('memorySize2' 0) <lambda>pkt:pkt.memorySizeLen<eq>2) ConditionalField(X3BytesField('memorySize3' 0) <lambda>pkt:pkt.memorySizeLen<eq>3) ConditionalField(XIntField('memorySize4' 0) <lambda>pkt:pkt.memorySizeLen<eq>4) ]<block_end>bind_layers(UDS UDS_RMBA service=0x23)<class_stmt>UDS_RMBAPR(Packet)<block_start>name='ReadMemoryByAddressPositiveResponse'<line_sep>fields_desc=[StrField('dataRecord' <none> fmt="B")]<block_end>bind_layers(UDS UDS_RMBAPR service=0x63)<line_sep># #########################RSDBI###################################
<class_stmt>UDS_RSDBI(Packet)<block_start>name='ReadScalingDataByIdentifier'<line_sep>fields_desc=[XShortField('dataIdentifier' 0)]<block_end>bind_layers(UDS UDS_RSDBI service=0x24)<line_sep># TODO: Implement correct scaling here, instead of using just the dataRecord
<class_stmt>UDS_RSDBIPR(Packet)<block_start>name='ReadScalingDataByIdentifierPositiveResponse'<line_sep>fields_desc=[XShortField('dataIdentifier' 0) ByteField('scalingByte' 0) StrField('dataRecord' <none> fmt="B")]<block_end>bind_layers(UDS UDS_RSDBIPR service=0x64)<line_sep># #########################RDBPI###################################
<class_stmt>UDS_RDBPI(Packet)<block_start>transmissionModes={0:'ISOSAEReserved' 1:'sendAtSlowRate' 2:'sendAtMediumRate' 3:'sendAtFastRate' 4:'stopSending'}<line_sep>name='ReadDataByPeriodicIdentifier'<line_sep>fields_desc=[ByteEnumField('transmissionMode' 0 transmissionModes) ByteField('periodicDataIdentifier' 0) StrField('furtherPeriodicDataIdentifier' 0 fmt="B")]<block_end>bind_layers(UDS UDS_RDBPI service=0x2A)<line_sep># TODO: Implement correct scaling here, instead of using just the dataRecord
<class_stmt>UDS_RDBPIPR(Packet)<block_start>name='ReadDataByPeriodicIdentifierPositiveResponse'<line_sep>fields_desc=[ByteField('periodicDataIdentifier' 0) StrField('dataRecord' <none> fmt="B")]<block_end>bind_layers(UDS UDS_RDBPIPR service=0x6A)<line_sep># #########################DDDI###################################
# TODO: Implement correct interpretation here,
# instead of using just the dataRecord
<class_stmt>UDS_DDDI(Packet)<block_start>name='DynamicallyDefineDataIdentifier'<line_sep>fields_desc=[ByteField('definitionMode' 0) StrField('dataRecord' 0 fmt="B")]<block_end>bind_layers(UDS UDS_DDDI service=0x2C)<class_stmt>UDS_DDDIPR(Packet)<block_start>name='DynamicallyDefineDataIdentifierPositiveResponse'<line_sep>fields_desc=[ByteField('definitionMode' 0) XShortField('dynamicallyDefinedDataIdentifier' 0)]<block_end>bind_layers(UDS UDS_DDDIPR service=0x6C)<line_sep># #########################WDBI###################################
<class_stmt>UDS_WDBI(Packet)<block_start>name='WriteDataByIdentifier'<line_sep>fields_desc=[XShortEnumField('dataIdentifier' 0 UDS_RDBI.dataIdentifiers)]<block_end>bind_layers(UDS UDS_WDBI service=0x2E)<class_stmt>UDS_WDBIPR(Packet)<block_start>name='WriteDataByIdentifierPositiveResponse'<line_sep>fields_desc=[XShortEnumField('dataIdentifier' 0 UDS_RDBI.dataIdentifiers) ]<block_end>bind_layers(UDS UDS_WDBIPR service=0x6E)<line_sep># #########################WMBA###################################
<class_stmt>UDS_WMBA(Packet)<block_start>name='WriteMemoryByAddress'<line_sep>fields_desc=[BitField('memorySizeLen' 0 4) BitField('memoryAddressLen' 0 4) ConditionalField(XByteField('memoryAddress1' 0) <lambda>pkt:pkt.memoryAddressLen<eq>1) ConditionalField(XShortField('memoryAddress2' 0) <lambda>pkt:pkt.memoryAddressLen<eq>2) ConditionalField(X3BytesField('memoryAddress3' 0) <lambda>pkt:pkt.memoryAddressLen<eq>3) ConditionalField(XIntField('memoryAddress4' 0) <lambda>pkt:pkt.memoryAddressLen<eq>4) ConditionalField(XByteField('memorySize1' 0) <lambda>pkt:pkt.memorySizeLen<eq>1) ConditionalField(XShortField('memorySize2' 0) <lambda>pkt:pkt.memorySizeLen<eq>2) ConditionalField(X3BytesField('memorySize3' 0) <lambda>pkt:pkt.memorySizeLen<eq>3) ConditionalField(XIntField('memorySize4' 0) <lambda>pkt:pkt.memorySizeLen<eq>4) StrField('dataRecord' b'\x00' fmt="B") ]<block_end>bind_layers(UDS UDS_WMBA service=0x3D)<class_stmt>UDS_WMBAPR(Packet)<block_start>name='WriteMemoryByAddressPositiveResponse'<line_sep>fields_desc=[BitField('memorySizeLen' 0 4) BitField('memoryAddressLen' 0 4) ConditionalField(XByteField('memoryAddress1' 0) <lambda>pkt:pkt.memoryAddressLen<eq>1) ConditionalField(XShortField('memoryAddress2' 0) <lambda>pkt:pkt.memoryAddressLen<eq>2) ConditionalField(X3BytesField('memoryAddress3' 0) <lambda>pkt:pkt.memoryAddressLen<eq>3) ConditionalField(XIntField('memoryAddress4' 0) <lambda>pkt:pkt.memoryAddressLen<eq>4) ConditionalField(XByteField('memorySize1' 0) <lambda>pkt:pkt.memorySizeLen<eq>1) ConditionalField(XShortField('memorySize2' 0) <lambda>pkt:pkt.memorySizeLen<eq>2) ConditionalField(X3BytesField('memorySize3' 0) <lambda>pkt:pkt.memorySizeLen<eq>3) ConditionalField(XIntField('memorySize4' 0) <lambda>pkt:pkt.memorySizeLen<eq>4)]<block_end>bind_layers(UDS UDS_WMBAPR service=0x7D)<line_sep># #########################CDTCI###################################
<class_stmt>UDS_CDTCI(Packet)<block_start>name='ClearDiagnosticInformation'<line_sep>fields_desc=[ByteField('groupOfDTCHighByte' 0) ByteField('groupOfDTCMiddleByte' 0) ByteField('groupOfDTCLowByte' 0) ]<block_end>bind_layers(UDS UDS_CDTCI service=0x14)<line_sep># #########################RDTCI###################################
<class_stmt>UDS_RDTCI(Packet)<block_start>reportTypes={0:'ISOSAEReserved' 1:'reportNumberOfDTCByStatusMask' 2:'reportDTCByStatusMask' 3:'reportDTCSnapshotIdentification' 4:'reportDTCSnapshotRecordByDTCNumber' 5:'reportDTCSnapshotRecordByRecordNumber' 6:'reportDTCExtendedDataRecordByDTCNumber' 7:'reportNumberOfDTCBySeverityMaskRecord' 8:'reportDTCBySeverityMaskRecord' 9:'reportSeverityInformationOfDTC' 10:'reportSupportedDTC' 11:'reportFirstTestFailedDTC' 12:'reportFirstConfirmedDTC' 13:'reportMostRecentTestFailedDTC' 14:'reportMostRecentConfirmedDTC' 15:'reportMirrorMemoryDTCByStatusMask' 16:'reportMirrorMemoryDTCExtendedDataRecordByDTCNumber' 17:'reportNumberOfMirrorMemoryDTCByStatusMask' 18:'reportNumberOfEmissionsRelatedOBDDTCByStatusMask' 19:'reportEmissionsRelatedOBDDTCByStatusMask' 20:'reportDTCFaultDetectionCounter' 21:'reportDTCWithPermanentStatus'}<line_sep>name='ReadDTCInformation'<line_sep>fields_desc=[ByteEnumField('reportType' 0 reportTypes) ConditionalField(XByteField('DTCStatusMask' 0) <lambda>pkt:pkt.reportType<in>[0x01 0x02 0x0f 0x11 0x12 0x13]) ConditionalField(ByteField('DTCHighByte' 0) <lambda>pkt:pkt.reportType<in>[0x3 0x4 0x6 0x10 0x09]) ConditionalField(ByteField('DTCMiddleByte' 0) <lambda>pkt:pkt.reportType<in>[0x3 0x4 0x6 0x10 0x09]) ConditionalField(ByteField('DTCLowByte' 0) <lambda>pkt:pkt.reportType<in>[0x3 0x4 0x6 0x10 0x09]) ConditionalField(ByteField('DTCSnapshotRecordNumber' 0) <lambda>pkt:pkt.reportType<in>[0x3 0x4 0x5]) ConditionalField(ByteField('DTCExtendedDataRecordNumber' 0) <lambda>pkt:pkt.reportType<in>[0x6 0x10]) ConditionalField(ByteField('DTCSeverityMask' 0) <lambda>pkt:pkt.reportType<in>[0x07 0x08]) ConditionalField(ByteField('DTCStatusMask' 0) <lambda>pkt:pkt.reportType<in>[0x07 0x08]) ]<block_end>bind_layers(UDS UDS_RDTCI service=0x19)<class_stmt>UDS_RDTCIPR(Packet)<block_start>name='ReadDTCInformationPositiveResponse'<line_sep>fields_desc=[ByteEnumField('reportType' 0 UDS_RDTCI.reportTypes) ConditionalField(XByteField('DTCStatusAvailabilityMask' 0) <lambda>pkt:pkt.reportType<in>[0x01 0x07 0x11 0x12 0x02 0x0A 0x0B 0x0C 0x0D 0x0E 0x0F 0x13 0x15]) ConditionalField(ByteEnumField('DTCFormatIdentifier' 0 {0:'ISO15031-6DTCFormat' 1:'UDS-1DTCFormat' 2:'SAEJ1939-73DTCFormat' 3:'ISO11992-4DTCFormat'}) <lambda>pkt:pkt.reportType<in>[0x01 0x07 0x11 0x12]) ConditionalField(ShortField('DTCCount' 0) <lambda>pkt:pkt.reportType<in>[0x01 0x07 0x11 0x12]) ConditionalField(StrField('DTCAndStatusRecord' 0) <lambda>pkt:pkt.reportType<in>[0x02 0x0A 0x0B 0x0C 0x0D 0x0E 0x0F 0x13 0x15]) ConditionalField(StrField('dataRecord' 0) <lambda>pkt:pkt.reportType<in>[0x03 0x04 0x05 0x06 0x08 0x09 0x10 0x14])]<block_end>bind_layers(UDS UDS_RDTCIPR service=0x59)<line_sep># #########################RC###################################
<class_stmt>UDS_RC(Packet)<block_start>routineControlTypes={0:'ISOSAEReserved' 1:'startRoutine' 2:'stopRoutine' 3:'requestRoutineResults'}<line_sep>name='RoutineControl'<line_sep>fields_desc=[ByteEnumField('routineControlType' 0 routineControlTypes) XShortField('routineIdentifier' 0) StrField('routineControlOptionRecord' 0 fmt="B") ]<block_end>bind_layers(UDS UDS_RC service=0x31)<class_stmt>UDS_RCPR(Packet)<block_start>name='RoutineControlPositiveResponse'<line_sep>fields_desc=[ByteEnumField('routineControlType' 0 UDS_RC.routineControlTypes) XShortField('routineIdentifier' 0) StrField('routineStatusRecord' 0 fmt="B") ]<block_end>bind_layers(UDS UDS_RCPR service=0x71)<line_sep># #########################RD###################################
<class_stmt>UDS_RD(Packet)<block_start>dataFormatIdentifiers={0:'noCompressionNoEncryption'}<line_sep>name='RequestDownload'<line_sep>fields_desc=[ByteEnumField('dataFormatIdentifier' 0 dataFormatIdentifiers) BitField('memorySizeLen' 0 4) BitField('memoryAddressLen' 0 4) ConditionalField(XByteField('memoryAddress1' 0) <lambda>pkt:pkt.memoryAddressLen<eq>1) ConditionalField(XShortField('memoryAddress2' 0) <lambda>pkt:pkt.memoryAddressLen<eq>2) ConditionalField(X3BytesField('memoryAddress3' 0) <lambda>pkt:pkt.memoryAddressLen<eq>3) ConditionalField(XIntField('memoryAddress4' 0) <lambda>pkt:pkt.memoryAddressLen<eq>4) ConditionalField(XByteField('memorySize1' 0) <lambda>pkt:pkt.memorySizeLen<eq>1) ConditionalField(XShortField('memorySize2' 0) <lambda>pkt:pkt.memorySizeLen<eq>2) ConditionalField(X3BytesField('memorySize3' 0) <lambda>pkt:pkt.memorySizeLen<eq>3) ConditionalField(XIntField('memorySize4' 0) <lambda>pkt:pkt.memorySizeLen<eq>4)]<block_end>bind_layers(UDS UDS_RD service=0x34)<class_stmt>UDS_RDPR(Packet)<block_start>name='RequestDownloadPositiveResponse'<line_sep>fields_desc=[ByteEnumField('routineControlType' 0 UDS_RC.routineControlTypes) BitField('memorySizeLen' 0 4) BitField('memoryAddressLen' 0 4) StrField('maxNumberOfBlockLength' 0 fmt="B") ]<block_end>bind_layers(UDS UDS_RDPR service=0x74)<line_sep># #########################RU###################################
<class_stmt>UDS_RU(Packet)<block_start>name='RequestUpload'<line_sep>fields_desc=[ByteEnumField('dataFormatIdentifier' 0 UDS_RD.dataFormatIdentifiers) BitField('memorySizeLen' 0 4) BitField('memoryAddressLen' 0 4) ConditionalField(XByteField('memoryAddress1' 0) <lambda>pkt:pkt.memoryAddressLen<eq>1) ConditionalField(XShortField('memoryAddress2' 0) <lambda>pkt:pkt.memoryAddressLen<eq>2) ConditionalField(X3BytesField('memoryAddress3' 0) <lambda>pkt:pkt.memoryAddressLen<eq>3) ConditionalField(XIntField('memoryAddress4' 0) <lambda>pkt:pkt.memoryAddressLen<eq>4) ConditionalField(XByteField('memorySize1' 0) <lambda>pkt:pkt.memorySizeLen<eq>1) ConditionalField(XShortField('memorySize2' 0) <lambda>pkt:pkt.memorySizeLen<eq>2) ConditionalField(X3BytesField('memorySize3' 0) <lambda>pkt:pkt.memorySizeLen<eq>3) ConditionalField(XIntField('memorySize4' 0) <lambda>pkt:pkt.memorySizeLen<eq>4)]<block_end>bind_layers(UDS UDS_RU service=0x35)<class_stmt>UDS_RUPR(Packet)<block_start>name='RequestUploadPositiveResponse'<line_sep>fields_desc=[ByteEnumField('routineControlType' 0 UDS_RC.routineControlTypes) BitField('memorySizeLen' 0 4) BitField('memoryAddressLen' 0 4) StrField('maxNumberOfBlockLength' 0 fmt="B") ]<block_end>bind_layers(UDS UDS_RUPR service=0x75)<line_sep># #########################TD###################################
<class_stmt>UDS_TD(Packet)<block_start>name='TransferData'<line_sep>fields_desc=[ByteField('blockSequenceCounter' 0) StrField('transferRequestParameterRecord' 0 fmt="B")]<block_end>bind_layers(UDS UDS_TD service=0x36)<class_stmt>UDS_TDPR(Packet)<block_start>name='TransferDataPositiveResponse'<line_sep>fields_desc=[ByteField('blockSequenceCounter' 0) StrField('transferResponseParameterRecord' 0 fmt="B")]<block_end>bind_layers(UDS UDS_TDPR service=0x76)<line_sep># #########################RTE###################################
<class_stmt>UDS_RTE(Packet)<block_start>name='RequestTransferExit'<line_sep>fields_desc=[StrField('transferRequestParameterRecord' 0 fmt="B")]<block_end>bind_layers(UDS UDS_RTE service=0x37)<class_stmt>UDS_RTEPR(Packet)<block_start>name='RequestTransferExitPositiveResponse'<line_sep>fields_desc=[StrField('transferResponseParameterRecord' 0 fmt="B")]<block_end>bind_layers(UDS UDS_RTEPR service=0x77)<line_sep># #########################IOCBI###################################
<class_stmt>UDS_IOCBI(Packet)<block_start>name='InputOutputControlByIdentifier'<line_sep>fields_desc=[XShortField('dataIdentifier' 0) ByteField('controlOptionRecord' 0) StrField('controlEnableMaskRecord' 0 fmt="B")]<block_end>bind_layers(UDS UDS_IOCBI service=0x2F)<class_stmt>UDS_IOCBIPR(Packet)<block_start>name='InputOutputControlByIdentifierPositiveResponse'<line_sep>fields_desc=[XShortField('dataIdentifier' 0) StrField('controlStatusRecord' 0 fmt="B")]<block_end>bind_layers(UDS UDS_IOCBIPR service=0x6F)<line_sep># #########################NRC###################################
<class_stmt>UDS_NRC(Packet)<block_start>negativeResponseCodes={0x00:'positiveResponse' 0x10:'generalReject' 0x11:'serviceNotSupported' 0x12:'subFunctionNotSupported' 0x13:'incorrectMessageLengthOrInvalidFormat' 0x14:'responseTooLong' 0x20:'ISOSAEReserved' 0x21:'busyRepeatRequest' 0x22:'conditionsNotCorrect' 0x23:'ISOSAEReserved' 0x24:'requestSequenceError' 0x25:'noResponseFromSubnetComponent' 0x26:'failurePreventsExecutionOfRequestedAction' 0x31:'requestOutOfRange' 0x33:'securityAccessDenied' 0x35:'invalidKey' 0x36:'exceedNumberOfAttempts' 0x37:'requiredTimeDelayNotExpired' 0x70:'uploadDownloadNotAccepted' 0x71:'transferDataSuspended' 0x72:'generalProgrammingFailure' 0x73:'wrongBlockSequenceCounter' 0x78:'requestCorrectlyReceived-ResponsePending' 0x7E:'subFunctionNotSupportedInActiveSession' 0x7F:'serviceNotSupportedInActiveSession' 0x80:'ISOSAEReserved' 0x81:'rpmTooHigh' 0x82:'rpmTooLow' 0x83:'engineIsRunning' 0x84:'engineIsNotRunning' 0x85:'engineRunTimeTooLow' 0x86:'temperatureTooHigh' 0x87:'temperatureTooLow' 0x88:'vehicleSpeedTooHigh' 0x89:'vehicleSpeedTooLow' 0x8a:'throttle/PedalTooHigh' 0x8b:'throttle/PedalTooLow' 0x8c:'transmissionRangeNotInNeutral' 0x8d:'transmissionRangeNotInGear' 0x8e:'ISOSAEReserved' 0x8f:'brakeSwitch(es)NotClosed' 0x90:'shifterLeverNotInPark' 0x91:'torqueConverterClutchLocked' 0x92:'voltageTooHigh' 0x93:'voltageTooLow' }<line_sep>name='NegativeResponseCode'<line_sep>fields_desc=[XByteEnumField('requestServiceId' 0 UDS.services) ByteEnumField('negativeResponseCode' 0 negativeResponseCodes)]<block_end>bind_layers(UDS UDS_NRC service=0x7f)<line_sep># ##################################################################
# ######################## UTILS ###################################
# ##################################################################
<class_stmt>UDS_TesterPresentSender(PeriodicSenderThread)<block_start><def_stmt>__init__ self sock pkt=UDS()/UDS_TP() interval=2<block_start>""" Thread to send TesterPresent messages packets periodically
Args:
sock: socket where packet is sent periodically
pkt: packet to send
interval: interval between two packets
"""<line_sep>PeriodicSenderThread.__init__(self sock pkt interval)<block_end><block_end>
|
<import_stmt>numpy<as>np<import_from_stmt>statsmodels.tsa.interp dentonm<def_stmt>test_denton_quarterly # Data and results taken from IMF paper
<block_start>indicator=np.array([98.2 100.8 102.2 100.8 99.0 101.6 102.7 101.5 100.5 103.0 103.5 101.5])<line_sep>benchmark=np.array([4000. 4161.4])<line_sep>x_imf=dentonm(indicator benchmark freq="aq")<line_sep>imf_stata=np.array([969.8 998.4 1018.3 1013.4 1007.2 1042.9 1060.3 1051.0 1040.6 1066.5 1071.7 1051.0])<line_sep>np.testing.assert_almost_equal(imf_stata x_imf 1)<block_end><def_stmt>test_denton_quarterly2 # Test denton vs stata. Higher precision than other test.
<block_start>zQ=np.array([50 100 150 100]<times>5)<line_sep>Y=np.array([500 400 300 400 500])<line_sep>x_denton=dentonm(zQ Y freq="aq")<line_sep>x_stata=np.array([64.334796 127.80616 187.82379 120.03526 56.563894 105.97568 147.50144 89.958987 40.547201 74.445963 108.34473 76.66211 42.763347 94.14664 153.41596 109.67405 58.290761 122.62556 190.41409 128.66959])<line_sep>np.testing.assert_almost_equal(x_denton x_stata 5)<block_end><if_stmt>__name__<eq>"__main__"<block_start><import_stmt>pytest<line_sep>pytest.main([__file__ '-vvs' '-x' '--pdb'])<block_end>
|