from tabulate import tabulate
from semseg import models
from semseg import datasets
from semseg.models import backbones, heads


def show_models():
    model_names = models.__all__
    numbers = list(range(1, len(model_names) + 1))
    print(tabulate({'No.': numbers, 'Model Names': model_names}, headers='keys'))


def show_backbones():
    backbone_names = backbones.__all__
    variants = []
    for name in backbone_names:
        try:
            variants.append(list(eval(f"backbones.{name.lower()}_settings").keys()))
        except:
            variants.append('-')
    print(tabulate({'Backbone Names': backbone_names, 'Variants': variants}, headers='keys'))


def show_heads():
    head_names = heads.__all__
    numbers = list(range(1, len(head_names) + 1))
    print(tabulate({'No.': numbers, 'Heads': head_names}, headers='keys'))


def show_datasets():
    dataset_names = datasets.__all__
    numbers = list(range(1, len(dataset_names) + 1))
    print(tabulate({'No.': numbers, 'Datasets': dataset_names}, headers='keys'))
# Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# touch all root tree entities
# touch docker registry
# TODO: touch runs starting from date
from pipeline import PipelineAPI
import os
import json

ROLE = 'SEARCH_UPDATE'


def touch_item(api, id, acl_class):
    print('Processing %s [%s]' % (str(id), acl_class))
    permissions = {'id': id, 'aclClass': acl_class, 'mask': 0, 'principal': False, 'userName': ROLE}
    try:
        api.execute_request(str(api.api_url) + '/grant', method='post', data=json.dumps(permissions))
        api.execute_request(str(api.api_url) +
                            '/grant?id={id}&aclClass={aclClass}&user={userName}&isPrincipal=false'.format(**permissions),
                            method='delete')
    except BaseException as e:
        print(str(e.message))


def run():
    api = PipelineAPI(os.environ['API'], 'logs')
    result = api.execute_request(str(api.api_url) + '/folder/loadTree', method='get')
    children = ['pipelines', 'childFolders', 'storages', 'configurations']
    for child_type in children:
        if child_type in result:
            for item in result[child_type]:
                touch_item(api, item['id'], item['aclClass'])
    registries = api.docker_registry_load_all()
    for registry in registries:
        touch_item(api, registry['id'], registry['aclClass'])


if __name__ == '__main__':
    run()
"""
The :mod:`skmultilearn.ext` provides wrappers for other multi-label
classification libraries. Currently the available classes include:
+--------------------------------------------+------------------------------------------------------------------+
| Name | Description |
+============================================+==================================================================+
| :class:`~skmultilearn.ext.Meka` | Wrapper for the Multilabel Extension to WEKA - |
| | `MEKA <http://meka.sf.net>`_ library |
+--------------------------------------------+------------------------------------------------------------------+
| :class:`~skmultilearn.ext.Keras` | Wrapper for the Python Deep Learning library - |
| | `KERAS <https://keras.io/>`_ |
+--------------------------------------------+------------------------------------------------------------------+
| :func:`~skmultilearn.ext.download_meka` | Helper function for installing MEKA |
+--------------------------------------------+------------------------------------------------------------------+
"""<import_stmt>sys platform<import_from_stmt>.meka Meka download_meka<line_sep>__all__=["Meka" 'download_meka']<if_stmt><not>(sys.version_info[0]<eq>2<or>platform.architecture()[0]<eq>'32bit')<block_start><import_from_stmt>.keras Keras<line_sep>__all__<augadd>['Keras']<block_end> |
from __future__ import print_function, absolute_import
import time

import torch
from torch.autograd import Variable

from .evaluation_metrics import accuracy
from .loss import OIMLoss, TripletLoss
from .utils.meters import AverageMeter


class BaseTrainer(object):
    def __init__(self, model, criterions, print_freq=1):
        super(BaseTrainer, self).__init__()
        self.model = model
        self.criterions = criterions
        self.print_freq = print_freq

    def train(self, epoch, data_loader, optimizer):
        self.model.train()

        # for name, param in self.model.named_parameters():
        #     if 'classifier' in name:
        #         param.requires_grad = False

        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        precisions = AverageMeter()

        end = time.time()
        for i, inputs in enumerate(data_loader):
            data_time.update(time.time() - end)

            inputs, targets = self._parse_data(inputs)
            loss, prec1 = self._forward(inputs, targets, epoch)

            losses.update(loss.data[0], targets.size(0))
            precisions.update(prec1, targets.size(0))

            optimizer.zero_grad()
            loss.backward()
            # add gradient clip for lstm
            for param in self.model.parameters():
                try:
                    param.grad.data.clamp(-1., 1.)
                except:
                    continue
            optimizer.step()

            batch_time.update(time.time() - end)
            end = time.time()

            if (i + 1) % self.print_freq == 0:
                print('Epoch: [{}][{}/{}]\t'
                      'Time {:.3f} ({:.3f})\t'
                      'Data {:.3f} ({:.3f})\t'
                      'Loss {:.3f} ({:.3f})\t'
                      'Prec {:.2%} ({:.2%})\t'
                      .format(epoch, i + 1, len(data_loader),
                              batch_time.val, batch_time.avg,
                              data_time.val, data_time.avg,
                              losses.val, losses.avg,
                              precisions.val, precisions.avg))

    def _parse_data(self, inputs):
        raise NotImplementedError

    def _forward(self, inputs, targets):
        raise NotImplementedError


class Trainer(BaseTrainer):
    def _parse_data(self, inputs):
        imgs, _, pids, _ = inputs
        inputs = [Variable(imgs)]
        targets = Variable(pids.cuda())
        return inputs, targets

    def _forward(self, inputs, targets, epoch):
        outputs = self.model(*inputs)  # outputs = [x1, x2, x3]
        # new added by wc
        # x1 triplet loss
        loss_tri, prec_tri = self.criterions[0](outputs[0], targets, epoch)
        # x2 triplet loss
        loss_global, prec_global = self.criterions[1](outputs[1], targets, epoch)
        return loss_tri + loss_global, prec_global
# Generated by Django 2.0.3 on 2018-03-22 11:55
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [("order", "0042_auto_20180227_0436")]

    operations = [
        migrations.AlterModelOptions(
            name="order",
            options={
                "ordering": ("-pk",),
                "permissions": (
                    ("view_order", "Can view orders"),
                    ("edit_order", "Can edit orders"),
                ),
            },
        ),
        migrations.AlterField(
            model_name="order",
            name="language_code",
            field=models.CharField(default="en", max_length=35),
        ),
    ]
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extracts non-empty patches of extracted stafflines.
Extracts vertical slices of the image where glyphs are expected
(see `staffline_extractor.py`), and takes horizontal windows of the slice which
will be clustered. Some patches will have a glyph roughly in their center, and
the corresponding cluster centroids will be labeled as such.
"""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>logging<import_stmt>apache_beam<as>beam<import_from_stmt>apache_beam metrics<import_from_stmt>moonlight.staves staffline_extractor<import_from_stmt>moonlight.util more_iter_tools<import_stmt>numpy<as>np<import_from_stmt>six.moves filter<import_stmt>tensorflow<as>tf<def_stmt>_filter_patch patch min_num_dark_pixels=10<block_start>unused_patch_name,patch=patch<line_sep><return>np.greater_equal(np.sum(np.less(patch 0.5)) min_num_dark_pixels)<block_end><class_stmt>StafflinePatchesDoFn(beam.DoFn)<block_start>"""Runs the staffline patches graph."""<def_stmt>__init__ self patch_height patch_width num_stafflines timeout_ms max_patches_per_page<block_start>self.patch_height=patch_height<line_sep>self.patch_width=patch_width<line_sep>self.num_stafflines=num_stafflines<line_sep>self.timeout_ms=timeout_ms<line_sep>self.max_patches_per_page=max_patches_per_page<line_sep>self.total_pages_counter=metrics.Metrics.counter(self.__class__ 'total_pages')<line_sep>self.failed_pages_counter=metrics.Metrics.counter(self.__class__ 'failed_pages')<line_sep>self.successful_pages_counter=metrics.Metrics.counter(self.__class__ 'successful_pages')<line_sep>self.empty_pages_counter=metrics.Metrics.counter(self.__class__ 'empty_pages')<line_sep>self.total_patches_counter=metrics.Metrics.counter(self.__class__ 'total_patches')<line_sep>self.emitted_patches_counter=metrics.Metrics.counter(self.__class__ 'emitted_patches')<block_end><def_stmt>start_bundle self<block_start>self.extractor=staffline_extractor.StafflinePatchExtractor(patch_height=self.patch_height patch_width=self.patch_width run_options=tf.RunOptions(timeout_in_ms=self.timeout_ms))<line_sep>self.session=tf.Session(graph=self.extractor.graph)<block_end><def_stmt>process self png_path<block_start>self.total_pages_counter.inc()<try_stmt><block_start><with_stmt>self.session.as_default()<block_start>patches_iter=self.extractor.page_patch_iterator(png_path)<block_end><block_end># pylint: disable=broad-except
<except_stmt>Exception<block_start>logging.exception('Skipping failed music score (%s)' png_path)<line_sep>self.failed_pages_counter.inc()<line_sep><return><block_end>patches_iter=filter(_filter_patch patches_iter)<if_stmt>0<l>self.max_patches_per_page# Subsample patches.
<block_start>patches=more_iter_tools.iter_sample(patches_iter self.max_patches_per_page)<block_end><else_stmt><block_start>patches=list(patches_iter)<block_end><if_stmt><not>patches<block_start>self.empty_pages_counter.inc()<block_end>self.total_patches_counter.inc(len(patches))<line_sep># Serialize each patch as an Example.
<for_stmt>patch_name,patch patches<block_start>example=tf.train.Example()<line_sep>example.features.feature['name'].bytes_list.value.append(patch_name.encode('utf-8'))<line_sep>example.features.feature['features'].float_list.value.extend(patch.ravel())<line_sep>example.features.feature['height'].int64_list.value.append(patch.shape[0])<line_sep>example.features.feature['width'].int64_list.value.append(patch.shape[1])<line_sep><yield>example<block_end>self.successful_pages_counter.inc()<line_sep># Patches are sub-sampled by this point.
self.emitted_patches_counter.inc(len(patches))<block_end><def_stmt>finish_bundle self<block_start>self.session.close()<del_stmt>self.extractor<del_stmt>self.session<block_end><block_end> |
import ujson
import parallel
import datetime
from parallel import (timer, thread, gmtime, sys_stats, socket_stats,
                      memory_stats, context_stats, thread_seq_id)
from parallel.http.server import (quote_html, text_response, html_response,
                                  json_serialization, HttpServer)

thr = thread(interval=8, thread_characteristics="Low Latency")


class RateLimitedServer(HttpServer):
    http11 = True
    rate_limit = datetime.timedelta(milliseconds=16)

    def hello(self, transport, data):
        return b'Hello, World!'

    def stats(self, transport, data):
        return t.data or b''

    def uni(self, transport, data):
        return '<html><body>Works!</body></html>'

    def bytearr(self, transport, data):
        return bytearray(b'abcd')

    def res(self, transport, data):
        return thr.system_responsiveness


server1 = parallel.server('0.0.0.0', 8081)
parallel.register(server1, RateLimitedServer)

import parallel


def gmtime():
    return parallel.gmtime()


t = parallel.thread(func=gmtime, interval=1000, thread_characteristics="Low Latency")

import parallel
import datetime


def gmtime():
    return parallel.gmtime()


t = parallel.timer(func=gmtime, duetime=datetime.timedelta(milliseconds=1000), period=1000)
"""
Copyright (c) 2019 Lumerical Inc. """<line_sep>######## IMPORTS ########
# General purpose imports
<import_stmt>os<import_stmt>numpy<as>np<import_stmt>scipy<as>sp<line_sep># Optimization specific imports
<import_from_stmt>lumopt.utilities.load_lumerical_scripts load_from_lsf<import_from_stmt>lumopt.utilities.wavelengths Wavelengths<import_from_stmt>lumopt.geometries.polygon FunctionDefinedPolygon<import_from_stmt>lumopt.figures_of_merit.modematch ModeMatch<import_from_stmt>lumopt.optimizers.generic_optimizers ScipyOptimizers<import_from_stmt>lumopt.optimization Optimization<import_from_stmt>lumopt.utilities.materials Material<import_from_stmt>numpy.random rand<def_stmt>runGratingOptimization bandwidth_in_nm etch_depth n_grates params<block_start>bounds=[(0.1 1)]<times>4<line_sep>bounds[0]=(-3 3)#< Starting position
bounds[1]=(0 0.1)#< Scaling parameter R
bounds[2]=(1.5 3)#< Parameter a
bounds[3]=(0 2)#< Parameter b
<def_stmt>grating_params_pos params output_waveguide_length=0.5e-6 height=220e-9 y0=0<block_start>x_begin=-3e-6<line_sep>y3=y0+height<line_sep>y1=y3-etch_depth<line_sep>x_start=params[0]<times>1e-6#< First parameter is the starting position
x0=x_start<line_sep>R=params[1]<times>1e6#< second parameter (unit is 1/um)
a=params[2]#< Third parameter (dim-less)
b=params[3]#< Fourth parameter (dim-less)
verts=np.array([[x_begin y0] [x_begin y3] [x0 y3] [x0 y1]])<line_sep>lambda_c=1.55e-6<line_sep>F0=0.95<line_sep>## Iterate over all but the last
<for_stmt>i range(n_grates-1)<block_start>F=F0-R<times>(x0-x_start)<line_sep>Lambda=lambda_c/(a+F<times>b)<line_sep>x1=x0+(1-F)<times>Lambda#< Width of the etched region
x2=x0+Lambda#< Rest of cell
verts=np.concatenate((verts [[x1 y1] [x1 y3] [x2 y3] [x2 y1]]) axis=0)<line_sep>x0=x2<block_end>F=F0-R<times>(x0-x_start)<line_sep>Lambda=lambda_c/(a+F<times>b)<line_sep>x1=x0+(1-F)<times>Lambda#< Width of the etched region
x_end=x1+output_waveguide_length<line_sep>verts=np.concatenate((verts [[x1 y1] [x1 y3] [x_end y3] [x_end y0]]) axis=0)<line_sep><return>verts<block_end>geometry=FunctionDefinedPolygon(func=grating_params_pos initial_params=params bounds=bounds z=0.0 depth=110e-9 eps_out=1.44<power>2 eps_in=3.47668<power>2 edge_precision=5 dx=1e-3)<line_sep>######## DEFINE FIGURE OF MERIT ########
fom=ModeMatch(monitor_name='fom' mode_number=1 direction='Backward' target_T_fwd=<lambda>wl:np.ones(wl.size) norm_p=1)<line_sep>######## DEFINE OPTIMIZATION ALGORITHM ########
optimizer=ScipyOptimizers(max_iter=25 method='L-BFGS-B' scaling_factor=1 pgtol=1e-6)<line_sep>######## DEFINE BASE SIMULATION ########
base_script=load_from_lsf(os.path.join(os.path.dirname(__file__) 'grating_coupler_2D_2etch.lsf'))<line_sep>######## PUT EVERYTHING TOGETHER ########
lambda_start=1550-bandwidth_in_nm/2<line_sep>lambda_end=1550+bandwidth_in_nm/2<line_sep>lambda_pts=int(bandwidth_in_nm/10)+1<line_sep>wavelengths=Wavelengths(start=lambda_start<times>1e-9 stop=lambda_end<times>1e-9 points=lambda_pts)<line_sep>opt=Optimization(base_script=base_script wavelengths=wavelengths fom=fom geometry=geometry optimizer=optimizer hide_fdtd_cad=<true> use_deps=<true>)<line_sep>######## RUN THE OPTIMIZER ########
opt.run()<block_end><if_stmt>__name__<eq>"__main__"<block_start>bandwidth_in_nm=0#< Only optimiza for center frequency of 1550nm
initial_params=[0 0.03 2.4 0.5369]<line_sep>runGratingOptimization(bandwidth_in_nm=bandwidth_in_nm etch_depth=80e-9 n_grates=25 params=initial_params)<block_end> |
from flask import url_for
from flask import current_app


def get_static(boxormodule, filename):
    if current_app.config["DEBUG"] == True:
        return url_for("devstatic", boxormodule=boxormodule, filename=filename)
    else:
        return url_for("static", filename="modules/{}/{}".format(boxormodule, filename))
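A hedged sketch of how such a helper might be exposed to Jinja templates; the app object and the template usage shown in the comment are assumptions, not part of this module:

# Illustrative only: register the helper as a template global.
from flask import Flask

app = Flask(__name__)
# In a template one could then write, e.g.:
#   <link rel="stylesheet" href="{{ get_static('somebox', 'style.css') }}">
app.jinja_env.globals["get_static"] = get_static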
from django.contrib import admin
from django.forms import TextInput, ModelForm
from suit.admin import SortableModelAdmin

from .models import MarqueeMessage


class MarqueeMessageForm(ModelForm):
    class Meta:
        widgets = {
            'message': TextInput(attrs={'class': 'input-xxlarge'}),
        }


class MarqueeMessageAdmin(SortableModelAdmin):
    form = MarqueeMessageForm
    sortable = 'order'
    list_editable = ('display', 'order')
    list_display = ('message', 'display', 'order')


admin.site.register(MarqueeMessage, MarqueeMessageAdmin)
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GS GPS parameters."""<import_from_stmt>makani.config mconfig<import_from_stmt>makani.control system_types<import_stmt>numpy<as>np<line_sep>@mconfig.Config(deps={'gs_model':'base_station.gs_model' 'test_site':'common.test_site' })<def_stmt>MakeParams params<block_start>"""Make ground station gps parameters."""<if_stmt>params['gs_model']<eq>system_types.kGroundStationModelTopHat<block_start>gps_primary_antenna_dir=[0.0 0.0 -1.0]<line_sep>gps_primary_pos=[1.418 -1.657 -2.417]<line_sep># TopHat doesn't actually have a secondary gps.
gps_secondary_antenna_dir=gps_primary_antenna_dir<line_sep>gps_secondary_pos=gps_primary_pos<line_sep># Angle [rad] from the GPS compass baseline to the zero-azimuth
# reference of the perch frame. Note: The TopHat does not have a
# GPS compass, but this value is set for historical consistency.
gps_compass_to_perch_azi=-2.440<block_end><elif_stmt>params['gs_model']<eq>system_types.kGroundStationModelGSv1<block_start>gps_primary_antenna_dir=[0.0 0.0 -1.0]<line_sep># Position measured on 2015-06-15.
gps_primary_pos=[0.0 0.0 -2.94]<line_sep># GSv1 doesn't actually have a secondary gps.
gps_secondary_antenna_dir=gps_primary_antenna_dir<line_sep>gps_secondary_pos=gps_primary_pos<line_sep># Angle [rad] from the GPS compass baseline to the zero-azimuth
# reference of the perch frame
gps_compass_to_perch_azi=-2.440<block_end><elif_stmt>params['gs_model']<eq>system_types.kGroundStationModelGSv2<block_start>gps_primary_antenna_dir=[0.0 0.0 -1.0]<line_sep>gps_secondary_antenna_dir=[0.0 0.0 -1.0]<if_stmt>params['test_site']<eq>system_types.kTestSiteParkerRanch# See b/137283974 for details.
<block_start>gps_primary_pos=[-0.002 0.011 -6.7]<line_sep>gps_secondary_pos=[-2.450 -0.428 -6.827]<block_end><elif_stmt>params['test_site']<eq>system_types.kTestSiteNorway# See b/137660975 for details.
<block_start>gps_primary_pos=[-0.002 0.011 -6.7]<line_sep>gps_secondary_pos=[-2.450 -0.428 -6.757]<block_end><else_stmt><block_start><assert_stmt><false> 'Unsupported test site.'<block_end># Angle [rad] from the GPS compass baseline to the zero-azimuth
# reference of the platform frame. See b/118710931.
gps_compass_to_perch_azi=np.deg2rad(169.84)<block_end><else_stmt><block_start><assert_stmt><false> 'Unsupported ground station model.'<block_end><return>{# Position [m] of the GS GPS antenna in the platform frame.
# NOTE: The direction of the antennae is currently not used.
'primary_antenna_p':{'antenna_dir':gps_primary_antenna_dir 'pos':gps_primary_pos } 'secondary_antenna_p':{'antenna_dir':gps_secondary_antenna_dir 'pos':gps_secondary_pos } # Calibration for the ground station compass ([#], [rad], [#]).
# The bias is used to account for the angle between the perch
# frame and the NovAtel differential GPS receiver.
# TODO: Remove this parameter once the computation of
# compass heading from the primary and secondary antennae is implemented.
'heading_cal':{'scale':1.0 'bias':gps_compass_to_perch_azi 'bias_count':0}}<block_end> |
from .base_options import BaseOptions


class TestOptions(BaseOptions):
    def initialize(self):
        BaseOptions.initialize(self)
        self.parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
        self.parser.add_argument('--outline_style', type=int, default=0, help='which edge style')
        self.parser.add_argument('--shading_style', type=int, default=0, help='which shading style')
        self.parser.add_argument('--Sigma', type=float, default=2.5, help='sigma for XDoG')
        self.parser.add_argument('--pad', type=int, default=10)
        self.parser.add_argument('--r', type=int, default=11)
        self.parser.add_argument('--eps', type=float, default=0.1)
        self.isTrain = False
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: fewshot/configs/cnn_config.proto
<import_stmt>sys<line_sep>_b=sys.version_info[0]<l>3<and>(<lambda>x:x)<or>(<lambda>x:x.encode('latin1'))<import_from_stmt>google.protobuf descriptor<as>_descriptor<import_from_stmt>google.protobuf message<as>_message<import_from_stmt>google.protobuf reflection<as>_reflection<import_from_stmt>google.protobuf symbol_database<as>_symbol_database<line_sep># @@protoc_insertion_point(imports)
_sym_db=_symbol_database.Default()<line_sep>DESCRIPTOR=_descriptor.FileDescriptor(name='fewshot/configs/cnn_config.proto' package='fewshot.configs' syntax='proto2' serialized_options=<none> serialized_pb=_b('\n fewshot/configs/cnn_config.proto\x12\x0f\x66\x65wshot.configs\"\xa1\x01\n\tCNNConfig\x12\x0e\n\x06height\x18\x01 \x01(\x05\x12\r\n\x05width\x18\x02 \x01(\x05\x12\x13\n\x0bnum_channel\x18\x03 \x01(\x05\x12\x13\n\x0bnum_filters\x18\x04 \x03(\x05\x12\x0f\n\x07strides\x18\x05 \x03(\x05\x12\x0f\n\x07pool_fn\x18\x06 \x03(\t\x12\x14\n\x0cpool_strides\x18\x07 \x03(\x05\x12\x13\n\x0b\x63onv_act_fn\x18\x08 \x03(\t'))<line_sep>_CNNCONFIG=_descriptor.Descriptor(name='CNNConfig' full_name='fewshot.configs.CNNConfig' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='height' full_name='fewshot.configs.CNNConfig.height' index=0 number=1 type=5 cpp_type=1 label=1 has_default_value=<false> default_value=0 message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='width' full_name='fewshot.configs.CNNConfig.width' index=1 number=2 type=5 cpp_type=1 label=1 has_default_value=<false> default_value=0 message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='num_channel' full_name='fewshot.configs.CNNConfig.num_channel' index=2 number=3 type=5 cpp_type=1 label=1 has_default_value=<false> default_value=0 message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='num_filters' full_name='fewshot.configs.CNNConfig.num_filters' index=3 number=4 type=5 cpp_type=1 label=3 has_default_value=<false> default_value=[] message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='strides' full_name='fewshot.configs.CNNConfig.strides' index=4 number=5 type=5 cpp_type=1 label=3 has_default_value=<false> default_value=[] message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='pool_fn' full_name='fewshot.configs.CNNConfig.pool_fn' index=5 number=6 type=9 cpp_type=9 label=3 has_default_value=<false> default_value=[] message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='pool_strides' full_name='fewshot.configs.CNNConfig.pool_strides' index=6 number=7 type=5 cpp_type=1 label=3 has_default_value=<false> default_value=[] message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='conv_act_fn' full_name='fewshot.configs.CNNConfig.conv_act_fn' index=7 number=8 type=9 cpp_type=9 label=3 has_default_value=<false> default_value=[] message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) ] extensions=[] nested_types=[] enum_types=[] serialized_options=<none> is_extendable=<false> syntax='proto2' extension_ranges=[] oneofs=[] serialized_start=54 
serialized_end=215 )<line_sep>DESCRIPTOR.message_types_by_name['CNNConfig']=_CNNCONFIG<line_sep>_sym_db.RegisterFileDescriptor(DESCRIPTOR)<line_sep>CNNConfig=_reflection.GeneratedProtocolMessageType('CNNConfig' (_message.Message ) dict(DESCRIPTOR=_CNNCONFIG __module__='fewshot.configs.cnn_config_pb2'# @@protoc_insertion_point(class_scope:fewshot.configs.CNNConfig)
))<line_sep>_sym_db.RegisterMessage(CNNConfig)<line_sep># @@protoc_insertion_point(module_scope)
|
"""
Module for handling KNX primitives.
* KNX Addresses
* KNX Telegrams
"""<line_sep># flake8: noqa
<import_from_stmt>.address GroupAddress GroupAddressType IndividualAddress<import_from_stmt>.address_filter AddressFilter<import_from_stmt>.telegram Telegram TelegramDirection<line_sep>__all__=["AddressFilter" "GroupAddress" "GroupAddressType" "IndividualAddress" "Telegram" "TelegramDirection" ]<line_sep> |
import lights
from tools import xbmclog


class AmbilightController(lights.Controller):
    def __init__(self, *args, **kwargs):
        super(AmbilightController, self).__init__(*args, **kwargs)

    def on_playback_start(self):
        if self.settings.ambilight_start_dim_enable:
            self.save_state_as_initial()
            xbmclog('Kodi Hue: In AmbilightController.on_playback_start() '
                    'dimming ambilight group')
            self.set_state(
                bri=self.settings.ambilight_start_dim,
                force_on=self.settings.force_light_on,
            )

    def on_playback_pause(self):
        if self.settings.ambilight_start_dim_enable:
            xbmclog('Kodi Hue: In AmbilightController.on_playback_pause() '
                    'undimming ambilight group')
            if self.settings.ambilight_pause_bri_override:
                bri = self.settings.ambilight_pause_bri
                self.set_state(
                    bri=bri,
                    force_on=self.settings.force_light_on,
                )
            else:
                self.restore_initial_state(
                    force_on=self.settings.force_light_on,
                )

    def on_playback_stop(self):
        if self.settings.ambilight_start_dim_enable:
            xbmclog('Kodi Hue: In AmbilightController.on_playback_stop() '
                    'undimming ambilight group')
            if self.settings.ambilight_stop_bri_override:
                self.set_state(
                    bri=self.settings.ambilight_stop_bri,
                    force_on=self.settings.force_light_on,
                )
            else:
                self.restore_initial_state(
                    force_on=self.settings.force_light_on,
                )
        else:
            self.restore_initial_state(
                force_on=self.settings.force_light_on,
            )
from .base_agent import BaseAgent
from .bot_agent import BotAgent
"""
Tests of 'python -m recipy' usage.
This script uses a Python script (run_numpy_no_recipy.py) about
which the following assumptions are made:
* Co-located with this test script, in the same directory.
* Expects two arguments via the command-line: an input file
name and an output file name.
* Reads the input file and creates the output file using a library
which recipy is configured to log.
"""<line_sep># Copyright (c) 2016 University of Edinburgh.
<import_stmt>os<import_stmt>os.path<import_stmt>shutil<import_stmt>tempfile<import_from_stmt>integration_test helpers<import_from_stmt>integration_test recipy_environment<as>recipyenv<class_stmt>TestMflag<block_start>"""
Tests of 'python -m recipy' usage.
"""<line_sep>SCRIPT_NAME="run_numpy_no_recipy.py"<line_sep>""" Test script assumed to be in same directory as this class. """<line_sep>script=""<line_sep>""" Absolute path to test script. """<line_sep>original_script=""<line_sep>""" Absolute path to original copy of test script. """<line_sep>directory=""<line_sep>""" Absolute path to temporary directory for these tests. """<def_stmt>setup_method self method<block_start>"""
py.test setup function, creates test directory in $TEMP,
sets 'script' with path to SCRIPT_NAME and copies script from
'script' to 'original_script'.
:param method: Test method
:type method: function
"""<line_sep>TestMflag.directory=tempfile.mkdtemp(TestMflag.__name__)<line_sep>TestMflag.script=os.path.join(os.path.dirname(__file__) TestMflag.SCRIPT_NAME)<line_sep>TestMflag.original_script=TestMflag.script+".orig"<line_sep>shutil.copy(TestMflag.script TestMflag.original_script)<block_end><def_stmt>teardown_method self method<block_start>"""
py.test teardown function, deletes test directory in $TEMP,
and moves 'original_script' to 'script'.
"""<if_stmt>os.path.isdir(TestMflag.directory)<block_start>shutil.rmtree(TestMflag.directory)<block_end>os.remove(TestMflag.script)<line_sep>os.rename(TestMflag.original_script TestMflag.script)<block_end><def_stmt>test_m_recipy self<block_start>"""
Running 'python -m recipy script' and the same script that
inclues 'import recipy' should give the same results in the
log (aside from their 'unique_id', 'diff', 'date',
'exit_date', 'command_args', 'inputs' and 'outputs').
"""<line_sep>input_file=os.path.join(TestMflag.directory "input.csv")<with_stmt>open(input_file "w")<as>csv_file<block_start>csv_file.write("1,4,9,16\n")<block_end>output_file=os.path.join(TestMflag.directory "output.csv")<line_sep>exit_code,_=helpers.execute_python(["-m" "recipy" TestMflag.script input_file output_file])<assert_stmt>exit_code<eq>0 ("Unexpected exit code "+str(exit_code))<line_sep>module_log,_=helpers.get_log(recipyenv.get_recipydb())<line_sep>helpers.enable_recipy(TestMflag.original_script TestMflag.script)<line_sep>exit_code,_=helpers.execute_python(["-m" "recipy" TestMflag.script input_file output_file])<assert_stmt>exit_code<eq>0 ("Unexpected exit code "+str(exit_code))<line_sep>import_log,_=helpers.get_log(recipyenv.get_recipydb())<for_stmt>key ["inputs" "outputs"]<block_start><assert_stmt>len(module_log[key])<eq>len(import_log[key]) ("Expected same number of "+key+" files")<for_stmt>index range(0 len(module_log[key]))<block_start>[import_file _]=module_log[key][index]<line_sep>[module_file _]=import_log[key][index]<assert_stmt>os.path.basename(import_file)<eq>os.path.basename(module_file) "Expected local file names to be equal"<block_end><block_end># Remove fields that are specific to a run.
<for_stmt>key ["unique_id" "diff" "date" "exit_date" "command_args" "inputs" "outputs"]<block_start><if_stmt>key<in>module_log<block_start><del_stmt>module_log[key]<del_stmt>import_log[key]<block_end><block_end><assert_stmt>module_log<eq>import_log ("Expected "+str(module_log)+" to equal "+str(import_log))<block_end><block_end> |
<import_from_future_stmt> absolute_import<import_from_future_stmt> print_function<import_from_future_stmt> division<import_stmt>sys os glob<import_stmt>json<import_stmt>argparse<import_stmt>numpy<as>np<import_stmt>open3d<as>o3d<import_stmt>pickle<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>torch.nn init<import_stmt>torch.optim<as>optim<import_from_stmt>torch.optim lr_scheduler<import_stmt>smplx<import_from_stmt>human_body_prior.tools.model_loader load_vposer<import_stmt>chamfer_pytorch.dist_chamfer<as>ext<import_from_stmt>cvae BodyParamParser HumanCVAES2 GeometryTransformer<import_from_stmt>batch_gen BatchGeneratorTest<class_stmt>TestOP<block_start><def_stmt>__init__ self testconfig<block_start><for_stmt>key,val testconfig.items()<block_start>setattr(self key val)<block_end><if_stmt><not>os.path.exists(self.ckpt_dir)<block_start>print('--[ERROR] checkpoints do not exist')<line_sep>sys.exit()<block_end><if_stmt>self.use_cont_rot<block_start>n_dim_body=72+3<block_end><else_stmt><block_start>n_dim_body=72<block_end>self.model_h_latentD=256<line_sep>self.model_h=HumanCVAES2(latentD_g=self.model_h_latentD latentD_l=self.model_h_latentD n_dim_body=n_dim_body n_dim_scene=self.model_h_latentD test=<true>)<line_sep>### body mesh model
self.vposer,_=load_vposer(self.vposer_ckpt_path vp_model='snapshot')<line_sep>self.body_mesh_model=smplx.create(self.human_model_path model_type='smplx' gender='neutral' ext='npz' num_pca_comps=12 create_global_orient=<true> create_body_pose=<true> create_betas=<true> create_left_hand_pose=<true> create_right_hand_pose=<true> create_expression=<true> create_jaw_pose=<true> create_leye_pose=<true> create_reye_pose=<true> create_transl=<true> batch_size=self.n_samples)<block_end><def_stmt>test self batch_gen<block_start>self.model_h.eval()<line_sep>self.model_h.to(self.device)<line_sep>self.vposer.to(self.device)<line_sep>self.body_mesh_model.to(self.device)<line_sep>## load checkpoints
ckp_list=sorted(glob.glob(os.path.join(self.ckpt_dir 'epoch-*.ckp')) key=os.path.getmtime)<line_sep>ckp_path=ckp_list[-1]<line_sep>checkpoint=torch.load(ckp_path)<line_sep>print('[INFO] load checkpoints: '+ckp_path)<line_sep>self.model_h.load_state_dict(checkpoint['model_h_state_dict'])<line_sep>## get a batch of data for testing
batch_gen.reset()<line_sep>test_data=batch_gen.next_batch(batch_size=1)<line_sep>depth_batch=test_data[0]<line_sep>seg_batch=test_data[1]<line_sep>max_d_batch=test_data[2]<line_sep>cam_int_batch=test_data[3]<line_sep>cam_ext_batch=test_data[4]<line_sep>## pass data to network
xs=torch.cat([depth_batch seg_batch] dim=1)<line_sep>xs_n=xs.repeat(self.n_samples 1 1 1)<line_sep>noise_batch_g=torch.randn([self.n_samples self.model_h_latentD] dtype=torch.float32 device=self.device)<line_sep>noise_batch_l=torch.randn([self.n_samples self.model_h_latentD] dtype=torch.float32 device=self.device)<if_stmt>self.use_cont_rot<block_start>xhnr_gen=self.model_h.sample(xs_n noise_batch_g noise_batch_l)<line_sep>xhn_gen=GeometryTransformer.convert_to_3D_rot(xhnr_gen)<block_end><else_stmt><block_start>xhnr_gen=self.model_h.sample(xs_n noise_batch_g noise_batch_l)<block_end>xh_gen=GeometryTransformer.recover_global_T(xhn_gen cam_int_batch max_d_batch)<line_sep>body_param_list=BodyParamParser.body_params_encapsulate(xh_gen)<line_sep>scene_name=os.path.abspath(self.scene_file_path).split("/")[-2].split("_")[0]<line_sep>outdir=os.path.join(self.output_dir scene_name)<if_stmt><not>os.path.exists(outdir)<block_start>os.makedirs(outdir)<block_end>print('[INFO] save results to: '+outdir)<for_stmt>ii,body_param enumerate(body_param_list)<block_start>body_param['cam_ext']=cam_ext_batch.detach().cpu().numpy()<line_sep>body_param['cam_int']=cam_int_batch.detach().cpu().numpy()<line_sep>outfilename=os.path.join(outdir 'body_gen_{:06d}.pkl'.format(ii+900))<line_sep>outfile=open(outfilename 'wb')<line_sep>pickle.dump(body_param outfile)<line_sep>outfile.close()<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>proxe_path='/is/cluster/yzhang/PROXE'<line_sep>test_file_list=['MPH16_00157_01' 'N0SittingBooth_00162_01' 'MPH1Library_00034_01' 'N3OpenArea_00157_01']<for_stmt>fileid range(len(test_file_list))<block_start>testconfig={'human_model_path':'/is/ps2/yzhang/body_models/VPoser' 'scene_3d_path':os.path.join(proxe_path 'scenes/') 'vposer_ckpt_path':'/is/ps2/yzhang/body_models/VPoser/vposer_v1_0' 'device':torch.device("cuda"<if>torch.cuda.is_available()<else>"cpu") 'ckpt_dir':'checkpoints/checkpoints_virtualcams3_modelS2_batch32_epoch30_LRS0.0003_LRH0.0003_LossVposer0.001_LossKL0.1_LossContact0.001_LossCollision0.01' 'test_data_path':os.path.join(proxe_path 'snapshot/'+test_file_list[fileid]) 'scene_file_path':os.path.join(proxe_path 'snapshot/'+test_file_list[fileid]+'/rec_000000.mat') 'n_samples':300 'use_cont_rot':<true> 'output_dir':'results_proxe_stage2_sceneloss/virtualcams'}<line_sep>batch_gen=BatchGeneratorTest(dataset_path=testconfig['test_data_path'] device=testconfig['device'])<line_sep>test_op=TestOP(testconfig)<line_sep>test_op.test(batch_gen)<block_end><block_end> |
<import_stmt>paddle<import_stmt>paddle.fluid<as>fluid<import_from_stmt>.operations OPS<def_stmt>AuxiliaryHeadCIFAR inputs C class_num<block_start>print('AuxiliaryHeadCIFAR : inputs-shape : {:}'.format(inputs.shape))<line_sep>temp=fluid.layers.relu(inputs)<line_sep>temp=fluid.layers.pool2d(temp pool_size=5 pool_stride=3 pool_padding=0 pool_type='avg')<line_sep>temp=fluid.layers.conv2d(temp filter_size=1 num_filters=128 stride=1 padding=0 act=<none> bias_attr=<false>)<line_sep>temp=fluid.layers.batch_norm(input=temp act='relu' bias_attr=<none>)<line_sep>temp=fluid.layers.conv2d(temp filter_size=1 num_filters=768 stride=2 padding=0 act=<none> bias_attr=<false>)<line_sep>temp=fluid.layers.batch_norm(input=temp act='relu' bias_attr=<none>)<line_sep>print('AuxiliaryHeadCIFAR : last---shape : {:}'.format(temp.shape))<line_sep>predict=fluid.layers.fc(input=temp size=class_num act='softmax')<line_sep><return>predict<block_end><def_stmt>InferCell name inputs_prev_prev inputs_prev genotype C_prev_prev C_prev C reduction reduction_prev<block_start>print('[{:}] C_prev_prev={:} C_prev={:}, C={:}, reduction_prev={:}, reduction={:}'.format(name C_prev_prev C_prev C reduction_prev reduction))<line_sep>print('inputs_prev_prev : {:}'.format(inputs_prev_prev.shape))<line_sep>print('inputs_prev : {:}'.format(inputs_prev.shape))<line_sep>inputs_prev_prev=OPS['skip_connect'](inputs_prev_prev C_prev_prev C 2<if>reduction_prev<else>1)<line_sep>inputs_prev=OPS['skip_connect'](inputs_prev C_prev C 1)<line_sep>print('inputs_prev_prev : {:}'.format(inputs_prev_prev.shape))<line_sep>print('inputs_prev : {:}'.format(inputs_prev.shape))<if_stmt>reduction<block_start>step_ops,concat=genotype.reduce genotype.reduce_concat<block_end><else_stmt><block_start>step_ops,concat=genotype.normal genotype.normal_concat<block_end>states=[inputs_prev_prev inputs_prev]<for_stmt>istep,operations enumerate(step_ops)<block_start>op_a,op_b=operations<line_sep># the first operation
#print ('-->>[{:}/{:}] [{:}] + [{:}]'.format(istep, len(step_ops), op_a, op_b))
stride=2<if>reduction<and>op_a[1]<l>2<else>1<line_sep>tensor1=OPS[op_a[0]](states[op_a[1]] C C stride)<line_sep>stride=2<if>reduction<and>op_b[1]<l>2<else>1<line_sep>tensor2=OPS[op_b[0]](states[op_b[1]] C C stride)<line_sep>state=fluid.layers.elementwise_add(x=tensor1 y=tensor2 act=<none>)<assert_stmt>tensor1.shape<eq>tensor2.shape 'invalid shape {:} vs. {:}'.format(tensor1.shape tensor2.shape)<line_sep>print('-->>[{:}/{:}] tensor={:} from {:} + {:}'.format(istep len(step_ops) state.shape tensor1.shape tensor2.shape))<line_sep>states.append(state)<block_end>states_to_cat=[states[x]<for>x concat]<line_sep>outputs=fluid.layers.concat(states_to_cat axis=1)<line_sep>print('-->> output-shape : {:} from concat={:}'.format(outputs.shape concat))<line_sep><return>outputs<block_end># NASCifarNet(inputs, 36, 6, 3, 10, 'xxx', True)
<def_stmt>NASCifarNet ipt C N stem_multiplier class_num genotype auxiliary# cifar head module
<block_start>C_curr=stem_multiplier<times>C<line_sep>stem=fluid.layers.conv2d(ipt filter_size=3 num_filters=C_curr stride=1 padding=1 act=<none> bias_attr=<false>)<line_sep>stem=fluid.layers.batch_norm(input=stem act=<none> bias_attr=<none>)<line_sep>print('stem-shape : {:}'.format(stem.shape))<line_sep># N + 1 + N + 1 + N cells
layer_channels=[C]<times>N+[C<times>2]+[C<times>2]<times>N+[C<times>4]+[C<times>4]<times>N<line_sep>layer_reductions=[<false>]<times>N+[<true>]+[<false>]<times>N+[<true>]+[<false>]<times>N<line_sep>C_prev_prev,C_prev,C_curr=C_curr C_curr C<line_sep>reduction_prev=<false><line_sep>auxiliary_pred=<none><line_sep>cell_results=[stem stem]<for_stmt>index,(C_curr reduction) enumerate(zip(layer_channels layer_reductions))<block_start>xstr='{:02d}/{:02d}'.format(index len(layer_channels))<line_sep>cell_result=InferCell(xstr cell_results[-2] cell_results[-1] genotype C_prev_prev C_prev C_curr reduction reduction_prev)<line_sep>reduction_prev=reduction<line_sep>C_prev_prev,C_prev=C_prev cell_result.shape[1]<line_sep>cell_results.append(cell_result)<if_stmt>auxiliary<and>reduction<and>C_curr<eq>C<times>4<block_start>auxiliary_pred=AuxiliaryHeadCIFAR(cell_result C_prev class_num)<block_end><block_end>global_P=fluid.layers.pool2d(input=cell_results[-1] pool_size=8 pool_type='avg' pool_stride=1)<line_sep>predicts=fluid.layers.fc(input=global_P size=class_num act='softmax')<line_sep>print('predict-shape : {:}'.format(predicts.shape))<if_stmt>auxiliary_pred<is><none><block_start><return>predicts<block_end><else_stmt><block_start><return>[predicts auxiliary_pred]<block_end><block_end> |
"""
Given two integers n and k, return all possible combinations of k numbers out
of 1 ... n.
For example,
If n = 4 and k = 2, a solution is:
[
[2,4],
[3,4],
[2,3],
[1,2],
[1,3],
[1,4],
]
"""<class_stmt>Solution(object)<block_start><def_stmt>combine self n k<block_start>"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""<line_sep>a=range(1 n+1)<line_sep><return>self.combine_aux(a k)<block_end><def_stmt>combine_aux self a k<block_start><if_stmt>k<eq>0<block_start><return>[[]]<block_end><else_stmt><block_start>res=[]<for_stmt>i,e enumerate(a)<block_start>rest_comb=self.combine_aux(a[i+1:] k-1)<for_stmt>comb rest_comb<block_start>comb.insert(0 e)<block_end>res<augadd>rest_comb<block_end><return>res<block_end><block_end><block_end> |
__import__('bumpversion').main()
import bpy

bpy.context.scene.render.fps = 50
bpy.context.scene.render.fps_base = 1
#! /usr/bin/env python
import os


def main():
    if os.path.exists(".dockerignore"):
        print(".dockerignore already exists, remove it to proceed")
        exit(-1)

    with open(".gitignore", "r") as fin, open(".dockerignore", "w") as fout:
        fout.write("# This file was automatically generated by ./ci/bootstrap_dockerignore.py\n")
        fout.write("# based on the contents of .gitignore\n\n")

        for line in fin:
            if line[0] in "#!/\n":
                fout.write(line)
            else:
                fout.write("**/" + line)


if __name__ == "__main__":
    main()
"""
TODO (wimax July 2020): I don't see anything in here that
screams "e2e test"; this certainly seems like more of an integration test.
There's nothing here that does anything cross-service.
Perhaps it's just "does it work in AWS?"
"""<import_from_stmt>grapl_tests_common.clients.grapl_web_client GraplWebClient<def_stmt>test_real_user_fake_password <arrow><none># Exercises the PasswordVerification case in grapl-web-ui login.rs
<block_start>resp=GraplWebClient().real_user_fake_password()<assert_stmt>resp.status_code<eq>401<block_end><def_stmt>test_nonexistent_user <arrow><none># Exercises the UserRecordNotFound case in grapl-web-ui login.rs
<block_start>resp=GraplWebClient().nonexistent_user()<assert_stmt>resp.status_code<eq>401<block_end><def_stmt>test_check__empty_creds <arrow><none><block_start>resp=GraplWebClient().empty_creds()<assert_stmt>resp.status_code<eq>500<block_end># TODO: https://github.com/grapl-security/issue-tracker/issues/686
# Add a `test_no_content_type()` (it currently 200s for some reason)
|
"""
Script for extracting the ground plane from the KITTI dataset.
We need to determine the ground plane position and orientation in order to be able to reconstruct
points on it, which we are trying to detect.
We will collect all the points on the ground plane from the dataset and then fit a plane to them
with RANSAC.
----------------------------------------------------------------------------------------------------
python kitti_extract_ground_plane.py path_labels
----------------------------------------------------------------------------------------------------
"""<line_sep>__date__='04/13/2017'<line_sep>__author__='<NAME>'<line_sep>__email__='<EMAIL>'<import_stmt>argparse<import_stmt>os<import_stmt>numpy<as>np<import_stmt>random<line_sep># import matplotlib
# matplotlib.use('Agg') # Prevents from using X interface for plotting
<import_from_stmt>matplotlib pyplot<as>plt<import_from_stmt>mpl_toolkits.mplot3d Axes3D<import_from_stmt>shared.geometry R3x3_y t3x1 Rt4x4<line_sep>####################################################################################################
# DEFINITIONS #
####################################################################################################
# Parameter for RANSAC
# Distance from the plane (in meters), which is considered as an inlier region
INLIER_TRHESHOLD=1.0<line_sep># Number of estimation iterations carried out by RANSAC
RANSAC_ITERS=10000<line_sep>####################################################################################################
# FUNCTIONS #
####################################################################################################
<def_stmt>plane_3p p1 p2 p3<block_start>"""
Computes the equation of a plane passing through the 3 given points.
Input:
p1, p2, p3: 3x1 np.matrix coordinates of points in the plane
Returns:
[a, b, c, d] coefficients as a 1x4 np.matrix
"""<line_sep>l1=p2-p1<line_sep>l2=p3-p1<line_sep>normal=np.cross(l1 l2 axis=0)<line_sep>d=-(normal[0 0]<times>p1[0 0]+normal[1 0]<times>p1[1 0]+normal[2 0]<times>p1[2 0])<line_sep><return>np.asmatrix([normal[0 0] normal[1 0] normal[2 0] d])<block_end><def_stmt>show_X_and_gp gp_X_4xn gp_1x4<block_start>"""
Show a 3D plot of the estimated ground plane.
"""<line_sep>fig=plt.figure()<line_sep>ax=fig.add_subplot(111 projection='3d')<line_sep>ax.set_aspect('equal')<line_sep>ax.scatter(np.array(gp_X_4xn[2 0:1000]) np.array(gp_X_4xn[0 0:1000]) np.array(-gp_X_4xn[1 0:1000]) color='red')<line_sep>X=np.arange(-20 20 1)<line_sep>Y=np.arange(-1 10 1)<line_sep>X,Y=np.meshgrid(X Y)<line_sep>Z=-(gp_1x4[0 0]<times>X+gp_1x4[0 1]<times>Y+gp_1x4[0 3])/gp_1x4[0 2]<line_sep>ax.plot_surface(Z X -Y linewidth=0 alpha=0.5 antialiased=<true>)<line_sep># Bounding box of the car
ax.plot([3 3 3 3 3] [1.5 1.5 -1.5 -1.5 1.5] [0 -1.9 -1.9 0 0] color='green')<line_sep>ax.plot([-3 -3 -3 -3 -3] [1.5 1.5 -1.5 -1.5 1.5] [0 -1.9 -1.9 0 0] color='red')<line_sep>ax.plot([3 -3] [1.5 1.5] [0 0] color='blue')<line_sep>ax.plot([3 -3] [1.5 1.5] [-1.9 -1.9] color='blue')<line_sep>ax.plot([3 -3] [-1.5 -1.5] [0 0] color='blue')<line_sep>ax.plot([3 -3] [-1.5 -1.5] [-1.9 -1.9] color='blue')<line_sep>ax.set_xlim(-100 100)<line_sep>ax.set_ylim(-100 100)<line_sep>ax.set_zlim(-100 100)<line_sep>ax.set_xlabel('Z')<line_sep>ax.set_ylabel('X')<line_sep>ax.set_zlabel('Y')<line_sep>plt.show()<block_end>####################################################################################################
# CLASSES #
####################################################################################################
<class_stmt>GroundPlaneEstimator(object)<block_start>"""
Takes care of the estimation of the ground plane position in the KITTI dataset.
"""<def_stmt>__init__ self path_labels<block_start>"""
Input:
path_labels: Path to the "label_2" folder of the KITTI dataset
"""<line_sep>super(GroundPlaneEstimator self).__init__()<line_sep>self.path_labels=path_labels<line_sep>self.gp_points=[]<block_end><def_stmt>run_estimation self<block_start>"""
Runs the whole process of estimating the ground plane.
"""<line_sep>print('-- ESTIMATING GROUND PLANE POSITION')<line_sep># Read label files and get all ground plane points
print('-- Reading label files')<line_sep>self._read_label_files()<line_sep>print('-- Label files contain '+str(len(self.gp_points))+' points')<line_sep># Create a matrix from all the points for easier computation
self.gp_X_4xn=np.asmatrix(np.ones((4 len(self.gp_points))))<for_stmt>i xrange(len(self.gp_points))<block_start>self.gp_X_4xn[0:3 i]=self.gp_points[i]<block_end># plt.scatter(self.gp_X_4xn[2,:], self.gp_X_4xn[1,:])
# plt.show()
# Run RANSAC on those points
print('-- Running RANSAC plane estimation')<line_sep>self._ransac_plane()<block_end><def_stmt>_read_label_files self<block_start>"""
Reads all label files and extract the points on the ground plane.
"""<line_sep>filenames=[f<for>f os.listdir(self.path_labels)<if>os.path.isfile(os.path.join(self.path_labels f))]<if_stmt>len(filenames)<ne>7481<block_start>print('Wrong number (%d) of files in the KITTI dataset! Should be 7481.'%(len(filenames)))<line_sep>exit(1)<block_end># Read each label file
# i = 0
<for_stmt>f filenames<block_start>path_label_file=os.path.join(self.path_labels f)<line_sep>self._process_label_file(path_label_file)<line_sep># i += 1
# if i == 1000: break
<block_end><block_end><def_stmt>_process_label_file self path_label_file<block_start>"""
Processes one label file.
Input:
path_label_file: Path to the TXT label file in KITTI format to be processed.
"""<with_stmt>open(path_label_file 'r')<as>infile_label# Read the objects
<block_start><for_stmt>line infile_label<block_start>line=line.rstrip('\n')<line_sep>data=line.split(' ')<line_sep># First element of the data is the label. We don't want to process 'Misc' and
# 'DontCare' labels
<if_stmt>data[0]<eq>'Misc'<or>data[0]<eq>'DontCare'<block_start><continue><block_end># Extract the points of this object on the ground plane
self._extract_ground_plane_pts(data)<block_end><block_end><block_end><def_stmt>_extract_ground_plane_pts self data<block_start>"""
Extract 3D points from the object bounding box, which lie on the ground plane.
Input:
data: One split line of the label file (line.split(' '))
"""<line_sep># Object dimensions
h=float(data[8])<line_sep>w=float(data[9])<line_sep>l=float(data[10])<line_sep># Position of the center point on the ground plane (xz plane)
cx=float(data[11])<line_sep>cy=float(data[12])<line_sep>cz=float(data[13])<line_sep># Rotation of the object around y
ry=float(data[14])<line_sep># 3D box corners on the ground plane. Careful, the coordinate system of the car is that
# x points forward, not z! (It is rotated by 90deg with respect to the camera one)
# fbr, rbr, fbl, rbl
X=np.asmatrix([[l/2 -l/2 l/2 -l/2] [0 0 0 0] [-w/2 -w/2 w/2 w/2] [1 1 1 1]])<line_sep># Rotate the 3D box around y axis and translate it to the correct position in the cam. frame
X=Rt4x4(R3x3_y(ry) t3x1(cx cy cz))<times>X<line_sep>self.gp_points.append(X[0:3 0])<line_sep>self.gp_points.append(X[0:3 1])<line_sep>self.gp_points.append(X[0:3 2])<line_sep>self.gp_points.append(X[0:3 3])<block_end><def_stmt>_ransac_plane self<block_start>"""
Finds "optimal" ground plane position given the points.
Returns:
[a, b, c, d] plane equation ax+by+cz+d=0 coefficients as a 1x4 np.matrix
"""<line_sep>num_points=len(self.gp_points)<line_sep># Variables for storing minimum distance sum from the estimated plane
dist2_sum_min=99999999999999999<line_sep>gp_1x4_max=np.asmatrix(np.zeros((1 4)))<for_stmt>i range(RANSAC_ITERS)<block_start>rp=random.sample(range(0 num_points) 3)<line_sep># Compute the equation of the ground plane
gp_1x4=plane_3p(self.gp_points[rp[0]] self.gp_points[rp[1]] self.gp_points[rp[2]])<line_sep># Check that the plane gives small errors on the original points - when we have some
# close to singular situation we have to be careful
<if_stmt>gp_1x4<times>self.gp_X_4xn[: rp[0]]<g>0.000000001<or>gp_1x4<times>self.gp_X_4xn[: rp[1]]<g>0.000000001<or>gp_1x4<times>self.gp_X_4xn[: rp[2]]<g>0.000000001<block_start>print('WARNING: Solution not precise, skipping...')<line_sep><continue><block_end># Compute the sum of distances from this plane
distances2=np.power(gp_1x4<times>self.gp_X_4xn 2)<line_sep>dist2_sum=np.sum(distances2 axis=1)<if_stmt>dist2_sum[0 0]<l>dist2_sum_min<block_start>print('New min distance sum: '+str(dist2_sum[0 0]))<line_sep>dist2_sum_min=dist2_sum[0 0]<line_sep>gp_1x4_max=gp_1x4<block_end><block_end>print('-- RANSAC FINISHED')<line_sep>print('Estimated ground plane: '+str(gp_1x4_max))<line_sep>print('Sum of distances: '+str(dist2_sum_min)+', '+str(dist2_sum_min/num_points)+' per point')<line_sep># Show a plot of the plane
show_X_and_gp(self.gp_X_4xn gp_1x4_max)<line_sep><return>gp_1x4_max<block_end><block_end>####################################################################################################
# MAIN #
####################################################################################################
<def_stmt>parse_arguments <block_start>"""
Parse input options of the script.
"""<line_sep>parser=argparse.ArgumentParser(description='Convert KITTI label files into BBTXT.')<line_sep>parser.add_argument('path_labels' metavar='path_labels' type=str help='Path to the "label_2" folder of the KITTI dataset')<line_sep>args=parser.parse_args()<if_stmt><not>os.path.exists(args.path_labels)<block_start>print('Input path "%s" does not exist!'%(args.path_labels))<line_sep>parser.print_help()<line_sep>exit(1)<block_end><return>args<block_end><def_stmt>main <block_start>args=parse_arguments()<line_sep>gpe=GroundPlaneEstimator(args.path_labels)<line_sep>gpe.run_estimation()<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
class EmitterTypeError(Exception):
    pass


class EmitterValidationError(Exception):
    pass
def test_upgrade_atac_alignment_enrichment_quality_metric_1_2(upgrader, atac_alignment_enrichment_quality_metric_1):
    value = upgrader.upgrade(
        'atac_alignment_enrichment_quality_metric',
        atac_alignment_enrichment_quality_metric_1,
        current_version='1',
        target_version='2',
    )
    assert value['schema_version'] == '2'
    assert 'fri_blacklist' not in value
    assert value['fri_exclusion_list'] == 0.0013046877081284722
from .application import Application as make_app
from celery import shared_task
from celery_progress.backend import ProgressRecorder
from time import sleep


@shared_task(bind=True)
def go_to_sleep(self, duration):
    progress_recorder = ProgressRecorder(self)
    for i in range(100):
        sleep(duration)
        progress_recorder.set_progress(i + 1, 100, f'On iteration {i}')
    return 'Done'
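A hedged usage sketch for the task above, assuming a configured Celery app and result backend; the progress values are normally consumed by the celery-progress frontend via the task id:

# Illustrative only: enqueue the task and wait for its final result.
result = go_to_sleep.delay(0.05)   # ~5 s total across 100 iterations
task_id = result.task_id           # hand this id to the celery-progress frontend
print(result.get(timeout=30))      # blocks until the task returns 'Done'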
# -- encoding: UTF-8 --
from datetime import time

import babel.dates as dates
import pytest


@pytest.mark.parametrize("locale, time, expected_period_id", [
    ("de", time(7, 42), "morning1"),      # (from, before)
    ("de", time(3, 11), "night1"),        # (after, before)
    ("fi", time(0), "midnight"),          # (at)
    ("en_US", time(12), "noon"),          # (at)
    ("agq", time(10), "am"),              # no periods defined
    ("agq", time(22), "pm"),              # no periods defined
    ("am", time(14), "afternoon1"),       # (before, after)
])
def test_day_period_rules(locale, time, expected_period_id):
    assert dates.get_period_id(time, locale=locale) == expected_period_id
# Copyright 2016-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
<import_from_future_stmt> print_function<import_from_stmt>awsglue.context GlueContext<import_from_stmt>hive_metastore_migration *<line_sep>CONNECTION_TYPE_NAME='com.amazonaws.services.glue.connections.DataCatalogConnection'<def_stmt>transform_catalog_to_df dyf<block_start><return>dyf.toDF()<block_end><def_stmt>datacatalog_migrate_to_s3 databases tables partitions output_path# load
<block_start>databases.write.format('json').mode('overwrite').save(output_path+'databases')<line_sep>tables.write.format('json').mode('overwrite').save(output_path+'tables')<line_sep>partitions.write.format('json').mode('overwrite').save(output_path+'partitions')<block_end># apply hard-coded schema on dataframes, ensure schema is consistent for transformations
<def_stmt>change_schemas sql_context databases tables partitions<block_start>databases=sql_context.read.json(databases.toJSON() schema=DATACATALOG_DATABASE_SCHEMA)<line_sep>tables=sql_context.read.json(tables.toJSON() schema=DATACATALOG_TABLE_SCHEMA)<line_sep>partitions=sql_context.read.json(partitions.toJSON() schema=DATACATALOG_PARTITION_SCHEMA)<line_sep><return>(databases tables partitions)<block_end><def_stmt>datacatalog_migrate_to_hive_metastore sc sql_context databases tables partitions connection<block_start>hive_metastore=HiveMetastore(connection sql_context)<line_sep>transform_databases_tables_partitions(sc sql_context hive_metastore databases tables partitions)<line_sep>hive_metastore.export_to_metastore()<block_end><def_stmt>read_databases_from_catalog sql_context glue_context datacatalog_name database_arr region<block_start>databases=<none><line_sep>tables=<none><line_sep>partitions=<none><for_stmt>database database_arr<block_start>dyf=glue_context.create_dynamic_frame.from_options(connection_type=CONNECTION_TYPE_NAME connection_options={'catalog.name':datacatalog_name 'catalog.database':database 'catalog.region':region})<line_sep>df=transform_catalog_to_df(dyf)<line_sep># filter into databases, tables, and partitions
dc_databases_no_schema=df.where('type = "database"')<line_sep>dc_tables_no_schema=df.where('type = "table"')<line_sep>dc_partitions_no_schema=df.where('type = "partition"')<line_sep># apply schema to dataframes
(dc_databases dc_tables dc_partitions)=change_schemas(sql_context dc_databases_no_schema dc_tables_no_schema dc_partitions_no_schema)<line_sep>(a_databases a_tables a_partitions)=transform_items_to_item(dc_databases=dc_databases dc_tables=dc_tables dc_partitions=dc_partitions)<line_sep>databases=databases.union(a_databases)<if>databases<else>a_databases<line_sep>tables=tables.union(a_tables)<if>tables<else>a_tables<line_sep>partitions=partitions.union(a_partitions)<if>partitions<else>a_partitions<block_end><return>(databases tables partitions)<block_end><def_stmt>main <block_start>to_s3='to-s3'<line_sep>to_jdbc='to-jdbc'<line_sep>parser=argparse.ArgumentParser(prog=sys.argv[0])<line_sep>parser.add_argument('-m' '--mode' required=<true> choices=[to_s3 to_jdbc] help='Choose to migrate from datacatalog to s3 or to metastore')<line_sep>parser.add_argument('--database-names' required=<true> help='Semicolon-separated list of names of database in Datacatalog to export')<line_sep>parser.add_argument('-o' '--output-path' required=<false> help='Output path, either local directory or S3 path')<line_sep>parser.add_argument('-c' '--connection-name' required=<false> help='Glue Connection name for Hive metastore JDBC connection')<line_sep>parser.add_argument('-R' '--region' required=<false> help='AWS region of source Glue DataCatalog, default to "us-east-1"')<line_sep>options=get_options(parser sys.argv)<if_stmt>options['mode']<eq>to_s3<block_start>validate_options_in_mode(options=options mode=to_s3 required_options=['output_path'] not_allowed_options=['connection_name'])<block_end><elif_stmt>options['mode']<eq>to_jdbc<block_start>validate_options_in_mode(options=options mode=to_jdbc required_options=['connection_name'] not_allowed_options=['output_path'])<block_end><else_stmt><block_start><raise>AssertionError('unknown mode '+options['mode'])<block_end>validate_aws_regions(options['region'])<line_sep># spark env
(conf sc sql_context)=get_spark_env()<line_sep>glue_context=GlueContext(sc)<line_sep># extract from datacatalog reader
database_arr=options['database_names'].split(';')<line_sep>(databases tables partitions)=read_databases_from_catalog(sql_context=sql_context glue_context=glue_context datacatalog_name='datacatalog' database_arr=database_arr region=options.get('region')<or>'us-east-1')<if_stmt>options['mode']<eq>to_s3<block_start>output_path=get_output_dir(options['output_path'])<line_sep>datacatalog_migrate_to_s3(databases=databases tables=tables partitions=partitions output_path=output_path)<block_end><elif_stmt>options['mode']<eq>to_jdbc<block_start>connection_name=options['connection_name']<line_sep>datacatalog_migrate_to_hive_metastore(sc=sc sql_context=sql_context databases=databases tables=tables partitions=partitions connection=glue_context.extract_jdbc_conf(connection_name))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
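Hedged example invocations assembled from the argparse flags defined in main(); the script file name, database names, bucket and connection name are placeholders, and the script needs the AWS Glue/Spark libraries available at run time.

```python
# Placeholder invocations matching the flags defined in main():
#
#   Export two Data Catalog databases to S3:
#     migrate_datacatalog.py -m to-s3 --database-names "sales;marketing" \
#         -o s3://my-bucket/catalog-export/ -R us-east-1
#
#   Export one database to a Hive metastore through a Glue JDBC connection:
#     migrate_datacatalog.py -m to-jdbc --database-names "sales" \
#         -c my-hive-metastore-connection
```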
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Explainer evaluation encapsulator."""<import_stmt>copy<import_from_stmt>mindinsight.explainer.encapsulator.explain_data_encap ExplainDataEncap<import_from_stmt>mindinsight.datavisual.common.exceptions TrainJobNotExistError<class_stmt>EvaluationEncap(ExplainDataEncap)<block_start>"""Explainer evaluation encapsulator."""<def_stmt>query_explainer_scores self train_id<block_start>"""Query evaluation scores."""<line_sep>job=self.job_manager.get_job(train_id)<if_stmt>job<is><none><block_start><raise>TrainJobNotExistError(train_id)<block_end><return>copy.deepcopy(job.explainer_scores)<block_end><block_end> |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
<import_stmt>logging<import_stmt>os<import_stmt>subprocess<import_stmt>tempfile<import_stmt>zipfile<import_from_stmt>lib.subcommand SubCommand<import_from_stmt>lib.symbol SymbolDataSources<line_sep>LOGGER=logging.getLogger('dmprof')<class_stmt>UploadCommand(SubCommand)<block_start><def_stmt>__init__ self<block_start>super(UploadCommand self).__init__('Usage: %prog upload [--gsutil path/to/gsutil] '<concat>'<first-dump> <destination-gs-path>')<line_sep>self._parser.add_option('--gsutil' default='gsutil' help='path to GSUTIL' metavar='GSUTIL')<block_end><def_stmt>do self sys_argv<block_start>options,args=self._parse_args(sys_argv 2)<line_sep>dump_path=args[1]<line_sep>gs_path=args[2]<line_sep>dump_files=SubCommand._find_all_dumps(dump_path)<line_sep>bucket_files=SubCommand._find_all_buckets(dump_path)<line_sep>prefix=SubCommand._find_prefix(dump_path)<line_sep>symbol_data_sources=SymbolDataSources(prefix)<line_sep>symbol_data_sources.prepare()<line_sep>symbol_path=symbol_data_sources.path()<line_sep>handle_zip,filename_zip=tempfile.mkstemp('.zip' 'dmprof')<line_sep>os.close(handle_zip)<try_stmt><block_start>file_zip=zipfile.ZipFile(filename_zip 'w' zipfile.ZIP_DEFLATED)<for_stmt>filename dump_files<block_start>file_zip.write(filename os.path.basename(os.path.abspath(filename)))<block_end><for_stmt>filename bucket_files<block_start>file_zip.write(filename os.path.basename(os.path.abspath(filename)))<block_end>symbol_basename=os.path.basename(os.path.abspath(symbol_path))<for_stmt>filename os.listdir(symbol_path)<block_start><if_stmt><not>filename.startswith('.')<block_start>file_zip.write(os.path.join(symbol_path filename) os.path.join(symbol_basename os.path.basename(os.path.abspath(filename))))<block_end><block_end>file_zip.close()<line_sep>returncode=UploadCommand._run_gsutil(options.gsutil 'cp' '-a' 'public-read' filename_zip gs_path)<block_end><finally_stmt><block_start>os.remove(filename_zip)<block_end><return>returncode<block_end>@staticmethod<def_stmt>_run_gsutil gsutil *args<block_start>"""Run gsutil as a subprocess.
Args:
*args: Arguments to pass to gsutil. The first argument should be an
operation such as ls, cp or cat.
Returns:
The return code from the process.
"""<line_sep>command=[gsutil]+list(args)<line_sep>LOGGER.info("Running: %s" command)<try_stmt><block_start><return>subprocess.call(command)<block_end><except_stmt>OSError e<block_start>LOGGER.error('Error to run gsutil: %s' e)<block_end><block_end><block_end> |
<class_stmt>Solution<block_start><def_stmt>maxProfit self prices<block_start>"""
:type prices: List[int]
:rtype: int
"""<line_sep>size=len(prices)<line_sep>bought=<false><line_sep>profit=0<line_sep>price=0<for_stmt>i range(0 size-1)<block_start><if_stmt><not>bought<block_start><if_stmt>prices[i]<l>prices[i+1]<block_start>bought=<true><line_sep>price=prices[i]<block_end><block_end><else_stmt><block_start><if_stmt>prices[i]<g>prices[i+1]<block_start>bought=<false><line_sep>profit<augadd>prices[i]-price<line_sep>price=0<block_end><block_end><block_end><if_stmt>bought<block_start>profit<augadd>prices[i+1]-price<block_end><return>profit<block_end><block_end> |
<import_from_stmt>.sopttest_gateway SopttestGateway<line_sep> |
<import_stmt>numpy<as>np<import_stmt>cv2<import_stmt>os<import_stmt>argparse<import_stmt>csv<def_stmt>get_args <block_start>parser=argparse.ArgumentParser(description="This script cleans-up noisy labels "<concat>"and creates database for training." formatter_class=argparse.ArgumentDefaultsHelpFormatter)<line_sep>parser.add_argument("--input" type=str default="/home/sai/YANG/datasets/face_datasets/megaage_asian/megaage_asian/train_crop/" help="dataset; wiki or imdb")<line_sep>parser.add_argument("--output" type=str default='/home/sai/YANG/datasets/face_datasets/megaage_asian/megaage_asian/megaage_asian.npz' help="path to output database mat file")<line_sep>parser.add_argument('--label' default='/home/sai/YANG/datasets/face_datasets/megaage_asian/megaage_asian/train.csv' help='')<line_sep>parser.add_argument("--img_size" type=int default=112 help="output image size")<line_sep>args=parser.parse_args()<line_sep><return>args<block_end><def_stmt>main <block_start>args=get_args()<line_sep>out_genders=[]<line_sep>out_ages=[]<line_sep>out_imgs=[]<line_sep>labelList=csv.reader(open(args.label "rt" encoding="utf-8-sig"))<for_stmt>row labelList<block_start>true_age=int(row[1])<line_sep>true_gender=int(0)<line_sep>img_id=row[0]<line_sep>img=cv2.imread(os.path.join(args.input img_id))<if_stmt>img<is><none><block_start><continue><block_end>out_genders.append(true_gender)<line_sep>out_ages.append(true_age)<line_sep>out_imgs.append(cv2.resize(img (args.img_size args.img_size)))<block_end>np.savez(args.output image=np.array(out_imgs) gender=np.array(out_genders) age=np.array(out_ages) img_size=args.img_size)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
# -*- coding: utf-8 -*-
#@+leo-ver=5-thin
#@+node:ekr.20210926044012.1: * @file ../unittests/test_doctests.py
#@@first
"""Run all doctests."""<import_stmt>doctest<import_stmt>glob<import_stmt>os<import_stmt>unittest<import_from_stmt>leo.core leoGlobals<as>g<line_sep>unittest_dir=os.path.dirname(__file__)<line_sep>leo_dir=os.path.abspath(os.path.join(unittest_dir '..'))<line_sep>#@+others # Define a function containing a doctest.
#@+node:ekr.20210926053601.1: ** factorial (test_dectests.py)
<def_stmt>factorial n# Modified from https://docs.python.org/3/library/doctest.html
# Must import factorial. See: stackoverflow.com/questions/65066002
<block_start>"""Return the factorial of n, an exact integer >= 0.
>>> from leo.unittests.test_doctests import factorial
>>> [factorial(n) for n in range(6)]
[1, 1, 2, 6, 24, 120]
>>> factorial(30)
265252859812191058636308480000000
>>> factorial(-1)
Traceback (most recent call last):
...
ValueError: n must be >= 0
Factorials of floats are OK, but the float must be an exact integer:
>>> factorial(30.1)
Traceback (most recent call last):
...
ValueError: n must be exact integer
>>> factorial(30.0)
265252859812191058636308480000000
It must also not be ridiculously large:
>>> factorial(1e100)
Traceback (most recent call last):
...
OverflowError: n too large
"""<line_sep># Blank line above is required.
<import_stmt>math<if_stmt><not>n<ge>0<block_start><raise>ValueError("n must be >= 0")<block_end><if_stmt>math.floor(n)<ne>n<block_start><raise>ValueError("n must be exact integer")<block_end><if_stmt>n+1<eq>n# catch a value like 1e300
<block_start><raise>OverflowError("n too large")<block_end>result=1<line_sep>factor=2<while_stmt>factor<le>n<block_start>result<augmul>factor<line_sep>factor<augadd>1<block_end><return>result<block_end>#@-others
<class_stmt>TestDocTests(unittest.TestCase)# No need to be a subclass of leoTest2.LeoUnitTest.
<block_start><def_stmt>test_all_doctests self<block_start>fails_list=[]# List of files with failing doctests.
files_list=[]# List of files containing a doctest.
n=0# Total doctests found
<for_stmt>module ('core' 'plugins' 'unittests')<block_start>module_path=os.path.join(leo_dir module)<line_sep>self.assertTrue(os.path.exists(module_path) msg=repr(module_path))<line_sep>path=os.path.join(module_path '**' '*.py')<line_sep>files=glob.glob(path recursive=<true>)<line_sep>files=[z<for>z files<if><not>z.endswith('__init__.py')]<for_stmt>f files# Exclude two problematic files.
<block_start><if_stmt>'dtest.py'<in>f<or>'javascript.py'<in>f<block_start><continue><block_end>fails,count=doctest.testfile(f)<line_sep>n<augadd>count<if_stmt>count<block_start>files_list.append(f)<block_end><if_stmt>fails<block_start>fails_list.append(f)<line_sep>print(f"{fails} failures in {g.shortFileName(f)}")<block_end><block_end>self.assertEqual(fails_list [])<block_end><if_stmt>0<block_start>g.trace(f"{n} doctests found in {len(files_list)} file{g.plural(len(files_list))}")<line_sep>g.printObj(files_list tag="files containing any doctest")<line_sep>g.printObj(fails_list tag="files containing a failed doctest")<block_end><block_end><block_end>#@-leo
|
<def_stmt>f x<block_start><if_stmt>x<block_start><return><block_end><if_stmt>x<block_start><return><block_end><elif_stmt>y<block_start><return><block_end><if_stmt>x<block_start><return><block_end><else_stmt><block_start><return><block_end><if_stmt>x<block_start><return><block_end><elif_stmt>y<block_start><return><block_end><else_stmt><block_start><return><block_end><if_stmt>x<block_start><return><block_end><elif_stmt>y<block_start><return><block_end><elif_stmt>z<block_start><return><block_end><else_stmt><block_start><return><block_end><return><none><block_end> |
"""
Given a non-negative integer num, for every number i in the range 0 <= i <= num, calculate the number of 1's in
their binary representation and return them as an array.
Example:
For num = 5 you should return [0,1,1,2,1,2].
Follow up:
It is very easy to come up with a solution with run time O(n*sizeof(integer)). But can you do it in linear time O(n) /
possibly in a single pass?
Space complexity should be O(n).
Can you do it like a boss? Do it without using any builtin function like __builtin_popcount in c++ or in any other
language.
"""<line_sep>__author__='Daniel'<class_stmt>Solution(object)<block_start><def_stmt>countBits self num<block_start>"""
Dynamic programming: make use of what you have produced already
0 => 0
1 => 1
10 => 1+0
11 => 1+1
100 => 1+0
101 => 1+1
110 => 1+1
111 => 1+2
:type num: int
:rtype: List[int]
"""<line_sep>ret=[0]<line_sep>i=0<line_sep>hi=len(ret)<while_stmt>len(ret)<l>num+1<block_start><if_stmt>i<eq>hi<block_start>i=0<line_sep>hi=len(ret)<block_end>ret.append(1+ret[i])<line_sep>i<augadd>1<block_end><return>ret<block_end><block_end> |
<import_stmt>unittest<import_stmt>numpy<as>np<import_from_stmt>small_text.utils.data list_length<class_stmt>DataUtilsTest(unittest.TestCase)<block_start><def_stmt>test_list_length self<block_start>self.assertEqual(10 list_length(list(range(10))))<line_sep>self.assertEqual(10 list_length(np.random.rand(10 2)))<block_end><block_end> |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the network_util module that require the full network.yaml."""<import_stmt>os<import_stmt>unittest<import_stmt>makani<import_from_stmt>makani.avionics.network network_config<import_from_stmt>makani.avionics.network network_util<class_stmt>NetworkYamlTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>filename=os.path.join(makani.HOME 'avionics/network/network.yaml')<line_sep>self._network_config=network_config.NetworkConfig(filename)<block_end><def_stmt>testCheckForLoopRoutes self<block_start>config=self._network_config<line_sep>message_types=config.all_messages<line_sep>path_finder=network_util.PathFinder(config.GetSwitches() message_types)<for_stmt>message message_types<block_start>graph=network_util.MessageGraph(path_finder message)<line_sep>visitor=network_util.MessageGraphVisitor()<line_sep>graph.VisitSenders(visitor message.all_senders)<block_end><block_end><def_stmt>testCheckForUnintendedRecipients self<block_start>config=self._network_config<line_sep>message_types=config.all_messages<line_sep>path_finder=network_util.PathFinder(config.GetSwitches() message_types)<for_stmt>message message_types<block_start>graph=network_util.MessageGraph(path_finder message)<line_sep>network_util.CheckForUnintendedRecipients(graph)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
a[-1]<line_sep>a[-2:]<line_sep>a[:-2]<line_sep>a[::-1]<line_sep>a[1::-1]<line_sep>a[:-3:-1]<line_sep>a[-3::-1]<line_sep>point_coords=coords[i :]<line_sep>main(sys.argv[1:])<line_sep> |
###########################################################################
# Created by: <NAME>
# Email: <EMAIL>
# Copyright (c) 2017
###########################################################################
<import_from_stmt>PIL Image ImageOps ImageFilter<import_stmt>os<import_stmt>math<import_stmt>random<import_stmt>numpy<as>np<import_from_stmt>tqdm trange<import_stmt>torch<import_stmt>torch.utils.data<as>data<line_sep>__all__=names=("PascalContext" )<class_stmt>BaseDataset(data.Dataset)<block_start><def_stmt>__init__ self root split mode=<none> transform=<none> target_transform=<none> base_size=520 crop_size=480 <block_start>self.root=root<line_sep>self.transform=transform<line_sep>self.target_transform=target_transform<line_sep>self.split=split<line_sep>self.mode=mode<if>mode<is><not><none><else>split<line_sep>self.base_size=base_size<line_sep>self.crop_size=crop_size<if_stmt>self.mode<eq>"train"<block_start>print("BaseDataset: base_size {}, crop_size {}".format(base_size crop_size))<block_end><block_end><def_stmt>__getitem__ self index<block_start><raise>NotImplemented<block_end>@property<def_stmt>num_class self<block_start><return>self.NUM_CLASS<block_end>@property<def_stmt>pred_offset self<block_start><raise>NotImplemented<block_end><def_stmt>make_pred self x<block_start><return>x+self.pred_offset<block_end><def_stmt>_val_sync_transform self img mask<block_start>outsize=self.crop_size<line_sep>short_size=outsize<line_sep>w,h=img.size<if_stmt>w<g>h<block_start>oh=short_size<line_sep>ow=int(1.0<times>w<times>oh/h)<block_end><else_stmt><block_start>ow=short_size<line_sep>oh=int(1.0<times>h<times>ow/w)<block_end>img=img.resize((ow oh) Image.BILINEAR)<line_sep>mask=mask.resize((ow oh) Image.NEAREST)<line_sep># center crop
w,h=img.size<line_sep>x1=int(round((w-outsize)/2.0))<line_sep>y1=int(round((h-outsize)/2.0))<line_sep>img=img.crop((x1 y1 x1+outsize y1+outsize))<line_sep>mask=mask.crop((x1 y1 x1+outsize y1+outsize))<line_sep># final transform
<return>img self._mask_transform(mask)<block_end><def_stmt>_sync_transform self img mask# random mirror
<block_start><if_stmt>random.random()<l>0.5<block_start>img=img.transpose(Image.FLIP_LEFT_RIGHT)<line_sep>mask=mask.transpose(Image.FLIP_LEFT_RIGHT)<block_end>crop_size=self.crop_size<line_sep># random scale (short edge)
w,h=img.size<line_sep>long_size=random.randint(int(self.base_size<times>0.5) int(self.base_size<times>2.0))<if_stmt>h<g>w<block_start>oh=long_size<line_sep>ow=int(1.0<times>w<times>long_size/h+0.5)<line_sep>short_size=ow<block_end><else_stmt><block_start>ow=long_size<line_sep>oh=int(1.0<times>h<times>long_size/w+0.5)<line_sep>short_size=oh<block_end>img=img.resize((ow oh) Image.BILINEAR)<line_sep>mask=mask.resize((ow oh) Image.NEAREST)<line_sep># pad crop
<if_stmt>short_size<l>crop_size<block_start>padh=crop_size-oh<if>oh<l>crop_size<else>0<line_sep>padw=crop_size-ow<if>ow<l>crop_size<else>0<line_sep>img=ImageOps.expand(img border=(0 0 padw padh) fill=0)<line_sep>mask=ImageOps.expand(mask border=(0 0 padw padh) fill=0)<block_end># random crop crop_size
w,h=img.size<line_sep>x1=random.randint(0 w-crop_size)<line_sep>y1=random.randint(0 h-crop_size)<line_sep>img=img.crop((x1 y1 x1+crop_size y1+crop_size))<line_sep>mask=mask.crop((x1 y1 x1+crop_size y1+crop_size))<line_sep># final transform
<return>img self._mask_transform(mask)<block_end><def_stmt>_mask_transform self mask<block_start><return>torch.from_numpy(np.array(mask)).long()<block_end><block_end><class_stmt>PascalContext(BaseDataset)<block_start>NUM_CLASS=59<def_stmt>__init__ self root="./data" split="train" mode=<none> transform=<none> target_transform=<none> **kwargs<block_start>super(PascalContext self).__init__(root split mode transform target_transform **kwargs)<import_from_stmt>detail Detail<line_sep># from detail import mask
root=os.path.join(root "PascalContext")<line_sep>annFile=os.path.join(root "trainval_merged.json")<line_sep>imgDir=os.path.join(root "JPEGImages")<line_sep># training mode
self.detail=Detail(annFile imgDir split)<line_sep>self.transform=transform<line_sep>self.target_transform=target_transform<line_sep>self.ids=self.detail.getImgs()<line_sep># generate masks
self._mapping=np.sort(np.array([0 2 259 260 415 324 9 258 144 18 19 22 23 397 25 284 158 159 416 33 162 420 454 295 296 427 44 45 46 308 59 440 445 31 232 65 354 424 68 326 72 458 34 207 80 355 85 347 220 349 360 98 187 104 105 366 189 368 113 115 ]))<line_sep>self.classes=["background" "aeroplane" "mountain" "mouse" "track" "road" "bag" "motorbike" "fence" "bed" "bedclothes" "bench" "bicycle" "diningtable" "bird" "person" "floor" "boat" "train" "book" "bottle" "tree" "window" "plate" "platform" "tvmonitor" "building" "bus" "cabinet" "shelves" "light" "pottedplant" "wall" "car" "ground" "cat" "sidewalk" "truck" "ceiling" "rock" "chair" "wood" "food" "horse" "cloth" "sign" "computer" "sheep" "keyboard" "flower" "sky" "cow" "grass" "cup" "curtain" "snow" "water" "sofa" "dog" "door" ]<line_sep>self._key=np.array(range(len(self._mapping))).astype("uint8")<line_sep>mask_file=os.path.join(root self.split+".pth")<line_sep>print("mask_file:" mask_file)<if_stmt>os.path.exists(mask_file)<block_start>self.masks=torch.load(mask_file)<block_end><else_stmt><block_start>self.masks=self._preprocess(mask_file)<block_end><block_end><def_stmt>_class_to_index self mask# assert the values
<block_start>values=np.unique(mask)<for_stmt>i range(len(values))<block_start><assert_stmt>values[i]<in>self._mapping<block_end>index=np.digitize(mask.ravel() self._mapping right=<true>)<line_sep><return>self._key[index].reshape(mask.shape)<block_end><def_stmt>_preprocess self mask_file<block_start>masks={}<line_sep>tbar=trange(len(self.ids))<line_sep>print("Preprocessing mask, this will take a while."+"But don't worry, it only run once for each split.")<for_stmt>i tbar<block_start>img_id=self.ids[i]<line_sep>mask=Image.fromarray(self._class_to_index(self.detail.getMask(img_id)))<line_sep>masks[img_id["image_id"]]=mask<line_sep>tbar.set_description("Preprocessing masks {}".format(img_id["image_id"]))<block_end>torch.save(masks mask_file)<line_sep><return>masks<block_end><def_stmt>__getitem__ self index<block_start>img_id=self.ids[index]<line_sep>path=img_id["file_name"]<line_sep>iid=img_id["image_id"]<line_sep>img=Image.open(os.path.join(self.detail.img_folder path)).convert("RGB")<if_stmt>self.mode<eq>"test"<block_start><if_stmt>self.transform<is><not><none><block_start>img=self.transform(img)<block_end><return>img os.path.basename(path)<block_end># convert mask to 60 categories
mask=self.masks[iid]<line_sep># synchrosized transform
<if_stmt>self.mode<eq>"train"<block_start>img,mask=self._sync_transform(img mask)<block_end><elif_stmt>self.mode<eq>"val"<block_start>img,mask=self._val_sync_transform(img mask)<block_end><else_stmt><block_start><assert_stmt>self.mode<eq>"testval"<line_sep>mask=self._mask_transform(mask)<block_end># general resize, normalize and toTensor
<if_stmt>self.transform<is><not><none><block_start>img=self.transform(img)<block_end><if_stmt>self.target_transform<is><not><none><block_start>mask=self.target_transform(mask)<block_end><return>img mask<block_end><def_stmt>_mask_transform self mask<block_start>target=np.array(mask).astype("int32")-1<line_sep><return>torch.from_numpy(target).long()<block_end><def_stmt>__len__ self<block_start><return>len(self.ids)<block_end>@property<def_stmt>pred_offset self<block_start><return>1<block_end><block_end> |
<import_from_stmt>unittest TestCase<import_stmt>torch<import_from_stmt>api_inference_community.normalizers speaker_diarization_normalize<class_stmt>NormalizersTestCase(TestCase)<block_start><def_stmt>test_speaker_diarization_dummy self<block_start>tensor=torch.zeros((10 2))<line_sep>outputs=speaker_diarization_normalize(tensor 16000 ["SPEAKER_0" "SPEAKER_1"])<line_sep>self.assertEqual(outputs [])<block_end><def_stmt>test_speaker_diarization self<block_start>tensor=torch.zeros((10 2))<line_sep>tensor[1:4 0]=1<line_sep>tensor[3:8 1]=1<line_sep>tensor[8:10 0]=1<line_sep>outputs=speaker_diarization_normalize(tensor 16000 ["SPEAKER_0" "SPEAKER_1"])<line_sep>self.assertEqual(outputs [{"class":"SPEAKER_0" "start":1/16000 "end":4/16000} {"class":"SPEAKER_1" "start":3/16000 "end":8/16000} {"class":"SPEAKER_0" "start":8/16000 "end":10/16000} ] )<block_end><def_stmt>test_speaker_diarization_3_speakers self<block_start>tensor=torch.zeros((10 3))<line_sep>tensor[1:4 0]=1<line_sep>tensor[3:8 1]=1<line_sep>tensor[8:10 2]=1<with_stmt>self.assertRaises(ValueError)<block_start>outputs=speaker_diarization_normalize(tensor 16000 ["SPEAKER_0" "SPEAKER_1"])<block_end>outputs=speaker_diarization_normalize(tensor 16000 ["SPEAKER_0" "SPEAKER_1" "SPEAKER_2"])<line_sep>self.assertEqual(outputs [{"class":"SPEAKER_0" "start":1/16000 "end":4/16000} {"class":"SPEAKER_1" "start":3/16000 "end":8/16000} {"class":"SPEAKER_2" "start":8/16000 "end":10/16000} ] )<block_end><block_end> |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chrome Autofill Task Flow
Execute a set of autofill tasks in a fresh ChromeDriver instance that has been
pre-loaded with some default profile.
Requires:
- Selenium python bindings
http://selenium-python.readthedocs.org/
- ChromeDriver
https://sites.google.com/a/chromium.org/chromedriver/downloads
The ChromeDriver executable must be available on the search PATH.
- Chrome
"""<import_stmt>abc<import_from_stmt>urlparse urlparse<import_stmt>os<import_stmt>shutil<import_from_stmt>random choice<import_from_stmt>string ascii_lowercase<import_from_stmt>selenium webdriver<import_from_stmt>selenium.common.exceptions TimeoutException WebDriverException<import_from_stmt>selenium.webdriver.chrome.options Options<class_stmt>TaskFlow(object)<block_start>"""Represents an executable set of Autofill Tasks.
Attributes:
profile: Dict of profile data that acts as the master source for
validating autofill behaviour.
debug: Whether debug output should be printed (False if not specified).
"""<line_sep>__metaclass__=abc.ABCMeta<def_stmt>__init__ self profile debug=<false><block_start>self.set_profile(profile)<line_sep>self._debug=debug<line_sep>self._running=<false><line_sep>self._tasks=self._generate_task_sequence()<block_end><def_stmt>set_profile self profile<block_start>"""Validates |profile| before assigning it as the source of user data.
Args:
profile: Dict of profile data that acts as the master source for
validating autofill behaviour.
Raises:
ValueError: The provided |profile| is not a valid dict
"""<if_stmt><not>isinstance(profile dict)<block_start><raise>ValueError('profile must be a a valid dictionary')<line_sep><block_end>self._profile=profile<block_end><def_stmt>run self user_data_dir chrome_binary=<none><block_start>"""Generates and executes a sequence of chrome driver tasks.
Args:
user_data_dir: Path string for the writable directory in which profiles
should be stored.
chrome_binary: Path string to the Chrome binary that should be used by
ChromeDriver.
If None then it will use the PATH to find a binary.
Raises:
RuntimeError: Running the TaskFlow was attempted while it's already
running.
Exception: Any failure encountered while running the tests
"""<if_stmt>self._running<block_start><raise>RuntimeError('Cannot run TaskFlow when already running')<block_end>self._running=<true><line_sep>self._run_tasks(user_data_dir chrome_binary=chrome_binary)<line_sep>self._running=<false><block_end>@abc.abstractmethod<def_stmt>_generate_task_sequence self<block_start>"""Generates a set of executable tasks that will be run in ChromeDriver.
Note: Subclasses must implement this method.
Raises:
NotImplementedError: Subclass did not implement the method
Returns:
A list of AutofillTask instances that are to be run in ChromeDriver.
These tasks are to be run in order.
"""<line_sep><raise>NotImplementedError()<block_end><def_stmt>_run_tasks self user_data_dir chrome_binary=<none><block_start>"""Runs the internal set of tasks in a fresh ChromeDriver instance.
Args:
user_data_dir: Path string for the writable directory in which profiles
should be stored.
chrome_binary: Path string to the Chrome binary that should be used by
ChromeDriver.
If None then it will use the PATH to find a binary.
Raises:
Exception: Any failure encountered while running the tests
"""<line_sep>driver=self._get_driver(user_data_dir chrome_binary=chrome_binary)<try_stmt><block_start><for_stmt>task self._tasks<block_start>task.run(driver)<block_end><block_end><finally_stmt><block_start>driver.quit()<line_sep>shutil.rmtree(self._profile_dir_dst)<block_end><block_end><def_stmt>_get_driver self user_data_dir profile_name=<none> chrome_binary=<none> chromedriver_binary='chromedriver'<block_start>"""Spin up a ChromeDriver instance that uses a given set of user data.
Generates a temporary profile data directory using a local set of test data.
Args:
user_data_dir: Path string for the writable directory in which profiles
should be stored.
profile_name: Name of the profile data directory to be created/used in
user_data_dir.
If None then an eight character name will be generated randomly.
This directory will be removed after the task flow completes.
chrome_binary: Path string to the Chrome binary that should be used by
ChromeDriver.
If None then it will use the PATH to find a binary.
Returns: The generated Chrome Driver instance.
"""<line_sep>options=Options()<if_stmt>profile_name<is><none><block_start>profile_name=''.join(choice(ascii_lowercase)<for>i range(8))<block_end>options.add_argument('--profile-directory=%s'%profile_name)<line_sep>full_path=os.path.realpath(__file__)<line_sep>path,filename=os.path.split(full_path)<line_sep>profile_dir_src=os.path.join(path 'testdata' 'Default')<line_sep>self._profile_dir_dst=os.path.join(user_data_dir profile_name)<line_sep>self._copy_tree(profile_dir_src self._profile_dir_dst)<if_stmt>chrome_binary<is><not><none><block_start>options.binary_location=chrome_binary<block_end>options.add_argument('--user-data-dir=%s'%user_data_dir)<line_sep>options.add_argument('--show-autofill-type-predictions')<line_sep>service_args=[]<line_sep>driver=webdriver.Chrome(executable_path=chromedriver_binary chrome_options=options service_args=service_args)<line_sep>driver.set_page_load_timeout(15)# seconds
<return>driver<block_end><def_stmt>_copy_tree self src dst<block_start>"""Recursively copy a directory tree.
If the destination directory does not exist then it will be created for you.
Doesn't overwrite newer existing files.
Args:
src: Path to the target source directory. It must exist.
dst: Path to the target destination directory. Permissions to create the
the directory (if necessary) and modify it's contents.
"""<if_stmt><not>os.path.exists(dst)<block_start>os.makedirs(dst)<block_end><for_stmt>item os.listdir(src)<block_start>src_item=os.path.join(src item)<line_sep>dst_item=os.path.join(dst item)<if_stmt>os.path.isdir(src_item)<block_start>self._copy_tree(src_item dst_item)<block_end><elif_stmt>(<not>os.path.exists(dst_item)<or>os.stat(src_item).st_mtime-os.stat(dst_item).st_mtime<g>1)# Copy a file if it doesn't already exist, or if existing one is older.
<block_start>shutil.copy2(src_item dst_item)<block_end><block_end><block_end><block_end> |
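A minimal subclass sketch showing how the abstract API above is meant to be used; the toy task, profile keys and paths are illustrative stand-ins for the project's real AutofillTask classes.

```python
# Minimal subclass sketch; the toy task and the values below are illustrative.
class PrintUrlTask(object):
  """Toy task: _run_tasks only requires objects with a run(driver) method."""
  def run(self, driver):
    print(driver.current_url)

class ToyFlow(TaskFlow):
  def _generate_task_sequence(self):
    return [PrintUrlTask()]

flow = ToyFlow({'EMAIL_ADDRESS': 'jane@example.com'}, debug=True)
flow.run(user_data_dir='/tmp/autofill_profiles')
```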
<import_from_stmt>django template<line_sep>register=template.Library()<line_sep>@register.filter()<def_stmt>redact text case<block_start><return>case.redact_obj(text)<block_end>@register.filter()<def_stmt>elide text case<block_start><return>case.elide_obj(text)<block_end> |
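A hypothetical template usage for the two filters above; the {% load %} name and the variable names depend on the surrounding project and are placeholders.

```python
# Hypothetical template usage (load name and variable names are placeholders):
#   {% load case_filters %}
#   {{ document.body_text|redact:case }}
#   {{ document.body_text|elide:case }}
```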
<class_stmt>A(object)<block_start><def_stmt>A <block_start>print('factory')<line_sep><return>A()<block_end><def_stmt>__init__ self<block_start>print('init')<block_end><def_stmt>__call__ self<block_start>print('call')<block_end><block_end>print('call the constructor')<line_sep>a=A()<line_sep>print('call the constructor and the function')<line_sep>b=A()()<line_sep>print('call the function')<line_sep>c=A.A()<line_sep>#https://pt.stackoverflow.com/q/109813/101
|
#@<> Setup
testutil.deploy_sandbox(__mysql_sandbox_port1 "root")<line_sep>#@<> Setup cluster
<import_stmt>mysqlsh<line_sep>mydba=mysqlsh.connect_dba(__sandbox_uri1)<line_sep>cluster=mydba.create_cluster("mycluster")<line_sep>cluster.disconnect()<line_sep>#@<> Catch error through mysqlsh.Error
<try_stmt><block_start>mydba.get_cluster("badcluster")<line_sep>testutil.fail("<red>Function didn't throw exception as expected</red>")<block_end><except_stmt>mysqlsh.Error<as>e<block_start>EXPECT_EQ(51101 e.code)<block_end><except_stmt><block_start>testutil.fail("<red>Function threw wrong exception</red>")<block_end>#@<> dba.session
mydba.session.run_sql("select 1")<line_sep>#@<> DbError should be a subclass of Error
<try_stmt><block_start>mydba.session.run_sql("badquery")<line_sep>testutil.fail("<red>Function didn't throw exception as expected</red>")<block_end><except_stmt>mysqlsh.DBError<as>e<block_start>EXPECT_EQ(mysql.ErrorCode.ER_PARSE_ERROR e.code)<block_end><except_stmt><block_start>testutil.fail("<red>Function threw wrong exception</red>")<block_end><try_stmt><block_start>mydba.session.run_sql("badquery")<line_sep>testutil.fail("<red>Function didn't throw exception as expected</red>")<block_end><except_stmt>mysqlsh.Error<as>e<block_start>EXPECT_EQ(mysql.ErrorCode.ER_PARSE_ERROR e.code)<block_end><except_stmt><block_start>testutil.fail("<red>Function threw wrong exception</red>")<block_end>#@<> Check for __qualname__ and __name__ in wrapped methods
EXPECT_EQ("Testutils.deploy_sandbox" testutil.deploy_sandbox.__qualname__)<line_sep>EXPECT_EQ("Dba.create_cluster" dba.create_cluster.__qualname__)<line_sep>EXPECT_EQ("deploy_sandbox" testutil.deploy_sandbox.__name__)<line_sep>EXPECT_EQ("create_cluster" dba.create_cluster.__name__)<line_sep>#@<> check that isatty exists (checking the return value depends on how the tests are ran)
sys.stdout.isatty()<line_sep>sys.stdin.isatty()<line_sep>sys.stderr.isatty()<line_sep>#@<> Cleanup
mydba.session.close()<line_sep>testutil.destroy_sandbox(__mysql_sandbox_port1)<line_sep> |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: carbon.proto
<import_stmt>sys<line_sep>_b=sys.version_info[0]<l>3<and>(<lambda>x:x)<or>(<lambda>x:x.encode('latin1'))<import_from_stmt>google.protobuf descriptor<as>_descriptor<import_from_stmt>google.protobuf message<as>_message<import_from_stmt>google.protobuf reflection<as>_reflection<import_from_stmt>google.protobuf symbol_database<as>_symbol_database<import_from_stmt>google.protobuf descriptor_pb2<line_sep># @@protoc_insertion_point(imports)
_sym_db=_symbol_database.Default()<line_sep>DESCRIPTOR=_descriptor.FileDescriptor(name='carbon.proto' package='' syntax='proto3' serialized_pb=_b('\n\x0c\x63\x61rbon.proto\")\n\x05Point\x12\x11\n\ttimestamp\x18\x01 \x01(\r\x12\r\n\x05value\x18\x02 \x01(\x01\"0\n\x06Metric\x12\x0e\n\x06metric\x18\x01 \x01(\t\x12\x16\n\x06points\x18\x02 \x03(\x0b\x32\x06.Point\"#\n\x07Payload\x12\x18\n\x07metrics\x18\x01 \x03(\x0b\x32\x07.Metricb\x06proto3'))<line_sep>_sym_db.RegisterFileDescriptor(DESCRIPTOR)<line_sep>_POINT=_descriptor.Descriptor(name='Point' full_name='Point' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='timestamp' full_name='Point.timestamp' index=0 number=1 type=13 cpp_type=3 label=1 has_default_value=<false> default_value=0 message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) _descriptor.FieldDescriptor(name='value' full_name='Point.value' index=1 number=2 type=1 cpp_type=5 label=1 has_default_value=<false> default_value=float(0) message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) ] extensions=[] nested_types=[] enum_types=[] options=<none> is_extendable=<false> syntax='proto3' extension_ranges=[] oneofs=[] serialized_start=16 serialized_end=57 )<line_sep>_METRIC=_descriptor.Descriptor(name='Metric' full_name='Metric' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='metric' full_name='Metric.metric' index=0 number=1 type=9 cpp_type=9 label=1 has_default_value=<false> default_value=_b("").decode('utf-8') message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) _descriptor.FieldDescriptor(name='points' full_name='Metric.points' index=1 number=2 type=11 cpp_type=10 label=3 has_default_value=<false> default_value=[] message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) ] extensions=[] nested_types=[] enum_types=[] options=<none> is_extendable=<false> syntax='proto3' extension_ranges=[] oneofs=[] serialized_start=59 serialized_end=107 )<line_sep>_PAYLOAD=_descriptor.Descriptor(name='Payload' full_name='Payload' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='metrics' full_name='Payload.metrics' index=0 number=1 type=11 cpp_type=10 label=3 has_default_value=<false> default_value=[] message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) ] extensions=[] nested_types=[] enum_types=[] options=<none> is_extendable=<false> syntax='proto3' extension_ranges=[] oneofs=[] serialized_start=109 serialized_end=144 )<line_sep>_METRIC.fields_by_name['points'].message_type=_POINT<line_sep>_PAYLOAD.fields_by_name['metrics'].message_type=_METRIC<line_sep>DESCRIPTOR.message_types_by_name['Point']=_POINT<line_sep>DESCRIPTOR.message_types_by_name['Metric']=_METRIC<line_sep>DESCRIPTOR.message_types_by_name['Payload']=_PAYLOAD<line_sep>Point=_reflection.GeneratedProtocolMessageType('Point' (_message.Message ) dict(DESCRIPTOR=_POINT __module__='carbon_pb2'# @@protoc_insertion_point(class_scope:Point)
))<line_sep>_sym_db.RegisterMessage(Point)<line_sep>Metric=_reflection.GeneratedProtocolMessageType('Metric' (_message.Message ) dict(DESCRIPTOR=_METRIC __module__='carbon_pb2'# @@protoc_insertion_point(class_scope:Metric)
))<line_sep>_sym_db.RegisterMessage(Metric)<line_sep>Payload=_reflection.GeneratedProtocolMessageType('Payload' (_message.Message ) dict(DESCRIPTOR=_PAYLOAD __module__='carbon_pb2'# @@protoc_insertion_point(class_scope:Payload)
))<line_sep>_sym_db.RegisterMessage(Payload)<line_sep># @@protoc_insertion_point(module_scope)
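A small round-trip sketch using the generated Point/Metric/Payload classes; the metric name and values are illustrative.

```python
# Round-trip sketch with the generated classes (values are illustrative).
payload = Payload()
metric = payload.metrics.add()
metric.metric = 'servers.web01.cpu'
metric.points.add(timestamp=1500000000, value=0.42)

wire = payload.SerializeToString()      # bytes on the wire
decoded = Payload()
decoded.ParseFromString(wire)
assert decoded.metrics[0].points[0].value == 0.42
```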
|
<import_stmt>logging<import_stmt>os<import_stmt>time<import_stmt>base64<import_stmt>random<import_stmt>string<import_from_stmt>lsassy.impacketfile ImpacketFile<import_from_stmt>lsassy.dumpmethod IDumpMethod<class_stmt>DumpMethod(IDumpMethod)<block_start><def_stmt>__init__ self session timeout<block_start>super().__init__(session timeout)<line_sep>self.mirrordump="MirrorDump.exe"<line_sep>self.mirrordump_path=<false><line_sep>self.mirrordump_remote_share="C$"<line_sep>self.mirrordump_remote_path="\\Windows\\Temp\\"<line_sep>self.mirrordump_uploaded=<false><block_end><def_stmt>prepare self options<block_start>self.mirrordump=options.get("mirrordump" self.mirrordump)<line_sep>self.mirrordump_path=options.get("mirrordump_path" self.mirrordump_path)<line_sep>self.mirrordump_remote_share=options.get("mirrordump_remote_share" self.mirrordump_remote_share)<line_sep>self.mirrordump_remote_path=options.get("mirrordump_remote_path" self.mirrordump_remote_path)<if_stmt><not>self.mirrordump_path<block_start>logging.error("Missing mirrordump_path")<line_sep><return><none><block_end><if_stmt><not>os.path.exists(self.mirrordump_path)<block_start>logging.error("{} does not exist.".format(self.mirrordump_path))<line_sep><return><none><block_end># Upload MirrorDump
logging.debug('Copy {} to {}'.format(self.mirrordump_path self.mirrordump_remote_path))<with_stmt>open(self.mirrordump_path 'rb')<as>p<block_start><try_stmt><block_start>self._session.smb_session.putFile(self.mirrordump_remote_share self.mirrordump_remote_path+self.mirrordump p.read)<line_sep>logging.success("MirrorDump successfully uploaded")<line_sep>self.mirrordump_uploaded=<true><line_sep><return><true><block_end><except_stmt>Exception<as>e<block_start>logging.error("MirrorDump upload error" exc_info=<true>)<line_sep><return><none><block_end><block_end><block_end><def_stmt>clean self<block_start><if_stmt>self.mirrordump_uploaded<block_start>ImpacketFile.delete(self._session self.mirrordump_remote_path+self.mirrordump timeout=self._timeout)<block_end><block_end><def_stmt>get_commands self dump_path=<none> dump_name=<none> no_powershell=<false><block_start>cmd_command="""{}{} -f {}{} -d {}""".format(self.mirrordump_remote_path self.mirrordump self.dump_path self.dump_name ''.join(random.choice(string.ascii_letters+string.digits)<for>_ range(8))+".dll")<line_sep>pwsh_command=cmd_command<line_sep><return>{"cmd":cmd_command "pwsh":pwsh_command}<block_end><block_end> |
<import_stmt>e1<import_stmt>e2<import_stmt>e3<import_stmt>destijl<line_sep> |
<import_stmt>numpy<as>np<import_stmt>pytest<import_from_stmt>artemis.general.nondeterminism_hunting delete_vars assert_variable_matches_between_runs variable_matches_between_runs reset_variable_tracker<def_stmt>_runs_are_the_same var_gen_1 var_gen_2 use_assert=<false><block_start>delete_vars(['_test_random_var_32r5477w32'])<for_stmt>run,gen [(0 var_gen_1) (1 var_gen_2)]<block_start>reset_variable_tracker()<for_stmt>v gen<block_start><if_stmt>use_assert<block_start>assert_variable_matches_between_runs(v '_test_random_var_32r5477w32')<block_end><else_stmt><block_start>its_a_match=variable_matches_between_runs(v '_test_random_var_32r5477w32')<if_stmt>run<eq>0<block_start><assert_stmt>its_a_match<is><none><block_end><else_stmt><block_start><if_stmt><not>its_a_match<block_start><return><false><block_end><block_end><block_end><block_end><block_end><return><true><block_end><def_stmt>test_variable_matches_between_runs <block_start>rng1=np.random.RandomState(1234)<line_sep>gen1=(rng1.randn(3 4)<for>_ range(5))<line_sep>rng2=np.random.RandomState(1234)<line_sep>gen2=(rng2.randn(3 4)<for>_ range(5))<assert_stmt>_runs_are_the_same(gen1 gen2)<line_sep>rng=np.random.RandomState(1234)<line_sep>gen1=(rng.randn(3 4)<for>_ range(5))<line_sep>gen2=(rng.randn(3 4)<for>_ range(5))<assert_stmt><not>_runs_are_the_same(gen1 gen2)<line_sep>gen1=(i<for>i range(5))<line_sep>gen2=(i<for>i range(5))<assert_stmt>_runs_are_the_same(gen1 gen2)<line_sep>gen1=(i<for>i range(5))<line_sep>gen2=(i<if>i<l>4<else>7<for>i range(5))<assert_stmt><not>_runs_are_the_same(gen1 gen2)<block_end><def_stmt>test_assert_variable_matches_between_runs <block_start>rng1=np.random.RandomState(1234)<line_sep>gen1=(rng1.randn(3 4)<for>_ range(5))<line_sep>rng2=np.random.RandomState(1234)<line_sep>gen2=(rng2.randn(3 4)<for>_ range(5))<line_sep>_runs_are_the_same(gen1 gen2 use_assert=<true>)<line_sep>rng=np.random.RandomState(1234)<line_sep>gen1=(rng.randn(3 4)<for>_ range(5))<line_sep>gen2=(rng.randn(3 4)<for>_ range(5))<with_stmt>pytest.raises(AssertionError)<block_start>_runs_are_the_same(gen1 gen2 use_assert=<true>)<block_end>gen1=(i<for>i range(5))<line_sep>gen2=(i<for>i range(5))<line_sep>_runs_are_the_same(gen1 gen2 use_assert=<true>)<line_sep>gen1=(i<for>i range(5))<line_sep>gen2=(i<if>i<l>4<else>7<for>i range(5))<with_stmt>pytest.raises(AssertionError)<block_start>_runs_are_the_same(gen1 gen2 use_assert=<true>)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>test_variable_matches_between_runs()<line_sep>test_assert_variable_matches_between_runs()<block_end> |
<import_stmt>scipy.io<as>sio<import_stmt>numpy<as>np<def_stmt>read_data path<block_start><for_stmt>i range(1 6)<block_start><if_stmt>i<eq>1<block_start>data_mat=sio.loadmat(path+"data_batch_"+str(i)+".mat")<line_sep>data=np.transpose(np.reshape(data_mat["data"] [10000 3 32 32]) [0 2 3 1])<line_sep>labels=data_mat["labels"]<block_end><else_stmt><block_start>data_mat=sio.loadmat(path+"data_batch_"+str(i)+".mat")<line_sep>temp=np.transpose(np.reshape(data_mat["data"] [10000 3 32 32]) [0 2 3 1])<line_sep>data=np.concatenate((temp data) axis=0)<line_sep>labels=np.concatenate((data_mat["labels"] labels) axis=0)<block_end><block_end><return>data labels<block_end><def_stmt>get_batch data batchsize<block_start>data_nums=data.shape[0]<line_sep>rand_select=np.random.randint(0 data_nums [batchsize])<line_sep>batch=data[rand_select]<line_sep>z=np.random.normal(0 1 [batchsize 512])<line_sep><return>batch z<block_end><def_stmt>read_face_data path<block_start>data=sio.loadmat(path)<line_sep><return>data["data"]<block_end># a, b = read_data("./dataset/")
# a = 0
|
<import_stmt>torch<import_stmt>torch.nn.utils<import_from_stmt>vel.api ModelFactory<import_from_stmt>vel.api.metrics.averaging_metric AveragingNamedMetric<import_from_stmt>vel.rl.api OptimizerAlgoBase<class_stmt>DistributionalDeepQLearning(OptimizerAlgoBase)<block_start>""" Deep Q-Learning algorithm """<def_stmt>__init__ self model_factory:ModelFactory discount_factor:float double_dqn:bool target_update_frequency:int max_grad_norm:float<block_start>super().__init__(max_grad_norm)<line_sep>self.model_factory=model_factory<line_sep>self.discount_factor=discount_factor<line_sep>self.double_dqn=double_dqn<line_sep>self.target_update_frequency=target_update_frequency<line_sep>self.target_model=<none><line_sep>self.vmin=<none><line_sep>self.vmax=<none><line_sep>self.num_atoms=<none><line_sep>self.support_atoms=<none><line_sep>self.atom_delta=<none><block_end><def_stmt>initialize self training_info model environment device<block_start>""" Initialize policy gradient from reinforcer settings """<line_sep>self.target_model=self.model_factory.instantiate(action_space=environment.action_space).to(device)<line_sep>self.target_model.load_state_dict(model.state_dict())<line_sep>self.target_model.eval()<line_sep>histogram_info=model.histogram_info()<line_sep>self.vmin=histogram_info['vmin']<line_sep>self.vmax=histogram_info['vmax']<line_sep>self.num_atoms=histogram_info['num_atoms']<line_sep>self.support_atoms=histogram_info['support_atoms']<line_sep>self.atom_delta=histogram_info['atom_delta']<block_end><def_stmt>calculate_gradient self batch_info device model rollout<block_start>""" Calculate loss of the supplied rollout """<line_sep>evaluator=model.evaluate(rollout)<line_sep>batch_size=rollout.frames()<line_sep>dones_tensor=evaluator.get('rollout:dones')<line_sep>rewards_tensor=evaluator.get('rollout:rewards')<assert_stmt>dones_tensor.dtype<eq>torch.float32<with_stmt>torch.no_grad()<block_start>target_evaluator=self.target_model.evaluate(rollout)<if_stmt>self.double_dqn# DOUBLE DQN
# Histogram gets returned as logits initially, we need to exp it before projection
<block_start>target_value_histogram_for_all_actions=target_evaluator.get('model:q_dist_next').exp()<line_sep>model_value_histogram_for_all_actions=evaluator.get('model:q_dist_next').exp()<line_sep>atoms_aligned=self.support_atoms.view(1 1 self.num_atoms)<line_sep>selected_action_indices=((atoms_aligned<times>model_value_histogram_for_all_actions).sum(dim=-1).argmax(dim=1))<line_sep># Select largest 'target' value based on action that 'model' selects
next_value_histograms=(target_value_histogram_for_all_actions[range(batch_size) selected_action_indices])<block_end><else_stmt># REGULAR DQN
# Histogram gets returned as logits initially, we need to exp it before projection
<block_start>target_value_histogram_for_all_actions=target_evaluator.get('model:q_dist_next').exp()<line_sep>atoms_aligned=self.support_atoms.view(1 1 self.num_atoms)<line_sep>selected_action_indices=((atoms_aligned<times>target_value_histogram_for_all_actions).sum(dim=-1).argmax(dim=1))<line_sep>next_value_histograms=(target_value_histogram_for_all_actions[range(batch_size) selected_action_indices])<block_end># HISTOGRAM PROJECTION CODE
forward_steps=rollout.extra_data.get('forward_steps' 1)<line_sep>atoms_projected=(rewards_tensor.unsqueeze(1)+(self.discount_factor<power>forward_steps)<times>(1-dones_tensor).unsqueeze(1)<times>self.support_atoms.unsqueeze(0))<line_sep>atoms_projected=atoms_projected.clamp(min=self.vmin max=self.vmax)<line_sep>projection_indices=(atoms_projected-self.vmin)/self.atom_delta<line_sep>index_floor=projection_indices.floor().long()<line_sep>index_ceil=projection_indices.ceil().long()<line_sep># Fix corner case when index_floor == index_ceil
index_floor[(index_ceil<g>0)<times>(index_floor<eq>index_ceil)]<augsub>1<line_sep>index_ceil[(index_floor<l>(self.num_atoms-1))<times>(index_floor<eq>index_ceil)]<augadd>1<line_sep>value_histogram_projected=torch.zeros_like(next_value_histograms)<line_sep># Following part will be a bit convoluted, in an effort to fully vectorize projection operation
# Special offset index tensor
offsets=(torch.arange(0 batch_size<times>self.num_atoms self.num_atoms).unsqueeze(1).expand(batch_size self.num_atoms).contiguous().view(-1).to(device))<line_sep># Linearize all the buffers
value_histogram_projected=value_histogram_projected.view(-1)<line_sep>index_ceil=index_ceil.view(-1)<line_sep>index_floor=index_floor.view(-1)<line_sep>projection_indices=projection_indices.view(-1)<line_sep>value_histogram_projected.index_add_(0 index_floor+offsets (next_value_histograms.view(-1)<times>(index_ceil.float()-projection_indices)))<line_sep>value_histogram_projected.index_add_(0 index_ceil+offsets (next_value_histograms.view(-1)<times>(projection_indices-index_floor.float())))<line_sep>value_histogram_projected=value_histogram_projected.reshape(next_value_histograms.shape)<block_end>q_log_histogram_selected=evaluator.get('model:action:q_dist')<line_sep># Cross-entropy loss as usual
original_losses=-(value_histogram_projected<times>q_log_histogram_selected).sum(dim=1)<if_stmt>evaluator.is_provided('rollout:weights')<block_start>weights=evaluator.get('rollout:weights')<block_end><else_stmt><block_start>weights=torch.ones_like(rewards_tensor)<block_end>loss_value=torch.mean(weights<times>original_losses)<line_sep>loss_value.backward()<with_stmt>torch.no_grad()<block_start>mean_q_model=(self.support_atoms.unsqueeze(0)<times>torch.exp(q_log_histogram_selected)).sum(dim=1).mean()<line_sep>mean_q_target=(self.support_atoms.unsqueeze(0)<times>value_histogram_projected).sum(dim=1).mean()<block_end><return>{'loss':loss_value.item() # We need it to update priorities in the replay buffer:
'errors':original_losses.detach().cpu().numpy() 'average_q_selected':mean_q_model.item() 'average_q_target':mean_q_target.item()}<block_end><def_stmt>post_optimization_step self batch_info device model rollout<block_start>""" Steps to take after optimization has been done"""<if_stmt>batch_info.aggregate_batch_number%self.target_update_frequency<eq>0<block_start>self.target_model.load_state_dict(model.state_dict())<line_sep>self.target_model.eval()<block_end><block_end><def_stmt>metrics self<arrow>list<block_start>""" List of metrics to track for this learning process """<line_sep><return>[AveragingNamedMetric("loss") AveragingNamedMetric("average_q_selected") AveragingNamedMetric("average_q_target") AveragingNamedMetric("grad_norm") ]<block_end><block_end><def_stmt>create model:ModelFactory discount_factor:float target_update_frequency:int max_grad_norm:float double_dqn:bool=<false><block_start>""" Vel factory function """<line_sep><return>DistributionalDeepQLearning(model_factory=model discount_factor=discount_factor double_dqn=double_dqn target_update_frequency=target_update_frequency max_grad_norm=max_grad_norm)<block_end> |
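The vectorized projection above can be hard to follow, so here is a small, unvectorized NumPy sketch of the same categorical (C51-style) projection step; the function name, argument shapes and the plain double loop are illustrative and not part of this module.

```python
import numpy as np

def project_distribution(p_next, rewards, dones, support, gamma):
    """Unvectorized sketch: project r + gamma * z onto the fixed, evenly spaced support."""
    vmin, vmax = support[0], support[-1]
    delta = support[1] - support[0]
    # Projected atom locations, one row per sample, clamped to [vmin, vmax].
    tz = np.clip(rewards[:, None] + gamma * (1.0 - dones[:, None]) * support[None, :],
                 vmin, vmax)
    b = (tz - vmin) / delta                          # fractional bin index of each atom
    lo, hi = np.floor(b).astype(int), np.ceil(b).astype(int)
    m = np.zeros_like(p_next)
    for i in range(p_next.shape[0]):
        for j in range(p_next.shape[1]):
            if lo[i, j] == hi[i, j]:                 # atom lands exactly on a bin
                m[i, lo[i, j]] += p_next[i, j]
            else:                                    # split mass between neighbouring bins
                m[i, lo[i, j]] += p_next[i, j] * (hi[i, j] - b[i, j])
                m[i, hi[i, j]] += p_next[i, j] * (b[i, j] - lo[i, j])
    return m
```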
<class_stmt>Solution<block_start><def_stmt>getSum self a:int b:int<arrow>int# 32-bit integer max and min
<block_start>MAX=0x7FFFFFFF<line_sep>MIN=0x80000000<line_sep>mask=0xFFFFFFFF<while_stmt>b<ne>0<block_start>carry=a&b<line_sep>a,b=(a^b)&mask (carry<lshift>1)&mask<block_end><return>a<if>a<le>MAX<else>~(a^mask)<block_end><block_end> |
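Two worked calls that may help clarify the masked carry loop above; the inputs are arbitrary.

```python
# Worked examples for the masked carry loop above (inputs are arbitrary).
if __name__ == "__main__":
    s = Solution()
    print(s.getSum(3, 5))   # 8
    print(s.getSum(-1, 1))  # 0: 0xFFFFFFFF + 1 wraps to 0 under the 32-bit mask
```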
<import_stmt>os<import_stmt>sys<import_stmt>pkg_resources<line_sep>pkg=sys.argv[1]<line_sep>prefix=sys.argv[2]<line_sep>dist=pkg_resources.get_distribution(pkg)<if_stmt>dist.has_metadata('RECORD')<block_start><for_stmt>line dist.get_metadata_lines('RECORD')<block_start>print(os.path.join(dist.location line.split(',')[0]))<block_end><block_end><elif_stmt>dist.has_metadata('installed-files.txt')<block_start><for_stmt>line dist.get_metadata_lines('installed-files.txt')<block_start>print(os.path.join(dist.egg_info line.split(',')[0]))<block_end><block_end><elif_stmt>dist.has_metadata('entry_points.txt')<block_start><try_stmt><block_start><import_from_stmt>ConfigParser SafeConfigParser<import_from_stmt>StringIO StringIO<block_end><except_stmt>ImportError<block_start><import_from_stmt>configparser SafeConfigParser<import_from_stmt>io StringIO<block_end>parser=SafeConfigParser()<line_sep>parser.readfp(StringIO('\n'.join(dist.get_metadata_lines('entry_points.txt'))))<if_stmt>parser.has_section('console_scripts')<block_start><for_stmt>name,_ parser.items('console_scripts')<block_start>print(os.path.join(prefix name))<block_end><block_end><block_end> |
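A hypothetical command line for the script above; the script file name and the prefix are placeholders. The first argument is an installed distribution name, the second is the prefix joined onto console-script names when only entry_points metadata is available.

```python
# Hypothetical invocation (script name and prefix are placeholders):
#   python list_installed_files.py requests /usr/local/bin
# Prints one absolute path per line, taken from RECORD, installed-files.txt,
# or, failing those, console_scripts entries joined onto the given prefix.
```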
<import_stmt>os sys<import_stmt>time<import_stmt>torch<import_stmt>numpy<as>np<line_sep>sys.path.append(os.path.dirname(os.path.abspath(__file__)))<import_from_stmt>vis_utils get_vis_depth get_vis_mask get_vis_normal<import_stmt>copy<import_stmt>cv2<import_stmt>matplotlib<as>mpl<line_sep>mpl.use('Agg')<import_stmt>matplotlib.pyplot<as>plt<import_stmt>matplotlib.cm<as>cm<import_from_stmt>PIL Image<as>pil<import_stmt>pickle<def_stmt>print_loss_pack loss_pack name<block_start>loss_depth,loss_mask_gt,loss_mask_out,loss_normal,loss_l2reg=loss_pack['depth'] loss_pack['mask_gt'] loss_pack['mask_out'] loss_pack['normal'] loss_pack['l2reg']<if_stmt>len(loss_depth.shape)<eq>1<block_start>loss_mask_gt,loss_mask_out,loss_depth,loss_normal,loss_l2reg=loss_mask_gt.mean() loss_mask_out.mean() loss_depth.mean() loss_normal.mean() loss_l2reg.mean()<block_end>print('NAME = [{0}] -- loss_depth: {1:.4f}, loss_mask_gt: {2:.4f}, loss_mask_out: {3:.4f}, loss_normal: {4:.4f}, loss_l2reg: {5:.4f}'.format(name loss_depth.detach().cpu().numpy() loss_mask_gt.detach().cpu().numpy() loss_mask_out.detach().cpu().numpy() loss_normal.detach().cpu().numpy() loss_l2reg.detach().cpu().numpy()))<block_end><def_stmt>print_loss_pack_color loss_pack name<block_start>loss_color,loss_depth,loss_mask_gt,loss_mask_out,loss_normal,loss_l2reg,loss_l2reg_c=loss_pack['color'] loss_pack['depth'] loss_pack['mask_gt'] loss_pack['mask_out'] loss_pack['normal'] loss_pack['l2reg'] loss_pack['l2reg_c']<line_sep>print('NAME = [{0}] -- loss_color: {1:.4f}, loss_depth: {2:.4f}, loss_mask_gt: {3:.4f}, loss_mask_out: {4:.4f}, loss_normal: {5:.4f}, loss_l2reg: {6:.4f}, loss_l2re_cg: {7:.4f}'.format(name loss_color.detach().cpu().numpy() loss_depth.detach().cpu().numpy() loss_mask_gt.detach().cpu().numpy() loss_mask_out.detach().cpu().numpy() loss_normal.detach().cpu().numpy() loss_l2reg.detach().cpu().numpy() loss_l2reg_c.detach().cpu().numpy()))<block_end><def_stmt>demo_color_save_render_output prefix sdf_renderer shape_code color_code camera lighting_loc=<none> profile=<false><block_start>R,T=camera.extrinsic[: :3] camera.extrinsic[: 3]<line_sep>R,T=torch.from_numpy(R).float().cuda() torch.from_numpy(T).float().cuda()<line_sep>R.requires_grad,T.requires_grad=<false> <false><if_stmt>lighting_loc<is><not><none><block_start>lighting_locations=torch.from_numpy(lighting_loc).float().unsqueeze(0).cuda()<block_end><else_stmt><block_start>lighting_locations=<none><block_end>render_output=sdf_renderer.render(color_code shape_code R T profile=profile no_grad=<true> lighting_locations=lighting_locations)<line_sep>depth_rendered,normal_rendered,color_rgb,valid_mask_rendered,min_sdf_sample=render_output<line_sep>data={}<line_sep>data['depth']=depth_rendered.detach().cpu().numpy()<line_sep>data['normal']=normal_rendered.detach().cpu().numpy()<line_sep>data['mask']=valid_mask_rendered.detach().cpu().numpy()<line_sep>data['color']=color_rgb.detach().cpu().numpy()<line_sep>data['min_sdf_sample']=min_sdf_sample.detach().cpu().numpy()<line_sep>data['latent_tensor']=shape_code.detach().cpu().numpy()<line_sep>data['K']=sdf_renderer.get_intrinsic()<line_sep>data['RT']=torch.cat([R T[: <none>]] 1).detach().cpu().numpy()<line_sep>fname=prefix+'_info.pkl'<with_stmt>open(fname 'wb')<as>f<block_start>pickle.dump(data f)<block_end>img_hw=sdf_renderer.get_img_hw()<line_sep>visualizer=Visualizer(img_hw)<line_sep>print('Writing to prefix: {}'.format(prefix))<line_sep>visualizer.visualize_depth(prefix+'_depth.png' depth_rendered.detach().cpu().numpy() 
valid_mask_rendered.detach().cpu().numpy())<line_sep>visualizer.visualize_normal(prefix+'_normal.png' normal_rendered.detach().cpu().numpy() valid_mask_rendered.detach().cpu().numpy() bgr2rgb=<true>)<line_sep>visualizer.visualize_mask(prefix+'_silhouette.png' valid_mask_rendered.detach().cpu().numpy())<line_sep>cv2.imwrite(prefix+'_rendered_rgb.png' color_rgb.detach().cpu().numpy()<times>255)<block_end><class_stmt>Visualizer(object)<block_start><def_stmt>__init__ self img_hw dmin=0.0 dmax=10.0<block_start>self.img_h,self.img_w=img_hw[0] img_hw[1]<line_sep>self.data={}<line_sep>self.dmin,self.dmax=dmin dmax<line_sep>self.loss_counter=0<line_sep>self.loss_curve={}<line_sep>self.loss_list=[]<line_sep>self.chamfer_list=[]<block_end><def_stmt>get_data self data_name<block_start><if_stmt>data_name<in>self.data.keys()<block_start><return>self.data[data_name]<block_end><else_stmt><block_start><raise>ValueError('Key {0} does not exist.'.format(data_name))<block_end><block_end><def_stmt>set_data self data<block_start>self.data=data<block_end><def_stmt>reset_data self<block_start>self.data={}<line_sep>keys=['mask_gt' 'mask_output' 'loss_mask_gt' 'loss_mask_out' 'depth_gt' 'depth_output' 'loss_depth' 'normal_gt' 'normal_output' 'loss_normal']<for_stmt>key keys<block_start>self.data[key]=np.zeros((64 64))<block_end><block_end><def_stmt>reset_loss_curve self<block_start>self.loss_counter=0<line_sep>self.loss_curve={}<block_end><def_stmt>reset_all self<block_start>self.reset_data()<line_sep>self.reset_loss_curve()<block_end><def_stmt>add_loss_from_pack self loss_pack<block_start>'''
potential properties:
['mask_gt', 'mask_out', 'depth', 'normal', 'l2reg']
'''<line_sep>loss_name_list=list(loss_pack.keys())<if_stmt>self.loss_curve<eq>{}<block_start><for_stmt>loss_name loss_name_list<block_start>self.loss_curve[loss_name]=[]<block_end><block_end><for_stmt>loss_name loss_name_list<block_start>loss_value=loss_pack[loss_name].detach().cpu().numpy()<line_sep>self.loss_curve[loss_name].append(loss_value)<block_end>self.loss_counter=self.loss_counter+1<block_end><def_stmt>add_loss self loss<block_start>self.loss_list.append(loss.detach().cpu().numpy())<block_end><def_stmt>add_chamfer self chamfer<block_start>self.chamfer_list.append(chamfer)<block_end><def_stmt>add_data self data_name data_src data_mask=<none><block_start>'''
potential properties:
mask: ['mask_gt', 'mask_output', 'loss_mask_gt', 'loss_mask_out']
depth: ['depth_gt', 'depth_output', 'loss_depth']
normal: ['normal_gt', 'normal_output', 'loss_normal']
'''<if_stmt>data_mask<is><none><block_start>self.data[data_name]=data_src<block_end><else_stmt><block_start>data_map=np.zeros(data_mask.shape)<line_sep>data_map[data_mask<ne>0]=data_src<line_sep>self.data[data_name]=data_map<block_end><block_end><def_stmt>save_depth self fname depth_vis cmap='magma' direct=<false><block_start><if_stmt>direct<block_start>cv2.imwrite(fname depth_vis)<line_sep><return>0<block_end>vmin,vmax=0 255<line_sep>normalizer=mpl.colors.Normalize(vmin=vmin vmax=vmax)<line_sep>mapper=cm.ScalarMappable(norm=normalizer cmap=cmap)<line_sep>colormapped_im=(mapper.to_rgba(depth_vis)[: : :3]<times>255).astype(np.uint8)<line_sep>im=pil.fromarray(colormapped_im)<line_sep>im.save(fname)<block_end><def_stmt>save_mask self fname mask_vis bgr2rgb=<false><block_start><if_stmt>bgr2rgb<block_start>mask_vis=cv2.cvtColor(mask_vis cv2.COLOR_BGR2RGB)<block_end>cv2.imwrite(fname mask_vis)<block_end><def_stmt>save_normal self fname normal_vis bgr2rgb=<false><block_start><if_stmt>bgr2rgb<block_start>normal_vis=cv2.cvtColor(normal_vis cv2.COLOR_BGR2RGB)<block_end>cv2.imwrite(fname normal_vis)<block_end><def_stmt>save_error self fname error_vis bgr2rgb=<false><block_start>self.save_depth(fname error_vis cmap='jet')<block_end><def_stmt>visualize_depth self fname depth mask=<none># depth_vis = get_vis_depth(depth, mask=mask, dmin=self.dmin, dmax=self.dmax)
<block_start>depth_vis=get_vis_depth(depth mask=mask)<line_sep># self.save_depth(fname, depth_vis)
cv2.imwrite(fname depth_vis)<block_end><def_stmt>visualize_normal self fname normal mask=<none> bgr2rgb=<false><block_start>normal_vis=get_vis_normal(normal mask=mask)<if_stmt>bgr2rgb<block_start>normal_vis=cv2.cvtColor(normal_vis cv2.COLOR_BGR2RGB)<block_end>cv2.imwrite(fname normal_vis)<block_end><def_stmt>visualize_mask self fname mask bgr2rgb=<false><block_start>mask_vis=get_vis_mask(mask)<if_stmt>bgr2rgb<block_start>mask_vis=cv2.cvtColor(mask_vis cv2.COLOR_BGR2RGB)<block_end>cv2.imwrite(fname mask_vis)<block_end><def_stmt>imshow self ax img title=<none><block_start>ax.imshow(img)<line_sep>ax.axis('off')<if_stmt>title<is><not><none><block_start>ax.set_title(title)<block_end><block_end><def_stmt>imshow_bgr2rgb self ax img title=<none><block_start><if_stmt>len(img.shape)<eq>3<block_start>img=cv2.cvtColor(img cv2.COLOR_BGR2RGB)<block_end>ax.imshow(img)<line_sep>ax.axis('off')<if_stmt>title<is><not><none><block_start>ax.set_title(title)<block_end><block_end><def_stmt>show_loss_curve self fname<block_start><pass><block_end><def_stmt>show_all_data_3x4 self fname<block_start>fig,axs=plt.subplots(3 4 figsize=(30 30))<line_sep># first row, groundtruth
depth_gt_vis=get_vis_depth(self.data['depth_gt'] mask=self.data['mask_gt'] dmin=self.dmin dmax=self.dmax)<line_sep>self.imshow_bgr2rgb(axs[0 0] 255-depth_gt_vis title='depth gt')<line_sep>normal_gt_vis=get_vis_normal(self.data['normal_gt'] mask=self.data['mask_gt'])<line_sep>self.imshow(axs[0 1] normal_gt_vis title='normal gt')<line_sep>mask_gt_vis=get_vis_mask(self.data['mask_gt'])<line_sep>self.imshow_bgr2rgb(axs[0 2] 255-mask_gt_vis title='mask gt')<line_sep>axs[0 3].axis('off')<line_sep># second row, output
depth_output_vis=get_vis_depth(self.data['depth_output'] mask=self.data['mask_output'] dmin=self.dmin dmax=self.dmax)<line_sep>self.imshow_bgr2rgb(axs[1 0] 255-depth_output_vis title='depth output')<line_sep>normal_output_vis=get_vis_normal(self.data['normal_output'] mask=self.data['mask_output'])<line_sep>self.imshow(axs[1 1] normal_output_vis title='normal output')<line_sep>mask_output_vis=get_vis_mask(self.data['mask_output'])<line_sep>self.imshow_bgr2rgb(axs[1 2] 255-mask_output_vis title='mask output')<line_sep>axs[1 3].axis('off')<line_sep># third row, loss
valid_mask=np.logical_and(self.data['mask_gt'] self.data['mask_output'])<line_sep>loss_depth_vis=get_vis_depth(np.abs(self.data['loss_depth']) valid_mask dmin=0.0 dmax=0.5)<line_sep>self.imshow_bgr2rgb(axs[2 0] 255-loss_depth_vis title='depth loss')<line_sep>loss_normal_vis=get_vis_depth(self.data['loss_normal'] valid_mask dmin=-1.0 dmax=0.0)<line_sep>self.imshow_bgr2rgb(axs[2 1] 255-loss_normal_vis title='normal loss')<line_sep>loss_mask_gt_vis=get_vis_mask(np.abs(self.data['loss_mask_gt'])<g>0)<line_sep>self.imshow_bgr2rgb(axs[2 2] 255-loss_mask_gt_vis title='gt \ output')<line_sep>loss_mask_out_vis=get_vis_mask(np.abs(self.data['loss_mask_out'])<g>0)<line_sep>self.imshow_bgr2rgb(axs[2 3] 255-loss_mask_out_vis title='output \ gt')<line_sep># savefig
fig.savefig(fname)<line_sep>plt.close('all')<block_end><def_stmt>save_all_data self prefix# groundtruth
<block_start>depth_gt_vis=get_vis_depth(self.data['depth_gt'] mask=self.data['mask_gt'] dmin=self.dmin dmax=self.dmax)<line_sep>self.save_depth(prefix+'_depth_gt.png' depth_gt_vis cmap='magma' direct=<true>)<line_sep>normal_gt_vis=get_vis_normal(self.data['normal_gt'] mask=self.data['mask_gt'])<line_sep>self.save_normal(prefix+'_normal_gt.png' normal_gt_vis bgr2rgb=<true>)<line_sep>mask_gt_vis=get_vis_mask(self.data['mask_gt'])<line_sep>self.save_mask(prefix+'_mask_gt.png' mask_gt_vis)<line_sep># output
depth_output_vis=get_vis_depth(self.data['depth_output'] mask=self.data['mask_output'] dmin=self.dmin dmax=self.dmax)<line_sep>self.save_depth(prefix+'_depth_output.png' depth_output_vis cmap='magma' direct=<true>)<line_sep>normal_output_vis=get_vis_normal(self.data['normal_output'] mask=self.data['mask_output'])<line_sep>self.save_normal(prefix+'_normal_output.png' normal_output_vis bgr2rgb=<true>)<line_sep>mask_output_vis=get_vis_mask(self.data['mask_output'])<line_sep>self.save_mask(prefix+'_mask_output.png' mask_output_vis)<line_sep># third row, loss
valid_mask=np.logical_and(self.data['mask_gt'] self.data['mask_output'])<line_sep>loss_depth_vis=get_vis_depth(np.abs(self.data['loss_depth']) valid_mask dmin=0.0 dmax=0.5 bg_color=0)<line_sep>self.save_error(prefix+'_depth_loss.png' loss_depth_vis bgr2rgb=<true>)<line_sep>loss_normal_vis=get_vis_depth(self.data['loss_normal'] valid_mask dmin=-1.0 dmax=0.0 bg_color=0)<line_sep>self.save_error(prefix+'_normal_loss.png' loss_normal_vis bgr2rgb=<true>)<line_sep>loss_mask_gt_vis=get_vis_depth(np.abs(self.data['loss_mask_gt']) bg_color=0)<line_sep>self.save_error(prefix+'_mask_gt_loss.png' loss_mask_gt_vis bgr2rgb=<true>)<line_sep>loss_mask_out_vis=get_vis_depth(np.abs(self.data['loss_mask_out']) bg_color=0)<line_sep>self.save_error(prefix+'_mask_out_loss.png' loss_mask_out_vis bgr2rgb=<true>)<line_sep>self.save_error(prefix+'_mask_loss.png' loss_mask_gt_vis+loss_mask_out_vis bgr2rgb=<true>)<block_end><def_stmt>dump_all_data self fname<block_start><with_stmt>open(fname 'wb')<as>f<block_start>pickle.dump({'data':self.data 'loss_curve':self.loss_curve 'loss_list':self.loss_list 'chamfer_list':self.chamfer_list} f)<block_end><block_end><def_stmt>show_all_data self fname<block_start>self.show_all_data_3x4(fname)<line_sep># self.save_all_data(fname[:-4])
<block_end><def_stmt>show_all_data_color self fname<block_start>fig,axs=plt.subplots(3 4 figsize=(30 30))<line_sep># first row, groundtruth
depth_gt_vis=get_vis_depth(self.data['depth_gt'] mask=self.data['mask_gt'] dmin=self.dmin dmax=self.dmax)<line_sep>self.imshow_bgr2rgb(axs[0 0] depth_gt_vis title='depth gt')<line_sep>normal_gt_vis=get_vis_normal(self.data['normal_gt'])<line_sep>self.imshow_bgr2rgb(axs[0 1] normal_gt_vis title='normal gt')<line_sep>mask_gt_vis=get_vis_mask(self.data['mask_gt'])<line_sep>self.imshow_bgr2rgb(axs[0 2] mask_gt_vis title='mask gt')<line_sep>self.imshow_bgr2rgb(axs[0 3] self.data['color_gt'] title='rgb gt')<line_sep># second row, output
depth_output_vis=get_vis_depth(self.data['depth_output'] mask=self.data['mask_output'] dmin=self.dmin dmax=self.dmax)<line_sep>self.imshow_bgr2rgb(axs[1 0] depth_output_vis title='depth output')<line_sep>normal_output_vis=get_vis_normal(self.data['normal_output'])<line_sep>self.imshow_bgr2rgb(axs[1 1] normal_output_vis title='normal output')<line_sep>mask_output_vis=get_vis_mask(self.data['mask_output'])<line_sep>self.imshow_bgr2rgb(axs[1 2] mask_output_vis title='mask output')<line_sep>self.imshow_bgr2rgb(axs[1 3] self.data['color_output'] title='rgb output')<line_sep># third row, loss
valid_mask=np.logical_and(self.data['mask_gt'] self.data['mask_output'])<line_sep>loss_depth_vis=get_vis_depth(np.abs(self.data['loss_depth']) valid_mask dmin=0.0 dmax=0.5)<line_sep>self.imshow_bgr2rgb(axs[2 0] loss_depth_vis title='depth loss')<line_sep>loss_normal_vis=get_vis_depth(self.data['loss_normal'] valid_mask dmin=-1.0 dmax=0.0)<line_sep>self.imshow_bgr2rgb(axs[2 1] loss_normal_vis title='normal loss')<line_sep>loss_mask_gt_vis=get_vis_mask(np.abs(self.data['loss_mask_gt'])<g>0)<line_sep>loss_mask_out_vis=get_vis_mask(np.abs(self.data['loss_mask_out'])<g>0)<line_sep>loss_mask_gt_vis<augadd>loss_mask_out_vis<line_sep>self.imshow_bgr2rgb(axs[2 2] loss_mask_gt_vis title='mask loss')<line_sep>self.imshow_bgr2rgb(axs[2 3] self.data['loss_color'] title='rgb loss')<line_sep># savefig
fig.savefig(fname)<line_sep>plt.close('all')<block_end><def_stmt>return_output_data_color self<block_start><return>self.data['color_output'] self.data['depth_output'] self.data['normal_output'] self.data['mask_output']<block_end><def_stmt>show_all_data_color_multi self fname num_img=4<block_start>fig,axs=plt.subplots(3 2<times>num_img figsize=(8<times>2<times>num_img 25))<for_stmt>i range(num_img)# first row, ground truth
<block_start>self.imshow_bgr2rgb(axs[0 2<times>i] self.data['color_gt-{}'.format(i)] title='rgb gt {}'.format(i))<line_sep>mask_gt_vis=get_vis_mask(self.data['mask_gt-{}'.format(i)])<line_sep>self.imshow_bgr2rgb(axs[0 2<times>i+1] mask_gt_vis title='mask gt {}'.format(i))<line_sep># second row, output
self.imshow_bgr2rgb(axs[1 2<times>i] self.data['color_output-{}'.format(i)] title='rgb output {}'.format(i))<line_sep>mask_output_vis=get_vis_mask(self.data['mask_output-{}'.format(i)])<line_sep>self.imshow_bgr2rgb(axs[1 2<times>i+1] mask_output_vis title='mask output {}'.format(i))<line_sep># third row, loss
self.imshow_bgr2rgb(axs[2 2<times>i] self.data['loss_color-{}'.format(i)] title='rgb loss {}'.format(i))<line_sep>loss_mask_gt_vis=get_vis_mask(np.abs(self.data['loss_mask_gt-{}'.format(i)])<g>0)<line_sep>loss_mask_out_vis=get_vis_mask(np.abs(self.data['loss_mask_out-{}'.format(i)])<g>0)<line_sep>loss_mask_gt_vis<augadd>loss_mask_out_vis<line_sep>self.imshow_bgr2rgb(axs[2 2<times>i+1] loss_mask_gt_vis title='mask loss {}'.format(i))<block_end># savefig
plt.subplots_adjust(top=0.95 right=0.99 left=0.01 bottom=0.01 wspace=0.05 hspace=0.1)<line_sep>fig.savefig(fname)<line_sep>plt.close('all')<block_end><def_stmt>show_all_data_color_warp self fname<block_start>fig,axs=plt.subplots(1 5 figsize=(15 3.4))<line_sep>self.imshow_bgr2rgb(axs[0] self.data['color_gt-1'] title='view 1')<line_sep>self.imshow_bgr2rgb(axs[1] self.data['color_gt-2'] title='view 2')<line_sep>self.imshow_bgr2rgb(axs[2] self.data['color_valid-1'] title='valid region in view 1')<line_sep>self.imshow_bgr2rgb(axs[3] self.data['color_valid-2'] title='warped color from view 2')<line_sep>self.imshow_bgr2rgb(axs[4] self.data['color_valid_loss'] title='color loss')<line_sep># savefig
plt.subplots_adjust(top=0.99 right=0.99 left=0.01 bottom=0.00 wspace=0.05 hspace=0)<line_sep>fig.savefig(fname)<line_sep>plt.close('all')<block_end><block_end> |
<import_from_stmt>pylayers.antprop.antenna *<import_from_stmt>numpy *<import_from_stmt>matplotlib.pylab *<line_sep>kf=30<line_sep>A=Antenna('S2R3.vsh3' 'ant')<line_sep>phi=linspace(0 2<times>pi 180)<line_sep>theta=array([1.57])<line_sep>Fth,Fph=A.pattern(theta phi)<line_sep>polar(phi abs(Fth[kf 0 :]) phi abs(Fph[kf 0 :]))<line_sep>B=Antenna('S2R3.mat' 'ant/UWBAN/Matfile')<line_sep>polar(B.phi abs(B.Ftheta[kf 45 :]) B.phi abs(B.Fphi[kf 45 :]))<line_sep>legend((u'$F_{\\theta}^{vsh}$' u'$F_{\phi}^{vsh}$' u'$F_{\\theta}^{original}$' u'$F_{\phi}^{original}$') loc='best')<line_sep>t=title('$\\theta=\\frac{\pi}{2}$'+', f = '+str(A.fa[kf])[0:6]+' GHz')<line_sep>t.set_fontsize(18)<line_sep>savefig('polarvsh3.png')<line_sep>show()<line_sep> |
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensure directories and files are staged in the chrooted guest to match
what's expected by run_in_chroot.sh
Benefits:
1. Avoid symlinks in the chroot that point outside of it.
2. Provide useful error messages prior to running chroot script.
"""<import_stmt>logging<import_from_stmt>pathlib Path<import_stmt>typing<def_stmt>is_file expected_file:Path substring=''<arrow>typing.List[str]<block_start>"""Assert that expected_file exists. If substring is provided,
assert that the file contains it.
Returns:
A list of errors found, or empty if successful.
"""<line_sep>logger=logging.getLogger('_is_file {}'.format(expected_file))<line_sep>errs=[]<try_stmt><block_start>actual_content=expected_file.read_text()<line_sep>logger.debug('content: {}'.format(substring))<if_stmt>substring<and>substring<not><in>actual_content<block_start>errs.append('{content} not found in {fname}'.format(content=substring fname=expected_file))<block_end><block_end><except_stmt>BaseException<as>e<block_start>logger.debug(e)<line_sep>errs.append('File not found: {}'.format(expected_file))<block_end><return>errs<block_end><def_stmt>is_non_empty_dir expected_dir:Path<arrow>typing.List[str]<block_start>"""Assert that directory exists, and that it's not empty.
Returns:
A list of errors found, or empty if successful.
"""<line_sep>errs=[]<if_stmt>expected_dir.is_dir()<block_start><for_stmt>child expected_dir.iterdir()<block_start><if_stmt>child.is_file()<or>(child.is_dir()<and>len(is_non_empty_dir(child))<eq>0)<block_start><return>errs<block_end><block_end>errs.append('Directory is empty: {}'.format(expected_dir))<block_end><else_stmt><block_start>errs.append('Directory not found: {}'.format(expected_dir))<block_end><return>errs<block_end><def_stmt>check_root fs_root:Path runtime_dir:Path check_os_mounts=<true><arrow>typing.List[str]<block_start>"""Assert that the filesystem rooted at fs_root follows
the layout expected by run_in_chroot.sh.
Args:
fs_root: Directory to consider root of filesystem.
runtime_dir: Location where run_in_chroot.sh expects
its runtime dependencies.
check_os_mounts: Whether to check mounts such as dev, proc, sys.
Returns:
A list of errors found, or empty if successful.
"""<line_sep>checks=[<lambda>:is_file(runtime_dir/'cloud_product.txt' substring='sle-module-public-cloud') <lambda>:is_file(runtime_dir/'post_convert_packages.txt') <lambda>:is_file(runtime_dir/'run_in_chroot.sh' substring='#!/usr/bin/env bash') <lambda>:is_non_empty_dir(runtime_dir/'pre_convert_py') <lambda>:is_non_empty_dir(runtime_dir/'pre_convert_rpm') <lambda>:is_file(fs_root/'etc/hosts' substring='metadata.google.internal') <lambda>:is_file(fs_root/'etc/resolv.conf' substring='google.internal') ]<if_stmt>check_os_mounts<block_start>checks<augadd>[<lambda>:is_non_empty_dir(fs_root/'dev') <lambda>:is_non_empty_dir(fs_root/'proc') <lambda>:is_non_empty_dir(fs_root/'sys') ]<block_end>errs=[]<for_stmt>c checks<block_start>errs<augadd>c()<block_end><return>errs<block_end> |
# -*- coding: utf-8 -*-
<import_from_stmt>..expr *<line_sep>def_Topic(Title("Gaussian quadrature") Section("Gauss-Legendre quadrature") SeeTopics("Legendre polynomials") Entries("0745ee" # Legendre polynomial zeros
"ea4754" # weights
"47b181" # -1,1
"545987" # a,b
) )<line_sep>make_entry(ID("ea4754") Formula(Equal(GaussLegendreWeight(n k) 2/((1-LegendrePolynomialZero(n k)<power>2)<times>ComplexDerivative(LegendrePolynomial(n t) For(t LegendrePolynomialZero(n k) 1))<power>2))) Variables(n k) Assumptions(And(Element(n ZZGreaterEqual(1)) Element(k Range(1 n)))))<line_sep>make_entry(ID("47b181") Formula(Where(LessEqual(Abs(Integral(f(t) For(t -1 1))-Sum(GaussLegendreWeight(n k)<times>f(LegendrePolynomialZero(n k)) For(k 1 n))) 64<times>M/(15<times>(1-rho<power>-2)<times>rho<power>(2<times>n))) Equal(M Supremum(Abs(f(t)) ForElement(t BernsteinEllipse(rho)))))) Variables(f n rho) Assumptions(And(Element(n ZZGreaterEqual(1)) Element(rho RR) Greater(rho 1) IsHolomorphic(f(z) ForElement(z InteriorClosure(BernsteinEllipse(rho)))))) References("<NAME>, Is Gauss Quadrature Better than Clenshaw-Curtis? SIAM Rev., 50(1), 67-87. DOI:10.1137/060659831"))<line_sep>make_entry(ID("545987") Formula(Where(LessEqual(Abs(Integral(f(t) For(t a b))-(b-a)/2<times>Sum(GaussLegendreWeight(n k)<times>f((b-a)/2<times>LegendrePolynomialZero(n k)+(a+b)/2) For(k 1 n))) (Abs(b-a)/2)<times>(64<times>M/(15<times>(1-rho<power>-2)<times>rho<power>(2<times>n)))) Equal(M Supremum(Abs(f((b-a)/2<times>t+(a+b)/2)) ForElement(t BernsteinEllipse(rho)))))) Variables(f a b n rho) Assumptions(And(Element(a CC) Element(b CC) Element(n ZZGreaterEqual(1)) Element(rho RR) Greater(rho 1) IsHolomorphic(f(z) ForElement(z Subset(InteriorClosure(BernsteinEllipse(rho))))))) References("<NAME>, Is Gauss Quadrature Better than Clenshaw-Curtis? SIAM Rev., 50(1), 67-87. DOI:10.1137/060659831"))<line_sep> |
<import_stmt>pandas<as>pd<line_sep>df=pd.read_csv('data/src/sample_pandas_normal.csv' index_col=0)<line_sep>print(df)<line_sep># age state point
# name
# Alice 24 NY 64
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# Ellen 24 CA 88
# Frank 30 NY 57
print(df.index)<line_sep># Index(['Alice', 'Bob', 'Charlie', 'Dave', 'Ellen', 'Frank'], dtype='object', name='name')
print(df.index.str.contains('li'))<line_sep># [ True False True False False False]
print(df[df.index.str.contains('li')])<line_sep># age state point
# name
# Alice 24 NY 64
# Charlie 18 CA 70
print(df.index.str.endswith('e'))<line_sep># [ True False True True False False]
print(df[df.index.str.endswith('e')])<line_sep># age state point
# name
# Alice 24 NY 64
# Charlie 18 CA 70
# Dave 68 TX 70
print(df.columns)<line_sep># Index(['age', 'state', 'point'], dtype='object')
print(df.columns.str.endswith('e'))<line_sep># [ True True False]
print(df.loc[: df.columns.str.endswith('e')])<line_sep># age state
# name
# Alice 24 NY
# Bob 42 CA
# Charlie 18 CA
# Dave 68 TX
# Ellen 24 CA
# Frank 30 NY
print(df.iloc[: df.columns.str.endswith('e')])<line_sep># age state
# name
# Alice 24 NY
# Bob 42 CA
# Charlie 18 CA
# Dave 68 TX
# Ellen 24 CA
# Frank 30 NY
print(df.loc[df.index.str.contains('li') df.columns.str.endswith('e')])<line_sep># age state
# name
# Alice 24 NY
# Charlie 18 CA
|
<import_stmt>settings<import_stmt>tweepy<import_stmt>base64<import_stmt>hashlib<import_stmt>hmac<import_stmt>simplejson<as>json<import_from_stmt>facepy SignedRequest GraphAPI<import_from_stmt>django.http HttpResponse HttpResponseRedirect<import_from_stmt>social_auth.models UserSocialAuth<def_stmt>twitter_get_auth_url request<block_start>auth=tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY settings.TWITTER_CONSUMER_SECRET)<line_sep>auth_url=auth.get_authorization_url()<line_sep>request.session['request_token']=(auth.request_token.key auth.request_token.secret)<line_sep><return>HttpResponseRedirect(auth_url)<block_end><def_stmt>check_facebook_connection user<block_start>"""Checks facebook connection exists with the app if not,
then returns False
"""<try_stmt><block_start>user_social=UserSocialAuth.objects.get(user=user provider='facebook')<line_sep>extra_data=eval(str(user_social.extra_data))<line_sep>access_token=extra_data['access_token']<line_sep>graph=GraphAPI(access_token)<try_stmt><block_start>graph=graph.get('me/')<block_end><except_stmt>GraphAPI.Error<block_start>connected=<false><block_end><else_stmt><block_start>connected=<true><block_end><block_end><except_stmt>UserSocialAuth.DoesNotExist<block_start>connected=<false><block_end><return>connected<block_end><def_stmt>check_twitter_connection user<block_start>"""Checks twitter connection exists with the app if not,
then returns False
"""<try_stmt><block_start>user_social=UserSocialAuth.objects.get(user=user provider='twitter')<line_sep>extra_data=eval(str(user_social.extra_data))<line_sep>access_tokens=extra_data['access_token']<line_sep>access_token_list=access_tokens.split('oauth_token_secret=')[1].split('&oauth_token=')<line_sep>secret=access_token_list[0]<line_sep>key=access_token_list[1]<line_sep>auth=tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY settings.TWITTER_CONSUMER_SECRET)<line_sep>auth.set_access_token(key secret)<line_sep>api=tweepy.API(auth)<line_sep>connected=api.verify_credentials()<block_end><except_stmt>UserSocialAuth.DoesNotExist<block_start>connected=<false><block_end><if_stmt>connected<block_start>connected=<true><block_end><return>connected<block_end><def_stmt>base64_url_decode inp<block_start>padding_factor=(4-len(inp)%4)%4<line_sep>inp<augadd>"="<times>padding_factor<line_sep><return>base64.b64decode(unicode(inp).translate(dict(zip(map(ord u'-_') u'+/'))))<block_end><def_stmt>parse_signed_request signed_request secret<block_start>"""The signed_request parameter is a simple way to make sure that the data
you're receiving is the actual data sent by Facebook. It is signed using
your application secret which is only known by you and Facebook. If someone
were to make a change to the data, the signature would no longer validate as
they wouldn't know your application secret to also update the signature.
Code snippet for parsing "signed_request"
"""<line_sep>l=signed_request.split('.' 2)<line_sep>encoded_sig=l[0]<line_sep>payload=l[1]<line_sep>sig=base64_url_decode(encoded_sig)<line_sep>data=json.loads(base64_url_decode(payload))<if_stmt>data.get('algorithm').upper()<ne>'HMAC-SHA256'<block_start>log.error('Unknown algorithm')<line_sep><return><none><block_end><else_stmt><block_start>expected_sig=hmac.new(secret msg=payload digestmod=hashlib.sha256).digest()<block_end><if_stmt>sig<ne>expected_sig<block_start><return><none><block_end><else_stmt><block_start>log.debug('valid signed request received..')<line_sep><return>data<block_end><block_end> |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert UV crops to full UV maps."""<import_stmt>os<import_stmt>sys<import_stmt>json<import_from_stmt>PIL Image<import_stmt>numpy<as>np<def_stmt>place_crop crop image center_x center_y<block_start>"""Place the crop in the image at the specified location."""<line_sep>im_height,im_width=image.shape[:2]<line_sep>crop_height,crop_width=crop.shape[:2]<line_sep>left=center_x-crop_width<floordiv>2<line_sep>right=left+crop_width<line_sep>top=center_y-crop_height<floordiv>2<line_sep>bottom=top+crop_height<line_sep>adjusted_crop=crop# remove regions of crop that go beyond image bounds
<if_stmt>left<l>0<block_start>adjusted_crop=adjusted_crop[: -left:]<block_end><if_stmt>right<g>im_width<block_start>adjusted_crop=adjusted_crop[: :(im_width-right)]<block_end><if_stmt>top<l>0<block_start>adjusted_crop=adjusted_crop[-top:]<block_end><if_stmt>bottom<g>im_height<block_start>adjusted_crop=adjusted_crop[:(im_height-bottom)]<block_end>crop_mask=(adjusted_crop<g>0).astype(crop.dtype).sum(-1 keepdims=<true>)<line_sep>image[max(0 top):min(im_height bottom) max(0 left):min(im_width right)]<augmul>(1-crop_mask)<line_sep>image[max(0 top):min(im_height bottom) max(0 left):min(im_width right)]<augadd>adjusted_crop<line_sep><return>image<block_end><def_stmt>crop2full keypoints_path metadata_path uvdir outdir<block_start>"""Create each frame's layer UVs from predicted UV crops"""<with_stmt>open(keypoints_path)<as>f<block_start>kp_data=json.load(f)<block_end># Get all people ids
people_ids=set()<for_stmt>frame kp_data<block_start><for_stmt>skeleton kp_data[frame]<block_start>people_ids.add(skeleton['idx'])<block_end><block_end>people_ids=sorted(list(people_ids))<with_stmt>open(metadata_path)<as>f<block_start>metadata=json.load(f)<block_end>orig_size=np.array(metadata['alphapose_input_size'][::-1])<line_sep>out_size=np.array(metadata['size_LR'][::-1])<if_stmt>'people_layers'<in>metadata<block_start>people_layers=metadata['people_layers']<block_end><else_stmt><block_start>people_layers=[[pid]<for>pid people_ids]<block_end># Create output directories.
<for_stmt>layer_i range(1 1+len(people_layers))<block_start>os.makedirs(os.path.join(outdir f'{layer_i:02d}') exist_ok=<true>)<block_end>print(f'Writing UVs to {outdir}')<for_stmt>frame sorted(kp_data)<block_start><for_stmt>layer_i,layer enumerate(people_layers 1)<block_start>out_path=os.path.join(outdir f'{layer_i:02d}' frame)<line_sep>sys.stdout.flush()<line_sep>sys.stdout.write('processing frame %s\r'%out_path)<line_sep>uv_map=np.zeros([out_size[0] out_size[1] 4])<for_stmt>person_id layer<block_start>matches=[p<for>p kp_data[frame]<if>p['idx']<eq>person_id]<if_stmt>len(matches)<eq>0# person doesn't appear in this frame
<block_start><continue><block_end>skeleton=matches[0]<line_sep>kps=np.array(skeleton['keypoints']).reshape(17 3)<line_sep># Get kps bounding box.
left=kps[: 0].min()<line_sep>right=kps[: 0].max()<line_sep>top=kps[: 1].min()<line_sep>bottom=kps[: 1].max()<line_sep>height=bottom-top<line_sep>width=right-left<line_sep>orig_crop_size=max(height width)<line_sep>orig_center_x=(left+right)<floordiv>2<line_sep>orig_center_y=(top+bottom)<floordiv>2<line_sep># read predicted uv map
uv_crop_path=os.path.join(uvdir f'{person_id:02d}_{os.path.basename(out_path)[:-4]}_output_uv.png')<if_stmt>os.path.exists(uv_crop_path)<block_start>uv_crop=np.array(Image.open(uv_crop_path))<block_end><else_stmt><block_start>uv_crop=np.zeros([256 256 3])<block_end># add person ID channel
person_mask=(uv_crop[<ellipsis> 0:1]<g>0).astype('uint8')<line_sep>person_ids=(255-person_id)<times>person_mask<line_sep>uv_crop=np.concatenate([uv_crop person_ids] -1)<line_sep># scale crop to desired output size
# 256 is the crop size, 192 is the inner crop size
out_crop_size=orig_crop_size<times>256./192<times>out_size/orig_size<line_sep>out_crop_size=out_crop_size.astype(np.int)<line_sep>uv_crop=uv_crop.astype(np.uint8)<line_sep>uv_crop=np.array(Image.fromarray(uv_crop).resize((out_crop_size[1] out_crop_size[0]) resample=Image.NEAREST))<line_sep># scale center coordinate accordingly
out_center_x=(orig_center_x<times>out_size[1]/orig_size[1]).astype(np.int)<line_sep>out_center_y=(orig_center_y<times>out_size[0]/orig_size[0]).astype(np.int)<line_sep># Place UV crop in full UV map and save.
uv_map=place_crop(uv_crop uv_map out_center_x out_center_y)<block_end>uv_map=Image.fromarray(uv_map.astype('uint8'))<line_sep>uv_map.save(out_path)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><import_stmt>argparse<line_sep>arguments=argparse.ArgumentParser()<line_sep>arguments.add_argument('--dataroot' type=str)<line_sep>opt=arguments.parse_args()<line_sep>keypoints_path=os.path.join(opt.dataroot 'keypoints.json')<line_sep>metadata_path=os.path.join(opt.dataroot 'metadata.json')<line_sep>uvdir=os.path.join(opt.dataroot 'kp2uv/test_latest/images')<line_sep>outdir=os.path.join(opt.dataroot 'iuv')<line_sep>crop2full(keypoints_path metadata_path uvdir outdir)<block_end> |
<import_stmt>os<import_stmt>sys<import_stmt>argparse<import_stmt>datetime<import_stmt>time<import_stmt>os.path<as>osp<import_stmt>matplotlib<line_sep>matplotlib.use('Agg')<import_from_stmt>matplotlib pyplot<as>plt<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>torch.optim lr_scheduler<import_stmt>torch.backends.cudnn<as>cudnn<import_stmt>datasets<import_stmt>models<import_from_stmt>utils AverageMeter Logger<import_from_stmt>center_loss CenterLoss<line_sep>parser=argparse.ArgumentParser("Center Loss Example")<line_sep># dataset
parser.add_argument('-d' '--dataset' type=str default='mnist' choices=['mnist'])<line_sep>parser.add_argument('-j' '--workers' default=4 type=int help="number of data loading workers (default: 4)")<line_sep># optimization
parser.add_argument('--batch-size' type=int default=128)<line_sep>parser.add_argument('--lr-model' type=float default=0.001 help="learning rate for model")<line_sep>parser.add_argument('--lr-cent' type=float default=0.5 help="learning rate for center loss")<line_sep>parser.add_argument('--weight-cent' type=float default=1 help="weight for center loss")<line_sep>parser.add_argument('--max-epoch' type=int default=100)<line_sep>parser.add_argument('--stepsize' type=int default=20)<line_sep>parser.add_argument('--gamma' type=float default=0.5 help="learning rate decay")<line_sep># model
parser.add_argument('--model' type=str default='cnn')<line_sep># misc
parser.add_argument('--eval-freq' type=int default=10)<line_sep>parser.add_argument('--print-freq' type=int default=50)<line_sep>parser.add_argument('--gpu' type=str default='0')<line_sep>parser.add_argument('--seed' type=int default=1)<line_sep>parser.add_argument('--use-cpu' action='store_true')<line_sep>parser.add_argument('--save-dir' type=str default='log')<line_sep>parser.add_argument('--plot' action='store_true' help="whether to plot features for every epoch")<line_sep>args=parser.parse_args()<def_stmt>main <block_start>torch.manual_seed(args.seed)<line_sep>os.environ['CUDA_VISIBLE_DEVICES']=args.gpu<line_sep>use_gpu=torch.cuda.is_available()<if_stmt>args.use_cpu<block_start>use_gpu=<false><block_end>sys.stdout=Logger(osp.join(args.save_dir 'log_'+args.dataset+'.txt'))<if_stmt>use_gpu<block_start>print("Currently using GPU: {}".format(args.gpu))<line_sep>cudnn.benchmark=<true><line_sep>torch.cuda.manual_seed_all(args.seed)<block_end><else_stmt><block_start>print("Currently using CPU")<block_end>print("Creating dataset: {}".format(args.dataset))<line_sep>dataset=datasets.create(name=args.dataset batch_size=args.batch_size use_gpu=use_gpu num_workers=args.workers )<line_sep>trainloader,testloader=dataset.trainloader dataset.testloader<line_sep>print("Creating model: {}".format(args.model))<line_sep>model=models.create(name=args.model num_classes=dataset.num_classes)<if_stmt>use_gpu<block_start>model=nn.DataParallel(model).cuda()<block_end>criterion_xent=nn.CrossEntropyLoss()<line_sep>criterion_cent=CenterLoss(num_classes=dataset.num_classes feat_dim=2 use_gpu=use_gpu)<line_sep>optimizer_model=torch.optim.SGD(model.parameters() lr=args.lr_model weight_decay=5e-04 momentum=0.9)<line_sep>optimizer_centloss=torch.optim.SGD(criterion_cent.parameters() lr=args.lr_cent)<if_stmt>args.stepsize<g>0<block_start>scheduler=lr_scheduler.StepLR(optimizer_model step_size=args.stepsize gamma=args.gamma)<block_end>start_time=time.time()<for_stmt>epoch range(args.max_epoch)<block_start>print("==> Epoch {}/{}".format(epoch+1 args.max_epoch))<line_sep>train(model criterion_xent criterion_cent optimizer_model optimizer_centloss trainloader use_gpu dataset.num_classes epoch)<if_stmt>args.stepsize<g>0<block_start>scheduler.step()<block_end><if_stmt>args.eval_freq<g>0<and>(epoch+1)%args.eval_freq<eq>0<or>(epoch+1)<eq>args.max_epoch<block_start>print("==> Test")<line_sep>acc,err=test(model testloader use_gpu dataset.num_classes epoch)<line_sep>print("Accuracy (%): {}\t Error rate (%): {}".format(acc err))<block_end><block_end>elapsed=round(time.time()-start_time)<line_sep>elapsed=str(datetime.timedelta(seconds=elapsed))<line_sep>print("Finished. 
Total elapsed time (h:m:s): {}".format(elapsed))<block_end><def_stmt>train model criterion_xent criterion_cent optimizer_model optimizer_centloss trainloader use_gpu num_classes epoch<block_start>model.train()<line_sep>xent_losses=AverageMeter()<line_sep>cent_losses=AverageMeter()<line_sep>losses=AverageMeter()<if_stmt>args.plot<block_start>all_features,all_labels=[] []<block_end><for_stmt>batch_idx,(data labels) enumerate(trainloader)<block_start><if_stmt>use_gpu<block_start>data,labels=data.cuda() labels.cuda()<block_end>features,outputs=model(data)<line_sep>loss_xent=criterion_xent(outputs labels)<line_sep>loss_cent=criterion_cent(features labels)<line_sep>loss_cent<augmul>args.weight_cent<line_sep>loss=loss_xent+loss_cent<line_sep>optimizer_model.zero_grad()<line_sep>optimizer_centloss.zero_grad()<line_sep>loss.backward()<line_sep>optimizer_model.step()<line_sep># by doing so, weight_cent would not impact on the learning of centers
<for_stmt>param criterion_cent.parameters()<block_start>param.grad.data<augmul>(1./args.weight_cent)<block_end>optimizer_centloss.step()<line_sep>losses.update(loss.item() labels.size(0))<line_sep>xent_losses.update(loss_xent.item() labels.size(0))<line_sep>cent_losses.update(loss_cent.item() labels.size(0))<if_stmt>args.plot<block_start><if_stmt>use_gpu<block_start>all_features.append(features.data.cpu().numpy())<line_sep>all_labels.append(labels.data.cpu().numpy())<block_end><else_stmt><block_start>all_features.append(features.data.numpy())<line_sep>all_labels.append(labels.data.numpy())<block_end><block_end><if_stmt>(batch_idx+1)%args.print_freq<eq>0<block_start>print("Batch {}/{}\t Loss {:.6f} ({:.6f}) XentLoss {:.6f} ({:.6f}) CenterLoss {:.6f} ({:.6f})".format(batch_idx+1 len(trainloader) losses.val losses.avg xent_losses.val xent_losses.avg cent_losses.val cent_losses.avg))<block_end><block_end><if_stmt>args.plot<block_start>all_features=np.concatenate(all_features 0)<line_sep>all_labels=np.concatenate(all_labels 0)<line_sep>plot_features(all_features all_labels num_classes epoch prefix='train')<block_end><block_end><def_stmt>test model testloader use_gpu num_classes epoch<block_start>model.eval()<line_sep>correct,total=0 0<if_stmt>args.plot<block_start>all_features,all_labels=[] []<block_end><with_stmt>torch.no_grad()<block_start><for_stmt>data,labels testloader<block_start><if_stmt>use_gpu<block_start>data,labels=data.cuda() labels.cuda()<block_end>features,outputs=model(data)<line_sep>predictions=outputs.data.max(1)[1]<line_sep>total<augadd>labels.size(0)<line_sep>correct<augadd>(predictions<eq>labels.data).sum()<if_stmt>args.plot<block_start><if_stmt>use_gpu<block_start>all_features.append(features.data.cpu().numpy())<line_sep>all_labels.append(labels.data.cpu().numpy())<block_end><else_stmt><block_start>all_features.append(features.data.numpy())<line_sep>all_labels.append(labels.data.numpy())<block_end><block_end><block_end><block_end><if_stmt>args.plot<block_start>all_features=np.concatenate(all_features 0)<line_sep>all_labels=np.concatenate(all_labels 0)<line_sep>plot_features(all_features all_labels num_classes epoch prefix='test')<block_end>acc=correct<times>100./total<line_sep>err=100.-acc<line_sep><return>acc err<block_end><def_stmt>plot_features features labels num_classes epoch prefix<block_start>"""Plot features on 2D plane.
Args:
features: (num_instances, num_features).
labels: (num_instances).
"""<line_sep>colors=['C0' 'C1' 'C2' 'C3' 'C4' 'C5' 'C6' 'C7' 'C8' 'C9']<for_stmt>label_idx range(num_classes)<block_start>plt.scatter(features[labels<eq>label_idx 0] features[labels<eq>label_idx 1] c=colors[label_idx] s=1 )<block_end>plt.legend(['0' '1' '2' '3' '4' '5' '6' '7' '8' '9'] loc='upper right')<line_sep>dirname=osp.join(args.save_dir prefix)<if_stmt><not>osp.exists(dirname)<block_start>os.mkdir(dirname)<block_end>save_name=osp.join(dirname 'epoch_'+str(epoch+1)+'.png')<line_sep>plt.savefig(save_name bbox_inches='tight')<line_sep>plt.close()<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
src=Split('''
hal_test.c
''')<line_sep>component=aos_component('hal_test' src)<line_sep>component.add_cflags('-Wall')<line_sep>component.add_cflags('-Werror')<line_sep> |
<import_from_stmt>pytest mark<import_from_stmt>messages show_count<line_sep>@mark.parametrize('qty, expected' [(1 '1 part') (2 '2 parts') (0 'no parts') ])<def_stmt>test_show_count qty:int expected:str<arrow><none><block_start>got=show_count(qty 'part')<assert_stmt>got<eq>expected<block_end># tag::TEST_IRREGULAR[]
@mark.parametrize('qty, expected' [(1 '1 child') (2 '2 children') (0 'no children') ])<def_stmt>test_irregular qty:int expected:str<arrow><none><block_start>got=show_count(qty 'child' 'children')<assert_stmt>got<eq>expected<block_end># end::TEST_IRREGULAR[]
|
# -*- coding: utf-8 -*-
<import_from_future_stmt> unicode_literals<import_from_stmt>django.db models migrations<import_stmt>datetime<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('blog' '0001_initial') ]<line_sep>operations=[migrations.AlterModelOptions(name='blogcategory' options={'ordering':['name'] 'verbose_name_plural':'Blog Categories'} ) migrations.AddField(model_name='blogpage' name='date' field=models.DateField(verbose_name='Post date' default=datetime.datetime(2015 2 26 23 5 30 771014)) preserve_default=<false> ) ]<block_end> |
<import_stmt>pytest<def_stmt>test_noun_chunks_is_parsed_el el_tokenizer<block_start>"""Test that noun_chunks raises Value Error for 'el' language if Doc is not parsed."""<line_sep>doc=el_tokenizer("είναι χώρα της νοτιοανατολικής")<with_stmt>pytest.raises(ValueError)<block_start>list(doc.noun_chunks)<block_end><block_end> |
"""Test runway.core.providers.aws.s3._helpers.parameters."""<line_sep># pylint: disable=no-self-use
<import_from_future_stmt> annotations<import_from_stmt>typing TYPE_CHECKING Any Dict List<import_stmt>pytest<import_from_stmt>pydantic ValidationError<import_from_stmt>runway.core.providers.aws.s3._helpers.parameters Parameters ParametersDataModel <if_stmt>TYPE_CHECKING<block_start><import_from_stmt>pathlib Path<import_from_stmt>pytest_mock MockerFixture<import_from_stmt>runway.core.providers.aws.s3._helpers.parameters PathsType<block_end><class_stmt>TestParameters<block_start>"""Test Parameters."""<line_sep>data_locallocal:ParametersDataModel<line_sep>data_s3s3:ParametersDataModel<line_sep>data_s3local:ParametersDataModel<def_stmt>setup_method self<arrow><none><block_start>"""Run before each test method if run to return the class instance attrs to default."""<line_sep>self.data_locallocal=ParametersDataModel(dest="test-dest" src="test-src")<line_sep>self.data_s3s3=ParametersDataModel(dest="s3://test-dest" src="s3://test-src")<line_sep>self.data_s3local=ParametersDataModel(dest="test-dest" src="s3://test-src")<block_end><def_stmt>test_init self mocker:MockerFixture<arrow><none><block_start>"""Test __init__."""<line_sep>mock_validate_path_args=mocker.patch.object(Parameters "_validate_path_args")<line_sep>obj=Parameters("test" self.data_locallocal)<assert_stmt>obj.action<eq>"test"<assert_stmt>obj.data<eq>self.data_locallocal<line_sep>mock_validate_path_args.assert_called_once_with()<block_end>@pytest.mark.parametrize("cmd, expected" [("sync" <true>) ("mb" <true>) ("rb" <true>) ("cp" <false>) ("mv" <false>)] )<def_stmt>test_init_set_dir_op self cmd:str expected:bool mocker:MockerFixture<arrow><none><block_start>"""Test __init__."""<line_sep>mocker.patch.object(Parameters "_validate_path_args")<assert_stmt>Parameters(cmd self.data_locallocal).data.dir_op<eq>expected<block_end>@pytest.mark.parametrize("cmd, expected" [("sync" <false>) ("mb" <false>) ("rb" <false>) ("cp" <false>) ("mv" <true>)] )<def_stmt>test_init_set_is_move self cmd:str expected:bool mocker:MockerFixture<arrow><none><block_start>"""Test __init__."""<line_sep>mocker.patch.object(Parameters "_validate_path_args")<assert_stmt>Parameters(cmd self.data_locallocal).data.is_move<eq>expected<block_end><def_stmt>test_same_path_mv_locallocal self<arrow><none><block_start>"""Test _same_path."""<line_sep>self.data_locallocal.dest=self.data_locallocal.src<assert_stmt>Parameters("mv" self.data_locallocal)<block_end><def_stmt>test_same_path_mv_s3s3 self<arrow><none><block_start>"""Test _same_path."""<line_sep>self.data_s3s3.dest=self.data_s3s3.src<with_stmt>pytest.raises(ValueError)<as>excinfo<block_start>Parameters("mv" self.data_s3s3)<block_end><assert_stmt>"Cannot mv a file onto itself"<in>str(excinfo.value)<block_end><def_stmt>test_same_path_mv_s3s3_not_same self<arrow><none><block_start>"""Test _same_path."""<assert_stmt>Parameters("mv" self.data_s3s3)<block_end><def_stmt>test_same_path_sync_locallocal self<arrow><none><block_start>"""Test _same_path."""<line_sep>self.data_locallocal.dest=self.data_locallocal.src<assert_stmt>Parameters("sync" self.data_locallocal)<block_end><def_stmt>test_same_path_sync_s3s3 self<arrow><none><block_start>"""Test _same_path."""<line_sep>self.data_s3s3.dest=self.data_s3s3.src<assert_stmt>Parameters("sync" self.data_s3s3)<block_end><def_stmt>test_validate_path_args_mv_s3local self tmp_path:Path<arrow><none><block_start>"""Test _validate_path_args."""<line_sep>self.data_s3local.dest=str(tmp_path)<assert_stmt>Parameters("mv" 
self.data_s3local)<block_end><def_stmt>test_validate_path_args_mv_s3local_not_exist self tmp_path:Path<arrow><none><block_start>"""Test _validate_path_args."""<line_sep>missing_dir=tmp_path/"missing"<line_sep>self.data_s3local.dest=str(missing_dir)<assert_stmt>Parameters("mv" self.data_s3local)<assert_stmt><not>missing_dir.exists()<block_end><def_stmt>test_validate_path_args_sync_s3local self tmp_path:Path<arrow><none><block_start>"""Test _validate_path_args."""<line_sep>self.data_s3local.dest=str(tmp_path)<assert_stmt>Parameters("sync" self.data_s3local)<block_end><def_stmt>test_validate_path_args_sync_s3local_not_exist self tmp_path:Path<arrow><none><block_start>"""Test _validate_path_args."""<line_sep>missing_dir=tmp_path/"missing"<line_sep>self.data_s3local.dest=str(missing_dir)<assert_stmt>Parameters("sync" self.data_s3local)<assert_stmt>missing_dir.exists()<block_end><block_end><class_stmt>TestParametersDataModel<block_start>"""Test ParametersDataModel."""<line_sep>@pytest.mark.parametrize("dest, src, expected" [("test-dest" "test-src" "locallocal") ("test-dest" "s3://test-src" "s3local") ("s3://test-dest" "test-src" "locals3") ("s3://test-dest" "s3://test-src" "s3s3") ] )<def_stmt>test_determine_paths_type self dest:str expected:PathsType src:str<arrow><none><block_start>"""Test _determine_paths_type."""<assert_stmt>ParametersDataModel(dest=dest src=src).paths_type<eq>expected<block_end><def_stmt>test_field_defaults self<arrow><none><block_start>"""Test field defaults."""<line_sep>kwargs={"dest":"test-dest" "src":"test-src"}<line_sep>obj=ParametersDataModel(**kwargs)<assert_stmt>obj.dest<eq>kwargs["dest"]<assert_stmt>obj.src<eq>kwargs["src"]<assert_stmt><not>obj.delete<assert_stmt><not>obj.dir_op<assert_stmt><not>obj.exact_timestamps<assert_stmt><not>obj.follow_symlinks<assert_stmt><not>obj.is_move<assert_stmt><not>obj.only_show_errors<assert_stmt><not>obj.page_size<assert_stmt>obj.paths_type<eq>"locallocal"<assert_stmt><not>obj.size_only<block_end>@pytest.mark.parametrize("provided, expected" [("s3://test-bucket" "s3://test-bucket/") ("s3://test-bucket/" "s3://test-bucket/") ("s3://test-bucket/key.txt" "s3://test-bucket/key.txt") ("./local" "./local") ("./local/" "./local/") ("./local/test.txt" "./local/test.txt") ] )<def_stmt>test_normalize_s3_trailing_slash self provided:str expected:str<arrow><none><block_start>"""Test _normalize_s3_trailing_slash."""<assert_stmt>ParametersDataModel(dest=provided src="test").dest<eq>expected<assert_stmt>ParametersDataModel(dest="test" src=provided).src<eq>expected<block_end>@pytest.mark.parametrize("kwargs, error_locs" [({"dest":"test-dest"} ["src"]) ({"src":"test-src"} ["dest"])] )<def_stmt>test_required_fields self error_locs:List[str] kwargs:Dict[str Any]<arrow><none><block_start>"""Test required fields."""<with_stmt>pytest.raises(ValidationError)<as>excinfo<block_start>ParametersDataModel(**kwargs)<block_end>errors=excinfo.value.errors()<for_stmt>index,loc enumerate(error_locs)<block_start><assert_stmt>errors[index]["loc"]<eq>(loc )<block_end><block_end><block_end> |
<import_from_stmt>.boxes_view BoxesView# noqa
<import_from_stmt>.graphics_item_view GraphicsItemView# noqa
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-02-10 20:26
<import_from_future_stmt> unicode_literals<import_stmt>django.contrib.postgres.fields<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('api' '0001_initial') ]<line_sep>operations=[migrations.AlterField(model_name='sqlapiuser' name='permissions' field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=<true> max_length=126 null=<true>) default=list null=<true> size=<none>) ) ]<block_end> |
@app.post('/login')<def_stmt>login response:Response<block_start><ellipsis><line_sep>token=manager.create_access_token(data=dict(sub=user.email))<line_sep>manager.set_cookie(response token)<line_sep><return>response<block_end> |
"""
Collections of Fermion-to-Qubit encodings known to tequila
Most are interfaces to OpenFermion
"""<import_from_stmt>tequila.circuit.circuit QCircuit<import_from_stmt>tequila.circuit.gates X<import_from_stmt>tequila.hamiltonian.qubit_hamiltonian QubitHamiltonian<import_stmt>openfermion<def_stmt>known_encodings # convenience for testing and I/O
<block_start>encodings={"JordanWigner":JordanWigner "BravyiKitaev":BravyiKitaev "BravyiKitaevFast":BravyiKitaevFast "BravyiKitaevTree":BravyiKitaevTree "TaperedBravyiKitaev":TaperedBravyKitaev}<line_sep># aliases
encodings={**encodings "ReorderedJordanWigner":<lambda>**kwargs:JordanWigner(up_then_down=<true> **kwargs) "ReorderedBravyiKitaev":<lambda>**kwargs:BravyiKitaev(up_then_down=<true> **kwargs) "ReorderedBravyiKitaevTree":<lambda>**kwargs:BravyiKitaevTree(up_then_down=<true> **kwargs) }<line_sep><return>{k.replace("_" "").replace("-" "").upper():v<for>k,v encodings.items()}<block_end><class_stmt>EncodingBase<block_start>@property<def_stmt>name self<block_start>prefix=""<if_stmt>self.up_then_down<block_start>prefix="Reordered"<block_end><if_stmt>hasattr(self "_name")<block_start><return>prefix+self._name<block_end><else_stmt><block_start><return>prefix+type(self).__name__<block_end><block_end><def_stmt>__init__ self n_electrons n_orbitals up_then_down=<false> *args **kwargs<block_start>self.n_electrons=n_electrons<line_sep>self.n_orbitals=n_orbitals<line_sep>self.up_then_down=up_then_down<block_end><def_stmt>__call__ self fermion_operator:openfermion.FermionOperator *args **kwargs<arrow>QubitHamiltonian<block_start>"""
:param fermion_operator:
an openfermion FermionOperator
:return:
The openfermion QubitOperator in this class's encoding
"""<if_stmt>self.up_then_down<block_start>op=openfermion.reorder(operator=fermion_operator order_function=openfermion.up_then_down num_modes=2<times>self.n_orbitals)<block_end><else_stmt><block_start>op=fermion_operator<block_end>fop=self.do_transform(fermion_operator=op *args **kwargs)<line_sep>fop.compress()<line_sep><return>self.post_processing(QubitHamiltonian.from_openfermion(fop))<block_end><def_stmt>post_processing self op *args **kwargs<block_start><return>op<block_end><def_stmt>up self i<block_start><if_stmt>self.up_then_down<block_start><return>i<block_end><else_stmt><block_start><return>2<times>i<block_end><block_end><def_stmt>down self i<block_start><if_stmt>self.up_then_down<block_start><return>i+self.n_orbitals<block_end><else_stmt><block_start><return>2<times>i+1<block_end><block_end><def_stmt>do_transform self fermion_operator:openfermion.FermionOperator *args **kwargs<arrow>openfermion.QubitOperator<block_start><raise>Exception("{}::do_transform: called base class".format(type(self).__name__))<block_end><def_stmt>map_state self state:list *args **kwargs<arrow>list<block_start>"""
Expects a state in spin-orbital ordering
Returns the corresponding qubit state in the class encoding
:param state:
basis-state as occupation number vector in spin orbitals
sorted as: [0_up, 0_down, 1_up, 1_down, ... N_up, N_down]
with N being the number of spatial orbitals
:return:
basis-state as qubit state in the corresponding mapping
"""<line_sep>"""Does a really lazy workaround ... but it works
:return: Hartree-Fock Reference as binary-number
Parameters
----------
reference_orbitals: list:
give list of doubly occupied orbitals
default is None which leads to automatic list of the
first n_electron/2 orbitals
Returns
-------
"""<line_sep># default is a lazy workaround, but it workds
n_qubits=2<times>self.n_orbitals<line_sep>spin_orbitals=sorted([i<for>i,x enumerate(state)<if>int(x)<eq>1])<line_sep>string="1.0 ["<for_stmt>i spin_orbitals<block_start>string<augadd>str(i)+"^ "<block_end>string<augadd>"]"<line_sep>fop=openfermion.FermionOperator(string 1.0)<line_sep>op=self(fop)<import_from_stmt>tequila.wavefunction.qubit_wavefunction QubitWaveFunction<line_sep>wfn=QubitWaveFunction.from_int(0 n_qubits=n_qubits)<line_sep>wfn=wfn.apply_qubitoperator(operator=op)<assert_stmt>(len(wfn.keys())<eq>1)<line_sep>key=list(wfn.keys())[0].array<line_sep><return>key<block_end><def_stmt>hcb_to_me self *args **kwargs<block_start><return><none><block_end><def_stmt>__str__ self<block_start><return>type(self).__name__<block_end><block_end><class_stmt>JordanWigner(EncodingBase)<block_start>"""
OpenFermion::jordan_wigner
"""<def_stmt>do_transform self fermion_operator:openfermion.FermionOperator *args **kwargs<arrow>openfermion.QubitOperator<block_start><return>openfermion.jordan_wigner(fermion_operator *args **kwargs)<block_end><def_stmt>map_state self state:list *args **kwargs<block_start>state=state+[0]<times>(self.n_orbitals-len(state))<line_sep>result=[0]<times>len(state)<if_stmt>self.up_then_down<block_start><return>[state[2<times>i]<for>i range(self.n_orbitals)]+[state[2<times>i+1]<for>i range(self.n_orbitals)]<block_end><else_stmt><block_start><return>state<block_end><block_end><def_stmt>hcb_to_me self *args **kwargs<block_start>U=QCircuit()<for_stmt>i range(self.n_orbitals)<block_start>U<augadd>X(target=self.down(i) control=self.up(i))<block_end><return>U<block_end><block_end><class_stmt>BravyiKitaev(EncodingBase)<block_start>"""
Uses OpenFermion::bravyi_kitaev
"""<def_stmt>do_transform self fermion_operator:openfermion.FermionOperator *args **kwargs<arrow>openfermion.QubitOperator<block_start><return>openfermion.bravyi_kitaev(fermion_operator n_qubits=self.n_orbitals<times>2)<block_end><block_end><class_stmt>BravyiKitaevTree(EncodingBase)<block_start>"""
Uses OpenFermion::bravyi_kitaev_tree
"""<def_stmt>do_transform self fermion_operator:openfermion.FermionOperator *args **kwargs<arrow>openfermion.QubitOperator<block_start><return>openfermion.bravyi_kitaev_tree(fermion_operator n_qubits=self.n_orbitals<times>2)<block_end><block_end><class_stmt>BravyiKitaevFast(EncodingBase)<block_start>"""
Uses OpenFermion::bravyi_kitaev_fast
"""<def_stmt>do_transform self fermion_operator:openfermion.FermionOperator *args **kwargs<arrow>openfermion.QubitOperator<block_start>n_qubits=openfermion.count_qubits(fermion_operator)<if_stmt>n_qubits<ne>self.n_orbitals<times>2<block_start><raise>Exception("BravyiKitaevFast transformation currently only possible for full Hamiltonians (no UCC generators).\nfermion_operator was {}".format(fermion_operator))<block_end>op=openfermion.get_interaction_operator(fermion_operator)<line_sep><return>openfermion.bravyi_kitaev_fast(op)<block_end><block_end><class_stmt>TaperedBravyKitaev(EncodingBase)<block_start>"""
Uses OpenFermion::symmetry_conserving_bravyi_kitaev (tapered bravyi_kitaev_tree arxiv:1701.07072)
Reduces Hamiltonian by 2 qubits
See OpenFermion Documentation for more
Does not work for UCC generators yet
"""<def_stmt>__init__ self n_electrons n_orbitals active_fermions=<none> active_orbitals=<none> *args **kwargs<block_start><if_stmt>active_fermions<is><none><block_start>self.active_fermions=n_electrons<block_end><else_stmt><block_start>self.active_fermions=active_fermions<block_end><if_stmt>active_orbitals<is><none><block_start>self.active_orbitals=n_orbitals<times>2# in openfermion those are spin-orbitals
<block_end><else_stmt><block_start>self.active_orbitals=active_orbitals<block_end><if_stmt>"up_then_down"<in>kwargs<block_start><raise>Exception("Don't pass up_then_down argument to {}, it can't be changed".format(type(self).__name__))<block_end>super().__init__(n_orbitals=n_orbitals n_electrons=n_electrons up_then_down=<false> *args **kwargs)<block_end><def_stmt>do_transform self fermion_operator:openfermion.FermionOperator *args **kwargs<arrow>openfermion.QubitOperator<block_start><if_stmt>openfermion.count_qubits(fermion_operator)<ne>self.n_orbitals<times>2<block_start><raise>Exception("TaperedBravyiKitaev not ready for UCC generators yet")<block_end><return>openfermion.symmetry_conserving_bravyi_kitaev(fermion_operator active_orbitals=self.active_orbitals active_fermions=self.active_fermions)<block_end><def_stmt>map_state self state:list *args **kwargs<block_start>non_tapered_trafo=BravyiKitaevTree(up_then_down=<true> n_electrons=self.n_electrons n_orbitals=self.n_orbitals)<line_sep>key=non_tapered_trafo.map_state(state=state *args **kwargs)<line_sep>n_qubits=self.n_orbitals<times>2<line_sep>active_qubits=[i<for>i range(n_qubits)<if>i<not><in>[n_qubits-1 n_qubits<floordiv>2-1]]<line_sep>key=[key[i]<for>i active_qubits]<line_sep><return>key<block_end><block_end> |
<import_from_stmt>os path<import_from_stmt>glob glob<import_stmt>tempfile<import_stmt>numpy<as>np<import_from_stmt>tempfile TemporaryDirectory NamedTemporaryFile<import_stmt>torch<as>ch<import_from_stmt>torch.utils.data Dataset<import_stmt>webdataset<as>wds<import_from_stmt>ffcv DatasetWriter<import_from_stmt>ffcv.reader Reader<import_from_stmt>ffcv.fields IntField FloatField<import_from_stmt>test_writer validate_simple_dataset<line_sep>field_names=['index' 'value.pyd']<class_stmt>DummyDataset(Dataset)<block_start><def_stmt>__init__ self l<block_start>self.l=l<block_end><def_stmt>__len__ self<block_start><return>self.l<block_end><def_stmt>__getitem__ self index<block_start><if_stmt>index<ge>self.l<block_start><raise>IndexError()<block_end><return>(index np.sin(index))<block_end><block_end><def_stmt>write_webdataset folder dataset field_names<block_start>pattern=path.join(folder "dataset-%06d.tar")<line_sep>writer=wds.ShardWriter(pattern maxcount=20)<with_stmt>writer<as>sink<block_start><for_stmt>i,sample enumerate(dataset)<block_start>data={'__key__':f'sample_{i}'}<for_stmt>field_name,value zip(field_names sample)<block_start>data[field_name]=value<block_end>sink.write(data)<block_end><block_end><block_end><def_stmt>pipeline dataset<block_start><return>(dataset.decode().to_tuple(*field_names))<block_end><if_stmt>__name__<eq>'__main__'<block_start>N=1007<line_sep>dataset=DummyDataset(N)<with_stmt>TemporaryDirectory()<as>temp_directory<block_start><with_stmt>NamedTemporaryFile()<as>handle<block_start>fname=handle.name<line_sep>write_webdataset(temp_directory dataset field_names)<line_sep>files=glob(path.join(temp_directory '*'))<line_sep>files=list(sorted(files))<line_sep>print(fname)<line_sep>writer=DatasetWriter(fname {'index':IntField() 'value':FloatField()})<line_sep>writer.from_webdataset(files pipeline)<line_sep>validate_simple_dataset(fname N shuffled=<false>)<block_end><block_end><block_end> |
# Copyright (c) 2018, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
<import_stmt>torch<import_stmt>numpy<as>np<import_from_stmt>torchtext data<import_from_stmt>torchtext datasets<import_from_stmt>torch.nn functional<as>F<import_from_stmt>torch.autograd Variable<import_stmt>revtok<import_stmt>logging<import_stmt>random<import_stmt>string<import_stmt>traceback<import_stmt>math<import_stmt>uuid<import_stmt>argparse<import_stmt>os<import_stmt>copy<import_stmt>time<import_from_stmt>tqdm tqdm trange<import_from_stmt>model Transformer FastTransformer INF TINY softmax<import_from_stmt>utils NormalField NormalTranslationDataset TripleTranslationDataset ParallelDataset<import_from_stmt>utils Metrics Best computeGLEU computeBLEU Cache Batch masked_sort unsorted computeGroupBLEU<import_from_stmt>time gmtime strftime<import_stmt>sys<import_from_stmt>traceback extract_tb<import_from_stmt>code interact<def_stmt>interactive_exception e_class e_value tb<block_start>sys.__excepthook__(e_class e_value tb)<line_sep>tb_stack=extract_tb(tb)<line_sep>locals_stack=[]<while_stmt>tb<is><not><none><block_start>locals_stack.append(tb.tb_frame.f_locals)<line_sep>tb=tb.tb_next<block_end><while_stmt>len(tb_stack)<g>0<block_start>frame=tb_stack.pop()<line_sep>ls=locals_stack.pop()<line_sep>print('\nInterpreter at file "{}", line {}, in {}:'.format(frame.filename frame.lineno frame.name))<line_sep>print(' {}'.format(frame.line.strip()))<line_sep>interact(local=ls)<block_end><block_end>#sys.excepthook = interactive_exception
# check dirs
<for_stmt>d ['models' 'runs' 'logs']<block_start><if_stmt><not>os.path.exists('./{}'.format(d))<block_start>os.mkdir('./{}'.format(d))<block_end><block_end># params
parser=argparse.ArgumentParser(description='Train a Transformer model.')<line_sep># data
parser.add_argument('--data_prefix' type=str default='../data/')<line_sep>parser.add_argument('--dataset' type=str default='iwslt' help='"flickr" or "iwslt"')<line_sep>parser.add_argument('--language' type=str default='ende' help='a combination of two language markers to show the language pair.')<line_sep>parser.add_argument('--load_vocab' action='store_true' help='load a pre-computed vocabulary')<line_sep>parser.add_argument('--load_dataset' action='store_true' help='load a pre-processed dataset')<line_sep>parser.add_argument('--use_revtok' action='store_true' help='use reversible tokenization')<line_sep>parser.add_argument('--level' type=str default='subword' help='for BPE, we must preprocess the dataset')<line_sep>parser.add_argument('--good_course' action='store_true' help='use beam-search output for distillation')<line_sep>parser.add_argument('--test_set' type=str default=<none> help='which test set to use')<line_sep>parser.add_argument('--max_len' type=int default=<none> help='limit the train set sentences to this many tokens')<line_sep>parser.add_argument('--remove_eos' action='store_true' help='possibly remove <eos> tokens for FastTransformer')<line_sep># model basic
parser.add_argument('--prefix' type=str default='' help='prefix to denote the model, nothing or [time]')<line_sep>parser.add_argument('--params' type=str default='james-iwslt' help='parameter sets: james-iwslt, t2t-base, etc')<line_sep>parser.add_argument('--fast' dest='model' action='store_const' const=FastTransformer default=Transformer help='use a single self-attn stack')<line_sep># model variants
parser.add_argument('--local' dest='windows' action='store_const' const=[1 3 5 7 -1] default=<none> help='use local attention')<line_sep>parser.add_argument('--causal' action='store_true' help='use causal attention')<line_sep>parser.add_argument('--positional_attention' action='store_true' help='incorporate positional information in key/value')<line_sep>parser.add_argument('--no_source' action='store_true')<line_sep>parser.add_argument('--use_mask' action='store_true' help='use src/trg mask during attention')<line_sep>parser.add_argument('--diag' action='store_true' help='ignore diagonal attention when doing self-attention.')<line_sep>parser.add_argument('--convblock' action='store_true' help='use ConvBlock instead of ResNet')<line_sep>parser.add_argument('--cosine_output' action='store_true' help='use cosine similarity as output layer')<line_sep>parser.add_argument('--noisy' action='store_true' help='inject noise in the attention mechanism: Beta-Gumbel softmax')<line_sep>parser.add_argument('--noise_samples' type=int default=0 help='only useful for noisy parallel decoding')<line_sep>parser.add_argument('--critic' action='store_true' help='use critic')<line_sep>parser.add_argument('--kernel_sizes' type=str default='2,3,4,5' help='kernel sizes of convnet critic')<line_sep>parser.add_argument('--kernel_num' type=int default=128 help='number of each kind of kernel')<line_sep>parser.add_argument('--use_wo' action='store_true' help='use output weight matrix in multihead attention')<line_sep>parser.add_argument('--share_embeddings' action='store_true' help='share embeddings between encoder and decoder')<line_sep>parser.add_argument('--use_alignment' action='store_true' help='use the aligned fake data to initialize')<line_sep>parser.add_argument('--hard_inputs' action='store_true' help='use hard selection as inputs, instead of soft-attention over embeddings.')<line_sep>parser.add_argument('--preordering' action='store_true' help='use the ground-truth reordering information')<line_sep>parser.add_argument('--use_posterior_order' action='store_true' help='directly use the groud-truth alignment for reordering.')<line_sep>parser.add_argument('--train_decoder_with_order' action='store_true' help='when training the decoder, use the ground-truth')<line_sep>parser.add_argument('--postordering' action='store_true' help='just have a try...')<line_sep>parser.add_argument('--fertility_only' action='store_true')<line_sep>parser.add_argument('--highway' action='store_true' help='usually false')<line_sep>parser.add_argument('--mix_of_experts' action='store_true')<line_sep>parser.add_argument('--orderless' action='store_true' help='for the inputs, remove the order information')<line_sep>parser.add_argument('--cheating' action='store_true' help='disable decoding, always use real fertility')<line_sep># running
parser.add_argument('--mode' type=str default='train' help='train, test or build')<line_sep>parser.add_argument('--gpu' type=int default=0 help='GPU to use or -1 for CPU')<line_sep>parser.add_argument('--seed' type=int default=19920206 help='seed for randomness')<line_sep>parser.add_argument('--eval-every' type=int default=1000 help='run dev every')<line_sep>parser.add_argument('--maximum_steps' type=int default=1000000 help='maximum steps you take to train a model')<line_sep>parser.add_argument('--disable_lr_schedule' action='store_true' help='disable the transformer learning rate')<line_sep>parser.add_argument('--batchsize' type=int default=2048 help='# of tokens processed per batch')<line_sep>parser.add_argument('--hidden_size' type=int default=<none> help='input the hidden size')<line_sep>parser.add_argument('--length_ratio' type=int default=2 help='maximum lengths of decoding')<line_sep>parser.add_argument('--optimizer' type=str default='Adam')<line_sep>parser.add_argument('--beam_size' type=int default=1 help='beam-size used in Beamsearch, default using greedy decoding')<line_sep>parser.add_argument('--alpha' type=float default=0.6 help='length normalization weights')<line_sep>parser.add_argument('--temperature' type=float default=1 help='smoothing temperature for noisy decoding')<line_sep>parser.add_argument('--multi_run' type=int default=1 help='we can run the code multiple times to get the best')<line_sep>parser.add_argument('--load_from' type=str default=<none> help='load from checkpoint')<line_sep>parser.add_argument('--resume' action='store_true' help='when loading from the saved model, it resumes from that.')<line_sep>parser.add_argument('--teacher' type=str default=<none> help='load a pre-trained auto-regressive model.')<line_sep>parser.add_argument('--share_encoder' action='store_true' help='use teacher-encoder to initialize student')<line_sep>parser.add_argument('--finetune_encoder' action='store_true' help='if further train the encoder')<line_sep>parser.add_argument('--seq_dist' action='store_true' help='knowledge distillation at sequence level')<line_sep>parser.add_argument('--word_dist' action='store_true' help='knowledge distillation at word level')<line_sep>parser.add_argument('--greedy_fertility' action='store_true' help='using the fertility generated by autoregressive model (only for seq_dist)')<line_sep>parser.add_argument('--fertility_mode' type=str default='argmax' help='mean, argmax or reinforce')<line_sep>parser.add_argument('--finetuning_truth' action='store_true' help='use ground-truth for finetuning')<line_sep>parser.add_argument('--trainable_teacher' action='store_true' help='have a trainable teacher')<line_sep>parser.add_argument('--only_update_errors' action='store_true' help='have a trainable teacher')<line_sep>parser.add_argument('--teacher_use_real' action='store_true' help='teacher also trained with MLE on real data')<line_sep>parser.add_argument('--max_cache' type=int default=0 help='save most recent max_cache decoded translations')<line_sep>parser.add_argument('--replay_every' type=int default=1000 help='every 1k updates, train the teacher again')<line_sep>parser.add_argument('--replay_times' type=int default=250 help='train the teacher again for 250k steps')<line_sep>parser.add_argument('--margin' type=float default=1.5 help='margin to make sure teacher will give higher score to real data')<line_sep>parser.add_argument('--real_data' action='store_true' help='only used in the reverse kl setting')<line_sep>parser.add_argument('--beta1' type=float 
default=0.5 help='balancing MLE and KL loss.')<line_sep>parser.add_argument('--beta2' type=float default=0.01 help='balancing the GAN loss.')<line_sep>parser.add_argument('--critic_only' type=int default=0 help='pre-training the critic model.')<line_sep>parser.add_argument('--st' action='store_true' help='straight through estimator')<line_sep>parser.add_argument('--entropy' action='store_true')<line_sep>parser.add_argument('--no_bpe' action='store_true' help='output files without BPE')<line_sep>parser.add_argument('--no_write' action='store_true' help='do not write the decoding into the decoding files.')<line_sep>parser.add_argument('--output_fer' action='store_true' help='decoding and output fertilities')<line_sep># debugging
parser.add_argument('--check' action='store_true' help='on training, only used to check on the test set.')<line_sep>parser.add_argument('--debug' action='store_true' help='debug mode: no saving or tensorboard')<line_sep>parser.add_argument('--tensorboard' action='store_true' help='use TensorBoard')<line_sep># old params
parser.add_argument('--old' action='store_true' help='this is used for solving conflicts of new codes')<line_sep>parser.add_argument('--hyperopt' action='store_true' help='use HyperOpt')<line_sep>parser.add_argument('--scst' action='store_true' help='use HyperOpt')<line_sep>parser.add_argument('--serve' type=int default=<none> help='serve at port')<line_sep>parser.add_argument('--attention_discrimination' action='store_true')<line_sep># ---------------------------------------------------------------------------------------------------------------- #
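# ----------------------------------------------------------------------------
# Illustrative invocation (the script name `run.py` is an assumption; the flags
# are the ones registered above):
#   python run.py --dataset iwslt --language ende --params james-iwslt \
#                 --fast --mode train --gpu 0 --eval-every 1000 --tensorboard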
args=parser.parse_args()<if_stmt>args.prefix<eq>'[time]'<block_start>args.prefix=strftime("%m.%d_%H.%M." gmtime())<block_end>args.kernel_sizes=[int(k)<for>k args.kernel_sizes.split(',')]<line_sep># get the language pairs:
args.src=args.language[:2]# source language
args.trg=args.language[2:]# target language
# logger settings
logger=logging.getLogger()<line_sep>logger.setLevel(logging.DEBUG)<line_sep>formatter=logging.Formatter('%(asctime)s %(levelname)s: - %(message)s' datefmt='%Y-%m-%d %H:%M:%S')<line_sep>fh=logging.FileHandler('./logs/log-{}.txt'.format(args.prefix))<line_sep>fh.setLevel(logging.DEBUG)<line_sep>fh.setFormatter(formatter)<line_sep>ch=logging.StreamHandler()<line_sep>ch.setLevel(logging.DEBUG)<line_sep>ch.setFormatter(formatter)<line_sep>logger.addHandler(ch)<line_sep>logger.addHandler(fh)<line_sep># setup random seeds
random.seed(args.seed)<line_sep>np.random.seed(args.seed)<line_sep>torch.manual_seed(args.seed)<line_sep>torch.cuda.manual_seed_all(args.seed)<line_sep># setup data-field
DataField=data.ReversibleField<if>args.use_revtok<else>NormalField<line_sep>tokenizer=revtok.tokenize<if>args.use_revtok<else><lambda>x:x.replace('@@ ' '').split()<line_sep>TRG=DataField(init_token='<init>' eos_token='<eos>' batch_first=<true>)<line_sep>SRC=DataField(batch_first=<true>)<if><not>args.share_embeddings<else>TRG<line_sep>ALIGN=data.Field(sequential=<true> preprocessing=data.Pipeline(<lambda>tok:int(tok.split('-')[0])) use_vocab=<false> pad_token=0 batch_first=<true>)<line_sep>FER=data.Field(sequential=<true> preprocessing=data.Pipeline(<lambda>tok:int(tok)) use_vocab=<false> pad_token=0 batch_first=<true>)<line_sep>align_dict,align_table=<none> <none><line_sep># setup many datasets (need to manually set up)
data_prefix=args.data_prefix<if_stmt>args.dataset<eq>'iwslt'<block_start><if_stmt>args.test_set<is><none><block_start>args.test_set='IWSLT16.TED.tst2013'<block_end><if_stmt>args.dist_set<is><none><block_start>args.dist_set='.dec.b1'<block_end><elif_stmt>args.greedy_fertility<block_start>logger.info('use the fertility predicted by autoregressive model (instead of fast-align)')<line_sep>train_data,dev_data=ParallelDataset.splits(path=data_prefix+'iwslt/en-de/' train='train.en-de.bpe.new' validation='IWSLT16.TED.tst2013.en-de.bpe.new.dev' exts=('.src.b1' '.trg.b1' '.dec.b1' '.fer' '.fer') fields=[('src' SRC) ('trg' TRG) ('dec' TRG) ('fer' FER) ('fer_dec' FER)] load_dataset=args.load_dataset prefix='ts')<block_end><elif_stmt>(args.mode<eq>'test')<or>(args.mode<eq>'test_noisy')<block_start>train_data,dev_data=NormalTranslationDataset.splits(path=data_prefix+'iwslt/en-de/' train='train.tags.en-de{}'.format('.bpe'<if><not>args.use_revtok<else>'') validation='{}.en-de{}'.format(args.test_set '.bpe'<if><not>args.use_revtok<else>'') exts=('.en' '.de') fields=(SRC TRG) load_dataset=args.load_dataset prefix='normal')<block_end><else_stmt><block_start>train_data,dev_data=ParallelDataset.splits(path=data_prefix+'iwslt/en-de/' train='train.tags.en-de.bpe' validation='train.tags.en-de.bpe.dev' exts=('.en2' '.de2' '.decoded2' '.aligned' '.decode.aligned' '.fer' '.decode.fer') fields=[('src' SRC) ('trg' TRG) ('dec' TRG) ('align' ALIGN) ('align_dec' ALIGN) ('fer' FER) ('fer_dec' FER)] load_dataset=args.load_dataset prefix='ts')<block_end>decoding_path=data_prefix+'iwslt/en-de/{}.en-de.bpe.new'<if_stmt>args.use_alignment<and>(args.model<is>FastTransformer)<block_start>align_dict={l.split()[0]:l.split()[1]<for>l open(data_prefix+'iwslt/en-de/train.tags.en-de.dict')}<block_end><block_end><elif_stmt>args.dataset<eq>'wmt16-ende'<block_start><if_stmt>args.test_set<is><none><block_start>args.test_set='newstest2013'<block_end><if_stmt>(args.mode<eq>'test')<or>(args.mode<eq>'test_noisy')<block_start>train_data,dev_data=NormalTranslationDataset.splits(path=data_prefix+'wmt16-ende/' train='newstest2013.tok.bpe.32000' validation='{}.tok.bpe.32000'.format(args.test_set) exts=('.{}'.format(args.src) '.{}'.format(args.trg)) fields=(SRC TRG) load_dataset=args.load_dataset prefix='real')<line_sep>decoding_path=data_prefix+'wmt16-ende/test.{}.{}'.format(args.prefix args.test_set)<block_end><elif_stmt><not>args.seq_dist<block_start>train_data,dev_data=NormalTranslationDataset.splits(path=data_prefix+'wmt16-ende/' train='train.tok.clean.bpe.32000' validation='{}.tok.bpe.32000'.format(args.test_set) exts=('.{}'.format(args.src) '.{}'.format(args.trg)) fields=(SRC TRG) load_dataset=args.load_dataset prefix='real')<line_sep>decoding_path=data_prefix+'wmt16-ende/{}.tok.bpe.decode'<block_end><else_stmt><block_start>train_data,dev_data=ParallelDataset.splits(path=data_prefix+'wmt16-ende/' train='train.tok.bpe.decode' validation='newstest2013.tok.bpe.decode.dev' exts=('.src.b1' '.trg.b1' '.dec.b1' '.real.aligned' '.fake.aligned' '.real.fer' '.fake.fer') fields=[('src' SRC) ('trg' TRG) ('dec' TRG) ('align' ALIGN) ('align_dec' ALIGN) ('fer' FER) ('fer_dec' FER)] load_dataset=args.load_dataset prefix='ts')<line_sep>decoding_path=data_prefix+'wmt16-ende/{}.tok.bpe.na'<block_end><if_stmt>args.use_alignment<and>(args.model<is>FastTransformer)<block_start>align_table={l.split()[0]:l.split()[1]<for>l 
open(data_prefix+'wmt16-ende/train.tok.bpe.decode.full.fastlign2.dict')}<block_end><block_end><elif_stmt>args.dataset<eq>'wmt16-deen'<block_start><if_stmt>args.test_set<is><none><block_start>args.test_set='newstest2013'<block_end><if_stmt>(args.mode<eq>'test')<or>(args.mode<eq>'test_noisy')<block_start>train_data,dev_data=NormalTranslationDataset.splits(path=data_prefix+'wmt16-ende/' train='newstest2013.tok.bpe.32000' validation='{}.tok.bpe.32000'.format(args.test_set) exts=('.{}'.format(args.src) '.{}'.format(args.trg)) fields=(SRC TRG) load_dataset=args.load_dataset prefix='real')<line_sep>decoding_path=data_prefix+'wmt16-ende/test.{}.{}'.format(args.prefix args.test_set)<block_end><elif_stmt><not>args.seq_dist<block_start>train_data,dev_data=NormalTranslationDataset.splits(path=data_prefix+'wmt16-deen/' train='train.tok.clean.bpe.32000' validation='{}.tok.bpe.32000'.format(args.test_set) exts=('.{}'.format(args.src) '.{}'.format(args.trg)) fields=(SRC TRG) load_dataset=args.load_dataset prefix='real')<line_sep>decoding_path=data_prefix+'wmt16-deen/{}.tok.bpe.decode'<block_end><else_stmt><block_start>train_data,dev_data=ParallelDataset.splits(path=data_prefix+'wmt16-deen/' train='train.tok.bpe.decode' validation='{}.tok.bpe.decode.dev'.format(args.test_set) exts=('.src.b1' '.trg.b1' '.dec.b1' '.real.aligned' '.fake.aligned' '.real.fer' '.fake.fer') fields=[('src' SRC) ('trg' TRG) ('dec' TRG) ('align' ALIGN) ('align_dec' ALIGN) ('fer' FER) ('fer_dec' FER)] load_dataset=args.load_dataset prefix='ts')<line_sep>decoding_path=data_prefix+'wmt16-deen/{}.tok.bpe.na'<block_end><if_stmt>args.use_alignment<and>(args.model<is>FastTransformer)<block_start>align_table={l.split()[0]:l.split()[1]<for>l open(data_prefix+'wmt16-deen/train.tok.bpe.decode.full.fastlign2.dict')}<block_end><block_end><elif_stmt>args.dataset<eq>'wmt16-enro'<block_start><if_stmt>args.test_set<is><none><block_start>args.test_set='dev'<block_end><if_stmt>(args.mode<eq>'test')<or>(args.mode<eq>'test_noisy')<block_start>train_data,dev_data=NormalTranslationDataset.splits(path=data_prefix+'wmt16-enro/' train='dev.bpe' validation='{}.bpe'.format(args.test_set) exts=('.{}'.format(args.src) '.{}'.format(args.trg)) fields=(SRC TRG) load_dataset=args.load_dataset prefix='real')<line_sep>decoding_path=data_prefix+'wmt16-enro/{}.bpe.decode'<block_end><elif_stmt><not>args.seq_dist<block_start>train_data,dev_data=NormalTranslationDataset.splits(path=data_prefix+'wmt16-enro/' train='corpus.bpe' validation='{}.bpe'.format(args.test_set) exts=('.{}'.format(args.src) '.{}'.format(args.trg)) fields=(SRC TRG) load_dataset=args.load_dataset prefix='real')<line_sep>decoding_path=data_prefix+'wmt16-enro/{}.bpe.decode'<block_end><else_stmt><block_start>train_data,dev_data=ParallelDataset.splits(path=data_prefix+'wmt16-enro/' train='train.bpe.decode' validation='dev.bpe.decode.dev' exts=('.src.b1' '.trg.b1' '.dec.b1' '.real.aligned' '.fake.aligned' '.real.fer' '.fake.fer') fields=[('src' SRC) ('trg' TRG) ('dec' TRG) ('align' ALIGN) ('align_dec' ALIGN) ('fer' FER) ('fer_dec' FER)] load_dataset=args.load_dataset prefix='ts')<line_sep>decoding_path=data_prefix+'wmt16-enro/{}.tok.bpe.na'<block_end><if_stmt>args.use_alignment<and>(args.model<is>FastTransformer)<block_start>align_table={l.split()[0]:l.split()[1]<for>l 
open(data_prefix+'wmt16-enro/train.bpe.decode.full.fastlign2.dict')}<block_end><block_end><elif_stmt>args.dataset<eq>'wmt16-roen'<block_start><if_stmt>args.test_set<is><none><block_start>args.test_set='dev'<block_end><if_stmt>(args.mode<eq>'test')<or>(args.mode<eq>'test_noisy')<block_start>train_data,dev_data=NormalTranslationDataset.splits(path=data_prefix+'wmt16-roen/' train='dev.bpe' validation='{}.bpe'.format(args.test_set) exts=('.{}'.format(args.src) '.{}'.format(args.trg)) fields=(SRC TRG) load_dataset=args.load_dataset prefix='real')<line_sep>decoding_path=data_prefix+'wmt16-roen/{}.bpe.decode'<block_end><elif_stmt><not>args.seq_dist<block_start>train_data,dev_data=NormalTranslationDataset.splits(path=data_prefix+'wmt16-roen/' train='corpus.bpe' validation='{}.bpe'.format(args.test_set) exts=('.{}'.format(args.src) '.{}'.format(args.trg)) fields=(SRC TRG) load_dataset=args.load_dataset prefix='real')<line_sep>decoding_path=data_prefix+'wmt16-roen/{}.bpe.decode'<block_end><else_stmt><block_start>train_data,dev_data=ParallelDataset.splits(path=data_prefix+'wmt16-roen/' train='train.bpe.decode' validation='dev.bpe.decode.dev' exts=('.src.b1' '.trg.b1' '.dec.b1' '.real.aligned' '.fake.aligned' '.real.fer' '.fake.fer') fields=[('src' SRC) ('trg' TRG) ('dec' TRG) ('align' ALIGN) ('align_dec' ALIGN) ('fer' FER) ('fer_dec' FER)] load_dataset=args.load_dataset prefix='ts')<line_sep>decoding_path=data_prefix+'wmt16-roen/{}.tok.bpe.na'<block_end><if_stmt>args.use_alignment<and>(args.model<is>FastTransformer)<block_start>align_table={l.split()[0]:l.split()[1]<for>l open(data_prefix+'wmt16-roen/train.bpe.decode.full.fastlign2.dict')}<block_end><block_end><else_stmt><block_start><raise>NotImplementedError<block_end># build word-level vocabularies
<if_stmt>args.load_vocab<and>os.path.exists(data_prefix+'{}/vocab{}_{}.pt'.format(args.dataset 'shared'<if>args.share_embeddings<else>'' '{}-{}'.format(args.src args.trg)))<block_start>logger.info('load saved vocabulary.')<line_sep>src_vocab,trg_vocab=torch.load(data_prefix+'{}/vocab{}_{}.pt'.format(args.dataset 'shared'<if>args.share_embeddings<else>'' '{}-{}'.format(args.src args.trg)))<line_sep>SRC.vocab=src_vocab<line_sep>TRG.vocab=trg_vocab<block_end><else_stmt><block_start>logger.info('save the vocabulary')<if_stmt><not>args.share_embeddings<block_start>SRC.build_vocab(train_data dev_data max_size=50000)<block_end>TRG.build_vocab(train_data dev_data max_size=50000)<line_sep>torch.save([SRC.vocab TRG.vocab] data_prefix+'{}/vocab{}_{}.pt'.format(args.dataset 'shared'<if>args.share_embeddings<else>'' '{}-{}'.format(args.src args.trg)))<block_end>args.__dict__.update({'trg_vocab':len(TRG.vocab) 'src_vocab':len(SRC.vocab)})<line_sep># build alignments ---
<if_stmt>align_dict<is><not><none><block_start>align_table=[TRG.vocab.stoi['<init>']<for>_ range(len(SRC.vocab.itos))]<for_stmt>src align_dict<block_start>align_table[SRC.vocab.stoi[src]]=TRG.vocab.stoi[align_dict[src]]<block_end>align_table[0]=0# --<unk>
align_table[1]=1<block_end># --<pad>
<def_stmt>dyn_batch_with_padding new i sofar<block_start>prev_max_len=sofar/(i-1)<if>i<g>1<else>0<if_stmt>args.seq_dist<block_start><return>max(len(new.src) len(new.trg) len(new.dec) prev_max_len)<times>i<block_end><else_stmt><block_start><return>max(len(new.src) len(new.trg) prev_max_len)<times>i<block_end><block_end><def_stmt>dyn_batch_without_padding new i sofar<block_start><if_stmt>args.seq_dist<block_start><return>sofar+max(len(new.src) len(new.trg) len(new.dec))<block_end><else_stmt><block_start><return>sofar+max(len(new.src) len(new.trg))<block_end><block_end># build the dataset iterators
# work around torchtext making it hard to share vocabs without sharing other field properties
<if_stmt>args.share_embeddings<block_start>SRC=copy.deepcopy(SRC)<line_sep>SRC.init_token=<none><line_sep>SRC.eos_token=<none><line_sep>train_data.fields['src']=SRC<line_sep>dev_data.fields['src']=SRC<block_end><if_stmt>(args.model<is>FastTransformer)<and>(args.remove_eos)<block_start>TRG.eos_token=<none><block_end><if_stmt>args.max_len<is><not><none><block_start>train_data.examples=[ex<for>ex train_data.examples<if>len(ex.trg)<le>args.max_len]<block_end><if_stmt>args.batchsize<eq>1# speed-test: one sentence per batch.
<block_start>batch_size_fn=<lambda>new count sofar:count<block_end><else_stmt><block_start>batch_size_fn=dyn_batch_without_padding<if>args.model<is>Transformer<else>dyn_batch_with_padding<block_end>train_real,dev_real=data.BucketIterator.splits((train_data dev_data) batch_sizes=(args.batchsize args.batchsize) device=args.gpu batch_size_fn=batch_size_fn repeat=<none><if>args.mode<eq>'train'<else><false>)<line_sep>logger.info("build the dataset. done!")<line_sep># model hyper-params:
hparams=<none><if_stmt>args.dataset<eq>'iwslt'<block_start><if_stmt>args.params<eq>'james-iwslt'<block_start>hparams={'d_model':278 'd_hidden':507 'n_layers':5 'n_heads':2 'drop_ratio':0.079 'warmup':746}<line_sep># ~32
<block_end><elif_stmt>args.params<eq>'james-iwslt2'<block_start>hparams={'d_model':278 'd_hidden':2048 'n_layers':5 'n_heads':2 'drop_ratio':0.079 'warmup':746}<line_sep># ~32
<block_end>teacher_hparams={'d_model':278 'd_hidden':507 'n_layers':5 'n_heads':2 'drop_ratio':0.079 'warmup':746}<block_end><elif_stmt>args.dataset<eq>'wmt16-ende'<block_start>logger.info('use default parameters of t2t-base')<line_sep>hparams={'d_model':512 'd_hidden':512 'n_layers':6 'n_heads':8 'drop_ratio':0.1 'warmup':16000}<line_sep># ~32
teacher_hparams=hparams<block_end><elif_stmt>args.dataset<eq>'wmt16-deen'<block_start>logger.info('use default parameters of t2t-base')<line_sep>hparams={'d_model':512 'd_hidden':512 'n_layers':6 'n_heads':8 'drop_ratio':0.1 'warmup':16000}<line_sep># ~32
teacher_hparams=hparams<block_end><elif_stmt>args.dataset<eq>'wmt16-enro'<block_start>logger.info('use default parameters of t2t-base')<line_sep>hparams={'d_model':512 'd_hidden':512 'n_layers':6 'n_heads':8 'drop_ratio':0.1 'warmup':16000}<line_sep># ~32
teacher_hparams=hparams<block_end><elif_stmt>args.dataset<eq>'wmt16-roen'<block_start>logger.info('use default parameters of t2t-base')<line_sep>hparams={'d_model':512 'd_hidden':512 'n_layers':6 'n_heads':8 'drop_ratio':0.1 'warmup':16000}<line_sep># ~32
teacher_hparams=hparams<block_end><if_stmt>hparams<is><none><block_start>logger.info('use default parameters of t2t-base')<line_sep>hparams={'d_model':512 'd_hidden':512 'n_layers':6 'n_heads':8 'drop_ratio':0.1 'warmup':16000}<block_end># ~32
<if_stmt>args.teacher<is><not><none><block_start>teacher_args=copy.deepcopy(args)<line_sep>teacher_args.__dict__.update(teacher_hparams)<block_end>args.__dict__.update(hparams)<if_stmt>args.hidden_size<is><not><none><block_start>args.d_hidden=args.hidden_size<block_end># show the arg:
logger.info(args)<line_sep>hp_str=(f"{args.dataset}_{args.level}_{'fast_'<if>args.model<is>FastTransformer<else>''}"<concat>f"{args.d_model}_{args.d_hidden}_{args.n_layers}_{args.n_heads}_"<concat>f"{args.drop_ratio:.3f}_{args.warmup}_"<concat>f"{args.xe_until<if>hasattr(args 'xe_until')<else>''}_"<concat>f"{f'{args.xe_ratio:.3f}'<if>hasattr(args 'xe_ratio')<else>''}_"<concat>f"{args.xe_every<if>hasattr(args 'xe_every')<else>''}")<line_sep>logger.info(f'Starting with HPARAMS: {hp_str}')<line_sep>model_name='./models/'+args.prefix+hp_str<line_sep># build the model
model=args.model(SRC TRG args)<if_stmt>args.load_from<is><not><none><block_start><with_stmt>torch.cuda.device(args.gpu)# very important.
<block_start>model.load_state_dict(torch.load('./models/'+args.load_from+'.pt' map_location=<lambda>storage loc:storage.cuda()))<line_sep># load the pretrained models.
<block_end><block_end><if_stmt>args.critic<block_start>model.install_critic()<block_end># logger.info(str(model))
# if using a teacher
<if_stmt>args.teacher<is><not><none><block_start>teacher_model=Transformer(SRC TRG teacher_args)<with_stmt>torch.cuda.device(args.gpu)<block_start>teacher_model.load_state_dict(torch.load('./models/'+args.teacher+'.pt' map_location=<lambda>storage loc:storage.cuda()))<block_end><for_stmt>params teacher_model.parameters()<block_start><if_stmt>args.trainable_teacher<block_start>params.requires_grad=<true><block_end><else_stmt><block_start>params.requires_grad=<false><block_end><block_end><if_stmt>(args.share_encoder)<and>(args.load_from<is><none>)<block_start>model.encoder=copy.deepcopy(teacher_model.encoder)<for_stmt>params model.encoder.parameters()<block_start><if_stmt>args.finetune_encoder<block_start>params.requires_grad=<true><block_end><else_stmt><block_start>params.requires_grad=<false><block_end><block_end><block_end><block_end><else_stmt><block_start>teacher_model=<none><block_end># use cuda
<if_stmt>args.gpu<g>-1<block_start>model.cuda(args.gpu)<if_stmt>align_table<is><not><none><block_start>align_table=torch.LongTensor(align_table).cuda(args.gpu)<line_sep>align_table=Variable(align_table)<line_sep>model.alignment=align_table<block_end><if_stmt>args.teacher<is><not><none><block_start>teacher_model.cuda(args.gpu)<block_end><block_end><def_stmt>register_nan_checks m<block_start><def_stmt>check_grad module grad_input grad_output<block_start><if_stmt>any(np.any(np.isnan(gi.data.cpu().numpy()))<for>gi grad_input<if>gi<is><not><none>)<block_start>print('NaN gradient in '+type(module).__name__)<line_sep>1/0<block_end><block_end>m.apply(<lambda>module:module.register_backward_hook(check_grad))<block_end><def_stmt>get_learning_rate i lr0=0.1<block_start><if_stmt><not>args.disable_lr_schedule<block_start><return>lr0<times>10/math.sqrt(args.d_model)<times>min(1/math.sqrt(i) i/(args.warmup<times>math.sqrt(args.warmup)))<block_end><return>0.00002<block_end><def_stmt>export x<block_start><try_stmt><block_start><with_stmt>torch.cuda.device(args.gpu)<block_start><return>x.data.cpu().float().mean()<block_end><block_end><except_stmt>Exception<block_start><return>0<block_end><block_end><def_stmt>devol batch<block_start>new_batch=copy.copy(batch)<line_sep>new_batch.src=Variable(batch.src.data volatile=<true>)<line_sep><return>new_batch<block_end># register_nan_checks(model)
# register_nan_checks(teacher_model)
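# ----------------------------------------------------------------------------
# Standalone sketch (illustrative only) of the Noam-style schedule implemented
# by get_learning_rate() above: the rate grows roughly linearly for `warmup`
# steps and then decays as 1/sqrt(step). Default constants mirror the
# james-iwslt hyper-parameters used later in this file.
def _example_lr_schedule(step, d_model=278, warmup=746, lr0=0.1):
    return lr0 * 10 / math.sqrt(d_model) * min(
        1 / math.sqrt(step), step / (warmup * math.sqrt(warmup)))

# _example_lr_schedule(1) is tiny, the value peaks around step == warmup,
# and it decays slowly afterwards.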
<def_stmt>valid_model model dev dev_metrics=<none> distillation=<false> print_out=<false> teacher_model=<none><block_start>print_seqs=['[sources]' '[targets]' '[decoded]' '[fertili]' '[origind]']<line_sep>trg_outputs,dec_outputs=[] []<line_sep>outputs={}<line_sep>model.eval()<if_stmt>teacher_model<is><not><none><block_start>teacher_model.eval()<block_end><for_stmt>j,dev_batch enumerate(dev)# decode from the model (whatever Transformer or FastTransformer)
<block_start>torch.cuda.nvtx.range_push('quick_prepare')<line_sep>inputs,input_masks,targets,target_masks,sources,source_masks,encoding,batch_size=model.quick_prepare(dev_batch distillation)<line_sep>torch.cuda.nvtx.range_pop()<line_sep>torch.cuda.nvtx.range_push('prepare_initial')<line_sep>decoder_inputs,input_reorder,reordering_cost=inputs <none> <none><if_stmt>type(model)<is>FastTransformer# batch_align = dev_batch.align_dec if distillation else dev_batch.align
<block_start>batch_align=<none><line_sep>batch_fer=dev_batch.fer_dec<if>distillation<else>dev_batch.fer<line_sep># if args.postordering:
#
# targets_sorted = targets.gather(1, align_index)
# batch_align_sorted, align_index = masked_sort(batch_align, target_masks) # change the target index, batch x max_trg
decoder_inputs,input_reorder,decoder_masks,reordering_cost=model.prepare_initial(encoding sources source_masks input_masks batch_align batch_fer decoding=(<not>args.cheating) mode='argmax')<block_end><else_stmt><block_start>decoder_masks=input_masks<block_end>torch.cuda.nvtx.range_pop()<line_sep>torch.cuda.nvtx.range_push('model')<line_sep>decoding,out,probs=model(encoding source_masks decoder_inputs decoder_masks decoding=<true> return_probs=<true>)<line_sep>torch.cuda.nvtx.range_pop()<line_sep>torch.cuda.nvtx.range_push('batched_cost')<line_sep>loss=0<if_stmt>args.postordering<block_start><if_stmt>args.cheating<block_start>decoding1=unsorted(decoding align_index)<block_end><else_stmt><block_start>positions=model.predict_offset(out decoder_masks <none>)<line_sep>shifted_index=positions.sort(1)[1]<line_sep>decoding1=unsorted(decoding shifted_index)<block_end><block_end><else_stmt><block_start>decoding1=decoding<block_end># loss = model.batched_cost(targets, target_masks, probs)
torch.cuda.nvtx.range_pop()<line_sep>torch.cuda.nvtx.range_push('output_decoding')<line_sep>dev_outputs=[model.output_decoding(d)<for>d [('src' sources) ('trg' targets) ('trg' decoding1) ('src' input_reorder)]]<if_stmt>args.postordering<block_start>dev_outputs<augadd>[model.output_decoding(('trg' decoding))]<block_end>torch.cuda.nvtx.range_pop()<line_sep>torch.cuda.nvtx.range_push('computeGLEU')<line_sep>gleu=computeGLEU(dev_outputs[2] dev_outputs[1] corpus=<false> tokenizer=tokenizer)<line_sep>torch.cuda.nvtx.range_pop()<if_stmt>print_out<block_start><for_stmt>k,d enumerate(dev_outputs)<block_start>logger.info("{}: {}".format(print_seqs[k] d[0]))<block_end>logger.info('------------------------------------------------------------------')<block_end><if_stmt>teacher_model<is><not><none># teacher is Transformer, student is FastTransformer
<block_start>inputs_student,_,targets_student,_,_,_,encoding_teacher,_=teacher_model.quick_prepare(dev_batch <false> decoding decoding input_masks target_masks source_masks)<line_sep>teacher_real_loss=teacher_model.cost(targets target_masks out=teacher_model(encoding_teacher source_masks inputs input_masks))<line_sep>teacher_fake_out=teacher_model(encoding_teacher source_masks inputs_student input_masks)<line_sep>teacher_fake_loss=teacher_model.cost(targets_student target_masks out=teacher_fake_out)<line_sep>teacher_alter_loss=teacher_model.cost(targets target_masks out=teacher_fake_out)<block_end>trg_outputs<augadd>dev_outputs[1]<line_sep>dec_outputs<augadd>dev_outputs[2]<if_stmt>dev_metrics<is><not><none><block_start>values=[loss gleu]<if_stmt>teacher_model<is><not><none><block_start>values<augadd>[teacher_real_loss teacher_fake_loss teacher_real_loss-teacher_fake_loss teacher_alter_loss teacher_alter_loss-teacher_fake_loss]<block_end><if_stmt>reordering_cost<is><not><none><block_start>values<augadd>[reordering_cost]<block_end>dev_metrics.accumulate(batch_size *values)<block_end><block_end>corpus_gleu=computeGLEU(dec_outputs trg_outputs corpus=<true> tokenizer=tokenizer)<line_sep>corpus_bleu=computeBLEU(dec_outputs trg_outputs corpus=<true> tokenizer=tokenizer)<line_sep>outputs['corpus_gleu']=corpus_gleu<line_sep>outputs['corpus_bleu']=corpus_bleu<if_stmt>dev_metrics<is><not><none><block_start>logger.info(dev_metrics)<block_end>logger.info("The dev-set corpus GLEU = {}".format(corpus_gleu))<line_sep>logger.info("The dev-set corpus BLEU = {}".format(corpus_bleu))<line_sep><return>outputs<block_end><def_stmt>train_model model train dev teacher_model=<none><block_start><if_stmt>args.tensorboard<and>(<not>args.debug)<block_start><import_from_stmt>tensorboardX SummaryWriter<line_sep>writer=SummaryWriter('./runs/{}'.format(args.prefix+hp_str))<block_end># optimizer
<if_stmt>args.optimizer<eq>'Adam'<block_start>opt=torch.optim.Adam([p<for>p model.parameters()<if>p.requires_grad] betas=(0.9 0.98) eps=1e-9)<if_stmt>args.trainable_teacher<block_start>opt_teacher=torch.optim.Adam([p<for>p teacher_model.parameters()<if>p.requires_grad] betas=(0.9 0.98) eps=1e-9)<block_end><block_end><elif_stmt>args.optimizer<eq>'RMSprop'<block_start>opt=torch.optim.RMSprop([p<for>p model.parameters()<if>p.requires_grad] eps=1e-9)<if_stmt>args.trainable_teacher<block_start>opt_teacher=torch.optim.RMSprop([p<for>p teacher_model.parameters()<if>p.requires_grad] eps=1e-9)<block_end><block_end><else_stmt><block_start><raise>NotImplementedError<block_end># if resume training
<if_stmt>(args.load_from<is><not><none>)<and>(args.resume)<block_start><with_stmt>torch.cuda.device(args.gpu)# very important.
<block_start>offset,opt_states=torch.load('./models/'+args.load_from+'.pt.states' map_location=<lambda>storage loc:storage.cuda())<line_sep>opt.load_state_dict(opt_states)<block_end><block_end><else_stmt><block_start>offset=0<block_end># metrics
best=Best(max 'corpus_bleu' 'corpus_gleu' 'gleu' 'loss' 'i' model=model opt=opt path=model_name gpu=args.gpu)<line_sep>train_metrics=Metrics('train' 'loss' 'real' 'fake')<line_sep>dev_metrics=Metrics('dev' 'loss' 'gleu' 'real_loss' 'fake_loss' 'distance' 'alter_loss' 'distance2' 'reordering_loss' 'corpus_gleu')<line_sep>progressbar=tqdm(total=args.eval_every desc='start training.')<line_sep># cache
<if_stmt>args.max_cache<g>0<block_start>caches=Cache(args.max_cache args.gpu)<block_end><for_stmt>iters,batch enumerate(train)<block_start>iters<augadd>offset<if_stmt>iters<g>args.maximum_steps<block_start>logger.info('reach the maximum updating steps.')<line_sep><break><block_end><if_stmt>iters%args.eval_every<eq>0<block_start>progressbar.close()<line_sep>dev_metrics.reset()<if_stmt>args.seq_dist<block_start>outputs_course=valid_model(model dev dev_metrics distillation=<true> teacher_model=<none>)<block_end>#teacher_model=teacher_model)
<if_stmt>args.trainable_teacher<block_start>outputs_teacher=valid_model(teacher_model dev <none>)<block_end>outputs_data=valid_model(model dev <none><if>args.seq_dist<else>dev_metrics teacher_model=<none> print_out=<true>)<if_stmt>args.tensorboard<and>(<not>args.debug)<block_start>writer.add_scalar('dev/GLEU_sentence_' dev_metrics.gleu iters)<line_sep>writer.add_scalar('dev/Loss' dev_metrics.loss iters)<line_sep>writer.add_scalar('dev/GLEU_corpus_' outputs_data['corpus_gleu'] iters)<line_sep>writer.add_scalar('dev/BLEU_corpus_' outputs_data['corpus_bleu'] iters)<if_stmt>args.seq_dist<block_start>writer.add_scalar('dev/GLEU_corpus_dis' outputs_course['corpus_gleu'] iters)<line_sep>writer.add_scalar('dev/BLEU_corpus_dis' outputs_course['corpus_bleu'] iters)<block_end><if_stmt>args.trainable_teacher<block_start>writer.add_scalar('dev/GLEU_corpus_teacher' outputs_teacher['corpus_gleu'] iters)<line_sep>writer.add_scalar('dev/BLEU_corpus_teacher' outputs_teacher['corpus_bleu'] iters)<block_end><if_stmt>args.teacher<is><not><none><block_start>writer.add_scalar('dev/Teacher_real_loss' dev_metrics.real_loss iters)<line_sep>writer.add_scalar('dev/Teacher_fake_loss' dev_metrics.fake_loss iters)<line_sep>writer.add_scalar('dev/Teacher_alter_loss' dev_metrics.alter_loss iters)<line_sep>writer.add_scalar('dev/Teacher_distance' dev_metrics.distance iters)<line_sep>writer.add_scalar('dev/Teacher_distance2' dev_metrics.distance2 iters)<block_end><if_stmt>args.preordering<block_start>writer.add_scalar('dev/Reordering_loss' dev_metrics.reordering_loss iters)<block_end><block_end><if_stmt><not>args.debug<block_start>best.accumulate(outputs_data['corpus_bleu'] outputs_data['corpus_gleu'] dev_metrics.gleu dev_metrics.loss iters)<line_sep>logger.info('the best model is achieved at {}, average greedy GLEU={}, corpus GLEU={}, corpus BLEU={}'.format(best.i best.gleu best.corpus_gleu best.corpus_bleu))<block_end>logger.info('model:'+args.prefix+hp_str)<line_sep># ---set-up a new progressor---
progressbar=tqdm(total=args.eval_every desc='start training.')<block_end># --- training --- #
# try:
model.train()<line_sep>opt.param_groups[0]['lr']=get_learning_rate(iters+1)<line_sep>opt.zero_grad()<line_sep># prepare the data
inputs,input_masks,targets,target_masks,sources,source_masks,encoding,batch_size=model.quick_prepare(batch args.seq_dist)<line_sep>input_reorder,reordering_cost,decoder_inputs=<none> <none> inputs<line_sep>batch_align=<none># batch.align_dec if args.seq_dist else batch.align
batch_fer=batch.fer_dec<if>args.seq_dist<else>batch.fer<line_sep># batch_align_sorted, align_index = masked_sort(batch_align, target_masks) # change the target indexxx, batch x max_trg
# print(batch_fer.size(), input_masks.size(), source_masks.size(), sources.size())
# Prepare_Initial
<if_stmt>type(model)<is>FastTransformer<block_start>inputs,input_reorder,input_masks,reordering_cost=model.prepare_initial(encoding sources source_masks input_masks batch_align batch_fer)<block_end># Maximum Likelihood Training
feedback={}<if_stmt><not>args.word_dist<block_start>loss=model.cost(targets target_masks out=model(encoding source_masks inputs input_masks positions=<none> feedback=feedback))<line_sep># train the reordering also using MLE??
<if_stmt>args.preordering<block_start>loss<augadd>reordering_cost<block_end><block_end><else_stmt># only used for FastTransformer: word-level adjustment
<block_start><if_stmt><not>args.preordering<block_start>decoding,out,probs=model(encoding source_masks inputs input_masks return_probs=<true> decoding=<true>)<line_sep>loss_student=model.batched_cost(targets target_masks probs)# student-loss (MLE)
decoder_masks=input_masks<block_end><else_stmt># Note that MLE and decoding has different translations. We need to run the same code twice
<block_start><if_stmt>args.finetuning_truth<block_start>decoding,out,probs=model(encoding source_masks inputs input_masks decoding=<true> return_probs=<true> feedback=feedback)<line_sep>loss_student=model.cost(targets target_masks out=out)<line_sep>decoder_masks=input_masks<block_end><else_stmt><block_start><if_stmt>args.fertility_mode<ne>'reinforce'<block_start>loss_student=model.cost(targets target_masks out=model(encoding source_masks inputs input_masks positions=<none> feedback=feedback))<line_sep>decoder_inputs,_,decoder_masks,_=model.prepare_initial(encoding sources source_masks input_masks batch_align batch_fer decoding=<true> mode=args.fertility_mode)<line_sep>decoding,out,probs=model(encoding source_masks decoder_inputs decoder_masks decoding=<true> return_probs=<true>)# decode again
<block_end><else_stmt># truth
<block_start>decoding,out,probs=model(encoding source_masks inputs input_masks decoding=<true> return_probs=<true> feedback=feedback)<line_sep>loss_student=model.cost(targets target_masks out=out)<line_sep>decoder_masks=input_masks<line_sep># baseline
decoder_inputs_b,_,decoder_masks_b,_=model.prepare_initial(encoding sources source_masks input_masks batch_align batch_fer decoding=<true> mode='mean')<line_sep>decoding_b,out_b,probs_b=model(encoding source_masks decoder_inputs_b decoder_masks_b decoding=<true> return_probs=<true>)# decode again
# reinforce
decoder_inputs_r,_,decoder_masks_r,_=model.prepare_initial(encoding sources source_masks input_masks batch_align batch_fer decoding=<true> mode='reinforce')<line_sep>decoding_r,out_r,probs_r=model(encoding source_masks decoder_inputs_r decoder_masks_r decoding=<true> return_probs=<true>)<block_end><block_end><block_end># decode again
# train the reordering also using MLE??
<if_stmt>args.preordering<block_start>loss_student<augadd>reordering_cost<block_end># teacher tries translation + look-at student's output
teacher_model.eval()<if_stmt>args.fertility_mode<ne>'reinforce'<block_start>inputs_student_index,_,targets_student_soft,_,_,_,encoding_teacher,_=model.quick_prepare(batch <false> decoding probs decoder_masks decoder_masks source_masks)<line_sep>out_teacher,probs_teacher=teacher_model(encoding_teacher source_masks inputs_student_index.detach() decoder_masks return_probs=<true>)<line_sep>loss_teacher=teacher_model.batched_cost(targets_student_soft decoder_masks probs_teacher.detach())<line_sep>loss=(1-args.beta1)<times>loss_teacher+args.beta1<times>loss_student<block_end># final results
<else_stmt><block_start>inputs_student_index,_,targets_student_soft,_,_,_,encoding_teacher,_=model.quick_prepare(batch <false> decoding probs decoder_masks decoder_masks source_masks)<line_sep>out_teacher,probs_teacher=teacher_model(encoding_teacher source_masks inputs_student_index.detach() decoder_masks return_probs=<true>)<line_sep>loss_teacher=teacher_model.batched_cost(targets_student_soft decoder_masks probs_teacher.detach())<line_sep>inputs_student_index,_=model.prepare_inputs(batch decoding_b <false> decoder_masks_b)<line_sep>targets_student_soft,_=model.prepare_targets(batch probs_b <false> decoder_masks_b)<line_sep>out_teacher,probs_teacher=teacher_model(encoding_teacher source_masks inputs_student_index.detach() decoder_masks_b return_probs=<true>)<line_sep>_,loss_1=teacher_model.batched_cost(targets_student_soft decoder_masks_b probs_teacher.detach() <true>)<line_sep>inputs_student_index,_=model.prepare_inputs(batch decoding_r <false> decoder_masks_r)<line_sep>targets_student_soft,_=model.prepare_targets(batch probs_r <false> decoder_masks_r)<line_sep>out_teacher,probs_teacher=teacher_model(encoding_teacher source_masks inputs_student_index.detach() decoder_masks_r return_probs=<true>)<line_sep>_,loss_2=teacher_model.batched_cost(targets_student_soft decoder_masks_r probs_teacher.detach() <true>)<line_sep>rewards=-(loss_2-loss_1).data<line_sep># if rewards.size(0) != 1:
rewards=rewards-rewards.mean()# ) / (rewards.std() + TINY)
rewards=rewards.expand_as(source_masks)<line_sep>rewards=rewards<times>source_masks<line_sep># print(model.predictor.saved_fertilities)
# print(batch.src.size())
model.predictor.saved_fertilities.reinforce(0.1<times>rewards.contiguous().view(-1 1))<line_sep>loss=(1-args.beta1)<times>loss_teacher+args.beta1<times>loss_student<block_end><block_end>#+ 0 * model.predictor.saved_fertilities.float().sum() # detect reinforce
# loss = 0 * model.predictor.saved_fertilities.float().sum() # detect reinforce
# accumulate the training metrics
train_metrics.accumulate(batch_size loss print_iter=<none>)<line_sep>train_metrics.reset()<line_sep># train the student
<if_stmt>args.preordering<and>args.fertility_mode<eq>'reinforce'<block_start>torch.autograd.backward((loss model.predictor.saved_fertilities) (torch.ones(1).cuda(loss.get_device()) <none>))<block_end><else_stmt><block_start>loss.backward()<block_end># torch.nn.utils.clip_grad_norm(model.parameters(), 1)
opt.step()<line_sep>info='training step={}, loss={:.3f}, lr={:.5f}'.format(iters export(loss) opt.param_groups[0]['lr'])<if_stmt>args.word_dist<block_start>info<augadd>'| NA:{:.3f}, AR:{:.3f}'.format(export(loss_student) export(loss_teacher))<block_end><if_stmt>args.trainable_teacher<and>(args.max_cache<le>0)<block_start>loss_alter,loss_worse=export(loss_alter) export(loss_worse)<line_sep>info<augadd>'| AL:{:.3f}, WO:{:.3f}'.format(loss_alter loss_worse)<block_end><if_stmt>args.preordering<block_start>info<augadd>'| RE:{:.3f}'.format(export(reordering_cost))<block_end><if_stmt>args.fertility_mode<eq>'reinforce'<block_start>info<augadd>'| RL: {:.3f}'.format(export(rewards.mean()))<block_end><if_stmt>args.max_cache<g>0<block_start>info<augadd>'| caches={}'.format(len(caches.cache))<block_end><if_stmt>args.tensorboard<and>(<not>args.debug)<block_start>writer.add_scalar('train/Loss' export(loss) iters)<block_end>progressbar.update(1)<line_sep>progressbar.set_description(info)<line_sep># continue-training the teacher model
<if_stmt>args.trainable_teacher<block_start><if_stmt>args.max_cache<g>0<block_start>caches.add([batch.src batch.trg batch.dec decoding])<block_end># experience-reply
# trainable teacher: uses old experience to train
<if_stmt>(iters+1)%args.replay_every<eq>0# ---set-up a new progressor: teacher training--- #
<block_start>progressbar_teacher=tqdm(total=args.replay_times desc='start training the teacher.')<for_stmt>j range(args.replay_times)<block_start>opt_teacher.param_groups[0]['lr']=get_learning_rate(iters+1)<line_sep>opt_teacher.zero_grad()<line_sep>src,trg,dec,decoding=caches.sample()<line_sep>batch=Batch(src trg dec)<line_sep>inputs,input_masks,targets,target_masks,sources,source_masks,encoding_teacher,batch_size=teacher_model.quick_prepare(batch (<not>args.teacher_use_real))<line_sep>inputs_students,_=teacher_model.prepare_inputs(batch decoding masks=input_masks)<line_sep>loss_alter=teacher_model.cost(targets target_masks out=teacher_model(encoding_teacher source_masks inputs_students input_masks))<line_sep>loss_worse=teacher_model.cost(targets target_masks out=teacher_model(encoding_teacher source_masks inputs input_masks))<line_sep>loss2=loss_alter+loss_worse<line_sep>loss2.backward()<line_sep>opt_teacher.step()<line_sep>info='teacher step={}, loss={:.3f}, alter={:.3f}, worse={:.3f}'.format(j export(loss2) export(loss_alter) export(loss_worse))<line_sep>progressbar_teacher.update(1)<line_sep>progressbar_teacher.set_description(info)<block_end>progressbar_teacher.close()<block_end><block_end># except Exception as e:
# logger.warn('caught an exception: {}'.format(e))
<block_end><block_end><def_stmt>decode_model model train_real dev_real evaluate=<true> decoding_path=<none> names=['en' 'de' 'decode']<block_start><if_stmt>train_real<is><none><block_start>logger.info('decoding from the devlopment set. beamsize={}, alpha={}'.format(args.beam_size args.alpha))<line_sep>dev=dev_real<block_end><else_stmt><block_start>logger.info('decoding from the training set. beamsize={}, alpha={}'.format(args.beam_size args.alpha))<line_sep>dev=train_real<line_sep>dev.train=<false><block_end># make the Iterator create Variables with volatile=True so no graph is built
progressbar=tqdm(total=sum([1<for>_ dev]) desc='start decoding')<line_sep>model.eval()<if_stmt>decoding_path<is><not><none><block_start>decoding_path=decoding_path.format(args.test_set<if>train_real<is><none><else>'train')<line_sep>handle_dec=open(decoding_path+'.{}'.format(names[2]) 'w')<line_sep>handle_src=open(decoding_path+'.{}'.format(names[0]) 'w')<line_sep>handle_trg=open(decoding_path+'.{}'.format(names[1]) 'w')<if_stmt>args.output_fer<block_start>handle_fer=open(decoding_path+'.{}'.format('fer') 'w')<block_end><block_end>corpus_size=0<line_sep>src_outputs,trg_outputs,dec_outputs,timings=[] [] [] []<line_sep>decoded_words,target_words,decoded_info=0 0 0<line_sep>attentions=<none>#{'source': None, 'target': None}
pad_id=model.decoder.field.vocab.stoi['<pad>']<line_sep>eos_id=model.decoder.field.vocab.stoi['<eos>']<line_sep>curr_time=0<for_stmt>iters,dev_batch enumerate(dev)<block_start>start_t=time.time()<line_sep>inputs,input_masks,targets,target_masks,sources,source_masks,encoding,batch_size=model.quick_prepare(dev_batch)<if_stmt>args.model<is>FastTransformer<block_start>decoder_inputs,input_reorder,decoder_masks,_=model.prepare_initial(encoding sources source_masks input_masks <none> <none> decoding=<true> mode=args.fertility_mode)<block_end><else_stmt><block_start>decoder_inputs,decoder_masks=inputs input_masks<block_end>decoding=model(encoding source_masks decoder_inputs decoder_masks beam=args.beam_size alpha=args.alpha decoding=<true> feedback=attentions)<line_sep>used_t=time.time()-start_t<line_sep>curr_time<augadd>used_t<line_sep>real_mask=1-((decoding.data<eq>eos_id)+(decoding.data<eq>pad_id)).float()<line_sep>outputs=[model.output_decoding(d)<for>d [('src' sources) ('trg' targets) ('trg' decoding)]]<def_stmt>DHondt approx mask<block_start>L=mask.size(1)<line_sep>w=torch.arange(1 2<times>L 2)<if_stmt>approx.is_cuda<block_start>w=w.cuda(approx.get_device())<block_end>w=1/w# 1, 1/2, 1/3, ...
approx=approx[: : <none>]@w[<none> :]# B x Ts x Tt
approx=approx.view(approx.size(0) -1)# B x (Ts x Tt)
appinx=approx.topk(L 1)[1]# B x Tt (index)
fertility=approx.new(*approx.size()).fill_(0).scatter_(1 appinx mask)<line_sep>fertility=fertility.contiguous().view(mask.size(0) -1 mask.size(1)).sum(2).long()<line_sep><return>fertility<block_end><def_stmt>cutoff s t<block_start><for_stmt>i range(len(s) 0 -1)<block_start><if_stmt>s[i-1]<ne>t<block_start><return>s[:i]<block_end><block_end><raise>IndexError<block_end><if_stmt>args.output_fer<block_start>source_attention=attentions['source'].data.mean(1).transpose(2 1)# B x Ts x Tt
source_attention<augmul>real_mask[: <none> :]<line_sep>approx_fertility=source_attention.sum(2)# B x Ts
fertility=DHondt(approx_fertility real_mask)<block_end>corpus_size<augadd>batch_size<line_sep>src_outputs<augadd>outputs[0]<line_sep>trg_outputs<augadd>outputs[1]<line_sep>dec_outputs<augadd>outputs[2]<line_sep>timings<augadd>[used_t]<if_stmt>decoding_path<is><not><none><block_start><for_stmt>s,t,d zip(outputs[0] outputs[1] outputs[2])<block_start><if_stmt>args.no_bpe<block_start>s,t,d=s.replace('@@ ' '') t.replace('@@ ' '') d.replace('@@ ' '')<block_end>print(s file=handle_src flush=<true>)<line_sep>print(t file=handle_trg flush=<true>)<line_sep>print(d file=handle_dec flush=<true>)<block_end><if_stmt>args.output_fer<block_start><with_stmt>torch.cuda.device_of(fertility)<block_start>fertility=fertility.tolist()<for_stmt>f fertility<block_start>f=' '.join([str(fi)<for>fi cutoff(f 0)])<line_sep>print(f file=handle_fer flush=<true>)<block_end><block_end><block_end><block_end>progressbar.update(1)<line_sep>progressbar.set_description('finishing sentences={}/batches={}, speed={} sec/batch'.format(corpus_size iters curr_time/(1+iters)))<block_end><if_stmt>evaluate<block_start>corpus_gleu=computeGLEU(dec_outputs trg_outputs corpus=<true> tokenizer=tokenizer)<line_sep>corpus_bleu=computeBLEU(dec_outputs trg_outputs corpus=<true> tokenizer=tokenizer)<line_sep>logger.info("The dev-set corpus GLEU = {}".format(corpus_gleu))<line_sep>logger.info("The dev-set corpus BLEU = {}".format(corpus_bleu))<line_sep>computeGroupBLEU(dec_outputs trg_outputs tokenizer=tokenizer)<line_sep>torch.save([src_outputs trg_outputs dec_outputs timings] './space/data.pt')<block_end><block_end><def_stmt>noisy_decode_model model dev_real samples=1 alpha=1 tau=1 teacher_model=<none> evaluate=<true> decoding_path=<none> names=['en' 'de' 'decode'] saveall=<false><block_start><assert_stmt>type(model)<is>FastTransformer 'only works for fastTransformer'<line_sep>logger.info('decoding from the devlopment set. beamsize={}, alpha={}, tau={}'.format(args.beam_size args.alpha args.temperature))<line_sep>dev=dev_real<line_sep>progressbar=tqdm(total=sum([1<for>_ dev]) desc='start decoding')<line_sep>model.eval()<line_sep>teacher_model.eval()<if_stmt>decoding_path<is><not><none><block_start>decoding_path=decoding_path.format(args.test_set<if>train_real<is><none><else>'train')<line_sep>handle_dec=open(decoding_path+'.{}'.format(names[2]) 'w')<line_sep>handle_src=open(decoding_path+'.{}'.format(names[0]) 'w')<line_sep>handle_trg=open(decoding_path+'.{}'.format(names[1]) 'w')<line_sep># if saveall:
# handle_fer = open(decoding_path + '.{}'.format(names[3]), 'w')
<block_end>corpus_size=0<line_sep>src_outputs,trg_outputs,dec_outputs,timings=[] [] [] []<line_sep>all_dec_outputs=[]<line_sep>decoded_words,target_words,decoded_info=0 0 0<line_sep>attentions=<none>#{'source': None, 'target': None}
pad_id=model.decoder.field.vocab.stoi['<pad>']<line_sep>eos_id=model.decoder.field.vocab.stoi['<eos>']<line_sep>curr_time=0<for_stmt>iters,dev_batch enumerate(dev)<block_start>start_t=time.time()<line_sep>inputs,input_masks,targets,target_masks,sources,source_masks0,encoding0,batch_size=model.quick_prepare(dev_batch)<if_stmt>teacher_model<is><not><none><block_start>encoding_teacher=teacher_model.encoding(sources source_masks0)<block_end>batch_size,src_len,hsize=encoding0[0].size()<if_stmt>samples<g>1<block_start>source_masks=source_masks0[: <none> :].expand(batch_size samples src_len).contiguous().view(batch_size<times>samples src_len)<line_sep>encoding=[<none><for>_ encoding0]<for_stmt>i range(len(encoding))<block_start>encoding[i]=encoding0[i][: <none> :].expand(batch_size samples src_len hsize).contiguous().view(batch_size<times>samples src_len hsize)<block_end><if_stmt>teacher_model<is><not><none><block_start><for_stmt>i range(len(encoding))<block_start>encoding_teacher[i]=encoding_teacher[i][: <none> :].expand(batch_size samples src_len hsize).contiguous().view(batch_size<times>samples src_len hsize)<block_end><block_end><block_end><def_stmt>parallel <block_start>decoder_inputs,input_reorder,decoder_masks,logits_fer=model.prepare_initial(encoding0 sources source_masks0 input_masks <none> <none> decoding=<true> mode=args.fertility_mode N=samples tau=tau)<if_stmt>teacher_model<is><not><none><block_start>decoding=model(encoding source_masks decoder_inputs decoder_masks decoding=<true> feedback=attentions)<line_sep>student_inputs,_=teacher_model.prepare_inputs(dev_batch decoding decoder_masks)<line_sep>student_targets,_=teacher_model.prepare_targets(dev_batch decoding decoder_masks)<line_sep>out,probs=teacher_model(encoding_teacher source_masks student_inputs decoder_masks return_probs=<true> decoding=<false>)<line_sep>_,teacher_loss=model.batched_cost(student_targets decoder_masks probs batched=<true>)# student-loss (MLE)
# reranking the translation
teacher_loss=teacher_loss.view(batch_size samples)<line_sep>decoding=decoding.view(batch_size samples -1)<line_sep>lp=decoder_masks.sum(1).view(batch_size samples)<power>(1-alpha)<line_sep>teacher_loss=teacher_loss<times>Variable(lp)<block_end><return>decoding teacher_loss input_reorder<block_end><if_stmt>args.multi_run<g>1<block_start>decodings,teacher_losses,_=zip(*[parallel()<for>_ range(args.multi_run)])<line_sep>maxl=max([d.size(2)<for>d decodings])<line_sep>decoding=Variable(sources.data.new(batch_size samples<times>args.multi_run maxl).fill_(1).long())<for_stmt>i,d enumerate(decodings)<block_start>decoding[: i<times>samples:(i+1)<times>samples :d.size(2)]=d<block_end>teacher_loss=torch.cat(teacher_losses 1)<block_end><else_stmt><block_start>decoding,teacher_loss,input_reorder=parallel()<block_end>all_dec_outputs<augadd>[(decoding.view(batch_size<times>samples -1) input_reorder)]<line_sep>selected_idx=(-teacher_loss).topk(1 1)[1]# batch x 1
decoding=decoding.gather(1 selected_idx[: : <none>].expand(batch_size 1 decoding.size(-1)))[: 0 :]<line_sep>used_t=time.time()-start_t<line_sep>curr_time<augadd>used_t<line_sep>real_mask=1-((decoding.data<eq>eos_id)+(decoding.data<eq>pad_id)).float()<line_sep>outputs=[model.output_decoding(d)<for>d [('src' sources) ('trg' targets) ('trg' decoding)]]<line_sep>corpus_size<augadd>batch_size<line_sep>src_outputs<augadd>outputs[0]<line_sep>trg_outputs<augadd>outputs[1]<line_sep>dec_outputs<augadd>outputs[2]<line_sep>timings<augadd>[used_t]<if_stmt>decoding_path<is><not><none><block_start><for_stmt>s,t,d zip(outputs[0] outputs[1] outputs[2])<block_start><if_stmt>args.no_bpe<block_start>s,t,d=s.replace('@@ ' '') t.replace('@@ ' '') d.replace('@@ ' '')<block_end>print(s file=handle_src flush=<true>)<line_sep>print(t file=handle_trg flush=<true>)<line_sep>print(d file=handle_dec flush=<true>)<block_end># if saveall:
# for d, f in all_dec_outputs:
# ds = model.output_decoding(('trg', d))
# fs = model.output_decoding(('src', f))
# for dd, ff in zip(ds, fs):
# print(dd, file=handle_fer, flush=True)
# print(ff, file=handle_fer, flush=True)
<block_end>progressbar.update(1)<line_sep>progressbar.set_description('finishing sentences={}/batches={} speed={} sec/batch'.format(corpus_size iters curr_time/(1+iters)))<block_end><if_stmt>evaluate<block_start>corpus_gleu=computeGLEU(dec_outputs trg_outputs corpus=<true> tokenizer=tokenizer)<line_sep>corpus_bleu=computeBLEU(dec_outputs trg_outputs corpus=<true> tokenizer=tokenizer)<line_sep>logger.info("The dev-set corpus GLEU = {}".format(corpus_gleu))<line_sep>logger.info("The dev-set corpus BLEU = {}".format(corpus_bleu))<line_sep>computeGroupBLEU(dec_outputs trg_outputs tokenizer=tokenizer)<line_sep>torch.save([src_outputs trg_outputs dec_outputs timings] './space/data.pt')<block_end><block_end><def_stmt>self_improving_model model train dev<block_start><if_stmt>args.tensorboard<and>(<not>args.debug)<block_start><import_from_stmt>tensorboardX SummaryWriter<line_sep>writer=SummaryWriter('./runs/self-{}'.format(args.prefix+hp_str))<block_end># optimizer
<if_stmt>args.optimizer<eq>'Adam'<block_start>opt=torch.optim.Adam([p<for>p model.parameters()<if>p.requires_grad] betas=(0.9 0.98) eps=1e-9)<if_stmt>args.trainable_teacher<block_start>opt_teacher=torch.optim.Adam([p<for>p teacher_model.parameters()<if>p.requires_grad] betas=(0.9 0.98) eps=1e-9)<block_end><block_end><elif_stmt>args.optimizer<eq>'RMSprop'<block_start>opt=torch.optim.RMSprop([p<for>p model.parameters()<if>p.requires_grad] eps=1e-9)<if_stmt>args.trainable_teacher<block_start>opt_teacher=torch.optim.RMSprop([p<for>p teacher_model.parameters()<if>p.requires_grad] eps=1e-9)<block_end><block_end><else_stmt><block_start><raise>NotImplementedError<block_end># if resume training --
<if_stmt>(args.load_from<is><not><none>)<and>(args.resume)<block_start><with_stmt>torch.cuda.device(args.gpu)# very important.
<block_start>offset,opt_states=torch.load('./models/'+args.load_from+'.pt.states' map_location=<lambda>storage loc:storage.cuda())<line_sep>opt.load_state_dict(opt_states)<block_end><block_end><else_stmt><block_start>offset=0<block_end># metrics
best=Best(max 'corpus_bleu' 'corpus_gleu' 'gleu' 'loss' 'i' model=model opt=opt path=model_name gpu=args.gpu)<line_sep>train_metrics=Metrics('train' 'loss' 'real' 'fake')<line_sep>dev_metrics=Metrics('dev' 'loss' 'gleu' 'real_loss' 'fake_loss' 'distance' 'alter_loss' 'distance2' 'reordering_loss' 'corpus_gleu')<line_sep>progressbar=tqdm(total=args.eval_every desc='start training.')<line_sep># cache
samples=100<line_sep>tau=1<line_sep>caches=Cache(args.max_cache ['src' 'trg' 'dec' 'fer'])<line_sep>best_model=copy.deepcopy(model)# used for decoding
best_score=0<line_sep># start loop
iters=offset<line_sep>train=iter(train)<line_sep>counters=0<while_stmt>iters<le>args.maximum_steps<block_start>iters<augadd>1<line_sep>counters<augadd>1<line_sep>batch=devol(next(train))<line_sep># prepare inputs
model.eval()<line_sep>inputs,input_masks,targets,target_masks,sources,source_masks0,encoding,batch_size=model.quick_prepare(batch)<line_sep>_,src_len,hsize=encoding[0].size()<line_sep>trg_len=targets.size(1)<line_sep># prepare parallel -- noisy sampling
decoder_inputs,input_reorder,decoder_masks,_,pred_fer=model.prepare_initial(encoding sources source_masks0 input_masks <none> <none> decoding=<true> mode='reinforce' N=samples tau=tau return_samples=<true>)<line_sep># repeating for decoding
source_masks=source_masks0[: <none> :].expand(batch_size samples src_len).contiguous().view(batch_size<times>samples src_len)<for_stmt>i range(len(encoding))<block_start>encoding[i]=encoding[i][: <none> :].expand(batch_size samples src_len hsize).contiguous().view(batch_size<times>samples src_len hsize)<block_end># run decoding
decoding,_,probs=best_model(encoding source_masks decoder_inputs decoder_masks decoding=<true> return_probs=<true>)<line_sep># compute GLEU score to select the best translation
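# Selection sketch: sentence-level BLEU between each sampled decoding and the reference picks
# the best of the `samples` candidates; the winning decoding and its fertilities are then
# cached together with the source/target pair as a new self-training example.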
trg_output=best_model.output_decoding(('trg' targets[: <none> :].expand(batch_size samples trg_len).contiguous().view(batch_size<times>samples trg_len)))<line_sep>dec_output=best_model.output_decoding(('trg' decoding))<line_sep>bleu_score=computeBLEU(dec_output trg_output corpus=<false> tokenizer=tokenizer).contiguous().view(batch_size samples).cuda(args.gpu)<line_sep>best_index=bleu_score.max(1)[1]<def_stmt>index_gather data index samples<block_start>batch_size=index.size(0)<line_sep>data=data.contiguous().view(batch_size samples -1)# batch x samples x dim
index=index[: <none> <none>].expand(batch_size 1 data.size(2))<line_sep><return>data.gather(1 index)[: 0 :]<block_end>best_decoding,best_decoder_masks,best_fertilities=[index_gather(x best_index samples)<for>x [decoding decoder_masks pred_fer]]<line_sep>caches.add([sources targets best_decoding best_fertilities] [source_masks0 target_masks best_decoder_masks source_masks0] ['src' 'trg' 'dec' 'fer'])<line_sep>progressbar.update(1)<line_sep>progressbar.set_description('caching sentences={}/batches={}'.format(len(caches.cache) iters))<if_stmt>counters<eq>args.eval_every<block_start>logger.info('build a new dataset from the caches')<line_sep>print(len(caches.cache))<line_sep>cache_data=ParallelDataset(examples=caches.cache fields=[('src' SRC) ('trg' TRG) ('dec' TRG) ('fer' FER)])<line_sep>cache_iter=data.BucketIterator(cache_data batch_sizes=2048 device=args.gpu batch_size_fn=batch_size_fn)<line_sep>print('done')<import_stmt>sys<line_sep>sys.exit(1)<block_end><if_stmt><false># iters % args.eval_every == 0:
<block_start>progressbar.close()<line_sep>dev_metrics.reset()<line_sep>outputs_data=valid_model(model dev <none><if>args.seq_dist<else>dev_metrics teacher_model=<none> print_out=<true>)<if_stmt>args.tensorboard<and>(<not>args.debug)<block_start>writer.add_scalar('dev/GLEU_sentence_' dev_metrics.gleu iters)<line_sep>writer.add_scalar('dev/Loss' dev_metrics.loss iters)<line_sep>writer.add_scalar('dev/GLEU_corpus_' outputs_data['corpus_gleu'] iters)<line_sep>writer.add_scalar('dev/BLEU_corpus_' outputs_data['corpus_bleu'] iters)<block_end><if_stmt><not>args.debug<block_start>best.accumulate(outputs_data['corpus_bleu'] outputs_data['corpus_gleu'] dev_metrics.gleu dev_metrics.loss iters)<line_sep>logger.info('the best model is achieved at {}, average greedy GLEU={}, corpus GLEU={}, corpus BLEU={}'.format(best.i best.gleu best.corpus_gleu best.corpus_bleu))<block_end>logger.info('model:'+args.prefix+hp_str)<line_sep># ---set-up a new progressor---
progressbar=tqdm(total=args.eval_every desc='start training.')<block_end><block_end><block_end><if_stmt>args.mode<eq>'train'<block_start>logger.info('starting training')<line_sep>train_model(model train_real dev_real teacher_model)<block_end><elif_stmt>args.mode<eq>'self'<block_start>logger.info('starting self-training')<line_sep>self_improving_model(model train_real dev_real)<block_end><elif_stmt>args.mode<eq>'test'<block_start>logger.info('starting decoding from the pre-trained model, test...')<line_sep>names=['dev.src.b{}={}.{}'.format(args.beam_size args.load_from args.fertility_mode) 'dev.trg.b{}={}.{}'.format(args.beam_size args.load_from args.fertility_mode) 'dev.dec.b{}={}.{}'.format(args.beam_size args.load_from args.fertility_mode)]<line_sep>decode_model(model <none> dev_real evaluate=<true> decoding_path=decoding_path<if><not>args.no_write<else><none> names=names)<block_end><elif_stmt>args.mode<eq>'test_noisy'<block_start>logger.info('starting decoding from the pre-trained model, test...')<line_sep>names=['dev.src.b{}={}.noise{}'.format(args.beam_size args.load_from args.beam_size) 'dev.trg.b{}={}.noise{}'.format(args.beam_size args.load_from args.beam_size) 'dev.dec.b{}={}.noise{}'.format(args.beam_size args.load_from args.beam_size) 'dev.fer.b{}={}.noise{}'.format(args.beam_size args.load_from args.beam_size)]<line_sep>noisy_decode_model(model dev_real samples=args.beam_size alpha=args.alpha tau=args.temperature teacher_model=teacher_model evaluate=<true> decoding_path=decoding_path<if><not>args.no_write<else><none> names=names saveall=<true>)<block_end><else_stmt><block_start>logger.info('starting decoding from the pre-trained model, build the course dataset...')<line_sep>names=['src.b{}'.format(args.beam_size) 'trg.b{}'.format(args.beam_size) 'dec.b{}'.format(args.beam_size)]<line_sep>decode_model(model train_real dev_real decoding_path=decoding_path<if><not>args.no_write<else><none> names=names)<block_end>logger.info("done.")<line_sep>
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""API for generating OAuth2 access tokens from service account
keys predeployed to Chrome Ops bots via Puppet.
"""<import_stmt>contextlib<import_stmt>os<import_stmt>subprocess<import_stmt>tempfile<line_sep>@contextlib.contextmanager<def_stmt>with_access_token service_account_json<block_start>"""Yields an access token for the service account.
Args:
service_account_json: The path to the service account JSON file.
"""<line_sep>fd,path=tempfile.mkstemp(suffix='.json' prefix='tok')<try_stmt><block_start>args=['luci-auth' 'token']<if_stmt>service_account_json<block_start>args<augadd>['-service-account-json' service_account_json]<block_end>subprocess.check_call(args stdout=fd)<line_sep>os.close(fd)<line_sep>fd=<none><line_sep><yield>path<block_end><finally_stmt><block_start><if_stmt>fd<is><not><none><block_start>os.close(fd)<block_end>os.remove(path)<block_end><block_end> |
<import_from_stmt>opytimizer.optimizers.swarm SSA<line_sep># Creates a SSA optimizer
o=SSA()<line_sep> |
# Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>torch.nn<class_stmt>Compressor(object)<block_start>"""Condensa model compressor class."""<def_stmt>__init__ self opt scheme model trainloader testloader valloader criterion<block_start>"""
Creates a `Compressor` instance.
:param opt: Optimizer.
:type opt: `condensa.Optimizer`
:param scheme: Compression scheme (class).
:param model: PyTorch model.
:type model: `torch.nn.Module`
:param trainloader: Training dataloader.
:param testloader: Test dataloader.
:param valloader: Validation dataloader.
:param criterion: Loss criterion.
"""<assert_stmt>isinstance(model torch.nn.Module)<line_sep>self.opt=opt<line_sep>self.pi=scheme.pi<line_sep>self.delta=scheme.delta<line_sep>self.model=model<line_sep>self.trainloader=trainloader<line_sep>self.testloader=testloader<line_sep>self.valloader=valloader<line_sep>self.criterion=criterion<line_sep>self._statistics=<none><block_end>@property<def_stmt>statistics self<block_start>"""
Retrieves compressed model statistics.
:return: Model statistics.
:rtype: `dict`
"""<line_sep><return>self._statistics<block_end><def_stmt>run self<block_start>"""
Executes model compressor.
:return: Compressed model.
:rtype: `torch.nn.Module`
"""<line_sep>w,statistics=self.opt.compress(self.model self.pi self.delta self.trainloader self.testloader self.valloader self.criterion)<line_sep>self._statistics=statistics<line_sep><return>w<block_end><block_end> |
<import_stmt>os<import_stmt>subprocess<import_stmt>sys<import_from_stmt>config TOOLS_DIR<line_sep>VERSION='aad2120'<line_sep>SUPPORTED_PLATFORMS={'cygwin':'windows' 'darwin':'mac' 'linux2':'linux' 'win32':'windows' }<def_stmt>is_platform_supported platform<block_start><return>platform<in>SUPPORTED_PLATFORMS<block_end><def_stmt>get_binary_path <block_start>platform=sys.platform<if_stmt><not>is_platform_supported(platform)<block_start><return><none><block_end>platform_dir=SUPPORTED_PLATFORMS[platform]<line_sep>path=os.path.join(TOOLS_DIR 'sccache' VERSION platform_dir 'sccache')<if_stmt>platform_dir<eq>'windows'<block_start>path<augadd>'.exe'<block_end><return>path<block_end><def_stmt>run *args<block_start>binary_path=get_binary_path()<if_stmt>binary_path<is><none><block_start><raise>Exception('No sccache binary found for the current platform.')<block_end>call_args=[binary_path]+list(args)<line_sep><return>subprocess.call(call_args)<block_end> |
<import_from_stmt>app create_app<def_stmt>test_home_page <block_start>"""
GIVEN a Flask application configured for testing
WHEN the '/' page is requested (GET)
THEN check that the response is valid
"""<line_sep>flask_app=create_app('flask_test.cfg')<line_sep># Create a test client using the Flask application configured for testing
<with_stmt>flask_app.test_client()<as>test_client<block_start>response=test_client.get('/')<assert_stmt>response.status_code<eq>200<assert_stmt>b"<title>PicoBrew Server</title>"<in>response.data<block_end><block_end> |
<import_stmt>pytest<import_from_stmt>loguru logger<import_from_stmt>tiktokpy TikTokPy<import_from_stmt>tiktokpy.models.feed FeedItem<line_sep>@pytest.mark.asyncio()<async_keyword><def_stmt>test_user_feed bot:TikTokPy<block_start>feed=<await>bot.user_feed(username="@mileycyrus")<line_sep>logger.info(feed)<assert_stmt>len(feed)<eq>50<assert_stmt>isinstance(feed[0] FeedItem)<block_end> |
<import_from_stmt>open.utilities.importing_models import_submodules<line_sep>__all__=import_submodules(__name__)<line_sep> |
# -*- coding: utf-8 -*-
"""
equip.visitors.classes
~~~~~~~~~~~~~~~~~~~~~~
Callback the visit method for each encountered class in the program.
:copyright: (c) 2014 by <NAME> (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""<class_stmt>ClassVisitor(object)<block_start>"""
A class visitor that is triggered for all encountered ``TypeDeclaration``.
Example, listing all types declared in the bytecode::
class TypeDeclVisitor(ClassVisitor):
def __init__(self):
ClassVisitor.__init__(self)
def visit(self, typeDecl):
print "New type: %s (parentDecl=%s)" \\
% (typeDecl.type_name, typeDecl.parent)
"""<def_stmt>__init__ self<block_start><pass><block_end><def_stmt>visit self typeDecl<block_start><pass><block_end><block_end> |
"""
This package implements spatial transformations in 1D, 2D, and 3D.
This is needed for the map-based registrations for example.
.. todo::
Implement CUDA version. There is already a 2D CUDA version available (in the source directory here).
But it needs to be extended to 1D and 3D. We also make use of a different convention for images which needs
to be accounted for, as we use the BxCxXxYxZ image format and BxdimxXxYxZ for the maps.
"""<line_sep>#TODO
<import_from_stmt>torch.nn.modules.module Module<line_sep>###########TODO temporary comment for torch1 compatibility
# from mermaid.libraries.functions.stn_nd import STNFunction_ND_BCXYZ, STNFunction_ND_BCXYZ_Compile
# from mermaid.libraries.functions.nn_interpolation import get_nn_interpolationf
################################################################3
<import_from_stmt>..functions.stn_nd STNFunction_ND_BCXYZ<import_from_stmt>functools partial<line_sep># class STN_ND(Module):
# """
# Legacy code for nD spatial transforms. Ignore for now. Implements spatial transforms, but in BXYZC format.
# """
# def __init__(self, dim):
# super(STN_ND, self).__init__()
# self.dim = dim
# """spatial dimension"""
# self.f = STNFunction_ND( self.dim )
# """spatial transform function"""
# def forward(self, input1, input2):
# """
# Simply returns the transformed input
#
# :param input1: image in BCXYZ format
# :param input2: map in BdimXYZ format
# :return: returns the transformed image
# """
# return self.f(input1, input2)
<class_stmt>STN_ND_BCXYZ(Module)<block_start>"""
Spatial transform code for nD spatial transforms. Uses the BCXYZ image format.
"""<def_stmt>__init__ self spacing zero_boundary=<false> use_bilinear=<true> use_01_input=<true> use_compile_version=<false><block_start>super(STN_ND_BCXYZ self).__init__()<line_sep>self.spacing=spacing<line_sep>"""spatial dimension"""<if_stmt>use_compile_version<block_start><if_stmt>use_bilinear<block_start>self.f=STNFunction_ND_BCXYZ_Compile(self.spacing zero_boundary)<block_end><else_stmt><block_start>self.f=partial(get_nn_interpolation spacing=self.spacing)<block_end><block_end><else_stmt><block_start>self.f=STNFunction_ND_BCXYZ(self.spacing zero_boundary=zero_boundary using_bilinear=use_bilinear using_01_input=use_01_input)<block_end>"""spatial transform function"""<block_end><def_stmt>forward self input1 input2<block_start>"""
Simply returns the transformed input
:param input1: image in BCXYZ format
:param input2: map in BdimXYZ format
:return: returns the transformed image
"""<line_sep><return>self.f(input1 input2)<block_end><block_end> |
<class_stmt>Post()<block_start><def_stmt>__init__ self userIden content timestamp image=<none> score='0' replies=<none><block_start>self.userIden=userIden<line_sep>self.content=content<line_sep>self.timestamp=timestamp<line_sep>self.image=image<line_sep>self.score=score<line_sep>self.replies=[]<if><not>replies<else>replies<block_end><block_end> |
"""Constants for the siren component."""<import_from_stmt>enum IntEnum<import_from_stmt>typing Final<line_sep>DOMAIN:Final="siren"<line_sep>ATTR_TONE:Final="tone"<line_sep>ATTR_AVAILABLE_TONES:Final="available_tones"<line_sep>ATTR_DURATION:Final="duration"<line_sep>ATTR_VOLUME_LEVEL:Final="volume_level"<class_stmt>SirenEntityFeature(IntEnum)<block_start>"""Supported features of the siren entity."""<line_sep>TURN_ON=1<line_sep>TURN_OFF=2<line_sep>TONES=4<line_sep>VOLUME_SET=8<line_sep>DURATION=16<block_end># These constants are deprecated as of Home Assistant 2022.5
# Please use the SirenEntityFeature enum instead.
SUPPORT_TURN_ON:Final=1<line_sep>SUPPORT_TURN_OFF:Final=2<line_sep>SUPPORT_TONES:Final=4<line_sep>SUPPORT_VOLUME_SET:Final=8<line_sep>SUPPORT_DURATION:Final=16<line_sep> |
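# Illustrative flag handling with the enum above (preferred over the deprecated constants):
# supported = SirenEntityFeature.TURN_ON | SirenEntityFeature.TONES
# has_tones = bool(supported & SirenEntityFeature.TONES)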
# imports - module imports
<import_from_stmt>pipupgrade.exception PipupgradeError <line_sep># imports - test imports
<import_stmt>pytest<def_stmt>test_pipupgrade_error <block_start><with_stmt>pytest.raises(PipupgradeError)<block_start><raise>PipupgradeError<block_end><block_end> |
# Copyright 2018 Google, Inc.,
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules for encoding and decoding observations."""<import_stmt>sonnet<as>snt<import_stmt>tensorflow<as>tf<import_from_stmt>. batch_dist<import_from_stmt>. dist_module<import_from_stmt>. util<class_stmt>EncoderSequence(snt.Sequential)<block_start>"""A wrapper arount snt.Sequential that also implements output_size."""<line_sep>@property<def_stmt>output_size self<block_start><return>self.layers[-1].output_size<block_end><block_end><class_stmt>FlattenEncoder(snt.AbstractModule)<block_start>"""Forwards the flattened input."""<def_stmt>__init__ self input_size=<none> name=<none><block_start>super(FlattenEncoder self).__init__(name=name)<line_sep>self._input_size=<none><if_stmt>input_size<is><not><none><block_start>self._merge_input_sizes(input_size)<block_end><block_end><def_stmt>_merge_input_sizes self input_size<block_start><if_stmt>self._input_size<is><none><block_start>self._input_size=snt.nest.map(tf.TensorShape input_size)<line_sep><return><block_end>self._input_size=snt.nest.map(<lambda>cur_size inp_size:cur_size.merge_with(inp_size) self._input_size input_size)<block_end>@property<def_stmt>output_size self<block_start>"""Returns the output Tensor shapes."""<if_stmt>self._input_size<is><none><block_start><return>tf.TensorShape([<none>])<block_end>flattened_size=0<for_stmt>inp_size snt.nest.flatten(self._input_size)<block_start>num_elements=inp_size.num_elements()<if_stmt>num_elements<is><none><block_start><return>tf.TensorShape([<none>])<block_end>flattened_size<augadd>num_elements<block_end><return>tf.TensorShape([flattened_size])<block_end><def_stmt>_build self inp<block_start>input_sizes=snt.nest.map(<lambda>inp_i:inp_i.get_shape()[1:] inp)<line_sep>self._merge_input_sizes(input_sizes)<line_sep>flatten=snt.BatchFlatten(preserve_dims=1)<line_sep>flat_inp=snt.nest.map(<lambda>inp_i:tf.to_float(flatten(inp_i)) inp)<line_sep>ret=util.concat_features(flat_inp)<line_sep>util.set_tensor_shapes(ret self.output_size add_batch_dims=1)<line_sep><return>ret<block_end><block_end><def_stmt>MLPObsEncoder hparams name=<none><block_start>"""Observation -> encoded, flat observation."""<line_sep>name=name<or>"mlp_obs_encoder"<line_sep>mlp=util.make_mlp(hparams hparams.obs_encoder_fc_layers name=name+"/mlp")<line_sep><return>EncoderSequence([FlattenEncoder() mlp] name=name)<block_end><class_stmt>DecoderSequence(dist_module.DistModule)<block_start>"""A sequence of zero or more AbstractModules, followed by a DistModule."""<def_stmt>__init__ self input_encoders decoder name=<none><block_start>super(DecoderSequence self).__init__(name=name)<line_sep>self._input_encoders=input_encoders<line_sep>self._decoder=decoder<block_end>@property<def_stmt>event_dtype self<block_start><return>self._decoder.event_dtype<block_end>@property<def_stmt>event_size self<block_start><return>self._decoder.event_size<block_end><def_stmt>dist self params name=<none><block_start><return>self._decoder.dist(params name=name)<block_end><def_stmt>_build self inputs<block_start><if_stmt>self._input_encoders<block_start>inputs=snt.Sequential(self._input_encoders)(inputs)<block_end><return>self._decoder(inputs)<block_end><block_end><def_stmt>MLPObsDecoder hparams decoder param_size name=<none><block_start>"""Inputs -> decoder(obs; mlp(inputs))."""<line_sep>name=name<or>"mlp_"+decoder.module_name<line_sep>layers=hparams.obs_decoder_fc_hidden_layers+[param_size]<line_sep>mlp=util.make_mlp(hparams layers name=name+"/mlp")<line_sep><return>DecoderSequence([util.concat_features mlp] decoder 
name=name)<block_end><class_stmt>BernoulliDecoder(dist_module.DistModule)<block_start>"""Inputs -> Bernoulli(obs; logits=inputs)."""<def_stmt>__init__ self dtype=tf.int32 squeeze_input=<false> name=<none><block_start>self._dtype=dtype<line_sep>self._squeeze_input=squeeze_input<line_sep>super(BernoulliDecoder self).__init__(name=name)<block_end>@property<def_stmt>event_dtype self<block_start><return>self._dtype<block_end>@property<def_stmt>event_size self<block_start><return>tf.TensorShape([])<block_end><def_stmt>_build self inputs<block_start><if_stmt>self._squeeze_input<block_start>inputs=tf.squeeze(inputs axis=-1)<block_end><return>inputs<block_end><def_stmt>dist self params name=<none><block_start><return>tf.distributions.Bernoulli(logits=params dtype=self._dtype name=name<or>self.module_name+"_dist")<block_end><block_end><class_stmt>BetaDecoder(dist_module.DistModule)<block_start>"""Inputs -> Beta(obs; conc1, conc0)."""<def_stmt>__init__ self positive_projection=<none> squeeze_input=<false> name=<none><block_start>self._positive_projection=positive_projection<line_sep>self._squeeze_input=squeeze_input<line_sep>super(BetaDecoder self).__init__(name=name)<block_end>@property<def_stmt>event_dtype self<block_start><return>tf.float32<block_end>@property<def_stmt>event_size self<block_start><return>tf.TensorShape([])<block_end><def_stmt>_build self inputs<block_start>conc1,conc0=tf.split(inputs 2 axis=-1)<if_stmt>self._positive_projection<is><not><none><block_start>conc1=self._positive_projection(conc1)<line_sep>conc0=self._positive_projection(conc0)<block_end><if_stmt>self._squeeze_input<block_start>conc1=tf.squeeze(conc1 axis=-1)<line_sep>conc0=tf.squeeze(conc0 axis=-1)<block_end><return>(conc1 conc0)<block_end><def_stmt>dist self params name=<none><block_start>conc1,conc0=params<line_sep><return>tf.distributions.Beta(conc1 conc0 name=name<or>self.module_name+"_dist")<block_end><block_end><class_stmt>_BinomialDist(tf.contrib.distributions.Binomial)<block_start>"""Work around missing functionality in Binomial."""<def_stmt>__init__ self total_count logits=<none> probs=<none> name=<none><block_start>self._total_count=total_count<line_sep>super(_BinomialDist self).__init__(total_count=tf.to_float(total_count) logits=logits probs=probs name=name<or>"Binomial")<block_end><def_stmt>_log_prob self counts<block_start><return>super(_BinomialDist self)._log_prob(tf.to_float(counts))<block_end><def_stmt>_sample_n self n seed=<none><block_start>all_counts=tf.to_float(tf.range(self._total_count+1))<for_stmt>batch_dim range(self.batch_shape.ndims)<block_start>all_counts=tf.expand_dims(all_counts axis=-1)<block_end>all_cdfs=tf.map_fn(self.cdf all_counts)<line_sep>shape=tf.concat([[n] self.batch_shape_tensor()] 0)<line_sep>uniform=tf.random_uniform(shape seed=seed)<line_sep><return>tf.foldl(<lambda>acc cdfs:tf.where(uniform<g>cdfs acc+1 acc) all_cdfs initializer=tf.zeros(shape dtype=tf.int32))<block_end><block_end><class_stmt>BinomialDecoder(dist_module.DistModule)<block_start>"""Inputs -> Binomial(obs; total_count, logits)."""<def_stmt>__init__ self total_count=<none> squeeze_input=<false> name=<none><block_start>self._total_count=total_count<line_sep>self._squeeze_input=squeeze_input<line_sep>super(BinomialDecoder self).__init__(name=name)<block_end>@property<def_stmt>event_dtype self<block_start><return>tf.int32<block_end>@property<def_stmt>event_size self<block_start><return>tf.TensorShape([])<block_end><def_stmt>_build self 
inputs<block_start><if_stmt>self._squeeze_input<block_start>inputs=tf.squeeze(inputs axis=-1)<block_end><return>inputs<block_end><def_stmt>dist self params name=<none><block_start><return>_BinomialDist(self._total_count logits=params name=name<or>self.module_name+"_dist")<block_end><block_end><class_stmt>CategoricalDecoder(dist_module.DistModule)<block_start>"""Inputs -> Categorical(obs; logits=inputs)."""<def_stmt>__init__ self dtype=tf.int32 name=<none><block_start>self._dtype=dtype<line_sep>super(CategoricalDecoder self).__init__(name=name)<block_end>@property<def_stmt>event_dtype self<block_start><return>self._dtype<block_end>@property<def_stmt>event_size self<block_start><return>tf.TensorShape([])<block_end><def_stmt>_build self inputs<block_start><return>inputs<block_end><def_stmt>dist self params name=<none><block_start><return>tf.distributions.Categorical(logits=params dtype=self._dtype name=name<or>self.module_name+"_dist")<block_end><block_end><class_stmt>NormalDecoder(dist_module.DistModule)<block_start>"""Inputs -> Normal(obs; loc=half(inputs), scale=project(half(inputs)))"""<def_stmt>__init__ self positive_projection=<none> name=<none><block_start>self._positive_projection=positive_projection<line_sep>super(NormalDecoder self).__init__(name=name)<block_end>@property<def_stmt>event_dtype self<block_start><return>tf.float32<block_end>@property<def_stmt>event_size self<block_start><return>tf.TensorShape([])<block_end><def_stmt>_build self inputs<block_start>loc,scale=tf.split(inputs 2 axis=-1)<if_stmt>self._positive_projection<is><not><none><block_start>scale=self._positive_projection(scale)<block_end><return>loc scale<block_end><def_stmt>dist self params name=<none><block_start>loc,scale=params<line_sep><return>tf.distributions.Normal(loc=loc scale=scale name=name<or>self.module_name+"_dist")<block_end><block_end><class_stmt>BatchDecoder(dist_module.DistModule)<block_start>"""Wrap a decoder to model batches of events."""<def_stmt>__init__ self decoder event_size name=<none><block_start>self._decoder=decoder<line_sep>self._event_size=tf.TensorShape(event_size)<line_sep>super(BatchDecoder self).__init__(name=name)<block_end>@property<def_stmt>event_dtype self<block_start><return>self._decoder.event_dtype<block_end>@property<def_stmt>event_size self<block_start><return>self._event_size<block_end><def_stmt>_build self inputs<block_start><return>self._decoder(inputs)<block_end><def_stmt>dist self params name=<none><block_start><return>batch_dist.BatchDistribution(self._decoder.dist(params name=name) ndims=self._event_size.ndims)<block_end><block_end><class_stmt>GroupDecoder(dist_module.DistModule)<block_start>"""Group up decoders to model a set of independent of events."""<def_stmt>__init__ self decoders name=<none><block_start>self._decoders=decoders<line_sep>super(GroupDecoder self).__init__(name=name)<block_end>@property<def_stmt>event_dtype self<block_start><return>snt.nest.map(<lambda>dec:dec.event_dtype self._decoders)<block_end>@property<def_stmt>event_size self<block_start><return>snt.nest.map(<lambda>dec:dec.event_size self._decoders)<block_end><def_stmt>_build self inputs<block_start><return>snt.nest.map_up_to(self._decoders <lambda>dec input_:dec(input_) self._decoders inputs)<block_end><def_stmt>dist self params name=<none><block_start><with_stmt>self._enter_variable_scope()<block_start><with_stmt>tf.name_scope(name<or>"group")<block_start>dists=snt.nest.map_up_to(self._decoders <lambda>dec param:dec.dist(param) self._decoders 
params)<block_end><return>batch_dist.GroupDistribution(dists name=name)<block_end><block_end><block_end> |
<def_stmt>main <block_start>i=0<while_stmt><true><block_start>print(i)<line_sep>i<augadd>1<if_stmt>i<g>5<block_start><break><block_end><block_end>j=10<while_stmt>j<l>100<block_start>print(j)<line_sep>j<augadd>10<block_end><while_stmt>1<block_start>print(j+i)<line_sep><break><block_end><while_stmt>0.1<block_start>print(j+i)<line_sep><break><block_end><while_stmt>0<block_start>print("This never executes")<block_end><while_stmt>0.0<block_start>print("This never executes")<block_end><while_stmt><none><block_start>print("This never executes")<block_end><while_stmt><false><block_start>print("This never executes")<block_end><while_stmt>""<block_start>print("This never executes")<block_end><while_stmt>"hi"<block_start>print("This executes")<line_sep><break><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
'''
KnockoffGAN Knockoff Variable Generation
<NAME> (9/27/2018)
'''<line_sep>#%% Necessary Packages
<import_stmt>numpy<as>np<import_from_stmt>tqdm tqdm<import_stmt>tensorflow<as>tf<import_stmt>logging<import_stmt>argparse<import_stmt>pandas<as>pd<import_from_stmt>sklearn.preprocessing MinMaxScaler<line_sep>#%% KnockoffGAN Function
'''
Inputs:
x_train: Training data
lamda: Power network parameter = 0.01
mu: WGAN parameter = 1
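Example call (illustrative):
X_knockoff = KnockoffGAN(x_train, x_name='Normal', lamda=0.01, mu=1)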
'''<line_sep>logger=logging.getLogger()<def_stmt>KnockoffGAN x_train x_name lamda=0.01 mu=1 mb_size=128 niter=2000<block_start>tf_debug=<false><if_stmt>tf_debug<block_start>run_opts=tf.RunOptions(report_tensor_allocations_upon_oom=<true>)<line_sep>config=tf.ConfigProto()<line_sep>config.log_device_placement=<true><line_sep>config.gpu_options.allow_growth=<true><block_end><else_stmt><block_start>run_opts=<none><line_sep>config=<none><block_end>#%% Parameters
# 1. # of samples
n=len(x_train[: 0])<line_sep># 2. # of features
x_dim=len(x_train[0 :])<line_sep># 3. # of random dimensions
z_dim=int(x_dim)<line_sep># 4. # of hidden dimensions
h_dim=int(x_dim)<line_sep># 5. # of minibatch
# mb_size = 128
# 6. WGAN parameters
lam=10<line_sep>lr=1e-4<line_sep>#%% Necessary Functions
# 1. Xavier Initialization Definition
<def_stmt>xavier_init size<block_start>in_dim=size[0]<line_sep>xavier_stddev=1./tf.sqrt(in_dim/2.)<line_sep><return>tf.random_normal(shape=size stddev=xavier_stddev)<block_end># 2. Sample from normal distribution: Random variable generation
<def_stmt>sample_Z m n x_name<block_start><if_stmt>((x_name<eq>'Normal')|(x_name<eq>'AR_Normal'))<block_start><return>np.random.normal(0. np.sqrt(1./3000) size=[m n]).copy()<block_end><elif_stmt>((x_name<eq>'Uniform')|(x_name<eq>'AR_Uniform'))<block_start><return>np.random.uniform(-3<times>np.sqrt(1./3000) 3<times>np.sqrt(1./3000) [m n]).copy()<block_end><block_end># 3. Sample from the real data (Mini-batch index sampling)
<def_stmt>sample_X m n<block_start><return>np.random.permutation(m)[:n].copy()<block_end># 4. Permutation for MINE computation
<def_stmt>Permute x<block_start>n=len(x[: 0])<line_sep>idx=np.random.permutation(n)<line_sep>out=x[idx :].copy()<line_sep><return>out<block_end># 5. Bernoulli sampling for Swap and Hint variables
<def_stmt>sample_SH m n p<block_start><return>np.random.binomial(1 p [m n]).copy()<block_end>#%% Placeholder inputs
# 1. Feature
X=tf.placeholder(tf.float32 shape=[<none> x_dim])<line_sep># 2. Feature (Permute)
X_hat=tf.placeholder(tf.float32 shape=[<none> x_dim])<line_sep># 3. Random Variable
Z=tf.placeholder(tf.float32 shape=[<none> z_dim])<line_sep># 4. Swap
S=tf.placeholder(tf.float32 shape=[<none> x_dim])<line_sep># 5. Hint
H=tf.placeholder(tf.float32 shape=[<none> x_dim])<line_sep>#%% Network Building
#%% 1. Discriminator
# Input: Swap (X, tilde X) and Hint
D_W1=tf.Variable(xavier_init([x_dim+x_dim+x_dim h_dim]))<line_sep>D_b1=tf.Variable(tf.zeros(shape=[h_dim]))<line_sep>D_W2=tf.Variable(xavier_init([h_dim x_dim]))<line_sep>D_b2=tf.Variable(tf.zeros(shape=[x_dim]))<line_sep>theta_D=[D_W1 D_W2 D_b1 D_b2]<line_sep>#%% 2. WGAN Discriminator
# Input: tilde X
WD_W1=tf.Variable(xavier_init([x_dim h_dim]))<line_sep>WD_b1=tf.Variable(tf.zeros(shape=[h_dim]))<line_sep>WD_W2=tf.Variable(xavier_init([h_dim 1]))<line_sep>WD_b2=tf.Variable(tf.zeros(shape=[1]))<line_sep>theta_WD=[WD_W1 WD_W2 WD_b1 WD_b2]<line_sep>#%% 3. Generator
# Input: X and Z
G_W1=tf.Variable(xavier_init([x_dim+z_dim h_dim]))<line_sep>G_b1=tf.Variable(tf.zeros(shape=[h_dim]))<line_sep>G_W2=tf.Variable(xavier_init([h_dim x_dim]))<line_sep>G_b2=tf.Variable(tf.zeros(shape=[x_dim]))<line_sep>theta_G=[G_W1 G_W2 G_b1 G_b2]<line_sep>#%% 4. MINE
# Input: X and tilde X
# For X
M_W1A=tf.Variable(xavier_init([x_dim]))<line_sep>M_W1B=tf.Variable(xavier_init([x_dim]))<line_sep>M_b1=tf.Variable(tf.zeros(shape=[x_dim]))<line_sep># For tilde X
M_W2A=tf.Variable(xavier_init([x_dim]))<line_sep>M_W2B=tf.Variable(xavier_init([x_dim]))<line_sep>M_b2=tf.Variable(tf.zeros(shape=[x_dim]))<line_sep># Combine
M_W3=tf.Variable(xavier_init([x_dim]))<line_sep>M_b3=tf.Variable(tf.zeros(shape=[x_dim]))<line_sep>theta_M=[M_W1A M_W1B M_W2A M_W2B M_W3 M_b1 M_b2 M_b3]<line_sep>#%% Functions
# 1. Generator
<def_stmt>generator x z<block_start>inputs=tf.concat(axis=1 values=[x z])<line_sep>G_h1=tf.nn.tanh(tf.matmul(inputs G_W1)+G_b1)<line_sep>G_out=(tf.matmul(G_h1 G_W2)+G_b2)<line_sep><return>G_out<block_end># 2. Discriminator
<def_stmt>discriminator sA sB h<block_start>inputs=tf.concat(axis=1 values=[sA sB h])<line_sep>D_h1=tf.nn.tanh(tf.matmul(inputs D_W1)+D_b1)<line_sep>D_out=tf.nn.sigmoid(tf.matmul(D_h1 D_W2)+D_b2)<line_sep><return>D_out<block_end># 3. WGAN Discriminator
<def_stmt>WGAN_discriminator x<block_start>WD_h1=tf.nn.relu(tf.matmul(x WD_W1)+WD_b1)<line_sep>WD_out=(tf.matmul(WD_h1 WD_W2)+WD_b2)<line_sep><return>WD_out<block_end># 4. MINE
<def_stmt>MINE x x_hat<block_start>M_h1=tf.nn.tanh(M_W1A<times>x+M_W1B<times>x_hat+M_b1)<line_sep>M_h2=tf.nn.tanh(M_W2A<times>x+M_W2B<times>x_hat+M_b2)<line_sep>M_out=(M_W3<times>(M_h1+M_h2)+M_b3)<line_sep>Exp_M_out=tf.exp(M_out)<line_sep><return>M_out Exp_M_out<block_end>#%% Combination across the networks
# 1. Generate Knockoffs
G_sample=generator(X Z)<line_sep># 2. WGAN Outputs for real and fake
WD_real=WGAN_discriminator(X)<line_sep>WD_fake=WGAN_discriminator(G_sample)<line_sep># 3. Generate swapping (X, tilde X)
SwapA=S<times>X+(1-S)<times>G_sample<line_sep>SwapB=(1-S)<times>X+S<times>G_sample<line_sep># 4. Discriminator output
# (X, tilde X) is SwapA, SwapB. Hint is generated by H * S
D_out=discriminator(SwapA SwapB H<times>S)<line_sep># 5. MINE Computation
# Without permutation
M_out,_=MINE(X G_sample)<line_sep># With permutation
_,Exp_M_out=MINE(X_hat G_sample)<line_sep># 6. WGAN Loss Replacement of Clipping algorithm to Penalty term
# 1. Line 6 in Algorithm 1
eps=tf.random_uniform([mb_size 1] minval=0. maxval=1.)<line_sep>X_inter=eps<times>X+(1.-eps)<times>G_sample<line_sep># 2. Line 7 in Algorithm 1
grad=tf.gradients(WGAN_discriminator(X_inter) [X_inter])[0]<line_sep>grad_norm=tf.sqrt(tf.reduce_sum((grad)<power>2+1e-8 axis=1))<line_sep>grad_pen=lam<times>tf.reduce_mean((grad_norm-1)<power>2)<line_sep>#%% Loss function
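# The interpolation and gradient terms above implement the WGAN-GP penalty:
#   X_inter = eps * X + (1 - eps) * G_sample,  grad_pen = lam * E[(||grad_X_inter D(X_inter)||_2 - 1)^2]
# which replaces the weight clipping of the original WGAN.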
# 1. WGAN Loss
WD_loss=tf.reduce_mean(WD_fake)-tf.reduce_mean(WD_real)+grad_pen<line_sep># 2. Discriminator loss
D_loss=-tf.reduce_mean(S<times>(1-H)<times>tf.log(D_out+1e-8)+(1-S)<times>(1-H)<times>tf.log(1-D_out+1e-8))<line_sep># 3. MINE Loss
M_loss=tf.reduce_sum(tf.reduce_mean(M_out axis=0)-tf.log(tf.reduce_mean(Exp_M_out axis=0)))<line_sep># 4. Generator loss
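# M_loss is the Donsker-Varadhan bound used by MINE, computed per feature and summed:
#   I(X; knockoff) >= E_joint[M(X, G_sample)] - log E_marginal[exp(M(X_hat, G_sample))]
# where X_hat is a row-permuted copy of X that supplies the product-of-marginals samples.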
G_loss=-D_loss+mu<times>-tf.reduce_mean(WD_fake)+lamda<times>M_loss<line_sep># Solver
WD_solver=(tf.train.AdamOptimizer(learning_rate=lr beta1=0.5).minimize(WD_loss var_list=theta_WD))<line_sep>D_solver=(tf.train.AdamOptimizer(learning_rate=lr beta1=0.5).minimize(D_loss var_list=theta_D))<line_sep>G_solver=(tf.train.AdamOptimizer(learning_rate=lr beta1=0.5).minimize(G_loss var_list=theta_G))<line_sep>M_solver=(tf.train.AdamOptimizer(learning_rate=lr beta1=0.5).minimize(-M_loss var_list=theta_M))<line_sep>#%% Sessions
<if_stmt>tf_debug<block_start>sess=tf.Session(config=config)<line_sep>sess.run(tf.global_variables_initializer() options=run_opts)<block_end><else_stmt><block_start>sess=tf.Session()<line_sep>sess.run(tf.global_variables_initializer())<block_end>#%% Iterations
<for_stmt>it tqdm(range(niter))<block_start><for_stmt>dummy_range range(5)#%% WGAN, Discriminator and MINE Training
# Random variable generation
<block_start>Z_mb=sample_Z(mb_size z_dim x_name)<line_sep># Minibatch sampling
X_idx=sample_X(n mb_size)<line_sep>X_mb=x_train[X_idx :].copy()<line_sep>X_perm_mb=Permute(X_mb)<line_sep># Swap generation
S_mb=sample_SH(mb_size x_dim 0.5)<line_sep># Hint generation
H_mb=sample_SH(mb_size x_dim 0.9)<line_sep># 1. WGAN Training
_,WD_loss_curr=sess.run([WD_solver WD_loss] feed_dict={X:X_mb Z:Z_mb X_hat:X_perm_mb S:S_mb H:H_mb} options=run_opts)<line_sep># 2. Discriminator Training
# print('discriminator training')
_,D_loss_curr=sess.run([D_solver D_loss] feed_dict={X:X_mb Z:Z_mb X_hat:X_perm_mb S:S_mb H:H_mb} options=run_opts)<line_sep># 3. MINE Training
# print('mine training')
_,M_loss_curr=sess.run([M_solver M_loss] feed_dict={X:X_mb Z:Z_mb X_hat:X_perm_mb S:S_mb H:H_mb} options=run_opts)<block_end>#%% Generator Training
# Random variable generation
Z_mb=sample_Z(mb_size z_dim x_name)<line_sep># Minibatch sampling
X_idx=sample_X(n mb_size)<line_sep>X_mb=x_train[X_idx :].copy()<line_sep>X_perm_mb=Permute(X_mb)<line_sep># Swap generation
S_mb=sample_SH(mb_size x_dim 0.5)<line_sep># Hint generation
H_mb=sample_SH(mb_size x_dim 0.0)<line_sep># Generator training
# print('gen training')
_,G_loss_curr,G_sample_curr=sess.run([G_solver G_loss G_sample] feed_dict={X:X_mb Z:Z_mb X_hat:X_perm_mb S:S_mb H:H_mb} options=run_opts)<block_end>#%% Output
#print('last session run')
X_knockoff=sess.run([G_sample] feed_dict={X:x_train Z:sample_Z(n z_dim x_name)} options=run_opts)[0]<line_sep># X_knockoff = sess.run([G_sample], feed_dict = {X: x_train, Z: sample_Z(n, z_dim, x_name)})[0]
#print('closing session')
sess.close()<line_sep>tf.reset_default_graph()<line_sep><return>X_knockoff<block_end><def_stmt>init_arg <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('-i')<line_sep>parser.add_argument('-o')<line_sep>parser.add_argument('--bs' default=128 type=int)<line_sep>parser.add_argument('--it' default=2000 type=int)<line_sep>parser.add_argument('--target')<line_sep>parser.add_argument('--xname' default='Normal' help='Sample distribution [Normal, Uniform]')<line_sep>parser.add_argument('--scale' default=1 type=int)<line_sep><return>parser.parse_args()<block_end><if_stmt>__name__<eq>"__main__"<block_start>args=init_arg()<line_sep>df=pd.read_csv(args.i)<line_sep>niter=args.it<line_sep>use_scale=args.scale<line_sep>x_name=args.xname<line_sep>lbl=args.target<line_sep>features=list(df.columns)<line_sep>features.remove(lbl)<line_sep># scale/normalize dataset
range_scaler=(0 1)<line_sep>scaler=MinMaxScaler(feature_range=range_scaler)<line_sep>x=df[features]<if_stmt>use_scale<block_start>scaler.fit(x)<line_sep>x=scaler.transform(x)<block_end><else_stmt><block_start>x=x.values<block_end>x_k=KnockoffGAN(x x_name mb_size=args.bs niter=niter)<line_sep>df_k=pd.DataFrame(x_k columns=features)<line_sep>df_k[lbl]=df[lbl]<line_sep>df_k.to_csv(args.o index=<false>)<block_end> |
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 19 13:39:52 2019
@author: <NAME>
"""<import_stmt>tensorflow<as>tf<import_stmt>os<import_from_stmt>model.inference_model InferenceModel<line_sep>tf.app.flags.DEFINE_string('checkpoints_path' os.path.abspath(os.path.join(os.path.dirname(__file__) '..' 'checkpoints/')) 'Path for the test data.')<line_sep>tf.app.flags.DEFINE_string('export_path_base' os.path.abspath(os.path.join(os.path.dirname(__file__) '..' 'model-export/')) 'Directory where to export the model.')<line_sep>tf.app.flags.DEFINE_integer('model_version' 1 'Version number of the model.')<line_sep>tf.app.flags.DEFINE_integer('num_v' 3952 'Number of visible neurons (Number of movies the users rated.)')<line_sep>FLAGS=tf.app.flags.FLAGS<def_stmt>run_inference <block_start>inference_graph=tf.Graph()<with_stmt>inference_graph.as_default()<block_start>model=InferenceModel(FLAGS)<line_sep>input_data=tf.placeholder(tf.float32 shape=[<none> 3952])<line_sep>ratings=model.inference(input_data)<line_sep>saver=tf.train.Saver()<block_end><with_stmt>tf.Session(graph=inference_graph)<as>sess<block_start>ckpt=tf.train.get_checkpoint_state(FLAGS.checkpoints_path)<line_sep>saver.restore(sess ckpt.model_checkpoint_path)<line_sep># Save the model
export_path=os.path.join(tf.compat.as_bytes(FLAGS.export_path_base) tf.compat.as_bytes('model_v_%s'%str(FLAGS.model_version)))<line_sep>print('Exporting trained model to %s'%export_path)<line_sep>builder=tf.saved_model.builder.SavedModelBuilder(export_path)<line_sep># create tensors info
predict_tensor_inputs_info=tf.saved_model.utils.build_tensor_info(input_data)<line_sep>predict_tensor_scores_info=tf.saved_model.utils.build_tensor_info(ratings)<line_sep># build prediction signature
prediction_signature=(tf.saved_model.signature_def_utils.build_signature_def(inputs={'inputs':predict_tensor_inputs_info} outputs={'ratings':predict_tensor_scores_info} method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))<line_sep># save the model
builder.add_meta_graph_and_variables(sess [tf.saved_model.tag_constants.SERVING] signature_def_map={'predict_ratings':prediction_signature})<line_sep>builder.save()<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>run_inference()<block_end> |
<import_stmt>os<import_stmt>re<import_stmt>time<import_from_stmt>carbontracker.components.handler Handler<line_sep># RAPL Literature:
# https://www.researchgate.net/publication/322308215_RAPL_in_Action_Experiences_in_Using_RAPL_for_Power_Measurements
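# Note: energy_uj exposes a cumulative energy counter in microjoules, so average power over a
# sampling window is (E_after - E_before) / 1e6 / MEASURE_DELAY watts; e.g. an increase of
# 15_000_000 uJ over the 1 s delay corresponds to 15 W.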
RAPL_DIR="/sys/class/powercap/"<line_sep>CPU=0<line_sep>DRAM=2<line_sep>MEASURE_DELAY=1<class_stmt>IntelCPU(Handler)<block_start><def_stmt>devices self<block_start>"""Returns the name of all RAPL Domains"""<line_sep><return>self._devices<block_end><def_stmt>available self<block_start><return>os.path.exists(RAPL_DIR)<and>bool(os.listdir(RAPL_DIR))<block_end><def_stmt>power_usage self<block_start>before_measures=self._get_measurements()<line_sep>time.sleep(MEASURE_DELAY)<line_sep>after_measures=self._get_measurements()<line_sep># Ensure all power measurements >= 0 and retry up to 3 times.
attempts=3<while_stmt>attempts<g>0<block_start>attempts<augsub>1<line_sep>power_usages=[self._compute_power(before after)<for>before,after zip(before_measures after_measures)]<if_stmt>all(power<ge>0<for>power power_usages)<block_start><return>power_usages<block_end><block_end>default=[0.0<for>device range(len(self._devices))]<line_sep><return>default<block_end><def_stmt>_compute_power self before after<block_start>"""Compute avg. power usage from two samples in microjoules."""<line_sep>joules=(after-before)/1000000<line_sep>watt=joules/MEASURE_DELAY<line_sep><return>watt<block_end><def_stmt>_read_energy self path<block_start><with_stmt>open(os.path.join(path "energy_uj") 'r')<as>f<block_start><return>int(f.read())<block_end><block_end><def_stmt>_get_measurements self<block_start>measurements=[]<for_stmt>package self._rapl_devices<block_start><try_stmt><block_start>power_usage=self._read_energy(os.path.join(RAPL_DIR package))<line_sep>measurements.append(power_usage)<block_end><except_stmt>FileNotFoundError# check cpu/gpu/dram
<block_start>parts=[f<for>f os.listdir(os.path.join(RAPL_DIR package))<if>re.match(self.parts_pattern f)]<line_sep>total_power_usage=0<for_stmt>part parts<block_start>total_power_usage<augadd>self._read_energy(os.path.join(RAPL_DIR package part))<block_end>measurements.append(total_power_usage)<block_end><block_end><return>measurements<block_end><def_stmt>_convert_rapl_name self name pattern<block_start><if_stmt>re.match(pattern name)<block_start><return>"cpu:"+name[-1]<block_end><block_end><def_stmt>init self# Get amount of intel-rapl folders
<block_start>packages=list(filter(<lambda>x:':'<in>x os.listdir(RAPL_DIR)))<line_sep>self.device_count=len(packages)<line_sep>self._devices=[]<line_sep>self._rapl_devices=[]<line_sep>self.parts_pattern=re.compile(r"intel-rapl:(\d):(\d)")<line_sep>devices_pattern=re.compile("intel-rapl:.")<for_stmt>package packages<block_start><if_stmt>re.fullmatch(devices_pattern package)<block_start><with_stmt>open(os.path.join(RAPL_DIR package "name") "r")<as>f<block_start>name=f.read().strip()<block_end><if_stmt>name<ne>"psys"<block_start>self._rapl_devices.append(package)<line_sep>self._devices.append(self._convert_rapl_name(package devices_pattern))<block_end><block_end><block_end><block_end><def_stmt>shutdown self<block_start><pass><block_end><block_end> |
# Build the speaker and phone networks.
# In this framework, they are both TDNN with different settings.
# The speaker network is a hard-coded TDNN and the phone network is specified by the parameters.
# Of course, the speaker network can be modified (e.g. to a larger network). Meanwhile, the parameters for the
# phone network should be modified as well so that the architecture is consistent with the speaker network.
# TODO: we can make the speaker network also controlled by config file which is not too difficult.
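# Call pattern implied by the signatures below (illustrative; the tensors and params object are
# assumed to be built elsewhere):
#   endpoints = {}
#   spk_sample, spk_mu, spk_logvar = build_speaker_encoder(features, phone_labels, feature_length,
#                                                          params, endpoints, reuse_variables=False)
#   phn_sample, phn_mu, phn_logvar = build_phone_encoder(features, speaker_labels, feature_length,
#                                                        params, endpoints, reuse_variables=False)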
<import_stmt>tensorflow<as>tf<import_from_stmt>model.multitask_v1.pooling statistics_pooling_v2<import_from_stmt>model.common l2_scaling shape_list prelu<def_stmt>build_speaker_encoder features phone_labels feature_length params endpoints reuse_variables is_training=<false><block_start>"""Build encoder for speaker latent variable.
Use the same TDNN network as x-vector.
Args:
features: the input features.
phone_labels: the phone labels (i.e. alignment). will be used in the future.
feature_length: the length of each feature.
params: the parameters.
endpoints: will be updated during building.
reuse_variables: if true, reuse the existing variables.
is_training: used in batchnorm
:return: sampled_zs, mu_zs, logvar_zs
"""<line_sep>relu=tf.nn.relu<if_stmt>"network_relu_type"<in>params.dict<block_start><if_stmt>params.network_relu_type<eq>"prelu"<block_start>relu=prelu<block_end><if_stmt>params.network_relu_type<eq>"lrelu"<block_start>relu=tf.nn.leaky_relu<block_end><block_end><with_stmt>tf.variable_scope("encoder" reuse=reuse_variables)# Layer 1: [-2,-1,0,1,2] --> [b, 1, l-4, 512]
# conv2d + batchnorm + relu
<block_start>features=tf.layers.conv2d(features 512 (1 5) activation=<none> kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer) name='conv1')<line_sep>endpoints["conv1"]=features<line_sep>features=tf.layers.batch_normalization(features momentum=params.batchnorm_momentum training=is_training name="bn1")<line_sep>endpoints["bn1"]=features<line_sep>features=relu(features name='relu1')<line_sep>endpoints["relu1"]=features<line_sep># Layer 2: [-2, -1, 0, 1, 2] --> [b ,1, l-4, 512]
# conv2d + batchnorm + relu
# This is slightly different from Kaldi, which uses dilated convolution
features=tf.layers.conv2d(features 512 (1 5) activation=<none> kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer) name='conv2')<line_sep>endpoints["conv2"]=features<line_sep>features=tf.layers.batch_normalization(features momentum=params.batchnorm_momentum training=is_training name="bn2")<line_sep>endpoints["bn2"]=features<line_sep>features=relu(features name='relu2')<line_sep>endpoints["relu2"]=features<line_sep># Layer 3: [-3, -2, -1, 0, 1, 2, 3] --> [b, 1, l-6, 512]
# conv2d + batchnorm + relu
# Still, use a non-dilation one
features=tf.layers.conv2d(features 512 (1 7) activation=<none> kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer) name='conv3')<line_sep>endpoints["conv3"]=features<line_sep>features=tf.layers.batch_normalization(features momentum=params.batchnorm_momentum training=is_training name="bn3")<line_sep>endpoints["bn3"]=features<line_sep>features=relu(features name='relu3')<line_sep>endpoints["relu3"]=features<line_sep># Convert to [b, l, 512]
features=tf.squeeze(features axis=1)<line_sep># The output of the 3-rd layer can simply be rank 3.
endpoints["relu3"]=features<line_sep># Layer 4: [b, l, 512] --> [b, l, 512]
features=tf.layers.dense(features 512 activation=<none> kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer) name="dense4")<line_sep>endpoints["dense4"]=features<line_sep>features=tf.layers.batch_normalization(features momentum=params.batchnorm_momentum training=is_training name="bn4")<line_sep>endpoints["bn4"]=features<line_sep>features=relu(features name='relu4')<line_sep>endpoints["relu4"]=features<line_sep># Layer 5: [b, l, x]
<if_stmt>"num_nodes_pooling_layer"<not><in>params.dict# The default number of nodes before pooling
<block_start>params.dict["num_nodes_pooling_layer"]=1500<block_end>features=tf.layers.dense(features params.num_nodes_pooling_layer activation=<none> kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer) name="dense5")<line_sep>endpoints["dense5"]=features<line_sep>features=tf.layers.batch_normalization(features momentum=params.batchnorm_momentum training=is_training name="bn5")<line_sep>endpoints["bn5"]=features<line_sep>features=relu(features name='relu5')<line_sep>endpoints["relu5"]=features<line_sep># Here, we need to slice the feature since the original feature is expanded by the larger context between
# the speaker and phone context. I make a hypothesis that the phone context will be larger.
# So the speaker network needs slicing.
<if_stmt>(params.speaker_left_context<l>params.phone_left_context<and>params.speaker_right_context<l>params.phone_right_context)<block_start>features=features[: params.phone_left_context-params.speaker_left_context:params.speaker_right_context-params.phone_right_context :]<block_end><else_stmt><block_start><raise>NotImplementedError("The speaker and phone context is not supported now.")<block_end># Make sure we've got the right feature
<with_stmt>tf.control_dependencies([tf.assert_equal(shape_list(features)[1] shape_list(phone_labels)[1])])# Pooling layer
# The length of utterances may be different.
# The original pooling uses all the frames, which is not appropriate for this case.
# So we create a new function (I don't want to change the original one).
<block_start><if_stmt>params.pooling_type<eq>"statistics_pooling"<block_start>features=statistics_pooling_v2(features feature_length endpoints params is_training)<block_end><else_stmt><block_start><raise>NotImplementedError("Not implement %s pooling"%params.pooling_type)<block_end>endpoints['pooling']=features<block_end># Utterance-level network
# Layer 6: [b, 512]
features=tf.layers.dense(features 512 activation=<none> kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer) name='dense6')<line_sep>endpoints['dense6']=features<line_sep>features=tf.layers.batch_normalization(features momentum=params.batchnorm_momentum training=is_training name="bn6")<line_sep>endpoints["bn6"]=features<line_sep>features=relu(features name='relu6')<line_sep>endpoints["relu6"]=features<line_sep># Layer 7: [b, x]
<if_stmt>"speaker_dim"<not><in>params.dict# The default number of nodes in the last layer
<block_start>params.dict["speaker_dim"]=512<block_end># We need mean and logvar.
mu=tf.layers.dense(features params.speaker_dim activation=<none> kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer) name="zs_dense")<line_sep>endpoints['zs_mu_dense']=mu<if_stmt>"spk_last_layer_no_bn"<not><in>params.dict<block_start>params.spk_last_layer_no_bn=<false><block_end><if_stmt><not>params.spk_last_layer_no_bn<block_start>mu=tf.layers.batch_normalization(mu momentum=params.batchnorm_momentum training=is_training name="zs_bn")<line_sep>endpoints['zs_mu_bn']=mu<block_end><if_stmt>"spk_last_layer_linear"<not><in>params.dict<block_start>params.spk_last_layer_linear=<false><block_end><if_stmt><not>params.spk_last_layer_linear<block_start>mu=relu(mu name="zs_mu_relu")<line_sep>endpoints['zs_mu_relu']=mu<block_end># We do not compute logvar in this version.
# Set logvar=0 ==> var=1
logvar=0<line_sep># epsilon = tf.random_normal(tf.shape(mu), name='zs_epsilon')
# sample = mu + tf.exp(0.5 * logvar) * epsilon
sample=mu<block_end><return>sample mu logvar<block_end><def_stmt>build_phone_encoder features speaker_labels feature_length params endpoints reuse_variables is_training=<false><block_start>"""Build encoder for phone latent variable.
Use the tdnn and share the same structure in the lower layers.
Args:
features: the input features.
speaker_labels: the speaker labels (i.e. the speaker index). may be used in the future.
feature_length: the length of each feature.
params: the parameters.
endpoints: will be updated during building.
reuse_variables: if true, reuse the existing variables
is_training: used in batchnorm.
:return: sampled_zs, mu_zs, logvar_zs
"""<line_sep>relu=tf.nn.relu<if_stmt>"network_relu_type"<in>params.dict<block_start><if_stmt>params.network_relu_type<eq>"prelu"<block_start>relu=prelu<block_end><if_stmt>params.network_relu_type<eq>"lrelu"<block_start>relu=tf.nn.leaky_relu<block_end><block_end># # This is moved to the model config file.
# # Acoustic network params:
# # Most share 4 layers with x-vector network.
# # [-2,2], [-2,2], [-3,3], [0], [-4,0,4]
# # The last fully-connected layer is appended as the phonetic embedding
# layer_size = [512, 512, 512, 512, 512]
# kernel_size = [5, 5, 7, 1, 3]
# dilation_size = [1, 1, 1, 1, 4]
num_layers=len(params.phone_kernel_size)<line_sep>layer_index=0<if_stmt>params.num_shared_layers<g>0# We may share the lower layers of the two tasks.
# Go through the shared layers between the speaker and phone networks.
<block_start><assert_stmt>params.num_shared_layers<l>num_layers<with_stmt>tf.variable_scope("encoder" reuse=<true>)<block_start><for_stmt>i range(params.num_shared_layers)<block_start><if_stmt>params.phone_kernel_size[layer_index]<g>1<block_start><if_stmt>len(shape_list(features))<eq>3# Add a dummy dim to support 2d conv
<block_start>features=tf.expand_dims(features axis=1)<block_end>features=tf.layers.conv2d(features params.phone_layer_size[layer_index] (1 params.phone_kernel_size[layer_index]) activation=<none> dilation_rate=(1 params.phone_dilation_size[layer_index]) kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer) name='conv%d'%(layer_index+1))<block_end><elif_stmt>params.phone_kernel_size[layer_index]<eq>1<block_start><if_stmt>len(shape_list(features))<eq>4# Remove a dummy dim to do dense layer
<block_start>features=tf.squeeze(features axis=1)<block_end>features=tf.layers.dense(features params.phone_layer_size[layer_index] activation=<none> kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer) name="dense%d"%(layer_index+1))<block_end>features=tf.layers.batch_normalization(features momentum=params.batchnorm_momentum training=is_training name="bn%d"%(layer_index+1))<line_sep>features=relu(features name='relu%d'%(layer_index+1))<line_sep>layer_index<augadd>1<block_end><block_end><block_end><with_stmt>tf.variable_scope("encoder_phone" reuse=reuse_variables)# In the unshared part, the endpoints should be updated.
<block_start><while_stmt>layer_index<l>num_layers<block_start><if_stmt>params.phone_kernel_size[layer_index]<g>1<block_start><if_stmt>len(shape_list(features))<eq>3<block_start>features=tf.expand_dims(features axis=1)<block_end>features=tf.layers.conv2d(features params.phone_layer_size[layer_index] (1 params.phone_kernel_size[layer_index]) activation=<none> dilation_rate=(1 params.phone_dilation_size[layer_index]) kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer) name='phn_conv%d'%(layer_index+1))<line_sep>endpoints["phn_conv%d"%(layer_index+1)]=features<block_end><elif_stmt>params.phone_kernel_size[layer_index]<eq>1<block_start><if_stmt>len(shape_list(features))<eq>4<block_start>features=tf.squeeze(features axis=1)<block_end>features=tf.layers.dense(features params.phone_layer_size[layer_index] activation=<none> kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer) name="phn_dense%d"%(layer_index+1))<line_sep>endpoints["phn_dense%d"%(layer_index+1)]=features<block_end>features=tf.layers.batch_normalization(features momentum=params.batchnorm_momentum training=is_training name="phn_bn%d"%(layer_index+1))<line_sep>endpoints["phn_bn%d"%(layer_index+1)]=features<line_sep>features=relu(features name='phn_relu%d'%(layer_index+1))<line_sep>endpoints["phn_relu%d"%(layer_index+1)]=features<line_sep>layer_index<augadd>1<block_end># The last layer
<if_stmt>len(shape_list(features))<eq>4<block_start>features=tf.squeeze(features axis=1)<block_end># Similar with the speaker network, we may need to slice the feature due to the different context between
# the speaker and phone network. At this moment, I just make a hypothesis that the phone context will be
# larger which means there is no need to slice for the phone network
<if_stmt>(params.speaker_left_context<g>params.phone_left_context<and>params.speaker_right_context<g>params.phone_right_context)<block_start><raise>NotImplementedError("The speaker and phone context is not supported now.")<line_sep># features = features[:, params.speaker_left_context - params.phone_left_context:
# params.phone_right_context - params.speaker_right_context, :]
<block_end># # We do not validate the length because this will introduce the alignment -- phn_labels, which
# # is unnecessary when doing the phone inference.
# with tf.control_dependencies([tf.assert_equal(shape_list(features)[1], shape_list(self.phn_labels)[1])]):
# features = tf.identity(features)
<if_stmt>"phone_dim"<not><in>params.dict<block_start>params.dict["phone_dim"]=512<block_end>mu=tf.layers.dense(features params.phone_dim activation=<none> kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer) name="zp_dense")<line_sep>endpoints['zp_mu_dense']=mu<line_sep>mu=tf.layers.batch_normalization(mu momentum=params.batchnorm_momentum training=is_training name="zp_bn")<line_sep>endpoints['zp_mu_bn']=mu<line_sep>mu=relu(mu name='zp_mu_relu')<line_sep>endpoints['zp_mu_relu']=mu<line_sep>logvar=0<line_sep># epsilon = tf.random_normal(tf.shape(mu), name='zp_epsilon')
# sample = mu + tf.exp(0.5 * logvar) * epsilon
sample=mu<block_end><return>sample mu logvar<block_end> |
<import_stmt>torch.nn<as>nn<import_from_stmt>criterion.common.reduction.default build_loss_reduction_function<import_from_stmt>data.operator.bbox.spatial.vectorized.torch.cxcywh_to_xyxy box_cxcywh_to_xyxy<def_stmt>l1_loss_data_adaptor pred label _<block_start>predicted_bbox=pred['bbox']<if_stmt>label<is><none><block_start><return><false> predicted_bbox.sum()<times>0<block_end>(num_boxes_pos target_bounding_box_label_matrix)=label<line_sep><return><true> (box_cxcywh_to_xyxy(predicted_bbox) box_cxcywh_to_xyxy(target_bounding_box_label_matrix))<block_end><def_stmt>reduce_by_weight loss pred label context<block_start><return>((loss<times>context['sample_weight'].unsqueeze(-1).expand(-1 4)).reshape(-1)).sum()/4<block_end><def_stmt>build_L1 loss_parameters *_<block_start>l1_loss=nn.L1Loss(reduction='none')<if_stmt>'reduce'<in>loss_parameters<and>loss_parameters['reduce']<eq>'weighted'<block_start>loss_reduce_function=reduce_by_weight<block_end><else_stmt><block_start>loss_reduce_function=build_loss_reduction_function(loss_parameters)<block_end><return>l1_loss l1_loss_data_adaptor loss_reduce_function<block_end> |
# -*- coding: utf-8 -*-
<import_from_stmt>copy deepcopy<import_stmt>numpy<as>np<import_stmt>pytest<import_from_stmt>pygam *<import_from_stmt>pygam.terms Term Intercept SplineTerm LinearTerm FactorTerm TensorTerm TermList<import_from_stmt>pygam.utils flatten<line_sep>@pytest.fixture<def_stmt>chicago_gam chicago_X_y<block_start>X,y=chicago_X_y<line_sep>gam=PoissonGAM(terms=s(0 n_splines=200)+te(3 1)+s(2)).fit(X y)<line_sep><return>gam<block_end><def_stmt>test_wrong_length <block_start>"""iterable params must all match lengths
"""<with_stmt>pytest.raises(ValueError)<block_start>SplineTerm(0 lam=[0 1 2] penalties=['auto' 'auto'])<block_end><block_end><def_stmt>test_num_coefs mcycle_X_y wage_X_y<block_start>"""make sure this method gives correct values
"""<line_sep>X,y=mcycle_X_y<line_sep>term=Intercept().compile(X)<assert_stmt>term.n_coefs<eq>1<line_sep>term=LinearTerm(0).compile(X)<assert_stmt>term.n_coefs<eq>1<line_sep>term=SplineTerm(0).compile(X)<assert_stmt>term.n_coefs<eq>term.n_splines<line_sep>X,y=wage_X_y<line_sep>term=FactorTerm(2).compile(X)<assert_stmt>term.n_coefs<eq>5<line_sep>term_a=SplineTerm(0).compile(X)<line_sep>term_b=SplineTerm(1).compile(X)<line_sep>term=TensorTerm(term_a term_b).compile(X)<assert_stmt>term.n_coefs<eq>term_a.n_coefs<times>term_b.n_coefs<block_end><def_stmt>test_term_list_removes_duplicates <block_start>"""prove that we remove duplicated terms"""<line_sep>term=SplineTerm(0)<line_sep>term_list=term+term<assert_stmt>isinstance(term_list TermList)<assert_stmt>len(term_list)<eq>1<block_end><def_stmt>test_tensor_invariance_to_scaling chicago_gam chicago_X_y<block_start>"""a model with tensor terms should give results regardless of input scaling
"""<line_sep>X,y=chicago_X_y<line_sep>X[: 3]=X[: 3]<times>100<line_sep>gam=PoissonGAM(terms=s(0 n_splines=200)+te(3 1)+s(2)).fit(X y)<assert_stmt>np.allclose(gam.coef_ chicago_gam.coef_ atol=1e-6)<block_end><def_stmt>test_tensor_must_have_at_least_2_marginal_terms <block_start><with_stmt>pytest.raises(ValueError)<block_start>te(0)<block_end><block_end><def_stmt>test_tensor_term_expands_args_to_match_penalties_and_terms <block_start>tensor=te(0 1 lam=3)<assert_stmt>len(tensor.lam)<eq>2<assert_stmt>len(flatten(tensor.lam))<eq>2<line_sep>tensor=te(0 1 penalties='auto')<assert_stmt>len(tensor.lam)<eq>2<assert_stmt>len(flatten(tensor.lam))<eq>2<line_sep>tensor=te(0 1 penalties=['auto' ['auto' 'auto']])<assert_stmt>len(tensor.lam)<eq>2<assert_stmt>len(flatten(tensor.lam))<eq>3<block_end><def_stmt>test_tensor_term_skips_kwargs_when_marginal_term_is_supplied <block_start>tensor=te(0 s(1) n_splines=420)<assert_stmt>tensor._terms[0].n_coefs<eq>420<assert_stmt>tensor._terms[1].n_coefs<ne>420<block_end><def_stmt>test_tensor_term_doesnt_accept_tensor_terms <block_start><with_stmt>pytest.raises(ValueError)<block_start>te(l(0) te(0 1))<block_end><block_end><def_stmt>test_tensor_args_length_must_agree_with_number_of_terms <block_start><with_stmt>pytest.raises(ValueError)<block_start>te(0 1 lam=[3])<block_end><with_stmt>pytest.raises(ValueError)<block_start>te(0 1 lam=[3])<block_end><with_stmt>pytest.raises(ValueError)<block_start>te(0 1 lam=[3 3 3])<block_end><block_end><def_stmt>test_build_from_info <block_start>"""we can rebuild terms from info
"""<line_sep>terms=[Intercept() LinearTerm(0) SplineTerm(0) FactorTerm(0) TensorTerm(0 1)]<for_stmt>term terms<block_start><assert_stmt>Term.build_from_info(term.info)<eq>term<block_end><assert_stmt>te(0 1)<eq>TensorTerm(SplineTerm(0 n_splines=10) SplineTerm(1 n_splines=10))<block_end><def_stmt>test_by_variable <block_start>"""our fit on the toy tensor dataset with a by variable on the linear feature
should be similar to the fit with a tensor product of a spline with a linear
term
"""<line_sep><pass><block_end><def_stmt>test_by_variable_doesnt_exist_in_X mcycle_X_y<block_start>"""raises a value error if we cannot locate the by variable
"""<line_sep>term=s(0 by=1)<with_stmt>pytest.raises(ValueError)<block_start>term.compile(mcycle_X_y[0])<block_end><block_end><def_stmt>test_term_list_from_info <block_start>"""we can remake a term list from info
"""<line_sep>term_list=SplineTerm(0)+LinearTerm(1)<assert_stmt>Term.build_from_info(term_list.info)<eq>term_list<block_end><def_stmt>test_term_list_only_accepts_terms_or_term_list <block_start>TermList()<with_stmt>pytest.raises(ValueError)<block_start>TermList(<none>)<block_end><block_end><def_stmt>test_pop_term_from_term_list <block_start>term_list=SplineTerm(0)+LinearTerm(1)+Intercept()<line_sep>term_list_2=deepcopy(term_list)<line_sep># by default we pop the last
<assert_stmt>term_list_2.pop()<eq>term_list[-1]<assert_stmt>term_list_2.pop(0)<eq>term_list[0]<with_stmt>pytest.raises(ValueError)<block_start>term_list_2.pop(1)<eq>term_list[0]<block_end><block_end><def_stmt>test_no_multiply <block_start>"""trying to multiply terms raises an error
"""<with_stmt>pytest.raises(NotImplementedError)<block_start>SplineTerm(0)<times>LinearTerm(1)<block_end>term_list=SplineTerm(0)+LinearTerm(1)<with_stmt>pytest.raises(NotImplementedError)<block_start>term_list<times>term_list<block_end><block_end><def_stmt>test_by_is_similar_to_tensor_with_linear_term toy_interaction_X_y<block_start>"""for simple interactions we can acheive equivalent fits using:
- a spline with a by-variable
- a tensor between spline and a linear term
"""<line_sep>X,y=toy_interaction_X_y<line_sep>gam_a=LinearGAM(te(s(0 n_splines=20) l(1))).fit(X y)<line_sep>gam_b=LinearGAM(s(0 by=1)).fit(X y)<line_sep>r2_a=gam_a.statistics_['pseudo_r2']['explained_deviance']<line_sep>r2_b=gam_b.statistics_['pseudo_r2']['explained_deviance']<assert_stmt>np.allclose(r2_a r2_b)<block_end><def_stmt>test_correct_smoothing_in_tensors toy_interaction_X_y<block_start>"""check that smoothing penalties are correctly computed across the marginal
dimensions
feature 0 is the sinusoid, so this one needs to be wiggly
feature 1 is the linear function, so this can be smoothed heavily
"""<line_sep>X,y=toy_interaction_X_y<line_sep># increase smoothing on linear function heavily, to no detriment
gam=LinearGAM(te(0 1 lam=[0.6 10000])).fit(X y)<assert_stmt>gam.statistics_['pseudo_r2']['explained_deviance']<g>0.9<line_sep># smoothing the sinusoid function heavily reduces fit quality
gam=LinearGAM(te(0 1 lam=[10000 0.6])).fit(X y)<assert_stmt>gam.statistics_['pseudo_r2']['explained_deviance']<l>0.1<block_end><def_stmt>test_dummy_encoding wage_X_y wage_gam<block_start>"""check that dummy encoding produces fewer coefficients than one-hot"""<line_sep>X,y=wage_X_y<line_sep>gam=LinearGAM(s(0)+s(1)+f(2 coding='dummy')).fit(X y)<assert_stmt>gam._modelmat(X=X term=2).shape[1]<eq>4<assert_stmt>gam.terms[2].n_coefs<eq>4<assert_stmt>wage_gam._modelmat(X=X term=2).shape[1]<eq>5<assert_stmt>wage_gam.terms[2].n_coefs<eq>5<block_end><def_stmt>test_build_cyclic_p_spline hepatitis_X_y<block_start>"""check the cyclic p spline builds
the r2 for a cyclic gam on an obviously aperiodic function should suffer
"""<line_sep>X,y=hepatitis_X_y<line_sep># unconstrained gam
gam=LinearGAM(s(0)).fit(X y)<line_sep>r_unconstrained=gam.statistics_['pseudo_r2']['explained_deviance']<line_sep># cyclic gam
gam=LinearGAM(s(0 basis='cp')).fit(X y)<line_sep>r_cyclic=gam.statistics_['pseudo_r2']['explained_deviance']<assert_stmt>r_unconstrained<g>r_cyclic<block_end><def_stmt>test_cyclic_p_spline_periodicity hepatitis_X_y<block_start>"""check the cyclic p spline behaves periodically
namely:
- the value at the edge knots should be the same
- extrapolation should be periodic
"""<line_sep>X,y=hepatitis_X_y<line_sep>gam=LinearGAM(s(0 basis='cp')).fit(X y)<line_sep># check periodicity
left=gam.edge_knots_[0][1]<line_sep>right=gam.edge_knots_[0][1]<assert_stmt>(gam.predict(left)<eq>gam.predict(right))<line_sep># check extrapolation
further=right+(right-left)<assert_stmt>(gam.predict(further)<eq>gam.predict(right))<block_end><def_stmt>test_cyclic_p_spline_custom_period <block_start>"""show that we can set custom edge_knots, and that these affect our model's
performance
"""<line_sep># define square wave
X=np.linspace(0 1 5000)<line_sep>y=X<g>0.5<line_sep># when modeling the full period, we get close with a periodic basis
gam=LinearGAM(s(0 basis='cp' n_splines=4 spline_order=0)).fit(X y)<assert_stmt>np.allclose(gam.predict(X) y)<assert_stmt>np.allclose(gam.edge_knots_[0] [0 1])<line_sep># when modeling a non-periodic function, our periodic model fails
gam=LinearGAM(s(0 basis='cp' n_splines=4 spline_order=0 edge_knots=[0 0.5])).fit(X y)<assert_stmt>np.allclose(gam.predict(X) 0.5)<assert_stmt>np.allclose(gam.edge_knots_[0] [0 0.5])<block_end><def_stmt>test_tensor_terms_have_constraints toy_interaction_X_y<block_start>"""test that we can fit a gam with constrained tensor terms,
even if those constraints are 'none'
"""<line_sep>X,y=toy_interaction_X_y<line_sep>gam=LinearGAM(te(0 1 constraints='none')).fit(X y)<assert_stmt>gam._is_fitted<assert_stmt>gam.terms.hasconstraint<block_end><def_stmt>test_tensor_composite_constraints_equal_penalties <block_start>"""check that the composite constraint matrix for a tensor term
is equivalent to a penalty matrix under the correct conditions
"""<import_from_stmt>pygam.penalties derivative<def_stmt>der1 *args **kwargs<block_start>kwargs.update({'derivative':1})<line_sep><return>derivative(*args **kwargs)<block_end># create a 3D tensor where the penalty should be equal to the constraint
term=te(0 1 2 n_splines=[4 5 6] penalties=der1 lam=1 constraints='monotonic_inc')<line_sep># check all the dimensions
<for_stmt>i range(3)<block_start>P=term._build_marginal_penalties(i).A<line_sep>C=term._build_marginal_constraints(i -np.arange(term.n_coefs) constraint_lam=1 constraint_l2=0).A<assert_stmt>(P<eq>C).all()<block_end><block_end><def_stmt>test_tensor_with_constraints hepatitis_X_y<block_start>"""we should be able to fit a gam with not 'none' constraints on a tensor term
and observe its effect in reducing the R2 of the fit
"""<line_sep>X,y=hepatitis_X_y<line_sep>X=np.c_[X np.random.randn(len(X))]# add a random interaction data
# constrain useless dimension
gam_useless_constraint=LinearGAM(te(0 1 constraints=['none' 'monotonic_dec'] n_splines=[20 4]))<line_sep>gam_useless_constraint.fit(X y)<line_sep># constrain informative dimension
gam_constrained=LinearGAM(te(0 1 constraints=['monotonic_dec' 'none'] n_splines=[20 4]))<line_sep>gam_constrained.fit(X y)<assert_stmt>gam_useless_constraint.statistics_['pseudo_r2']['explained_deviance']<g>0.5<assert_stmt>gam_constrained.statistics_['pseudo_r2']['explained_deviance']<l>0.1<block_end><class_stmt>TestRegressions(object)<block_start><def_stmt>test_no_auto_dtype self<block_start><with_stmt>pytest.raises(ValueError)<block_start>SplineTerm(feature=0 dtype='auto')<block_end><block_end><def_stmt>test_compose_penalties self<block_start>"""penalties should be composable, and this is done by adding all
penalties on a single term, NOT multiplying them.
so a term with a derivative penalty and a None penalty should be equivalent
to a term with a derivative penalty.
"""<line_sep>base_term=SplineTerm(0)<line_sep>term=SplineTerm(feature=0 penalties=['auto' 'none'])<line_sep># penalties should be equivalent
<assert_stmt>(term.build_penalties()<eq>base_term.build_penalties()).A.all()<line_sep># multiple penalties should be additive, not multiplicative,
# so 'none' penalty should have no effect
<assert_stmt>np.abs(term.build_penalties().A).sum()<g>0<block_end><def_stmt>test_compose_constraints self hepatitis_X_y<block_start>"""we should be able to compose penalties
here we show that a gam with a monotonic increasing penalty composed with a monotonic decreasing
penalty is equivalent to a gam with only an intercept
"""<line_sep>X,y=hepatitis_X_y<line_sep>gam_compose=LinearGAM(s(0 constraints=['monotonic_inc' 'monotonic_dec'])).fit(X y)<line_sep>gam_intercept=LinearGAM(terms=<none>).fit(X y)<assert_stmt>np.allclose(gam_compose.coef_[-1] gam_intercept.coef_)<block_end><def_stmt>test_constraints_and_tensor self chicago_X_y<block_start>"""a model that has consrtraints and tensor terms should not fail to build
because of inability of tensor terms to build a 'none' constraint
"""<line_sep>X,y=chicago_X_y<line_sep>gam=PoissonGAM(s(0 constraints='monotonic_inc')+te(3 1)+s(2)).fit(X y)<assert_stmt>gam._is_fitted<block_end><block_end> |
<import_from_stmt>.custom_tools # noqa
CheckpointTool ClearTool PolyVertexDrawTool PolyVertexEditTool RestoreTool <line_sep> |
<import_stmt>os<import_stmt>functools<import_from_stmt>util read<line_sep>"""
Replace a custom heap with dlmalloc
Usage:
from util import heap
heap.declare(pt.linker)
pt.patch(addr, sym='dlmalloc')
pt.patch(addr, sym='dlcalloc')
pt.patch(addr, sym='dlfree')
pt.patch(addr, sym='dlrealloc')
"""<line_sep>__all__=["apply"]<line_sep>dlmalloc={'symbols':{'dlmalloc':'void *dlmalloc(size_t size)' 'dlfree':'void dlfree(void *addr)' 'dlcalloc':'void *dlcalloc(size_t count, size_t size)' 'dlrealloc':'void *dlrealloc(void *addr, size_t size)' } 'source':read('heap/malloc.c')}<def_stmt>declare linker<block_start><if_stmt><not>'dlmalloc'<in>linker<block_start>linker.declare(**dlmalloc)<block_end><block_end> |
<import_from_stmt>.kaggle_5th_place_model FootballKaggle5thPlaceModel<import_from_stmt>.rule_based_bot FootballRuleBaseModel<line_sep> |
<import_stmt>data_utils<import_stmt>json<line_sep># Generate training data splits.
# input source_directory - path to directory containing vegalite examples
# data_split_params - train/text/dev data split configuration
# output_directory - path to directory containing generated train/dev/test source files and vocabularies
source_directory="examples"<line_sep>data_split_params=[{"tag":"train" "percentage":[0 0.8]} {"tag":"dev" "percentage":[0.8 0.9]} {"tag":"test" "percentage":[0.9 1]}]<line_sep>output_directory="sourcedata"<line_sep>data_utils.generate_data_pairs(source_directory output_directory data_split_params)<line_sep> |
<import_stmt>sys<import_stmt>os<import_stmt>time<import_stmt>json<import_stmt>random<import_stmt>traceback<import_stmt>threading<import_from_stmt>PyPtt PTT<def_stmt>get_password password_file<block_start><try_stmt><block_start><with_stmt>open(password_file)<as>AccountFile<block_start>account=json.load(AccountFile)<line_sep>ptt_id=account['id']<line_sep>password=account['pw']<block_end><block_end><except_stmt>FileNotFoundError<block_start>print(f'Please write PTT ID and Password in {password_file}')<line_sep>print('{"id":"your ptt id", "pw":"your ptt pw"}')<line_sep>sys.exit()<block_end><return>ptt_id password<block_end><def_stmt>init <block_start>print('===正向===')<line_sep>print('===預設值===')<line_sep>PTT.API()<line_sep>print('===中文顯示===')<line_sep>PTT.API(language=PTT.i18n.language.CHINESE)<line_sep>print('===英文顯示===')<line_sep>PTT.API(language=PTT.i18n.language.ENGLISH)<line_sep>print('===log DEBUG===')<line_sep>PTT.API(log_level=PTT.log.level.DEBUG)<line_sep>print('===log INFO===')<line_sep>PTT.API(log_level=PTT.log.level.INFO)<line_sep>print('===log SLIENT===')<line_sep>PTT.API(log_level=PTT.log.level.SILENT)<line_sep>print('===log SLIENT======')<line_sep>print('===負向===')<try_stmt><block_start>print('===語言 99===')<line_sep>PTT.API(language=99)<block_end><except_stmt>ValueError<block_start>print('通過')<block_end><except_stmt><block_start>print('沒通過')<line_sep>sys.exit(-1)<block_end>print('===語言放字串===')<try_stmt><block_start>PTT.API(language='PTT.i18n.language.ENGLISH')<block_end><except_stmt>TypeError<block_start>print('通過')<block_end><except_stmt><block_start>print('沒通過')<line_sep>sys.exit(-1)<block_end><def_stmt>handler msg<block_start><with_stmt>open('log.txt' 'a' encoding='utf-8')<as>f<block_start>f.write(msg+'\n')<block_end><block_end>ptt_bot=PTT.API(log_handler=handler)<line_sep>ptt_bot.log('Test log')<block_end><def_stmt>performance_test <block_start>test_time=2000<line_sep>print(f'效能測試 get_time {test_time} 次')<line_sep>start_time=time.time()<for_stmt>_ range(test_time)<block_start>ptt_time=ptt_bot.get_time()<if_stmt>ptt_time<is><none><block_start>print('PTT_TIME is None')<line_sep><break><block_end># print(ptt_time)
<block_end>end_time=time.time()<line_sep>print('Performance Test get_time '+str(round(end_time-start_time 2))+' s')<line_sep>start_time=time.time()<for_stmt>_ range(test_time)<block_start>ptt_time=ptt_bot.fast_get_time()<if_stmt>ptt_time<is><none><block_start>print('PTT_TIME is None')<line_sep><break><block_end># print(ptt_time)
<block_end>end_time=time.time()<line_sep>print('Performance Test fast_get_time '+str(round(end_time-start_time 2))+' s')<line_sep>ptt_bot.logout()<line_sep>print('Performance Test finish')<line_sep>sys.exit()<block_end># for _ in range(1000):
# ptt_time = ptt_bot.fast_get_time()
# if len(ptt_time) != 5:
# print('error!', ptt_time)
# break
# # print(ptt_time)
<def_stmt>get_post <block_start><def_stmt>show name value<block_start><if_stmt>value<is><not><none><block_start>print(f'{name} [{value}]')<block_end><else_stmt><block_start>print(f'無{name}')<block_end><block_end><if_stmt>ptt_bot.config.host<eq>PTT.data_type.host_type.PTT1<block_start>test_post_list=[('Python' 1) ('NotExitBoard' 1) ('Python' '1TJH_XY0') # 文章格式錯誤
('Steam' 4444) ('Stock' 92324) ('Stock' '1TVnEivO') # 文章格式錯誤
('movie' 457) ('Gossiping' '1UDnXefr') ('joke' '1Tc6G9eQ') # 135193
('Test' 575) # 待證文章
('Test' '1U3pLzi0') # 古早文章
('LAW' 1) # 辦刪除文章
('Test' 347) # push number parse error
('Ptt25sign' '1VppdKLW') ]<block_end><else_stmt><block_start>test_post_list=[# PTT2
('PttSuggest' 1) ('PttSuggest' '0z7TVw00') # 文章格式錯誤
# 發信站:
('PttSuggest' '1EbQObff') # 文章起始消失跳躍,導致沒有結尾 (已經修正)
('WhoAmI' '1Tc0ooap') # Test
# 文章格式錯誤
# 瞎改
('Test' '1Sp1W7Fi') ('Test' '1TXRkuDW') ('WhoAmI' '1TqJhzQH')]<block_end>query=<false><for_stmt>(board index) test_post_list<block_start><try_stmt><block_start>print('看板' board index)<if_stmt>isinstance(index int)<block_start>post_info=ptt_bot.get_post(board post_index=index # SearchType=PTT.data_type.post_search_type.KEYWORD,
# SearchCondition='公告',
query=query)<block_end><else_stmt><block_start>post_info=ptt_bot.get_post(board post_aid=index # SearchType=PTT.data_type.post_search_type.KEYWORD,
# SearchCondition='公告',
query=query)<block_end><if_stmt>post_info<is><none><block_start>print('Empty')<line_sep><continue><block_end><if_stmt><not>post_info.pass_format_check<block_start>print('文章格式錯誤')<line_sep><continue><block_end><if_stmt>post_info.is_lock<block_start>print('鎖文狀態')<line_sep><continue><block_end><if_stmt>post_info.delete_status<ne>PTT.data_type.post_delete_status.NOT_DELETED<block_start>print('文章已經被刪除')<line_sep><continue><block_end># show('Origin Post\n', post.origin_post)
<if_stmt><not>query<block_start>print('Origin Post\n'+post_info.origin_post)<line_sep>print('='<times>30+' Origin Post Finish')<block_end>show('Board' post_info.board)<line_sep>show('AID' post_info.aid)<line_sep>show('push num' post_info.push_number)<line_sep>show('index' post_info.index)<line_sep>show('Author' post_info.author)<line_sep>show('push_number' post_info.push_number)<line_sep>show('List Date' post_info.list_date)<line_sep>show('Title' post_info.title)<line_sep>show('Money' post_info.money)<line_sep>show('URL' post_info.web_url)<if_stmt>post_info.is_unconfirmed<block_start>print('待證實文章')<block_end><if_stmt><not>query<block_start>show('Date' post_info.date)<line_sep>show('Content' post_info.content)<line_sep>show('IP' post_info.ip)<line_sep>show('Location' post_info.location)<line_sep># 在文章列表上的日期
push_count=0<line_sep>boo_count=0<line_sep>arrow_count=0<for_stmt>push_obj post_info.push_list# print(Push.getType())
# print(Push.getAuthor())
# print(Push.getContent())
# print(Push.getIP())
# print(Push.time)
<block_start><if_stmt>push_obj.type<eq>PTT.data_type.push_type.PUSH<block_start>push_count<augadd>1<line_sep>push_type='推'<block_end><if_stmt>push_obj.type<eq>PTT.data_type.push_type.BOO<block_start>boo_count<augadd>1<line_sep>push_type='噓'<block_end><if_stmt>push_obj.type<eq>PTT.data_type.push_type.ARROW<block_start>arrow_count<augadd>1<line_sep>push_type='→'<block_end>author=push_obj.author<line_sep>content=push_obj.content<line_sep># Buffer = f'[{Author}] 給了一個{Type} 說 [{Content}]'
# if Push.getIP() is not None:
# Buffer += f' 來自 [{Push.getIP()}]'
# Buffer += f' 時間是 [{Push.time}]'
<if_stmt>push_obj.ip<is><not><none><block_start>buffer=f'{push_type} {author}: {content} {push_obj.ip} {push_obj.time}'<block_end><else_stmt><block_start>buffer=f'{push_type} {author}: {content} {push_obj.time}'<block_end>print(buffer)<block_end># print(post_info.origin_post)
print(f'Total {push_count} Pushs {boo_count} Boo {arrow_count} Arrow = {push_count-boo_count}')<block_end><block_end><except_stmt>Exception<as>e<block_start>traceback.print_tb(e.__traceback__)<line_sep>print(e)<block_end><block_end><block_end><def_stmt>get_aid_from_url # test_url = [
# 'https://www.ptt.cc/bbs/NDHU-His_WV/M.1072146614.A.D59.html',
# 'https://www.ptt.cc/bbs/NDMC-M99c/M.1084922723.A.html',
# ]
#
# for url in test_url:
# board, aid = ptt_bot.get_aid_from_url(url)
# print(board, aid)
#
# return
<block_start>bug_board=['ck55th316']<def_stmt>random_board_test <block_start>board_list=ptt_bot.get_board_list()<line_sep>board_list=[x<for>x board_list<if>x<not><in>bug_board]<line_sep>test_range=5000<line_sep>test_board=random.sample(board_list test_range)<for_stmt>test_board test_board<block_start>print(test_board)<line_sep>newest_index=ptt_bot.get_newest_index(PTT.data_type.index_type.BBS board=test_board)<line_sep>print(f'newest_index {newest_index}')<if_stmt>newest_index<eq>0<block_start><continue><block_end><while_stmt><true><block_start>current_index=random.randrange(1 newest_index+1)<line_sep>print(current_index)<line_sep>post_info=ptt_bot.get_post(test_board post_index=current_index query=<true>)<if_stmt>post_info.delete_status<ne>PTT.data_type.post_delete_status.NOT_DELETED<block_start><continue><block_end><if_stmt>post_info.web_url<is><none><block_start>print(f'error url is None {test_board} {current_index}')<line_sep><break><block_end><if_stmt>post_info.aid<is><none><block_start>print(f'error aid is None {test_board} {current_index}')<line_sep><continue><block_end>convert_board,convert_aid=ptt_bot.get_aid_from_url(post_info.web_url)<if_stmt>convert_board<ne>test_board<block_start>print('board not match')<line_sep>print(f'post_info {test_board}')<line_sep>print(f'convert {convert_board}')<line_sep><raise>ValueError()<block_end><if_stmt>convert_aid<ne>post_info.aid<block_start>print('aid not match')<line_sep>print(f'post_info {post_info.aid}')<line_sep>print(f'convert {convert_aid}')<line_sep><raise>ValueError()<block_end><break><block_end><block_end>print('===================================')<block_end><def_stmt>random_post_test <block_start>test_board='Gossiping'<line_sep>newest_index=ptt_bot.get_newest_index(PTT.data_type.index_type.BBS board=test_board)<line_sep>print(f'{test_board} newest_index {newest_index}')<line_sep>test_range=5000<line_sep>start_index=random.randrange(1 newest_index+1-test_range)<line_sep>print(start_index)<for_stmt>current_index range(start_index start_index+test_range)<block_start>print(current_index)<line_sep>post_info=ptt_bot.get_post(test_board post_index=current_index query=<true>)<if_stmt>post_info.delete_status<ne>PTT.data_type.post_delete_status.NOT_DELETED<block_start><continue><block_end><if_stmt>post_info.web_url<is><none><block_start>print(f'error url is None {test_board} {current_index}')<line_sep><break><block_end><if_stmt>post_info.aid<is><none><block_start>print(f'error aid is None {test_board} {current_index}')<line_sep><continue><block_end>convert_board,convert_aid=ptt_bot.get_aid_from_url(post_info.web_url)<if_stmt>convert_board<ne>test_board<block_start>print('board not match')<line_sep>print(f'post_info {test_board}')<line_sep>print(f'convert {convert_board}')<line_sep><raise>ValueError()<block_end><if_stmt>convert_aid<ne>post_info.aid<block_start>print('aid not match')<line_sep>print(f'post_info {post_info.aid}')<line_sep>print(f'convert {convert_aid}')<line_sep><raise>ValueError()<block_end><block_end><block_end>random_post_test()<block_end>test_list={('Wanted' PTT.data_type.post_search_type.KEYWORD '[公告]') ('Wanted' PTT.data_type.post_search_type.AUTHOR 'gogin') ('Wanted' PTT.data_type.post_search_type.PUSH '10') ('Wanted' PTT.data_type.post_search_type.MARK 'm') ('Wanted' PTT.data_type.post_search_type.MONEY '5') ('Gossiping' PTT.data_type.post_search_type.KEYWORD '[公告]') ('Gossiping' PTT.data_type.post_search_type.AUTHOR 'ReDmango') ('Gossiping' PTT.data_type.post_search_type.PUSH '10') ('Gossiping' 
PTT.data_type.post_search_type.MARK 'm') ('Gossiping' PTT.data_type.post_search_type.MONEY '5') ('Gossiping' PTT.data_type.post_search_type.PUSH '-100') ('Gossiping' PTT.data_type.post_search_type.PUSH '150') }<def_stmt>show_condition test_board search_type condition<block_start><if_stmt>search_type<eq>PTT.data_type.post_search_type.KEYWORD<block_start>type_str='關鍵字'<block_end><if_stmt>search_type<eq>PTT.data_type.post_search_type.AUTHOR<block_start>type_str='作者'<block_end><if_stmt>search_type<eq>PTT.data_type.post_search_type.PUSH<block_start>type_str='推文數'<block_end><if_stmt>search_type<eq>PTT.data_type.post_search_type.MARK<block_start>type_str='標記'<block_end><if_stmt>search_type<eq>PTT.data_type.post_search_type.MONEY<block_start>type_str='稿酬'<block_end>print(f'{test_board} 使用 {type_str} 搜尋 {condition}')<block_end><def_stmt>get_post_with_condition # PTT1
<block_start><if_stmt>ptt_bot.config.host<eq>PTT.data_type.host_type.PTT1<block_start>test_list=[('Python' PTT.data_type.post_search_type.KEYWORD '[公告]') ('ALLPOST' PTT.data_type.post_search_type.KEYWORD '(Wanted)') ('Wanted' PTT.data_type.post_search_type.KEYWORD '(本文已被刪除)') ('ALLPOST' PTT.data_type.post_search_type.KEYWORD '(Gossiping)') ('Gossiping' PTT.data_type.post_search_type.KEYWORD '普悠瑪') ]<block_end><else_stmt><block_start>test_list=[('PttSuggest' PTT.data_type.post_search_type.KEYWORD '[問題]') ('PttSuggest' PTT.data_type.post_search_type.PUSH '10') ]<block_end>test_range=1<line_sep>query=<false><for_stmt>(board search_type condition) test_list<block_start>show_condition(board search_type condition)<line_sep>index=ptt_bot.get_newest_index(PTT.data_type.index_type.BBS board search_type=search_type search_condition=condition)<line_sep>print(f'{board} 最新文章編號 {index}')<for_stmt>i range(test_range)<block_start>post=ptt_bot.get_post(board post_index=index-i # PostIndex=611,
search_type=search_type search_condition=condition query=query)<line_sep>print('列表日期:')<line_sep>print(post.list_date)<line_sep>print('作者:')<line_sep>print(post.author)<line_sep>print('標題:')<line_sep>print(post.title)<if_stmt>post.delete_status<eq>PTT.data_type.post_delete_status.NOT_DELETED<block_start><if_stmt><not>query<block_start>print('內文:')<line_sep>print(post.content)<block_end><block_end><elif_stmt>post.delete_status<eq>PTT.data_type.post_delete_status.AUTHOR<block_start>print('文章被作者刪除')<block_end><elif_stmt>post.delete_status<eq>PTT.data_type.post_delete_status.MODERATOR<block_start>print('文章被版主刪除')<block_end>print('='<times>50)<block_end><block_end># TestList = [
# ('Python', PTT.data_type.post_search_type.KEYWORD, '[公告]')
# ]
# for (Board, SearchType, Condition) in TestList:
# index = PTTBot.getNewestIndex(
# PTT.data_type.index_type.BBS,
# Board,
# SearchType=SearchType,
# SearchCondition=Condition,
# )
# print(f'{Board} 最新文章編號 {index}')
# Post = PTTBot.getPost(
# Board,
# PostIndex=index,
# SearchType=SearchType,
# SearchCondition=Condition,
# )
# print('標題: ' + Post.getTitle())
# print('=' * 50)
search_list=[(PTT.data_type.post_search_type.KEYWORD '新聞') (PTT.data_type.post_search_type.AUTHOR 'Code') ]<line_sep>index=ptt_bot.get_newest_index(PTT.data_type.index_type.BBS 'Gossiping' search_type=PTT.data_type.post_search_type.KEYWORD search_condition='新聞' search_list=search_list)<line_sep>print(f'Gossiping 最新文章編號 {index}')<for_stmt>current_index range(1 index+1)<block_start>post_info=ptt_bot.get_post('Gossiping' post_index=current_index search_type=PTT.data_type.post_search_type.KEYWORD search_condition='新聞' search_list=search_list query=<true>)<line_sep>print(current_index post_info.title)<block_end><block_end><def_stmt>post <block_start>content='''
此為 PyPtt 貼文測試內容,如有打擾請告知。
github: https://github.com/PttCodingMan/PyPtt
開發手冊: https://github.com/PttCodingMan/PyPtt/tree/master/doc
ポ
ポポ
ポポポ
☂
☂☂
☂☂☂
'''<line_sep>content=content.replace('\n' '\r\n')<for_stmt>_ range(3)<block_start>ptt_bot.post(# 看板
'Test' # 標題
'PyPtt 程式貼文測試' # 內文
content # 標題分類
1 # 簽名檔
0)<block_end><block_end><def_stmt>get_newest_index <block_start><if_stmt>ptt_bot.config.host<eq>PTT.data_type.host_type.PTT1<block_start>test_board_list=['Wanted' 'Gossiping' 'Test' 'Stock' 'movie']<block_end><else_stmt><block_start>test_board_list=['PttSuggest' 'Test' 'WhoAmI' 'CodingMan']<block_end>test_range=100<for_stmt>board test_board_list<block_start><for_stmt>_ range(test_range)<block_start>index=ptt_bot.get_newest_index(PTT.data_type.index_type.BBS board=board)<line_sep>print(f'{board} 最新文章編號 {index}')<block_end><block_end>###############################################
index=ptt_bot.get_newest_index(PTT.data_type.index_type.MAIL)<line_sep>print(f'最新郵件編號 {index}')<line_sep>index=ptt_bot.get_newest_index(PTT.data_type.index_type.MAIL search_type=PTT.data_type.mail_search_type.KEYWORD search_condition='uPtt system')<line_sep>print(f'最新郵件編號 {index}')<line_sep>search_list=[(PTT.data_type.mail_search_type.KEYWORD 'uPtt') (PTT.data_type.mail_search_type.KEYWORD 'key')]<line_sep>index=ptt_bot.get_newest_index(PTT.data_type.index_type.MAIL search_list=search_list)<line_sep>print(f'最新郵件編號 {index}')<block_end><def_stmt>showValue Msg Value<block_start>print(f'{Msg} =>{Value}<=')<block_end><def_stmt>detectNone Name Obj Enable=<true><block_start><if_stmt>Obj<is><none><and>Enable<block_start><raise>ValueError(Name+' is None')<block_end><block_end>query=<false><def_stmt>crawlHandler Post<block_start><global>query<if_stmt>Post.delete_status<ne>PTT.data_type.post_delete_status.NOT_DELETED<block_start><if_stmt>Post.delete_status<eq>PTT.data_type.post_delete_status.MODERATOR# print(f'[版主刪除][{Post.getAuthor()}]')
<block_start><pass><block_end><elif_stmt>Post.delete_status<eq>PTT.data_type.post_delete_status.AUTHOR# print(f'[作者刪除][{Post.getAuthor()}]')
<block_start><pass><block_end><elif_stmt>Post.delete_status<eq>PTT.data_type.post_delete_status.UNKNOWN# print(f'[不明刪除]')
<block_start><pass><block_end><return><block_end># if Post.getTitle().startswith('Fw:') or Post.getTitle().startswith('轉'):
# print(f'[{Post.aid}][{Post.getAuthor()}][{Post.getTitle()}]')
# print(f'[{Post.getContent()}]')
# print(f'[{Post.getAuthor()}][{Post.getTitle()}]')
PushNumber=Post.push_number<if_stmt>PushNumber<is><not><none><block_start><if_stmt>PushNumber<eq>'爆'<block_start><pass><block_end><elif_stmt>PushNumber.startswith('X')<block_start>N=PushNumber[1:]<block_end><else_stmt><block_start><pass><line_sep># if not PushNumber.isdigit():
# print(f'[{Post.aid}][{Post.push_number}]')
# print(f'[{Post.aid}][{Post.push_number}]')
# print(f'[{Post.aid}][{Post.push_number}]')
# raise ValueError()
<block_end># print(f'[{Post.aid}][{Post.getPushNumber()}]')
<block_end>detectNone('標題' Post.title)<line_sep># detectNone('AID', Post.aid)
detectNone('Author' Post.author)<line_sep># detectNone('Money', Post.getMoney())
# detectNone('WebUrl', Post.web_url)
# detectNone('ListDate', Post.getListDate())
# if not Query:
# detectNone('Date', Post.getDate())
# detectNone('Content', Post.getContent())
# detectNone('IP', Post.getIP())
# time.sleep(0.2)
<block_end><def_stmt>crawl_board <block_start><global>query<if_stmt>ptt_bot.config.host<eq>PTT.data_type.host_type.PTT1<block_start>test_board_list=['Test' 'Wanted' 'Gossiping' 'Stock' 'movie' 'C_Chat' 'Baseball' 'NBA' 'HatePolitics' ]<block_end><else_stmt><block_start>test_board_list=['Test' 'WhoAmI' 'PttSuggest']<block_end># crawl_type = PTT.data_type.index_type.WEB
crawl_type=PTT.data_type.index_type.BBS<line_sep>index_type='Index'<line_sep>test_range=100<line_sep>test_round=2<for_stmt>_ range(test_round)<block_start><for_stmt>TestBoard test_board_list<block_start><if_stmt>crawl_type<eq>PTT.data_type.index_type.BBS<block_start><if_stmt>index_type<eq>'Index'<block_start>newest_index=ptt_bot.get_newest_index(PTT.data_type.index_type.BBS board=TestBoard)<line_sep>start_index=newest_index-test_range+1<line_sep>print(f'預備爬行 {TestBoard} 編號 {start_index} ~ {newest_index} 文章')<line_sep>print(f'TestBoard [{TestBoard}]')<line_sep>error_post_list,del_post_list=ptt_bot.crawl_board(PTT.data_type.crawl_type.BBS crawlHandler TestBoard start_index=start_index end_index=newest_index query=query)<block_end><elif_stmt>index_type<eq>'AID'<block_start>start_aid='1TnDKzxw'<line_sep>end_aid='1TnCPFGu'<line_sep>error_post_list,del_post_list=ptt_bot.crawl_board(PTT.data_type.crawl_type.BBS crawlHandler TestBoard start_aid=start_aid end_aid=end_aid)<block_end><if_stmt>len(error_post_list)<g>0<block_start>print('格式錯誤文章: \n'+'\n'.join(str(x)<for>x error_post_list))<block_end><else_stmt><block_start>print('沒有偵測到格式錯誤文章')<block_end><if_stmt>len(del_post_list)<g>0<block_start>print(f'共有 {len(del_post_list)} 篇文章被刪除')<block_end><block_end><elif_stmt>crawl_type<eq>PTT.data_type.index_type.WEB<block_start>newest_index=ptt_bot.get_newest_index(PTT.data_type.index_type.WEB board=TestBoard)<line_sep>end_page=newest_index<line_sep>start_page=end_page-test_range+1<line_sep>print(f'預備爬行 {TestBoard} 最新頁數 {newest_index}')<line_sep>print(f'預備爬行 {TestBoard} 編號 {start_page} ~ {end_page} 文章')<line_sep>error_post_list,del_post_list=ptt_bot.crawl_board(PTT.data_type.crawl_type.WEB crawlHandler TestBoard start_page=start_page end_page=end_page)<if_stmt>len(del_post_list)<g>0<block_start>print('\n'.join(del_post_list))<line_sep>print(f'共有 {len(del_post_list)} 篇文章被刪除')<block_end><block_end><block_end><block_end><block_end><def_stmt>crawl_board_with_condition # TestRange = 10
# for (Board, SearchType, Condition) in TestList:
# try:
# showCondition(Board, SearchType, Condition)
# NewestIndex = PTTBot.getNewestIndex(
# PTT.data_type.index_type.BBS,
# Board,
# SearchType=SearchType,
# SearchCondition=Condition,
# )
# print(f'{Board} 最新文章編號 {NewestIndex}')
# StartIndex = NewestIndex - TestRange + 1
# ErrorPostList, DelPostList = PTTBot.crawlBoard(
# crawlHandler,
# Board,
# StartIndex=StartIndex,
# EndIndex=NewestIndex,
# SearchType=SearchType,
# SearchCondition=Condition,
# )
# # print('標題: ' + Post.getTitle())
# print('=' * 50)
# except Exception as e:
# traceback.print_tb(e.__traceback__)
# print(e)
# if ptt_bot.config.host == PTT.data_type.host_type.PTT1:
# test_list = [
# # ptt1
# ('Stock', PTT.data_type.post_search_type.KEYWORD, '盤中閒聊'),
# ('Baseball', PTT.data_type.post_search_type.PUSH, '20')
# ]
# else:
# test_list = [
# ('WhoAmI', PTT.data_type.post_search_type.KEYWORD, '[閒聊]'),
# ('WhoAmI', PTT.data_type.post_search_type.PUSH, '10')
# ]
#
# test_range = 100
#
# for (board, search_type, search_condition) in test_list:
# show_condition(board, search_type, search_condition)
# newest_index = ptt_bot.get_newest_index(
# PTT.data_type.index_type.BBS,
# board,
# search_type=search_type,
# search_condition=search_condition)
# print(f'{board} 最新文章編號 {newest_index}')
#
# start_index = newest_index - test_range + 1
#
# error_post_list, del_post_list = ptt_bot.crawl_board(
# PTT.data_type.crawl_type.BBS,
# crawlHandler,
# board,
# start_index=start_index,
# end_index=newest_index,
# search_type=search_type,
# search_condition=search_condition,
# )
# print('=' * 50)
<block_start>search_list=[(PTT.data_type.post_search_type.KEYWORD '新聞') (PTT.data_type.post_search_type.AUTHOR 'Code') ]<line_sep>newest_index=ptt_bot.get_newest_index(PTT.data_type.index_type.BBS 'Gossiping' search_list=search_list)<line_sep>print(f'Gossiping 最新文章編號 {newest_index}')<line_sep>error_post_list,del_post_list=ptt_bot.crawl_board(PTT.data_type.crawl_type.BBS crawlHandler 'Gossiping' start_index=1 end_index=newest_index search_list=search_list)<block_end><def_stmt>get_user <block_start>test_user=[# 暱稱有特殊字元
'for40255' 'CodingMan']<line_sep>test_user=ptt_bot.search_user('c' max_page=1)<line_sep>test_user=test_user[:10]<line_sep>print(f'共有 {len(test_user)} 使用者')<for_stmt>user test_user<block_start><try_stmt><block_start>ptt_bot.log(user)<line_sep>user=ptt_bot.get_user(user)<if_stmt>user<is><none><block_start><return><block_end>ptt_bot.log('使用者ID: '+user.id)<line_sep>ptt_bot.log('使用者經濟狀況: '+str(user.money))<line_sep>ptt_bot.log('登入次數: '+str(user.login_time))<line_sep>ptt_bot.log('帳戶通過認證: '+str(user.account_verified))<line_sep>ptt_bot.log('有效文章數: '+str(user.legal_post))<line_sep>ptt_bot.log('退文文章數: '+str(user.illegal_post))<line_sep>ptt_bot.log('目前動態: '+user.status)<line_sep>ptt_bot.log('信箱狀態: '+user.mail_status)<line_sep>ptt_bot.log('最後登入時間: '+user.last_login)<line_sep>ptt_bot.log('上次故鄉: '+user.last_ip)<line_sep>ptt_bot.log('五子棋戰績: '+user.five_chess)<line_sep>ptt_bot.log('象棋戰績:'+user.chess)<line_sep>ptt_bot.log('簽名檔:'+user.signature_file)<line_sep>ptt_bot.log('=====================')<block_end><except_stmt>PTT.exceptions.NoSuchUser<block_start>print('無此使用者')<block_end><block_end><try_stmt><block_start>user=ptt_bot.get_user('sdjfklsdj')<block_end><except_stmt>PTT.exceptions.NoSuchUser<block_start>print('無此使用者')<block_end><block_end><def_stmt>push <block_start>test_post_list=[# ('Gossiping', 95692),
# ('Test', 'QQQQQQ'),
('Test' 383) # ('Wanted', '1Teyovc3')
]<line_sep># 分段推文
content='批踢踢實業坊,簡稱批踢踢、PTT,是一個臺灣電子布告欄(BBS),採用Telnet BBS技術運作,建立在台灣學術網路的資源之上,以學術性質為原始目的,提供線上言論空間。目前由國立臺灣大學電子布告欄系統研究社管理,大部份的系統原始碼由國立臺灣大學資訊工程學系的學生與校友進行維護,並且邀請法律專業人士擔任法律顧問。它有兩個分站,分別為批踢踢兔與批踢踢參。目前在批踢踢實業坊與批踢踢兔註冊總人數約150萬人,尖峰時段兩站超過15萬名使用者同時上線,擁有超過2萬個不同主題的看板,每日超過2萬篇新文章及50萬則推文被發表,是台灣使用人次最多的網路論壇之一。'<line_sep># 短推文
# content = '安安'
# 連續重複推文
# content = '''安安
# 安安
# 安安
# 安安
# 安安
# '''
testround:int=3<for_stmt>(board index) test_post_list<block_start><for_stmt>i range(testround)<block_start><if_stmt>isinstance(index int)<block_start>ptt_bot.push(board PTT.data_type.push_type.PUSH content post_index=index)<block_end><else_stmt><block_start>ptt_bot.push(board PTT.data_type.push_type.PUSH content post_aid=index)<block_end><block_end><block_end># Index = PTTBot.getNewestIndex(
# PTT.data_type.index_type.BBS,
# Board='Test'
# )
# PTTBot.push('Test', PTT.data_type.push_type.PUSH, Content, PostIndex=Index + 1)
<block_end><def_stmt>throw_waterball <block_start>ptt_id='DeepLearning'<line_sep># TestWaterBall = [str(x) + '_' * 35 + ' 水球測試結尾' for x in range(30)]
# # TestWaterBall = TestWaterBall * 3
# TestWaterBall = '\n'.join(TestWaterBall)
test_waterball='水球測試1 :D\n水球測試2 :D'<line_sep>ptt_bot.throw_waterball(ptt_id test_waterball)<line_sep># time.sleep(3)
<block_end><def_stmt>get_waterball # operate_type = PTT.data_type.waterball_operate_type.NOTHING
# OperateType = PTT.data_type.waterball_operate_type.MAIL
<block_start>operate_type=PTT.data_type.waterball_operate_type.CLEAR<while_stmt><true><block_start>newest_index=ptt_bot.get_newest_index(PTT.data_type.index_type.MAIL)<line_sep>waterball_list=ptt_bot.get_waterball(operate_type)<if_stmt>waterball_list<is><none><block_start><return><block_end># print('Result:')
<for_stmt>waterball waterball_list<block_start><if_stmt>waterball.type<eq>PTT.data_type.waterball_type.CATCH<block_start>temp='★'+waterball.target+' '<block_end><elif_stmt>waterball.type<eq>PTT.data_type.waterball_type.SEND<block_start>temp='To '+waterball.target+': '<block_end>temp<augadd>waterball.content+' ['+waterball.date+']'<line_sep>print(temp)<block_end>time.sleep(0.5)<block_end><block_end><def_stmt>call_status <block_start><def_stmt>show_call_status call_status<block_start><if_stmt>call_status<eq>PTT.data_type.call_status.ON<block_start>print('呼叫器狀態[打開]')<block_end><elif_stmt>call_status<eq>PTT.data_type.call_status.OFF<block_start>print('呼叫器狀態[關閉]')<block_end><elif_stmt>call_status<eq>PTT.data_type.call_status.UNPLUG<block_start>print('呼叫器狀態[拔掉]')<block_end><elif_stmt>call_status<eq>PTT.data_type.call_status.WATERPROOF<block_start>print('呼叫器狀態[防水]')<block_end><elif_stmt>call_status<eq>PTT.data_type.call_status.FRIEND<block_start>print('呼叫器狀態[朋友]')<block_end><else_stmt><block_start>print(f'Unknow call_status: {call_status}')<block_end><block_end><for_stmt>_ range(5)<block_start>current_call_status=ptt_bot.get_call_status()<line_sep>show_call_status(current_call_status)<block_end>print('連續測試通過')<line_sep>init_call_status=random.randint(PTT.data_type.call_status.min_value PTT.data_type.call_status.max_value)<line_sep>test_queue=[x<for>x range(PTT.data_type.call_status.min_value PTT.data_type.call_status.max_value+1)]<line_sep>random.shuffle(test_queue)<line_sep>print('初始呼叫器狀態')<line_sep>show_call_status(init_call_status)<line_sep>print('測試切換呼叫器狀態順序')<for_stmt>CurrentTeststatus test_queue<block_start>show_call_status(CurrentTeststatus)<block_end>ptt_bot.set_call_status(init_call_status)<line_sep>current_call_status=ptt_bot.get_call_status()<if_stmt>current_call_status<ne>init_call_status<block_start>print('設定初始呼叫器狀態: 不通過')<line_sep><return><block_end>print('設定初始呼叫器狀態: 通過')<for_stmt>CurrentTeststatus test_queue<block_start>print('準備設定呼叫器狀態')<line_sep>show_call_status(CurrentTeststatus)<line_sep>ptt_bot.set_call_status(CurrentTeststatus)<line_sep>current_call_status=ptt_bot.get_call_status()<line_sep>show_call_status(current_call_status)<if_stmt>current_call_status<ne>CurrentTeststatus<block_start>print('設定呼叫器狀態: 不通過')<line_sep><return><block_end>print('設定呼叫器狀態: 通過')<block_end>print('呼叫器測試全數通過')<block_end><def_stmt>give_money <block_start>ptt_bot.give_money('DeepLearning' 1)<line_sep>ptt_bot.give_money('DeepLearning' 1 title='紅包袋標題')<line_sep>ptt_bot.give_money('DeepLearning' 1 title='紅包袋標題' content='紅包袋內文')<line_sep>ptt_bot.give_money('DeepLearning' 1 content='紅包袋內文')<block_end><def_stmt>mail <block_start>content='\r\n\r\n'.join(['如有誤寄,對..對不起' 'PyPtt 程式寄信測試內容' 'github: https://tinyurl.com/umqff3v'])<try_stmt><block_start>ptt_bot.mail('sdjfkdsjfls' '程式寄信標題' content 0)<block_end><except_stmt>PTT.exceptions.NoSuchUser<block_start><pass><block_end>ptt_bot.mail(ptt_id '程式寄信標題' content 0 <false>)<line_sep>newest_index=ptt_bot.get_newest_index(PTT.data_type.index_type.MAIL)<line_sep>print(f'最新郵件編號 {newest_index}')<line_sep># ptt_bot.del_mail(newest_index)
<block_end><def_stmt>has_new_mail <block_start>result=ptt_bot.has_new_mail()<line_sep>ptt_bot.log(f'{result} 封新信')<line_sep>result=ptt_bot.has_new_mail()<line_sep>ptt_bot.log(f'{result} 封新信')<block_end>ThreadBot=<none><def_stmt>threading_test <block_start>id1,password1=get_password('<PASSWORD>')<line_sep>id2,password2=get_password('<PASSWORD>')<def_stmt>thread_func1 <block_start>thread_bot1=PTT.API()<try_stmt><block_start>thread_bot1.login(id1 password1 # kick_other_login=True
)<block_end><except_stmt>PTT.exceptions.LoginError<block_start>thread_bot1.log('登入失敗')<line_sep><return><block_end>thread_bot1.logout()<line_sep>print('1 多線程測試完成')<block_end><def_stmt>thread_func2 <block_start>thread_bot2=PTT.API()<try_stmt><block_start>thread_bot2.login(id2 password2 # kick_other_login=True
)<block_end><except_stmt>PTT.exceptions.LoginError<block_start>thread_bot2.log('登入失敗')<line_sep><return><block_end>thread_bot2.logout()<line_sep>print('2 多線程測試完成')<block_end>t1=threading.Thread(target=thread_func1)<line_sep>t2=threading.Thread(target=thread_func2)<line_sep>t1.start()<line_sep>t2.start()<line_sep>t1.join()<line_sep>t2.join()<line_sep># ThreadBot.log('Hi')
sys.exit()<block_end><def_stmt>get_board_list <block_start>board_list=ptt_bot.get_board_list()<line_sep># print(' '.join(BoardList))
print(f'總共有 {len(board_list)} 個板名')<line_sep>print(f'總共有 {len(set(board_list))} 個不重複板名')<block_end><def_stmt>reply_post <block_start>reply_post_index=383<line_sep>ptt_bot.reply_post(PTT.data_type.reply_type.BOARD 'Test' '測試回應到板上,如有打擾抱歉' post_index=reply_post_index)<line_sep>ptt_bot.reply_post(PTT.data_type.reply_type.MAIL 'Test' '測試回應到信箱,如有打擾抱歉' post_index=reply_post_index)<line_sep>ptt_bot.reply_post(PTT.data_type.reply_type.BOARD_MAIL 'Test' '測試回應到板上還有信箱,如有打擾抱歉' post_index=reply_post_index)<block_end><def_stmt>set_board_title <block_start><import_from_stmt>time strftime<line_sep>test_board='QQboard'<while_stmt><true><block_start>time_format=strftime('%H:%M:%S')<try_stmt><block_start>ptt_bot.set_board_title(test_board f'現在時間 {time_format}')<block_end><except_stmt>PTT.exceptions.ConnectionClosed<block_start><while_stmt><true><block_start><try_stmt><block_start>ptt_bot.login(ptt_id password)<line_sep><break><block_end><except_stmt>PTT.exceptions.LoginError<block_start>ptt_bot.log('登入失敗')<line_sep>time.sleep(1)<block_end><except_stmt>PTT.exceptions.ConnectError<block_start>ptt_bot.log('登入失敗')<line_sep>time.sleep(1)<block_end><block_end><block_end>print('已經更新時間 '+time_format end='\r')<try_stmt><block_start>time.sleep(1)<block_end><except_stmt>KeyboardInterrupt<block_start>print('已經更新時間 '+time_format)<line_sep>ptt_bot.set_board_title(test_board f'[{test_board}]')<line_sep>print('板標已經恢復')<line_sep><break><block_end><block_end><block_end><def_stmt>mark_post <block_start>board='CodingMan'<line_sep>mark_type=PTT.data_type.mark_type.S<line_sep>ptt_bot.mark_post(mark_type board post_index=850)<line_sep>ptt_bot.mark_post(mark_type board post_index=851)<line_sep># if mark_type == PTT.data_type.mark_type.D:
# ptt_bot.mark_post(
# PTT.data_type.mark_type.DeleteD,
# 'CodingMan'
# )
# ptt_bot.mark_post(
# mark_type,
# 'QQBoard',
# post_index=2000
# )
# PTTBot.mark_post(
# mark_type,
# 'CodingMan',
# post_index=2000
# )
<block_end><def_stmt>get_favourite_board <block_start>favourite_board_list=ptt_bot.get_favourite_board()<for_stmt>board favourite_board_list<block_start>buff=f'[{board.board}][{board.type}][{board.title}]'<line_sep>print(buff)<block_end><block_end><def_stmt>get_board_info # 《Gossiping》看板設定
# b - 中文敘述: 綜合 ◎【八卦】沒有開放政問 珍惜帳號
# 板主名單: arsonlolita/xianyao/Bignana/XXXXGAY
# h - 公開狀態(是否隱形): 公開
# g - 隱板時 可以 進入十大排行榜
# e - 開放 非看板會員發文
# y - 開放 回應文章
# d - 開放 自刪文章 發文與推文限制:
# r - 開放 推薦文章 登入次數 700 次以上
# s - 開放 噓文 退文篇數 0 篇以下
# f - 限制 快速連推文章, 最低間隔時間: 5 秒
# i - 推文時 自動 記錄來源 IP 名單編輯與其它: (需板主權限)
# a - 推文時 不用對齊 開頭 w)設定水桶 v)可見會員名單
# k - 板主 可 刪除部份違規文字 m)舉辦投票 o)投票名單
# x - 轉錄文章 會 自動記錄,且 需要 發文權限 c)文章類別 n)發文注意事項
# j - 未 設為冷靜模式 p)進板畫面
# 8 - 禁止 未滿十八歲進入
# board_list = ptt_bot.get_board_list()
# for board in board_list:
# board_info = ptt_bot.get_board_info(board)
#
# if not board_info.is_push_record_ip:
# continue
# if board_info.is_push_aligned:
# continue
#
# print(f'{board} !!!!!!!!!!')
# # break
# return
<block_start><if_stmt>ptt_bot.config.host<eq>PTT.data_type.host_type.PTT1<block_start>test_board_list=['Python' 'L_LifePlan' 'NDHU-sl103']<block_end><else_stmt><block_start>test_board_list=['WhoAmI']<block_end>get_post_kind=<true><for_stmt>board test_board_list<block_start>board_info=ptt_bot.get_board_info(board get_post_kind=get_post_kind)<line_sep>print('==============')<line_sep>print('板名: ' board_info.board)<line_sep>print('線上人數: ' board_info.online_user)<line_sep>print('中文敘述: ' board_info.chinese_des)<line_sep>print('板主: ' board_info.moderators)<line_sep>print('公開狀態(是否隱形): ' board_info.is_open)<line_sep>print('隱板時是否可進入十大排行榜: ' board_info.is_into_top_ten_when_hide)<line_sep>print('是否開放非看板會員發文: ' board_info.can_non_board_members_post)<line_sep>print('是否開放回應文章: ' board_info.can_reply_post)<line_sep>print('是否開放自刪文章: ' board_info.can_self_del_post)<line_sep>print('是否開放推薦文章: ' board_info.can_push_post)<line_sep>print('是否開放噓文: ' board_info.can_boo_post)<line_sep>print('是否可以快速連推文章: ' board_info.can_fast_push)<line_sep>print('推文最低間隔時間: ' board_info.min_interval)<line_sep>print('推文時是否記錄來源 IP: ' board_info.is_push_record_ip)<line_sep>print('推文時是否對齊開頭: ' board_info.is_push_aligned)<line_sep>print('板主是否可刪除部份違規文字: ' board_info.can_moderator_del_illegal_content)<line_sep>print('轉錄文章是否自動記錄,且是否需要發文權限: ' board_info.is_tran_post_auto_recorded_and_require_post_permissions)<line_sep>print('是否為冷靜模式: ' board_info.is_cool_mode)<line_sep>print('是否需要滿十八歲才可進入: ' board_info.is_require18)<line_sep>print('發文與推文限制登入次數需多少次以上: ' board_info.require_login_time)<line_sep>print('發文與推文限制退文篇數多少篇以下: ' board_info.require_illegal_post)<if_stmt>get_post_kind<block_start>print('發文種類:' ' '.join(board_info.post_kind))<block_end><block_end><block_end><def_stmt>get_bottom_post_list <block_start>test_board_list=['Wanted' 'Python' 'Gossiping']<line_sep>print('='<times>50)<for_stmt>board test_board_list<block_start>bottom_post_list=ptt_bot.get_bottom_post_list(board)<if_stmt>len(bottom_post_list)<eq>0<block_start>print(f'{board} 板無置頂文章')<block_end><else_stmt><block_start>print(f'{board} 共有 {len(bottom_post_list)} 置頂文章')<for_stmt>post bottom_post_list<block_start>print(post.title)<block_end><block_end>print('='<times>50)<block_end><block_end><def_stmt>del_post <block_start>content='''
此為 PyPtt 貼文測試內容,如有打擾請告知。
github: https://github.com/PttCodingMan/PyPtt
'''<line_sep>content=content.replace('\n' '\r\n')<for_stmt>_ range(3)<block_start>ptt_bot.post(# 看板
'Test' # 標題
'PyPtt 程式貼文測試' # 內文
content # 標題分類
1 # 簽名檔
0)<block_end>index=ptt_bot.get_newest_index(PTT.data_type.index_type.BBS 'Test')<for_stmt>i range(5)<block_start>current_index=index-int(i)<try_stmt><block_start>ptt_bot.del_post('Test' post_index=current_index)<line_sep>ptt_bot.log(f'Test {current_index} 刪除成功')<block_end><except_stmt>PTT.exceptions.NoPermission<block_start>ptt_bot.log(f'Test {current_index} 無刪除權限')<block_end><except_stmt>PTT.exceptions.DeletedPost<block_start>ptt_bot.log(f'Test {current_index} 已經被刪除')<block_end><except_stmt>PTT.exceptions.NoSuchPost<block_start>ptt_bot.log(f'Test {current_index} 無此文章')<block_end><block_end><block_end><def_stmt>bucket <block_start>ptt_bot.bucket('QQBoard' 7 'Bucket Reason' 'CodingMan')<block_end><def_stmt>search_user <block_start>user_list=ptt_bot.search_user('abcd' min_page=1 max_page=2)<line_sep>print(user_list)<line_sep>print(len(user_list))<line_sep># if 'abcd0800' in userlist:
# print('exist')
# else:
# print('Not exist')
<block_end><def_stmt>get_mail <block_start>mail_index=ptt_bot.get_newest_index(PTT.data_type.index_type.MAIL)<line_sep>ptt_bot.log('最新信件編號' mail_index)<for_stmt>i reversed(range(1 mail_index+1))<block_start>ptt_bot.log('檢查信件編號' i)<line_sep>mail_info=ptt_bot.get_mail(i)<line_sep>print(mail_info.title)<block_end><for_stmt>_ range(3)<block_start>newest_index=ptt_bot.get_newest_index(PTT.data_type.index_type.MAIL)<line_sep>print(f'最新信箱編號 {newest_index}')<line_sep>mail_info=ptt_bot.get_mail(newest_index)<if_stmt>mail_info<is><not><none><block_start>print(mail_info.author)<block_end><block_end>mail_index=ptt_bot.get_newest_index(PTT.data_type.index_type.MAIL search_type=PTT.data_type.mail_search_type.KEYWORD search_condition='uPtt system')<line_sep>ptt_bot.log('最新信件編號' mail_index)<for_stmt>i reversed(range(1 mail_index+1))<block_start>ptt_bot.log('檢查信件編號' i)<line_sep>mail_info=ptt_bot.get_mail(i search_type=PTT.data_type.mail_search_type.KEYWORD search_condition='uPtt system')<line_sep>print(mail_info.title)<block_end>search_list=[(PTT.data_type.mail_search_type.KEYWORD 'uPtt') (PTT.data_type.mail_search_type.KEYWORD 'key')]<line_sep>mail_index=ptt_bot.get_newest_index(PTT.data_type.index_type.MAIL search_list=search_list)<for_stmt>i reversed(range(1 mail_index+1))<block_start>ptt_bot.log('檢查信件編號' i)<line_sep>mail_info=ptt_bot.get_mail(i search_list=search_list)<line_sep>print(mail_info.title)<block_end><block_end><def_stmt>mail_recviver <block_start><while_stmt><true># ptt_bot.config.log_level = PTT.log.level.TRACE
<block_start>newest_index=ptt_bot.get_newest_index(PTT.data_type.index_type.MAIL)<line_sep># ptt_bot.config.log_level = PTT.log.level.INFO
ptt_bot.log(f'最新信箱編號 {newest_index}')<line_sep>#
# user = ptt_bot.get_user(ptt_id)
# ptt_bot.log(f'信箱狀態: {user.mail_status}')
<for_stmt>index range(1 newest_index+1)<block_start>mail_info=ptt_bot.get_mail(newest_index)<line_sep>print(mail_info.author)<line_sep>print(mail_info.content)<line_sep>ptt_bot.del_mail(index)<block_end>print('完成休息')<line_sep>time.sleep(3)<block_end><block_end><def_stmt>change_pw <block_start>ptt_bot.change_pw(password)<block_end><if_stmt>__name__<eq>'__main__'<block_start>print('Welcome to PyPtt v '+PTT.version.V+' test case')<try_stmt># init()
# threading_test()
<block_start>ptt_bot=PTT.API(# log_level=PTT.log.level.TRACE,
# log_level=PTT.log.level.DEBUG,
# host=PTT.data_type.host_type.PTT2
# for 本機測試
# connect_mode=PTT.connect_core.connect_mode.TELNET,
# host=PTT.data_type.host_type.LOCALHOST,
# port=8888,
# for 自定義 url 測試
# connect_mode=PTT.connect_core.connect_mode.TELNET,
# host='localhost',
# port=8888,
# language=PTT.i18n.language.ENGLISH
)<if_stmt>ptt_bot.config.host<eq>PTT.data_type.host_type.PTT1<block_start>ptt_id,password=get_password('account_pt<PASSWORD>')<block_end><else_stmt><block_start>ptt_id,password=get_password('account_ptt<PASSWORD>')<block_end><try_stmt><block_start>ptt_bot.login(ptt_id password # kick_other_login=True
)<block_end><except_stmt>PTT.exceptions.LoginError<block_start>ptt_bot.log('登入失敗')<line_sep>sys.exit()<block_end><except_stmt>PTT.exceptions.WrongIDorPassword<block_start>ptt_bot.log('帳號密碼錯誤')<line_sep>sys.exit()<block_end><except_stmt>PTT.exceptions.LoginTooOften<block_start>ptt_bot.log('請稍等一下再登入')<line_sep>sys.exit()<block_end><if_stmt>ptt_bot.unregistered_user<block_start>print('未註冊使用者')<if_stmt>ptt_bot.process_picks<ne>0<block_start>print(f'註冊單處理順位 {ptt_bot.process_picks}')<block_end><block_end><if_stmt>ptt_bot.registered_user<block_start>print('已註冊使用者')<block_end>###################################
###################################
# performance_test()
# get_post()
# get_post_with_condition()
# post()
# get_newest_index()
# crawl_board()
# crawl_board_with_condition()
# push()
# get_user()
# throw_waterball()
# get_waterball()
# call_status()
# give_money()
# mail()
# has_new_mail()
# get_board_list()
# get_board_info()
# reply_post()
# get_favourite_board()
# search_user()
# get_mail()
# mail_recviver()
# change_pw()
# get_aid_from_url()
# get_bottom_post_list()
# del_post()
# bucket()
# set_board_title()
# mark_post()
<block_end><except_stmt>Exception<as>e<block_start>print(type(e))<line_sep>traceback.print_tb(e.__traceback__)<line_sep>print(e)<block_end><except_stmt>KeyboardInterrupt<block_start><pass><block_end>ptt_bot.logout()<block_end> |