content (stringlengths 0 to 1.55M) |
---|
<import_stmt>unittest<import_from_stmt>programy.utils.logging.ylogger YLoggerSnapshot<class_stmt>YLoggerSnapshotTests(unittest.TestCase)<block_start><def_stmt>test_snapshot_with_defaults self<block_start>snapshot=YLoggerSnapshot()<line_sep>self.assertIsNotNone(snapshot)<line_sep>self.assertEquals("Critical(0) Fatal(0) Error(0) Exception(0) Warning(0) Info(0), Debug(0)" str(snapshot))<line_sep>self.assertEqual({'criticals':0 'debugs':0 'errors':0 'exceptions':0 'fatals':0 'infos':0 'warnings':0} snapshot.to_json())<block_end><def_stmt>test_snapshot_without_defaults self<block_start>snapshot=YLoggerSnapshot(criticals=1 fatals=2 errors=3 exceptions=4 warnings=5 infos=6 debugs=7)<line_sep>self.assertIsNotNone(snapshot)<line_sep>self.assertEquals("Critical(1) Fatal(2) Error(3) Exception(4) Warning(5) Info(6), Debug(7)" str(snapshot))<line_sep>self.assertEqual({'criticals':1 'debugs':7 'errors':3 'exceptions':4 'fatals':2 'infos':6 'warnings':5} snapshot.to_json())<block_end><block_end> |
__version__="1.13.3"<line_sep> |
"""The oem component."""<line_sep> |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
<import_stmt>os<import_stmt>copy<import_stmt>math<import_from_stmt>pathlib Path<import_stmt>warnings<import_from_stmt>typing Callable Tuple Union List<import_stmt>decord<import_from_stmt>einops.layers.torch Rearrange<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_from_stmt>numpy.random randint<import_stmt>torch<import_from_stmt>torch.utils.data Dataset Subset DataLoader<import_from_stmt>torchvision.transforms Compose<import_from_stmt>.references transforms_video<as>transforms<import_from_stmt>.references.functional_video denormalize<import_from_stmt>..common.misc Config<import_from_stmt>..common.gpu num_devices db_num_workers<line_sep>Trans=Callable[[object dict] Tuple[object dict]]<line_sep>DEFAULT_MEAN=(0.43216 0.394666 0.37645)<line_sep>DEFAULT_STD=(0.22803 0.22145 0.216989)<class_stmt>VideoRecord(object)<block_start>"""
This class is used for parsing split-files where each row contains a path
and a label:
Ex:
```
path/to/my/clip_1 3
path/to/another/clip_2 32
```
"""<def_stmt>__init__ self data:List[str]<block_start>""" Initialized a VideoRecord
Ex.
data = ["path/to/video.mp4", 2, "cooking"]
Args:
row: a list where first element is the path and second element is
the label, and the third element (optional) is the label name
"""<assert_stmt>len(data)<ge>2<and>len(data)<le>3<assert_stmt>isinstance(data[0] str)<assert_stmt>isinstance(int(data[1]) int)<if_stmt>len(data)<eq>3<block_start><assert_stmt>isinstance(data[2] str)<block_end>self._data=data<line_sep>self._num_frames=<none><block_end>@property<def_stmt>path self<arrow>str<block_start><return>self._data[0]<block_end>@property<def_stmt>num_frames self<arrow>int<block_start><if_stmt>self._num_frames<is><none><block_start>self._num_frames=int(len([x<for>x Path(self._data[0]).glob("img_*")])-1)<block_end><return>self._num_frames<block_end>@property<def_stmt>label self<arrow>int<block_start><return>int(self._data[1])<block_end>@property<def_stmt>label_name self<arrow>str<block_start><return><none><if>len(self._data)<le>2<else>self._data[2]<block_end><block_end><def_stmt>get_transforms train:bool=<true> tfms_config:Config=<none><arrow>Trans<block_start>""" Get default transformations to apply depending on whether we're applying it to the training or the validation set. If no tfms configurations are passed in, use the defaults.
Args:
train: whether or not this is for training
tfms_config: Config object with tranforms-related configs
Returns:
A list of transforms to apply
"""<if_stmt>tfms_config<is><none><block_start>tfms_config=get_default_tfms_config(train=train)<block_end># 1. resize
tfms=[transforms.ToTensorVideo() transforms.ResizeVideo(tfms_config.im_scale tfms_config.resize_keep_ratio) ]<line_sep># 2. crop
<if_stmt>tfms_config.random_crop<block_start><if_stmt>tfms_config.random_crop_scales<block_start>crop=transforms.RandomResizedCropVideo(tfms_config.input_size tfms_config.random_crop_scales)<block_end><else_stmt><block_start>crop=transforms.RandomCropVideo(tfms_config.input_size)<block_end><block_end><else_stmt><block_start>crop=transforms.CenterCropVideo(tfms_config.input_size)<block_end>tfms.append(crop)<line_sep># 3. flip
tfms.append(transforms.RandomHorizontalFlipVideo(tfms_config.flip_ratio))<line_sep># 4. normalize
tfms.append(transforms.NormalizeVideo(tfms_config.mean tfms_config.std))<line_sep><return>Compose(tfms)<block_end><def_stmt>get_default_tfms_config train:bool<arrow>Config<block_start>"""
Args:
train: whether or not this is for training
Settings:
input_size (int or tuple): Model input image size.
im_scale (int or tuple): Resize target size.
resize_keep_ratio (bool): If True, keep the original ratio when resizing.
mean (tuple): Normalization mean.
if train:
std (tuple): Normalization std.
flip_ratio (float): Horizontal flip ratio.
random_crop (bool): If False, do center-crop.
random_crop_scales (tuple): Range of size of the origin size random cropped.
"""<line_sep>flip_ratio=0.5<if>train<else>0.0<line_sep>random_crop=<true><if>train<else><false><line_sep>random_crop_scales=(0.6 1.0)<if>train<else><none><line_sep><return>Config(dict(input_size=112 im_scale=128 resize_keep_ratio=<true> mean=DEFAULT_MEAN std=DEFAULT_STD flip_ratio=flip_ratio random_crop=random_crop random_crop_scales=random_crop_scales ))<block_end><class_stmt>VideoDataset<block_start>""" A video recognition dataset. """<def_stmt>__init__ self root:str seed:int=<none> train_pct:float=0.75 num_samples:int=1 sample_length:int=8 sample_step:int=1 temporal_jitter:bool=<true> temporal_jitter_step:int=2 random_shift:bool=<true> batch_size:int=8 video_ext:str="mp4" warning:bool=<false> train_split_file:str=<none> test_split_file:str=<none> train_transforms:Trans=get_transforms(train=<true>) test_transforms:Trans=get_transforms(train=<false>) <arrow><none><block_start>""" initialize dataset
Arg:
root: Videos directory.
seed: random seed
train_pct: percentage of dataset to use for training
num_samples: Number of clips to sample from each video.
sample_length: Number of consecutive frames to sample from a video (i.e. clip length).
sample_step: Sampling step.
temporal_jitter: Randomly skip frames when sampling each frames.
temporal_jitter_step: temporal jitter in frames
random_shift: Random temporal shift when sample a clip.
video_ext: Video file extension.
warning: On or off warning.
train_split_file: Annotation file containing video filenames and labels.
test_split_file: Annotation file containing video filenames and labels.
train_transforms: transforms for training
test_transforms: transforms for testing
"""<assert_stmt>sample_step<g>0<assert_stmt>num_samples<g>0<if_stmt>temporal_jitter<block_start><assert_stmt>temporal_jitter_step<g>0<block_end><if_stmt>train_split_file<block_start><assert_stmt>Path(train_split_file).exists()<assert_stmt>(test_split_file<is><not><none><and>Path(test_split_file).exists())<block_end><if_stmt>test_split_file<block_start><assert_stmt>Path(test_split_file).exists()<assert_stmt>(train_split_file<is><not><none><and>Path(train_split_file).exists())<block_end>self.root=root<line_sep>self.seed=seed<line_sep>self.num_samples=num_samples<line_sep>self.sample_length=sample_length<line_sep>self.sample_step=sample_step<line_sep>self.presample_length=sample_length<times>sample_step<line_sep>self.temporal_jitter_step=temporal_jitter_step<line_sep>self.train_transforms=train_transforms<line_sep>self.test_transforms=test_transforms<line_sep>self.random_shift=random_shift<line_sep>self.temporal_jitter=temporal_jitter<line_sep>self.batch_size=batch_size<line_sep>self.video_ext=video_ext<line_sep>self.warning=warning<line_sep># create training and validation datasets
self.train_ds,self.test_ds=(self.split_with_file(train_split_file=train_split_file test_split_file=test_split_file )<if>train_split_file<else>self.split_by_folder(train_pct=train_pct))<line_sep># initialize dataloaders
self.init_data_loaders()<block_end><def_stmt>split_by_folder self train_pct:float=0.8<arrow>Tuple[Dataset Dataset]<block_start>""" Split this dataset into a training and testing set based on the
folders that the videos are in.
```
/data
+-- action_class_1
| +-- video_01.mp4
| +-- video_02.mp4
| +-- ...
+-- action_class_2
| +-- video_11.mp4
| +-- video_12.mp4
| +-- ...
+-- ...
```
Args:
train_pct: the ratio of images to use for training vs
testing
Return
A training and testing dataset in that order
"""<line_sep>self.video_records=[]<line_sep># get all dirs in root (and make sure they are dirs)
dirs=[]<for_stmt>entry os.listdir(self.root)<block_start><if_stmt>os.path.isdir(os.path.join(self.root entry))<block_start>dirs.append(os.path.join(self.root entry))<block_end><block_end># add each video in each dir as a video record
label=0<line_sep>self.classes=[]<for_stmt>action dirs<block_start>action=os.path.basename(os.path.normpath(action))<line_sep>self.video_records.extend([VideoRecord([os.path.join(self.root action vid.split(".")[0]) label action ])<for>vid os.listdir(os.path.join(self.root action))])<line_sep>label<augadd>1<line_sep>self.classes.append(action)<block_end># random split
test_num=math.floor(len(self)<times>(1-train_pct))<if_stmt>self.seed<block_start>torch.manual_seed(self.seed)<block_end># set indices
indices=torch.randperm(len(self)).tolist()<line_sep>train_range=indices[test_num:]<line_sep>test_range=indices[:test_num]<line_sep><return>self.split_train_test(train_range test_range)<block_end><def_stmt>split_with_file self train_split_file:Union[Path str] test_split_file:Union[Path str] <arrow>Tuple[Dataset Dataset]<block_start>""" Split this dataset into a training and testing set using a split file.
Each line in the split file must use the form:
```
path/to/jumping/video_name_1 3
path/to/swimming/video_name_2 5
path/to/another/jumping/video_name_3 3
```
Args:
split_files: a tuple of 2 files
Return:
A training and testing dataset in that order
"""<line_sep>self.video_records=[]<line_sep># add train records
self.video_records.extend([VideoRecord(row.strip().split(" "))<for>row open(train_split_file)])<line_sep>train_len=len(self.video_records)<line_sep># add validation records
self.video_records.extend([VideoRecord(row.strip().split(" "))<for>row open(test_split_file)])<line_sep># create indices
indices=torch.arange(0 len(self.video_records))<line_sep>train_range=indices[:train_len]<line_sep>test_range=indices[train_len:]<line_sep><return>self.split_train_test(train_range test_range)<block_end><def_stmt>split_train_test self train_range:torch.Tensor test_range:torch.Tensor <arrow>Tuple[Dataset Dataset]<block_start>""" Split this dataset into a training and testing set
Args:
train_range: range of indices for training set
test_range: range of indices for testing set
Return
A training and testing dataset in that order
"""<line_sep># create train subset
train=copy.deepcopy(Subset(self train_range))<line_sep>train.dataset.transforms=self.train_transforms<line_sep>train.dataset.sample_step=(self.temporal_jitter_step<if>self.temporal_jitter<else>self.sample_step)<line_sep>train.dataset.presample_length=self.sample_length<times>self.sample_step<line_sep># create test subset
test=copy.deepcopy(Subset(self test_range))<line_sep>test.dataset.transforms=self.test_transforms<line_sep>test.dataset.random_shift=<false><line_sep>test.dataset.temporal_jitter=<false><line_sep><return>train test<block_end><def_stmt>init_data_loaders self<arrow><none><block_start>""" Create training and validation data loaders. """<line_sep>devices=num_devices()<line_sep>self.train_dl=DataLoader(self.train_ds batch_size=self.batch_size<times>devices shuffle=<true> num_workers=db_num_workers() pin_memory=<true> )<line_sep>self.test_dl=DataLoader(self.test_ds batch_size=self.batch_size<times>devices shuffle=<false> num_workers=db_num_workers() pin_memory=<true> )<block_end><def_stmt>__len__ self<arrow>int<block_start><return>len(self.video_records)<block_end><def_stmt>_sample_indices self record:VideoRecord<arrow>List[int]<block_start>"""
Create a list of frame-wise offsets into a video record. Depending on
whether or not 'random shift' is used, perform a uniform sample or a
random sample.
Args:
record (VideoRecord): A video record.
Return:
list: Segment offsets (start indices)
"""<if_stmt>record.num_frames<g>self.presample_length<block_start><if_stmt>self.random_shift# Random sample
<block_start>offsets=np.sort(randint(record.num_frames-self.presample_length+1 size=self.num_samples ))<block_end><else_stmt># Uniform sample
<block_start>distance=(record.num_frames-self.presample_length+1)/self.num_samples<line_sep>offsets=np.array([int(distance/2.0+distance<times>x)<for>x range(self.num_samples)])<block_end><block_end><else_stmt><block_start><if_stmt>self.warning<block_start>warnings.warn(f"num_samples and/or sample_length > num_frames in {record.path}")<block_end>offsets=np.zeros((self.num_samples ) dtype=int)<block_end><return>offsets<block_end><def_stmt>_get_frames self video_reader:decord.VideoReader offset:int <arrow>List[np.ndarray]<block_start>""" Get frames at sample length.
Args:
video_reader: the decord tool for parsing videos
offset: where to start the reader from
Returns
Frames at sample length in a List
"""<line_sep>clip=list()<line_sep># decord.seek() seems to have a bug. use seek_accurate().
video_reader.seek_accurate(offset)<line_sep># first frame
clip.append(video_reader.next().asnumpy())<line_sep># remaining frames
<try_stmt><block_start><for_stmt>i range(self.sample_length-1)<block_start>step=(randint(self.sample_step+1)<if>self.temporal_jitter<else>self.sample_step)<if_stmt>step<eq>0<and>self.temporal_jitter<block_start>clip.append(clip[-1].copy())<block_end><else_stmt><block_start><if_stmt>step<g>1<block_start>video_reader.skip_frames(step-1)<block_end>cur_frame=video_reader.next().asnumpy()<line_sep>clip.append(cur_frame)<block_end><block_end><block_end><except_stmt>StopIteration# pass when video has ended
<block_start><pass><block_end># if clip needs more frames, simply duplicate the last frame in the clip.
<while_stmt>len(clip)<l>self.sample_length<block_start>clip.append(clip[-1].copy())<block_end><return>clip<block_end><def_stmt>__getitem__ self idx:int<arrow>Tuple[torch.tensor int]<block_start>"""
Return:
(clips (torch.tensor), label (int))
"""<line_sep>record=self.video_records[idx]<line_sep>video_reader=decord.VideoReader("{}.{}".format(os.path.join(self.root record.path) self.video_ext) # TODO try to add `ctx=decord.ndarray.gpu(0) or .cuda(0)`
)<line_sep>record._num_frames=len(video_reader)<line_sep>offsets=self._sample_indices(record)<line_sep>clips=np.array([self._get_frames(video_reader o)<for>o offsets])<if_stmt>self.num_samples<eq>1<block_start><return>(# [T, H, W, C] -> [C, T, H, W]
self.transforms(torch.from_numpy(clips[0])) record.label )<block_end><else_stmt><block_start><return>(# [S, T, H, W, C] -> [S, C, T, H, W]
torch.stack([self.transforms(torch.from_numpy(c))<for>c clips]) record.label )<block_end><block_end><def_stmt>_show_batch self images:List[torch.tensor] labels:List[int] sample_length:int mean:Tuple[int int int]=DEFAULT_MEAN std:Tuple[int int int]=DEFAULT_STD <arrow><none><block_start>"""
Display a batch of images.
Args:
images: List of sample (clip) tensors
labels: List of labels
sample_length: Number of frames to show for each sample
mean: Normalization mean
std: Normalization std-dev
"""<line_sep>batch_size=len(images)<line_sep>plt.tight_layout()<line_sep>fig,axs=plt.subplots(batch_size sample_length figsize=(4<times>sample_length 3<times>batch_size) )<for_stmt>i,ax enumerate(axs)<block_start><if_stmt>batch_size<eq>1<block_start>clip=images[0]<block_end><else_stmt><block_start>clip=images[i]<block_end>clip=Rearrange("c t h w -> t c h w")(clip)<if_stmt><not>isinstance(ax np.ndarray)<block_start>ax=[ax]<block_end><for_stmt>j,a enumerate(ax)<block_start>a.axis("off")<line_sep>a.imshow(np.moveaxis(denormalize(clip[j] mean std).numpy() 0 -1))<line_sep># display label/label_name on the first image
<if_stmt>j<eq>0<block_start>a.text(x=3 y=15 s=f"{labels[i]}" fontsize=20 bbox=dict(facecolor="white" alpha=0.80) )<block_end><block_end><block_end><block_end><def_stmt>show_batch self train_or_test:str="train" rows:int=2<arrow><none><block_start>"""Plot first few samples in the datasets"""<if_stmt>train_or_test<eq>"train"<block_start>batch=[self.train_ds[i]<for>i range(rows)]<block_end><elif_stmt>train_or_test<eq>"test"<block_start>batch=[self.test_ds[i]<for>i range(rows)]<block_end><else_stmt><block_start><raise>ValueError("Unknown data type {}".format(which_data))<block_end>images=[im[0]<for>im batch]<line_sep>labels=[im[1]<for>im batch]<line_sep>self._show_batch(images labels self.sample_length)<block_end><block_end> |
<import_from_stmt>hwt.doc_markers internal<import_from_stmt>hwt.hdl.types.hdlType HdlType<class_stmt>HString(HdlType)<block_start><def_stmt>all_mask self<block_start><return>1<block_end>@internal@classmethod<def_stmt>getValueCls cls<block_start><try_stmt><block_start><return>cls._valCls<block_end><except_stmt>AttributeError<block_start><import_from_stmt>hwt.hdl.types.stringVal HStringVal<line_sep>cls._valCls=HStringVal<line_sep><return>cls._valCls<block_end><block_end><block_end> |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>st2tests.base BaseActionAliasTestCase<class_stmt>PackGet(BaseActionAliasTestCase)<block_start>action_alias_name="pack_get"<def_stmt>test_alias_pack_get self<block_start>format_string=self.action_alias_db.formats[0]["representation"][0]<line_sep>format_strings=self.action_alias_db.get_format_strings()<line_sep>command="pack get st2"<line_sep>expected_parameters={"pack":"st2"}<line_sep>self.assertExtractedParametersMatch(format_string=format_string command=command parameters=expected_parameters)<line_sep>self.assertCommandMatchesExactlyOneFormatString(format_strings=format_strings command=command)<block_end><block_end><class_stmt>PackInstall(BaseActionAliasTestCase)<block_start>action_alias_name="pack_install"<def_stmt>test_alias_pack_install self<block_start>format_string=self.action_alias_db.formats[0]["representation"][0]<line_sep>command="pack install st2"<line_sep>expected_parameters={"packs":"st2"}<line_sep>self.assertExtractedParametersMatch(format_string=format_string command=command parameters=expected_parameters)<block_end><block_end><class_stmt>PackSearch(BaseActionAliasTestCase)<block_start>action_alias_name="pack_search"<def_stmt>test_alias_pack_search self<block_start>format_string=self.action_alias_db.formats[0]["representation"][0]<line_sep>format_strings=self.action_alias_db.get_format_strings()<line_sep>command="pack search st2"<line_sep>expected_parameters={"query":"st2"}<line_sep>self.assertExtractedParametersMatch(format_string=format_string command=command parameters=expected_parameters)<line_sep>self.assertCommandMatchesExactlyOneFormatString(format_strings=format_strings command=command)<block_end><block_end><class_stmt>PackShow(BaseActionAliasTestCase)<block_start>action_alias_name="pack_show"<def_stmt>test_alias_pack_show self<block_start>format_string=self.action_alias_db.formats[0]["representation"][0]<line_sep>format_strings=self.action_alias_db.get_format_strings()<line_sep>command="pack show st2"<line_sep>expected_parameters={"pack":"st2"}<line_sep>self.assertExtractedParametersMatch(format_string=format_string command=command parameters=expected_parameters)<line_sep>self.assertCommandMatchesExactlyOneFormatString(format_strings=format_strings command=command)<block_end><block_end> |
<import_from_stmt>django.apps AppConfig<class_stmt>SqlordersConfig(AppConfig)<block_start>name='sqlorders'<line_sep>verbose_name='SQL工单配置'<block_end> |
# coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
<import_from_stmt>enum Enum<line_sep>__all__=['SupportedFilterTypes' ]<class_stmt>SupportedFilterTypes(str Enum)<block_start>"""
Type of product filter.
"""<line_sep>SHIP_TO_COUNTRIES="ShipToCountries"<line_sep>"""
Ship to country
"""<line_sep>DOUBLE_ENCRYPTION_STATUS="DoubleEncryptionStatus"<line_sep>"""
Double encryption status
"""<block_end> |
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>## baseline configuration in the class itself
<import_from_stmt>EventFilter.CSCRawToDigi.cscPackerDef_cfi cscPackerDef<line_sep>cscpacker=cscPackerDef.clone()<line_sep>## In Run-2 common: update the format version for new OTMBs in ME1/1
## Note: in the past, the packing with triggers and pretriggers was disabled
## for Run-2, Run-3 and Phase-2 scenarios. This should no longer be the case
## as of CMSSW_12_0_0_pre5
<import_from_stmt>Configuration.Eras.Modifier_run2_common_cff run2_common<line_sep>run2_common.toModify(cscpacker formatVersion=2013)<line_sep>## in Run-3 scenarios with GEM: pack GEM clusters
<import_from_stmt>Configuration.Eras.Modifier_run3_GEM_cff run3_GEM<line_sep>run3_GEM.toModify(cscpacker useGEMs=<true>)<line_sep> |
"""
"""<line_sep># Created on 2016.08.31
#
# Author: <NAME>
#
# Copyright 2013 - 2020 <NAME>
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
<import_from_stmt>sys stdin getdefaultencoding<import_from_stmt>.. ALL_ATTRIBUTES ALL_OPERATIONAL_ATTRIBUTES NO_ATTRIBUTES SEQUENCE_TYPES<import_from_stmt>..core.exceptions LDAPConfigurationParameterError<line_sep># checks
_CLASSES_EXCLUDED_FROM_CHECK=['subschema']<line_sep>_ATTRIBUTES_EXCLUDED_FROM_CHECK=[ALL_ATTRIBUTES ALL_OPERATIONAL_ATTRIBUTES NO_ATTRIBUTES 'ldapSyntaxes' 'matchingRules' 'matchingRuleUse' 'dITContentRules' 'dITStructureRules' 'nameForms' 'altServer' 'namingContexts' 'supportedControl' 'supportedExtension' 'supportedFeatures' 'supportedCapabilities' 'supportedLdapVersion' 'supportedSASLMechanisms' 'vendorName' 'vendorVersion' 'subschemaSubentry' 'ACL']<line_sep>_UTF8_ENCODED_SYNTAXES=['1.2.840.113556.1.4.904' # DN String [MICROSOFT]
'1.2.840.113556.1.4.1362' # String (Case) [MICROSOFT]
'1.3.6.1.4.1.1466.172.16.31.10' # DN String [RFC4517]
'1.3.6.1.4.1.1466.192.168.3.11' # Directory String [RFC4517]
'1.3.6.1.4.1.1466.192.168.3.11' # Postal Address) [RFC4517]
'1.3.6.1.4.1.1466.172.16.31.10' # Substring Assertion [RFC4517]
'2.16.840.1.113719.1.1.5.1.6' # Case Ignore List [NOVELL]
'2.16.840.1.113719.1.1.5.1.14' # Tagged String [NOVELL]
'2.16.840.1.113719.1.1.5.1.15' # Tagged Name and String [NOVELL]
'2.16.840.1.113719.1.1.5.1.23' # Tagged Name [NOVELL]
'2.16.840.1.113719.1.1.5.1.25']<line_sep># Typed Name [NOVELL]
_UTF8_ENCODED_TYPES=[]<line_sep>_ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF=['msds-memberOfTransitive' 'msds-memberTransitive' 'entryDN']<line_sep>_IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF=['instanceType' 'nTSecurityDescriptor' 'objectCategory']<line_sep>_CASE_INSENSITIVE_ATTRIBUTE_NAMES=<true><line_sep>_CASE_INSENSITIVE_SCHEMA_NAMES=<true><line_sep># abstraction layer
_ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX='OA_'<line_sep># communication
_POOLING_LOOP_TIMEOUT=10# number of seconds to wait before restarting a cycle to find an active server in the pool
_RESPONSE_SLEEPTIME=0.05# seconds to wait while waiting for a response in asynchronous strategies
_RESPONSE_WAITING_TIMEOUT=3# waiting timeout for receiving a response in asynchronous strategies
_SOCKET_SIZE=4096# socket byte size
_CHECK_AVAILABILITY_TIMEOUT=2.5# default timeout for socket connect when checking availability
_RESET_AVAILABILITY_TIMEOUT=5# default timeout for resetting the availability status when checking candidate addresses
_RESTARTABLE_SLEEPTIME=2# time to wait in a restartable strategy before retrying the request
_RESTARTABLE_TRIES=30# number of times to retry in a restartable strategy before giving up. Set to True for unlimited retries
_REUSABLE_THREADED_POOL_SIZE=5<line_sep>_REUSABLE_THREADED_LIFETIME=3600# 1 hour
_DEFAULT_THREADED_POOL_NAME='REUSABLE_DEFAULT_POOL'<line_sep>_ADDRESS_INFO_REFRESH_TIME=300# seconds to wait before refreshing address info from dns
_ADDITIONAL_SERVER_ENCODINGS=['latin-1' 'koi8-r']# some broken LDAP implementation may have different encoding than those expected by RFCs
_ADDITIONAL_CLIENT_ENCODINGS=['utf-8']<line_sep>_IGNORE_MALFORMED_SCHEMA=<false># some flaky LDAP servers returns malformed schema. If True no expection is raised and schema is thrown away
_DEFAULT_SERVER_ENCODING='utf-8'# should always be utf-8
<if_stmt>stdin<and>hasattr(stdin 'encoding')<and>stdin.encoding<block_start>_DEFAULT_CLIENT_ENCODING=stdin.encoding<block_end><elif_stmt>getdefaultencoding()<block_start>_DEFAULT_CLIENT_ENCODING=getdefaultencoding()<block_end><else_stmt><block_start>_DEFAULT_CLIENT_ENCODING='utf-8'<block_end>PARAMETERS=['CASE_INSENSITIVE_ATTRIBUTE_NAMES' 'CASE_INSENSITIVE_SCHEMA_NAMES' 'ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX' 'POOLING_LOOP_TIMEOUT' 'RESPONSE_SLEEPTIME' 'RESPONSE_WAITING_TIMEOUT' 'SOCKET_SIZE' 'CHECK_AVAILABILITY_TIMEOUT' 'RESTARTABLE_SLEEPTIME' 'RESTARTABLE_TRIES' 'REUSABLE_THREADED_POOL_SIZE' 'REUSABLE_THREADED_LIFETIME' 'DEFAULT_THREADED_POOL_NAME' 'ADDRESS_INFO_REFRESH_TIME' 'RESET_AVAILABILITY_TIMEOUT' 'DEFAULT_CLIENT_ENCODING' 'DEFAULT_SERVER_ENCODING' 'CLASSES_EXCLUDED_FROM_CHECK' 'ATTRIBUTES_EXCLUDED_FROM_CHECK' 'UTF8_ENCODED_SYNTAXES' 'UTF8_ENCODED_TYPES' 'ADDITIONAL_SERVER_ENCODINGS' 'ADDITIONAL_CLIENT_ENCODINGS' 'IGNORE_MALFORMED_SCHEMA' 'ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF' 'IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF']<def_stmt>get_config_parameter parameter<block_start><if_stmt>parameter<eq>'CASE_INSENSITIVE_ATTRIBUTE_NAMES'# Boolean
<block_start><return>_CASE_INSENSITIVE_ATTRIBUTE_NAMES<block_end><elif_stmt>parameter<eq>'CASE_INSENSITIVE_SCHEMA_NAMES'# Boolean
<block_start><return>_CASE_INSENSITIVE_SCHEMA_NAMES<block_end><elif_stmt>parameter<eq>'ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX'# String
<block_start><return>_ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX<block_end><elif_stmt>parameter<eq>'POOLING_LOOP_TIMEOUT'# Integer
<block_start><return>_POOLING_LOOP_TIMEOUT<block_end><elif_stmt>parameter<eq>'RESPONSE_SLEEPTIME'# Integer
<block_start><return>_RESPONSE_SLEEPTIME<block_end><elif_stmt>parameter<eq>'RESPONSE_WAITING_TIMEOUT'# Integer
<block_start><return>_RESPONSE_WAITING_TIMEOUT<block_end><elif_stmt>parameter<eq>'SOCKET_SIZE'# Integer
<block_start><return>_SOCKET_SIZE<block_end><elif_stmt>parameter<eq>'CHECK_AVAILABILITY_TIMEOUT'# Integer
<block_start><return>_CHECK_AVAILABILITY_TIMEOUT<block_end><elif_stmt>parameter<eq>'RESTARTABLE_SLEEPTIME'# Integer
<block_start><return>_RESTARTABLE_SLEEPTIME<block_end><elif_stmt>parameter<eq>'RESTARTABLE_TRIES'# Integer
<block_start><return>_RESTARTABLE_TRIES<block_end><elif_stmt>parameter<eq>'REUSABLE_THREADED_POOL_SIZE'# Integer
<block_start><return>_REUSABLE_THREADED_POOL_SIZE<block_end><elif_stmt>parameter<eq>'REUSABLE_THREADED_LIFETIME'# Integer
<block_start><return>_REUSABLE_THREADED_LIFETIME<block_end><elif_stmt>parameter<eq>'DEFAULT_THREADED_POOL_NAME'# String
<block_start><return>_DEFAULT_THREADED_POOL_NAME<block_end><elif_stmt>parameter<eq>'ADDRESS_INFO_REFRESH_TIME'# Integer
<block_start><return>_ADDRESS_INFO_REFRESH_TIME<block_end><elif_stmt>parameter<eq>'RESET_AVAILABILITY_TIMEOUT'# Integer
<block_start><return>_RESET_AVAILABILITY_TIMEOUT<block_end><elif_stmt>parameter<in>['DEFAULT_CLIENT_ENCODING' 'DEFAULT_ENCODING']# String - DEFAULT_ENCODING for backward compatibility
<block_start><return>_DEFAULT_CLIENT_ENCODING<block_end><elif_stmt>parameter<eq>'DEFAULT_SERVER_ENCODING'# String
<block_start><return>_DEFAULT_SERVER_ENCODING<block_end><elif_stmt>parameter<eq>'CLASSES_EXCLUDED_FROM_CHECK'# Sequence
<block_start><if_stmt>isinstance(_CLASSES_EXCLUDED_FROM_CHECK SEQUENCE_TYPES)<block_start><return>_CLASSES_EXCLUDED_FROM_CHECK<block_end><else_stmt><block_start><return>[_CLASSES_EXCLUDED_FROM_CHECK]<block_end><block_end><elif_stmt>parameter<eq>'ATTRIBUTES_EXCLUDED_FROM_CHECK'# Sequence
<block_start><if_stmt>isinstance(_ATTRIBUTES_EXCLUDED_FROM_CHECK SEQUENCE_TYPES)<block_start><return>_ATTRIBUTES_EXCLUDED_FROM_CHECK<block_end><else_stmt><block_start><return>[_ATTRIBUTES_EXCLUDED_FROM_CHECK]<block_end><block_end><elif_stmt>parameter<eq>'UTF8_ENCODED_SYNTAXES'# Sequence
<block_start><if_stmt>isinstance(_UTF8_ENCODED_SYNTAXES SEQUENCE_TYPES)<block_start><return>_UTF8_ENCODED_SYNTAXES<block_end><else_stmt><block_start><return>[_UTF8_ENCODED_SYNTAXES]<block_end><block_end><elif_stmt>parameter<eq>'UTF8_ENCODED_TYPES'# Sequence
<block_start><if_stmt>isinstance(_UTF8_ENCODED_TYPES SEQUENCE_TYPES)<block_start><return>_UTF8_ENCODED_TYPES<block_end><else_stmt><block_start><return>[_UTF8_ENCODED_TYPES]<block_end><block_end><elif_stmt>parameter<in>['ADDITIONAL_SERVER_ENCODINGS' 'ADDITIONAL_ENCODINGS']# Sequence - ADDITIONAL_ENCODINGS for backward compatibility
<block_start><if_stmt>isinstance(_ADDITIONAL_SERVER_ENCODINGS SEQUENCE_TYPES)<block_start><return>_ADDITIONAL_SERVER_ENCODINGS<block_end><else_stmt><block_start><return>[_ADDITIONAL_SERVER_ENCODINGS]<block_end><block_end><elif_stmt>parameter<in>['ADDITIONAL_CLIENT_ENCODINGS']# Sequence
<block_start><if_stmt>isinstance(_ADDITIONAL_CLIENT_ENCODINGS SEQUENCE_TYPES)<block_start><return>_ADDITIONAL_CLIENT_ENCODINGS<block_end><else_stmt><block_start><return>[_ADDITIONAL_CLIENT_ENCODINGS]<block_end><block_end><elif_stmt>parameter<eq>'IGNORE_MALFORMED_SCHEMA'# Boolean
<block_start><return>_IGNORE_MALFORMED_SCHEMA<block_end><elif_stmt>parameter<eq>'ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF'# Sequence
<block_start><if_stmt>isinstance(_ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF SEQUENCE_TYPES)<block_start><return>_ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF<block_end><else_stmt><block_start><return>[_ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF]<block_end><block_end><elif_stmt>parameter<eq>'IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF'# Sequence
<block_start><if_stmt>isinstance(_IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF SEQUENCE_TYPES)<block_start><return>_IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF<block_end><else_stmt><block_start><return>[_IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF]<block_end><block_end><raise>LDAPConfigurationParameterError('configuration parameter %s not valid'%parameter)<block_end><def_stmt>set_config_parameter parameter value<block_start><if_stmt>parameter<eq>'CASE_INSENSITIVE_ATTRIBUTE_NAMES'<block_start><global>_CASE_INSENSITIVE_ATTRIBUTE_NAMES<line_sep>_CASE_INSENSITIVE_ATTRIBUTE_NAMES=value<block_end><elif_stmt>parameter<eq>'CASE_INSENSITIVE_SCHEMA_NAMES'<block_start><global>_CASE_INSENSITIVE_SCHEMA_NAMES<line_sep>_CASE_INSENSITIVE_SCHEMA_NAMES=value<block_end><elif_stmt>parameter<eq>'ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX'<block_start><global>_ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX<line_sep>_ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX=value<block_end><elif_stmt>parameter<eq>'POOLING_LOOP_TIMEOUT'<block_start><global>_POOLING_LOOP_TIMEOUT<line_sep>_POOLING_LOOP_TIMEOUT=value<block_end><elif_stmt>parameter<eq>'RESPONSE_SLEEPTIME'<block_start><global>_RESPONSE_SLEEPTIME<line_sep>_RESPONSE_SLEEPTIME=value<block_end><elif_stmt>parameter<eq>'RESPONSE_WAITING_TIMEOUT'<block_start><global>_RESPONSE_WAITING_TIMEOUT<line_sep>_RESPONSE_WAITING_TIMEOUT=value<block_end><elif_stmt>parameter<eq>'SOCKET_SIZE'<block_start><global>_SOCKET_SIZE<line_sep>_SOCKET_SIZE=value<block_end><elif_stmt>parameter<eq>'CHECK_AVAILABILITY_TIMEOUT'<block_start><global>_CHECK_AVAILABILITY_TIMEOUT<line_sep>_CHECK_AVAILABILITY_TIMEOUT=value<block_end><elif_stmt>parameter<eq>'RESTARTABLE_SLEEPTIME'<block_start><global>_RESTARTABLE_SLEEPTIME<line_sep>_RESTARTABLE_SLEEPTIME=value<block_end><elif_stmt>parameter<eq>'RESTARTABLE_TRIES'<block_start><global>_RESTARTABLE_TRIES<line_sep>_RESTARTABLE_TRIES=value<block_end><elif_stmt>parameter<eq>'REUSABLE_THREADED_POOL_SIZE'<block_start><global>_REUSABLE_THREADED_POOL_SIZE<line_sep>_REUSABLE_THREADED_POOL_SIZE=value<block_end><elif_stmt>parameter<eq>'REUSABLE_THREADED_LIFETIME'<block_start><global>_REUSABLE_THREADED_LIFETIME<line_sep>_REUSABLE_THREADED_LIFETIME=value<block_end><elif_stmt>parameter<eq>'DEFAULT_THREADED_POOL_NAME'<block_start><global>_DEFAULT_THREADED_POOL_NAME<line_sep>_DEFAULT_THREADED_POOL_NAME=value<block_end><elif_stmt>parameter<eq>'ADDRESS_INFO_REFRESH_TIME'<block_start><global>_ADDRESS_INFO_REFRESH_TIME<line_sep>_ADDRESS_INFO_REFRESH_TIME=value<block_end><elif_stmt>parameter<eq>'RESET_AVAILABILITY_TIMEOUT'<block_start><global>_RESET_AVAILABILITY_TIMEOUT<line_sep>_RESET_AVAILABILITY_TIMEOUT=value<block_end><elif_stmt>parameter<in>['DEFAULT_CLIENT_ENCODING' 
'DEFAULT_ENCODING']<block_start><global>_DEFAULT_CLIENT_ENCODING<line_sep>_DEFAULT_CLIENT_ENCODING=value<block_end><elif_stmt>parameter<eq>'DEFAULT_SERVER_ENCODING'<block_start><global>_DEFAULT_SERVER_ENCODING<line_sep>_DEFAULT_SERVER_ENCODING=value<block_end><elif_stmt>parameter<eq>'CLASSES_EXCLUDED_FROM_CHECK'<block_start><global>_CLASSES_EXCLUDED_FROM_CHECK<line_sep>_CLASSES_EXCLUDED_FROM_CHECK=value<block_end><elif_stmt>parameter<eq>'ATTRIBUTES_EXCLUDED_FROM_CHECK'<block_start><global>_ATTRIBUTES_EXCLUDED_FROM_CHECK<line_sep>_ATTRIBUTES_EXCLUDED_FROM_CHECK=value<block_end><elif_stmt>parameter<eq>'UTF8_ENCODED_SYNTAXES'<block_start><global>_UTF8_ENCODED_SYNTAXES<line_sep>_UTF8_ENCODED_SYNTAXES=value<block_end><elif_stmt>parameter<eq>'UTF8_ENCODED_TYPES'<block_start><global>_UTF8_ENCODED_TYPES<line_sep>_UTF8_ENCODED_TYPES=value<block_end><elif_stmt>parameter<in>['ADDITIONAL_SERVER_ENCODINGS' 'ADDITIONAL_ENCODINGS']<block_start><global>_ADDITIONAL_SERVER_ENCODINGS<line_sep>_ADDITIONAL_SERVER_ENCODINGS=value<if>isinstance(value SEQUENCE_TYPES)<else>[value]<block_end><elif_stmt>parameter<in>['ADDITIONAL_CLIENT_ENCODINGS']<block_start><global>_ADDITIONAL_CLIENT_ENCODINGS<line_sep>_ADDITIONAL_CLIENT_ENCODINGS=value<if>isinstance(value SEQUENCE_TYPES)<else>[value]<block_end><elif_stmt>parameter<eq>'IGNORE_MALFORMED_SCHEMA'<block_start><global>_IGNORE_MALFORMED_SCHEMA<line_sep>_IGNORE_MALFORMED_SCHEMA=value<block_end><elif_stmt>parameter<eq>'ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF'<block_start><global>_ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF<line_sep>_ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF=value<block_end><elif_stmt>parameter<eq>'IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF'<block_start><global>_IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF<line_sep>_IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF=value<block_end><else_stmt><block_start><raise>LDAPConfigurationParameterError('unable to set configuration parameter %s'%parameter)<block_end><block_end> |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_stmt>proto# type: ignore
<import_from_stmt>google.ads.googleads.v8.enums.types conversion_value_rule_status<import_from_stmt>google.ads.googleads.v8.enums.types value_rule_device_type<import_from_stmt>google.ads.googleads.v8.enums.types value_rule_geo_location_match_type <import_from_stmt>google.ads.googleads.v8.enums.types value_rule_operation<line_sep>__protobuf__=proto.module(package="google.ads.googleads.v8.resources" marshal="google.ads.googleads.v8" manifest={"ConversionValueRule" } )<class_stmt>ConversionValueRule(proto.Message)<block_start>r"""A conversion value rule
Attributes:
resource_name (str):
Immutable. The resource name of the conversion value rule.
Conversion value rule resource names have the form:
``customers/{customer_id}/conversionValueRules/{conversion_value_rule_id}``
id (int):
Output only. The ID of the conversion value
rule.
action (google.ads.googleads.v8.resources.types.ConversionValueRule.ValueRuleAction):
Action applied when the rule is triggered.
geo_location_condition (google.ads.googleads.v8.resources.types.ConversionValueRule.ValueRuleGeoLocationCondition):
Condition for Geo location that must be
satisfied for the value rule to apply.
device_condition (google.ads.googleads.v8.resources.types.ConversionValueRule.ValueRuleDeviceCondition):
Condition for device type that must be
satisfied for the value rule to apply.
audience_condition (google.ads.googleads.v8.resources.types.ConversionValueRule.ValueRuleAudienceCondition):
Condition for audience that must be satisfied
for the value rule to apply.
owner_customer (str):
Output only. The resource name of the conversion value
rule's owner customer. When the value rule is inherited from
a manager customer, owner_customer will be the resource name
of the manager whereas the customer in the resource_name
will be of the requesting serving customer. \*\* Read-only
\*\*
status (google.ads.googleads.v8.enums.types.ConversionValueRuleStatusEnum.ConversionValueRuleStatus):
The status of the conversion value rule.
"""<class_stmt>ValueRuleAction(proto.Message)<block_start>r"""Action applied when rule is applied.
Attributes:
operation (google.ads.googleads.v8.enums.types.ValueRuleOperationEnum.ValueRuleOperation):
Specifies applied operation.
value (float):
Specifies applied value.
"""<line_sep>operation=proto.Field(proto.ENUM number=1 enum=value_rule_operation.ValueRuleOperationEnum.ValueRuleOperation )<line_sep>value=proto.Field(proto.DOUBLE number=2 )<block_end><class_stmt>ValueRuleGeoLocationCondition(proto.Message)<block_start>r"""Condition on Geo dimension.
Attributes:
excluded_geo_target_constants (Sequence[str]):
Geo locations that advertisers want to
exclude.
excluded_geo_match_type (google.ads.googleads.v8.enums.types.ValueRuleGeoLocationMatchTypeEnum.ValueRuleGeoLocationMatchType):
Excluded Geo location match type.
geo_target_constants (Sequence[str]):
Geo locations that advertisers want to
include.
geo_match_type (google.ads.googleads.v8.enums.types.ValueRuleGeoLocationMatchTypeEnum.ValueRuleGeoLocationMatchType):
Included Geo location match type.
"""<line_sep>excluded_geo_target_constants=proto.RepeatedField(proto.STRING number=1 )<line_sep>excluded_geo_match_type=proto.Field(proto.ENUM number=2 enum=value_rule_geo_location_match_type.ValueRuleGeoLocationMatchTypeEnum.ValueRuleGeoLocationMatchType )<line_sep>geo_target_constants=proto.RepeatedField(proto.STRING number=3 )<line_sep>geo_match_type=proto.Field(proto.ENUM number=4 enum=value_rule_geo_location_match_type.ValueRuleGeoLocationMatchTypeEnum.ValueRuleGeoLocationMatchType )<block_end><class_stmt>ValueRuleAudienceCondition(proto.Message)<block_start>r"""Condition on Audience dimension.
Attributes:
user_lists (Sequence[str]):
User Lists.
user_interests (Sequence[str]):
User Interests.
"""<line_sep>user_lists=proto.RepeatedField(proto.STRING number=1 )<line_sep>user_interests=proto.RepeatedField(proto.STRING number=2 )<block_end><class_stmt>ValueRuleDeviceCondition(proto.Message)<block_start>r"""Condition on Device dimension.
Attributes:
device_types (Sequence[google.ads.googleads.v8.enums.types.ValueRuleDeviceTypeEnum.ValueRuleDeviceType]):
Value for device type condition.
"""<line_sep>device_types=proto.RepeatedField(proto.ENUM number=1 enum=value_rule_device_type.ValueRuleDeviceTypeEnum.ValueRuleDeviceType )<block_end>resource_name=proto.Field(proto.STRING number=1 )<line_sep>id=proto.Field(proto.INT64 number=2 )<line_sep>action=proto.Field(proto.MESSAGE number=3 message=ValueRuleAction )<line_sep>geo_location_condition=proto.Field(proto.MESSAGE number=4 message=ValueRuleGeoLocationCondition )<line_sep>device_condition=proto.Field(proto.MESSAGE number=5 message=ValueRuleDeviceCondition )<line_sep>audience_condition=proto.Field(proto.MESSAGE number=6 message=ValueRuleAudienceCondition )<line_sep>owner_customer=proto.Field(proto.STRING number=7 )<line_sep>status=proto.Field(proto.ENUM number=8 enum=conversion_value_rule_status.ConversionValueRuleStatusEnum.ConversionValueRuleStatus )<block_end>__all__=tuple(sorted(__protobuf__.manifest))<line_sep> |
"""
Test example snippets from the lldb 'help expression' output.
"""<import_from_future_stmt> print_function<import_stmt>lldb<import_from_stmt>lldbsuite.test.decorators *<import_from_stmt>lldbsuite.test.lldbtest *<import_from_stmt>lldbsuite.test lldbutil<class_stmt>Radar9673644TestCase(TestBase)<block_start>mydir=TestBase.compute_mydir(__file__)<def_stmt>setUp self# Call super's setUp().
<block_start>TestBase.setUp(self)<line_sep># Find the line number to break inside main().
self.main_source="main.c"<line_sep>self.line=line_number(self.main_source '// Set breakpoint here.')<block_end><def_stmt>test_expr_commands self<block_start>"""The following expression commands should just work."""<line_sep>self.build()<line_sep>self.runCmd("file "+self.getBuildArtifact("a.out") CURRENT_EXECUTABLE_SET)<line_sep>lldbutil.run_break_set_by_file_and_line(self self.main_source self.line num_expected_locations=1 loc_exact=<true>)<line_sep>self.runCmd("run" RUN_SUCCEEDED)<line_sep># rdar://problem/9673664 lldb expression evaluation problem
self.expect('expr char str[] = "foo"; str[0]' substrs=["'f'"])<line_sep># runCmd: expr char c[] = "foo"; c[0]
# output: (char) $0 = 'f'
<block_end><block_end> |
# Copyright 2011 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
<import_from_future_stmt> with_statement<import_stmt>os<import_from_stmt>multiprocessing Process Queue cpu_count<import_from_stmt>whoosh.compat xrange iteritems pickle<import_from_stmt>whoosh.codec base<import_from_stmt>whoosh.writing PostingPool SegmentWriter<import_from_stmt>whoosh.externalsort imerge<import_from_stmt>whoosh.util random_name<def_stmt>finish_subsegment writer k=64# Tell the pool to finish up the current file
<block_start>writer.pool.save()<line_sep># Tell the pool to merge any and all runs in the pool until there
# is only one run remaining. "k" is an optional parameter passed
# from the parent which sets the maximum number of files to open
# while reducing.
writer.pool.reduce_to(1 k)<line_sep># The filename of the single remaining run
runname=writer.pool.runs[0]<line_sep># The indexed field names
fieldnames=writer.pool.fieldnames<line_sep># The segment object (parent can use this to re-open the files created
# by the sub-writer)
segment=writer._partial_segment()<line_sep><return>runname fieldnames segment<block_end># Multiprocessing Writer
<class_stmt>SubWriterTask(Process)# This is a Process object that takes "jobs" off a job Queue, processes
# them, and when it's done, puts a summary of its work on a results Queue
<block_start><def_stmt>__init__ self storage indexname jobqueue resultqueue kwargs multisegment<block_start>Process.__init__(self)<line_sep>self.storage=storage<line_sep>self.indexname=indexname<line_sep>self.jobqueue=jobqueue<line_sep>self.resultqueue=resultqueue<line_sep>self.kwargs=kwargs<line_sep>self.multisegment=multisegment<line_sep>self.running=<true><block_end><def_stmt>run self# This is the main loop of the process. OK, so the way this works is
# kind of brittle and stupid, but I had to figure out how to use the
# multiprocessing module, work around bugs, and address performance
# issues, so there is at least some reasoning behind some of this
# The "parent" task farms individual documents out to the subtasks for
# indexing. You could pickle the actual documents and put them in the
# queue, but that is not very performant. Instead, we assume the tasks
# share a filesystem and use that to pass the information around. The
# parent task writes a certain number of documents to a file, then puts
# the filename on the "job queue". A subtask gets the filename off the
# queue and reads through the file processing the documents.
<block_start>jobqueue=self.jobqueue<line_sep>resultqueue=self.resultqueue<line_sep>multisegment=self.multisegment<line_sep># Open a placeholder object representing the index
ix=self.storage.open_index(self.indexname)<line_sep># Open a writer for the index. The _lk=False parameter means to not try
# to lock the index (the parent object that started me takes care of
# locking the index)
writer=self.writer=SegmentWriter(ix _lk=<false> **self.kwargs)<line_sep># If the parent task calls cancel() on me, it will set self.running to
# False, so I'll notice the next time through the loop
<while_stmt>self.running# Take an object off the job queue
<block_start>jobinfo=jobqueue.get()<line_sep># If the object is None, it means the parent task wants me to
# finish up
<if_stmt>jobinfo<is><none><block_start><break><block_end># The object from the queue is a tuple of (filename,
# number_of_docs_in_file). Pass those two pieces of information as
# arguments to _process_file().
self._process_file(*jobinfo)<block_end><if_stmt><not>self.running# I was cancelled, so I'll cancel my underlying writer
<block_start>writer.cancel()<block_end><else_stmt><block_start><if_stmt>multisegment# Actually finish the segment and return it with no run
<block_start>runname=<none><line_sep>fieldnames=writer.pool.fieldnames<line_sep>segment=writer._finalize_segment()<block_end><else_stmt># Merge all runs in the writer's pool into one run, close the
# segment, and return the run name and the segment
<block_start>k=self.kwargs.get("k" 64)<line_sep>runname,fieldnames,segment=finish_subsegment(writer k)<block_end># Put the results (the run filename and the segment object) on the
# result queue
resultqueue.put((runname fieldnames segment) timeout=5)<block_end><block_end><def_stmt>_process_file self filename doc_count# This method processes a "job file" written out by the parent task. A
# job file is a series of pickled (code, arguments) tuples. Currently
# the only command codes is 0=add_document
<block_start>writer=self.writer<line_sep>tempstorage=writer.temp_storage()<line_sep>load=pickle.load<with_stmt>tempstorage.open_file(filename).raw_file()<as>f<block_start><for_stmt>_ xrange(doc_count)# Load the next pickled tuple from the file
<block_start>code,args=load(f)<assert_stmt>code<eq>0<line_sep>writer.add_document(**args)<block_end><block_end># Remove the job file
tempstorage.delete_file(filename)<block_end><def_stmt>cancel self<block_start>self.running=<false><block_end><block_end><class_stmt>MpWriter(SegmentWriter)<block_start><def_stmt>__init__ self ix procs=<none> batchsize=100 subargs=<none> multisegment=<false> **kwargs# This is the "main" writer that will aggregate the results created by
# the sub-tasks
<block_start>SegmentWriter.__init__(self ix **kwargs)<line_sep>self.procs=procs<or>cpu_count()<line_sep># The maximum number of documents in each job file submitted to the
# sub-tasks
self.batchsize=batchsize<line_sep># You can use keyword arguments or the "subargs" argument to pass
# keyword arguments to the sub-writers
self.subargs=subargs<if>subargs<else>kwargs<line_sep># If multisegment is True, don't merge the segments created by the
# sub-writers, just add them directly to the TOC
self.multisegment=multisegment<line_sep># A list to hold the sub-task Process objects
self.tasks=[]<line_sep># A queue to pass the filenames of job files to the sub-tasks
self.jobqueue=Queue(self.procs<times>4)<line_sep># A queue to get back the final results of the sub-tasks
self.resultqueue=Queue()<line_sep># A buffer for documents before they are flushed to a job file
self.docbuffer=[]<line_sep>self._grouping=0<line_sep>self._added_sub=<false><block_end><def_stmt>_new_task self<block_start>task=SubWriterTask(self.storage self.indexname self.jobqueue self.resultqueue self.subargs self.multisegment)<line_sep>self.tasks.append(task)<line_sep>task.start()<line_sep><return>task<block_end><def_stmt>_enqueue self# Flush the documents stored in self.docbuffer to a file and put the
# filename on the job queue
<block_start>docbuffer=self.docbuffer<line_sep>dump=pickle.dump<line_sep>length=len(docbuffer)<line_sep>filename="%s.doclist"%random_name()<with_stmt>self.temp_storage().create_file(filename).raw_file()<as>f<block_start><for_stmt>item docbuffer<block_start>dump(item f -1)<block_end><block_end><if_stmt>len(self.tasks)<l>self.procs<block_start>self._new_task()<block_end>jobinfo=(filename length)<line_sep>self.jobqueue.put(jobinfo)<line_sep>self.docbuffer=[]<block_end><def_stmt>cancel self<block_start><try_stmt><block_start><for_stmt>task self.tasks<block_start>task.cancel()<block_end><block_end><finally_stmt><block_start>SegmentWriter.cancel(self)<block_end><block_end><def_stmt>start_group self<block_start>self._grouping<augadd>1<block_end><def_stmt>end_group self<block_start><if_stmt><not>self._grouping<block_start><raise>Exception("Unbalanced end_group")<block_end>self._grouping<augsub>1<block_end><def_stmt>add_document self **fields# Add the document to the docbuffer
<block_start>self.docbuffer.append((0 fields))<line_sep># If the buffer is full, flush it to the job queue
<if_stmt><not>self._grouping<and>len(self.docbuffer)<ge>self.batchsize<block_start>self._enqueue()<block_end>self._added_sub=<true><block_end><def_stmt>_read_and_renumber_run self path offset# Note that SortingPool._read_run() automatically deletes the run file
# when it's finished
<block_start>gen=self.pool._read_run(path)<line_sep># If offset is 0, just return the items unchanged
<if_stmt><not>offset<block_start><return>gen<block_end><else_stmt># Otherwise, add the offset to each docnum
<block_start><return>((fname text docnum+offset weight value)<for>fname,text,docnum,weight,value gen)<block_end><block_end><def_stmt>commit self mergetype=<none> optimize=<none> merge=<none><block_start><if_stmt>self._added_sub# If documents have been added to sub-writers, use the parallel
# merge commit code
<block_start>self._commit(mergetype optimize merge)<block_end><else_stmt># Otherwise, just do a regular-old commit
<block_start>SegmentWriter.commit(self mergetype=mergetype optimize=optimize merge=merge)<block_end><block_end><def_stmt>_commit self mergetype optimize merge# Index the remaining documents in the doc buffer
<block_start><if_stmt>self.docbuffer<block_start>self._enqueue()<block_end># Tell the tasks to finish
<for_stmt>task self.tasks<block_start>self.jobqueue.put(<none>)<block_end># Merge existing segments
finalsegments=self._merge_segments(mergetype optimize merge)<line_sep># Wait for the subtasks to finish
<for_stmt>task self.tasks<block_start>task.join()<block_end># Pull a (run_file_name, fieldnames, segment) tuple off the result
# queue for each sub-task, representing the final results of the task
results=[]<for_stmt>task self.tasks<block_start>results.append(self.resultqueue.get(timeout=5))<block_end><if_stmt>self.multisegment# If we're not merging the segments, we don't care about the runname
# and fieldnames in the results... just pull out the segments and
# add them to the list of final segments
<block_start>finalsegments<augadd>[s<for>_,_,s results]<if_stmt>self._added<block_start>finalsegments.append(self._finalize_segment())<block_end><else_stmt><block_start>self._close_segment()<block_end><assert_stmt>self.perdocwriter.is_closed<block_end><else_stmt># Merge the posting sources from the sub-writers and my
# postings into this writer
<block_start>self._merge_subsegments(results mergetype)<line_sep>self._close_segment()<line_sep>self._assemble_segment()<line_sep>finalsegments.append(self.get_segment())<assert_stmt>self.perdocwriter.is_closed<block_end>self._commit_toc(finalsegments)<line_sep>self._finish()<block_end><def_stmt>_merge_subsegments self results mergetype<block_start>schema=self.schema<line_sep>schemanames=set(schema.names())<line_sep>storage=self.storage<line_sep>codec=self.codec<line_sep>sources=[]<line_sep># If information was added to this writer the conventional (e.g.
# through add_reader or merging segments), add it as an extra source
<if_stmt>self._added<block_start>sources.append(self.pool.iter_postings())<block_end>pdrs=[]<for_stmt>runname,fieldnames,segment results<block_start>fieldnames=set(fieldnames)|schemanames<line_sep>pdr=codec.per_document_reader(storage segment)<line_sep>pdrs.append(pdr)<line_sep>basedoc=self.docnum<line_sep>docmap=self.write_per_doc(fieldnames pdr)<assert_stmt>docmap<is><none><line_sep>items=self._read_and_renumber_run(runname basedoc)<line_sep>sources.append(items)<block_end># Create a MultiLengths object combining the length files from the
# subtask segments
self.perdocwriter.close()<line_sep>pdrs.insert(0 self.per_document_reader())<line_sep>mpdr=base.MultiPerDocumentReader(pdrs)<try_stmt># Merge the iterators into the field writer
<block_start>self.fieldwriter.add_postings(schema mpdr imerge(sources))<block_end><finally_stmt><block_start>mpdr.close()<block_end>self._added=<true><block_end><block_end><class_stmt>SerialMpWriter(MpWriter)# A non-parallel version of the MpWriter for testing purposes
<block_start><def_stmt>__init__ self ix procs=<none> batchsize=100 subargs=<none> **kwargs<block_start>SegmentWriter.__init__(self ix **kwargs)<line_sep>self.procs=procs<or>cpu_count()<line_sep>self.batchsize=batchsize<line_sep>self.subargs=subargs<if>subargs<else>kwargs<line_sep>self.tasks=[SegmentWriter(ix _lk=<false> **self.subargs)<for>_ xrange(self.procs)]<line_sep>self.pointer=0<line_sep>self._added_sub=<false><block_end><def_stmt>add_document self **fields<block_start>self.tasks[self.pointer].add_document(**fields)<line_sep>self.pointer=(self.pointer+1)%len(self.tasks)<line_sep>self._added_sub=<true><block_end><def_stmt>_commit self mergetype optimize merge# Pull a (run_file_name, segment) tuple off the result queue for each
# sub-task, representing the final results of the task
# Merge existing segments
<block_start>finalsegments=self._merge_segments(mergetype optimize merge)<line_sep>results=[]<for_stmt>writer self.tasks<block_start>results.append(finish_subsegment(writer))<block_end>self._merge_subsegments(results mergetype)<line_sep>self._close_segment()<line_sep>self._assemble_segment()<line_sep>finalsegments.append(self.get_segment())<line_sep>self._commit_toc(finalsegments)<line_sep>self._finish()<block_end><block_end># For compatibility with old multiproc module
<class_stmt>MultiSegmentWriter(MpWriter)<block_start><def_stmt>__init__ self *args **kwargs<block_start>MpWriter.__init__(self *args **kwargs)<line_sep>self.multisegment=<true><block_end><block_end> |
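# Editorial note: the usage sketch below is not part of the original module. It shows, under the
# assumption of a Whoosh-style FileIndex object `ix` opened elsewhere and an iterable of field
# dictionaries `docs`, how the multiprocess writer defined above might be driven; `procs` and
# `batchsize` mirror the keyword arguments accepted by the constructors above.
<def_stmt>example_parallel_indexing ix docs<block_start>writer=MpWriter(ix procs=4 batchsize=100)<for_stmt>fields docs<block_start>writer.add_document(**fields)<block_end>writer.commit()<block_end>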
<import_stmt>json<import_stmt>time<import_stmt>pytest<import_from_stmt>anchore_engine.configuration localconfig<import_from_stmt>anchore_engine.db FeedGroupMetadata FeedMetadata GemMetadata Image NpmMetadata NvdMetadata Vulnerability session_scope <import_from_stmt>anchore_engine.services.policy_engine.engine.tasks FeedsUpdateTask ImageLoadTask <import_from_stmt>anchore_engine.subsys logger<line_sep>logger.enable_test_logging()<line_sep>localconfig.localconfig.update({"feeds":{"sync_enabled":<true> "selective_sync":{"enabled":<false> "feeds":{}}}})<line_sep>@pytest.mark.skip("Skipping due to long run time, will fix later")<def_stmt>test_feed_task test_data_env anchore_db<block_start>logger.info("Running a feed sync with config: {}".format(localconfig.get_config()))<line_sep>t=FeedsUpdateTask()<line_sep>t.execute()<with_stmt>session_scope()<as>db<block_start>feeds=db.query(FeedMetadata).all()<line_sep>logger.info("{}".format(feeds))<assert_stmt>len(feeds)<eq>4# packages, vulns, snyk, nvd
feed_groups=db.query(FeedGroupMetadata).all()<line_sep># See the tests/data/test_data_env/feeds dir for the proper count here
logger.info("{}".format(feed_groups))<assert_stmt>len(feed_groups)<eq>11<line_sep># ToDo: set the source data to a small number and make this an exact count
<assert_stmt>db.query(Vulnerability).count()<g>0<assert_stmt>db.query(NpmMetadata).count()<g>0<assert_stmt>db.query(GemMetadata).count()<g>0<assert_stmt>db.query(NvdMetadata).count()<eq>0<block_end><block_end><def_stmt>test_image_load test_data_env<block_start><for_stmt>f test_data_env.image_exports()<block_start>logger.info("Testing image export loading into the db")<with_stmt>open(f[1])<as>infile<block_start>json_data=json.load(infile)<line_sep>image_id=(json_data[0]["image"]["imagedata"]["image_report"]["meta"]["imageId"]<if>type(json_data)<eq>list<else>json_data["image_report"]["meta"]["imageId"])<line_sep>logger.info("Using image id: "+image_id)<block_end>t=time.time()<line_sep>task=ImageLoadTask(user_id="0" image_id=image_id url="file://"+f[1] force_reload=<true>)<line_sep>load_result=task.execute()<line_sep>load_duration=time.time()-t<line_sep>logger.info("Load complete for {}. Took: {} sec for db load. Result: {}".format(f load_duration load_result))<with_stmt>session_scope()<as>db<block_start><assert_stmt>(db.query(Image).filter_by(id=image_id user_id="0").one_or_none()<is><not><none>)<block_end><block_end><block_end> |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>unittest<import_stmt>torch<import_from_stmt>monai.utils optional_import<import_from_stmt>tests.utils SkipIfNoModule<try_stmt><block_start>_,has_ignite=optional_import("ignite")<import_from_stmt>ignite.engine Engine Events<import_from_stmt>monai.handlers MetricLogger<block_end><except_stmt>ImportError<block_start>has_ignite=<false><block_end><class_stmt>TestHandlerMetricLogger(unittest.TestCase)<block_start>@SkipIfNoModule("ignite")<def_stmt>test_metric_logging self<block_start>dummy_name="dummy"<line_sep># set up engine
<def_stmt>_train_func engine batch<block_start><return>torch.tensor(0.0)<block_end>engine=Engine(_train_func)<line_sep># set up dummy metric
@engine.on(Events.EPOCH_COMPLETED)<def_stmt>_update_metric engine<block_start>engine.state.metrics[dummy_name]=1<block_end># set up testing handler
handler=MetricLogger(loss_transform=<lambda>output:output.item())<line_sep>handler.attach(engine)<line_sep>engine.run(range(3) max_epochs=2)<line_sep>expected_loss=[(1 0.0) (2 0.0) (3 0.0) (4 0.0) (5 0.0) (6 0.0)]<line_sep>expected_metric=[(4 1) (5 1) (6 1)]<line_sep>self.assertSetEqual({dummy_name} set(handler.metrics))<line_sep>self.assertListEqual(expected_loss handler.loss)<line_sep>self.assertListEqual(expected_metric handler.metrics[dummy_name])<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end> |
<import_from_stmt>datetime datetime timedelta timezone<import_stmt>boto3<def_stmt>get_delete_data older_days<block_start>delete_time=datetime.now(tz=timezone.utc)-timedelta(days=older_days)<line_sep><return>delete_time<block_end><def_stmt>is_ignore_shutdown tags<block_start><for_stmt>tag tags<block_start>print("K "+str(tag['Key'])+" is "+str(tag['Value']))<if_stmt>str(tag['Key'])<eq>'excludepower'<and>str(tag['Value'])<eq>'true'<block_start>print("Not stopping K "+str(tag['Key'])+" is "+str(tag['Value']))<line_sep><return><true><block_end><block_end><return><false><block_end><def_stmt>is_unassigned tags<block_start><if_stmt>'user'<not><in>[t['Key']<for>t tags]<block_start><return><true><block_end><return><false><block_end><class_stmt>Ec2Instances(object)<block_start><def_stmt>__init__ self region<block_start>print("region "+region)<line_sep># If you are not using the AWS Toolkit, you will need to pass your access key and secret key here
# client = boto3.client('rds', region_name=region_name, aws_access_key_id=aws_access_key_id,
# aws_secret_access_key=aws_secret_access_key)
self.ec2=boto3.client('ec2' region_name=region)<block_end><def_stmt>delete_snapshots self older_days=2<block_start>delete_snapshots_num=0<line_sep>snapshots=self.get_user_created_snapshots()<for_stmt>snapshot snapshots['Snapshots']<block_start>fmt_start_time=snapshot['StartTime']<if_stmt>fmt_start_time<l>get_delete_data(older_days)<block_start><try_stmt><block_start>self.delete_snapshot(snapshot['SnapshotId'])<line_sep>delete_snapshots_num<augadd>1<block_end><except_stmt>Exception<as>e<block_start>print(e)<block_end><block_end><block_end><return>delete_snapshots_num<block_end><def_stmt>get_user_created_snapshots self<block_start>snapshots=self.ec2.describe_snapshots(Filters=[{'Name':'owner-id' 'Values':['your owner id'] }])<line_sep># Filters=[{'Name': 'description', 'Values': ['Created by Nimesa']}]
<return>snapshots<block_end><def_stmt>delete_available_volumes self<block_start>volumes=self.ec2.describe_volumes()['Volumes']<for_stmt>volume volumes<block_start><if_stmt>volume['State']<eq>"available"<block_start>self.ec2.delete_volume(VolumeId=volume['VolumeId'])<block_end><block_end><block_end><def_stmt>delete_snapshot self snapshot_id<block_start>self.ec2.delete_snapshot(SnapshotId=snapshot_id)<block_end><def_stmt>shutdown self<block_start>instances=self.ec2.describe_instances()<line_sep>instance_to_stop=[]<line_sep>instance_to_terminate=[]<for_stmt>res instances['Reservations']<block_start><for_stmt>instance res['Instances']<block_start>tags=instance.get('Tags')<if_stmt>tags<is><none><block_start>instance_to_terminate.append(instance['InstanceId'])<line_sep><continue><block_end><if_stmt>is_unassigned(tags)<block_start>print("instance_to_terminate "+instance['InstanceId'])<line_sep>instance_to_terminate.append(instance['InstanceId'])<block_end><if_stmt>is_ignore_shutdown(tags)<block_start><continue><block_end><if_stmt>instance['State']['Code']<eq>16<block_start>instance_to_stop.append(instance['InstanceId'])<block_end><block_end><block_end><if_stmt>any(instance_to_stop)<block_start>self.ec2.stop_instances(InstanceIds=instance_to_stop)<block_end><if_stmt>any(instance_to_terminate)<block_start>print(instance_to_terminate)<line_sep>self.ec2.terminate_instances(InstanceIds=instance_to_terminate)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>ec2=Ec2Instances('us-east-1')<line_sep>ec2.delete_snapshots(3)<line_sep>ec2.shutdown()<block_end> |
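# Editorial note: the sketch below is not part of the original script. It illustrates how the
# Ec2Instances helper defined above could be wired into an AWS Lambda handler that sweeps every
# region; the handler name and the 3-day snapshot retention are assumptions, and
# describe_regions() is the only additional EC2 API call introduced here.
<def_stmt>lambda_handler event context<block_start>regions=[r['RegionName']<for>r boto3.client('ec2').describe_regions()['Regions']]<for_stmt>region regions<block_start>instances=Ec2Instances(region)<line_sep>instances.delete_snapshots(3)<line_sep>instances.shutdown()<block_end><return>{'handled_regions':len(regions)}<block_end>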
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
<import_from_stmt>collections namedtuple<import_from_stmt>unittest mock skipUnless<import_stmt>pandas<as>pd<import_from_stmt>sqlalchemy types<import_from_stmt>sqlalchemy.engine.result RowProxy<import_from_stmt>sqlalchemy.sql select<import_from_stmt>superset.db_engine_specs.presto PrestoEngineSpec<import_from_stmt>superset.errors ErrorLevel SupersetError SupersetErrorType<import_from_stmt>superset.sql_parse ParsedQuery<import_from_stmt>superset.utils.core DatasourceName GenericDataType<import_from_stmt>tests.integration_tests.db_engine_specs.base_tests TestDbEngineSpec<class_stmt>TestPrestoDbEngineSpec(TestDbEngineSpec)<block_start>@skipUnless(TestDbEngineSpec.is_module_installed("pyhive") "pyhive not installed")<def_stmt>test_get_datatype_presto self<block_start>self.assertEqual("STRING" PrestoEngineSpec.get_datatype("string"))<block_end><def_stmt>test_presto_get_view_names_return_empty_list self # pylint: disable=invalid-name
<block_start>self.assertEqual([] PrestoEngineSpec.get_view_names(mock.ANY mock.ANY mock.ANY))<block_end>@mock.patch("superset.db_engine_specs.presto.is_feature_enabled")<def_stmt>test_get_view_names self mock_is_feature_enabled<block_start>mock_is_feature_enabled.return_value=<true><line_sep>mock_execute=mock.MagicMock()<line_sep>mock_fetchall=mock.MagicMock(return_value=[["a" "b," "c"] ["d" "e"]])<line_sep>database=mock.MagicMock()<line_sep>database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.execute=(mock_execute)<line_sep>database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.fetchall=(mock_fetchall)<line_sep>result=PrestoEngineSpec.get_view_names(database mock.Mock() <none>)<line_sep>mock_execute.assert_called_once_with("SELECT table_name FROM information_schema.views" {})<assert_stmt>result<eq>["a" "d"]<block_end>@mock.patch("superset.db_engine_specs.presto.is_feature_enabled")<def_stmt>test_get_view_names_with_schema self mock_is_feature_enabled<block_start>mock_is_feature_enabled.return_value=<true><line_sep>mock_execute=mock.MagicMock()<line_sep>mock_fetchall=mock.MagicMock(return_value=[["a" "b," "c"] ["d" "e"]])<line_sep>database=mock.MagicMock()<line_sep>database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.execute=(mock_execute)<line_sep>database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.fetchall=(mock_fetchall)<line_sep>schema="schema"<line_sep>result=PrestoEngineSpec.get_view_names(database mock.Mock() schema)<line_sep>mock_execute.assert_called_once_with("SELECT table_name FROM information_schema.views "<concat>"WHERE table_schema=%(schema)s" {"schema":schema} )<assert_stmt>result<eq>["a" "d"]<block_end><def_stmt>verify_presto_column self column expected_results<block_start>inspector=mock.Mock()<line_sep>inspector.engine.dialect.identifier_preparer.quote_identifier=mock.Mock()<line_sep>keymap={"Column":(<none> <none> 0) "Type":(<none> <none> 1) "Null":(<none> <none> 2) }<line_sep>row=RowProxy(mock.Mock() column [<none> <none> <none> <none>] keymap)<line_sep>inspector.bind.execute=mock.Mock(return_value=[row])<line_sep>results=PrestoEngineSpec.get_columns(inspector "" "")<line_sep>self.assertEqual(len(expected_results) len(results))<for_stmt>expected_result,result zip(expected_results results)<block_start>self.assertEqual(expected_result[0] result["name"])<line_sep>self.assertEqual(expected_result[1] str(result["type"]))<block_end><block_end><def_stmt>test_presto_get_column self<block_start>presto_column=("column_name" "boolean" "")<line_sep>expected_results=[("column_name" "BOOLEAN")]<line_sep>self.verify_presto_column(presto_column expected_results)<block_end>@mock.patch.dict("superset.extensions.feature_flag_manager._feature_flags" {"PRESTO_EXPAND_DATA":<true>} clear=<true> )<def_stmt>test_presto_get_simple_row_column self<block_start>presto_column=("column_name" "row(nested_obj double)" "")<line_sep>expected_results=[("column_name" "ROW") ("column_name.nested_obj" "FLOAT")]<line_sep>self.verify_presto_column(presto_column expected_results)<block_end>@mock.patch.dict("superset.extensions.feature_flag_manager._feature_flags" {"PRESTO_EXPAND_DATA":<true>} clear=<true> )<def_stmt>test_presto_get_simple_row_column_with_name_containing_whitespace self<block_start>presto_column=("column name" "row(nested_obj double)" "")<line_sep>expected_results=[("column name" "ROW") ("column name.nested_obj" 
"FLOAT")]<line_sep>self.verify_presto_column(presto_column expected_results)<block_end>@mock.patch.dict("superset.extensions.feature_flag_manager._feature_flags" {"PRESTO_EXPAND_DATA":<true>} clear=<true> )<def_stmt>test_presto_get_simple_row_column_with_tricky_nested_field_name self<block_start>presto_column=("column_name" 'row("Field Name(Tricky, Name)" double)' "")<line_sep>expected_results=[("column_name" "ROW") ('column_name."Field Name(Tricky, Name)"' "FLOAT") ]<line_sep>self.verify_presto_column(presto_column expected_results)<block_end>@mock.patch.dict("superset.extensions.feature_flag_manager._feature_flags" {"PRESTO_EXPAND_DATA":<true>} clear=<true> )<def_stmt>test_presto_get_simple_array_column self<block_start>presto_column=("column_name" "array(double)" "")<line_sep>expected_results=[("column_name" "ARRAY")]<line_sep>self.verify_presto_column(presto_column expected_results)<block_end>@mock.patch.dict("superset.extensions.feature_flag_manager._feature_flags" {"PRESTO_EXPAND_DATA":<true>} clear=<true> )<def_stmt>test_presto_get_row_within_array_within_row_column self<block_start>presto_column=("column_name" "row(nested_array array(row(nested_row double)), nested_obj double)" "" )<line_sep>expected_results=[("column_name" "ROW") ("column_name.nested_array" "ARRAY") ("column_name.nested_array.nested_row" "FLOAT") ("column_name.nested_obj" "FLOAT") ]<line_sep>self.verify_presto_column(presto_column expected_results)<block_end>@mock.patch.dict("superset.extensions.feature_flag_manager._feature_flags" {"PRESTO_EXPAND_DATA":<true>} clear=<true> )<def_stmt>test_presto_get_array_within_row_within_array_column self<block_start>presto_column=("column_name" "array(row(nested_array array(double), nested_obj double))" "" )<line_sep>expected_results=[("column_name" "ARRAY") ("column_name.nested_array" "ARRAY") ("column_name.nested_obj" "FLOAT") ]<line_sep>self.verify_presto_column(presto_column expected_results)<block_end><def_stmt>test_presto_get_fields self<block_start>cols=[{"name":"column"} {"name":"column.nested_obj"} {"name":'column."quoted.nested obj"'} ]<line_sep>actual_results=PrestoEngineSpec._get_fields(cols)<line_sep>expected_results=[{"name":'"column"' "label":"column"} {"name":'"column"."nested_obj"' "label":"column.nested_obj"} {"name":'"column"."quoted.nested obj"' "label":'column."quoted.nested obj"' } ]<for_stmt>actual_result,expected_result zip(actual_results expected_results)<block_start>self.assertEqual(actual_result.element.name expected_result["name"])<line_sep>self.assertEqual(actual_result.name expected_result["label"])<block_end><block_end>@mock.patch.dict("superset.extensions.feature_flag_manager._feature_flags" {"PRESTO_EXPAND_DATA":<true>} clear=<true> )<def_stmt>test_presto_expand_data_with_simple_structural_columns self<block_start>cols=[{"name":"row_column" "type":"ROW(NESTED_OBJ VARCHAR)"} {"name":"array_column" "type":"ARRAY(BIGINT)"} ]<line_sep>data=[{"row_column":["a"] "array_column":[1 2 3]} {"row_column":["b"] "array_column":[4 5 6]} ]<line_sep>actual_cols,actual_data,actual_expanded_cols=PrestoEngineSpec.expand_data(cols data)<line_sep>expected_cols=[{"name":"row_column" "type":"ROW(NESTED_OBJ VARCHAR)"} {"name":"row_column.nested_obj" "type":"VARCHAR"} {"name":"array_column" "type":"ARRAY(BIGINT)"} ]<line_sep>expected_data=[{"array_column":1 "row_column":["a"] "row_column.nested_obj":"a"} {"array_column":2 "row_column":"" "row_column.nested_obj":""} {"array_column":3 "row_column":"" "row_column.nested_obj":""} {"array_column":4 "row_column":["b"] 
"row_column.nested_obj":"b"} {"array_column":5 "row_column":"" "row_column.nested_obj":""} {"array_column":6 "row_column":"" "row_column.nested_obj":""} ]<line_sep>expected_expanded_cols=[{"name":"row_column.nested_obj" "type":"VARCHAR"}]<line_sep>self.assertEqual(actual_cols expected_cols)<line_sep>self.assertEqual(actual_data expected_data)<line_sep>self.assertEqual(actual_expanded_cols expected_expanded_cols)<block_end>@mock.patch.dict("superset.extensions.feature_flag_manager._feature_flags" {"PRESTO_EXPAND_DATA":<true>} clear=<true> )<def_stmt>test_presto_expand_data_with_complex_row_columns self<block_start>cols=[{"name":"row_column" "type":"ROW(NESTED_OBJ1 VARCHAR, NESTED_ROW ROW(NESTED_OBJ2 VARCHAR))" }]<line_sep>data=[{"row_column":["a1" ["a2"]]} {"row_column":["b1" ["b2"]]}]<line_sep>actual_cols,actual_data,actual_expanded_cols=PrestoEngineSpec.expand_data(cols data)<line_sep>expected_cols=[{"name":"row_column" "type":"ROW(NESTED_OBJ1 VARCHAR, NESTED_ROW ROW(NESTED_OBJ2 VARCHAR))" } {"name":"row_column.nested_obj1" "type":"VARCHAR"} {"name":"row_column.nested_row" "type":"ROW(NESTED_OBJ2 VARCHAR)"} {"name":"row_column.nested_row.nested_obj2" "type":"VARCHAR"} ]<line_sep>expected_data=[{"row_column":["a1" ["a2"]] "row_column.nested_obj1":"a1" "row_column.nested_row":["a2"] "row_column.nested_row.nested_obj2":"a2" } {"row_column":["b1" ["b2"]] "row_column.nested_obj1":"b1" "row_column.nested_row":["b2"] "row_column.nested_row.nested_obj2":"b2" } ]<line_sep>expected_expanded_cols=[{"name":"row_column.nested_obj1" "type":"VARCHAR"} {"name":"row_column.nested_row" "type":"ROW(NESTED_OBJ2 VARCHAR)"} {"name":"row_column.nested_row.nested_obj2" "type":"VARCHAR"} ]<line_sep>self.assertEqual(actual_cols expected_cols)<line_sep>self.assertEqual(actual_data expected_data)<line_sep>self.assertEqual(actual_expanded_cols expected_expanded_cols)<block_end>@mock.patch.dict("superset.extensions.feature_flag_manager._feature_flags" {"PRESTO_EXPAND_DATA":<true>} clear=<true> )<def_stmt>test_presto_expand_data_with_complex_row_columns_and_null_values self<block_start>cols=[{"name":"row_column" "type":"ROW(NESTED_ROW ROW(NESTED_OBJ VARCHAR))" }]<line_sep>data=[{"row_column":'[["a"]]'} {"row_column":"[[null]]"} {"row_column":"[null]"} {"row_column":"null"} ]<line_sep>actual_cols,actual_data,actual_expanded_cols=PrestoEngineSpec.expand_data(cols data)<line_sep>expected_cols=[{"name":"row_column" "type":"ROW(NESTED_ROW ROW(NESTED_OBJ VARCHAR))" } {"name":"row_column.nested_row" "type":"ROW(NESTED_OBJ VARCHAR)"} {"name":"row_column.nested_row.nested_obj" "type":"VARCHAR"} ]<line_sep>expected_data=[{"row_column":[["a"]] "row_column.nested_row":["a"] "row_column.nested_row.nested_obj":"a" } {"row_column":[[<none>]] "row_column.nested_row":[<none>] "row_column.nested_row.nested_obj":<none> } {"row_column":[<none>] "row_column.nested_row":<none> "row_column.nested_row.nested_obj":"" } {"row_column":<none> "row_column.nested_row":"" "row_column.nested_row.nested_obj":"" } ]<line_sep>expected_expanded_cols=[{"name":"row_column.nested_row" "type":"ROW(NESTED_OBJ VARCHAR)"} {"name":"row_column.nested_row.nested_obj" "type":"VARCHAR"} ]<line_sep>self.assertEqual(actual_cols expected_cols)<line_sep>self.assertEqual(actual_data expected_data)<line_sep>self.assertEqual(actual_expanded_cols expected_expanded_cols)<block_end>@mock.patch.dict("superset.extensions.feature_flag_manager._feature_flags" {"PRESTO_EXPAND_DATA":<true>} clear=<true> )<def_stmt>test_presto_expand_data_with_complex_array_columns 
self<block_start>cols=[{"name":"int_column" "type":"BIGINT"} {"name":"array_column" "type":"ARRAY(ROW(NESTED_ARRAY ARRAY(ROW(NESTED_OBJ VARCHAR))))" } ]<line_sep>data=[{"int_column":1 "array_column":[[[["a"] ["b"]]] [[["c"] ["d"]]]]} {"int_column":2 "array_column":[[[["e"] ["f"]]] [[["g"] ["h"]]]]} ]<line_sep>actual_cols,actual_data,actual_expanded_cols=PrestoEngineSpec.expand_data(cols data)<line_sep>expected_cols=[{"name":"int_column" "type":"BIGINT"} {"name":"array_column" "type":"ARRAY(ROW(NESTED_ARRAY ARRAY(ROW(NESTED_OBJ VARCHAR))))" } {"name":"array_column.nested_array" "type":"ARRAY(ROW(NESTED_OBJ VARCHAR))" } {"name":"array_column.nested_array.nested_obj" "type":"VARCHAR"} ]<line_sep>expected_data=[{"array_column":[[["a"] ["b"]]] "array_column.nested_array":["a"] "array_column.nested_array.nested_obj":"a" "int_column":1 } {"array_column":"" "array_column.nested_array":["b"] "array_column.nested_array.nested_obj":"b" "int_column":"" } {"array_column":[[["c"] ["d"]]] "array_column.nested_array":["c"] "array_column.nested_array.nested_obj":"c" "int_column":"" } {"array_column":"" "array_column.nested_array":["d"] "array_column.nested_array.nested_obj":"d" "int_column":"" } {"array_column":[[["e"] ["f"]]] "array_column.nested_array":["e"] "array_column.nested_array.nested_obj":"e" "int_column":2 } {"array_column":"" "array_column.nested_array":["f"] "array_column.nested_array.nested_obj":"f" "int_column":"" } {"array_column":[[["g"] ["h"]]] "array_column.nested_array":["g"] "array_column.nested_array.nested_obj":"g" "int_column":"" } {"array_column":"" "array_column.nested_array":["h"] "array_column.nested_array.nested_obj":"h" "int_column":"" } ]<line_sep>expected_expanded_cols=[{"name":"array_column.nested_array" "type":"ARRAY(ROW(NESTED_OBJ VARCHAR))" } {"name":"array_column.nested_array.nested_obj" "type":"VARCHAR"} ]<line_sep>self.assertEqual(actual_cols expected_cols)<line_sep>self.assertEqual(actual_data expected_data)<line_sep>self.assertEqual(actual_expanded_cols expected_expanded_cols)<block_end><def_stmt>test_presto_extra_table_metadata self<block_start>db=mock.Mock()<line_sep>db.get_indexes=mock.Mock(return_value=[{"column_names":["ds" "hour"]}])<line_sep>db.get_extra=mock.Mock(return_value={})<line_sep>df=pd.DataFrame({"ds":["01-01-19"] "hour":[1]})<line_sep>db.get_df=mock.Mock(return_value=df)<line_sep>PrestoEngineSpec.get_create_view=mock.Mock(return_value=<none>)<line_sep>result=PrestoEngineSpec.extra_table_metadata(db "test_table" "test_schema")<line_sep>self.assertEqual({"ds":"01-01-19" "hour":1} result["partitions"]["latest"])<block_end><def_stmt>test_presto_where_latest_partition self<block_start>db=mock.Mock()<line_sep>db.get_indexes=mock.Mock(return_value=[{"column_names":["ds" "hour"]}])<line_sep>db.get_extra=mock.Mock(return_value={})<line_sep>df=pd.DataFrame({"ds":["01-01-19"] "hour":[1]})<line_sep>db.get_df=mock.Mock(return_value=df)<line_sep>columns=[{"name":"ds"} {"name":"hour"}]<line_sep>result=PrestoEngineSpec.where_latest_partition("test_table" "test_schema" db select() columns)<line_sep>query_result=str(result.compile(compile_kwargs={"literal_binds":<true>}))<line_sep>self.assertEqual("SELECT \nWHERE ds = '01-01-19' AND hour = 1" query_result)<block_end><def_stmt>test_convert_dttm self<block_start>dttm=self.get_dttm()<line_sep>self.assertEqual(PrestoEngineSpec.convert_dttm("DATE" dttm) "from_iso8601_date('2019-01-02')" )<line_sep>self.assertEqual(PrestoEngineSpec.convert_dttm("TIMESTAMP" dttm) "from_iso8601_timestamp('2019-01-02T03:04:05.678900')" 
)<block_end><def_stmt>test_query_cost_formatter self<block_start>raw_cost=[{"inputTableColumnInfos":[{"table":{"catalog":"hive" "schemaTable":{"schema":"default" "table":"fact_passenger_state" } } "columnConstraints":[{"columnName":"ds" "typeSignature":"varchar" "domain":{"nullsAllowed":<false> "ranges":[{"low":{"value":"2019-07-10" "bound":"EXACTLY" } "high":{"value":"2019-07-10" "bound":"EXACTLY" } }] } }] "estimate":{"outputRowCount":9.04969899e8 "outputSizeInBytes":3.54143678301e11 "cpuCost":3.54143678301e11 "maxMemory":0.0 "networkCost":0.0 } }] "estimate":{"outputRowCount":9.04969899e8 "outputSizeInBytes":3.54143678301e11 "cpuCost":3.54143678301e11 "maxMemory":0.0 "networkCost":3.54143678301e11 } }]<line_sep>formatted_cost=PrestoEngineSpec.query_cost_formatter(raw_cost)<line_sep>expected=[{"Output count":"904 M rows" "Output size":"354 GB" "CPU cost":"354 G" "Max memory":"0 B" "Network cost":"354 G" }]<line_sep>self.assertEqual(formatted_cost expected)<block_end>@mock.patch.dict("superset.extensions.feature_flag_manager._feature_flags" {"PRESTO_EXPAND_DATA":<true>} clear=<true> )<def_stmt>test_presto_expand_data_array self<block_start>cols=[{"name":"event_id" "type":"VARCHAR" "is_date":<false>} {"name":"timestamp" "type":"BIGINT" "is_date":<false>} {"name":"user" "type":"ROW(ID BIGINT, FIRST_NAME VARCHAR, LAST_NAME VARCHAR)" "is_date":<false> } ]<line_sep>data=[{"event_id":"abcdef01-2345-6789-abcd-ef0123456789" "timestamp":"1595895506219" "user":'[1, "JOHN", "DOE"]' }]<line_sep>actual_cols,actual_data,actual_expanded_cols=PrestoEngineSpec.expand_data(cols data)<line_sep>expected_cols=[{"name":"event_id" "type":"VARCHAR" "is_date":<false>} {"name":"timestamp" "type":"BIGINT" "is_date":<false>} {"name":"user" "type":"ROW(ID BIGINT, FIRST_NAME VARCHAR, LAST_NAME VARCHAR)" "is_date":<false> } {"name":"user.id" "type":"BIGINT"} {"name":"user.first_name" "type":"VARCHAR"} {"name":"user.last_name" "type":"VARCHAR"} ]<line_sep>expected_data=[{"event_id":"abcdef01-2345-6789-abcd-ef0123456789" "timestamp":"1595895506219" "user":[1 "JOHN" "DOE"] "user.id":1 "user.first_name":"JOHN" "user.last_name":"DOE" }]<line_sep>expected_expanded_cols=[{"name":"user.id" "type":"BIGINT"} {"name":"user.first_name" "type":"VARCHAR"} {"name":"user.last_name" "type":"VARCHAR"} ]<line_sep>self.assertEqual(actual_cols expected_cols)<line_sep>self.assertEqual(actual_data expected_data)<line_sep>self.assertEqual(actual_expanded_cols expected_expanded_cols)<block_end><def_stmt>test_get_sqla_column_type self<block_start>column_spec=PrestoEngineSpec.get_column_spec("varchar(255)")<assert_stmt>isinstance(column_spec.sqla_type types.VARCHAR)<assert_stmt>column_spec.sqla_type.length<eq>255<line_sep>self.assertEqual(column_spec.generic_type GenericDataType.STRING)<line_sep>column_spec=PrestoEngineSpec.get_column_spec("varchar")<assert_stmt>isinstance(column_spec.sqla_type types.String)<assert_stmt>column_spec.sqla_type.length<is><none><line_sep>self.assertEqual(column_spec.generic_type GenericDataType.STRING)<line_sep>column_spec=PrestoEngineSpec.get_column_spec("char(10)")<assert_stmt>isinstance(column_spec.sqla_type types.CHAR)<assert_stmt>column_spec.sqla_type.length<eq>10<line_sep>self.assertEqual(column_spec.generic_type GenericDataType.STRING)<line_sep>column_spec=PrestoEngineSpec.get_column_spec("char")<assert_stmt>isinstance(column_spec.sqla_type types.CHAR)<assert_stmt>column_spec.sqla_type.length<is><none><line_sep>self.assertEqual(column_spec.generic_type 
GenericDataType.STRING)<line_sep>column_spec=PrestoEngineSpec.get_column_spec("integer")<assert_stmt>isinstance(column_spec.sqla_type types.Integer)<line_sep>self.assertEqual(column_spec.generic_type GenericDataType.NUMERIC)<line_sep>column_spec=PrestoEngineSpec.get_column_spec("time")<assert_stmt>isinstance(column_spec.sqla_type types.Time)<assert_stmt>type(column_spec.sqla_type).__name__<eq>"TemporalWrapperType"<line_sep>self.assertEqual(column_spec.generic_type GenericDataType.TEMPORAL)<line_sep>column_spec=PrestoEngineSpec.get_column_spec("timestamp")<assert_stmt>isinstance(column_spec.sqla_type types.TIMESTAMP)<assert_stmt>type(column_spec.sqla_type).__name__<eq>"TemporalWrapperType"<line_sep>self.assertEqual(column_spec.generic_type GenericDataType.TEMPORAL)<line_sep>sqla_type=PrestoEngineSpec.get_sqla_column_type(<none>)<assert_stmt>sqla_type<is><none><block_end>@mock.patch("superset.utils.feature_flag_manager.FeatureFlagManager.is_feature_enabled")@mock.patch("superset.db_engine_specs.base.BaseEngineSpec.get_table_names")@mock.patch("superset.db_engine_specs.presto.PrestoEngineSpec.get_view_names")<def_stmt>test_get_table_names_no_split_views_from_tables self mock_get_view_names mock_get_table_names mock_is_feature_enabled<block_start>mock_get_view_names.return_value=["view1" "view2"]<line_sep>table_names=["table1" "table2" "view1" "view2"]<line_sep>mock_get_table_names.return_value=table_names<line_sep>mock_is_feature_enabled.return_value=<false><line_sep>tables=PrestoEngineSpec.get_table_names(mock.Mock() mock.Mock() <none>)<assert_stmt>tables<eq>table_names<block_end>@mock.patch("superset.utils.feature_flag_manager.FeatureFlagManager.is_feature_enabled")@mock.patch("superset.db_engine_specs.base.BaseEngineSpec.get_table_names")@mock.patch("superset.db_engine_specs.presto.PrestoEngineSpec.get_view_names")<def_stmt>test_get_table_names_split_views_from_tables self mock_get_view_names mock_get_table_names mock_is_feature_enabled<block_start>mock_get_view_names.return_value=["view1" "view2"]<line_sep>table_names=["table1" "table2" "view1" "view2"]<line_sep>mock_get_table_names.return_value=table_names<line_sep>mock_is_feature_enabled.return_value=<true><line_sep>tables=PrestoEngineSpec.get_table_names(mock.Mock() mock.Mock() <none>)<assert_stmt>sorted(tables)<eq>sorted(table_names)<block_end>@mock.patch("superset.utils.feature_flag_manager.FeatureFlagManager.is_feature_enabled")@mock.patch("superset.db_engine_specs.base.BaseEngineSpec.get_table_names")@mock.patch("superset.db_engine_specs.presto.PrestoEngineSpec.get_view_names")<def_stmt>test_get_table_names_split_views_from_tables_no_tables self mock_get_view_names mock_get_table_names mock_is_feature_enabled<block_start>mock_get_view_names.return_value=[]<line_sep>table_names=[]<line_sep>mock_get_table_names.return_value=table_names<line_sep>mock_is_feature_enabled.return_value=<true><line_sep>tables=PrestoEngineSpec.get_table_names(mock.Mock() mock.Mock() <none>)<assert_stmt>tables<eq>[]<block_end><def_stmt>test_get_full_name self<block_start>names=[("part1" "part2") ("part11" "part22") ]<line_sep>result=PrestoEngineSpec._get_full_name(names)<assert_stmt>result<eq>"part1.part11"<block_end><def_stmt>test_get_full_name_empty_tuple self<block_start>names=[("part1" "part2") ("" "part3") ("part4" "part5") ("" "part6") ]<line_sep>result=PrestoEngineSpec._get_full_name(names)<assert_stmt>result<eq>"part1.part4"<block_end><def_stmt>test_split_data_type self<block_start>data_type="value1 
value2"<line_sep>result=PrestoEngineSpec._split_data_type(data_type " ")<assert_stmt>result<eq>["value1" "value2"]<line_sep>data_type="value1,value2"<line_sep>result=PrestoEngineSpec._split_data_type(data_type ",")<assert_stmt>result<eq>["value1" "value2"]<line_sep>data_type='"value,1",value2'<line_sep>result=PrestoEngineSpec._split_data_type(data_type ",")<assert_stmt>result<eq>['"value,1"' "value2"]<block_end><def_stmt>test_show_columns self<block_start>inspector=mock.MagicMock()<line_sep>inspector.engine.dialect.identifier_preparer.quote_identifier=(<lambda>x:f'"{x}"')<line_sep>mock_execute=mock.MagicMock(return_value=["a" "b"])<line_sep>inspector.bind.execute=mock_execute<line_sep>table_name="table_name"<line_sep>result=PrestoEngineSpec._show_columns(inspector table_name <none>)<assert_stmt>result<eq>["a" "b"]<line_sep>mock_execute.assert_called_once_with(f'SHOW COLUMNS FROM "{table_name}"')<block_end><def_stmt>test_show_columns_with_schema self<block_start>inspector=mock.MagicMock()<line_sep>inspector.engine.dialect.identifier_preparer.quote_identifier=(<lambda>x:f'"{x}"')<line_sep>mock_execute=mock.MagicMock(return_value=["a" "b"])<line_sep>inspector.bind.execute=mock_execute<line_sep>table_name="table_name"<line_sep>schema="schema"<line_sep>result=PrestoEngineSpec._show_columns(inspector table_name schema)<assert_stmt>result<eq>["a" "b"]<line_sep>mock_execute.assert_called_once_with(f'SHOW COLUMNS FROM "{schema}"."{table_name}"')<block_end><def_stmt>test_is_column_name_quoted self<block_start>column_name="mock"<assert_stmt>PrestoEngineSpec._is_column_name_quoted(column_name)<is><false><line_sep>column_name='"mock'<assert_stmt>PrestoEngineSpec._is_column_name_quoted(column_name)<is><false><line_sep>column_name='"moc"k'<assert_stmt>PrestoEngineSpec._is_column_name_quoted(column_name)<is><false><line_sep>column_name='"moc"k"'<assert_stmt>PrestoEngineSpec._is_column_name_quoted(column_name)<is><true><block_end>@mock.patch("superset.db_engine_specs.base.BaseEngineSpec.select_star")<def_stmt>test_select_star_no_presto_expand_data self mock_select_star<block_start>database=mock.Mock()<line_sep>table_name="table_name"<line_sep>engine=mock.Mock()<line_sep>cols=[{"col1":"val1"} {"col2":"val2"} ]<line_sep>PrestoEngineSpec.select_star(database table_name engine cols=cols)<line_sep>mock_select_star.assert_called_once_with(database table_name engine <none> 100 <false> <true> <true> cols)<block_end>@mock.patch("superset.db_engine_specs.presto.is_feature_enabled")@mock.patch("superset.db_engine_specs.base.BaseEngineSpec.select_star")<def_stmt>test_select_star_presto_expand_data self mock_select_star mock_is_feature_enabled<block_start>mock_is_feature_enabled.return_value=<true><line_sep>database=mock.Mock()<line_sep>table_name="table_name"<line_sep>engine=mock.Mock()<line_sep>cols=[{"name":"val1"} {"name":"val2<?!@#$312,/'][p098"} {"name":".val2"} {"name":"val2."} {"name":"val.2"} {"name":".val2."} ]<line_sep>PrestoEngineSpec.select_star(database table_name engine show_cols=<true> cols=cols)<line_sep>mock_select_star.assert_called_once_with(database table_name engine <none> 100 <true> <true> <true> [{"name":"val1"} {"name":"val2<?!@#$312,/'][p098"} ] )<block_end><def_stmt>test_estimate_statement_cost self<block_start>mock_cursor=mock.MagicMock()<line_sep>estimate_json={"a":"b"}<line_sep>mock_cursor.fetchone.return_value=['{"a": "b"}' ]<line_sep>result=PrestoEngineSpec.estimate_statement_cost("SELECT * FROM brth_names" 
mock_cursor)<assert_stmt>result<eq>estimate_json<block_end><def_stmt>test_estimate_statement_cost_invalid_syntax self<block_start>mock_cursor=mock.MagicMock()<line_sep>mock_cursor.execute.side_effect=Exception()<with_stmt>self.assertRaises(Exception)<block_start>PrestoEngineSpec.estimate_statement_cost("DROP TABLE brth_names" mock_cursor)<block_end><block_end><def_stmt>test_get_all_datasource_names self<block_start>df=pd.DataFrame.from_dict({"table_schema":["schema1" "schema2"] "table_name":["name1" "name2"]})<line_sep>database=mock.MagicMock()<line_sep>database.get_df.return_value=df<line_sep>result=PrestoEngineSpec.get_all_datasource_names(database "table")<line_sep>expected_result=[DatasourceName(schema="schema1" table="name1") DatasourceName(schema="schema2" table="name2") ]<assert_stmt>result<eq>expected_result<block_end><def_stmt>test_get_create_view self<block_start>mock_execute=mock.MagicMock()<line_sep>mock_fetchall=mock.MagicMock(return_value=[["a" "b," "c"] ["d" "e"]])<line_sep>database=mock.MagicMock()<line_sep>database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.execute=(mock_execute)<line_sep>database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.fetchall=(mock_fetchall)<line_sep>database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.poll.return_value=(<false>)<line_sep>schema="schema"<line_sep>table="table"<line_sep>result=PrestoEngineSpec.get_create_view(database schema=schema table=table)<assert_stmt>result<eq>"a"<line_sep>mock_execute.assert_called_once_with(f"SHOW CREATE VIEW {schema}.{table}")<block_end><def_stmt>test_get_create_view_exception self<block_start>mock_execute=mock.MagicMock(side_effect=Exception())<line_sep>database=mock.MagicMock()<line_sep>database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.execute=(mock_execute)<line_sep>schema="schema"<line_sep>table="table"<with_stmt>self.assertRaises(Exception)<block_start>PrestoEngineSpec.get_create_view(database schema=schema table=table)<block_end><block_end><def_stmt>test_get_create_view_database_error self<block_start><import_from_stmt>pyhive.exc DatabaseError<line_sep>mock_execute=mock.MagicMock(side_effect=DatabaseError())<line_sep>database=mock.MagicMock()<line_sep>database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.execute=(mock_execute)<line_sep>schema="schema"<line_sep>table="table"<line_sep>result=PrestoEngineSpec.get_create_view(database schema=schema table=table)<assert_stmt>result<is><none><block_end><def_stmt>test_extract_error_message_orig self<block_start>DatabaseError=namedtuple("DatabaseError" ["error_dict"])<line_sep>db_err=DatabaseError({"errorName":"name" "errorLocation":"location" "message":"msg"})<line_sep>exception=Exception()<line_sep>exception.orig=db_err<line_sep>result=PrestoEngineSpec._extract_error_message(exception)<assert_stmt>result<eq>"name at location: msg"<block_end><def_stmt>test_extract_error_message_db_errr self<block_start><import_from_stmt>pyhive.exc DatabaseError<line_sep>exception=DatabaseError({"message":"Err message"})<line_sep>result=PrestoEngineSpec._extract_error_message(exception)<assert_stmt>result<eq>"Err message"<block_end><def_stmt>test_extract_error_message_general_exception self<block_start>exception=Exception("Err message")<line_sep>result=PrestoEngineSpec._extract_error_message(exception)<assert_stmt>result<eq>"Err message"<block_end><def_stmt>test_extract_errors self<block_start>msg="Generic 
Error"<line_sep>result=PrestoEngineSpec.extract_errors(Exception(msg))<assert_stmt>result<eq>[SupersetError(message="Generic Error" error_type=SupersetErrorType.GENERIC_DB_ENGINE_ERROR level=ErrorLevel.ERROR extra={"engine_name":"Presto" "issue_codes":[{"code":1002 "message":"Issue 1002 - The database returned an unexpected error." }] } )]<line_sep>msg="line 1:8: Column 'bogus' cannot be resolved"<line_sep>result=PrestoEngineSpec.extract_errors(Exception(msg))<assert_stmt>result<eq>[SupersetError(message='We can\'t seem to resolve the column "bogus" at line 1:8.' error_type=SupersetErrorType.COLUMN_DOES_NOT_EXIST_ERROR level=ErrorLevel.ERROR extra={"engine_name":"Presto" "issue_codes":[{"code":1003 "message":"Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo." } {"code":1004 "message":"Issue 1004 - The column was deleted or renamed in the database." } ] } )]<line_sep>msg="line 1:15: Table 'tpch.tiny.region2' does not exist"<line_sep>result=PrestoEngineSpec.extract_errors(Exception(msg))<assert_stmt>result<eq>[SupersetError(message="The table \"'tpch.tiny.region2'\" does not exist. A valid table must be used to run this query." error_type=SupersetErrorType.TABLE_DOES_NOT_EXIST_ERROR level=ErrorLevel.ERROR extra={"engine_name":"Presto" "issue_codes":[{"code":1003 "message":"Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo." } {"code":1005 "message":"Issue 1005 - The table was deleted or renamed in the database." } ] } )]<line_sep>msg="line 1:15: Schema 'tin' does not exist"<line_sep>result=PrestoEngineSpec.extract_errors(Exception(msg))<assert_stmt>result<eq>[SupersetError(message='The schema "tin" does not exist. A valid schema must be used to run this query.' error_type=SupersetErrorType.SCHEMA_DOES_NOT_EXIST_ERROR level=ErrorLevel.ERROR extra={"engine_name":"Presto" "issue_codes":[{"code":1003 "message":"Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo." } {"code":1016 "message":"Issue 1005 - The schema was deleted or renamed in the database." } ] } )]<line_sep>msg=b"Access Denied: Invalid credentials"<line_sep>result=PrestoEngineSpec.extract_errors(Exception(msg) {"username":"alice"})<assert_stmt>result<eq>[SupersetError(message='Either the username "alice" or the password is incorrect.' error_type=SupersetErrorType.CONNECTION_ACCESS_DENIED_ERROR level=ErrorLevel.ERROR extra={"engine_name":"Presto" "issue_codes":[{"code":1014 "message":"Issue 1014 - Either the username or the password is wrong." }] } )]<line_sep>msg="Failed to establish a new connection: [Errno 8] nodename nor servname provided, or not known"<line_sep>result=PrestoEngineSpec.extract_errors(Exception(msg) {"hostname":"badhost"})<assert_stmt>result<eq>[SupersetError(message='The hostname "badhost" cannot be resolved.' error_type=SupersetErrorType.CONNECTION_INVALID_HOSTNAME_ERROR level=ErrorLevel.ERROR extra={"engine_name":"Presto" "issue_codes":[{"code":1007 "message":"Issue 1007 - The hostname provided can't be resolved." }] } )]<line_sep>msg="Failed to establish a new connection: [Errno 60] Operation timed out"<line_sep>result=PrestoEngineSpec.extract_errors(Exception(msg) {"hostname":"badhost" "port":12345})<assert_stmt>result<eq>[SupersetError(message='The host "badhost" might be down, and can\'t be reached on port 12345.' 
error_type=SupersetErrorType.CONNECTION_HOST_DOWN_ERROR level=ErrorLevel.ERROR extra={"engine_name":"Presto" "issue_codes":[{"code":1009 "message":"Issue 1009 - The host might be down, and can't be reached on the provided port." }] } )]<line_sep>msg="Failed to establish a new connection: [Errno 61] Connection refused"<line_sep>result=PrestoEngineSpec.extract_errors(Exception(msg) {"hostname":"badhost" "port":12345})<assert_stmt>result<eq>[SupersetError(message='Port 12345 on hostname "badhost" refused the connection.' error_type=SupersetErrorType.CONNECTION_PORT_CLOSED_ERROR level=ErrorLevel.ERROR extra={"engine_name":"Presto" "issue_codes":[{"code":1008 "message":"Issue 1008 - The port is closed."}] } )]<line_sep>msg="line 1:15: Catalog 'wrong' does not exist"<line_sep>result=PrestoEngineSpec.extract_errors(Exception(msg))<assert_stmt>result<eq>[SupersetError(message='Unable to connect to catalog named "wrong".' error_type=SupersetErrorType.CONNECTION_UNKNOWN_DATABASE_ERROR level=ErrorLevel.ERROR extra={"engine_name":"Presto" "issue_codes":[{"code":1015 "message":"Issue 1015 - Either the database is spelled incorrectly or does not exist." }] } )]<block_end><block_end><def_stmt>test_is_readonly <block_start><def_stmt>is_readonly sql:str<arrow>bool<block_start><return>PrestoEngineSpec.is_readonly_query(ParsedQuery(sql))<block_end><assert_stmt><not>is_readonly("SET hivevar:desc='Legislators'")<assert_stmt><not>is_readonly("UPDATE t1 SET col1 = NULL")<assert_stmt><not>is_readonly("INSERT OVERWRITE TABLE tabB SELECT a.Age FROM TableA")<assert_stmt>is_readonly("SHOW LOCKS test EXTENDED")<assert_stmt>is_readonly("EXPLAIN SELECT 1")<assert_stmt>is_readonly("SELECT 1")<assert_stmt>is_readonly("WITH (SELECT 1) bla SELECT * from bla")<block_end> |
<import_stmt>sys<def_stmt>test_vcr_import_deprecation recwarn<block_start><if_stmt>"vcr"<in>sys.modules# Remove imported module entry if already loaded in another test
<block_start><del_stmt>sys.modules["vcr"]<block_end><import_stmt>vcr# noqa: F401
<if_stmt>sys.version_info[0]<eq>2<block_start><assert_stmt>len(recwarn)<eq>1<assert_stmt>issubclass(recwarn[0].category DeprecationWarning)<block_end><else_stmt><block_start><assert_stmt>len(recwarn)<eq>0<block_end><block_end> |
"""The temper component."""<line_sep> |
"""
Small Theano LSTM recurrent network module.
@author: <NAME>
@date: December 10th 2014
Implements most of the great things that came out
in 2014 concerning recurrent neural networks, and
some good optimizers for these types of networks.
Note (from 5 January 2015): Dropout api is a bit sophisticated due to the way
random number generators are dealt with in Theano's scan.
"""<import_stmt>theano theano.tensor<as>T<import_stmt>numpy<as>np<import_from_stmt>collections OrderedDict<line_sep>srng=theano.tensor.shared_randomstreams.RandomStreams(1234)<line_sep>np_rng=np.random.RandomState(1234)<import_from_stmt>.masked_loss masked_loss masked_loss_dx<import_from_stmt>.shared_memory wrap_params borrow_memory borrow_all_memories<class_stmt>GradClip(theano.compile.ViewOp)<block_start>"""
Here we clip the gradients as <NAME> does in his
recurrent neural networks. In particular this prevents
explosion of gradients during backpropagation.
The original poster of this code was <NAME>,
[here](https://groups.google.com/forum/#!topic/theano-dev/GaJwGw6emK0).
"""<def_stmt>__init__ self clip_lower_bound clip_upper_bound<block_start>self.clip_lower_bound=clip_lower_bound<line_sep>self.clip_upper_bound=clip_upper_bound<assert_stmt>(self.clip_upper_bound<ge>self.clip_lower_bound)<block_end><def_stmt>grad self args g_outs<block_start><return>[T.clip(g_out self.clip_lower_bound self.clip_upper_bound)<for>g_out g_outs]<block_end><block_end><def_stmt>clip_gradient x bound<block_start>grad_clip=GradClip(-bound bound)<try_stmt><block_start>T.opt.register_canonicalize(theano.gof.OpRemove(grad_clip) name='grad_clip_%.1f'%(bound))<block_end><except_stmt>ValueError<block_start><pass><block_end><return>grad_clip(x)<block_end><def_stmt>create_shared out_size in_size=<none> name=<none><block_start>"""
Creates a shared matrix or vector
using the given in_size and out_size.
Inputs
------
out_size int : outer dimension of the
vector or matrix
in_size int (optional) : for a matrix, the inner
dimension.
Outputs
-------
theano shared : the shared matrix, with random numbers in it
"""<if_stmt>in_size<is><none><block_start><return>theano.shared(random_initialization((out_size )) name=name)<block_end><else_stmt><block_start><return>theano.shared(random_initialization((out_size in_size)) name=name)<block_end><block_end><def_stmt>random_initialization size<block_start><return>(np_rng.standard_normal(size)<times>1./size[0]).astype(theano.config.floatX)<block_end><def_stmt>Dropout shape prob<block_start>"""
Return a dropout mask of the given shape.
The probability of a value in the mask being zero is prob.
Inputs
------
shape tuple(int, ...) : size of the dropout mask.
prob float, variable : probability of dropping an element.
Outputs
-------
mask theano variable : binary mask cast to floatX, to be multiplied with the activations.
"""<line_sep>mask=srng.binomial(n=1 p=1-prob size=shape)<line_sep><return>T.cast(mask theano.config.floatX)<block_end><def_stmt>MultiDropout shapes dropout=0.<block_start>"""
Return all the masks needed for dropout outside of a scan loop.
"""<line_sep><return>[Dropout(shape dropout)<for>shape shapes]<block_end><class_stmt>Layer(object)<block_start>"""
Base object for neural network layers.
A layer has an input set of neurons, and
a hidden activation. The activation, f, is a
function applied to the affine transformation
of x by the connection matrix W, and the bias
vector b.
> y = f ( W * x + b )
"""<def_stmt>__init__ self input_size hidden_size activation clip_gradients=<false><block_start>self.input_size=input_size<line_sep>self.hidden_size=hidden_size<line_sep>self.activation=activation<line_sep>self.clip_gradients=clip_gradients<line_sep>self.is_recursive=<false><line_sep>self.create_variables()<block_end><def_stmt>create_variables self<block_start>"""
Create the connection matrix and the bias vector
"""<line_sep>self.linear_matrix=create_shared(self.hidden_size self.input_size name="Layer.linear_matrix")<line_sep>self.bias_matrix=create_shared(self.hidden_size name="Layer.bias_matrix")<block_end><def_stmt>activate self x<block_start>"""
The hidden activation of the network
"""<if_stmt>self.clip_gradients<is><not><false><block_start>x=clip_gradient(x self.clip_gradients)<block_end><if_stmt>x.ndim<g>1<block_start><return>self.activation(T.dot(self.linear_matrix x.T)+self.bias_matrix[: <none>]).T<block_end><else_stmt><block_start><return>self.activation(T.dot(self.linear_matrix x)+self.bias_matrix)<block_end><block_end>@property<def_stmt>params self<block_start><return>[self.linear_matrix self.bias_matrix]<block_end>@params.setter<def_stmt>params self param_list<block_start>self.linear_matrix.set_value(param_list[0].get_value())<line_sep>self.bias_matrix.set_value(param_list[1].get_value())<block_end><block_end><class_stmt>Embedding(Layer)<block_start>"""
A Matrix useful for storing word vectors or other distributed
representations.
use #activate(T.iscalar()) or #activate(T.ivector()) to embed
a symbol.
"""<def_stmt>__init__ self vocabulary_size hidden_size<block_start>"""
Vocabulary size is the number of different symbols to store,
and hidden_size is the size of their embedding.
"""<line_sep>self.vocabulary_size=vocabulary_size<line_sep>self.hidden_size=hidden_size<line_sep>self.create_variables()<line_sep>self.is_recursive=<false><block_end><def_stmt>create_variables self<block_start>self.embedding_matrix=create_shared(self.vocabulary_size self.hidden_size name='Embedding.embedding_matrix')<block_end><def_stmt>activate self x<block_start>"""
Inputs
------
x T.ivector() or T.iscalar() : indices to embed
Output
------
embedding : self.embedding_matrix[x]
"""<line_sep><return>self.embedding_matrix[x]<block_end>@property<def_stmt>params self<block_start><return>[self.embedding_matrix]<block_end>@params.setter<def_stmt>params self param_list<block_start>self.embedding_matrix.set_value(param_list[0].get_value())<block_end><block_end><class_stmt>RNN(Layer)<block_start>"""
Special recurrent layer that takes as input
a hidden activation, h, from the past and
an observation x.
> y = f ( W * [x, h] + b )
Note: x and h are concatenated in the activation.
"""<def_stmt>__init__ self *args **kwargs<block_start>super(RNN self).__init__(*args **kwargs)<line_sep>self.is_recursive=<true><block_end><def_stmt>create_variables self<block_start>"""
Create the connection matrix and the bias vector,
and the base hidden activation.
"""<line_sep>self.linear_matrix=create_shared(self.hidden_size self.input_size+self.hidden_size name="RNN.linear_matrix")<line_sep>self.bias_matrix=create_shared(self.hidden_size name="RNN.bias_matrix")<line_sep>self.initial_hidden_state=create_shared(self.hidden_size name="RNN.initial_hidden_state")<block_end><def_stmt>activate self x h<block_start>"""
The hidden activation of the network
"""<if_stmt>self.clip_gradients<is><not><false><block_start>x=clip_gradient(x self.clip_gradients)<line_sep>h=clip_gradient(h self.clip_gradients)<block_end><if_stmt>x.ndim<g>1<block_start><return>self.activation(T.dot(self.linear_matrix T.concatenate([x h] axis=1).T)+self.bias_matrix[: <none>]).T<block_end><else_stmt><block_start><return>self.activation(T.dot(self.linear_matrix T.concatenate([x h]))+self.bias_matrix)<block_end><block_end>@property<def_stmt>params self<block_start><return>[self.linear_matrix self.bias_matrix]<block_end>@params.setter<def_stmt>params self param_list<block_start>self.linear_matrix.set_value(param_list[0].get_value())<line_sep>self.bias_matrix.set_value(param_list[1].get_value())<block_end><block_end><class_stmt>GRU(RNN)<block_start><def_stmt>create_variables self<block_start>self.reset_layer=theano_lstm.RNN(self.input_size self.hidden_size activation=T.nnet.sigmoid)<line_sep>self.memory_interpolation_layer=theano_lstm.RNN(self.input_size self.hidden_size activation=T.nnet.sigmoid)<line_sep>self.memory_to_memory_layer=theano_lstm.RNN(self.input_size self.hidden_size activation=T.tanh)<line_sep>self.internal_layers=[self.reset_layer self.memory_interpolation_layer self.memory_to_memory_layer]<block_end>@property<def_stmt>params self<block_start><return>[param<for>layer self.internal_layers<for>param layer.params]<block_end>@params.setter<def_stmt>params self param_list<block_start><assert_stmt>(len(param_list)<eq>6)<line_sep>self.reset_layer.params=param_list[0:2]<line_sep>self.memory_interpolation_layer.params=param_list[2:4]<line_sep>self.memory_to_memory_layer.params=param_list[4:6]<block_end><def_stmt>activate self x h<block_start>reset_gate=self.reset_layer.activate(x h)<line_sep># the new state dampened by resetting
reset_h=reset_gate<times>h<line_sep># the new hidden state:
candidate_h=self.memory_to_memory_layer.activate(x reset_h)<line_sep># how much to update the new hidden state:
update_gate=self.memory_interpolation_layer.activate(x h)<line_sep># the new state interpolated between candidate and old:
new_h=(h<times>(1.0-update_gate)+candidate_h<times>update_gate)<line_sep><return>new_h<block_end><block_end><class_stmt>LSTM(RNN)<block_start>"""
The structure of the LSTM allows it to learn on problems with
long term dependencies relatively easily. The "long term"
memory is stored in a vector of memory cells c.
Although many LSTM architectures differ in their connectivity
structure and activation functions, all LSTM architectures have
memory cells that are suitable for storing information for long
periods of time. Here we implement the LSTM from Graves et al.
(2013).
"""<def_stmt>create_variables self<block_start>"""
Create the different LSTM gates and
their variables, along with the initial
hidden state for the memory cells and
the initial hidden activation.
"""<line_sep># input gate for cells
self.in_gate=Layer(self.input_size+self.hidden_size self.hidden_size T.nnet.sigmoid self.clip_gradients)<line_sep># forget gate for cells
self.forget_gate=Layer(self.input_size+self.hidden_size self.hidden_size T.nnet.sigmoid self.clip_gradients)<line_sep># input modulation for cells
self.in_gate2=Layer(self.input_size+self.hidden_size self.hidden_size self.activation self.clip_gradients)<line_sep># output modulation
self.out_gate=Layer(self.input_size+self.hidden_size self.hidden_size T.nnet.sigmoid self.clip_gradients)<line_sep># keep these layers organized
self.internal_layers=[self.in_gate self.forget_gate self.in_gate2 self.out_gate]<line_sep># store the memory cells in first n spots, and store the current
# output in the next n spots:
self.initial_hidden_state=create_shared(self.hidden_size<times>2 name="LSTM.initial_hidden_state")<block_end>@property<def_stmt>params self<block_start>"""
Parameters given by the 4 gates of this
LSTM cell layer (the initial hidden state
is stored separately).
"""<line_sep><return>[param<for>layer self.internal_layers<for>param layer.params]<block_end>@params.setter<def_stmt>params self param_list<block_start>start=0<for_stmt>layer self.internal_layers<block_start>end=start+len(layer.params)<line_sep>layer.params=param_list[start:end]<line_sep>start=end<block_end><block_end><def_stmt>postprocess_activation self x *args<block_start><if_stmt>x.ndim<g>1<block_start><return>x[: self.hidden_size:]<block_end><else_stmt><block_start><return>x[self.hidden_size:]<block_end><block_end><def_stmt>activate self x h<block_start>"""
The hidden activation, h, of the network, along
with the new values for the memory cells, c.
Both are concatenated as follows:
> y = f( x, past )
Or more visibly, with past = [prev_c, prev_h]
> [c, h] = f( x, [prev_c, prev_h] )
"""<if_stmt>h.ndim<g>1#previous memory cell values
<block_start>prev_c=h[: :self.hidden_size]<line_sep>#previous activations of the hidden layer
prev_h=h[: self.hidden_size:]<block_end><else_stmt>#previous memory cell values
<block_start>prev_c=h[:self.hidden_size]<line_sep>#previous activations of the hidden layer
prev_h=h[self.hidden_size:]<block_end># input and previous hidden constitute the actual
# input to the LSTM:
<if_stmt>h.ndim<g>1<block_start>obs=T.concatenate([x prev_h] axis=1)<block_end><else_stmt><block_start>obs=T.concatenate([x prev_h])<block_end># TODO could we combine these 4 linear transformations for efficiency? (e.g., http://arxiv.org/pdf/1410.4615.pdf, page 5)
# how much to add to the memory cells
in_gate=self.in_gate.activate(obs)<line_sep># how much to forget the current contents of the memory
forget_gate=self.forget_gate.activate(obs)<line_sep># modulate the input for the memory cells
in_gate2=self.in_gate2.activate(obs)<line_sep># new memory cells
next_c=forget_gate<times>prev_c+in_gate2<times>in_gate<line_sep># modulate the memory cells to create the new output
out_gate=self.out_gate.activate(obs)<line_sep># new hidden output
next_h=out_gate<times>T.tanh(next_c)<if_stmt>h.ndim<g>1<block_start><return>T.concatenate([next_c next_h] axis=1)<block_end><else_stmt><block_start><return>T.concatenate([next_c next_h])<block_end><block_end><block_end><class_stmt>GatedInput(RNN)<block_start><def_stmt>create_variables self# input gate for cells
<block_start>self.in_gate=Layer(self.input_size+self.hidden_size 1 T.nnet.sigmoid self.clip_gradients)<line_sep>self.internal_layers=[self.in_gate]<block_end>@property<def_stmt>params self<block_start>"""
Parameters given by the single input gate
of this GatedInput layer.
"""<line_sep><return>[param<for>layer self.internal_layers<for>param layer.params]<block_end>@params.setter<def_stmt>params self param_list<block_start>start=0<for_stmt>layer self.internal_layers<block_start>end=start+len(layer.params)<line_sep>layer.params=param_list[start:end]<line_sep>start=end<block_end><block_end><def_stmt>activate self x h# input and previous hidden constitute the actual
# input to the LSTM:
<block_start><if_stmt>h.ndim<g>1<block_start>obs=T.concatenate([x h] axis=1)<block_end><else_stmt><block_start>obs=T.concatenate([x h])<block_end>gate=self.in_gate.activate(obs)<if_stmt>h.ndim<g>1<block_start>gate=gate[: 0][: <none>]<block_end><else_stmt><block_start>gate=gate[0]<block_end><return>gate<block_end><def_stmt>postprocess_activation self gate x h<block_start><return>gate<times>x<block_end><block_end><def_stmt>apply_dropout x mask<block_start><if_stmt>mask<is><not><none><block_start><return>mask<times>x<block_end><else_stmt><block_start><return>x<block_end><block_end><class_stmt>StackedCells(object)<block_start>"""
Sequentially connect several recurrent layers.
celltypes can be RNN or LSTM.
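Example (an illustrative sketch, not part of the original docstring;
`x` and `prev_hiddens` stand in for symbolic inputs):
> stack = StackedCells(50, celltype=LSTM, layers=[100, 100], activation=T.tanh)
> new_hiddens = stack.forward(x, prev_hiddens)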
"""<def_stmt>__init__ self input_size celltype=RNN layers=<none> activation=<lambda>x:x clip_gradients=<false><block_start><if_stmt>layers<is><none><block_start>layers=[]<block_end>self.input_size=input_size<line_sep>self.clip_gradients=clip_gradients<line_sep>self.create_layers(layers activation celltype)<block_end><def_stmt>create_layers self layer_sizes activation_type celltype<block_start>self.layers=[]<line_sep>prev_size=self.input_size<for_stmt>k,layer_size enumerate(layer_sizes)<block_start>layer=celltype(prev_size layer_size activation_type clip_gradients=self.clip_gradients)<line_sep>self.layers.append(layer)<line_sep>prev_size=layer_size<block_end><block_end>@property<def_stmt>params self<block_start><return>[param<for>layer self.layers<for>param layer.params]<block_end>@params.setter<def_stmt>params self param_list<block_start>start=0<for_stmt>layer self.layers<block_start>end=start+len(layer.params)<line_sep>layer.params=param_list[start:end]<line_sep>start=end<block_end><block_end><def_stmt>forward self x prev_hiddens=<none> dropout=<none><block_start>"""
Return new hidden activations for all stacked RNNs
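(Note added for clarity: `prev_hiddens` and `dropout`, when given, are
per-layer lists indexed like `self.layers`; if `prev_hiddens` is None,
each layer's `initial_hidden_state` is used where available.)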
"""<if_stmt>dropout<is><none><block_start>dropout=[]<block_end><if_stmt>prev_hiddens<is><none><block_start>prev_hiddens=[(T.repeat(T.shape_padleft(layer.initial_hidden_state) x.shape[0] axis=0)<if>x.ndim<g>1<else>layer.initial_hidden_state)<if>hasattr(layer 'initial_hidden_state')<else><none><for>layer self.layers]<block_end>out=[]<line_sep>layer_input=x<for_stmt>k,layer enumerate(self.layers)<block_start>level_out=layer_input<if_stmt>len(dropout)<g>0<block_start>level_out=apply_dropout(layer_input dropout[k])<block_end><if_stmt>layer.is_recursive<block_start>level_out=layer.activate(level_out prev_hiddens[k])<block_end><else_stmt><block_start>level_out=layer.activate(level_out)<block_end>out.append(level_out)<line_sep># deliberate choice to change the upward structure here
# in an RNN, there is only one kind of hidden value
<if_stmt>hasattr(layer 'postprocess_activation')# in this case the hidden activation has memory cells
# that are not shared upwards
# along with hidden activations that can be sent
# upwards
<block_start><if_stmt>layer.is_recursive<block_start>level_out=layer.postprocess_activation(level_out layer_input prev_hiddens[k])<block_end><else_stmt><block_start>level_out=layer.postprocess_activation(level_out layer_input)<block_end><block_end>layer_input=level_out<block_end><return>out<block_end><block_end><def_stmt>create_optimization_updates cost params updates=<none> max_norm=5.0 lr=0.01 eps=1e-6 rho=0.95 method="adadelta" gradients=<none><block_start>"""
Get the updates for a gradient descent optimizer using
SGD, AdaDelta, or AdaGrad.
Returns the shared variables for the gradient caches,
and the updates dictionary for compilation by a
theano function.
Inputs
------
cost theano variable : what to minimize
params list : list of theano variables
with respect to which
the gradient is taken.
max_norm float : cap on excess gradients
lr float : base learning rate for
adagrad and SGD
eps float : numerical stability value
to not divide by zero
sometimes
rho float : adadelta hyperparameter.
method str : 'adagrad', 'adadelta', or 'sgd'.
Outputs:
--------
updates OrderedDict : the updates to pass to a
theano function
gsums list : gradient caches for Adagrad
and Adadelta
xsums list : gradient caches for AdaDelta only
lr theano shared : learning rate
max_norm theano_shared : normalizing clipping value for
excessive gradients (exploding).
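Example (an illustrative sketch, not part of the original docstring;
`x`, `y`, `cost` and `params` stand in for your own symbolic graph):
updates, gsums, xsums, lr, max_norm = create_optimization_updates(cost, params, method="adadelta")
train_fn = theano.function([x, y], cost, updates=updates)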
"""<line_sep>lr=theano.shared(np.float64(lr).astype(theano.config.floatX))<line_sep>eps=np.float64(eps).astype(theano.config.floatX)<line_sep>rho=theano.shared(np.float64(rho).astype(theano.config.floatX))<if_stmt>max_norm<is><not><none><and>max_norm<is><not><false><block_start>max_norm=theano.shared(np.float64(max_norm).astype(theano.config.floatX))<block_end>gsums=[theano.shared(np.zeros_like(param.get_value(borrow=<true>)))<if>(method<eq>'adadelta'<or>method<eq>'adagrad')<else><none><for>param params]<line_sep>xsums=[theano.shared(np.zeros_like(param.get_value(borrow=<true>)))<if>method<eq>'adadelta'<else><none><for>param params]<line_sep>gparams=T.grad(cost params)<if>gradients<is><none><else>gradients<if_stmt>updates<is><none><block_start>updates=OrderedDict()<block_end><for_stmt>gparam,param,gsum,xsum zip(gparams params gsums xsums)# clip gradients if they get too big
<block_start><if_stmt>max_norm<is><not><none><and>max_norm<is><not><false><block_start>grad_norm=gparam.norm(L=2)<line_sep>gparam=(T.minimum(max_norm grad_norm)/(grad_norm+eps))<times>gparam<block_end><if_stmt>method<eq>'adadelta'<block_start>updates[gsum]=T.cast(rho<times>gsum+(1.-rho)<times>(gparam<power>2) theano.config.floatX)<line_sep>dparam=-T.sqrt((xsum+eps)/(updates[gsum]+eps))<times>gparam<line_sep>updates[xsum]=T.cast(rho<times>xsum+(1.-rho)<times>(dparam<power>2) theano.config.floatX)<line_sep>updates[param]=T.cast(param+dparam theano.config.floatX)<block_end><elif_stmt>method<eq>'adagrad'<block_start>updates[gsum]=T.cast(gsum+(gparam<power>2) theano.config.floatX)<line_sep>updates[param]=T.cast(param-lr<times>(gparam/(T.sqrt(updates[gsum]+eps))) theano.config.floatX)<block_end><else_stmt><block_start>updates[param]=param-gparam<times>lr<block_end><block_end><if_stmt>method<eq>'adadelta'<block_start>lr=rho<block_end><return>updates gsums xsums lr max_norm<block_end>__all__=["create_optimization_updates" "masked_loss" "masked_loss_dx" "clip_gradient" "create_shared" "Dropout" "apply_dropout" "StackedCells" "Layer" "LSTM" "RNN" "GatedInput" "Embedding" "MultiDropout" "wrap_params" "borrow_memory" "borrow_all_memories"]<line_sep> |
"""
The GeoDjango GEOS module. Please consult the GeoDjango documentation
for more details:
http://geodjango.org/docs/geos.html
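Example (illustrative, not part of the original docstring):
>>> from django.contrib.gis.geos import Point, fromstr
>>> pnt = fromstr('POINT(5 23)')   # build a geometry from WKT
>>> pnt2 = Point(5, 23)            # or construct one directly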
"""<import_from_stmt>django.contrib.gis.geos.geometry GEOSGeometry wkt_regex hex_regex<import_from_stmt>django.contrib.gis.geos.point Point<import_from_stmt>django.contrib.gis.geos.linestring LineString LinearRing<import_from_stmt>django.contrib.gis.geos.polygon Polygon<import_from_stmt>django.contrib.gis.geos.collections GeometryCollection MultiPoint MultiLineString MultiPolygon<import_from_stmt>django.contrib.gis.geos.error GEOSException GEOSIndexError<import_from_stmt>django.contrib.gis.geos.io WKTReader WKTWriter WKBReader WKBWriter<import_from_stmt>django.contrib.gis.geos.factory fromfile fromstr<import_from_stmt>django.contrib.gis.geos.libgeos geos_version geos_version_info GEOS_PREPARE<line_sep> |
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>model.blocks.shared_blocks make_activation<def_stmt>conv_bn inp oup stride activation=nn.ReLU<block_start>conv=nn.Sequential(nn.Conv2d(inp oup 3 stride 1 bias=<false>) nn.BatchNorm2d(oup) make_activation(activation))<line_sep>nn.init.kaiming_normal_(conv[0].weight mode='fan_out')<line_sep><return>conv<block_end><def_stmt>conv_1x1_bn inp oup activation=nn.ReLU<block_start>conv=nn.Sequential(nn.Conv2d(inp oup 1 1 0 bias=<false>) nn.BatchNorm2d(oup) make_activation(activation))<line_sep>nn.init.kaiming_normal_(conv[0].weight mode='fan_out')<line_sep><return>conv<block_end><def_stmt>channel_shuffle x groups<block_start>batchsize,num_channels,height,width=x.data.size()<line_sep>channels_per_group=num_channels<floordiv>groups<line_sep># reshape
x=x.view(batchsize groups channels_per_group height width)<line_sep>x=torch.transpose(x 1 2).contiguous()<line_sep># flatten
x=x.view(batchsize -1 height width)<line_sep><return>x<block_end><class_stmt>ShuffleInvertedResidual(nn.Module)<block_start><def_stmt>__init__ self inp oup stride benchmodel activation=nn.ReLU<block_start>super(ShuffleInvertedResidual self).__init__()<line_sep>self.benchmodel=benchmodel<line_sep>self.stride=stride<assert_stmt>stride<in>[1 2]<line_sep>oup_inc=oup<floordiv>2<if_stmt>self.benchmodel<eq>1# assert inp == oup_inc
<block_start>self.branch2=nn.Sequential(# pw
nn.Conv2d(oup_inc oup_inc 1 1 0 bias=<false>) nn.BatchNorm2d(oup_inc) make_activation(activation) # dw
nn.Conv2d(oup_inc oup_inc 3 stride 1 groups=oup_inc bias=<false>) nn.BatchNorm2d(oup_inc) # pw-linear
nn.Conv2d(oup_inc oup_inc 1 1 0 bias=<false>) nn.BatchNorm2d(oup_inc) make_activation(activation) )<block_end><else_stmt><block_start>self.branch1=nn.Sequential(# dw
nn.Conv2d(inp inp 3 stride 1 groups=inp bias=<false>) nn.BatchNorm2d(inp) # pw-linear
nn.Conv2d(inp oup_inc 1 1 0 bias=<false>) nn.BatchNorm2d(oup_inc) make_activation(activation) )<line_sep>self.branch2=nn.Sequential(# pw
nn.Conv2d(inp oup_inc 1 1 0 bias=<false>) nn.BatchNorm2d(oup_inc) make_activation(activation) # dw
nn.Conv2d(oup_inc oup_inc 3 stride 1 groups=oup_inc bias=<false>) nn.BatchNorm2d(oup_inc) # pw-linear
nn.Conv2d(oup_inc oup_inc 1 1 0 bias=<false>) nn.BatchNorm2d(oup_inc) make_activation(activation) )<block_end>self.init_weights()<block_end>@staticmethod<def_stmt>_concat x out# concatenate along channel axis
<block_start><return>torch.cat((x out) 1)<block_end><def_stmt>init_weights self<block_start><for_stmt>m self.children()<block_start><if_stmt>isinstance(m nn.Conv2d)<block_start>nn.init.kaiming_normal_(m.weight mode='fan_out')<block_end><elif_stmt>isinstance(m nn.BatchNorm2d)<block_start>nn.init.constant_(m.weight 1)<line_sep>nn.init.constant_(m.bias 0)<block_end><block_end><block_end><def_stmt>forward self x<block_start><if_stmt>self.benchmodel<eq>1<block_start>x1=x[: :(x.shape[1]<floordiv>2) : :]<line_sep>x2=x[: (x.shape[1]<floordiv>2): : :]<line_sep>out=self._concat(x1 self.branch2(x2))<block_end><elif_stmt>self.benchmodel<eq>2<block_start>out=self._concat(self.branch1(x) self.branch2(x))<block_end><return>channel_shuffle(out 2)<block_end><block_end> |
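# Usage sketch (illustrative, not part of the original file; torch is already
# imported above, and a stride-1 block keeps the channel count and spatial size):
# block = ShuffleInvertedResidual(inp=116, oup=116, stride=1, benchmodel=1)
# y = block(torch.randn(1, 116, 28, 28))  # -> shape (1, 116, 28, 28)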
# coding=utf8
# This code is adapted from https://github.com/tensorflow/models/tree/master/official/r1/resnet.
# ==========================================================================================
# NAVER’s modifications are Copyright 2020 NAVER corp. All rights reserved.
# ==========================================================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>functools<import_stmt>os<import_stmt>tensorflow<as>tf<import_from_stmt>official.utils.export export<import_from_stmt>utils data_util<import_from_stmt>functions data_config<import_stmt>numpy<as>np<import_from_stmt>tqdm tqdm<def_stmt>export_test bin_export_path flags_obj ir_eval<block_start>ds=tf.data.Dataset.list_files(flags_obj.data_dir+'/'+flags_obj.val_regex)<line_sep>ds=ds.interleave(tf.data.TFRecordDataset cycle_length=10)<def_stmt>parse_tfr example_proto<block_start>feature_def={'image/class/label':tf.FixedLenFeature([] dtype=tf.int64 default_value=-1) 'image/encoded':tf.FixedLenFeature([] dtype=tf.string default_value='')}<line_sep>features=tf.io.parse_single_example(serialized=example_proto features=feature_def)<line_sep><return>features['image/encoded'] features['image/class/label']<block_end>ds=ds.map(parse_tfr)<line_sep>ds=ds.batch(flags_obj.val_batch_size)<line_sep>iterator=ds.make_one_shot_iterator()<line_sep>images,labels=iterator.get_next()<line_sep>dconf=data_config.get_config(flags_obj.dataset_name)<line_sep>num_val_images=dconf.num_images['validation']<if_stmt>flags_obj.zeroshot_eval<or>ir_eval<block_start>feature_dim=flags_obj.embedding_size<if>flags_obj.embedding_size<g>0<else>flags_obj.num_features<line_sep>np_features=np.zeros((num_val_images feature_dim) dtype=np.float32)<line_sep>np_labels=np.zeros(num_val_images dtype=np.int64)<line_sep>np_i=0<with_stmt>tf.Session()<as>sess<block_start>sess.run(tf.global_variables_initializer())<line_sep>sess.run(tf.local_variables_initializer())<line_sep>tf.saved_model.load(sess=sess export_dir=bin_export_path tags={"serve"})<for_stmt>_ tqdm(range(int(num_val_images/flags_obj.val_batch_size)+1))<block_start><try_stmt><block_start>np_image,np_label=sess.run([images labels])<line_sep>np_predict=sess.run('embedding_tensor:0' feed_dict={'input_tensor:0':np_image})<line_sep>np_features[np_i:np_i+np_predict.shape[0] :]=np_predict<line_sep>np_labels[np_i:np_i+np_label.shape[0]]=np_label<line_sep>np_i<augadd>np_predict.shape[0]<block_end><except_stmt>tf.errors.OutOfRangeError<block_start><break><block_end><block_end><assert_stmt>np_i<eq>num_val_images<block_end><import_from_stmt>sklearn.preprocessing normalize<line_sep>x=normalize(np_features)<line_sep>np_sim=x.dot(x.T)<line_sep>np.fill_diagonal(np_sim -10)# removing similarity for query.
num_correct=0<for_stmt>i range(num_val_images)<block_start>cur_label=np_labels[i]<line_sep>rank1_label=np_labels[np.argmax(np_sim[i :])]<if_stmt>rank1_label<eq>cur_label<block_start>num_correct<augadd>1<block_end><block_end>recall_at_1=num_correct/num_val_images<line_sep>metric=recall_at_1<block_end><else_stmt><block_start>np_i=0<line_sep>correct_cnt=0<with_stmt>tf.Session()<as>sess<block_start>sess.run(tf.global_variables_initializer())<line_sep>sess.run(tf.local_variables_initializer())<line_sep>tf.saved_model.load(sess=sess export_dir=bin_export_path tags={"serve"})<for_stmt>_ tqdm(range(int(num_val_images/flags_obj.val_batch_size)+1))<block_start><try_stmt><block_start>np_image,np_label=sess.run([images labels])<line_sep>np_predict=sess.run('ArgMax:0' feed_dict={'input_tensor:0':np_image})<line_sep>np_i<augadd>np_predict.shape[0]<line_sep>correct_cnt<augadd>np.sum(np_predict<eq>np_label)<block_end><except_stmt>tf.errors.OutOfRangeError<block_start><break><block_end><block_end><assert_stmt>np_i<eq>num_val_images<line_sep>metric=correct_cnt/np_i<block_end><block_end><return>metric<block_end><def_stmt>image_bytes_serving_input_fn image_shape decoder_name dtype=tf.float32 pptype='imagenet'<block_start>"""Serving input fn for raw jpeg images."""<def_stmt>_preprocess_image image_bytes<block_start>"""Preprocess a single raw image."""<line_sep># Bounding box around the whole image.
bbox=tf.constant([0.0 0.0 1.0 1.0] dtype=dtype shape=[1 1 4])<line_sep>_,_,num_channels=image_shape<line_sep>tf.logging.info("!!!!!!!!!! Preprocessing type for exporting pb: {} and decoder type: {}".format(pptype decoder_name))<line_sep>image=data_util.preprocess_image(image_buffer=image_bytes is_training=<false> bbox=bbox num_channels=num_channels dtype=dtype use_random_crop=<false> decoder_name=decoder_name dct_method='INTEGER_ACCURATE' preprocessing_type=pptype)<line_sep><return>image<block_end>image_bytes_list=tf.placeholder(shape=[<none>] dtype=tf.string name='input_tensor')<line_sep>images=tf.map_fn(_preprocess_image image_bytes_list back_prop=<false> dtype=dtype)<line_sep><return>tf.estimator.export.TensorServingInputReceiver(images {'image_bytes':image_bytes_list})<block_end><def_stmt>export_pb flags_core flags_obj shape classifier ir_eval=<false><block_start>export_dtype=flags_core.get_tf_dtype(flags_obj)<if_stmt><not>flags_obj.data_format<block_start><raise>ValueError('The `data_format` must be specified: channels_first or channels_last ')<block_end>bin_export_path=os.path.join(flags_obj.export_dir flags_obj.data_format 'binary_input')<line_sep>bin_input_receiver_fn=functools.partial(image_bytes_serving_input_fn shape flags_obj.export_decoder_type dtype=export_dtype pptype=flags_obj.preprocessing_type)<line_sep>pp_export_path=os.path.join(flags_obj.export_dir flags_obj.data_format 'preprocessed_input')<line_sep>pp_input_receiver_fn=export.build_tensor_serving_input_receiver_fn(shape batch_size=<none> dtype=export_dtype)<line_sep>result_bin_export_path=classifier.export_savedmodel(bin_export_path bin_input_receiver_fn)<line_sep>classifier.export_savedmodel(pp_export_path pp_input_receiver_fn)<if_stmt>flags_obj.export_decoder_type<eq>'jpeg'<block_start>metric=export_test(result_bin_export_path flags_obj ir_eval)<line_sep>msg='IMPOTANT! Evaluation metric of exported saved_model.pb is {}'.format(metric)<line_sep>tf.logging.info(msg)<with_stmt>tf.gfile.Open(result_bin_export_path.decode("utf-8")+'/model_performance.txt' 'w')<as>fp<block_start>fp.write(msg)<block_end><block_end><block_end> |
# Talon voice commands for Xcode
# <NAME> <EMAIL>
<import_from_stmt>talon.voice Key Context<import_from_stmt>..misc.mouse control_shift_click<line_sep>ctx=Context("xcode" bundle="com.apple.dt.Xcode")<line_sep>ctx.keymap({"build it":Key("cmd-b") "stop it":Key("cmd-.") "run it":Key("cmd-r") "go back":Key("cmd-ctrl-left") "go (fore | forward)":Key("cmd-ctrl-right") "find in (proj | project)":Key("cmd-shift-f") "(sell find in (proj | project) | find selection in project)":Key("cmd-e cmd-shift-f enter") "(sell find ace in (proj | project) | replace selection in project)":Key("cmd-e cmd-shift-alt-f") "next in (proj | project)":Key("cmd-ctrl-g") "prev in (proj | project)":Key("shift-cmd-ctrl-g") "split window":Key("cmd-alt-enter") "show editor":Key("cmd-enter") "(show | hide) debug":Key("cmd-shift-y") "(show | find) call hierarchy":Key("cmd-ctrl-shift-h") "show (recent | recent files)":[Key("ctrl-1") "recent files\n"] "show related":Key("ctrl-1") "show history":Key("ctrl-2") "show files":Key("ctrl-5") "show (methods | items)":Key("ctrl-6") "show navigator":Key("cmd-0") "hide (navigator | project | warnings | breakpoints | reports | build)":Key("cmd-0") "show project":Key("cmd-1") "show warnings":Key("cmd-5") "show breakpoints":Key("cmd-8") "show (reports | build)":Key("cmd-9") "show diffs":Key("cmd-alt-shift-enter") "(next counterpart | show header | switcher)":Key("cmd-ctrl-down") "prev counterpart":Key("cmd-ctrl-up") "toggle comment":Key("cmd-/") "toggle breakpoint":Key("cmd-\\") "toggle all breakpoints":Key("cmd-y") "move line up":Key("cmd-alt-[") "move line down":Key("cmd-alt-]") "go (deafen | definition)":Key("cmd-ctrl-j") "edit scheme":Key("cmd-shift-,") "quick open":Key("cmd-shift-o") "comm skoosh":"// " "(comm | comment) line":["//------------------------------------------------------------------------------" Key("enter") ] "step in":Key("f7") "step over":Key("f6") "step out":Key("f8") "step (continue | go)":Key("ctrl-cmd-y") "show blame for line":Key("cmd-alt-ctrl-b") "(reveal file | show file in finder)":Key("cmd-alt-ctrl-shift-f") "(snipline | delete line)":Key("cmd-alt-ctrl-shift-backspace") "add cursor down":Key("ctrl-shift-down") "add cursor up":Key("ctrl-shift-up") "add cursor":control_shift_click "dub add cursor":<lambda>m:control_shift_click(m 0 2) "((select | sell) (partial | sub) [word] left)":Key("shift-ctrl-left") "((select | sell) (partial | sub) [word] right)":Key("shift-ctrl-right") # the following require custom key bindings in xcode preferences
"((partial | sub) [word] left | wonkrim)":Key("alt-ctrl-left") "((partial | sub) [word] right | wonkrish)":Key("alt-ctrl-right") })<line_sep> |
#
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Lead Developers: <NAME> and <NAME>
# Authors: <NAME>, <NAME>, <NAME>, <NAME>
# Research Leads: <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# calculates the mutual information of various shapes of data
<import_stmt>numpy<import_stmt>pylab<as>pl<import_stmt>crosscat.utils.sample_utils<as>su<import_stmt>crosscat.utils.inference_utils<as>iu<import_stmt>crosscat.utils.data_utils<as>du<import_stmt>crosscat.cython_code.State<as>State<import_stmt>random<import_stmt>math<def_stmt>ring n=200<block_start>X=numpy.zeros((n 2))<for_stmt>i range(n)<block_start>angle=random.uniform(0 2<times>math.pi)<line_sep>distance=random.uniform(1 1.5)<line_sep>X[i 0]=math.cos(angle)<times>distance<line_sep>X[i 1]=math.sin(angle)<times>distance<block_end><return>X<block_end><def_stmt>circle n=200<block_start>X=numpy.zeros((n 2))<for_stmt>i range(n)<block_start>angle=random.uniform(0 2<times>math.pi)<line_sep>distance=random.uniform(0 1.5)<line_sep>X[i 0]=math.cos(angle)<times>distance<line_sep>X[i 1]=math.sin(angle)<times>distance<block_end><return>X<block_end><def_stmt>square n=200<block_start>X=numpy.zeros((n 2))<for_stmt>i range(n)<block_start>x=random.uniform(-1 1)<line_sep>y=random.uniform(-1 1)<line_sep>X[i 0]=x<line_sep>X[i 1]=y<block_end><return>X<block_end><def_stmt>diamond n=200<block_start>X=square(n=n)<for_stmt>i range(n)<block_start>angle=math.atan(X[i 1]/X[i 0])<line_sep>angle<augadd>math.pi/4<line_sep>hyp=(X[i 0]<power>2.0+X[i 1]<power>2.0)<power>.5<line_sep>x=math.cos(angle)<times>hyp<line_sep>y=math.sin(angle)<times>hyp<line_sep>X[i 0]=x<line_sep>X[i 1]=y<block_end><return>X<block_end><def_stmt>four_dots n=200<block_start>X=numpy.zeros((n 2))<line_sep>nb=n/4<line_sep>mx=[-1 1 -1 1]<line_sep>my=[-1 -1 1 1]<line_sep>s=.25<for_stmt>i range(n)<block_start>n=random.randrange(4)<line_sep>x=random.normalvariate(mx[n] s)<line_sep>y=random.normalvariate(my[n] s)<line_sep>X[i 0]=x<line_sep>X[i 1]=y<block_end><return>X<block_end><def_stmt>correlated r n=200<block_start>X=numpy.random.multivariate_normal([0 0] [[1 r] [r 1]] n)<line_sep><return>X<block_end><def_stmt>sample_from_view M_c X_L X_D get_next_seed<block_start>view_col=X_L['column_partition']['assignments'][0]<line_sep>view_col2=X_L['column_partition']['assignments'][1]<line_sep>same_view=<true><if_stmt>view_col2<ne>view_col<block_start>same_view=<false><block_end>view_state=X_L['view_state'][view_col]<line_sep>view_state2=X_L['view_state'][view_col2]<line_sep>cluster_crps=numpy.exp(su.determine_cluster_crp_logps(view_state))<line_sep>cluster_crps2=numpy.exp(su.determine_cluster_crp_logps(view_state2))<assert_stmt>(math.fabs(numpy.sum(cluster_crps)-1)<l>.00000001)<line_sep>samples=numpy.zeros((n 2))<line_sep>cluster_idx1=numpy.nonzero(numpy.random.multinomial(1 cluster_crps))[0][0]<line_sep>cluster_model1=su.create_cluster_model_from_X_L(M_c X_L view_col cluster_idx1)<if_stmt>same_view<block_start>cluster_idx2=cluster_idx1<line_sep>cluster_model2=cluster_model1<block_end><else_stmt><block_start>cluster_idx2=numpy.nonzero(numpy.random.multinomial(1 cluster_crps2))[0][0]<line_sep>cluster_model2=su.create_cluster_model_from_X_L(M_c X_L view_col2 cluster_idx2)<block_end>component_model1=cluster_model1[0]<line_sep>x=component_model1.get_draw(get_next_seed())<line_sep>component_model2=cluster_model2[1]<line_sep>y=component_model2.get_draw(get_next_seed())<line_sep><return>x y<block_end><def_stmt>sample_data_from_crosscat M_c X_Ls X_Ds get_next_seed n<block_start>X=numpy.zeros((n 2))<line_sep>n_samples=len(X_Ls)<for_stmt>i range(n)<block_start>cc=random.randrange(n_samples)<line_sep>x,y=sample_from_view(M_c X_Ls[cc] X_Ds[cc] get_next_seed)<line_sep>X[i 0]=x<line_sep>X[i 1]=y<block_end><return>X<block_end><def_stmt>do_test which_plot max_plots n burn_in 
cc_samples which_test correlation=0 do_plot=<false><block_start><if_stmt>which_test<eq>"correlated"<block_start>X=correlated(correlation n=n)<block_end><elif_stmt>which_test<eq>"square"<block_start>X=square(n=n)<block_end><elif_stmt>which_test<eq>"ring"<block_start>X=ring(n=n)<block_end><elif_stmt>which_test<eq>"circle"<block_start>X=circle(n=n)<block_end><elif_stmt>which_test<eq>"diamond"<block_start>X=diamond(n=n)<block_end><elif_stmt>which_test<eq>"blob"<block_start>X=correlated(0.0 n=n)<block_end><elif_stmt>which_test<eq>"dots"<block_start>X=four_dots(n=n)<block_end><elif_stmt>which_test<eq>"mixed"<block_start>X=numpy.vstack((correlated(.95 n=n/2) correlated(0 n=n/2)))<block_end>get_next_seed=<lambda>:random.randrange(32000)<line_sep># Build a state
M_c=du.gen_M_c_from_T(X.tolist())<line_sep>state=State.p_State(M_c X.tolist())<line_sep>X_Ls=[]<line_sep>X_Ds=[]<line_sep># collect crosscat samples
<for_stmt>_ range(cc_samples)<block_start>state=State.p_State(M_c X.tolist())<line_sep>state.transition(n_steps=burn_in)<line_sep>X_Ds.append(state.get_X_D())<line_sep>X_Ls.append(state.get_X_L())<block_end>SX=sample_data_from_crosscat(M_c X_Ls X_Ds get_next_seed n)<if_stmt>do_plot<block_start>pl.subplot(2 max_plots which_plot)<line_sep>pl.scatter(X[: 0] X[: 1] c='blue' alpha=.5)<line_sep>pl.title("Original data")<line_sep>pl.subplot(2 max_plots max_plots+which_plot)<line_sep>pl.scatter(SX[: 0] SX[: 1] c='red' alpha=.5)<line_sep>pl.title("Sampled data")<line_sep>pl.show<block_end><return>M_c X_Ls X_Ds<block_end><def_stmt>MI_test n burn_in cc_samples which_test n_MI_samples=500 correlation=0<block_start>get_next_seed=<lambda>:random.randrange(32000)<line_sep>M_c,X_Ls,X_Ds=do_test(0 0 n burn_in cc_samples "correlated" correlation=correlation do_plot=<false>)<line_sep># query column 0 and 1
MI,Linfoot=iu.mutual_information(M_c X_Ls X_Ds [(0 1)] get_next_seed n_samples=n_MI_samples)<line_sep>MI=numpy.mean(MI)<line_sep>Linfoot=numpy.mean(Linfoot)<if_stmt>which_test<eq>"correlated"<block_start>test_strn="Test: correlation (%1.2f), N: %i, burn_in: %i, samples: %i, MI_samples: %i\n\tMI: %f, Linfoot %f"%(correlation n burn_in cc_samples n_MI_samples MI Linfoot)<block_end><else_stmt><block_start>test_strn="Test: %s, N: %i, burn_in: %i, samples: %i, MI_samples: %i\n\tMI: %f, Linfoot %f"%(which_test n burn_in cc_samples n_MI_samples MI Linfoot)<block_end>print(test_strn)<line_sep><return>test_strn<block_end>do_plot=<false><line_sep>n_mi_samples=500<line_sep>N=[10 100 1000]<line_sep>burn_in=200<line_sep>cc_samples=10<line_sep>print(" ")<for_stmt>n N<block_start>strn=MI_test(n burn_in cc_samples "correlated" correlation=.3)<line_sep>strn=MI_test(n burn_in cc_samples "correlated" correlation=.6)<line_sep>strn=MI_test(n burn_in cc_samples "correlated" correlation=.9)<line_sep>strn=MI_test(n burn_in cc_samples "ring")<line_sep>strn=MI_test(n burn_in cc_samples "dots")<line_sep>strn=MI_test(n burn_in cc_samples "mixed")<block_end> |
# Copyright (C) 2016-2018 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#!/usr/bin/env python
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script to update a failure list file to add/remove failures.
This is sort of like comm(1), except it recognizes comments and ignores them.
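Example invocation (illustrative; the script and file names are placeholders):
update_failure_list.py failure_list.txt --add new_failures.txt --remove fixed_failures.txt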
"""<import_stmt>argparse<line_sep>parser=argparse.ArgumentParser(description='Adds/removes failures from the failure list.')<line_sep>parser.add_argument('filename' type=str help='failure list file to update')<line_sep>parser.add_argument('--add' dest='add_list' action='append')<line_sep>parser.add_argument('--remove' dest='remove_list' action='append')<line_sep>args=parser.parse_args()<line_sep>add_set=set()<line_sep>remove_set=set()<for_stmt>add_file (args.add_list<or>[])<block_start><with_stmt>open(add_file)<as>f<block_start><for_stmt>line f<block_start>add_set.add(line)<block_end><block_end><block_end><for_stmt>remove_file (args.remove_list<or>[])<block_start><with_stmt>open(remove_file)<as>f<block_start><for_stmt>line f<block_start><if_stmt>line<in>add_set<block_start><raise>Exception("Asked to both add and remove test: "+line)<block_end>remove_set.add(line.strip())<block_end><block_end><block_end>add_list=sorted(add_set reverse=<true>)<with_stmt>open(args.filename)<as>in_file<block_start>existing_list=in_file.read()<block_end><with_stmt>open(args.filename "w")<as>f<block_start><for_stmt>line existing_list.splitlines(<true>)<block_start>test=line.split("#")[0].strip()<while_stmt>len(add_list)<g>0<and>test<g>add_list[-1]<block_start>f.write(add_list.pop())<block_end><if_stmt>test<not><in>remove_set<block_start>f.write(line)<block_end><block_end><block_end> |
# coding=utf-8
# Copyright 2021 The Deeplab2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for common.py."""<import_stmt>tensorflow<as>tf<import_from_stmt>deeplab2 common<class_stmt>CommonTest(tf.test.TestCase)<block_start><def_stmt>test_constants_keys self<block_start>self.assertEqual(common.PRED_PANOPTIC_KEY 'panoptic_pred')<line_sep>self.assertEqual(common.PRED_SEMANTIC_KEY 'semantic_pred')<line_sep>self.assertEqual(common.PRED_INSTANCE_CENTER_KEY 'instance_center_pred')<line_sep>self.assertEqual(common.PRED_INSTANCE_KEY 'instance_pred')<line_sep>self.assertEqual(common.PRED_SEMANTIC_LOGITS_KEY 'semantic_logits')<line_sep>self.assertEqual(common.PRED_CENTER_HEATMAP_KEY 'center_heatmap')<line_sep>self.assertEqual(common.PRED_OFFSET_MAP_KEY 'offset_map')<line_sep>self.assertEqual(common.PRED_FRAME_OFFSET_MAP_KEY 'frame_offset_map')<line_sep>self.assertEqual(common.GT_PANOPTIC_KEY 'panoptic_gt')<line_sep>self.assertEqual(common.GT_SEMANTIC_KEY 'semantic_gt')<line_sep>self.assertEqual(common.GT_INSTANCE_CENTER_KEY 'instance_center_gt')<line_sep>self.assertEqual(common.GT_FRAME_OFFSET_KEY 'frame_offset_gt')<line_sep>self.assertEqual(common.GT_INSTANCE_REGRESSION_KEY 'instance_regression_gt')<line_sep>self.assertEqual(common.GT_PANOPTIC_RAW 'panoptic_raw')<line_sep>self.assertEqual(common.GT_SEMANTIC_RAW 'semantic_raw')<line_sep>self.assertEqual(common.GT_SIZE_RAW 'size_raw')<line_sep>self.assertEqual(common.SEMANTIC_LOSS_WEIGHT_KEY 'semantic_loss_weight')<line_sep>self.assertEqual(common.CENTER_LOSS_WEIGHT_KEY 'center_loss_weight')<line_sep>self.assertEqual(common.REGRESSION_LOSS_WEIGHT_KEY 'regression_loss_weight')<line_sep>self.assertEqual(common.FRAME_REGRESSION_LOSS_WEIGHT_KEY 'frame_regression_loss_weight')<line_sep>self.assertEqual(common.RESIZED_IMAGE 'resized_image')<line_sep>self.assertEqual(common.IMAGE 'image')<line_sep>self.assertEqual(common.IMAGE_NAME 'image_name')<line_sep>self.assertEqual(common.SEQUENCE_ID 'sequence_id')<line_sep>self.assertEqual(common.KEY_FRAME_ID 'video/frame_id')<line_sep>self.assertEqual(common.KEY_SEQUENCE_ID 'video/sequence_id')<line_sep>self.assertEqual(common.KEY_LABEL_FORMAT 'image/segmentation/class/format')<line_sep>self.assertEqual(common.KEY_ENCODED_PREV_LABEL 'prev_image/segmentation/class/encoded')<line_sep>self.assertEqual(common.KEY_ENCODED_LABEL 'image/segmentation/class/encoded')<line_sep>self.assertEqual(common.KEY_IMAGE_CHANNELS 'image/channels')<line_sep>self.assertEqual(common.KEY_IMAGE_WIDTH 'image/width')<line_sep>self.assertEqual(common.KEY_IMAGE_HEIGHT 'image/height')<line_sep>self.assertEqual(common.KEY_IMAGE_FORMAT 'image/format')<line_sep>self.assertEqual(common.KEY_IMAGE_FILENAME 'image/filename')<line_sep>self.assertEqual(common.KEY_ENCODED_PREV_IMAGE 'prev_image/encoded')<line_sep>self.assertEqual(common.KEY_ENCODED_IMAGE 'image/encoded')<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end> |
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
<import_stmt>argparse<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_stmt>os<def_stmt>extract x predicate<block_start><return>np.array(list(filter(predicate x)))<block_end><def_stmt>main args<block_start>np.set_printoptions(suppress=<true>)<line_sep>A=np.loadtxt(args.A delimiter=',')<line_sep>B=np.loadtxt(args.B delimiter=',')<line_sep>faster=1.0-A[: -1]/B[: -1]<line_sep>print(f'A is faster than B by:')<line_sep>print(f' mean: {np.mean(faster)<times>100:7.4}%')<line_sep>print(f' std: {np.std(faster)<times>100:7.4}%')<line_sep>print(f' median: {np.median(faster)<times>100:7.4}%')<line_sep>print(f' min: {np.min(faster)<times>100:7.4}%')<line_sep>print(f' max: {np.max(faster)<times>100:7.4}%')<for_stmt>batch_size np.unique(A[: 0])<block_start><for_stmt>input_size np.unique(A[: 2])<block_start>a=extract(A <lambda>x:x[0]<eq>batch_size<and>x[2]<eq>input_size)<line_sep>b=extract(B <lambda>x:x[0]<eq>batch_size<and>x[2]<eq>input_size)<line_sep>fig,ax=plt.subplots(dpi=200)<line_sep>ax.set_xticks(a[: 1])<line_sep>ax.set_xticklabels(a[: 1].astype(np.int32) rotation=60)<line_sep>ax.tick_params(axis='y' which='both' length=0)<line_sep>ax.spines['top'].set_visible(<false>)<line_sep>ax.spines['right'].set_visible(<false>)<line_sep>plt.title(f'batch size={int(batch_size)}, input size={int(input_size)}')<line_sep>plt.plot(a[: 1] a[: -1] color=args.color[0])<line_sep>plt.plot(a[: 1] b[: -1] color=args.color[1])<line_sep>plt.xlabel('hidden size')<line_sep>plt.ylabel('time (ms)')<line_sep>plt.legend(args.name frameon=<false>)<line_sep>plt.tight_layout()<if_stmt>args.save<block_start>os.makedirs(args.save[0] exist_ok=<true>)<line_sep>plt.savefig(f'{args.save[0]}/report_n={int(batch_size)}_c={int(input_size)}.png' dpi=200)<block_end><else_stmt><block_start>plt.show()<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--name' nargs=2 default=['A' 'B'])<line_sep>parser.add_argument('--color' nargs=2 default=['#1f77b4' '#2ca02c'])<line_sep>parser.add_argument('--save' nargs=1 default=<none>)<line_sep>parser.add_argument('A')<line_sep>parser.add_argument('B')<line_sep>main(parser.parse_args())<block_end> |
<import_from_stmt>typing Optional<import_from_stmt>botocore.client BaseClient<import_from_stmt>typing Dict<import_from_stmt>typing Union<import_from_stmt>botocore.paginate Paginator<import_from_stmt>datetime datetime<import_from_stmt>botocore.waiter Waiter<import_from_stmt>typing List<class_stmt>Client(BaseClient)<block_start><def_stmt>abort_environment_update self EnvironmentId:str=<none> EnvironmentName:str=<none><block_start><pass><block_end><def_stmt>apply_environment_managed_action self ActionId:str EnvironmentName:str=<none> EnvironmentId:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>can_paginate self operation_name:str=<none><block_start><pass><block_end><def_stmt>check_dns_availability self CNAMEPrefix:str<arrow>Dict<block_start><pass><block_end><def_stmt>compose_environments self ApplicationName:str=<none> GroupName:str=<none> VersionLabels:List=<none><arrow>Dict<block_start><pass><block_end><def_stmt>create_application self ApplicationName:str Description:str=<none> ResourceLifecycleConfig:Dict=<none> Tags:List=<none><arrow>Dict<block_start><pass><block_end><def_stmt>create_application_version self ApplicationName:str VersionLabel:str Description:str=<none> SourceBuildInformation:Dict=<none> SourceBundle:Dict=<none> BuildConfiguration:Dict=<none> AutoCreateApplication:bool=<none> Process:bool=<none> Tags:List=<none><arrow>Dict<block_start><pass><block_end><def_stmt>create_configuration_template self ApplicationName:str TemplateName:str SolutionStackName:str=<none> PlatformArn:str=<none> SourceConfiguration:Dict=<none> EnvironmentId:str=<none> Description:str=<none> OptionSettings:List=<none> Tags:List=<none><arrow>Dict<block_start><pass><block_end><def_stmt>create_environment self ApplicationName:str EnvironmentName:str=<none> GroupName:str=<none> Description:str=<none> CNAMEPrefix:str=<none> Tier:Dict=<none> Tags:List=<none> VersionLabel:str=<none> TemplateName:str=<none> SolutionStackName:str=<none> PlatformArn:str=<none> OptionSettings:List=<none> OptionsToRemove:List=<none><arrow>Dict<block_start><pass><block_end><def_stmt>create_platform_version self PlatformName:str PlatformVersion:str PlatformDefinitionBundle:Dict EnvironmentName:str=<none> OptionSettings:List=<none> Tags:List=<none><arrow>Dict<block_start><pass><block_end><def_stmt>create_storage_location self<arrow>Dict<block_start><pass><block_end><def_stmt>delete_application self ApplicationName:str TerminateEnvByForce:bool=<none><block_start><pass><block_end><def_stmt>delete_application_version self ApplicationName:str VersionLabel:str DeleteSourceBundle:bool=<none><block_start><pass><block_end><def_stmt>delete_configuration_template self ApplicationName:str TemplateName:str<block_start><pass><block_end><def_stmt>delete_environment_configuration self ApplicationName:str EnvironmentName:str<block_start><pass><block_end><def_stmt>delete_platform_version self PlatformArn:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>describe_account_attributes self<arrow>Dict<block_start><pass><block_end><def_stmt>describe_application_versions self ApplicationName:str=<none> VersionLabels:List=<none> MaxRecords:int=<none> NextToken:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>describe_applications self ApplicationNames:List=<none><arrow>Dict<block_start><pass><block_end><def_stmt>describe_configuration_options self ApplicationName:str=<none> TemplateName:str=<none> EnvironmentName:str=<none> SolutionStackName:str=<none> PlatformArn:str=<none> 
Options:List=<none><arrow>Dict<block_start><pass><block_end><def_stmt>describe_configuration_settings self ApplicationName:str TemplateName:str=<none> EnvironmentName:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>describe_environment_health self EnvironmentName:str=<none> EnvironmentId:str=<none> AttributeNames:List=<none><arrow>Dict<block_start><pass><block_end><def_stmt>describe_environment_managed_action_history self EnvironmentId:str=<none> EnvironmentName:str=<none> NextToken:str=<none> MaxItems:int=<none><arrow>Dict<block_start><pass><block_end><def_stmt>describe_environment_managed_actions self EnvironmentName:str=<none> EnvironmentId:str=<none> Status:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>describe_environment_resources self EnvironmentId:str=<none> EnvironmentName:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>describe_environments self ApplicationName:str=<none> VersionLabel:str=<none> EnvironmentIds:List=<none> EnvironmentNames:List=<none> IncludeDeleted:bool=<none> IncludedDeletedBackTo:datetime=<none> MaxRecords:int=<none> NextToken:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>describe_events self ApplicationName:str=<none> VersionLabel:str=<none> TemplateName:str=<none> EnvironmentId:str=<none> EnvironmentName:str=<none> PlatformArn:str=<none> RequestId:str=<none> Severity:str=<none> StartTime:datetime=<none> EndTime:datetime=<none> MaxRecords:int=<none> NextToken:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>describe_instances_health self EnvironmentName:str=<none> EnvironmentId:str=<none> AttributeNames:List=<none> NextToken:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>describe_platform_version self PlatformArn:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>generate_presigned_url self ClientMethod:str=<none> Params:Dict=<none> ExpiresIn:int=<none> HttpMethod:str=<none><block_start><pass><block_end><def_stmt>get_paginator self operation_name:str=<none><arrow>Paginator<block_start><pass><block_end><def_stmt>get_waiter self waiter_name:str=<none><arrow>Waiter<block_start><pass><block_end><def_stmt>list_available_solution_stacks self<arrow>Dict<block_start><pass><block_end><def_stmt>list_platform_versions self Filters:List=<none> MaxRecords:int=<none> NextToken:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>list_tags_for_resource self ResourceArn:str<arrow>Dict<block_start><pass><block_end><def_stmt>rebuild_environment self EnvironmentId:str=<none> EnvironmentName:str=<none><block_start><pass><block_end><def_stmt>request_environment_info self InfoType:str EnvironmentId:str=<none> EnvironmentName:str=<none><block_start><pass><block_end><def_stmt>restart_app_server self EnvironmentId:str=<none> EnvironmentName:str=<none><block_start><pass><block_end><def_stmt>retrieve_environment_info self InfoType:str EnvironmentId:str=<none> EnvironmentName:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>swap_environment_cnames self SourceEnvironmentId:str=<none> SourceEnvironmentName:str=<none> DestinationEnvironmentId:str=<none> DestinationEnvironmentName:str=<none><block_start><pass><block_end><def_stmt>terminate_environment self EnvironmentId:str=<none> EnvironmentName:str=<none> TerminateResources:bool=<none> ForceTerminate:bool=<none><arrow>Dict<block_start><pass><block_end><def_stmt>update_application self ApplicationName:str Description:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>update_application_resource_lifecycle self 
ApplicationName:str ResourceLifecycleConfig:Dict<arrow>Dict<block_start><pass><block_end><def_stmt>update_application_version self ApplicationName:str VersionLabel:str Description:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>update_configuration_template self ApplicationName:str TemplateName:str Description:str=<none> OptionSettings:List=<none> OptionsToRemove:List=<none><arrow>Dict<block_start><pass><block_end><def_stmt>update_environment self ApplicationName:str=<none> EnvironmentId:str=<none> EnvironmentName:str=<none> GroupName:str=<none> Description:str=<none> Tier:Dict=<none> VersionLabel:str=<none> TemplateName:str=<none> SolutionStackName:str=<none> PlatformArn:str=<none> OptionSettings:List=<none> OptionsToRemove:List=<none><arrow>Dict<block_start><pass><block_end><def_stmt>update_tags_for_resource self ResourceArn:str TagsToAdd:List=<none> TagsToRemove:List=<none><block_start><pass><block_end><def_stmt>validate_configuration_settings self ApplicationName:str OptionSettings:List TemplateName:str=<none> EnvironmentName:str=<none><arrow>Dict<block_start><pass><block_end><block_end> |
# encoding: utf-8
"""
med.py
Created by <NAME> on 2009-11-05.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""<import_from_stmt>struct pack<import_from_stmt>struct unpack<import_from_stmt>exabgp.bgp.message.update.attribute.attribute Attribute<line_sep># ====================================================================== MED (4)
#
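# MED (the Multi-Exit Discriminator, attribute type code 4) is an optional
# non-transitive BGP path attribute carrying a 4-octet unsigned integer
# metric (RFC 4271); the pack('!L', med) below encodes exactly that value.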
@Attribute.register()<class_stmt>MED(Attribute)<block_start>ID=Attribute.CODE.MED<line_sep>FLAG=Attribute.Flag.OPTIONAL<line_sep>CACHING=<true><def_stmt>__init__ self med packed=<none><block_start>self.med=med<line_sep>self._packed=self._attribute(packed<if>packed<is><not><none><else>pack('!L' med))<block_end><def_stmt>__eq__ self other<block_start><return>self.ID<eq>other.ID<and>self.FLAG<eq>other.FLAG<and>self.med<eq>other.med<block_end><def_stmt>__ne__ self other<block_start><return><not>self.__eq__(other)<block_end><def_stmt>pack self negotiated=<none><block_start><return>self._packed<block_end><def_stmt>__len__ self<block_start><return>4<block_end><def_stmt>__repr__ self<block_start><return>str(self.med)<block_end><def_stmt>__hash__ self<block_start><return>hash(self.med)<block_end>@classmethod<def_stmt>unpack cls data direction negotiated<block_start><return>cls(unpack('!L' data)[0])<block_end><block_end> |
# VMware vSphere Python SDK
# Copyright (c) 2008-2015 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_future_stmt> print_function<import_stmt>tests<import_from_stmt>pyVim connect<class_stmt>ManagedObjectTests(tests.VCRTestBase)<block_start>@tests.VCRTestBase.my_vcr.use_cassette('root_folder_parent.yaml' cassette_library_dir=tests.fixtures_path record_mode='once')<def_stmt>test_root_folder_parent self# see: http://python3porting.com/noconv.html
<block_start>si=connect.SmartConnect(host='vcsa' user='my_user' pwd='<PASSWORD>')<line_sep>root_folder=si.content.rootFolder<line_sep>self.assertTrue(hasattr(root_folder 'parent'))<line_sep># NOTE (hartsock): assertIsNone does not work in Python 2.6
self.assertTrue(root_folder.parent<is><none>)<block_end><block_end> |
<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<def_stmt>get_true_positives actual detections iou_threshold<block_start>"""
Identify and flag true positive detections.
Args:
actual: Ground truth data as `pd.DataFrame` having
`image`, `object_name`, `object_index`, `x0`, `y0`, `x1`, `y1`
as columns.
detections: Detections as `pd.DataFrame` having
`image`, `object_name`, `object_index`, `x0`, `y0`, `x1`, `y1`,
`score` as columns.
iou_threshold: IoU fraction above which detections overlapping with
ground truths are considered true positives.
Returns:
`pd.DataFrame` containing the detections flagged as true positives.
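Note (added for clarity): overlap is scored as
iou = intersection_area / (actual_area + predicted_area - intersection_area),
and detections sharing a `detection_key` are de-duplicated so each
detection is counted at most once.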
"""<if_stmt>'detection_key'<not><in>detections.columns<block_start>detections['detection_key']=np.random.default_rng().choice(detections.shape[0] size=detections.shape[0] replace=<false>)<block_end>merged=actual.merge(detections on=['image' 'object_name'])<line_sep>merged['x0']=merged[['x0_x' 'x0_y']].max(1)<line_sep>merged['x1']=merged[['x1_x' 'x1_y']].min(1)<line_sep>merged['y0']=merged[['y0_x' 'y0_y']].max(1)<line_sep>merged['y1']=merged[['y1_x' 'y1_y']].min(1)<line_sep>true_intersect=(merged['x1']<g>merged['x0'])&(merged['y1']<g>merged['y0'])<line_sep>merged=merged[true_intersect]<line_sep>actual_areas=(merged['x1_x']-merged['x0_x'])<times>(merged['y1_x']-merged['y0_x'])<line_sep>predicted_areas=(merged['x1_y']-merged['x0_y'])<times>(merged['y1_y']-merged['y0_y'])<line_sep>intersection_areas=(merged['x1']-merged['x0'])<times>(merged['y1']-merged['y0'])<line_sep>merged['iou']=intersection_areas/(actual_areas+predicted_areas-intersection_areas)<line_sep>merged['true_positive']=<true><line_sep>merged['false_positive']=<false><line_sep>merged=merged[merged['iou']<ge>iou_threshold]<line_sep><return>merged.drop_duplicates(subset='detection_key')<block_end><def_stmt>get_false_positives detections true_positives<block_start>"""
Identify false positive detections (detections not flagged as true positives).
Args:
detections: Detections as `pd.DataFrame` having
`image`, `object_name`, `object_index`, `x0`, `y0`, `x1`, `y1`,
`score` as columns.
true_positives: `pd.DataFrame` of true positive detections, the result
of `get_true_positives`.
Returns:
`pd.DataFrame` containing the detections flagged as false positives.
"""<line_sep>keys_before=detections['detection_key'].values<line_sep>keys_after=true_positives['detection_key'].values<line_sep>false_keys=np.where(np.isin(keys_before keys_after invert=<true>))<line_sep>false_keys=keys_before[false_keys]<line_sep>false_positives=detections.set_index('detection_key').loc[false_keys]<line_sep>false_positives['true_positive']=<false><line_sep>false_positives['false_positive']=<true><line_sep><return>false_positives.reset_index()<block_end><def_stmt>calculate_ap combined total_actual<block_start>"""
Calculate single object average precision.
Args:
combined: `pd.DataFrame` containing true positives + false positives.
total_actual: Total instances of an object in the dataset.
Returns:
Updated combined with average precision calculated.
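Note (added for clarity): the body below sorts detections by score,
accumulates true and false positives, smooths precision with the max of
the current and previous value, and sums
(recall - previous_recall) * smoothed_precision into the average precision.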
"""<line_sep>combined=combined.sort_values(by='score' ascending=<false>).reset_index(drop=<true>)<line_sep>combined['acc_tp']=combined['true_positive'].cumsum()<line_sep>combined['acc_fp']=combined['false_positive'].cumsum()<line_sep>combined['precision']=combined['acc_tp']/(combined['acc_tp']+combined['acc_fp'])<line_sep>combined['recall']=combined['acc_tp']/total_actual<line_sep>combined['m_pre1']=combined['precision'].shift(1 fill_value=0)<line_sep>combined['m_pre']=combined[['m_pre1' 'precision']].max(axis=1)<line_sep>combined['m_rec1']=combined['recall'].shift(1 fill_value=0)<line_sep>combined.loc[combined['m_rec1']<ne>combined['recall'] 'valid_m_rec']=1<line_sep>combined['average_precision']=(combined['recall']-combined['m_rec1'])<times>combined['m_pre']<line_sep><return>combined<block_end><def_stmt>calculate_stats actual detections true_positives false_positives combined <block_start>"""
Calculate display data including total actual, total true positives, total false
positives and sort resulting `pd.DataFrame` by object average precision.
Args:
actual: Ground truth data as `pd.DataFrame` having
`image`, `object_name`, `object_index`, `x0`, `y0`, `x1`, `y1`
as columns.
detections: Detections as `pd.DataFrame` having
`image`, `object_name`, `object_index`, `x0`, `y0`, `x1`, `y1`,
`score` as columns.
true_positives: `pd.DataFrame` of true positive detections, the result
of `get_true_positives`.
false_positives: `pd.DataFrame` of false positive detections, the result
of `get_false_positives`.
combined: `pd.DataFrame` containing true positives + false positives.
Returns:
`pd.DataFrame` with calculated average precisions per dataset object.
"""<line_sep>class_stats=[]<for_stmt>object_name actual['object_name'].drop_duplicates().values<block_start>stats=dict()<line_sep>stats['object_name']=object_name<line_sep>stats['average_precision']=combined[combined['object_name']<eq>object_name]['average_precision'].sum()<line_sep>stats['actual']=actual[actual['object_name']<eq>object_name].shape[0]<line_sep>stats['detections']=detections[detections['object_name']<eq>object_name].shape[0]<line_sep>stats['true_positives']=true_positives[true_positives['object_name']<eq>object_name].shape[0]<line_sep>stats['false_positives']=false_positives[false_positives['object_name']<eq>object_name].shape[0]<line_sep>stats['combined']=combined[combined['object_name']<eq>object_name].shape[0]<line_sep>class_stats.append(stats)<block_end>total_stats=pd.DataFrame(class_stats).sort_values(by='average_precision' ascending=<false>)<line_sep><return>total_stats<block_end><def_stmt>calculate_map actual detections iou_threshold<block_start>"""
Calculate average precision per dataset object. The mean of the resulting
`pd.DataFrame` `average_precision` column is the mAP score.
Args:
actual: Ground truth data as `pd.DataFrame` having
`image`, `object_name`, `object_index`, `x0`, `y0`, `x1`, `y1`
as columns.
detections: Detections as `pd.DataFrame` having
`image`, `object_name`, `object_index`, `x0`, `y0`, `x1`, `y1`,
`score` as columns.
iou_threshold: IoU fraction above which detections overlapping with
ground truths are considered true positives.
Returns:
`pd.DataFrame`, the result of `calculate_stats`.
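Example (an illustrative sketch; `actual_df` and `detections_df` are
placeholder DataFrames in the formats described above):
stats = calculate_map(actual_df, detections_df, iou_threshold=0.5)
map_score = stats['average_precision'].mean()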
"""<line_sep>class_counts=actual['object_name'].value_counts().to_dict()<line_sep>true_positives=get_true_positives(actual detections iou_threshold)<line_sep>false_positives=get_false_positives(detections true_positives)<line_sep>true_positives=true_positives[[*set(true_positives.columns)&set(false_positives.columns)]]<line_sep>false_positives=false_positives[[*set(true_positives.columns)&set(false_positives.columns)]]<line_sep>combined=pd.concat([true_positives false_positives])<line_sep>combined=pd.concat([calculate_ap(group class_counts.get(object_name))<for>object_name,group combined.groupby('object_name')])<line_sep><return>calculate_stats(actual detections true_positives false_positives combined)<block_end> |
# stress test for the heap by allocating lots of objects within threads
# allocates about 5mb on the heap
#
# MIT license; Copyright (c) 2016 <NAME> on behalf of Pycom Ltd
<try_stmt><block_start><import_stmt>utime<as>time<block_end><except_stmt>ImportError<block_start><import_stmt>time<block_end><import_stmt>_thread<def_stmt>last l<block_start><return>l[-1]<block_end><def_stmt>thread_entry n# allocate a bytearray and fill it
<block_start>data=bytearray(i<for>i range(256))<line_sep># run a loop which allocates a small list and uses it each iteration
lst=8<times>[0]<line_sep>sum=0<for_stmt>i range(n)<block_start>sum<augadd>last(lst)<line_sep>lst=[0 0 0 0 0 0 0 i+1]<block_end># check that the bytearray still has the right data
<for_stmt>i,b enumerate(data)<block_start><assert_stmt>i<eq>b<block_end># print the result of the loop and indicate we are finished
<with_stmt>lock<block_start>print(sum lst[-1])<line_sep><global>n_finished<line_sep>n_finished<augadd>1<block_end><block_end>lock=_thread.allocate_lock()<line_sep>n_thread=10<line_sep>n_finished=0<line_sep># spawn threads
<for_stmt>i range(n_thread)<block_start>_thread.start_new_thread(thread_entry (10000 ))<block_end># wait for threads to finish
<while_stmt>n_finished<l>n_thread<block_start>time.sleep(1)<block_end> |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test cases for checking that the secondary Storage usage is accounted. This is verified by checking the usage_event table
for a volume in 'Uploaded' state.
This test case does the following:
1. Creates an account and uploads a volume.
2. After the volume is uploaded successfully, connects to the database.
3. From the database, verifies that an entry is added to the cloud.usage_event table for the uploaded volume.
4. Cleans up the resources.
"""<import_from_stmt>marvin.cloudstackTestCase *<import_from_stmt>marvin.cloudstackAPI *<import_from_stmt>marvin.lib.utils *<import_from_stmt>marvin.lib.base *<import_from_stmt>marvin.lib.common *<import_from_stmt>nose.plugins.attrib attr<import_from_stmt>marvin.sshClient SshClient<import_from_stmt>marvin.codes BACKED_UP PASS FAIL <import_stmt>time<def_stmt>verify_vm self vmid state<block_start>list_vm=list_virtual_machines(self.userapiclient account=self.account.name domainid=self.account.domainid id=vmid)<line_sep>self.assertEqual(validateList(list_vm)[0] PASS "Check List vm response for vmid: %s"%vmid)<line_sep>self.assertGreater(len(list_vm) 0 "Check the list vm response for vm id: %s"%vmid)<line_sep>vm=list_vm[0]<line_sep>self.assertEqual(vm.id str(vmid) "Vm deployed is different from the test")<line_sep>self.assertEqual(vm.state state "VM is in %s state"%state)<block_end><def_stmt>uploadVolume self# upload a volume
<block_start>self.debug("Upload volume format is '%s'"%self.uploadVolumeformat)<line_sep>self.testdata["configurableData"]["upload_volume"]["format"]=self.uploadVolumeformat<line_sep>self.testdata["configurableData"]["upload_volume"]["url"]=self.uploadvolumeUrl<line_sep>upload_volume=Volume.upload(self.apiclient self.testdata["configurableData"]["upload_volume"] account=self.account.name domainid=self.domain.id zoneid=self.zone.id)<line_sep>upload_volume.wait_for_upload(self.apiclient)<line_sep><return>upload_volume.id<block_end><def_stmt>restartUsageServer self#Restart usage server
<block_start>sshClient=SshClient(self.mgtSvrDetails["mgtSvrIp"] 22 self.mgtSvrDetails["user"] self.mgtSvrDetails["passwd"])<line_sep>command="service cloudstack-usage restart"<line_sep>sshClient.execute(command)<line_sep><return><block_end><def_stmt>checkUsage self uuid_upload_volume_id<block_start>volume_id=self.dbclient.execute("SELECT id from cloud.volumes where uuid='%s';"%uuid_upload_volume_id)<line_sep>self.debug("Volume id of uploaded volume is= %s"%volume_id[0])<line_sep>qryresult_after_usageServerExecution=self.dbclient.execute("SELECT type FROM cloud.usage_event where resource_id = '%s';"%(volume_id[0]))<line_sep>self.debug("Usage Type is %s "%qryresult_after_usageServerExecution[0][0])<line_sep>self.assertEqual(qryresult_after_usageServerExecution[0][0] 'VOLUME.UPLOAD')<block_end><class_stmt>TestSecondaryVolumeUsage(cloudstackTestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>testClient=super(TestSecondaryVolumeUsage cls).getClsTestClient()<line_sep>cls.apiclient=testClient.getApiClient()<line_sep>cls.dbclient=testClient.getDbConnection()<line_sep>cls.testdata=testClient.getParsedTestDataConfig()<line_sep>cls.hypervisor=cls.testClient.getHypervisorInfo()<line_sep>cls.storagetype='shared'<line_sep># Get Zone, Domain and templates
cls.domain=get_domain(cls.apiclient)<line_sep>cls.zone=get_zone(cls.apiclient testClient.getZoneForTests())<line_sep>cls.mgtSvrDetails=cls.config.__dict__["mgtSvr"][0].__dict__<line_sep>cls._cleanup=[]<line_sep># Create an account
cls.account=Account.create(cls.apiclient cls.testdata["account"] domainid=cls.domain.id)<line_sep>cls._cleanup.append(cls.account)<line_sep># Create user api client of the account
cls.userapiclient=testClient.getUserApiClient(UserName=cls.account.name DomainName=cls.account.domain)<line_sep># Create Service offering
cls.service_offering=ServiceOffering.create(cls.apiclient cls.testdata["service_offering"] )<line_sep>cls._cleanup.append(cls.service_offering)<line_sep>cls.disk_offering=DiskOffering.create(cls.apiclient cls.testdata["disk_offering"] )<line_sep>cls._cleanup.append(cls.disk_offering)<line_sep>cls.skip=0<line_sep>hosts=list_hosts(cls.apiclient type="Routing")<for_stmt>hypervisorhost hosts<block_start><if_stmt>hypervisorhost.hypervisor.lower()<in>["xenserver"]<block_start>cls.uploadVolumeformat="VHD"<line_sep>cls.uploadvolumeUrl="http://download.cloudstack.org/releases/2.0.0/systemvm.vhd.bz2"<line_sep><break><block_end><elif_stmt>hypervisorhost.hypervisor.lower()<in>["vmware"]<block_start>cls.uploadVolumeformat="OVA"<line_sep>cls.uploadvolumeUrl="http://download.cloudstack.org/releases/2.2.0/systemvm-redundant-router.ova"<line_sep><break><block_end><elif_stmt>hypervisorhost.hypervisor<eq>"KVM"<block_start>cls.uploadVolumeformat="QCOW2"<line_sep>cls.uploadvolumeUrl="http://download.cloudstack.org/releases/2.0.0/UbuntuServer-10-04-64bit.qcow2.bz2"<line_sep><break><block_end><elif_stmt>hypervisorhost.hypervisor<eq>"LXC"<block_start>cls.uploadvolumeformat="QCOW2"<line_sep>cls.uploadvolumeUrl="http://download.cloudstack.org/releases/2.0.0/UbuntuServer-10-04-64bit.qcow2.bz2"<line_sep><break><block_end><else_stmt><block_start><break><block_end><block_end>cls.template=get_template(cls.apiclient cls.zone.id cls.testdata["ostype"])<try_stmt><block_start>cls.vm=VirtualMachine.create(cls.userapiclient cls.testdata["small"] templateid=cls.template.id accountid=cls.account.name domainid=cls.account.domainid serviceofferingid=cls.service_offering.id zoneid=cls.zone.id)<block_end><except_stmt>Exception<as>e<block_start>cls.tearDownClass()<line_sep><raise>e<block_end><return><block_end>@classmethod<def_stmt>tearDownClass cls<block_start><try_stmt><block_start>cleanup_resources(cls.apiclient cls._cleanup)<block_end><except_stmt>Exception<as>e<block_start><raise>Exception("Warning: Exception during cleanup : %s"%e)<block_end><block_end>@attr(tags=["basic" "advanced"] required_hardware="true")<def_stmt>test_01_SecondaryUsageUploadedVolume self<block_start><try_stmt><block_start>uploaded_volume_id_uuid=uploadVolume(self)<line_sep>checkUsage(self uploaded_volume_id_uuid)<block_end><except_stmt>Exception<as>e<block_start>self.tearDown()<line_sep><raise>e<block_end><return><block_end><block_end> |
<import_stmt>os<import_stmt>subprocess<import_stmt>re<import_stmt>uuid<class_stmt>ScliteHelper()<block_start>'''
The Sclite helper class calculates the word error rate (WER) and character error rate (CER)
given a predicted and actual text.
This class uses sclite2.4 (ftp://jaguar.ncsl.nist.gov/pub/sctk-2.4.10-20151007-1312Z.tar.bz2)
and formats the data accordingly.
Parameters
----------
sclite_location: optional, default="sctk-2.4.10/bin"
Location of the sclite_program
tmp_file_location: optional, default=tmp
folder to store the temporary text files.
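Example
-------
A minimal usage sketch (the sclite install location is an assumption):
cls = ScliteHelper(sclite_location='../SCTK/bin')
cls.add_text('predicted text', 'actual text')
num, cer = cls.get_cer()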
'''<def_stmt>__init__ self sclite_location=os.path.join(".." "SCTK" "bin") tmp_file_location="tmp" use_uuid=<true># Check if sclite exists
<block_start><assert_stmt>os.path.isdir(sclite_location) "{} does not exist".format(sclite_location)<line_sep>sclite_error="{} doesn't contain sclite".format(sclite_location)<line_sep>retries=10<for_stmt>i range(retries)<block_start><if_stmt>self._test_sclite(sclite_location)<block_start><break><block_end><elif_stmt>i<eq>retries-1<block_start><raise>sclite_error<block_end><block_end>self.sclite_location=sclite_location<if_stmt>use_uuid<block_start>tmp_file_location<augadd>"/"+str(uuid.uuid4())<block_end># Check if tmp_file_location exists
<if_stmt><not>os.path.isdir(tmp_file_location)<block_start>os.makedirs(tmp_file_location)<block_end>self.tmp_file_location=tmp_file_location<line_sep>self.predicted_text=[]<line_sep>self.actual_text=[]<block_end><def_stmt>clear self<block_start>'''
Clear the class for new calculations.
'''<line_sep>self.predicted_text=[]<line_sep>self.actual_text=[]<block_end><def_stmt>_test_sclite self sclite_location<block_start>sclite_path=os.path.join(sclite_location "sclite")<line_sep>command_line_options=[sclite_path]<try_stmt><block_start>subprocess.check_output(command_line_options stderr=subprocess.STDOUT)<block_end><except_stmt>OSError<block_start><return><false><block_end><except_stmt>subprocess.CalledProcessError<block_start><return><true><block_end><return><true><block_end><def_stmt>_write_string_to_sclite_file self sentences_arrays filename<block_start>SPEAKER_LABEL="(spk{}_{})"<line_sep># Split string into sentences
converted_string=''<for_stmt>i,sentences_array enumerate(sentences_arrays)<block_start><for_stmt>line,sentence enumerate(sentences_array)<block_start>converted_string<augadd>sentence+SPEAKER_LABEL.format(i+1 line+1)+"\n"<block_end><block_end># Write converted_string into file
filepath=os.path.join(self.tmp_file_location filename)<with_stmt>open(filepath "w")<as>f<block_start>f.write(converted_string)<block_end><block_end><def_stmt>_run_sclite self predicted_filename actual_filename mode output<block_start>'''
Run command line for sclite.
Parameters
---------
predicted_filename: str
file containing output string of the network
actual_filename: str
file containing string of the label
mode: string, Options = ["CER", "WER"]
Choose between CER or WER
output: string, Options = ["print", "string"]
Choose between printing the output or returning a string
Returns
-------
stdoutput
If string was chosen as the output option, this function will return a file
containing the stdout
'''<assert_stmt>mode<in>["CER" "WER"] "mode {} is not in ['CER', 'WER]".format(mode)<assert_stmt>output<in>["print" "string"] "output {} is not in ['print', 'string']".format(output)<line_sep>command_line=[os.path.join(self.sclite_location "sclite") "-h" os.path.join(self.tmp_file_location predicted_filename) "-r" os.path.join(self.tmp_file_location actual_filename) "-i" "rm"]<if_stmt>mode<eq>"WER"<block_start><pass><block_end># Word error rate is by default
retries=10<for_stmt>i range(retries)<block_start><try_stmt><block_start><if_stmt>mode<eq>"CER"<block_start>command_line.append("-c")<block_end><if_stmt>output<eq>"print"<block_start>subprocess.call(command_line)<block_end><elif_stmt>output<eq>"string"<block_start>cmd=subprocess.Popen(command_line stdout=subprocess.PIPE)<line_sep><return>cmd.stdout<block_end><block_end><except_stmt><block_start>print("There was an error")<block_end><block_end><block_end><def_stmt>_print_error_rate_summary self mode predicted_filename="predicted.txt" actual_filename="actual.txt"<block_start>'''
Print the error rate summary of sclite
Parameters
----------
mode: string, Options = ["CER", "WER"]
Choose between CER or WER
'''<line_sep>self._run_sclite(predicted_filename actual_filename mode output="print")<block_end><def_stmt>_get_error_rate self mode predicted_filename="predicted.txt" actual_filename="actual.txt"<block_start>'''
Get the error rate by analysing the output of sclite
Parameters
----------
mode: string, Options = ["CER", "WER"]
Choose between CER or WER
Returns
-------
number: int
The number of characters or words depending on the mode selected.
error_rate: float
'''<line_sep>number=<none><line_sep>er=<none><line_sep>output_file=self._run_sclite(predicted_filename actual_filename mode output="string")<line_sep>match_tar=r'.*Mean.*\|.* (\d*.\d) .* (\d*.\d).* \|'<for_stmt>line output_file.readlines()<block_start>match=re.match(match_tar line.decode('utf-8') re.M|re.I)<if_stmt>match<block_start>number=match.group(1)<line_sep>er=match.group(2)<block_end><block_end><assert_stmt>number<ne><none><and>er<ne><none> "Error in parsing output."<line_sep><return>float(number) 100.0-float(er)<block_end><def_stmt>_make_sclite_files self predicted_filename="predicted.txt" actual_filename="actual.txt"<block_start>'''
Write the stored predicted and actual text to the temporary sclite files.
Parameters
---------
predicted_filename: str, default: predicted.txt
filename of the predicted file
actual_filename: str, default: actual.txt
filename of the actual file
'''<line_sep>self._write_string_to_sclite_file(self.predicted_text filename=predicted_filename)<line_sep>self._write_string_to_sclite_file(self.actual_text filename=actual_filename)<block_end><def_stmt>add_text self predicted_text actual_text<block_start>'''
Function to save predicted and actual text pairs in memory.
Running the subsequent functions will generate the required text files.
'''<line_sep>self.predicted_text.append(predicted_text)<line_sep>self.actual_text.append(actual_text)<block_end><def_stmt>print_wer_summary self<block_start>'''
see _print_error_rate_summary for docstring
'''<line_sep>self._make_sclite_files()<line_sep>self._print_error_rate_summary(mode="WER")<block_end><def_stmt>print_cer_summary self<block_start>'''
see _print_error_rate_summary for docstring
'''<line_sep>self._make_sclite_files()<line_sep>self._print_error_rate_summary(mode="CER")<block_end><def_stmt>get_wer self<block_start>'''
See _get_error_rate for docstring
'''<line_sep>self._make_sclite_files()<line_sep><return>self._get_error_rate(mode="WER")<block_end><def_stmt>get_cer self<block_start>'''
See _get_error_rate for docstring
'''<line_sep>self._make_sclite_files()<line_sep><return>self._get_error_rate(mode="CER")<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>cls=ScliteHelper()<line_sep>actual1='Jonathan loves to eat apples. This is the second sentence.'<line_sep>predicted1='Jonothon loves to eot. This is the second santense.'<line_sep>cls.add_text(predicted1 actual1)<line_sep>actual2='Jonathan loves to eat apples. This is the second sentence.'<line_sep>predicted2='Jonothan loves to eot. This is the second santense.'<line_sep>cls.add_text(predicted2 actual2)<line_sep>cls.print_cer_summary()<line_sep>num,er=cls.get_cer()<line_sep>print(num er)<block_end> |
<import_stmt>taichi<as>ti<import_stmt>tina<line_sep>ti.init(ti.gpu)<line_sep>scene=tina.Scene((640 480) smoothing=<true> ssr=<true> taa=<true>)<line_sep>monkey_material=tina.PBR(metallic=0.0 roughness=0.4)<line_sep>monkey=tina.MeshModel('assets/monkey.obj')<line_sep>scene.add_object(monkey monkey_material)<line_sep>param_metallic=tina.Param()<line_sep>param_roughness=tina.Param()<line_sep>plane_material=tina.PBR(metallic=param_metallic roughness=param_roughness)<line_sep>plane=tina.MeshTransform(tina.MeshGrid(32) tina.scale(2)@tina.eularXYZ([-ti.pi/2 0 0]))<line_sep>scene.add_object(plane plane_material)<line_sep>gui=ti.GUI(res=scene.res)<line_sep>nsteps=gui.slider('nsteps' 1 128 1)<line_sep>nsamples=gui.slider('nsamples' 1 128 1)<line_sep>stepsize=gui.slider('stepsize' 0 32 0.1)<line_sep>tolerance=gui.slider('tolerance' 0 64 0.1)<line_sep>blurring=gui.slider('blurring' 1 8 1)<line_sep>metallic=gui.slider('metallic' 0 1 0.01)<line_sep>roughness=gui.slider('roughness' 0 1 0.01)<line_sep>nsteps.value=64<line_sep>nsamples.value=12<line_sep>blurring.value=4<line_sep>stepsize.value=2<line_sep>tolerance.value=15<line_sep>metallic.value=1.0<line_sep>roughness.value=0.0<while_stmt>gui.running<block_start>scene.ssr.nsteps[<none>]=int(nsteps.value)<line_sep>scene.ssr.nsamples[<none>]=int(nsamples.value)<line_sep>scene.ssr.blurring[<none>]=int(blurring.value)<line_sep>scene.ssr.stepsize[<none>]=stepsize.value<line_sep>scene.ssr.tolerance[<none>]=tolerance.value<line_sep>param_metallic.value[<none>]=metallic.value<line_sep>param_roughness.value[<none>]=roughness.value<line_sep>scene.input(gui)<line_sep>scene.render()<line_sep>gui.set_image(scene.img)<line_sep>gui.show()<block_end> |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
<import_from_future_stmt> absolute_import<import_stmt>io<import_stmt>json<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_stmt>pytest<import_from_stmt>sagemaker.deserializers StringDeserializer BytesDeserializer CSVDeserializer StreamDeserializer NumpyDeserializer JSONDeserializer PandasDeserializer JSONLinesDeserializer <def_stmt>test_string_deserializer <block_start>deserializer=StringDeserializer()<line_sep>result=deserializer.deserialize(io.BytesIO(b"[1, 2, 3]") "application/json")<assert_stmt>result<eq>"[1, 2, 3]"<block_end><def_stmt>test_bytes_deserializer <block_start>deserializer=BytesDeserializer()<line_sep>result=deserializer.deserialize(io.BytesIO(b"[1, 2, 3]") "application/json")<assert_stmt>result<eq>b"[1, 2, 3]"<block_end>@pytest.fixture<def_stmt>csv_deserializer <block_start><return>CSVDeserializer()<block_end><def_stmt>test_csv_deserializer_single_element csv_deserializer<block_start>result=csv_deserializer.deserialize(io.BytesIO(b"1") "text/csv")<assert_stmt>result<eq>[["1"]]<block_end><def_stmt>test_csv_deserializer_array csv_deserializer<block_start>result=csv_deserializer.deserialize(io.BytesIO(b"1,2,3") "text/csv")<assert_stmt>result<eq>[["1" "2" "3"]]<block_end><def_stmt>test_csv_deserializer_2dimensional csv_deserializer<block_start>result=csv_deserializer.deserialize(io.BytesIO(b"1,2,3\n3,4,5") "text/csv")<assert_stmt>result<eq>[["1" "2" "3"] ["3" "4" "5"]]<block_end><def_stmt>test_csv_deserializer_posix_compliant csv_deserializer<block_start>result=csv_deserializer.deserialize(io.BytesIO(b"1,2,3\n3,4,5\n") "text/csv")<assert_stmt>result<eq>[["1" "2" "3"] ["3" "4" "5"]]<block_end><def_stmt>test_stream_deserializer <block_start>deserializer=StreamDeserializer()<line_sep>stream,content_type=deserializer.deserialize(io.BytesIO(b"[1, 2, 3]") "application/json")<try_stmt><block_start>result=stream.read()<block_end><finally_stmt><block_start>stream.close()<block_end><assert_stmt>result<eq>b"[1, 2, 3]"<assert_stmt>content_type<eq>"application/json"<block_end>@pytest.fixture<def_stmt>numpy_deserializer <block_start><return>NumpyDeserializer()<block_end><def_stmt>test_numpy_deserializer_from_csv numpy_deserializer<block_start>stream=io.BytesIO(b"1,2,3\n4,5,6")<line_sep>array=numpy_deserializer.deserialize(stream "text/csv")<assert_stmt>np.array_equal(array np.array([[1 2 3] [4 5 6]]))<block_end><def_stmt>test_numpy_deserializer_from_csv_ragged numpy_deserializer<block_start>stream=io.BytesIO(b"1,2,3\n4,5,6,7")<with_stmt>pytest.raises(ValueError)<as>error<block_start>numpy_deserializer.deserialize(stream "text/csv")<block_end><assert_stmt>"errors were detected"<in>str(error)<block_end><def_stmt>test_numpy_deserializer_from_csv_alpha <block_start>numpy_deserializer=NumpyDeserializer(dtype="U5")<line_sep>stream=io.BytesIO(b"hello,2,3\n4,5,6")<line_sep>array=numpy_deserializer.deserialize(stream "text/csv")<assert_stmt>np.array_equal(array np.array([["hello" 2 3] [4 5 6]]))<block_end><def_stmt>test_numpy_deserializer_from_json numpy_deserializer<block_start>stream=io.BytesIO(b"[[1,2,3],\n[4,5,6]]")<line_sep>array=numpy_deserializer.deserialize(stream "application/json")<assert_stmt>np.array_equal(array np.array([[1 2 3] [4 5 6]]))<block_end># Sadly, ragged arrays work fine in JSON (giving us a 1D array of Python lists)
<def_stmt>test_numpy_deserializer_from_json_ragged numpy_deserializer<block_start>stream=io.BytesIO(b"[[1,2,3],\n[4,5,6,7]]")<line_sep>array=numpy_deserializer.deserialize(stream "application/json")<assert_stmt>np.array_equal(array np.array([[1 2 3] [4 5 6 7]]))<block_end><def_stmt>test_numpy_deserializer_from_json_alpha <block_start>numpy_deserializer=NumpyDeserializer(dtype="U5")<line_sep>stream=io.BytesIO(b'[["hello",2,3],\n[4,5,6]]')<line_sep>array=numpy_deserializer.deserialize(stream "application/json")<assert_stmt>np.array_equal(array np.array([["hello" 2 3] [4 5 6]]))<block_end><def_stmt>test_numpy_deserializer_from_npy numpy_deserializer<block_start>array=np.ones((2 3))<line_sep>stream=io.BytesIO()<line_sep>np.save(stream array)<line_sep>stream.seek(0)<line_sep>result=numpy_deserializer.deserialize(stream "application/x-npy")<assert_stmt>np.array_equal(array result)<block_end><def_stmt>test_numpy_deserializer_from_npy_object_array numpy_deserializer<block_start>array=np.array([{"a":"" "b":""} {"c":"" "d":""}])<line_sep>stream=io.BytesIO()<line_sep>np.save(stream array)<line_sep>stream.seek(0)<line_sep>result=numpy_deserializer.deserialize(stream "application/x-npy")<assert_stmt>np.array_equal(array result)<block_end><def_stmt>test_numpy_deserializer_from_npy_object_array_with_allow_pickle_false <block_start>numpy_deserializer=NumpyDeserializer(allow_pickle=<false>)<line_sep>array=np.array([{"a":"" "b":""} {"c":"" "d":""}])<line_sep>stream=io.BytesIO()<line_sep>np.save(stream array)<line_sep>stream.seek(0)<with_stmt>pytest.raises(ValueError)<block_start>numpy_deserializer.deserialize(stream "application/x-npy")<block_end><block_end>@pytest.fixture<def_stmt>json_deserializer <block_start><return>JSONDeserializer()<block_end><def_stmt>test_json_deserializer_array json_deserializer<block_start>result=json_deserializer.deserialize(io.BytesIO(b"[1, 2, 3]") "application/json")<assert_stmt>result<eq>[1 2 3]<block_end><def_stmt>test_json_deserializer_2dimensional json_deserializer<block_start>result=json_deserializer.deserialize(io.BytesIO(b"[[1, 2, 3], [3, 4, 5]]") "application/json")<assert_stmt>result<eq>[[1 2 3] [3 4 5]]<block_end><def_stmt>test_json_deserializer_invalid_data json_deserializer<block_start><with_stmt>pytest.raises(ValueError)<as>error<block_start>json_deserializer.deserialize(io.BytesIO(b"[[1]") "application/json")<block_end><assert_stmt>"column"<in>str(error)<block_end>@pytest.fixture<def_stmt>pandas_deserializer <block_start><return>PandasDeserializer()<block_end><def_stmt>test_pandas_deserializer_json pandas_deserializer<block_start>data={"col 1":{"row 1":"a" "row 2":"c"} "col 2":{"row 1":"b" "row 2":"d"}}<line_sep>stream=io.BytesIO(json.dumps(data).encode("utf-8"))<line_sep>result=pandas_deserializer.deserialize(stream "application/json")<line_sep>expected=pd.DataFrame([["a" "b"] ["c" "d"]] index=["row 1" "row 2"] columns=["col 1" "col 2"])<assert_stmt>result.equals(expected)<block_end><def_stmt>test_pandas_deserializer_csv pandas_deserializer<block_start>stream=io.BytesIO(b"col 1,col 2\na,b\nc,d")<line_sep>result=pandas_deserializer.deserialize(stream "text/csv")<line_sep>expected=pd.DataFrame([["a" "b"] ["c" "d"]] columns=["col 1" "col 2"])<assert_stmt>result.equals(expected)<block_end>@pytest.fixture<def_stmt>json_lines_deserializer <block_start><return>JSONLinesDeserializer()<block_end>@pytest.mark.parametrize("source, expected" [(b'["Name", "Score"]\n["Gilbert", 24]' [["Name" "Score"] ["Gilbert" 24]]) (b'["Name", "Score"]\n["Gilbert", 24]\n' [["Name" "Score"] 
["Gilbert" 24]]) (b'{"Name": "Gilbert", "Score": 24}\n{"Name": "Alexa", "Score": 29}' [{"Name":"Gilbert" "Score":24} {"Name":"Alexa" "Score":29}] ) ] )<def_stmt>test_json_lines_deserializer json_lines_deserializer source expected<block_start>stream=io.BytesIO(source)<line_sep>content_type="application/jsonlines"<line_sep>actual=json_lines_deserializer.deserialize(stream content_type)<assert_stmt>actual<eq>expected<block_end> |
"""This module contains functions related to Mantra Python filtering."""<line_sep># =============================================================================
# IMPORTS
# =============================================================================
# Standard Library
<import_stmt>logging<import_stmt>os<import_from_stmt>typing List Optional<line_sep>_logger=logging.getLogger(__name__)<line_sep># =============================================================================
# FUNCTIONS
# =============================================================================
<def_stmt>build_pyfilter_command pyfilter_args:Optional[List[str]]=<none> pyfilter_path:Optional[str]=<none><arrow>str<block_start>"""Build a PyFilter -P command.
:param pyfilter_args: Optional list of args to pass to the command.
:param pyfilter_path: Optional path to the filter script.
:return: The constructed PyFilter command.
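:Example: With hypothetical arguments, ``build_pyfilter_command(["--some-arg"])``
returns a string of the form ``-P "/path/to/ht-pyfilter.py --some-arg"``.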
"""<import_stmt>hou<if_stmt>pyfilter_args<is><none><block_start>pyfilter_args=[]<block_end># If no path was passed, use the one located in the HOUDINI_PATH.
<if_stmt>pyfilter_path<is><none><block_start><try_stmt><block_start>pyfilter_path=hou.findFile("pyfilter/ht-pyfilter.py")<line_sep># If we can't find the script then log an error and return nothing.
<block_end><except_stmt>hou.OperationFailed<block_start>_logger.error("Could not find pyfilter/ht-pyfilter.py")<line_sep><return>""<block_end><block_end><else_stmt># Ensure the script path exists.
<block_start><if_stmt><not>os.path.isfile(pyfilter_path)<block_start><raise>OSError("No such file: {}".format(pyfilter_path))<block_end><block_end>cmd='-P "{} {}"'.format(pyfilter_path " ".join(pyfilter_args))<line_sep><return>cmd<block_end> |
<def_stmt>m2m_set instance field_name objs<block_start>getattr(instance field_name).set(objs)<block_end> |
# -*- coding: utf-8 -*-
# @Time: 2019/12/4 12:14 AM
# @Author: GraceKoo
# @File: 4_find_median_sorted_array.py
# @Desc: https://leetcode-cn.com/problems/median-of-two-sorted-arrays/
<import_stmt>timeit<class_stmt>Solution<block_start><def_stmt>findMedianSortedArrays self nums1 nums2<arrow>float<block_start>m=len(nums1)<line_sep>n=len(nums2)<line_sep>k=(m+n)%2<if_stmt>k<eq>1<block_start><return>self.find_k(nums1 nums2 (m+n)<floordiv>2)<block_end><else_stmt><block_start><return>(self.find_k(nums1 nums2 (m+n)<floordiv>2-1)+self.find_k(nums1 nums2 (m+n)<floordiv>2))/2<block_end><block_end><def_stmt>find_k self nums1 nums2 k<block_start><if_stmt><not>nums1<block_start><return>nums2[k]<block_end><if_stmt><not>nums2<block_start><return>nums1[k]<block_end># print("len nums1:", len(nums1), "len nums2:", len(nums2))
i=len(nums1)<floordiv>2<line_sep>j=len(nums2)<floordiv>2<line_sep># print(i, j, k)
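# Comparing the middle elements decides which half can be discarded: when
# k > i + j, the array with the smaller middle drops its first half (middle
# included) and k shrinks by the number of dropped elements; otherwise the
# array with the larger middle drops everything from its middle onward and
# k stays the same.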
<if_stmt>k<g>i+j<block_start><if_stmt>nums1[i]<g>nums2[j]<block_start><return>self.find_k(nums1 nums2[j+1:] k-j-1)<block_end><else_stmt><block_start><return>self.find_k(nums1[i+1:] nums2 k-i-1)<block_end><block_end><else_stmt><block_start><if_stmt>nums1[i]<g>nums2[j]<block_start><return>self.find_k(nums1[:i] nums2 k)<block_end><else_stmt><block_start><return>self.find_k(nums1 nums2[:j] k)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>so=Solution()<line_sep>nums1=[1 2 3]<line_sep>nums2=[1 2 3]<line_sep>start=timeit.default_timer()<line_sep>print(so.findMedianSortedArrays(nums1 nums2))<line_sep>end=timeit.default_timer()<line_sep>print(str((end-start)<times>1000) "s")<line_sep>nums1=[1 2 3]<line_sep>nums2=[4 5 6]<line_sep>print(so.findMedianSortedArrays(nums1 nums2))<line_sep>nums1=[1 2 3]<line_sep>nums2=[4 5]<line_sep>print(so.findMedianSortedArrays(nums1 nums2))<line_sep>nums1=[1 4 6]<line_sep>nums2=[2 5]<line_sep>print(so.findMedianSortedArrays(nums1 nums2))<block_end> |
WIDTH=87<line_sep>HEIGHT=87<line_sep>FIRST=0x20<line_sep>LAST=0x7f<line_sep>_font=b'\x00\x4a\x5a\x02\x44\x60\x44\x52\x60\x52\x02\x44\x60\x44\x60'<concat>b'\x60\x44\x02\x52\x52\x52\x3e\x52\x66\x02\x44\x60\x44\x44\x60'<concat>b'\x60\x02\x44\x60\x44\x52\x60\x52\x02\x46\x5e\x46\x59\x5e\x4b'<concat>b'\x02\x4b\x59\x4b\x5e\x59\x46\x02\x52\x52\x52\x44\x52\x60\x02'<concat>b'\x4b\x59\x4b\x46\x59\x5e\x02\x46\x5e\x46\x4b\x5e\x59\x02\x4b'<concat>b'\x59\x4b\x52\x59\x52\x02\x4d\x57\x4d\x57\x57\x4d\x02\x52\x52'<concat>b'\x52\x4b\x52\x59\x02\x4d\x57\x4d\x4d\x57\x57\x07\x47\x52\x52'<concat>b'\x47\x50\x47\x4d\x48\x4a\x4a\x48\x4d\x47\x50\x47\x52\x07\x47'<concat>b'\x52\x47\x52\x47\x54\x48\x57\x4a\x5a\x4d\x5c\x50\x5d\x52\x5d'<concat>b'\x07\x52\x5d\x52\x5d\x54\x5d\x57\x5c\x5a\x5a\x5c\x57\x5d\x54'<concat>b'\x5d\x52\x07\x52\x5d\x5d\x52\x5d\x50\x5c\x4d\x5a\x4a\x57\x48'<concat>b'\x54\x47\x52\x47\x08\x44\x60\x44\x4f\x47\x51\x4b\x53\x50\x54'<concat>b'\x54\x54\x59\x53\x5d\x51\x60\x4f\x08\x50\x55\x55\x44\x53\x47'<concat>b'\x51\x4b\x50\x50\x50\x54\x51\x59\x53\x5d\x55\x60\x08\x4f\x54'<concat>b'\x4f\x44\x51\x47\x53\x4b\x54\x50\x54\x54\x53\x59\x51\x5d\x4f'<concat>b'\x60\x08\x44\x60\x44\x55\x47\x53\x4b\x51\x50\x50\x54\x50\x59'<concat>b'\x51\x5d\x53\x60\x55\x04\x4b\x59\x52\x4a\x59\x4e\x4b\x56\x52'<concat>b'\x5a\x04\x4a\x5a\x4a\x52\x4e\x4b\x56\x59\x5a\x52\x04\x4b\x59'<concat>b'\x4b\x56\x4b\x4e\x59\x56\x59\x4e\x04\x4a\x5a\x4c\x58\x4a\x50'<concat>b'\x5a\x54\x58\x4c\x16\x4a\x5a\x4a\x5d\x4c\x5d\x4f\x5c\x51\x5b'<concat>b'\x54\x58\x55\x56\x56\x53\x56\x4f\x55\x4c\x54\x4a\x53\x49\x51'<concat>b'\x49\x50\x4a\x4f\x4c\x4e\x4f\x4e\x53\x4f\x56\x50\x58\x53\x5b'<concat>b'\x55\x5c\x58\x5d\x5a\x5d\x16\x49\x5d\x5d\x5a\x5d\x58\x5c\x55'<concat>b'\x5b\x53\x58\x50\x56\x4f\x53\x4e\x4f\x4e\x4c\x4f\x4a\x50\x49'<concat>b'\x51\x49\x53\x4a\x54\x4c\x55\x4f\x56\x53\x56\x56\x55\x58\x54'<concat>b'\x5b\x51\x5c\x4f\x5d\x4c\x5d\x4a\x16\x4a\x5a\x5a\x47\x58\x47'<concat>b'\x55\x48\x53\x49\x50\x4c\x4f\x4e\x4e\x51\x4e\x55\x4f\x58\x50'<concat>b'\x5a\x51\x5b\x53\x5b\x54\x5a\x55\x58\x56\x55\x56\x51\x55\x4e'<concat>b'\x54\x4c\x51\x49\x4f\x48\x4c\x47\x4a\x47\x16\x47\x5b\x47\x4a'<concat>b'\x47\x4c\x48\x4f\x49\x51\x4c\x54\x4e\x55\x51\x56\x55\x56\x58'<concat>b'\x55\x5a\x54\x5b\x53\x5b\x51\x5a\x50\x58\x4f\x55\x4e\x51\x4e'<concat>b'\x4e\x4f\x4c\x50\x49\x53\x48\x55\x47\x58\x47\x5a\x14\x45\x5b'<concat>b'\x45\x50\x46\x52\x48\x54\x4a\x55\x4d\x56\x51\x56\x55\x55\x58'<concat>b'\x53\x5a\x50\x5b\x4e\x5a\x4c\x57\x4c\x53\x4d\x51\x4e\x4e\x50'<concat>b'\x4c\x53\x4b\x56\x4b\x59\x4c\x5c\x4d\x5e\x12\x45\x59\x45\x54'<concat>b'\x48\x56\x4b\x57\x50\x57\x53\x56\x56\x54\x58\x51\x59\x4e\x59'<concat>b'\x4c\x58\x4b\x56\x4b\x53\x4c\x50\x4e\x4e\x51\x4d\x54\x4d\x59'<concat>b'\x4e\x5c\x50\x5f\x19\x4f\x55\x51\x4f\x4f\x51\x4f\x53\x51\x55'<concat>b'\x53\x55\x55\x53\x55\x51\x53\x4f\x51\x4f\x20\x52\x51\x50\x50'<concat>b'\x51\x50\x53\x51\x54\x53\x54\x54\x53\x54\x51\x53\x50\x51\x50'<concat>b'\x20\x52\x52\x51\x51\x52\x52\x53\x53\x52\x52\x51\x0a\x52\x57'<concat>b'\x52\x4d\x53\x4d\x55\x4e\x56\x4f\x57\x51\x57\x53\x56\x55\x55'<concat>b'\x56\x53\x57\x52\x57\x08\x44\x60\x44\x52\x4a\x52\x20\x52\x4f'<concat>b'\x52\x55\x52\x20\x52\x5a\x52\x60\x52\x04\x44\x60\x44\x55\x44'<concat>b'\x4f\x60\x4f\x60\x55\x05\x4a\x5a\x52\x44\x4a\x52\x20\x52\x52'<concat>b'\x44\x5a\x52\x08\x44\x60\x44\x52\x60\x52\x20\x52\x4a\x59\x5a'<concat>b'\x59\x20\x52\x50\x60\x54\x60\x08\x44\x60\x44\x52\x60\x52\x20'<concat>b'\x52\x44\x52\x52\x62\x20\x52\x60\x52\x52\x62\x11\x4b\x59\x51'<concat>b'\x4b\x4e\x4c\x4c\x4e\x4b\x51\x4b\x53\x4c\x56\x4e\x58\x51\x59'<c
oncat>b'\x53\x59\x56\x58\x58\x56\x59\x53\x59\x51\x58\x4e\x56\x4c\x53'<concat>b'\x4b\x51\x4b\x05\x4c\x58\x4c\x4c\x4c\x58\x58\x58\x58\x4c\x4c'<concat>b'\x4c\x04\x4b\x59\x52\x4a\x4b\x56\x59\x56\x52\x4a\x05\x4c\x58'<concat>b'\x52\x48\x4c\x52\x52\x5c\x58\x52\x52\x48\x0b\x4a\x5a\x52\x49'<concat>b'\x50\x4f\x4a\x4f\x4f\x53\x4d\x59\x52\x55\x57\x59\x55\x53\x5a'<concat>b'\x4f\x54\x4f\x52\x49\x05\x4b\x59\x52\x4b\x52\x59\x20\x52\x4b'<concat>b'\x52\x59\x52\x05\x4d\x57\x4d\x4d\x57\x57\x20\x52\x57\x4d\x4d'<concat>b'\x57\x08\x4d\x57\x52\x4c\x52\x58\x20\x52\x4d\x4f\x57\x55\x20'<concat>b'\x52\x57\x4f\x4d\x55\x22\x4e\x56\x51\x4e\x4f\x4f\x4e\x51\x4e'<concat>b'\x53\x4f\x55\x51\x56\x53\x56\x55\x55\x56\x53\x56\x51\x55\x4f'<concat>b'\x53\x4e\x51\x4e\x20\x52\x4f\x51\x4f\x53\x20\x52\x50\x50\x50'<concat>b'\x54\x20\x52\x51\x4f\x51\x55\x20\x52\x52\x4f\x52\x55\x20\x52'<concat>b'\x53\x4f\x53\x55\x20\x52\x54\x50\x54\x54\x20\x52\x55\x51\x55'<concat>b'\x53\x1a\x4e\x56\x4e\x4e\x4e\x56\x56\x56\x56\x4e\x4e\x4e\x20'<concat>b'\x52\x4f\x4f\x4f\x55\x20\x52\x50\x4f\x50\x55\x20\x52\x51\x4f'<concat>b'\x51\x55\x20\x52\x52\x4f\x52\x55\x20\x52\x53\x4f\x53\x55\x20'<concat>b'\x52\x54\x4f\x54\x55\x20\x52\x55\x4f\x55\x55\x10\x4d\x57\x52'<concat>b'\x4c\x4d\x55\x57\x55\x52\x4c\x20\x52\x52\x4f\x4f\x54\x20\x52'<concat>b'\x52\x4f\x55\x54\x20\x52\x52\x52\x51\x54\x20\x52\x52\x52\x53'<concat>b'\x54\x10\x4c\x55\x4c\x52\x55\x57\x55\x4d\x4c\x52\x20\x52\x4f'<concat>b'\x52\x54\x55\x20\x52\x4f\x52\x54\x4f\x20\x52\x52\x52\x54\x53'<concat>b'\x20\x52\x52\x52\x54\x51\x10\x4d\x57\x52\x58\x57\x4f\x4d\x4f'<concat>b'\x52\x58\x20\x52\x52\x55\x55\x50\x20\x52\x52\x55\x4f\x50\x20'<concat>b'\x52\x52\x52\x53\x50\x20\x52\x52\x52\x51\x50\x10\x4f\x58\x58'<concat>b'\x52\x4f\x4d\x4f\x57\x58\x52\x20\x52\x55\x52\x50\x4f\x20\x52'<concat>b'\x55\x52\x50\x55\x20\x52\x52\x52\x50\x51\x20\x52\x52\x52\x50'<concat>b'\x53\x0a\x52\x59\x52\x4b\x52\x59\x20\x52\x52\x4b\x59\x4e\x52'<concat>b'\x51\x20\x52\x53\x4d\x56\x4e\x53\x4f\x14\x49\x5b\x52\x47\x52'<concat>b'\x56\x20\x52\x4d\x4a\x57\x50\x20\x52\x57\x4a\x4d\x50\x20\x52'<concat>b'\x49\x56\x4c\x5c\x20\x52\x5b\x56\x58\x5c\x20\x52\x49\x56\x5b'<concat>b'\x56\x20\x52\x4c\x5c\x58\x5c\x0c\x4d\x57\x52\x4c\x52\x58\x20'<concat>b'\x52\x4f\x4f\x55\x4f\x20\x52\x4d\x55\x4f\x57\x51\x58\x53\x58'<concat>b'\x55\x57\x57\x55\x0a\x4c\x58\x52\x4c\x52\x58\x20\x52\x4c\x51'<concat>b'\x4d\x4f\x57\x4f\x58\x51\x20\x52\x50\x57\x54\x57\x0d\x4b\x59'<concat>b'\x4d\x4e\x57\x58\x20\x52\x57\x4e\x4d\x58\x20\x52\x4f\x4c\x4c'<concat>b'\x4f\x4b\x51\x20\x52\x55\x4c\x58\x4f\x59\x51\x11\x49\x5b\x4e'<concat>b'\x49\x49\x5b\x20\x52\x56\x49\x5b\x5b\x20\x52\x4d\x4d\x5b\x5b'<concat>b'\x20\x52\x57\x4d\x49\x5b\x20\x52\x4e\x49\x56\x49\x20\x52\x4d'<concat>b'\x4d\x57\x4d\x02\x4b\x59\x4b\x46\x59\x5e\x0a\x47\x5b\x4d\x4a'<concat>b'\x53\x56\x20\x52\x4b\x50\x53\x4c\x20\x52\x47\x5c\x5b\x5c\x5b'<concat>b'\x52\x47\x5c\x0d\x4c\x58\x50\x4c\x50\x50\x4c\x50\x4c\x54\x50'<concat>b'\x54\x50\x58\x54\x58\x54\x54\x58\x54\x58\x50\x54\x50\x54\x4c'<concat>b'\x50\x4c\x1f\x4b\x59\x59\x50\x58\x4e\x56\x4c\x53\x4b\x51\x4b'<concat>b'\x4e\x4c\x4c\x4e\x4b\x51\x4b\x53\x4c\x56\x4e\x58\x51\x59\x53'<concat>b'\x59\x56\x58\x58\x56\x59\x54\x20\x52\x59\x50\x57\x4e\x55\x4d'<concat>b'\x53\x4d\x51\x4e\x50\x4f\x4f\x51\x4f\x53\x50\x55\x51\x56\x53'<concat>b'\x57\x55\x57\x57\x56\x59\x54\x09\x4b\x59\x52\x4a\x4b\x56\x59'<concat>b'\x56\x52\x4a\x20\x52\x52\x5a\x59\x4e\x4b\x4e\x52\x5a\x21\x47'<concat>b'\x5d\x50\x49\x50\x47\x51\x46\x53\x46\x54\x47\x54\x49\x20\x52'<concat>b'\x47\x5a\x48\x58\x4a\x56\x4b\x54\x4c\x50\x4c\x4b\x4d\x4a\x4f'<concat
>b'\x49\x55\x49\x57\x4a\x58\x4b\x58\x50\x59\x54\x5a\x56\x5c\x58'<concat>b'\x5d\x5a\x20\x52\x47\x5a\x5d\x5a\x20\x52\x51\x5a\x50\x5b\x51'<concat>b'\x5c\x53\x5c\x54\x5b\x53\x5a\x3f\x4a\x5a\x52\x4d\x52\x53\x20'<concat>b'\x52\x52\x53\x51\x5c\x20\x52\x52\x53\x53\x5c\x20\x52\x51\x5c'<concat>b'\x53\x5c\x20\x52\x52\x4d\x51\x4a\x50\x48\x4e\x47\x20\x52\x51'<concat>b'\x4a\x4e\x47\x20\x52\x52\x4d\x53\x4a\x54\x48\x56\x47\x20\x52'<concat>b'\x53\x4a\x56\x47\x20\x52\x52\x4d\x4e\x4b\x4c\x4b\x4a\x4d\x20'<concat>b'\x52\x50\x4c\x4c\x4c\x4a\x4d\x20\x52\x52\x4d\x56\x4b\x58\x4b'<concat>b'\x5a\x4d\x20\x52\x54\x4c\x58\x4c\x5a\x4d\x20\x52\x52\x4d\x50'<concat>b'\x4e\x4f\x4f\x4f\x52\x20\x52\x52\x4d\x50\x4f\x4f\x52\x20\x52'<concat>b'\x52\x4d\x54\x4e\x55\x4f\x55\x52\x20\x52\x52\x4d\x54\x4f\x55'<concat>b'\x52\x5d\x4a\x5a\x52\x49\x52\x4b\x20\x52\x52\x4e\x52\x50\x20'<concat>b'\x52\x52\x53\x52\x55\x20\x52\x52\x59\x51\x5c\x20\x52\x52\x59'<concat>b'\x53\x5c\x20\x52\x51\x5c\x53\x5c\x20\x52\x52\x47\x51\x49\x50'<concat>b'\x4a\x20\x52\x52\x47\x53\x49\x54\x4a\x20\x52\x50\x4a\x52\x49'<concat>b'\x54\x4a\x20\x52\x52\x4b\x50\x4e\x4e\x4f\x4d\x4e\x20\x52\x52'<concat>b'\x4b\x54\x4e\x56\x4f\x57\x4e\x20\x52\x4e\x4f\x50\x4f\x52\x4e'<concat>b'\x54\x4f\x56\x4f\x20\x52\x52\x50\x50\x53\x4e\x54\x4c\x54\x4b'<concat>b'\x52\x4b\x53\x4c\x54\x20\x52\x52\x50\x54\x53\x56\x54\x58\x54'<concat>b'\x59\x52\x59\x53\x58\x54\x20\x52\x4e\x54\x50\x54\x52\x53\x54'<concat>b'\x54\x56\x54\x20\x52\x52\x55\x50\x58\x4f\x59\x4d\x5a\x4c\x5a'<concat>b'\x4b\x59\x4a\x57\x4a\x59\x4c\x5a\x20\x52\x52\x55\x54\x58\x55'<concat>b'\x59\x57\x5a\x58\x5a\x59\x59\x5a\x57\x5a\x59\x58\x5a\x20\x52'<concat>b'\x4d\x5a\x4f\x5a\x52\x59\x55\x5a\x57\x5a\x27\x4a\x5a\x52\x59'<concat>b'\x51\x5c\x20\x52\x52\x59\x53\x5c\x20\x52\x51\x5c\x53\x5c\x20'<concat>b'\x52\x52\x59\x55\x5a\x58\x5a\x5a\x58\x5a\x55\x59\x54\x57\x54'<concat>b'\x59\x52\x5a\x4f\x59\x4d\x57\x4c\x55\x4d\x56\x4a\x55\x48\x53'<concat>b'\x47\x51\x47\x4f\x48\x4e\x4a\x4f\x4d\x4d\x4c\x4b\x4d\x4a\x4f'<concat>b'\x4b\x52\x4d\x54\x4b\x54\x4a\x55\x4a\x58\x4c\x5a\x4f\x5a\x52'<concat>b'\x59\x1f\x4a\x5a\x52\x59\x51\x5c\x20\x52\x52\x59\x53\x5c\x20'<concat>b'\x52\x51\x5c\x53\x5c\x20\x52\x52\x59\x56\x58\x56\x56\x58\x55'<concat>b'\x58\x52\x5a\x51\x5a\x4c\x59\x49\x58\x48\x56\x48\x54\x47\x50'<concat>b'\x47\x4e\x48\x4c\x48\x4b\x49\x4a\x4c\x4a\x51\x4c\x52\x4c\x55'<concat>b'\x4e\x56\x4e\x58\x52\x59\x0e\x49\x5b\x49\x50\x4b\x52\x20\x52'<concat>b'\x4c\x4b\x4e\x50\x20\x52\x52\x47\x52\x4f\x20\x52\x58\x4b\x56'<concat>b'\x50\x20\x52\x5b\x50\x59\x52\x1b\x47\x5d\x49\x49\x4a\x4b\x4b'<concat>b'\x4f\x4b\x55\x4a\x59\x49\x5b\x20\x52\x5b\x49\x5a\x4b\x59\x4f'<concat>b'\x59\x55\x5a\x59\x5b\x5b\x20\x52\x49\x49\x4b\x4a\x4f\x4b\x55'<concat>b'\x4b\x59\x4a\x5b\x49\x20\x52\x49\x5b\x4b\x5a\x4f\x59\x55\x59'<concat>b'\x59\x5a\x5b\x5b\x36\x46\x5e\x52\x52\x52\x5b\x51\x5c\x20\x52'<concat>b'\x52\x56\x51\x5c\x20\x52\x52\x49\x51\x48\x4f\x48\x4e\x49\x4e'<concat>b'\x4b\x4f\x4e\x52\x52\x20\x52\x52\x49\x53\x48\x55\x48\x56\x49'<concat>b'\x56\x4b\x55\x4e\x52\x52\x20\x52\x52\x52\x4e\x4f\x4c\x4e\x4a'<concat>b'\x4e\x49\x4f\x49\x51\x4a\x52\x20\x52\x52\x52\x56\x4f\x58\x4e'<concat>b'\x5a\x4e\x5b\x4f\x5b\x51\x5a\x52\x20\x52\x52\x52\x4e\x55\x4c'<concat>b'\x56\x4a\x56\x49\x55\x49\x53\x4a\x52\x20\x52\x52\x52\x56\x55'<concat>b'\x58\x56\x5a\x56\x5b\x55\x5b\x53\x5a\x52\x2d\x4a\x5a\x55\x49'<concat>b'\x54\x4a\x55\x4b\x56\x4a\x56\x49\x55\x47\x53\x46\x51\x46\x4f'<concat>b'\x47\x4e\x49\x4e\x4b\x4f\x4d\x51\x4f\x56\x52\x20\x52\x4f\x4d'<concat>b'\x54\x50\x56\x52\x57\x54\x57\x56\x56\x58\x54\x5a\x20\x52\x50'<concat>b'\x
4e\x4e\x50\x4d\x52\x4d\x54\x4e\x56\x50\x58\x55\x5b\x20\x52'<concat>b'\x4e\x56\x53\x59\x55\x5b\x56\x5d\x56\x5f\x55\x61\x53\x62\x51'<concat>b'\x62\x4f\x61\x4e\x5f\x4e\x5e\x4f\x5d\x50\x5e\x4f\x5f\x1d\x4a'<concat>b'\x5a\x52\x46\x51\x48\x52\x4a\x53\x48\x52\x46\x20\x52\x52\x46'<concat>b'\x52\x62\x20\x52\x52\x51\x51\x54\x52\x62\x53\x54\x52\x51\x20'<concat>b'\x52\x4c\x4d\x4e\x4e\x50\x4d\x4e\x4c\x4c\x4d\x20\x52\x4c\x4d'<concat>b'\x58\x4d\x20\x52\x54\x4d\x56\x4e\x58\x4d\x56\x4c\x54\x4d\x37'<concat>b'\x4a\x5a\x52\x46\x51\x48\x52\x4a\x53\x48\x52\x46\x20\x52\x52'<concat>b'\x46\x52\x54\x20\x52\x52\x50\x51\x52\x53\x56\x52\x58\x51\x56'<concat>b'\x53\x52\x52\x50\x20\x52\x52\x54\x52\x62\x20\x52\x52\x5e\x51'<concat>b'\x60\x52\x62\x53\x60\x52\x5e\x20\x52\x4c\x4d\x4e\x4e\x50\x4d'<concat>b'\x4e\x4c\x4c\x4d\x20\x52\x4c\x4d\x58\x4d\x20\x52\x54\x4d\x56'<concat>b'\x4e\x58\x4d\x56\x4c\x54\x4d\x20\x52\x4c\x5b\x4e\x5c\x50\x5b'<concat>b'\x4e\x5a\x4c\x5b\x20\x52\x4c\x5b\x58\x5b\x20\x52\x54\x5b\x56'<concat>b'\x5c\x58\x5b\x56\x5a\x54\x5b\x11\x45\x5f\x52\x49\x51\x4a\x52'<concat>b'\x4b\x53\x4a\x52\x49\x20\x52\x49\x59\x48\x5a\x49\x5b\x4a\x5a'<concat>b'\x49\x59\x20\x52\x5b\x59\x5a\x5a\x5b\x5b\x5c\x5a\x5b\x59\x20'<concat>b'\x46\x5e\x52\x48\x4e\x4c\x4b\x50\x4a\x53\x4a\x55\x4b\x57\x4d'<concat>b'\x58\x4f\x58\x51\x57\x52\x55\x20\x52\x52\x48\x56\x4c\x59\x50'<concat>b'\x5a\x53\x5a\x55\x59\x57\x57\x58\x55\x58\x53\x57\x52\x55\x20'<concat>b'\x52\x52\x55\x51\x59\x50\x5c\x20\x52\x52\x55\x53\x59\x54\x5c'<concat>b'\x20\x52\x50\x5c\x54\x5c\x19\x46\x5e\x52\x4e\x51\x4b\x50\x49'<concat>b'\x4e\x48\x4d\x48\x4b\x49\x4a\x4b\x4a\x4f\x4b\x52\x4c\x54\x4e'<concat>b'\x57\x52\x5c\x20\x52\x52\x4e\x53\x4b\x54\x49\x56\x48\x57\x48'<concat>b'\x59\x49\x5a\x4b\x5a\x4f\x59\x52\x58\x54\x56\x57\x52\x5c\x13'<concat>b'\x46\x5e\x52\x47\x50\x4a\x4c\x4f\x49\x52\x20\x52\x52\x47\x54'<concat>b'\x4a\x58\x4f\x5b\x52\x20\x52\x49\x52\x4c\x55\x50\x5a\x52\x5d'<concat>b'\x20\x52\x5b\x52\x58\x55\x54\x5a\x52\x5d\x2f\x46\x5e\x52\x54'<concat>b'\x54\x57\x56\x58\x58\x58\x5a\x57\x5b\x55\x5b\x53\x5a\x51\x58'<concat>b'\x50\x56\x50\x53\x51\x20\x52\x53\x51\x55\x4f\x56\x4d\x56\x4b'<concat>b'\x55\x49\x53\x48\x51\x48\x4f\x49\x4e\x4b\x4e\x4d\x4f\x4f\x51'<concat>b'\x51\x20\x52\x51\x51\x4e\x50\x4c\x50\x4a\x51\x49\x53\x49\x55'<concat>b'\x4a\x57\x4c\x58\x4e\x58\x50\x57\x52\x54\x20\x52\x52\x54\x51'<concat>b'\x59\x50\x5c\x20\x52\x52\x54\x53\x59\x54\x5c\x20\x52\x50\x5c'<concat>b'\x54\x5c\x2f\x49\x5b\x56\x2b\x53\x2d\x51\x2f\x50\x31\x4f\x34'<concat>b'\x4f\x38\x50\x3c\x54\x44\x55\x47\x55\x4a\x54\x4d\x52\x50\x20'<concat>b'\x52\x53\x2d\x51\x30\x50\x34\x50\x38\x51\x3b\x55\x43\x56\x47'<concat>b'\x56\x4a\x55\x4d\x52\x50\x4e\x52\x52\x54\x55\x57\x56\x5a\x56'<concat>b'\x5d\x55\x61\x51\x69\x50\x6c\x50\x70\x51\x74\x53\x77\x20\x52'<concat>b'\x52\x54\x54\x57\x55\x5a\x55\x5d\x54\x60\x50\x68\x4f\x6c\x4f'<concat>b'\x70\x50\x73\x51\x75\x53\x77\x56\x79\x2f\x49\x5b\x4e\x2b\x51'<concat>b'\x2d\x53\x2f\x54\x31\x55\x34\x55\x38\x54\x3c\x50\x44\x4f\x47'<concat>b'\x4f\x4a\x50\x4d\x52\x50\x20\x52\x51\x2d\x53\x30\x54\x34\x54'<concat>b'\x38\x53\x3b\x4f\x43\x4e\x47\x4e\x4a\x4f\x4d\x52\x50\x56\x52'<concat>b'\x52\x54\x4f\x57\x4e\x5a\x4e\x5d\x4f\x61\x53\x69\x54\x6c\x54'<concat>b'\x70\x53\x74\x51\x77\x20\x52\x52\x54\x50\x57\x4f\x5a\x4f\x5d'<concat>b'\x50\x60\x54\x68\x55\x6c\x55\x70\x54\x73\x53\x75\x51\x77\x4e'<concat>b'\x79\x1f\x49\x5b\x56\x2e\x53\x31\x51\x34\x4f\x38\x4e\x3d\x4e'<concat>b'\x43\x4f\x49\x50\x4d\x53\x58\x54\x5c\x55\x62\x55\x67\x54\x6c'<concat>b'\x53\x6f\x51\x73\x20\x52\x53\x31\x51\x35\x50\x38\x4f\x3d\x4f'<concat>b'\x42\x5
0\x48\x51\x4c\x54\x57\x55\x5b\x56\x61\x56\x67\x55\x6c'<concat>b'\x53\x70\x51\x73\x4e\x76\x1f\x49\x5b\x4e\x2e\x51\x31\x53\x34'<concat>b'\x55\x38\x56\x3d\x56\x43\x55\x49\x54\x4d\x51\x58\x50\x5c\x4f'<concat>b'\x62\x4f\x67\x50\x6c\x51\x6f\x53\x73\x20\x52\x51\x31\x53\x35'<concat>b'\x54\x38\x55\x3d\x55\x42\x54\x48\x53\x4c\x50\x57\x4f\x5b\x4e'<concat>b'\x61\x4e\x67\x4f\x6c\x51\x70\x53\x73\x56\x76\x0d\x37\x5a\x3a'<concat>b'\x52\x41\x52\x52\x6f\x20\x52\x40\x52\x51\x6f\x20\x52\x3f\x52'<concat>b'\x52\x72\x20\x52\x5a\x22\x56\x4a\x52\x72\x1a\x49\x5b\x54\x4d'<concat>b'\x56\x4e\x58\x50\x58\x4f\x57\x4e\x54\x4d\x51\x4d\x4e\x4e\x4d'<concat>b'\x4f\x4c\x51\x4c\x53\x4d\x55\x4f\x57\x53\x5a\x20\x52\x51\x4d'<concat>b'\x4f\x4e\x4e\x4f\x4d\x51\x4d\x53\x4e\x55\x53\x5a\x54\x5c\x54'<concat>b'\x5e\x53\x5f\x51\x5f\x2c\x47\x5d\x4c\x4d\x4b\x4e\x4a\x50\x4a'<concat>b'\x52\x4b\x55\x4f\x59\x50\x5b\x20\x52\x4a\x52\x4b\x54\x4f\x58'<concat>b'\x50\x5b\x50\x5d\x4f\x60\x4d\x62\x4c\x62\x4b\x61\x4a\x5f\x4a'<concat>b'\x5c\x4b\x58\x4d\x54\x4f\x51\x52\x4e\x54\x4d\x56\x4d\x59\x4e'<concat>b'\x5a\x50\x5a\x54\x59\x58\x57\x5a\x55\x5b\x54\x5b\x53\x5a\x53'<concat>b'\x58\x54\x57\x55\x58\x54\x59\x20\x52\x56\x4d\x58\x4e\x59\x50'<concat>b'\x59\x54\x58\x58\x57\x5a\x44\x45\x5f\x59\x47\x58\x48\x59\x49'<concat>b'\x5a\x48\x59\x47\x57\x46\x54\x46\x51\x47\x4f\x49\x4e\x4b\x4d'<concat>b'\x4e\x4c\x52\x4a\x5b\x49\x5f\x48\x61\x20\x52\x54\x46\x52\x47'<concat>b'\x50\x49\x4f\x4b\x4e\x4e\x4c\x57\x4b\x5b\x4a\x5e\x49\x60\x48'<concat>b'\x61\x46\x62\x44\x62\x43\x61\x43\x60\x44\x5f\x45\x60\x44\x61'<concat>b'\x20\x52\x5f\x47\x5e\x48\x5f\x49\x60\x48\x60\x47\x5f\x46\x5d'<concat>b'\x46\x5b\x47\x5a\x48\x59\x4a\x58\x4d\x55\x5b\x54\x5f\x53\x61'<concat>b'\x20\x52\x5d\x46\x5b\x48\x5a\x4a\x59\x4e\x57\x57\x56\x5b\x55'<concat>b'\x5e\x54\x60\x53\x61\x51\x62\x4f\x62\x4e\x61\x4e\x60\x4f\x5f'<concat>b'\x50\x60\x4f\x61\x20\x52\x49\x4d\x5e\x4d\x33\x46\x5e\x5b\x47'<concat>b'\x5a\x48\x5b\x49\x5c\x48\x5b\x47\x58\x46\x55\x46\x52\x47\x50'<concat>b'\x49\x4f\x4b\x4e\x4e\x4d\x52\x4b\x5b\x4a\x5f\x49\x61\x20\x52'<concat>b'\x55\x46\x53\x47\x51\x49\x50\x4b\x4f\x4e\x4d\x57\x4c\x5b\x4b'<concat>b'\x5e\x4a\x60\x49\x61\x47\x62\x45\x62\x44\x61\x44\x60\x45\x5f'<concat>b'\x46\x60\x45\x61\x20\x52\x59\x4d\x57\x54\x56\x58\x56\x5a\x57'<concat>b'\x5b\x5a\x5b\x5c\x59\x5d\x57\x20\x52\x5a\x4d\x58\x54\x57\x58'<concat>b'\x57\x5a\x58\x5b\x20\x52\x4a\x4d\x5a\x4d\x35\x46\x5e\x59\x47'<concat>b'\x58\x48\x59\x49\x5a\x48\x5a\x47\x58\x46\x20\x52\x5c\x46\x55'<concat>b'\x46\x52\x47\x50\x49\x4f\x4b\x4e\x4e\x4d\x52\x4b\x5b\x4a\x5f'<concat>b'\x49\x61\x20\x52\x55\x46\x53\x47\x51\x49\x50\x4b\x4f\x4e\x4d'<concat>b'\x57\x4c\x5b\x4b\x5e\x4a\x60\x49\x61\x47\x62\x45\x62\x44\x61'<concat>b'\x44\x60\x45\x5f\x46\x60\x45\x61\x20\x52\x5b\x46\x57\x54\x56'<concat>b'\x58\x56\x5a\x57\x5b\x5a\x5b\x5c\x59\x5d\x57\x20\x52\x5c\x46'<concat>b'\x58\x54\x57\x58\x57\x5a\x58\x5b\x20\x52\x4a\x4d\x59\x4d\x55'<concat>b'\x40\x63\x54\x47\x53\x48\x54\x49\x55\x48\x54\x47\x52\x46\x4f'<concat>b'\x46\x4c\x47\x4a\x49\x49\x4b\x48\x4e\x47\x52\x45\x5b\x44\x5f'<concat>b'\x43\x61\x20\x52\x4f\x46\x4d\x47\x4b\x49\x4a\x4b\x49\x4e\x47'<concat>b'\x57\x46\x5b\x45\x5e\x44\x60\x43\x61\x41\x62\x3f\x62\x3e\x61'<concat>b'\x3e\x60\x3f\x5f\x40\x60\x3f\x61\x20\x52\x60\x47\x5f\x48\x60'<concat>b'\x49\x61\x48\x60\x47\x5d\x46\x5a\x46\x57\x47\x55\x49\x54\x4b'<concat>b'\x53\x4e\x52\x52\x50\x5b\x4f\x5f\x4e\x61\x20\x52\x5a\x46\x58'<concat>b'\x47\x56\x49\x55\x4b\x54\x4e\x52\x57\x51\x5b\x50\x5e\x4f\x60'<concat>b'\x4e\x61\x4c\x62\x4a\x62\x49\x61\x49\x60\x4a\x5f\x4b\x60\x4a'<concat>b'\x61\x20\x52
\x5e\x4d\x5c\x54\x5b\x58\x5b\x5a\x5c\x5b\x5f\x5b'<concat>b'\x61\x59\x62\x57\x20\x52\x5f\x4d\x5d\x54\x5c\x58\x5c\x5a\x5d'<concat>b'\x5b\x20\x52\x44\x4d\x5f\x4d\x57\x40\x63\x54\x47\x53\x48\x54'<concat>b'\x49\x55\x48\x54\x47\x52\x46\x4f\x46\x4c\x47\x4a\x49\x49\x4b'<concat>b'\x48\x4e\x47\x52\x45\x5b\x44\x5f\x43\x61\x20\x52\x4f\x46\x4d'<concat>b'\x47\x4b\x49\x4a\x4b\x49\x4e\x47\x57\x46\x5b\x45\x5e\x44\x60'<concat>b'\x43\x61\x41\x62\x3f\x62\x3e\x61\x3e\x60\x3f\x5f\x40\x60\x3f'<concat>b'\x61\x20\x52\x5e\x47\x5d\x48\x5e\x49\x5f\x48\x5f\x47\x5d\x46'<concat>b'\x20\x52\x61\x46\x5a\x46\x57\x47\x55\x49\x54\x4b\x53\x4e\x52'<concat>b'\x52\x50\x5b\x4f\x5f\x4e\x61\x20\x52\x5a\x46\x58\x47\x56\x49'<concat>b'\x55\x4b\x54\x4e\x52\x57\x51\x5b\x50\x5e\x4f\x60\x4e\x61\x4c'<concat>b'\x62\x4a\x62\x49\x61\x49\x60\x4a\x5f\x4b\x60\x4a\x61\x20\x52'<concat>b'\x60\x46\x5c\x54\x5b\x58\x5b\x5a\x5c\x5b\x5f\x5b\x61\x59\x62'<concat>b'\x57\x20\x52\x61\x46\x5d\x54\x5c\x58\x5c\x5a\x5d\x5b\x20\x52'<concat>b'\x44\x4d\x5e\x4d\x13\x4c\x59\x4d\x51\x4e\x4f\x50\x4d\x53\x4d'<concat>b'\x54\x4e\x54\x51\x52\x57\x52\x5a\x53\x5b\x20\x52\x52\x4d\x53'<concat>b'\x4e\x53\x51\x51\x57\x51\x5a\x52\x5b\x55\x5b\x57\x59\x58\x57'<concat>b'\x15\x4c\x58\x52\x4c\x4e\x57\x58\x50\x4c\x50\x56\x57\x52\x4c'<concat>b'\x20\x52\x52\x52\x52\x4c\x20\x52\x52\x52\x4c\x50\x20\x52\x52'<concat>b'\x52\x4e\x57\x20\x52\x52\x52\x56\x57\x20\x52\x52\x52\x58\x50'<concat>b'\x17\x46\x5e\x49\x55\x49\x53\x4a\x50\x4c\x4f\x4e\x4f\x50\x50'<concat>b'\x54\x53\x56\x54\x58\x54\x5a\x53\x5b\x51\x20\x52\x49\x53\x4a'<concat>b'\x51\x4c\x50\x4e\x50\x50\x51\x54\x54\x56\x55\x58\x55\x5a\x54'<concat>b'\x5b\x51\x5b\x4f'<line_sep>_index=b'\x00\x00\x03\x00\x0a\x00\x11\x00\x18\x00\x1f\x00\x26\x00\x2d'<concat>b'\x00\x34\x00\x3b\x00\x42\x00\x49\x00\x50\x00\x57\x00\x5e\x00'<concat>b'\x65\x00\x76\x00\x87\x00\x98\x00\xa9\x00\xbc\x00\xcf\x00\xe2'<concat>b'\x00\xf5\x00\x00\x01\x0b\x01\x16\x01\x21\x01\x50\x01\x7f\x01'<concat>b'\xae\x01\xdd\x01\x08\x02\x2f\x02\x64\x02\x7b\x02\x8e\x02\x99'<concat>b'\x02\xa6\x02\xb9\x02\xcc\x02\xf1\x02\xfe\x02\x09\x03\x16\x03'<concat>b'\x2f\x03\x3c\x03\x49\x03\x5c\x03\xa3\x03\xda\x03\xfd\x03\x20'<concat>b'\x04\x43\x04\x66\x04\x7d\x04\xa8\x04\xc3\x04\xda\x04\xf7\x04'<concat>b'\x1c\x05\x23\x05\x3a\x05\x57\x05\x98\x05\xad\x05\xf2\x05\x73'<concat>b'\x06\x30\x07\x81\x07\xc2\x07\xe1\x07\x1a\x08\x89\x08\xe6\x08'<concat>b'\x23\x09\x94\x09\xb9\x09\xfc\x09\x31\x0a\x5a\x0a\xbb\x0a\x1c'<concat>b'\x0b\x7d\x0b\xbe\x0b\xff\x0b\x1c\x0c\x53\x0c\xae\x0c\x39\x0d'<concat>b'\xa2\x0d\x0f\x0e\xbc\x0e\x6d\x0f\x96\x0f\xc3\x0f'<line_sep>INDEX=memoryview(_index)<line_sep>FONT=memoryview(_font)<line_sep> |
<import_from_stmt>unittest TestCase<import_from_stmt>. ProcessManager Step InvalidData<class_stmt>AddSwallows(Step)<block_start><def_stmt>__init__ self data<block_start>self.data=data<block_end><def_stmt>__str__ self<block_start><return>'swallows-needed'<block_end><def_stmt>validate self<block_start><if_stmt>self.data.swallows<l>2<block_start><raise>InvalidData('Not enough swallows')<block_end><block_end><block_end><class_stmt>AddCoconuts(Step)<block_start><def_stmt>__init__ self data<block_start>self.data=data<block_end><def_stmt>__str__ self<block_start><return>'coconuts-needed'<block_end><def_stmt>validate self<block_start><if_stmt>self.data.coconuts<l>1<block_start><raise>InvalidData('Need a coconut')<block_end><block_end><block_end><class_stmt>CoconutDelivery(ProcessManager)<block_start><def_stmt>__init__ self<block_start>self.swallows=0<line_sep>self.coconuts=0<block_end><def_stmt>__iter__ self<block_start><yield>AddSwallows(self)<line_sep><yield>AddCoconuts(self)<block_end><block_end><class_stmt>ProcessManagerTest(TestCase)<block_start><def_stmt>test_iter self<block_start>'ProcessManager.__iter__() returns the steps'<line_sep>process=CoconutDelivery()<line_sep>steps=list(map(str list(process)))<line_sep>self.assertEqual(steps ['swallows-needed' 'coconuts-needed'])<block_end><def_stmt>test_get_next_step self<block_start>'ProcessManager.get_next_step() returns the first step with invalid data'<line_sep>process=CoconutDelivery()<line_sep>process.coconuts=1<line_sep>self.assertEqual(str(process.get_next_step()) 'swallows-needed')<line_sep>process.swallows=2<line_sep>self.assertEqual(process.get_next_step() <none>)<line_sep>process.coconuts=0<line_sep>self.assertEqual(str(process.get_next_step()) 'coconuts-needed')<block_end><def_stmt>test_is_complete self<block_start>'ProcessManager.is_complete() returns true if all steps are satisfied'<line_sep>process=CoconutDelivery()<line_sep>self.assertFalse(process.is_complete())<line_sep>process.coconuts=1<line_sep>process.swallows=2<line_sep>self.assertTrue(process.is_complete())<block_end><def_stmt>test_item_access self<block_start>'You can index a ProcessManager using step names'<line_sep>process=CoconutDelivery()<line_sep>self.assertTrue(isinstance(process['coconuts-needed'] AddCoconuts))<def_stmt>invalid <block_start><return>process['spam-needed']<block_end>self.assertRaises(KeyError invalid)<block_end><def_stmt>test_errors self<block_start>'ProcessManager.get_errors() returns a dict of all invalid steps'<line_sep>process=CoconutDelivery()<line_sep>process.swallows=2<line_sep>errors=process.get_errors()<line_sep>self.assertFalse('swallows-needed'<in>errors)<line_sep>self.assertTrue('coconuts-needed'<in>errors)<block_end><block_end> |
<import_stmt>pytest<import_from_stmt>icevision.all *<import_from_stmt>icevision.models.torchvision retinanet<line_sep>@pytest.fixture<def_stmt>light_model_cls <block_start><class_stmt>LightModel(retinanet.lightning.ModelAdapter)<block_start><def_stmt>configure_optimizers self<block_start><return>SGD(self.parameters() lr=1e-4)<block_end><block_end><return>LightModel<block_end>@pytest.mark.parametrize("metrics" [[] [COCOMetric()]])<def_stmt>test_lightining_retinanet_train fridge_faster_rcnn_dls fridge_retinanet_model light_model_cls metrics<block_start>train_dl,valid_dl=fridge_faster_rcnn_dls<line_sep>light_model=light_model_cls(fridge_retinanet_model metrics=metrics)<line_sep>trainer=pl.Trainer(max_epochs=1 weights_summary=<none> num_sanity_val_steps=0 logger=<false> checkpoint_callback=<false> )<line_sep>trainer.fit(light_model train_dl valid_dl)<block_end> |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
<import_from_stmt>.common BaseTest<class_stmt>AlarmTest(BaseTest)<block_start><def_stmt>test_delete self<block_start>alarm_name="c7n-test-alarm-delete"<line_sep>factory=self.replay_flight_data("test_alarm_delete")<line_sep>client=factory().client("cloudwatch")<line_sep>client.put_metric_alarm(AlarmName=alarm_name MetricName="CPUUtilization" Namespace="AWS/EC2" Statistic="Average" Period=3600 EvaluationPeriods=5 Threshold=10 ComparisonOperator="GreaterThanThreshold" )<line_sep>p=self.load_policy({"name":"delete-alarm" "resource":"alarm" "filters":[{"AlarmName":alarm_name}] "actions":["delete"] } session_factory=factory )<line_sep>resources=p.run()<line_sep>self.assertEqual(len(resources) 1)<line_sep>self.assertEqual(client.describe_alarms(AlarmNames=[alarm_name])["MetricAlarms"] [])<block_end><def_stmt>test_filter_tags self<block_start>factory=self.replay_flight_data("test_alarm_tags_filter")<line_sep>p=self.load_policy({"name":"filter-alarm-tags" "resource":"alarm" "filters":[{'type':'value' 'key':'tag:some-tag' 'value':'some-value' 'op':'eq'}] } session_factory=factory )<line_sep>resources=p.run()<line_sep>self.assertEqual(len(resources) 1)<line_sep>self.assertEqual(resources[0].get('c7n:MatchedFilters') ['tag:some-tag'])<block_end><def_stmt>test_add_alarm_tags self<block_start>factory=self.replay_flight_data("test_alarm_add_tags")<line_sep>p=self.load_policy({"name":"add-alarm-tags" "resource":"alarm" "actions":[{"type":"tag" "key":"OwnerName" "value":"SomeName"}] } session_factory=factory )<line_sep>resources=p.run()<line_sep>self.assertEqual(len(resources) 1)<line_sep>self.assertTrue({'Key':'OwnerName' 'Value':'SomeName'}<in>resources[0].get('Tags'))<block_end><block_end> |
"""A shim module for deprecated imports
"""<line_sep># Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
<import_stmt>sys<import_stmt>types<import_from_stmt>importlib import_module<import_from_stmt>.importstring import_item<class_stmt>ShimWarning(Warning)<block_start>"""A warning to show when a module has moved, and a shim is in its place."""<block_end><class_stmt>ShimImporter(object)<block_start>"""Import hook for a shim.
This ensures that submodule imports return the real target module,
not a clone that will confuse `is` and `isinstance` checks.
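For instance (hypothetical names), once ``sys.modules['pkg.oldmod']`` holds a
``ShimModule(src='pkg.oldmod', mirror='pkg.newmod')``, importing
``pkg.oldmod.sub`` hands back the real ``pkg.newmod.sub`` module object rather
than a copy.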
"""<def_stmt>__init__ self src mirror<block_start>self.src=src<line_sep>self.mirror=mirror<block_end><def_stmt>_mirror_name self fullname<block_start>"""get the name of the mirrored module"""<line_sep><return>self.mirror+fullname[len(self.src):]<block_end><def_stmt>find_module self fullname path=<none><block_start>"""Return self if we should be used to import the module."""<if_stmt>fullname.startswith(self.src+'.')<block_start>mirror_name=self._mirror_name(fullname)<try_stmt><block_start>mod=import_item(mirror_name)<block_end><except_stmt>ImportError<block_start><return><block_end><else_stmt><block_start><if_stmt><not>isinstance(mod types.ModuleType)# not a module
<block_start><return><none><block_end><return>self<block_end><block_end><block_end><def_stmt>load_module self fullname<block_start>"""Import the mirrored module, and insert it into sys.modules"""<line_sep>mirror_name=self._mirror_name(fullname)<line_sep>mod=import_item(mirror_name)<line_sep>sys.modules[fullname]=mod<line_sep><return>mod<block_end><block_end><class_stmt>ShimModule(types.ModuleType)<block_start><def_stmt>__init__ self *args **kwargs<block_start>self._mirror=kwargs.pop("mirror")<line_sep>src=kwargs.pop("src" <none>)<if_stmt>src<block_start>kwargs['name']=src.rsplit('.' 1)[-1]<block_end>super(ShimModule self).__init__(*args **kwargs)<line_sep># add import hook for descendent modules
<if_stmt>src<block_start>sys.meta_path.append(ShimImporter(src=src mirror=self._mirror))<block_end><block_end>@property<def_stmt>__path__ self<block_start><return>[]<block_end>@property<def_stmt>__spec__ self<block_start>"""Don't produce __spec__ until requested"""<line_sep><return>import_module(self._mirror).__spec__<block_end><def_stmt>__dir__ self<block_start><return>dir(import_module(self._mirror))<block_end>@property<def_stmt>__all__ self<block_start>"""Ensure __all__ is always defined"""<line_sep>mod=import_module(self._mirror)<try_stmt><block_start><return>mod.__all__<block_end><except_stmt>AttributeError<block_start><return>[name<for>name dir(mod)<if><not>name.startswith('_')]<block_end><block_end><def_stmt>__getattr__ self key# Use the equivalent of import_item(name), see below
<block_start>name="%s.%s"%(self._mirror key)<try_stmt><block_start><return>import_item(name)<block_end><except_stmt>ImportError<block_start><raise>AttributeError(key)<block_end><block_end><block_end> |
# Copyright (c) Facebook, Inc. and its affiliates.
<import_stmt>os<import_stmt>torch<import_from_stmt>importlib import_module<import_from_stmt>tqdm tqdm<import_stmt>omegaconf<import_stmt>hydra<import_from_stmt>common utils<line_sep>__all__=["get_dataset" ]<def_stmt>get_dataset dataset_cfg data_cfg transform logger# If there is _precomputed_metadata file passed in, load that in
<block_start>kwargs={}<line_sep>precomp_metadata_fpath=<none><if_stmt>'_precomputed_metadata_file'<in>dataset_cfg<block_start>precomp_metadata_fpath=dataset_cfg._precomputed_metadata_file<line_sep># Remove from the config since otherwise can't init the obj
<with_stmt>omegaconf.open_dict(dataset_cfg)<block_start><del_stmt>dataset_cfg['_precomputed_metadata_file']<block_end><if_stmt>os.path.exists(precomp_metadata_fpath)<block_start>_precomputed_metadata=torch.load(precomp_metadata_fpath)<line_sep>kwargs['_precomputed_metadata']=_precomputed_metadata<block_end><block_end>kwargs['transform']=transform<line_sep>kwargs['frame_rate']=data_cfg.frame_rate<line_sep>kwargs['frames_per_clip']=data_cfg.num_frames<line_sep># Have to call dict() here since relative interpolation somehow doesn't
# work once I get the subclips object
kwargs['subclips_options']=dict(data_cfg.subclips)<line_sep>kwargs['load_seg_labels']=data_cfg.load_seg_labels<line_sep>logger.info('Creating the dataset object...')<line_sep># Not recursive since many of the sub-instantiations would need positional
# arguments
_dataset=hydra.utils.instantiate(dataset_cfg _recursive_=<false> **kwargs)<try_stmt><block_start>logger.info('Computing clips...')<line_sep>_dataset.video_clips.compute_clips(data_cfg.num_frames 1 frame_rate=data_cfg.frame_rate)<line_sep>logger.info('Done')<block_end><except_stmt>AttributeError# if video_clips not in _dataset
<block_start>logger.warning('No video_clips present')<block_end>logger.info(f'Created dataset with {len(_dataset)} elts')<if_stmt>precomp_metadata_fpath<and><not>os.path.exists(precomp_metadata_fpath)<block_start>utils.save_on_master(_dataset.metadata precomp_metadata_fpath)<block_end><return>_dataset<block_end> |
<import_stmt>os<import_stmt>sys<line_sep>sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))<import_stmt>pytest<import_from_stmt>rl_coach.spaces DiscreteActionSpace BoxActionSpace<import_from_stmt>rl_coach.exploration_policies.additive_noise AdditiveNoise<import_from_stmt>rl_coach.schedules LinearSchedule<import_stmt>numpy<as>np<line_sep>@pytest.mark.unit_test<def_stmt>test_init # discrete control
<block_start>action_space=DiscreteActionSpace(3)<line_sep>noise_schedule=LinearSchedule(1.0 1.0 1000)<line_sep># additive noise requires a bounded range for the actions
action_space=BoxActionSpace(np.array([10]))<with_stmt>pytest.raises(ValueError)<block_start>policy=AdditiveNoise(action_space noise_schedule 0)<block_end><block_end>@pytest.mark.unit_test<def_stmt>test_get_action # make sure noise is in range
<block_start>action_space=BoxActionSpace(np.array([10]) -1 1)<line_sep>noise_schedule=LinearSchedule(1.0 1.0 1000)<line_sep>policy=AdditiveNoise(action_space noise_schedule 0)<line_sep># the action range is 2, so there is a ~0.1% chance that the noise will be larger than 3*std=3*2=6
<for_stmt>i range(1000)<block_start>action=policy.get_action(np.zeros([10]))<assert_stmt>np.all(action<l>10)<line_sep># make sure there is no clipping of the action since it should be the environment that clips actions
<assert_stmt>np.all(action<ne>1.0)<assert_stmt>np.all(action<ne>-1.0)<line_sep># make sure that each action element has a different value
<assert_stmt>np.all(action[0]<ne>action[1:])<block_end><block_end> |
<import_from_stmt>unittest.mock patch create_autospec PropertyMock<import_from_stmt>stack.expectmore ExpectMore<import_from_stmt>stack.switch.x1052 SwitchDellX1052<line_sep># Switch data to mock MAC address table
SWITCH_DATA="""
show mac address-table
show mac address-table
Flags: I - Internal usage VLAN
Aging time is 300 sec
Vlan Mac Address Port Type
------------ --------------------- ---------- ----------
1 00:00:00:00:00:00 gi1/0/10 dynamic
1 f4:8e:38:44:10:15 0 self
console#:
"""<line_sep># Intercept expectmore calls
mock_expectmore=patch(target="stack.switch.x1052.ExpectMore" autospec=<true>).start()<line_sep># Need to set the instance mock returned from calling ExpectMore()
mock_expectmore.return_value=create_autospec(spec=ExpectMore spec_set=<true> instance=<true> )<line_sep># Need to set the match_index to the base console prompt so that the switch thinks it is at the
# correct prompt, and won't try to page through output.
type(mock_expectmore.return_value).match_index=PropertyMock(return_value=SwitchDellX1052.CONSOLE_PROMPTS.index(SwitchDellX1052.CONSOLE_PROMPT))<line_sep># Return our SWITCH_DATA from ExpectMore().ask()
mock_expectmore.return_value.ask.return_value=SWITCH_DATA.splitlines()<line_sep> |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=unused-argument
# pylint: disable=line-too-long
# pylint: disable=no-value-for-parameter
<import_from_stmt>azure.cli.core.decorators Completer<line_sep>@Completer<def_stmt>get_eventhubs_command_completion_list cmd prefix namespace<block_start><import_from_stmt>._client_factory event_hub_mgmt_client_factory<line_sep>resource_group_name=namespace.resource_group_name<line_sep>namespace_name=namespace.name<line_sep>result=event_hub_mgmt_client_factory(cmd.cli_ctx).list_by_namespace(resource_group_name namespace_name)<line_sep><return>[r.name<for>r result]<block_end>@Completer<def_stmt>get_consumergroup_command_completion_list cmd prefix namespace<block_start><import_from_stmt>._client_factory consumer_groups_mgmt_client_factory<line_sep>resource_group_name=namespace.resource_group_name<line_sep>namespace_name=namespace.namespace_name<line_sep>eventhub_name=namespace.name<line_sep>result=consumer_groups_mgmt_client_factory(cmd.cli_ctx).list_by_event_hub(resource_group_name namespace_name eventhub_name)<line_sep><return>[r.name<for>r result]<block_end> |
<import_stmt>setuptools<import_stmt>re<import_from_stmt>pathlib Path<line_sep>path=Path('.')<with_stmt>(path/"README.md").open()<as>fh<block_start>long_description=fh.read()<block_end>version_file=path/"openmaptiles"/"__init__.py"<with_stmt>version_file.open()<as>fh<block_start>m=re.search(r"^__version__\s*=\s*(['\"])([^'\"]*)\1" fh.read().strip() re.M)<if_stmt><not>m<block_start><raise>ValueError(f"Version string is not found in {version_file}")<block_end>version=m.group(2)<block_end><with_stmt>(path/"requirements.txt").open(encoding="utf-8")<as>fh# Requirements will contain a list of libraries without version restrictions
# It seems this is a common practice for the setup.py vs requirements.txt
<block_start>requirements=[m.group(1)<for>m (re.match(r'^[ \t]*([^>=<!#\n]+).*' line)<for>line fh.readlines())<if>m]<block_end>scripts=[str(p)<for>p path.glob('bin/*')<if>p.is_file()]<line_sep>setuptools.setup(name='openmaptiles-tools' version=version packages=['openmaptiles'] description="The OpenMapTiles tools for generating TM2Source projects, imposm3 mappings and SQL instructions from "<concat>"OpenMapTiles layers. We encourage other people to use this for their vector tile projects as well "<concat>"since this approach works well for us." long_description=long_description long_description_content_type="text/markdown" url="https://github.com/openmaptiles/openmaptiles-tools" license='MIT' scripts=scripts install_requires=requirements )<line_sep> |
# <Copyright 2019, Argo AI, LLC. Released under the MIT license.>
<import_from_stmt>typing List Optional<import_stmt>numpy<as>np<import_from_stmt>argoverse.utils mayavi_wrapper<import_from_stmt>argoverse.utils.mesh_grid get_mesh_grid_as_point_cloud<import_from_stmt>argoverse.visualization.mayavi_utils Figure draw_mayavi_line_segment plot_3d_clipped_bbox_mayavi plot_points_3D_mayavi <def_stmt>populate_frustum_voxels planes:List[np.ndarray] fig:Figure axis_pair:str<arrow>Figure<block_start>"""
Generate grid in xy plane, and then treat it as grid in xz (ground) plane
in camera coordinate system.
Args:
planes: list of length 5. Each list element is a Numpy array
of shape (4,) representing the equation of a plane,
e.g. (a,b,c,d) in ax+by+cz=d
fig: Mayavi figure to draw on
axis_pair: Either "xz" or "yz"
Returns:
Mayavi figure
"""<line_sep>sparse_xz_voxel_grid=get_mesh_grid_as_point_cloud(-20 20 0 40 downsample_factor=0.1)<line_sep>sparse_voxel_grid=np.zeros((sparse_xz_voxel_grid.shape[0] 3))<if_stmt>axis_pair<eq>"xz"<block_start>sparse_voxel_grid[: 0]=sparse_xz_voxel_grid[: 0]<line_sep>sparse_voxel_grid[: 2]=sparse_xz_voxel_grid[: 1]<block_end><elif_stmt>axis_pair<eq>"yz"<block_start>sparse_voxel_grid[: 1]=sparse_xz_voxel_grid[: 0]<line_sep>sparse_voxel_grid[: 2]=sparse_xz_voxel_grid[: 1]<block_end># keep only the points that have signed distance > 0 (inside the frustum, plane
# normals also point into the frustum)
<for_stmt>plane planes<block_start>signed_d=np.matmul(sparse_voxel_grid plane[:3])+plane[3]<line_sep>sparse_voxel_grid=sparse_voxel_grid[np.where(signed_d<g>0)]<block_end>plot_points_3D_mayavi(sparse_voxel_grid fig fixed_color=(1 0 0))<line_sep><return>fig<block_end><def_stmt>plot_frustum_planes_and_normals planes:List[np.ndarray] cuboid_verts:Optional[np.ndarray]=<none> near_clip_dist:float=0.5 <arrow><none><block_start>"""
Args:
planes: list of length 5. Each list element is a Numpy array
of shape (4,) representing the equation of a plane,
e.g. (a,b,c,d) in ax+by+cz=d
cuboid_verts: Numpy array of shape (N,3) representing
cuboid vertices
Returns:
None
"""<line_sep>fig=mayavi_wrapper.mlab.figure(bgcolor=(1 1 1) size=(2000 1000))# type: ignore
<if_stmt>cuboid_verts<is><not><none># fig = plot_bbox_3d_mayavi(fig, cuboid_verts)
<block_start>fig=plot_3d_clipped_bbox_mayavi(fig planes cuboid_verts)<block_end>P=np.array([0.0 0.0 0.0])<for_stmt>i,plane enumerate(planes)<block_start>(a b c d)=plane<if_stmt>i<eq>0<block_start>color=(1 0 0)# red left
<block_end><elif_stmt>i<eq>1<block_start>color=(0 0 1)# blue right
<block_end><elif_stmt>i<eq>2<block_start>color=(1 1 0)# near yellow
P=np.array([0.0 0.0 near_clip_dist])<block_end><elif_stmt>i<eq>3<block_start>color=(0 1 0)# low is green
<block_end><elif_stmt>i<eq>4<block_start>color=(0 1 1)<block_end># top is teal
plane_pts=generate_grid_on_plane(a b c d P)<line_sep>fig=plot_points_3D_mayavi(plane_pts fig color)<line_sep># plot the normals at (0,0,0.5) and normal vector (u,v,w) given by (a,b,c)
mayavi_wrapper.mlab.quiver3d(# type: ignore
0 0 0.5 a<times>1000 b<times>1000 c<times>1000 color=color figure=fig line_width=8 )<block_end># draw teal line at top below the camera
pt1=np.array([-5 0 -5])<line_sep>pt2=np.array([5 0 -5])<line_sep>color=(0 1 1)<line_sep>draw_mayavi_line_segment(fig [pt1 pt2] color=color line_width=8)<line_sep># draw blue line in middle
pt1=np.array([-5 5 -5])<line_sep>pt2=np.array([5 5 -5])<line_sep>color=(0 0 1)<line_sep>draw_mayavi_line_segment(fig [pt1 pt2] color=color line_width=8)<line_sep># draw yellow, lowest line (+y axis is down)
pt1=np.array([-5 10 -5])<line_sep>pt2=np.array([5 10 -5])<line_sep>color=(1 1 0)<line_sep>draw_mayavi_line_segment(fig [pt1 pt2] color=color line_width=8)<line_sep>fig=populate_frustum_voxels(planes fig "xz")<line_sep>fig=populate_frustum_voxels(planes fig "yz")<line_sep>mayavi_wrapper.mlab.view(distance=200)# type: ignore
mayavi_wrapper.mlab.show()<block_end># type: ignore
<def_stmt>get_perpendicular n:np.ndarray<arrow>np.ndarray<block_start>"""
This guarantees that dot(n, get_perpendicular(n)) is zero, which is the
orthogonality condition, while also keeping the magnitude of the vector
as high as possible. Note that setting the component with the smallest
magnitude to 0 also guarantees that you don't get a 0,0,0 vector as a
result, unless that is already your input.
Args:
n: Numpy array of shape (3,)
Returns:
result: Numpy array of shape (3,)
"""<line_sep># find smallest component
i=np.argmin(n)<line_sep># get the other two indices
a=(i+1)%3<line_sep>b=(i+2)%3<line_sep>result=np.zeros(3)<line_sep>result[i]=0.0<line_sep>result[a]=n[b]<line_sep>result[b]=-n[a]<line_sep><return>result<block_end><def_stmt>generate_grid_on_plane a:float b:float c:float d:float P:np.ndarray radius:float=15<arrow>np.ndarray<block_start>"""
Args:
a,b,c,d: Coefficients of ``ax + by + cz = d`` defining plane
P: Numpy array of shape (3,) representing point on the plane
radius: Radius (default 15)
Returns:
plane_pts: Numpy array of shape (N,3) with points on the input plane
"""<line_sep>n=np.array([a b c])# a,b,c from your equation
perp=get_perpendicular(n)<line_sep>u=perp/np.linalg.norm(perp)<line_sep>v=np.cross(u n)<line_sep>N=100<line_sep># delta and epsilon are floats:
delta=radius/N# N is how many points you want max in one direction
epsilon=delta<times>0.5<line_sep>n_pts=int((2<times>radius+epsilon)/delta)<line_sep>pts=np.linspace(-radius radius+epsilon n_pts)<line_sep>plane_pts:List[float]=[]<for_stmt>y pts<block_start><for_stmt>x pts# if (x*x+y*y < radius*radius): # only in the circle:
<block_start>plane_pts<augadd>[P+x<times>u+y<times>v]<block_end><block_end># P is the point on the plane
<return>np.array(plane_pts)<block_end> |
<import_stmt>unittest<class_stmt>TestSample(unittest.TestCase)<block_start><def_stmt>setUp self<block_start><pass><block_end><def_stmt>test_add self<block_start>self.assertEqual((3+4) 7)<block_end><block_end> |
r'''
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''<class_stmt>TlsNotEnoughDataError(Exception)<block_start>"""Error in TLS parsing where the TLS record is so far valid but incomplete"""<line_sep><pass><block_end><class_stmt>TlsRecordIncompleteError(TlsNotEnoughDataError)<block_start>"""Error for when a TLS Record appears valid but not enough data is present to parse
the record"""<def_stmt>__init__ self data_available record_size<block_start>self.data_available=data_available<line_sep>self.record_size=record_size<block_end><block_end><class_stmt>TlsMessageFragmentedError(TlsNotEnoughDataError)<block_start>"""Error for when not enough data is present to parse a TLS message because of
fragmentation"""<def_stmt>__init__ self fragment_data data_consumed<block_start>self.fragment_data=fragment_data<line_sep>self.data_consumed=data_consumed<block_end><block_end> |
'''
P11 and P12 must be connected together for this test to pass.
'''<import_from_stmt>machine UART<import_from_stmt>machine Pin<import_stmt>os<import_stmt>time<line_sep># do not execute this test on the GPy and FiPy
<if_stmt>os.uname().sysname<eq>'GPy'<or>os.uname().sysname<eq>'FiPy'<block_start>print("SKIP")<import_stmt>sys<line_sep>sys.exit()<block_end>uart=UART(2 115200)<line_sep>print(uart)<line_sep>uart.init(57600 8 <none> 1 pins=('P11' 'P12'))<line_sep>uart.init(baudrate=9600 stop=2 parity=UART.EVEN pins=('P11' 'P12'))<line_sep>uart.init(baudrate=115200 parity=UART.ODD stop=1 pins=('P11' 'P12'))<line_sep>uart.read()<line_sep>print(uart.read())<line_sep>print(uart.readline())<line_sep>buff=bytearray(1)<line_sep>print(uart.readinto(buff 1))<line_sep>print(uart.read())<line_sep>print(uart.any())<line_sep>print(uart.write('a'))<line_sep>uart.deinit()<line_sep>uart=UART(2 1000000 pins=('P12' 'P11'))<line_sep>print(uart)<line_sep>uart.read()<line_sep>print(uart.write(b'123456')<eq>6)<line_sep>print(uart.read()<eq>b'123456')<line_sep>uart.deinit()<line_sep>uart=UART(2 1000000 pins=('P11' 'P12'))<line_sep>print(uart)<line_sep>uart.read()<line_sep>print(uart.write(b'123456')<eq>6)<line_sep>print(uart.read()<eq>b'123456')<line_sep>uart.deinit()<line_sep>uart=UART(2 1000000 pins=('P11' 'P12'))<line_sep>print(uart.write(b'123')<eq>3)<line_sep>print(uart.read(1)<eq>b'1')<line_sep>print(uart.read(2)<eq>b'23')<line_sep>print(uart.read()<eq><none>)<line_sep>uart.write(b'123')<line_sep>buf=bytearray(3)<line_sep>print(uart.readinto(buf 1)<eq>1)<line_sep>print(buf)<line_sep>print(uart.readinto(buf)<eq>2)<line_sep>print(buf)<line_sep>uart.deinit()<line_sep># check for memory leaks...
<for_stmt>i range(0 1000)<block_start>uart=UART(2 1000000)<line_sep>uart.deinit()<block_end># next ones must raise
<try_stmt><block_start>UART(2 9600 parity=<none> pins=('GP12' 'GP13' 'GP7'))<block_end><except_stmt>Exception<block_start>print('Exception')<block_end><try_stmt><block_start>UART(2 9600 parity=UART.ODD pins=('GP12' 'GP7'))<block_end><except_stmt>Exception<block_start>print('Exception')<block_end># buffer overflow
uart=UART(2 1000000 pins=('P11' 'P12'))<line_sep>buf=bytearray([0x55AA]<times>567)<for_stmt>i range(200)<block_start>r=uart.write(buf)<block_end>r=uart.read()<line_sep>r=uart.read()<line_sep>print(r)<line_sep>print(uart.write(b'123456')<eq>6)<line_sep>print(uart.read()<eq>b'123456')<line_sep>uart.deinit()<line_sep> |
"""
Utilities to enable exception reraising across the master commands
"""<import_stmt>builtins<import_stmt>salt.exceptions<import_stmt>salt.utils.event<def_stmt>raise_error name=<none> args=<none> message=""<block_start>"""
Raise an exception with __name__ from name and args from args.
If args is None, build the exception from message instead.
If name is empty then use "Exception".
"""<line_sep>name=name<or>"Exception"<if_stmt>hasattr(salt.exceptions name)<block_start>ex=getattr(salt.exceptions name)<block_end><elif_stmt>hasattr(builtins name)<block_start>ex=getattr(builtins name)<block_end><else_stmt><block_start>name="SaltException"<line_sep>ex=getattr(salt.exceptions name)<block_end><if_stmt>args<is><not><none><block_start><raise>ex(*args)<block_end><else_stmt><block_start><raise>ex(message)<block_end><block_end><def_stmt>pack_exception exc<block_start><if_stmt>hasattr(exc "pack")<block_start>packed_exception=exc.pack()<block_end><else_stmt><block_start>packed_exception={"message":exc.__unicode__() "args":exc.args}<block_end><return>packed_exception<block_end><def_stmt>fire_exception exc opts job=<none> node="minion"<block_start>"""
Fire raw exception across the event bus
"""<if_stmt>job<is><none><block_start>job={}<block_end>event=salt.utils.event.SaltEvent(node opts=opts listen=<false>)<line_sep>event.fire_event(pack_exception(exc) "_salt_error")<block_end> |
# -*- coding: utf-8 -*-
<import_stmt>asyncio<import_from_stmt>.assertions assert_corofunction<def_stmt>thunk coro<block_start>"""
A thunk is a subroutine that is created, often automatically, to assist
a call to another subroutine.
Creates a thunk coroutine, which returns a coroutine function that accepts
no arguments and, when invoked, schedules the wrapped coroutine and
returns the final result.
See Wikipedia page for more information about Thunk subroutines:
https://en.wikipedia.org/wiki/Thunk
Arguments:
coro (coroutinefunction): wrapped coroutine function to invoke.
Returns:
coroutinefunction
Usage::
async def task():
return 'foo'
coro = paco.thunk(task)
await coro()
# => 'foo'
await coro()
# => 'foo'
"""<line_sep>assert_corofunction(coro=coro)<line_sep>@asyncio.coroutine<def_stmt>wrapper <block_start><return>(<yield><from>coro())<block_end><return>wrapper<block_end> |
<import_from_stmt>dts.utils.utils get_args<import_from_stmt>dts.utils.losses r2 smape nrmse_a nrmse_b nrmse_c nrmsd<import_from_stmt>dts.utils.experiments DTSExperiment log_metrics run_single_experiment run_grid_search<line_sep>metrics=['mse' 'mae' nrmse_a nrmse_b nrmsd r2 smape 'mape']<line_sep> |
<import_stmt>argparse<import_stmt>io<import_stmt>os<import_stmt>shutil<import_stmt>sys<import_stmt>mammoth<import_from_stmt>. writers<def_stmt>main <block_start>args=_parse_args()<if_stmt>args.style_map<is><none><block_start>style_map=<none><block_end><else_stmt><block_start><with_stmt>open(args.style_map)<as>style_map_fileobj<block_start>style_map=style_map_fileobj.read()<block_end><block_end><with_stmt>open(args.path "rb")<as>docx_fileobj<block_start><if_stmt>args.output_dir<is><none><block_start>convert_image=<none><line_sep>output_path=args.output<block_end><else_stmt><block_start>convert_image=mammoth.images.img_element(ImageWriter(args.output_dir))<line_sep>output_filename="{0}.html".format(os.path.basename(args.path).rpartition(".")[0])<line_sep>output_path=os.path.join(args.output_dir output_filename)<block_end>result=mammoth.convert(docx_fileobj style_map=style_map convert_image=convert_image output_format=args.output_format )<for_stmt>message result.messages<block_start>sys.stderr.write(message.message)<line_sep>sys.stderr.write("\n")<block_end>_write_output(output_path result.value)<block_end><block_end><class_stmt>ImageWriter(object)<block_start><def_stmt>__init__ self output_dir<block_start>self._output_dir=output_dir<line_sep>self._image_number=1<block_end><def_stmt>__call__ self element<block_start>extension=element.content_type.partition("/")[2]<line_sep>image_filename="{0}.{1}".format(self._image_number extension)<with_stmt>open(os.path.join(self._output_dir image_filename) "wb")<as>image_dest<block_start><with_stmt>element.open()<as>image_source<block_start>shutil.copyfileobj(image_source image_dest)<block_end><block_end>self._image_number<augadd>1<line_sep><return>{"src":image_filename}<block_end><block_end><def_stmt>_write_output path contents<block_start><if_stmt>path<is><none><block_start><if_stmt>sys.version_info[0]<le>2<block_start>stdout=sys.stdout<block_end><else_stmt><block_start>stdout=sys.stdout.buffer<block_end>stdout.write(contents.encode("utf-8"))<line_sep>stdout.flush()<block_end><else_stmt><block_start><with_stmt>io.open(path "w" encoding="utf-8")<as>fileobj<block_start>fileobj.write(contents)<block_end><block_end><block_end><def_stmt>_parse_args <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("path" metavar="docx-path" help="Path to the .docx file to convert.")<line_sep>output_group=parser.add_mutually_exclusive_group()<line_sep>output_group.add_argument("output" nargs="?" metavar="output-path" help="Output path for the generated document. Images will be stored inline in the output document. Output is written to stdout if not set.")<line_sep>output_group.add_argument("--output-dir" help="Output directory for generated HTML and images. Images will be stored in separate files. Mutually exclusive with output-path.")<line_sep>parser.add_argument("--output-format" required=<false> choices=writers.formats() help="Output format.")<line_sep>parser.add_argument("--style-map" required=<false> help="File containg a style map.")<line_sep><return>parser.parse_args()<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
<import_stmt>functools<import_from_stmt>shenfun.matrixbase SpectralMatrix<import_from_stmt>shenfun.la TDMA_O<import_from_stmt>. bases<line_sep>SD=bases.ShenDirichlet<line_sep>L=bases.Orthogonal<class_stmt>BLLmat(SpectralMatrix)<block_start>r"""Mass matrix for inner product
.. math::
B_{kj} = (L_j, L_k)_w
where
.. math::
j = 0, 1, ..., N \text{ and } k = 0, 1, ..., N
and :math:`L_k` is the Laguerre function.
"""<def_stmt>__init__ self test trial scale=1 measure=1<block_start><assert_stmt>isinstance(test[0] L)<assert_stmt>isinstance(trial[0] L)<line_sep>SpectralMatrix.__init__(self {0:1} test trial scale=scale measure=measure)<block_end><def_stmt>solve self b u=<none> axis=0 constraints=()<block_start><if_stmt>u<is><not><none><block_start>u[:]=b<line_sep>u<augdiv>(self.scale<times>self[0])<line_sep><return>u<block_end><else_stmt><block_start>b<augdiv>(self.scale<times>self[0])<line_sep><return>b<block_end><block_end><def_stmt>matvec self v c format=<none> axis=0<block_start>c[:]=v<line_sep>self.scale_array(c self.scale<times>self[0])<line_sep><return>c<block_end><block_end><class_stmt>BSDSDmat(SpectralMatrix)<block_start>r"""Mass matrix for inner product
.. math::
B_{kj} = (\phi_j, \phi_k)_w
where
.. math::
j = 0, 1, ..., N-1 \text{ and } k = 0, 1, ..., N-1
and :math:`\phi_k` is the Laguerre (function) Dirichlet basis function.
"""<def_stmt>__init__ self test trial scale=1 measure=1<block_start><assert_stmt>isinstance(test[0] SD)<assert_stmt>isinstance(trial[0] SD)<line_sep>d={0:2. 1:-1. -1:-1.}<line_sep>SpectralMatrix.__init__(self d test trial scale=scale measure=measure)<block_end><def_stmt>get_solver self<block_start><return>TDMA_O<block_end><block_end><class_stmt>ASDSDmat(SpectralMatrix)<block_start>r"""Mass matrix for inner product
.. math::
A_{kj} = (\phi'_j, \phi'_k)_w
where
.. math::
j = 0, 1, ..., N-1 \text{ and } k = 0, 1, ..., N-1
and :math:`\phi_k` is the Laguerre (function) Dirichlet basis function.
"""<def_stmt>__init__ self test trial scale=1 measure=1<block_start><assert_stmt>isinstance(test[0] SD)<assert_stmt>isinstance(trial[0] SD)<line_sep>d={0:0.5 1:0.25 -1:0.25}<line_sep>SpectralMatrix.__init__(self d test trial scale=scale measure=measure)<block_end><def_stmt>get_solver self<block_start><return>TDMA_O<block_end><block_end><class_stmt>_Lagmatrix(SpectralMatrix)<block_start><def_stmt>__init__ self test trial measure=1<block_start>SpectralMatrix.__init__(self {} test trial measure=measure)<block_end><block_end><class_stmt>_LagMatDict(dict)<block_start>"""Dictionary of inner product matrices
Matrices that are missing keys are generated from Vandermonde type
computations.
"""<def_stmt>__missing__ self key<block_start>measure=1<if>len(key)<eq>2<else>key[3]<line_sep>c=functools.partial(_Lagmatrix measure=measure)<line_sep>self[key]=c<line_sep><return>c<block_end><def_stmt>__getitem__ self key<block_start>matrix=dict.__getitem__(self key)<line_sep><return>matrix<block_end><block_end>mat=_LagMatDict({((SD 0) (SD 0)):BSDSDmat ((SD 1) (SD 1)):ASDSDmat ((L 0) (L 0)):BLLmat})<line_sep> |
<import_from_stmt>abc ABC abstractmethod<import_stmt>numpy<as>np<import_from_stmt>scipy stats<import_stmt>torch<class_stmt>Experiment(ABC)<block_start>'''
An Experiment manages the basic train/test loop and logs results.
Args:
writer (:torch.logging.writer:): A Writer object used for logging.
quiet (bool): If False, the Experiment will print information about
episode returns to standard out.
'''<def_stmt>__init__ self writer quiet<block_start>self._writer=writer<line_sep>self._quiet=quiet<line_sep>self._best_returns=-np.inf<line_sep>self._returns100=[]<block_end>@abstractmethod<def_stmt>train self frames=np.inf episodes=np.inf<block_start>'''
Train the agent for a certain number of frames or episodes.
If both frames and episodes are specified, then the training loop will exit
when either condition is satisfied.
Args:
frames (int): The maximum number of training frames.
episodes (int): The maximum number of training episodes.
'''<block_end>@abstractmethod<def_stmt>test self episodes=100<block_start>'''
Test the agent in eval mode for a certain number of episodes.
Args:
episodes (int): The number of test episodes.
Returns:
list(float): A list of all returns received during testing.
'''<block_end>@property@abstractmethod<def_stmt>frame self<block_start>'''The index of the current training frame.'''<block_end>@property@abstractmethod<def_stmt>episode self<block_start>'''The index of the current training episode'''<block_end><def_stmt>_log_training_episode self returns fps<block_start><if_stmt><not>self._quiet<block_start>print('episode: {}, frame: {}, fps: {}, returns: {}'.format(self.episode self.frame int(fps) returns))<block_end><if_stmt>returns<g>self._best_returns<block_start>self._best_returns=returns<block_end>self._returns100.append(returns)<if_stmt>len(self._returns100)<eq>100<block_start>mean=np.mean(self._returns100)<line_sep>std=np.std(self._returns100)<line_sep>self._writer.add_summary('returns100' mean std step="frame")<line_sep>self._returns100=[]<block_end>self._writer.add_evaluation('returns/episode' returns step="episode")<line_sep>self._writer.add_evaluation('returns/frame' returns step="frame")<line_sep>self._writer.add_evaluation("returns/max" self._best_returns step="frame")<line_sep>self._writer.add_scalar('fps' fps step="frame")<block_end><def_stmt>_log_test_episode self episode returns<block_start><if_stmt><not>self._quiet<block_start>print('test episode: {}, returns: {}'.format(episode returns))<block_end><block_end><def_stmt>_log_test self returns<block_start><if_stmt><not>self._quiet<block_start>print('test returns (mean ± sem): {} ± {}'.format(np.mean(returns) stats.sem(returns)))<block_end>self._writer.add_summary('returns-test' np.mean(returns) np.std(returns))<block_end><def_stmt>save self<block_start><return>self._preset.save('{}/preset.pt'.format(self._writer.log_dir))<block_end><def_stmt>close self<block_start>self._writer.close()<block_end><block_end> |
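# Hedged sketch of a minimal concrete subclass (illustrative only; the counters
# and return values are placeholders, not part of the original module):
#     class DummyExperiment(Experiment):
#         def __init__(self, writer, quiet=False):
#             super().__init__(writer, quiet)
#             self._frame, self._episode = 0, 0
#         def train(self, frames=np.inf, episodes=np.inf):
#             pass
#         def test(self, episodes=100):
#             return [0.0] * episodes
#         @property
#         def frame(self):
#             return self._frame
#         @property
#         def episode(self):
#             return self._episode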
# Databricks CLI
# Copyright 2017 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"), except
# that the use of services to which certain application programming
# interfaces (each, an "API") connect requires that the user first obtain
# a license for the use of the APIs from Databricks, Inc. ("Databricks"),
# by creating an account at www.databricks.com and agreeing to either (a)
# the Community Edition Terms of Service, (b) the Databricks Terms of
# Service, or (c) another written agreement between Licensee and Databricks
# for the use of the APIs.
#
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint:disable=protected-access
<import_stmt>json<import_stmt>mock<import_stmt>click<import_from_stmt>click.testing CliRunner<import_stmt>databricks_cli.configure.config<as>config<import_from_stmt>databricks_cli.utils InvalidConfigurationError eat_exceptions<import_from_stmt>databricks_cli.configure.provider DatabricksConfig<import_from_stmt>databricks_cli.click_types ContextObject<import_from_stmt>tests.utils provide_conf<line_sep>@provide_conf<def_stmt>test_debug_option # Test that context object debug_mode property changes with --debug flag fed.
<block_start>@click.command()@click.option('--debug-fed' type=bool)@config.debug_option<def_stmt>test_debug debug_fed# noqa
<block_start>ctx=click.get_current_context()<line_sep>context_object=ctx.ensure_object(ContextObject)<assert_stmt>context_object.debug_mode<is>debug_fed<block_end>result=CliRunner().invoke(test_debug ['--debug' '--debug-fed' <true>])<assert_stmt>result.exit_code<eq>0<line_sep>result=CliRunner().invoke(test_debug ['--debug-fed' <false>])<assert_stmt>result.exit_code<eq>0<line_sep># Test that with eat_exceptions wrapper, 'Traceback' appears or doesn't appear depending on
# whether --debug flag is given.
@click.command()@config.debug_option@eat_exceptions<def_stmt>test_debug_traceback # noqa
<block_start><assert_stmt><false><block_end>result=CliRunner().invoke(test_debug_traceback ['--debug'])<assert_stmt>result.exit_code<eq>1<assert_stmt>'Traceback'<in>result.output<line_sep>result=CliRunner().invoke(test_debug_traceback)<assert_stmt>result.exit_code<eq>1<assert_stmt>'Traceback'<not><in>result.output<block_end>@provide_conf<def_stmt>test_provide_api_client <block_start>@click.command()@click.option('--x' required=<true>)@[email protected]_api_client<def_stmt>test_command api_client x# noqa
<block_start>click.echo(x)<block_end>result=CliRunner().invoke(test_command ['--x' '1'])<assert_stmt>result.exit_code<eq>0<assert_stmt>result.output<eq>'1\n'<block_end><def_stmt>test_provide_api_client_invalid <block_start>@click.command()@click.option('--x' required=<true>)@[email protected]_api_client<def_stmt>test_command api_client x# noqa
<block_start>click.echo(x)<block_end>result=CliRunner().invoke(test_command ['--x' '1'])<assert_stmt>result.exit_code<eq>1<assert_stmt>isinstance(result.exception InvalidConfigurationError)<block_end>TEST_PROFILE_1='test-profile-1'<line_sep>TEST_PROFILE_2='test-profile-2'<def_stmt>test_provide_profile_twice <block_start>@click.group()@config.profile_option<def_stmt>test_group <block_start><pass><block_end>@click.command()@config.profile_option<def_stmt>test_command # noqa
<block_start><pass><block_end>test_group.add_command(test_command 'test')<line_sep>result=CliRunner().invoke(test_group ['--profile' TEST_PROFILE_1 'test' '--profile' TEST_PROFILE_2])<assert_stmt>'--profile can only be provided once. The profiles [{}, {}] were provided.'.format(TEST_PROFILE_1 TEST_PROFILE_2)<in>result.output<block_end>TEST_HOST='https://test.cloud.databricks.com'<line_sep>TEST_TOKEN='<PASSWORD>'<def_stmt>test_command_headers <block_start>@click.group()@config.profile_option<def_stmt>outer_test_group <block_start><pass><block_end>@click.group()@config.profile_option<def_stmt>inner_test_group <block_start><pass><block_end>@click.command()@click.option('--x' required=<true>)@[email protected]_api_client<def_stmt>test_command api_client x# noqa
<block_start>click.echo(json.dumps(api_client.default_headers))<block_end><with_stmt>mock.patch("databricks_cli.configure.config.get_config")<as>config_mock<block_start><with_stmt>mock.patch("uuid.uuid1")<as>uuid_mock<block_start>config_mock.return_value=DatabricksConfig.from_token(TEST_HOST TEST_TOKEN)<line_sep>uuid_mock.return_value='1234'<line_sep>inner_test_group.add_command(test_command 'subcommand')<line_sep>outer_test_group.add_command(inner_test_group 'command')<line_sep>result=CliRunner().invoke(outer_test_group ['command' 'subcommand' '--x' '12'])<assert_stmt>result.exception<is><none><line_sep>default_headers=json.loads(result.output)<assert_stmt>'user-agent'<in>default_headers<assert_stmt>"command-subcommand-1234"<in>default_headers['user-agent']<block_end><block_end><block_end> |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: proto
<class_stmt>AuthMethod(object)<block_start>ANONYMOUS=0<line_sep>COOKIE=1<line_sep>TLS=2<line_sep>TICKET=3<line_sep>CRA=4<line_sep>SCRAM=5<line_sep>CRYPTOSIGN=6<block_end> |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Requests/Messages/SetPlayerTeamMessage.proto
<import_stmt>sys<line_sep>_b=sys.version_info[0]<l>3<and>(<lambda>x:x)<or>(<lambda>x:x.encode('latin1'))<import_from_stmt>google.protobuf descriptor<as>_descriptor<import_from_stmt>google.protobuf message<as>_message<import_from_stmt>google.protobuf reflection<as>_reflection<import_from_stmt>google.protobuf symbol_database<as>_symbol_database<import_from_stmt>google.protobuf descriptor_pb2<line_sep># @@protoc_insertion_point(imports)
_sym_db=_symbol_database.Default()<import_from_stmt>POGOProtos.Enums TeamColor_pb2<as>POGOProtos_dot_Enums_dot_TeamColor__pb2<line_sep>DESCRIPTOR=_descriptor.FileDescriptor(name='POGOProtos/Networking/Requests/Messages/SetPlayerTeamMessage.proto' package='POGOProtos.Networking.Requests.Messages' syntax='proto3' serialized_pb=_b('\nBPOGOProtos/Networking/Requests/Messages/SetPlayerTeamMessage.proto\x12\'POGOProtos.Networking.Requests.Messages\x1a POGOProtos/Enums/TeamColor.proto\"A\n\x14SetPlayerTeamMessage\x12)\n\x04team\x18\x01 \x01(\x0e\x32\x1b.POGOProtos.Enums.TeamColorb\x06proto3') dependencies=[POGOProtos_dot_Enums_dot_TeamColor__pb2.DESCRIPTOR ])<line_sep>_sym_db.RegisterFileDescriptor(DESCRIPTOR)<line_sep>_SETPLAYERTEAMMESSAGE=_descriptor.Descriptor(name='SetPlayerTeamMessage' full_name='POGOProtos.Networking.Requests.Messages.SetPlayerTeamMessage' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='team' full_name='POGOProtos.Networking.Requests.Messages.SetPlayerTeamMessage.team' index=0 number=1 type=14 cpp_type=8 label=1 has_default_value=<false> default_value=0 message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) ] extensions=[] nested_types=[] enum_types=[] options=<none> is_extendable=<false> syntax='proto3' extension_ranges=[] oneofs=[] serialized_start=145 serialized_end=210 )<line_sep>_SETPLAYERTEAMMESSAGE.fields_by_name['team'].enum_type=POGOProtos_dot_Enums_dot_TeamColor__pb2._TEAMCOLOR<line_sep>DESCRIPTOR.message_types_by_name['SetPlayerTeamMessage']=_SETPLAYERTEAMMESSAGE<line_sep>SetPlayerTeamMessage=_reflection.GeneratedProtocolMessageType('SetPlayerTeamMessage' (_message.Message ) dict(DESCRIPTOR=_SETPLAYERTEAMMESSAGE __module__='POGOProtos.Networking.Requests.Messages.SetPlayerTeamMessage_pb2'# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Requests.Messages.SetPlayerTeamMessage)
))<line_sep>_sym_db.RegisterMessage(SetPlayerTeamMessage)<line_sep># @@protoc_insertion_point(module_scope)
|
<import_stmt>unittest<class_stmt>CaseA(unittest.TestCase)<block_start><def_stmt>runTest self<block_start>self.assertTrue(<true>)<block_end><block_end><class_stmt>CaseB(unittest.TestCase)<block_start><def_stmt>runTest self<block_start>self.assertTrue(<true>)<block_end><block_end><class_stmt>DotnameLoadingSuite(unittest.TestSuite)<block_start><def_stmt>__init__ self<block_start>super(DotnameLoadingSuite self).__init__()<line_sep>self.addTest(CaseA())<line_sep>self.addTest(CaseB())<block_end><block_end><class_stmt>DotnameLoadingTest(unittest.TestCase)<block_start><def_stmt>test_a self<block_start>self.assertTrue(<true>)<block_end><def_stmt>test_b self<block_start>self.assertTrue(<true>)<block_end><block_end><class_stmt>NotTestCase()<block_start><def_stmt>not_test self<block_start><pass><block_end><block_end> |
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
<import_from_stmt>verta._swagger.base_type BaseType<class_stmt>UacResources(BaseType)<block_start><def_stmt>__init__ self service=<none> resource_ids=<none> role_service_resource_type=<none> authz_service_resource_type=<none> modeldb_service_resource_type=<none><block_start>required={"service":<false> "resource_ids":<false> "role_service_resource_type":<false> "authz_service_resource_type":<false> "modeldb_service_resource_type":<false> }<line_sep>self.service=service<line_sep>self.resource_ids=resource_ids<line_sep>self.role_service_resource_type=role_service_resource_type<line_sep>self.authz_service_resource_type=authz_service_resource_type<line_sep>self.modeldb_service_resource_type=modeldb_service_resource_type<for_stmt>k,v required.items()<block_start><if_stmt>self[k]<is><none><and>v<block_start><raise>ValueError('attribute {} is required'.format(k))<block_end><block_end><block_end>@staticmethod<def_stmt>from_json d<block_start><import_from_stmt>.ServiceEnumService ServiceEnumService<import_from_stmt>.RoleResourceEnumRoleServiceResourceTypes RoleResourceEnumRoleServiceResourceTypes<import_from_stmt>.AuthzResourceEnumAuthzServiceResourceTypes AuthzResourceEnumAuthzServiceResourceTypes<import_from_stmt>.ModelResourceEnumModelDBServiceResourceTypes ModelResourceEnumModelDBServiceResourceTypes<line_sep>tmp=d.get('service' <none>)<if_stmt>tmp<is><not><none><block_start>d['service']=ServiceEnumService.from_json(tmp)<block_end>tmp=d.get('resource_ids' <none>)<if_stmt>tmp<is><not><none><block_start>d['resource_ids']=[tmp<for>tmp tmp]<block_end>tmp=d.get('role_service_resource_type' <none>)<if_stmt>tmp<is><not><none><block_start>d['role_service_resource_type']=RoleResourceEnumRoleServiceResourceTypes.from_json(tmp)<block_end>tmp=d.get('authz_service_resource_type' <none>)<if_stmt>tmp<is><not><none><block_start>d['authz_service_resource_type']=AuthzResourceEnumAuthzServiceResourceTypes.from_json(tmp)<block_end>tmp=d.get('modeldb_service_resource_type' <none>)<if_stmt>tmp<is><not><none><block_start>d['modeldb_service_resource_type']=ModelResourceEnumModelDBServiceResourceTypes.from_json(tmp)<block_end><return>UacResources(**d)<block_end><block_end> |
# -*- coding: utf-8 -*-
# PyQt
<import_from_stmt>qtpy.QtGui *<import_from_stmt>qtpy.QtWidgets *<import_from_stmt>qtpy.QtCore *<import_from_stmt>qtpy QtGui QtCore<line_sep># Others
<import_stmt>os<import_stmt>imp<import_stmt>sys<import_stmt>json<import_stmt>glob<import_from_stmt>functools partial<import_from_stmt>collections OrderedDict<line_sep># Import the button functions
#---------------------------------------------------------------------------------------
ALL_FUNC_BUTTON=[]<line_sep>funcBtnPath=os.getcwd()+'/func-button/'<line_sep>allPath=glob.glob(funcBtnPath+r'*.py')<for_stmt>path allPath<block_start>fileName=path.split("\\")[-1]<line_sep>modelName=fileName.split(".")[0]<line_sep>ALL_FUNC_BUTTON.append(modelName)<line_sep>imp.load_source('ctaFuncButttons' path)<block_end>BUTTON_FUNC={}<import_from_stmt>ctaFuncButttons *<for_stmt>func_bt ALL_FUNC_BUTTON<block_start>fn_obj=getattr(sys.modules['ctaFuncButttons'] func_bt)<line_sep>BUTTON_FUNC[func_bt]=fn_obj<block_end># String conversion
#---------------------------------------------------------------------------------------
<try_stmt><block_start>_fromUtf8=QtCore.QString.fromUtf8<block_end><except_stmt>AttributeError<block_start><def_stmt>_fromUtf8 s<block_start><return>s<block_end><block_end>########################################################################
<class_stmt>uiBasicIO(QWidget)<block_start>"""Meta class that automatically generates input fields and buttons from a json file"""<line_sep>#----------------------------------------------------------------------
<def_stmt>__init__ self parent=<none> inpFile='' btnFile=''<block_start>"""Initialization function"""<line_sep>super(uiBasicIO self).__init__(parent)<line_sep># Input field data
self.classDict=OrderedDict()<line_sep>self.labelDict={}<line_sep>self.widthDict={}<line_sep>self.typeDict={}<line_sep>self.evalDict={}<line_sep>self.editDict={}<line_sep># Button data
self.bClassDict=OrderedDict()<line_sep>self.bWidthDict={}<line_sep>self.bFunDict={}<line_sep>self.buttonDict={}<line_sep># Input fields and buttons
self.groupInput=<none><line_sep>self.groupProcess=<none><line_sep># Configuration files for the input fields and buttons
self.inpFile=inpFile<line_sep>self.btnFile=btnFile<line_sep>self.loadInputSetting()<line_sep>self.loadButtonSetting()<line_sep>self.initBasicUi()<block_end>#----------------------------------------------------------------------
<def_stmt>getInputParamByName self name<block_start>"""Get the value of an input field parameter"""<line_sep>typeName=self.typeDict[name]<line_sep>editCell=self.editDict[name]<line_sep>val=str(editCell.currentText())<if>typeName<eq>'List'<else>str(editCell.text())<try_stmt><block_start><return>(eval(val)<if>self.evalDict[name]<else>val)<block_end><except_stmt><block_start><return>val<block_end><block_end>#----------------------------------------------------------------------
<def_stmt>loadInputSetting self<block_start>"""Load the input field UI configuration"""<line_sep>settingFile=self.inpFile<with_stmt>open(settingFile)<as>f<block_start><for_stmt>setting json.load(f)<block_start>name=setting['name']<line_sep>label=setting['label']<line_sep>typeName=setting['type']<line_sep>evalType=setting['eval']<line_sep>width=setting['width']<line_sep>className=setting['class']<line_sep>default=setting['default']<line_sep># Label
self.labelDict[name]=QLabel(label)<line_sep>self.labelDict[name].setAlignment(QtCore.Qt.AlignCenter)<line_sep># Width
self.widthDict[name]=width<line_sep># Input field type
self.typeDict[name]=typeName<line_sep>self.evalDict[name]=evalType<line_sep># Category
<if_stmt>className<in>self.classDict<block_start>self.classDict[className].append(name)<block_end><else_stmt><block_start>self.classDict[className]=[name]<block_end># Input field
<if_stmt>typeName<eq>'Edit'<block_start>self.editDict[name]=QLineEdit()<line_sep>self.editDict[name].setText(default)<block_end><elif_stmt>typeName<eq>'List'<block_start>self.editDict[name]=QComboBox()<line_sep>self.editDict[name].addItems(eval(setting['ListVar']))<block_end><block_end><block_end><block_end>#----------------------------------------------------------------------
<def_stmt>loadButtonSetting self<block_start>"""Load the button UI configuration"""<line_sep>settingFile=self.btnFile<with_stmt>open(settingFile)<as>f<block_start><for_stmt>setting json.load(f)<block_start>label=setting['label']<line_sep>func=setting['func']<line_sep>width=setting['width']<line_sep>className=setting['class']<line_sep>style=setting['style']<line_sep># Button
self.buttonDict[func]=QPushButton(label)<line_sep>self.buttonDict[func].setObjectName(_fromUtf8(style))<line_sep>self.buttonDict[func].clicked.connect(partial(BUTTON_FUNC[func] self))<line_sep># Width
self.bWidthDict[func]=width<line_sep># Category
<if_stmt>className<in>self.bClassDict<block_start>self.bClassDict[className].append(func)<block_end><else_stmt><block_start>self.bClassDict[className]=[func]<block_end><block_end><block_end><block_end>#----------------------------------------------------------------------
<def_stmt>initBasicUi self<block_start>"""Initialize the UI"""<line_sep># Generate the input field UI from the configuration file
self.groupInput=QGroupBox()<line_sep>self.groupInput.setTitle(u'')<line_sep>gridup=QGridLayout()<line_sep>i=0<for_stmt>className self.classDict<block_start>classIndex=i<line_sep># Titles and input fields
<for_stmt>name self.classDict[className]<block_start>width=self.widthDict[name]<line_sep>qLabel=self.labelDict[name]<line_sep>qEdit=self.editDict[name]<line_sep>gridup.addWidget(qLabel 1 i)<line_sep>gridup.addWidget(qEdit 2 i)<line_sep>gridup.setColumnStretch(i width)<line_sep>i<augadd>1<block_end># Category title
qcLabel=QLabel(className)<line_sep>qcLabel.setAlignment(QtCore.Qt.AlignCenter)<line_sep>qcLabel.setFont(QtGui.QFont("Roman times" 10 QtGui.QFont.Bold))<line_sep>gridup.addWidget(qcLabel 0 classIndex 1 i-classIndex)<line_sep># Separator
<for_stmt>j xrange(0 3)<block_start>qcSplit=QLabel(u'|')<line_sep>qcSplit.setAlignment(QtCore.Qt.AlignCenter)<line_sep>gridup.addWidget(qcSplit j i)<block_end>i<augadd>1<block_end>self.groupInput.setLayout(gridup)<line_sep># Generate the button UI from the configuration file
self.groupProcess=QGroupBox()<line_sep>self.groupProcess.setTitle(u'')<line_sep>griddown=QGridLayout()<line_sep>i=0<for_stmt>className self.bClassDict<block_start>classIndex=i<line_sep># Titles and input fields
<for_stmt>name self.bClassDict[className]<block_start>width=self.bWidthDict[name]<line_sep>qButton=self.buttonDict[name]<line_sep>griddown.addWidget(qButton 1 i)<line_sep>griddown.setColumnStretch(i width)<line_sep>i<augadd>1<block_end># Category title
qcLabel=QLabel(className)<line_sep>qcLabel.setAlignment(QtCore.Qt.AlignCenter)<line_sep>qcLabel.setFont(QFont("Roman times" 10 QtGui.QFont.Bold))<line_sep>griddown.addWidget(qcLabel 0 classIndex 1 i-classIndex)<line_sep># Separator
<for_stmt>j xrange(0 2)<block_start>qcSplit=QLabel(u'|')<line_sep>qcSplit.setAlignment(QtCore.Qt.AlignCenter)<line_sep>griddown.addWidget(qcSplit j i)<block_end>i<augadd>1<block_end>self.groupProcess.setLayout(griddown)<block_end><block_end> |
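# Hedged sketch of the JSON layout loadInputSetting/loadButtonSetting expect
# (field names taken from the code above; the values are illustrative only):
#     inpFile: [{"name": "symbol", "label": "Symbol", "type": "Edit", "eval": false,
#                "width": 2, "class": "Contract", "default": ""},
#               {"name": "direction", "label": "Direction", "type": "List", "eval": false,
#                "width": 2, "class": "Order", "ListVar": "[u'long', u'short']"}]
#     btnFile: [{"label": "Send", "func": "sendOrder", "width": 2,
#                "class": "Order", "style": "button_common"}]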
<import_stmt>vtk<def_stmt>main <block_start>colors=vtk.vtkNamedColors()<line_sep># create a rendering window and renderer
ren=vtk.vtkRenderer()<line_sep>renWin=vtk.vtkRenderWindow()<line_sep>renWin.AddRenderer(ren)<line_sep># create a renderwindowinteractor
iren=vtk.vtkRenderWindowInteractor()<line_sep>iren.SetRenderWindow(renWin)<line_sep>style=vtk.vtkInteractorStyleTrackballActor()<line_sep>iren.SetInteractorStyle(style)<line_sep># create source
sphereSource=vtk.vtkSphereSource()<line_sep># mapper
mapper=vtk.vtkPolyDataMapper()<line_sep>mapper.SetInputConnection(sphereSource.GetOutputPort())<line_sep># actor
actor=vtk.vtkActor()<line_sep>actor.SetMapper(mapper)<line_sep>actor.GetProperty().SetColor(colors.GetColor3d('Chartreuse'))<line_sep># assign actor to the renderer
ren.AddActor(actor)<line_sep>ren.SetBackground(colors.GetColor3d('PaleGoldenrod'))<line_sep># enable user interface interactor
iren.Initialize()<line_sep>renWin.Render()<line_sep>iren.Start()<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
<import_from_stmt>ufora.FORA.python.PurePython.testModules.same_line_number.B B<class_stmt>A(object)<block_start><def_stmt>__init__ self m<block_start>self.m=m<block_end><def_stmt>foo self<block_start><return>B(self.m)<block_end><block_end> |
<import_from_stmt>collections.abc Iterable<import_from_stmt>io StringIO<import_from_stmt>numbers Real<import_from_stmt>warnings warn<import_stmt>numpy<as>np<import_stmt>openmc.checkvalue<as>cv<import_from_stmt>openmc.mixin EqualityMixin<import_from_stmt>openmc.stats Univariate Tabular Uniform Legendre<import_from_stmt>.function INTERPOLATION_SCHEME<import_from_stmt>.data EV_PER_MEV<import_from_stmt>.endf get_head_record get_cont_record get_tab1_record get_list_record get_tab2_record<class_stmt>AngleDistribution(EqualityMixin)<block_start>"""Angle distribution as a function of incoming energy
Parameters
----------
energy : Iterable of float
Incoming energies in eV at which distributions exist
mu : Iterable of openmc.stats.Univariate
Distribution of scattering cosines corresponding to each incoming energy
Attributes
----------
energy : Iterable of float
Incoming energies in eV at which distributions exist
mu : Iterable of openmc.stats.Univariate
Distribution of scattering cosines corresponding to each incoming energy
"""<def_stmt>__init__ self energy mu<block_start>super().__init__()<line_sep>self.energy=energy<line_sep>self.mu=mu<block_end>@property<def_stmt>energy self<block_start><return>self._energy<block_end>@property<def_stmt>mu self<block_start><return>self._mu<block_end>@energy.setter<def_stmt>energy self energy<block_start>cv.check_type('angle distribution incoming energy' energy Iterable Real)<line_sep>self._energy=energy<block_end>@mu.setter<def_stmt>mu self mu<block_start>cv.check_type('angle distribution scattering cosines' mu Iterable Univariate)<line_sep>self._mu=mu<block_end><def_stmt>to_hdf5 self group<block_start>"""Write angle distribution to an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to write to
"""<line_sep>dset=group.create_dataset('energy' data=self.energy)<line_sep># Make sure all data is tabular
mu_tabular=[mu_i<if>isinstance(mu_i Tabular)<else>mu_i.to_tabular()<for>mu_i self.mu]<line_sep># Determine total number of (mu,p) pairs and create array
n_pairs=sum([len(mu_i.x)<for>mu_i mu_tabular])<line_sep>pairs=np.empty((3 n_pairs))<line_sep># Create array for offsets
offsets=np.empty(len(mu_tabular) dtype=int)<line_sep>interpolation=np.empty(len(mu_tabular) dtype=int)<line_sep>j=0<line_sep># Populate offsets and pairs array
<for_stmt>i,mu_i enumerate(mu_tabular)<block_start>n=len(mu_i.x)<line_sep>offsets[i]=j<line_sep>interpolation[i]=1<if>mu_i.interpolation<eq>'histogram'<else>2<line_sep>pairs[0 j:j+n]=mu_i.x<line_sep>pairs[1 j:j+n]=mu_i.p<line_sep>pairs[2 j:j+n]=mu_i.c<line_sep>j<augadd>n<block_end># Create dataset for distributions
dset=group.create_dataset('mu' data=pairs)<line_sep># Write interpolation as attribute
dset.attrs['offsets']=offsets<line_sep>dset.attrs['interpolation']=interpolation<block_end>@classmethod<def_stmt>from_hdf5 cls group<block_start>"""Generate angular distribution from HDF5 data
Parameters
----------
group : h5py.Group
HDF5 group to read from
Returns
-------
openmc.data.AngleDistribution
Angular distribution
"""<line_sep>energy=group['energy'][()]<line_sep>data=group['mu']<line_sep>offsets=data.attrs['offsets']<line_sep>interpolation=data.attrs['interpolation']<line_sep>mu=[]<line_sep>n_energy=len(energy)<for_stmt>i range(n_energy)# Determine length of outgoing energy distribution and number of
# discrete lines
<block_start>j=offsets[i]<if_stmt>i<l>n_energy-1<block_start>n=offsets[i+1]-j<block_end><else_stmt><block_start>n=data.shape[1]-j<block_end>interp=INTERPOLATION_SCHEME[interpolation[i]]<line_sep>mu_i=Tabular(data[0 j:j+n] data[1 j:j+n] interp)<line_sep>mu_i.c=data[2 j:j+n]<line_sep>mu.append(mu_i)<block_end><return>cls(energy mu)<block_end>@classmethod<def_stmt>from_ace cls ace location_dist location_start<block_start>"""Generate an angular distribution from ACE data
Parameters
----------
ace : openmc.data.ace.Table
ACE table to read from
location_dist : int
Index in the XSS array corresponding to the start of a block,
e.g. JXS(9).
location_start : int
Index in the XSS array corresponding to the start of an angle
distribution array
Returns
-------
openmc.data.AngleDistribution
Angular distribution
"""<line_sep># Set starting index for angle distribution
idx=location_dist+location_start-1<line_sep># Number of energies at which angular distributions are tabulated
n_energies=int(ace.xss[idx])<line_sep>idx<augadd>1<line_sep># Incoming energy grid
energy=ace.xss[idx:idx+n_energies]<times>EV_PER_MEV<line_sep>idx<augadd>n_energies<line_sep># Read locations for angular distributions
lc=ace.xss[idx:idx+n_energies].astype(int)<line_sep>idx<augadd>n_energies<line_sep>mu=[]<for_stmt>i range(n_energies)<block_start><if_stmt>lc[i]<g>0# Equiprobable 32 bin distribution
<block_start>n_bins=32<line_sep>idx=location_dist+abs(lc[i])-1<line_sep>cos=ace.xss[idx:idx+n_bins+1]<line_sep>pdf=np.zeros(n_bins+1)<line_sep>pdf[:n_bins]=1.0/(n_bins<times>np.diff(cos))<line_sep>cdf=np.linspace(0.0 1.0 n_bins+1)<line_sep>mu_i=Tabular(cos pdf 'histogram' ignore_negative=<true>)<line_sep>mu_i.c=cdf<block_end><elif_stmt>lc[i]<l>0# Tabular angular distribution
<block_start>idx=location_dist+abs(lc[i])-1<line_sep>intt=int(ace.xss[idx])<line_sep>n_points=int(ace.xss[idx+1])<line_sep># Data is given as rows of (values, PDF, CDF)
data=ace.xss[idx+2:idx+2+3<times>n_points]<line_sep>data.shape=(3 n_points)<line_sep>mu_i=Tabular(data[0] data[1] INTERPOLATION_SCHEME[intt])<line_sep>mu_i.c=data[2]<block_end><else_stmt># Isotropic angular distribution
<block_start>mu_i=Uniform(-1. 1.)<block_end>mu.append(mu_i)<block_end><return>cls(energy mu)<block_end>@classmethod<def_stmt>from_endf cls ev mt<block_start>"""Generate an angular distribution from an ENDF evaluation
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF evaluation
mt : int
The MT value of the reaction to get angular distributions for
Returns
-------
openmc.data.AngleDistribution
Angular distribution
"""<line_sep>file_obj=StringIO(ev.section[4 mt])<line_sep># Read HEAD record
items=get_head_record(file_obj)<line_sep>lvt=items[2]<line_sep>ltt=items[3]<line_sep># Read CONT record
items=get_cont_record(file_obj)<line_sep>li=items[2]<line_sep>nk=items[4]<line_sep>center_of_mass=(items[3]<eq>2)<line_sep># Check for obsolete energy transformation matrix. If present, just skip
# it and keep reading
<if_stmt>lvt<g>0<block_start>warn('Obsolete energy transformation matrix in MF=4 angular '<concat>'distribution.')<for_stmt>_ range((nk+5)<floordiv>6)<block_start>file_obj.readline()<block_end><block_end><if_stmt>ltt<eq>0<and>li<eq>1# Purely isotropic
<block_start>energy=np.array([0. ev.info['energy_max']])<line_sep>mu=[Uniform(-1. 1.) Uniform(-1. 1.)]<block_end><elif_stmt>ltt<eq>1<and>li<eq>0# Legendre polynomial coefficients
<block_start>params,tab2=get_tab2_record(file_obj)<line_sep>n_energy=params[5]<line_sep>energy=np.zeros(n_energy)<line_sep>mu=[]<for_stmt>i range(n_energy)<block_start>items,al=get_list_record(file_obj)<line_sep>temperature=items[0]<line_sep>energy[i]=items[1]<line_sep>coefficients=np.asarray([1.0]+al)<line_sep>mu.append(Legendre(coefficients))<block_end><block_end><elif_stmt>ltt<eq>2<and>li<eq>0# Tabulated probability distribution
<block_start>params,tab2=get_tab2_record(file_obj)<line_sep>n_energy=params[5]<line_sep>energy=np.zeros(n_energy)<line_sep>mu=[]<for_stmt>i range(n_energy)<block_start>params,f=get_tab1_record(file_obj)<line_sep>temperature=params[0]<line_sep>energy[i]=params[1]<if_stmt>f.n_regions<g>1<block_start><raise>NotImplementedError('Angular distribution with multiple '<concat>'interpolation regions not supported.')<block_end>mu.append(Tabular(f.x f.y INTERPOLATION_SCHEME[f.interpolation[0]]))<block_end><block_end><elif_stmt>ltt<eq>3<and>li<eq>0# Legendre for low energies / tabulated for high energies
<block_start>params,tab2=get_tab2_record(file_obj)<line_sep>n_energy_legendre=params[5]<line_sep>energy_legendre=np.zeros(n_energy_legendre)<line_sep>mu=[]<for_stmt>i range(n_energy_legendre)<block_start>items,al=get_list_record(file_obj)<line_sep>temperature=items[0]<line_sep>energy_legendre[i]=items[1]<line_sep>coefficients=np.asarray([1.0]+al)<line_sep>mu.append(Legendre(coefficients))<block_end>params,tab2=get_tab2_record(file_obj)<line_sep>n_energy_tabulated=params[5]<line_sep>energy_tabulated=np.zeros(n_energy_tabulated)<for_stmt>i range(n_energy_tabulated)<block_start>params,f=get_tab1_record(file_obj)<line_sep>temperature=params[0]<line_sep>energy_tabulated[i]=params[1]<if_stmt>f.n_regions<g>1<block_start><raise>NotImplementedError('Angular distribution with multiple '<concat>'interpolation regions not supported.')<block_end>mu.append(Tabular(f.x f.y INTERPOLATION_SCHEME[f.interpolation[0]]))<block_end>energy=np.concatenate((energy_legendre energy_tabulated))<block_end><return>AngleDistribution(energy mu)<block_end><block_end> |
'''Tests the tools and utilities in pulsar.utils.'''<import_stmt>unittest<import_from_stmt>pulsar.utils.system platform get_maxfd<class_stmt>TestSystem(unittest.TestCase)<block_start>@unittest.skipUnless(platform.is_posix 'Posix platform required')<def_stmt>testPlatform self<block_start>self.assertFalse(platform.is_windows)<block_end><def_stmt>test_maxfd self<block_start>m=get_maxfd()<line_sep>self.assertTrue(m)<block_end><block_end> |
<import_stmt>supriya.osc<import_from_stmt>supriya.enums RequestId<import_from_stmt>.bases Request Response<class_stmt>ClearScheduleRequest(Request)### CLASS VARIABLES ###
<block_start>request_id=RequestId.CLEAR_SCHEDULE<line_sep>### PUBLIC METHODS ###
<def_stmt>to_osc self * with_placeholders=<false><block_start>request_id=self.request_name<line_sep>contents=[request_id]<line_sep>message=supriya.osc.OscMessage(*contents)<line_sep><return>message<block_end><block_end><class_stmt>DoneResponse(Response)### INITIALIZER ###
<block_start><def_stmt>__init__ self action=<none><block_start>self._action=action<block_end>### PUBLIC METHODS ###
@classmethod<def_stmt>from_osc_message cls osc_message<block_start>arguments=osc_message.contents<line_sep>response=cls(action=tuple(arguments))<line_sep><return>response<block_end>### PUBLIC PROPERTIES ###
@property<def_stmt>action self<block_start><return>self._action<block_end><block_end><class_stmt>DumpOscRequest(Request)<block_start>"""
A /dumpOSC request.
::
>>> import supriya.commands
>>> request = supriya.commands.DumpOscRequest(1)
>>> request
DumpOscRequest(
osc_status=1,
)
::
>>> request.to_osc()
OscMessage('/dumpOSC', 1)
"""<line_sep>### CLASS VARIABLES ###
request_id=RequestId.DUMP_OSC<line_sep>### INITIALIZER ###
<def_stmt>__init__ self osc_status=<none><block_start>Request.__init__(self)<line_sep>self._osc_status=int(osc_status)<block_end>### PUBLIC METHODS ###
<def_stmt>to_osc self * with_placeholders=<false><block_start>request_id=self.request_name<line_sep>osc_status=int(self.osc_status)<assert_stmt>0<le>osc_status<le>4<line_sep>message=supriya.osc.OscMessage(request_id osc_status)<line_sep><return>message<block_end>### PUBLIC PROPERTIES ###
@property<def_stmt>osc_status self<block_start><return>self._osc_status<block_end><block_end><class_stmt>FailResponse(Response)### INITIALIZER ###
<block_start><def_stmt>__init__ self failed_command=<none> failure_reason=<none><block_start>self._failed_command=failed_command<line_sep>self._failure_reason=failure_reason<block_end>### PUBLIC METHODS ###
@classmethod<def_stmt>from_osc_message cls osc_message<block_start>failed_command=osc_message.contents[0]<line_sep>failure_reason=osc_message.contents[1:]<if_stmt>failure_reason<block_start>failure_reason=tuple(failure_reason)<block_end>response=cls(failed_command=failed_command failure_reason=failure_reason)<line_sep><return>response<block_end>### PUBLIC PROPERTIES ###
@property<def_stmt>failed_command self<block_start><return>self._failed_command<block_end>@property<def_stmt>failure_reason self<block_start><return>self._failure_reason<block_end><block_end><class_stmt>NothingRequest(Request)### CLASS VARIABLES ###
<block_start>request_id=RequestId.NOTHING<line_sep>### PUBLIC METHODS ###
<def_stmt>to_osc self * with_placeholders=<false><block_start><return>supriya.osc.OscMessage(0)<block_end><block_end><class_stmt>NotifyRequest(Request)<block_start>"""
A /notify message.
::
>>> import supriya.commands
>>> request = supriya.commands.NotifyRequest(notify_status=True,)
>>> request
NotifyRequest(
notify_status=True,
)
::
>>> request.to_osc()
OscMessage('/notify', 1)
"""<line_sep>### CLASS VARIABLES ###
request_id=RequestId.NOTIFY<line_sep>### INITIALIZER ###
<def_stmt>__init__ self notify_status=<none><block_start>Request.__init__(self)<line_sep>self._notify_status=bool(notify_status)<block_end>### PUBLIC METHODS ###
<def_stmt>to_osc self * with_placeholders=<false><block_start>request_id=self.request_name<line_sep>notify_status=int(self.notify_status)<line_sep>message=supriya.osc.OscMessage(request_id notify_status)<line_sep><return>message<block_end>### PUBLIC PROPERTIES ###
@property<def_stmt>notify_status self<block_start><return>self._notify_status<block_end>@property<def_stmt>response_patterns self<block_start><return>["/done" "/notify"] ["/fail" "/notify"]<block_end><block_end><class_stmt>QuitRequest(Request)### CLASS VARIABLES ###
<block_start>request_id=RequestId.QUIT<line_sep>### INITIALIZER ###
<def_stmt>__init__ self<block_start>Request.__init__(self)<block_end>### PUBLIC METHODS ###
<def_stmt>to_osc self * with_placeholders=<false><block_start>request_id=self.request_name<line_sep>message=supriya.osc.OscMessage(request_id)<line_sep><return>message<block_end>### PUBLIC PROPERTIES ###
@property<def_stmt>response_patterns self<block_start><return>["/done" "/quit"] <none><block_end><block_end><class_stmt>StatusRequest(Request)<block_start>"""
A /status request.
::
>>> import supriya.commands
>>> request = supriya.commands.StatusRequest()
>>> request
StatusRequest()
::
>>> request.to_osc()
OscMessage('/status')
"""<line_sep>### CLASS VARIABLES ###
request_id=RequestId.STATUS<line_sep>### PUBLIC METHODS ###
<def_stmt>to_osc self * with_placeholders=<false><block_start>request_id=self.request_name<line_sep>message=supriya.osc.OscMessage(request_id)<line_sep><return>message<block_end>### PUBLIC PROPERTIES ###
@property<def_stmt>response_patterns self<block_start><return>["/status.reply"] <none><block_end><block_end><class_stmt>StatusResponse(Response)### INITIALIZER ###
<block_start><def_stmt>__init__ self actual_sample_rate=<none> average_cpu_usage=<none> group_count=<none> peak_cpu_usage=<none> synth_count=<none> synthdef_count=<none> target_sample_rate=<none> ugen_count=<none> <block_start>self._actual_sample_rate=actual_sample_rate<line_sep>self._average_cpu_usage=average_cpu_usage<line_sep>self._group_count=group_count<line_sep>self._peak_cpu_usage=peak_cpu_usage<line_sep>self._synth_count=synth_count<line_sep>self._synthdef_count=synthdef_count<line_sep>self._target_sample_rate=target_sample_rate<line_sep>self._ugen_count=ugen_count<block_end>### PUBLIC METHODS ###
@classmethod<def_stmt>from_osc_message cls osc_message<block_start>"""
Create response from OSC message.
::
>>> message = supriya.osc.OscMessage(
... "/status.reply",
... 1,
... 0,
... 0,
... 2,
... 4,
... 0.040679048746824265,
... 0.15118031203746796,
... 44100.0,
... 44100.00077873274,
... )
>>> supriya.commands.StatusResponse.from_osc_message(message)
StatusResponse(
actual_sample_rate=44100.00077873274,
average_cpu_usage=0.040679048746824265,
group_count=2,
peak_cpu_usage=0.15118031203746796,
synth_count=0,
synthdef_count=4,
target_sample_rate=44100.0,
ugen_count=0,
)
"""<line_sep>arguments=osc_message.contents[1:]<line_sep>(ugen_count synth_count group_count synthdef_count average_cpu_usage peak_cpu_usage target_sample_rate actual_sample_rate )=arguments<line_sep>response=cls(actual_sample_rate=actual_sample_rate average_cpu_usage=average_cpu_usage group_count=group_count peak_cpu_usage=peak_cpu_usage synth_count=synth_count synthdef_count=synthdef_count target_sample_rate=target_sample_rate ugen_count=ugen_count )<line_sep><return>response<block_end><def_stmt>to_dict self<block_start>"""
Convert StatusResponse to a JSON-serializable dictionary.
::
>>> status_response = supriya.commands.StatusResponse(
... actual_sample_rate=44100.05692801021,
... average_cpu_usage=8.151924133300781,
... group_count=6,
... peak_cpu_usage=15.151398658752441,
... synth_count=19,
... synthdef_count=42,
... target_sample_rate=44100.0,
... ugen_count=685,
... )
::
>>> import json
>>> result = status_response.to_dict()
>>> result = json.dumps(result, indent=4, separators=(",", ": "), sort_keys=True,)
>>> print(result)
{
"server_status": {
"actual_sample_rate": 44100.05692801021,
"average_cpu_usage": 8.151924133300781,
"group_count": 6,
"peak_cpu_usage": 15.151398658752441,
"synth_count": 19,
"synthdef_count": 42,
"target_sample_rate": 44100.0,
"ugen_count": 685
}
}
"""<line_sep>result={"server_status":{"actual_sample_rate":self.actual_sample_rate "average_cpu_usage":self.average_cpu_usage "group_count":self.group_count "peak_cpu_usage":self.peak_cpu_usage "synth_count":self.synth_count "synthdef_count":self.synthdef_count "target_sample_rate":self.target_sample_rate "ugen_count":self.ugen_count }}<line_sep><return>result<block_end>### PUBLIC PROPERTIES ###
@property<def_stmt>actual_sample_rate self<block_start><return>self._actual_sample_rate<block_end>@property<def_stmt>average_cpu_usage self<block_start><return>self._average_cpu_usage<block_end>@property<def_stmt>group_count self<block_start><return>self._group_count<block_end>@property<def_stmt>peak_cpu_usage self<block_start><return>self._peak_cpu_usage<block_end>@property<def_stmt>synth_count self<block_start><return>self._synth_count<block_end>@property<def_stmt>synthdef_count self<block_start><return>self._synthdef_count<block_end>@property<def_stmt>target_sample_rate self<block_start><return>self._target_sample_rate<block_end>@property<def_stmt>ugen_count self<block_start><return>self._ugen_count<block_end><block_end><class_stmt>SyncedResponse(Response)### INITIALIZER ###
<block_start><def_stmt>__init__ self sync_id=<none><block_start>self._sync_id=sync_id<block_end>### PUBLIC METHODS ###
@classmethod<def_stmt>from_osc_message cls osc_message<block_start>arguments=osc_message.contents<line_sep>response=cls(*arguments)<line_sep><return>response<block_end>### PUBLIC PROPERTIES ###
@property<def_stmt>sync_id self<block_start><return>self._sync_id<block_end><block_end><class_stmt>SyncRequest(Request)<block_start>"""
A /sync request.
::
>>> import supriya.commands
>>> request = supriya.commands.SyncRequest(sync_id=1999,)
>>> request
SyncRequest(
sync_id=1999,
)
::
>>> request.to_osc()
OscMessage('/sync', 1999)
"""<line_sep>### CLASS VARIABLES ###
request_id=RequestId.SYNC<line_sep>### INITIALIZER ###
<def_stmt>__init__ self sync_id=<none><block_start>Request.__init__(self)<line_sep>self._sync_id=int(sync_id)<block_end>### PUBLIC METHODS ###
<def_stmt>to_osc self * with_placeholders=<false><block_start>request_id=self.request_name<line_sep>sync_id=int(self.sync_id)<line_sep>message=supriya.osc.OscMessage(request_id sync_id)<line_sep><return>message<block_end>### PUBLIC PROPERTIES ###
@property<def_stmt>response_patterns self<block_start><return>["/synced" self.sync_id] <none><block_end>@property<def_stmt>sync_id self<block_start><return>self._sync_id<block_end><block_end> |
load("@bazel_skylib//lib:shell.bzl" "shell")<def_stmt>kubebuilder_manifests name srcs config_root **kwargs<block_start>native.genrule(name=name srcs=srcs outs=[name+".yaml"] cmd="""
tmp=$$(mktemp --directory)
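# copy the kustomize config (dereferencing symlinks) into a writable temp tree, then build the "default" overlay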
cp -aL "%s/." "$$tmp"
$(location @io_k8s_sigs_kustomize_kustomize_v4//:v4) build "$$tmp/default" > $@
rm -r "$$tmp"
"""%config_root tools=["@io_k8s_sigs_kustomize_kustomize_v4//:v4" ] **kwargs)<block_end><def_stmt>_ginkgo_test_impl ctx<block_start>wrapper=ctx.actions.declare_file(ctx.label.name)<line_sep>ctx.actions.write(output=wrapper content="""#!/usr/bin/env bash
set -e
exec {ginkgo} {ginkgo_args} {go_test} -- "$@"
""".format(ginkgo=shell.quote(ctx.executable._ginkgo.short_path) ginkgo_args=" ".join([shell.quote(arg)<for>arg ctx.attr.ginkgo_args]) # Ginkgo requires the precompiled binary end with ".test".
go_test=shell.quote(ctx.executable.go_test.short_path+".test") ) is_executable=<true> )<line_sep><return>[DefaultInfo(executable=wrapper runfiles=ctx.runfiles(files=ctx.files.data symlinks={ctx.executable.go_test.short_path+".test":ctx.executable.go_test} transitive_files=depset([] transitive=[ctx.attr._ginkgo.default_runfiles.files ctx.attr.go_test.default_runfiles.files]) ) )]<block_end>ginkgo_test=rule(implementation=_ginkgo_test_impl attrs={"data":attr.label_list(allow_files=<true>) "go_test":attr.label(executable=<true> cfg="target") "ginkgo_args":attr.string_list() "_ginkgo":attr.label(default="@com_github_onsi_ginkgo//ginkgo" executable=<true> cfg="target") } executable=<true> test=<true> )<line_sep> |
<import_from_stmt>bcc BPF<line_sep>bpf_source="""
BPF_HASH(cache, u64, u64);
int trace_start_time(struct pt_regs *ctx) {
u64 pid = bpf_get_current_pid_tgid();
u64 start_time_ns = bpf_ktime_get_ns();
cache.update(&pid, &start_time_ns);
return 0;
}
"""<line_sep>bpf_source<augadd>"""
int print_duration(struct pt_regs *ctx) {
u64 pid = bpf_get_current_pid_tgid();
u64 *start_time_ns = cache.lookup(&pid);
if (start_time_ns == 0) {
return 0;
}
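  // elapsed nanoseconds since trace_start_time stored this pid's entry timestamp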
u64 duration_ns = bpf_ktime_get_ns() - *start_time_ns;
bpf_trace_printk("Function call duration: %d\\n", duration_ns);
return 0;
}
"""<line_sep>bpf=BPF(text=bpf_source)<line_sep>bpf.attach_uprobe(name="./hello-bpf" sym="main.main" fn_name="trace_start_time")<line_sep>bpf.attach_uretprobe(name="./hello-bpf" sym="main.main" fn_name="print_duration")<line_sep>bpf.trace_print()<line_sep> |
<import_stmt>numpy<as>np<line_sep>a=np.arange(6)<line_sep>print(a)<line_sep># [0 1 2 3 4 5]
print(a.reshape(2 3))<line_sep># [[0 1 2]
# [3 4 5]]
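# A dimension given as -1 is inferred from the array's total size and the other specified dimensions.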
print(a.reshape(-1 3))<line_sep># [[0 1 2]
# [3 4 5]]
print(a.reshape(2 -1))<line_sep># [[0 1 2]
# [3 4 5]]
# print(a.reshape(3, 4))
# ValueError: cannot reshape array of size 6 into shape (3,4)
# print(a.reshape(-1, 4))
# ValueError: cannot reshape array of size 6 into shape (4)
l=[0 1 2 3 4 5]<line_sep>print(np.array(l).reshape(-1 3).tolist())<line_sep># [[0, 1, 2], [3, 4, 5]]
print(np.array(l).reshape(3 -1).tolist())<line_sep># [[0, 1], [2, 3], [4, 5]]
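# For comparison, a lone -1 flattens back to 1-D (same `a` as above).
print(a.reshape(-1))<line_sep># [0 1 2 3 4 5]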
|
<import_stmt>climetlab<as>cml<line_sep>url="https://www.cpc.ncep.noaa.gov/products/precip/CWlink/daily_ao_index/monthly.ao.index.b50.current.ascii"<line_sep>s=cml.load_source("url" url reader="fix_width_format")<line_sep>print(s.to_pandas())<line_sep> |
# Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>unittest<import_from_stmt>streamlit.errors StreamlitAPIException<import_from_stmt>streamlit.proto.ForwardMsg_pb2 ForwardMsg<import_from_stmt>streamlit.report_thread ReportContext<import_from_stmt>streamlit.state.session_state SessionState<import_from_stmt>streamlit.uploaded_file_manager UploadedFileManager<class_stmt>ReportContextTest(unittest.TestCase)<block_start><def_stmt>test_set_page_config_immutable self<block_start>"""st.set_page_config must be called at most once"""<line_sep>fake_enqueue=<lambda>msg:<none><line_sep>ctx=ReportContext("TestSessionID" fake_enqueue "" SessionState() UploadedFileManager() )<line_sep>msg=ForwardMsg()<line_sep>msg.page_config_changed.title="foo"<line_sep>ctx.enqueue(msg)<with_stmt>self.assertRaises(StreamlitAPIException)<block_start>ctx.enqueue(msg)<block_end><block_end><def_stmt>test_set_page_config_first self<block_start>"""st.set_page_config must be called before other st commands
when the script has been marked as started"""<line_sep>fake_enqueue=<lambda>msg:<none><line_sep>ctx=ReportContext("TestSessionID" fake_enqueue "" SessionState() UploadedFileManager() )<line_sep>ctx.on_script_start()<line_sep>markdown_msg=ForwardMsg()<line_sep>markdown_msg.delta.new_element.markdown.body="foo"<line_sep>msg=ForwardMsg()<line_sep>msg.page_config_changed.title="foo"<line_sep>ctx.enqueue(markdown_msg)<with_stmt>self.assertRaises(StreamlitAPIException)<block_start>ctx.enqueue(msg)<block_end><block_end><def_stmt>test_disallow_set_page_config_twice self<block_start>"""st.set_page_config cannot be called twice"""<line_sep>fake_enqueue=<lambda>msg:<none><line_sep>ctx=ReportContext("TestSessionID" fake_enqueue "" SessionState() UploadedFileManager() )<line_sep>ctx.on_script_start()<line_sep>msg=ForwardMsg()<line_sep>msg.page_config_changed.title="foo"<line_sep>ctx.enqueue(msg)<with_stmt>self.assertRaises(StreamlitAPIException)<block_start>same_msg=ForwardMsg()<line_sep>same_msg.page_config_changed.title="bar"<line_sep>ctx.enqueue(same_msg)<block_end><block_end><def_stmt>test_set_page_config_reset self<block_start>"""st.set_page_config should be allowed after a rerun"""<line_sep>fake_enqueue=<lambda>msg:<none><line_sep>ctx=ReportContext("TestSessionID" fake_enqueue "" SessionState() UploadedFileManager() )<line_sep>ctx.on_script_start()<line_sep>msg=ForwardMsg()<line_sep>msg.page_config_changed.title="foo"<line_sep>ctx.enqueue(msg)<line_sep>ctx.reset()<try_stmt><block_start>ctx.on_script_start()<line_sep>ctx.enqueue(msg)<block_end><except_stmt>StreamlitAPIException<block_start>self.fail("set_page_config should have succeeded after reset!")<block_end><block_end><block_end> |
<class_stmt>KeySpline(Freezable ISealable IFormattable)<block_start>"""
This class is used by a spline key frame to define animation progress.
KeySpline(controlPoint1: Point,controlPoint2: Point)
KeySpline()
KeySpline(x1: float,y1: float,x2: float,y2: float)
"""<def_stmt>CloneCore self *args<block_start>"""
CloneCore(self: KeySpline,sourceFreezable: Freezable)
Makes this instance a deep copy of the specified System.Windows.Media.Animation.KeySpline. When
copying dependency properties,this method copies resource references and data bindings (but
they might no longer resolve) but not animations or their current values.
sourceFreezable: The System.Windows.Media.Animation.KeySpline to clone.
"""<line_sep><pass><block_end><def_stmt>CloneCurrentValueCore self *args<block_start>"""
CloneCurrentValueCore(self: KeySpline,sourceFreezable: Freezable)
Makes this instance a modifiable deep copy of the specified
System.Windows.Media.Animation.KeySpline using current property values. Resource references,
data bindings,and animations are not copied,but their current values are.
sourceFreezable: The System.Windows.Media.Animation.KeySpline to clone.
"""<line_sep><pass><block_end><def_stmt>CreateInstance self *args<block_start>"""
CreateInstance(self: Freezable) -> Freezable
Initializes a new instance of the System.Windows.Freezable class.
Returns: The new instance.
"""<line_sep><pass><block_end><def_stmt>CreateInstanceCore self *args<block_start>"""
CreateInstanceCore(self: KeySpline) -> Freezable
Creates a new instance of System.Windows.Media.Animation.KeySpline.
Returns: A new instance of System.Windows.Media.Animation.KeySpline.
"""<line_sep><pass><block_end><def_stmt>FreezeCore self *args<block_start>"""
FreezeCore(self: Freezable,isChecking: bool) -> bool
Makes the System.Windows.Freezable object unmodifiable or tests whether it can be made
unmodifiable.
isChecking: true to return an indication of whether the object can be frozen (without actually freezing it);
false to actually freeze the object.
Returns: If isChecking is true,this method returns true if the System.Windows.Freezable can be made
unmodifiable,or false if it cannot be made unmodifiable. If isChecking is false,this method
returns true if the if the specified System.Windows.Freezable is now unmodifiable,or false if
it cannot be made unmodifiable.
"""<line_sep><pass><block_end><def_stmt>GetAsFrozenCore self *args<block_start>"""
GetAsFrozenCore(self: KeySpline,sourceFreezable: Freezable)
Makes this instance a clone of the specified System.Windows.Media.Animation.KeySpline object.
sourceFreezable: The System.Windows.Media.Animation.KeySpline object to clone.
"""<line_sep><pass><block_end><def_stmt>GetCurrentValueAsFrozenCore self *args<block_start>"""
GetCurrentValueAsFrozenCore(self: KeySpline,sourceFreezable: Freezable)
Makes this instance a frozen clone of the specified System.Windows.Media.Animation.KeySpline.
Resource references,data bindings,and animations are not copied,but their current values are.
sourceFreezable: The System.Windows.Media.Animation.KeySpline to copy and freeze.
"""<line_sep><pass><block_end><def_stmt>GetSplineProgress self linearProgress<block_start>"""
GetSplineProgress(self: KeySpline,linearProgress: float) -> float
Calculates spline progress from a supplied linear progress.
linearProgress: The linear progress to evaluate.
Returns: The calculated spline progress.
"""<line_sep><pass><block_end><def_stmt>OnChanged self *args<block_start>"""
OnChanged(self: KeySpline)
Called when the current System.Windows.Media.Animation.KeySpline object is modified.
"""<line_sep><pass><block_end><def_stmt>OnFreezablePropertyChanged self *args<block_start>"""
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject,property: DependencyProperty)
This member supports the Windows Presentation Foundation (WPF) infrastructure and is not
intended to be used directly from your code.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
property: The property that changed.
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject)
Ensures that appropriate context pointers are established for a
System.Windows.DependencyObjectType data member that has just been set.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
"""<line_sep><pass><block_end><def_stmt>OnPropertyChanged self *args<block_start>"""
OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)
Overrides the System.Windows.DependencyObject implementation of
System.Windows.DependencyObject.OnPropertyChanged(System.Windows.DependencyPropertyChangedEventAr
gs) to also invoke any System.Windows.Freezable.Changed handlers in response to a changing
dependency property of type System.Windows.Freezable.
e: Event data that contains information about which property changed,and its old and new values.
"""<line_sep><pass><block_end><def_stmt>ReadPreamble self *args<block_start>"""
ReadPreamble(self: Freezable)
Ensures that the System.Windows.Freezable is being accessed from a valid thread. Inheritors of
System.Windows.Freezable must call this method at the beginning of any API that reads data
members that are not dependency properties.
"""<line_sep><pass><block_end><def_stmt>ShouldSerializeProperty self *args<block_start>"""
ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
Returns a value that indicates whether serialization processes should serialize the value for
the provided dependency property.
dp: The identifier for the dependency property that should be serialized.
Returns: true if the dependency property that is supplied should be value-serialized; otherwise,false.
"""<line_sep><pass><block_end><def_stmt>ToString self formatProvider=<none><block_start>"""
ToString(self: KeySpline,formatProvider: IFormatProvider) -> str
Creates a string representation of this System.Windows.Media.Animation.KeySpline based on the
supplied System.IFormatProvider.
formatProvider: The format provider to use. If provider is null,the current culture is used.
Returns: A string representation of this instance of System.Windows.Media.Animation.KeySpline.
ToString(self: KeySpline) -> str
Creates a string representation of this instance of System.Windows.Media.Animation.KeySpline
based on the current culture.
Returns: A string representation of this System.Windows.Media.Animation.KeySpline.
"""<line_sep><pass><block_end><def_stmt>WritePostscript self *args<block_start>"""
WritePostscript(self: Freezable)
Raises the System.Windows.Freezable.Changed event for the System.Windows.Freezable and invokes
its System.Windows.Freezable.OnChanged method. Classes that derive from System.Windows.Freezable
should call this method at the end of any API that modifies class members that are not stored as
dependency properties.
"""<line_sep><pass><block_end><def_stmt>WritePreamble self *args<block_start>"""
WritePreamble(self: Freezable)
Verifies that the System.Windows.Freezable is not frozen and that it is being accessed from a
valid threading context. System.Windows.Freezable inheritors should call this method at the
beginning of any API that writes to data members that are not dependency properties.
"""<line_sep><pass><block_end><def_stmt>__format__ self *args<block_start>""" __format__(formattable: IFormattable,format: str) -> str """<line_sep><pass><block_end><def_stmt>__init__ self *args<block_start>""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """<line_sep><pass><block_end>@staticmethod<def_stmt>__new__ self *__args<block_start>"""
__new__(cls: type)
__new__(cls: type,x1: float,y1: float,x2: float,y2: float)
__new__(cls: type,controlPoint1: Point,controlPoint2: Point)
"""<line_sep><pass><block_end><def_stmt>__str__ self *args<block_start><pass><block_end>ControlPoint1=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""The first control point used to define a Bezier curve that describes a System.Windows.Media.Animation.KeySpline.
Get: ControlPoint1(self: KeySpline) -> Point
Set: ControlPoint1(self: KeySpline)=value
"""<line_sep>ControlPoint2=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""The second control point used to define a Bezier curve that describes a System.Windows.Media.Animation.KeySpline.
Get: ControlPoint2(self: KeySpline) -> Point
Set: ControlPoint2(self: KeySpline)=value
"""<block_end> |
<import_from_future_stmt> print_function<line_sep>#!/usr/bin/env python
""" A little script illustrating how to use a (randomly initialized)
convolutional network to play a game of Pente. """<line_sep>__author__='<NAME>, <EMAIL>'<import_from_stmt>pybrain.rl.environments.twoplayergames.pente PenteGame<import_from_stmt>pybrain.rl.environments.twoplayergames.gomokuplayers.randomplayer RandomGomokuPlayer<import_from_stmt>pybrain.rl.environments.twoplayergames.gomokuplayers.moduledecision ModuleDecidingPlayer<import_from_stmt>pybrain.structure.networks.custom.convboard ConvolutionalBoardNetwork<line_sep>dim=7<line_sep>g=PenteGame((dim dim))<line_sep>print(g)<line_sep>n=ConvolutionalBoardNetwork(dim 5 3)<line_sep>p1=ModuleDecidingPlayer(n g)<line_sep>p2=RandomGomokuPlayer(g)<line_sep>p2.color=g.WHITE<line_sep>g.playToTheEnd(p1 p2)<line_sep>print(g)<line_sep> |
# from qmpy import *
<import_from_stmt>matplotlib rc<line_sep>rc("font" **{"family":"serif" "serif":["Century"]})<line_sep>params={"font.size":48}<import_stmt>matplotlib<line_sep>matplotlib.rcParams.update(params)<import_stmt>matplotlib.pylab<as>plt<import_stmt>pickle<import_stmt>sys<import_stmt>numpy<as>np<def_stmt>get_data <block_start><if_stmt><not>"new"<in>sys.argv<block_start><return>pickle.loads(open("dates.txt").read())<block_end>forms=Formation.objects.filter(fit="diff_refs" hull_distance__lte=0.025 entry__ntypes__gt=1 entry__icsd__coll_code__gt=0 ).select_related()<line_sep>dates=[]<for_stmt>form forms<block_start><try_stmt><block_start>struct=form.entry.input<line_sep>sdates=struct.similar.values_list("entry__reference__year" flat=<true>)<line_sep>dates.append(min(sdates))<block_end><except_stmt><block_start><continue><block_end><block_end>result=open("dates.txt" "w")<line_sep>result.write(pickle.dumps(dates))<line_sep>result.close()<block_end>dates=np.array(get_data())<line_sep>plt.hist(dates bins=max(dates)-min(dates) cumulative=<true>)<line_sep>plt.xlabel("Year")<line_sep>plt.ylabel("# of Stable Structures")<line_sep>plt.savefig("test.eps" bbox_inches="tight")<line_sep> |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public API for tf.initializer namespace."""<import_from_stmt>tensorflow.python.ops init_ops<import_from_stmt>tensorflow.python.ops variables<as>_variables<line_sep># variable initializers
zeros=init_ops.zeros_initializer<line_sep>ones=init_ops.ones_initializer<line_sep>constant=init_ops.constant_initializer<line_sep>random_uniform=init_ops.random_uniform_initializer<line_sep>random_normal=init_ops.random_normal_initializer<line_sep>truncated_normal=init_ops.truncated_normal_initializer<line_sep>uniform_unit_scaling=init_ops.uniform_unit_scaling_initializer<line_sep>variance_scaling=init_ops.variance_scaling_initializer<line_sep>orthogonal=init_ops.orthogonal_initializer<line_sep>identity=init_ops.identity_initializer<line_sep># variable initializer ops
variables=_variables.variables_initializer<line_sep>global_variables=_variables.global_variables_initializer<line_sep>local_variables=_variables.local_variables_initializer<line_sep># Seal API.
<del_stmt>init_ops<del_stmt>_variables<line_sep> |
# coding:utf-8
<import_from_stmt>base TestBase<class_stmt>TestUriEncoding(TestBase)<block_start>"""Test for attributes \'uri_encoding\'"""<line_sep># for debug
# def tearDown(self):
# pass
uri_encoding_text="""
<!-- MarkdownTOC autolink="true" lowercase="only_ascii" {0} -->
<!-- /MarkdownTOC -->
# Camión, último
# España
# こんにちわ 世界
# Пример Example
# 一个标题
"""<line_sep># default: uri_encoding=true
<def_stmt>test_uri_encoding_default self<block_start>toc=self.init_update(self.uri_encoding_text.format(""))["toc"]<line_sep>self.assert_In("- [Camión, último](#cami%C3%B3n-%C3%BAltimo)" toc)<line_sep>self.assert_In("- [España](#espa%C3%B1a)" toc)<line_sep>self.assert_In("- [こんにちわ 世界](#%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F-%E4%B8%96%E7%95%8C)" toc )<line_sep>self.assert_In("- [Пример Example](#%D0%9F%D1%80%D0%B8%D0%BC%D0%B5%D1%80-example)" toc)<line_sep>self.assert_In("- [一个标题](#%E4%B8%80%E4%B8%AA%E6%A0%87%E9%A2%98)" toc)<block_end><def_stmt>test_uri_encoding_true self<block_start>toc=self.init_update(self.uri_encoding_text.format("uri_encoding=true"))["toc"]<line_sep>self.assert_In("- [Camión, último](#cami%C3%B3n-%C3%BAltimo)" toc)<line_sep>self.assert_In("- [España](#espa%C3%B1a)" toc)<line_sep>self.assert_In("- [こんにちわ 世界](#%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F-%E4%B8%96%E7%95%8C)" toc )<line_sep>self.assert_In("- [Пример Example](#%D0%9F%D1%80%D0%B8%D0%BC%D0%B5%D1%80-example)" toc)<line_sep>self.assert_In("- [一个标题](#%E4%B8%80%E4%B8%AA%E6%A0%87%E9%A2%98)" toc)<block_end><def_stmt>test_uri_encoding_false self<block_start>toc=self.init_update(self.uri_encoding_text.format("uri_encoding=false"))["toc"]<line_sep>self.assert_In("- [Camión, último](#camión-último)" toc)<line_sep>self.assert_In("- [España](#españa)" toc)<line_sep>self.assert_In("- [こんにちわ 世界](#こんにちわ-世界)" toc)<line_sep>self.assert_In("- [Пример Example](#Пример-example)" toc)<line_sep>self.assert_In("- [一个标题](#一个标题)" toc)<block_end><block_end> |
#! /usr/bin/env python
# $Id: test_get_parser_class.py 7504 2012-08-27 07:55:20Z grubert $
# Author: <NAME>
# Maintainer: <EMAIL>
# Copyright: This module has been placed in the public domain.
"""
test get_parser_class
"""<import_from_stmt>__init__ DocutilsTestSupport<import_from_stmt>docutils.parsers get_parser_class<class_stmt>GetParserClassTestCase(DocutilsTestSupport.StandardTestCase)<block_start><def_stmt>test_registered_parser self<block_start>rdr=get_parser_class('rst')<line_sep># raises ImportError on failure
<block_end><def_stmt>test_bogus_parser self<block_start>self.assertRaises(ImportError get_parser_class 'nope')<block_end><def_stmt>test_local_parser self# requires local-parser.py in test directory (testroot)
<block_start>wr=get_parser_class('local-parser')<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>unittest<line_sep>unittest.main()<block_end> |
<import_from_stmt>datetime datetime<import_from_stmt>elasticsearch_dsl *<import_from_stmt>elasticsearch_dsl.connections connections<line_sep># Define a default Elasticsearch client
connection=connections.create_connection(hosts=['es' 'localhost'])<line_sep>path_analyzer=analysis.analyzer('path' tokenizer='path_hierarchy')<line_sep>entitlement_key_analyzer=analysis.analyzer('entitlement_key' tokenizer='char_group' tokenize_on_chars=['-' '.'])<class_stmt>Import(InnerDoc)<block_start>name=Text()<line_sep>demname=Text()<line_sep>flagname=Text()<line_sep>ordinal=Long()<line_sep>bind=Keyword()<line_sep>size=Long()<line_sep>type=Keyword()<line_sep>vaddr=Long()<line_sep>paddr=Long()<block_end><class_stmt>Export(InnerDoc)<block_start>name=Text()<line_sep>ordinal=Long()<line_sep>bind=Keyword()<line_sep>type=Keyword()<line_sep>plt=Long()<block_end><class_stmt>Segment(InnerDoc)<block_start>name=Text()<line_sep>flags=Text()<block_end><class_stmt>Executable(Document)<block_start>raw_path=Text(analyzer=path_analyzer)<line_sep>path=Text()<line_sep>strings=Text()<line_sep>info=Object()<line_sep>libraries=Text(analyzer=path_analyzer multi=<true>)<line_sep>imports=Nested(Import)<line_sep>exports=Nested(Export)<line_sep>segments=Nested(Segment)<class_stmt>Index<block_start>name='executable-*'<block_end><block_end><class_stmt>Method(InnerDoc)<block_start>name=Text()<line_sep>addr=Long()<block_end><class_stmt>Field(InnerDoc)<block_start>name=Text()<line_sep>addr=Long()<block_end><class_stmt>Clazz(InnerDoc)<block_start>classname=Text()<line_sep>methods=Nested(Method)<line_sep>fields=Nested(Field)<line_sep>index=Long()<line_sep>addr=Long()<block_end><class_stmt>RPath(InnerDoc)<block_start>prefix=Keyword()<line_sep>path=Text(analyzer=path_analyzer)<block_end><class_stmt>MachO(Executable)<block_start>classdump=Text()<line_sep>classes=Nested(Clazz)<line_sep>rpaths=Nested(RPath)<line_sep># code signature
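# path_hierarchy tokenizes a path into all of its prefixes (e.g. /usr, /usr/lib, /usr/lib/foo); char_group splits entitlement keys on '-' and '.'.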
ent=Text()# json
ent_str=Text()# xml
ent_keys=Text(analyzer=entitlement_key_analyzer multi=<true> fields={'raw':Keyword()})<line_sep>cs_flags=Long()<line_sep>cs_flags_str=Keyword(multi=<true>)<line_sep>lv=Boolean()<line_sep>signed=Boolean()<line_sep>apple=Boolean()<line_sep>codesign=Text()<line_sep>info_plist=Text()# json
info_plist_str=Text()# xml
<class_stmt>Index<block_start>name='macho-*'<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>i=Index('macho-10.14.2')<line_sep>i.delete()<line_sep># i.save()
index=MachO._index.as_template('macho-test')<line_sep>index.save()<block_end> |
<import_from_stmt>sqlalchemy.orm sessionmaker scoped_session<import_from_stmt>lib.model.database.Database Database<import_from_stmt>.lib.Cipher Cipher<import_stmt>logging<import_stmt>os<import_stmt>binascii<import_stmt>base64<import_stmt>filetype<line_sep>current_path=os.path.dirname(os.path.realpath(__file__))<def_stmt>onload <block_start>logging.debug("Cipher:loaded()")<block_end><def_stmt>onunload <block_start>logging.debug("Cipher:unloaded()")<block_end><def_stmt>parse module message data<block_start><if_stmt>(message['plugin']<eq>"cipher")# Create a thread local session
<block_start>engine=Database.get_engine()<line_sep>session_factory=sessionmaker(bind=engine)<line_sep>Session=scoped_session(session_factory)<line_sep>session=Session()<line_sep>Session.remove()<line_sep># Fetch application for this session ( could not use self.application
# because the usage must be thread local )
key_value=binascii.unhexlify(message["key"]).hex()<line_sep>iv=binascii.unhexlify(message["iv"]).hex()<line_sep># TODO have link from base64
kind_input=filetype.guess(binascii.unhexlify(message["arg"]))<line_sep>kind_result=filetype.guess(binascii.unhexlify(message["result"]))<line_sep># print('File extension: %s' % kind.extension)
# print('File MIME type: %s' % kind.mime)
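# opmode follows the javax.crypto.Cipher constants: 1 = ENCRYPT_MODE, 2 = DECRYPT_MODE.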
<if_stmt>message["opmode"]<eq>1<block_start>opmode_info="ENCRYPT_MODE"<try_stmt><block_start>input_value=binascii.unhexlify(message["arg"]).decode('utf8')<block_end><except_stmt><block_start>input_value=base64.b64encode(binascii.unhexlify(message["arg"])).decode('utf8')<block_end>output_value=binascii.unhexlify(message["result"]).hex()<block_end><elif_stmt>message["opmode"]<eq>2<block_start>opmode_info="DECRYPT_MODE"<try_stmt><block_start>output_value=binascii.unhexlify(message["result"]).decode('utf8')<block_end><except_stmt><block_start>output_value=base64.b64encode(binascii.unhexlify(message["result"])).decode('utf8')<block_end>input_value=binascii.unhexlify(message["arg"]).hex()<block_end>cipher=Cipher(message["algo"] key_value iv opmode_info input_value output_value message["stack"])<line_sep>cipher.application_id=module.application.id<line_sep>logging.debug(repr(cipher))<line_sep>query=session.query(Cipher).filter(Cipher.application_id<eq>cipher.application_id).filter(Cipher.algorithm<eq>cipher.algorithm).filter(Cipher.key<eq>cipher.key).filter(Cipher.iv<eq>cipher.iv).filter(Cipher.opmode<eq>cipher.opmode).filter(Cipher.input_value<eq>cipher.input_value).filter(Cipher.output_value<eq>cipher.output_value)<line_sep>resultQuery=query.all()<line_sep># Prevent duplicates in DB
<if_stmt>len(resultQuery)<eq>0<block_start>session.add(cipher)<line_sep>session.commit()<block_end><block_end><block_end><def_stmt>get_frida_script <block_start>logging.debug("Cipher:get_frida_script()")<with_stmt>open(f"{current_path}/frida.js")<as>f<block_start><return>("Cipher" f.read())<block_end><block_end> |
""" Cisco_IOS_XR_manageability_object_tracking_datatypes
This module contains a collection of generally useful
derived YANG data types.
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""<import_stmt>sys<import_from_stmt>collections OrderedDict<import_from_stmt>ydk.types Entity<as>_Entity_<import_from_stmt>ydk.types EntityPath Identity Enum YType YLeaf YLeafList YList LeafDataList Bits Empty Decimal64<import_from_stmt>ydk.types Entity EntityPath Identity Enum YType YLeaf YLeafList YList LeafDataList Bits Empty Decimal64<import_from_stmt>ydk.filters YFilter<import_from_stmt>ydk.errors YError YModelError<import_from_stmt>ydk.errors.error_handler handle_type_error<as>_handle_type_error<class_stmt>ObjectTrackingBooleanSign(Enum)<block_start>"""
ObjectTrackingBooleanSign (Enum Class)
Object tracking boolean sign
.. data:: without_not = 0
Object without not
.. data:: with_not = 1
Object with not
"""<line_sep>without_not=Enum.YLeaf(0 "without-not")<line_sep>with_not=Enum.YLeaf(1 "with-not")<line_sep>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_manageability_object_tracking_datatypes<as>meta<line_sep><return>meta._meta_table['ObjectTrackingBooleanSign']<block_end><block_end> |
# encoding: utf8
<import_stmt>support<import_stmt>pygubu<import_stmt>os<import_stmt>sys<import_stmt>unittest<try_stmt><block_start><import_stmt>tkinter<as>tk<import_stmt>tkinter.ttk<as>ttk<block_end><except_stmt><block_start><import_stmt>Tkinter<as>tk<import_stmt>ttk<block_end>pygubu_basedir=os.path.abspath(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0]))))<if_stmt>pygubu_basedir<not><in>sys.path<block_start>sys.path.insert(0 pygubu_basedir)<block_end><class_stmt>TestOptionMenu(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>support.root_deiconify()<line_sep>xmldata='test_optionmenu.ui'<line_sep>self.builder=builder=pygubu.Builder()<line_sep>builder.add_from_file(xmldata)<line_sep>self.widget=builder.get_object('mainwindow')<block_end><def_stmt>tearDown self<block_start>support.root_withdraw()<block_end><def_stmt>test_class self<block_start>optionmenu=self.builder.get_object('optionmenu1')<line_sep>self.assertIsInstance(optionmenu tk.OptionMenu)<line_sep>self.widget.destroy()<block_end><def_stmt>test_no_variable_defined self<block_start>optionmenu2=self.builder.get_object('optionmenu2')<line_sep>self.assertIsInstance(optionmenu2 tk.OptionMenu)<line_sep>self.widget.destroy()<block_end><block_end> |
<import_from_stmt>.callback Callback<class_stmt>SchedulerFactory<block_start>""" Factory class for various schedulers """<def_stmt>instantiate self optimizer last_epoch=-1<arrow>Callback<block_start><raise>NotImplementedError<block_end><block_end> |
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>DQM.TrackingMonitor.packedCandidateTrackValidator_cfi *<line_sep>packedCandidateTrackValidatorLostTracks=packedCandidateTrackValidator.clone(trackToPackedCandidateAssociation="lostTracks" rootFolder="Tracking/PackedCandidate/lostTracks")<line_sep>tracksDQMMiniAOD=cms.Sequence(packedCandidateTrackValidator+packedCandidateTrackValidatorLostTracks)<line_sep> |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
<import_stmt>logging<import_from_stmt>datetime datetime<import_from_stmt>distutils.version StrictVersion<import_from_stmt>typing Any Dict Optional Type<import_from_stmt>superset.db_engine_specs.base BaseEngineSpec<import_from_stmt>superset.db_engine_specs.exceptions SupersetDBAPIDatabaseError SupersetDBAPIOperationalError SupersetDBAPIProgrammingError <import_from_stmt>superset.utils core<as>utils<line_sep>logger=logging.getLogger()<class_stmt>ElasticSearchEngineSpec(BaseEngineSpec)# pylint: disable=abstract-method
<block_start>engine="elasticsearch"<line_sep>engine_name="ElasticSearch (SQL API)"<line_sep>time_groupby_inline=<true><line_sep>time_secondary_columns=<true><line_sep>allows_joins=<false><line_sep>allows_subqueries=<true><line_sep>allows_sql_comments=<false><line_sep>_time_grain_expressions={<none>:"{col}" "PT1S":"HISTOGRAM({col}, INTERVAL 1 SECOND)" "PT1M":"HISTOGRAM({col}, INTERVAL 1 MINUTE)" "PT1H":"HISTOGRAM({col}, INTERVAL 1 HOUR)" "P1D":"HISTOGRAM({col}, INTERVAL 1 DAY)" "P1M":"HISTOGRAM({col}, INTERVAL 1 MONTH)" "P1Y":"HISTOGRAM({col}, INTERVAL 1 YEAR)" }<line_sep>type_code_map:Dict[int str]={}# loaded from get_datatype only if needed
@classmethod<def_stmt>get_dbapi_exception_mapping cls<arrow>Dict[Type[Exception] Type[Exception]]# pylint: disable=import-error,import-outside-toplevel
<block_start><import_stmt>es.exceptions<as>es_exceptions<line_sep><return>{es_exceptions.DatabaseError:SupersetDBAPIDatabaseError es_exceptions.OperationalError:SupersetDBAPIOperationalError es_exceptions.ProgrammingError:SupersetDBAPIProgrammingError }<block_end>@classmethod<def_stmt>convert_dttm cls target_type:str dttm:datetime db_extra:Optional[Dict[str Any]]=<none><arrow>Optional[str]<block_start>db_extra=db_extra<or>{}<if_stmt>target_type.upper()<eq>utils.TemporalType.DATETIME<block_start>es_version=db_extra.get("version")<line_sep># The elasticsearch CAST function does not take effect for the time zone
# setting. In elasticsearch7.8 and above, we can use the DATETIME_PARSE
# function to solve this problem.
supports_dttm_parse=<false><try_stmt><block_start><if_stmt>es_version<block_start>supports_dttm_parse=StrictVersion(es_version)<ge>StrictVersion("7.8")<block_end><block_end><except_stmt>Exception<as>ex# pylint: disable=broad-except
<block_start>logger.error("Unexpected error while convert es_version" exc_info=<true>)<line_sep>logger.exception(ex)<block_end><if_stmt>supports_dttm_parse<block_start>datetime_formatted=dttm.isoformat(sep=" " timespec="seconds")<line_sep><return>(f"""DATETIME_PARSE('{datetime_formatted}', 'yyyy-MM-dd HH:mm:ss')""")<block_end><return>f"""CAST('{dttm.isoformat(timespec="seconds")}' AS DATETIME)"""<block_end><return><none><block_end><block_end><class_stmt>OpenDistroEngineSpec(BaseEngineSpec)# pylint: disable=abstract-method
<block_start>time_groupby_inline=<true><line_sep>time_secondary_columns=<true><line_sep>allows_joins=<false><line_sep>allows_subqueries=<true><line_sep>allows_sql_comments=<false><line_sep>_time_grain_expressions={<none>:"{col}" "PT1S":"date_format({col}, 'yyyy-MM-dd HH:mm:ss.000')" "PT1M":"date_format({col}, 'yyyy-MM-dd HH:mm:00.000')" "PT1H":"date_format({col}, 'yyyy-MM-dd HH:00:00.000')" "P1D":"date_format({col}, 'yyyy-MM-dd 00:00:00.000')" "P1M":"date_format({col}, 'yyyy-MM-01 00:00:00.000')" "P1Y":"date_format({col}, 'yyyy-01-01 00:00:00.000')" }<line_sep>engine="odelasticsearch"<line_sep>engine_name="ElasticSearch (OpenDistro SQL)"<line_sep>@classmethod<def_stmt>convert_dttm cls target_type:str dttm:datetime db_extra:Optional[Dict[str Any]]=<none><arrow>Optional[str]<block_start><if_stmt>target_type.upper()<eq>utils.TemporalType.DATETIME<block_start><return>f"""'{dttm.isoformat(timespec="seconds")}'"""<block_end><return><none><block_end>@staticmethod<def_stmt>_mutate_label label:str<arrow>str<block_start><return>label.replace("." "_")<block_end><block_end> |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A linear layer for output projection.
This is based on code in tf.contrib.seq2seq.
"""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>language.labs.exemplar_decoding.models.common dimension_value<import_stmt>tensorflow.compat.v1<as>tf<line_sep>_BIAS_VARIABLE_NAME="bias"<line_sep>_WEIGHTS_VARIABLE_NAME="kernel"<line_sep>__all__=["Linear" "HyperDense" ]<class_stmt>Linear(object)<block_start>"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch, n, Tensors.
output_size: int, second dimension of weight variable.
weights: (optional) a specified tensor.
dtype: data type for variables.
build_bias: boolean, whether to build a bias variable.
bias_initializer: starting value to initialize the bias
(default is all zeros).
kernel_initializer: starting value to initialize the weight.
Raises:
ValueError: if inputs_shape is wrong.
"""<def_stmt>__init__ self args output_size build_bias weights=<none> weight_initializer=<none> bias_initializer=<none><block_start>self._build_bias=build_bias<if_stmt>args<is><none><or>(tf.contrib.framework.nest.is_sequence(args)<and><not>args)<block_start><raise>ValueError("`args` must be specified")<block_end><if_stmt><not>tf.contrib.framework.nest.is_sequence(args)<block_start>args=[args]<line_sep>self._is_sequence=<false><block_end><else_stmt><block_start>self._is_sequence=<true><block_end># Calculate the total size of arguments on dimension 1.
total_arg_size=0<line_sep>shapes=[a.get_shape()<for>a args]<for_stmt>shape shapes<block_start><if_stmt>shape.ndims<ne>2<block_start><raise>ValueError("linear is expecting 2D arguments: %s"%shapes)<block_end><if_stmt>shape[1].value<is><none><block_start><raise>ValueError("linear expects shape[1] to be provided for shape %s, "<concat>"but saw %s"%(shape shape[1]))<block_end><else_stmt><block_start>total_arg_size<augadd>shape[1].value<block_end><block_end>dtype=[a.dtype<for>a args][0]<line_sep>scope=tf.get_variable_scope()<with_stmt>tf.variable_scope(scope)<as>outer_scope<block_start><if_stmt>weights<is><none><block_start>self._weights=tf.get_variable(_WEIGHTS_VARIABLE_NAME [total_arg_size output_size] dtype=dtype initializer=weight_initializer)<block_end><else_stmt><block_start>self._weights=weights<block_end><if_stmt>build_bias<block_start><with_stmt>tf.variable_scope(outer_scope)<as>inner_scope<block_start>inner_scope.set_partitioner(<none>)<if_stmt>bias_initializer<is><none><block_start>bias_initializer=tf.constant_initializer(0.0 dtype=dtype)<block_end>self._biases=tf.get_variable(_BIAS_VARIABLE_NAME [output_size] dtype=dtype initializer=bias_initializer)<block_end><block_end><block_end><block_end><def_stmt>__call__ self args<block_start><if_stmt><not>self._is_sequence<block_start>args=[args]<block_end><if_stmt>len(args)<eq>1<block_start>res=tf.matmul(args[0] self._weights)<block_end><else_stmt># Explicitly creating a one for a minor performance improvement.
<block_start>one=tf.constant(1 dtype=tf.int32)<line_sep>res=tf.matmul(tf.concat(args one) self._weights)<block_end><if_stmt>self._build_bias<block_start>res=tf.nn.bias_add(res self._biases)<block_end><return>res<block_end><block_end><class_stmt>HyperDense(tf.keras.layers.Layer)<block_start>"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then
it is flattened prior to the initial dot product with `kernel`.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
nD tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
Output shape:
nD tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
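Note: unlike a plain `Dense` layer, the kernel and bias here are generated per
example from `mem_input`: `sigma = mem_input @ c` (optionally L2- or
softmax-normalized) and `kernel = u @ diag(sigma) @ v`, a low-rank,
input-conditioned weight matrix.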
"""<def_stmt>__init__ self units mem_input hps use_beam=<false> activation=<none> use_bias=<true> kernel_initializer="glorot_uniform" bias_initializer="zeros" kernel_regularizer=<none> bias_regularizer=<none> activity_regularizer=<none> kernel_constraint=<none> bias_constraint=<none> **kwargs<block_start><if_stmt>"input_shape"<not><in>kwargs<and>"input_dim"<in>kwargs<block_start>kwargs["input_shape"]=(kwargs.pop("input_dim") )<block_end>super(HyperDense self).__init__(activity_regularizer=tf.keras.regularizers.get(activity_regularizer) **kwargs)<line_sep>self.units=int(units)<line_sep>self.activation=tf.keras.activations.get(activation)<line_sep>self.use_bias=use_bias<line_sep>self.kernel_initializer=tf.keras.initializers.get(kernel_initializer)<line_sep>self.bias_initializer=tf.keras.initializers.get(bias_initializer)<line_sep>self.kernel_regularizer=tf.keras.regularizers.get(kernel_regularizer)<line_sep>self.bias_regularizer=tf.keras.regularizers.get(bias_regularizer)<line_sep>self.kernel_constraint=tf.keras.constraints.get(kernel_constraint)<line_sep>self.bias_constraint=tf.keras.constraints.get(bias_constraint)<line_sep>self._mem_input=mem_input<line_sep>self.supports_masking=<true><line_sep>self.input_spec=tf.keras.layers.InputSpec(min_ndim=2)<line_sep>self._can_use_graph_functions=<true><line_sep>self._decoder_dim=hps.decoder_dim<line_sep>self._rank=hps.rank<line_sep>self._tau=hps.tau<line_sep>self._sigma_norm=hps.sigma_norm<line_sep>self._beam_width=hps.beam_width<line_sep>self._use_beam=use_beam<block_end><def_stmt>build self input_shape<block_start>input_shape=tf.TensorShape(input_shape)<if_stmt>dimension_value(input_shape[-1])<is><none><block_start><raise>ValueError("The last dimension of the inputs to `Dense` "<concat>"should be defined. 
Found `None`.")<block_end>last_dim=dimension_value(input_shape[-1])<line_sep>self.input_spec=tf.keras.layers.InputSpec(min_ndim=2 axes={-1:last_dim})<line_sep>self._c=tf.get_variable("c" [self._decoder_dim self._rank] initializer=tf.contrib.layers.xavier_initializer() regularizer=self.kernel_regularizer constraint=self.kernel_constraint dtype=self.dtype trainable=<true>)<line_sep>sigma=tf.matmul(self._mem_input self._c)<if_stmt>self._sigma_norm<g>0.<block_start>sigma=tf.nn.l2_normalize(sigma axis=1)<times>self._sigma_norm<block_end><elif_stmt>self._sigma_norm<eq>-1.<block_start>sigma=tf.nn.softmax(sigma/self._tau axis=1)<block_end>sigma_diag=tf.matrix_diag(sigma)<line_sep>self._u=tf.get_variable("u" [last_dim self._rank] initializer=tf.contrib.layers.xavier_initializer() regularizer=self.kernel_regularizer constraint=self.kernel_constraint dtype=self.dtype trainable=<true>)<line_sep>self._v=tf.get_variable("v" [self._rank self.units] initializer=tf.contrib.layers.xavier_initializer() regularizer=self.kernel_regularizer constraint=self.kernel_constraint dtype=self.dtype trainable=<true>)<line_sep>self.kernel=tf.einsum("ij,ajk,kl->ail" self._u sigma_diag self._v)<if_stmt>self._use_beam<and>self._beam_width<block_start>self.kernel=tf.contrib.seq2seq.tile_batch(self.kernel multiplier=self._beam_width)<block_end><if_stmt>self.use_bias<block_start>self._b=self.add_weight("b" shape=[self.units self._rank] initializer=self.bias_initializer regularizer=self.bias_regularizer constraint=self.bias_constraint dtype=self.dtype trainable=<true>)<line_sep>self.bias=tf.einsum("ij,aj->ai" self._b sigma)<if_stmt>self._use_beam<and>self._beam_width<block_start>self.bias=tf.contrib.seq2seq.tile_batch(self.bias multiplier=self._beam_width)<block_end><block_end><else_stmt><block_start>self.bias=<none><block_end>self.built=<true><block_end><def_stmt>call self inputs<block_start>inputs=tf.convert_to_tensor(inputs)<line_sep>rank=tf.rank(inputs)<if_stmt>rank<g>2<block_start>outputs=tf.einsum("aki,aij->akj" inputs self.kernel)<line_sep># Reshape the output back to the original ndim of the input.
<if_stmt><not>tf.executing_eagerly()<block_start>shape=inputs.get_shape().as_list()<line_sep>output_shape=shape[:-1]+[self.units]<line_sep>outputs.set_shape(output_shape)<block_end><block_end><else_stmt><block_start><assert_stmt><false><line_sep># outputs = tf.mat_mul(inputs, self.kernel)
<block_end><if_stmt>self.use_bias<block_start>outputs=tf.nn.bias_add(outputs self.bias)<block_end><if_stmt>self.activation<is><not><none><block_start><return>self.activation(outputs)# pylint: disable=not-callable
<block_end><return>outputs<block_end><def_stmt>compute_output_shape self input_shape<block_start>input_shape=tf.TensorShape(input_shape)<line_sep>input_shape=input_shape.with_rank_at_least(2)<if_stmt>dimension_value(input_shape[-1])<is><none><block_start><raise>ValueError("The innermost dimension of input_shape must be defined, but saw: %s"%input_shape)<block_end><return>input_shape[:-1].concatenate(self.units)<block_end><def_stmt>get_config self<block_start>config={"units":self.units "activation":tf.keras.activations.serialize(self.activation) "use_bias":self.use_bias "kernel_initializer":tf.keras.initializers.serialize(self.kernel_initializer) "bias_initializer":tf.keras.initializers.serialize(self.bias_initializer) "kernel_regularizer":tf.keras.regularizers.serialize(self.kernel_regularizer) "bias_regularizer":tf.keras.regularizers.serialize(self.bias_regularizer) "activity_regularizer":tf.keras.regularizers.serialize(self.activity_regularizer) "kernel_constraint":tf.keras.constraints.serialize(self.kernel_constraint) "bias_constraint":tf.keras.constraints.serialize(self.bias_constraint)}<line_sep>base_config=super(HyperDense self).get_config()<line_sep><return>dict(list(base_config.items())+list(config.items()))<block_end><block_end> |
<import_from_stmt>ignite.engine Events<import_from_stmt>ignite.contrib.handlers.tqdm_logger ProgressBar<import_from_stmt>ignite.handlers Checkpoint DiskSaver global_step_from_engine<import_from_stmt>logger.base.base_logger BaseLogger<import_from_stmt>logger.base.utils *<import_from_stmt>logger.neptune.neptune_utils *<import_from_stmt>ignite.contrib.handlers.neptune_logger *<import_stmt>numpy<as>np<import_stmt>os<class_stmt>MyNeptuneLogger(BaseLogger)<block_start><def_stmt>__init__ self log_every=5 **kwargs<block_start>self.writer=NeptuneLogger(api_token=os.getenv('NEPTUNE_API_TOKEN') project_name=kwargs["project_name"] name=kwargs["name"] params=kwargs["params"] tags=kwargs["tags"])<line_sep>super().__init__(log_every=log_every)<block_end><def_stmt>_add_train_events self model=<none> optimizer=<none> scheduler=<none> metrics={}# self.writer.attach(self.trainer,
# log_handler=WeightsScalarHandler(model),
# event_name=Events.ITERATION_COMPLETED(every=100))
# self.writer.attach(self.trainer,
# log_handler=GradsScalarHandler(model),
# event_name=Events.ITERATION_COMPLETED(every=100))
<block_start>iteration_events=[training_iteration(self.writer) lr_iteration(optimizer self.writer)]<line_sep>completion_events=[train_metrics_completion(self.writer)]<line_sep>self._add_train_handlers(**{"iteration_events":iteration_events "completion_events":completion_events})<block_end><def_stmt>_add_eval_events self model=<none> optimizer=<none> scheduler=<none> metrics={}<block_start>iteration_events=[]<line_sep>completion_events=[validation_metrics_completion(self.trainer self.writer) ]<line_sep>self._add_evaluation_handlers(**{"iteration_events":iteration_events "completion_events":completion_events})<block_end><def_stmt>_end_of_training self<block_start>self.writer.experiment.stop()<block_end><block_end> |
<import_stmt>numpy<as>np<import_from_stmt>tensorflow.keras Sequential<import_from_stmt>tensorflow.keras.layers Dense<import_from_stmt>tcn TCN<line_sep># if you increase the sequence length make sure the receptive field of the TCN is big enough.
MAX_TIME_STEP=30<line_sep>"""
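Sample output of the data generator below (sequence lengths are drawn at random, so the exact values vary between runs):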
Input: sequence of length 7
Input: sequence of length 25
Input: sequence of length 29
Input: sequence of length 21
Input: sequence of length 20
Input: sequence of length 13
Input: sequence of length 9
Input: sequence of length 7
Input: sequence of length 4
Input: sequence of length 14
Input: sequence of length 10
Input: sequence of length 11
...
"""<def_stmt>get_x_y max_time_steps<block_start><for_stmt>k range(int(1e9))<block_start>time_steps=np.random.choice(range(1 max_time_steps) size=1)[0]<if_stmt>k%2<eq>0<block_start>x_train=np.expand_dims([np.insert(np.zeros(shape=(time_steps 1)) 0 1)] axis=-1)<line_sep>y_train=[1]<block_end><else_stmt><block_start>x_train=np.array([np.zeros(shape=(time_steps 1))])<line_sep>y_train=[0]<block_end><if_stmt>k%100<eq>0<block_start>print(f'({k}) Input: sequence of length {time_steps}.')<block_end><yield>x_train np.expand_dims(y_train axis=-1)<block_end><block_end>m=Sequential([TCN(input_shape=(<none> 1)) Dense(1 activation='sigmoid')])<line_sep>m.compile(optimizer='adam' loss='binary_crossentropy' metrics=['accuracy'])<line_sep>gen=get_x_y(max_time_steps=MAX_TIME_STEP)<line_sep>m.fit(gen epochs=1 steps_per_epoch=1000 max_queue_size=1 verbose=2)<line_sep> |
<import_from_stmt>datetime datetime<import_from_stmt>os.path dirname join<import_stmt>pytest<import_from_stmt>city_scrapers_core.utils file_response<import_from_stmt>freezegun freeze_time<import_from_stmt>city_scrapers.spiders.chi_police_retirement ChiPoliceRetirementSpider<line_sep>test_response=file_response(join(dirname(__file__) "files" "chi_police_retirement.html") url="http://www.chipabf.org/ChicagoPolicePension/MonthlyMeetings.html" )<line_sep>spider=ChiPoliceRetirementSpider()<line_sep>freezer=freeze_time("2019-05-05")<line_sep>freezer.start()<line_sep>parsed_items=[item<for>item spider.parse(test_response)]<line_sep>freezer.stop()<def_stmt>test_title <block_start><assert_stmt>parsed_items[0]["title"]<eq>"Retirement Board"<block_end><def_stmt>test_description <block_start><assert_stmt>parsed_items[0]["description"]<eq>""<block_end><def_stmt>test_start <block_start><assert_stmt>parsed_items[0]["start"]<eq>datetime(2019 1 31 9 0)<block_end><def_stmt>test_id <block_start><assert_stmt>(parsed_items[0]["id"]<eq>"chi_police_retirement/201901310900/x/"<concat>"retirement_board")<block_end><def_stmt>test_status <block_start><assert_stmt>parsed_items[0]["status"]<eq>"passed"<block_end><def_stmt>test_location <block_start><assert_stmt>parsed_items[0]["location"]<eq>{"name":"Policemen's Annuity and Benefit Fund" "address":"221 North LaSalle Street, Suite 1626, Chicago, "<concat>"Illinois 60601-1203" }<block_end><def_stmt>test_source <block_start><assert_stmt>(parsed_items[0]["source"]<eq>"http://www.chipabf.org/ChicagoPolicePension/MonthlyMeetings.html")<block_end><def_stmt>test_links <block_start><assert_stmt>parsed_items[0]["links"]<eq>[{"href":"http://www.chipabf.org/ChicagoPolicePension/PDF/Agenda/2019/2019AGENDA01.pdf" # noqa
"title":"Agenda" } {"href":"http://www.chipabf.org/ChicagoPolicePension/PDF/Minutes/2019/2019MINUTES01.pdf" # noqa
"title":"Minutes" } ]<block_end><def_stmt>test_classification <block_start><assert_stmt>parsed_items[0]["classification"]<eq>"Board"<block_end>@pytest.mark.parametrize("item" parsed_items)<def_stmt>test_all_day item<block_start><assert_stmt>item["all_day"]<is><false><block_end> |
<import_from_stmt>rpython.jit.metainterp.test.support LLJitMixin noConst<import_from_stmt>rpython.rlib jit<class_stmt>CallTest(object)<block_start><def_stmt>test_indirect_call self<block_start>@jit.dont_look_inside<def_stmt>f1 x<block_start><return>x+1<block_end>@jit.dont_look_inside<def_stmt>f2 x<block_start><return>x+2<block_end>@jit.dont_look_inside<def_stmt>choice i<block_start><if_stmt>i<block_start><return>f1<block_end><return>f2<block_end><def_stmt>f i<block_start>func=choice(i)<line_sep><return>func(i)<block_end>res=self.interp_operations(f [3])<assert_stmt>res<eq>f(3)<block_end><def_stmt>test_cond_call self<block_start><def_stmt>f l n<block_start>l.append(n)<block_end><def_stmt>main n<block_start>l=[]<line_sep>jit.conditional_call(n<eq>10 f l n)<line_sep><return>len(l)<block_end><assert_stmt>self.interp_operations(main [10])<eq>1<assert_stmt>self.interp_operations(main [5])<eq>0<block_end><def_stmt>test_cond_call_disappears self<block_start>driver=jit.JitDriver(greens=[] reds=['n'])<def_stmt>f n<block_start><raise>ValueError<block_end><def_stmt>main n<block_start><while_stmt>n<g>0<block_start>driver.jit_merge_point(n=n)<line_sep>jit.conditional_call(<false> f 10)<line_sep>n<augsub>1<block_end><return>42<block_end><assert_stmt>self.meta_interp(main [10])<eq>42<line_sep>self.check_resops(guard_no_exception=0)<block_end><def_stmt>test_cond_call_i self<block_start><def_stmt>f n<block_start><return>n<times>200<block_end><def_stmt>main n m<block_start><return>jit.conditional_call_elidable(n f m)<block_end><assert_stmt>self.interp_operations(main [0 10])<eq>2000<assert_stmt>self.interp_operations(main [15 42])<eq>15<block_end><def_stmt>test_cond_call_r self<block_start><def_stmt>f n<block_start><return>[n]<block_end><def_stmt>main n<block_start><if_stmt>n<eq>10<block_start>l=[]<block_end><else_stmt><block_start>l=<none><block_end>l=jit.conditional_call_elidable(l f n)<line_sep><return>len(l)<block_end><assert_stmt>main(10)<eq>0<assert_stmt>main(5)<eq>1<assert_stmt>self.interp_operations(main [10])<eq>0<assert_stmt>self.interp_operations(main [5])<eq>1<block_end><def_stmt>test_cond_call_constant_in_pyjitpl self<block_start><def_stmt>f a b<block_start><return>a+b<block_end><def_stmt>main n# this is completely constant-folded because the arguments
# to f() are constants.
<block_start><return>jit.conditional_call_elidable(n f 40 2)<block_end><assert_stmt>main(12)<eq>12<assert_stmt>main(0)<eq>42<assert_stmt>self.interp_operations(main [12])<eq>12<line_sep>self.check_operations_history({'finish':1})# empty history
<assert_stmt>self.interp_operations(main [0])<eq>42<line_sep>self.check_operations_history({'finish':1})<block_end># empty history
<def_stmt>test_cond_call_constant_in_optimizer self<block_start>myjitdriver=jit.JitDriver(greens=['m'] reds=['n' 'p'])<def_stmt>externfn x<block_start><return>x-3<block_end><class_stmt>V<block_start><def_stmt>__init__ self value<block_start>self.value=value<block_end><block_end><def_stmt>f n m p<block_start><while_stmt>n<g>0<block_start>myjitdriver.can_enter_jit(n=n p=p m=m)<line_sep>myjitdriver.jit_merge_point(n=n p=p m=m)<line_sep>m1=noConst(m)<line_sep>n<augsub>jit.conditional_call_elidable(p externfn m1)<block_end><return>n<block_end>res=self.meta_interp(f [21 5 0])<assert_stmt>res<eq>-1<line_sep># the COND_CALL_VALUE is constant-folded away by optimizeopt.py
self.check_resops({'int_sub':2 'int_gt':2 'guard_true':2 'jump':1})<block_end><def_stmt>test_cond_call_constant_in_optimizer_1 self# same as test_cond_call_constant_in_optimizer, but the 'value'
# argument changes
<block_start>myjitdriver=jit.JitDriver(greens=['m'] reds=['n' 'p'])<def_stmt>externfn x<block_start><return>x-3<block_end><class_stmt>V<block_start><def_stmt>__init__ self value<block_start>self.value=value<block_end><block_end><def_stmt>f n m p<block_start><while_stmt>n<g>0<block_start>myjitdriver.can_enter_jit(n=n p=p m=m)<line_sep>myjitdriver.jit_merge_point(n=n p=p m=m)<line_sep>m1=noConst(m)<line_sep>n<augsub>jit.conditional_call_elidable(p externfn m1)<block_end><return>n<block_end><assert_stmt>f(21 5 0)<eq>-1<line_sep>res=self.meta_interp(f [21 5 0])<assert_stmt>res<eq>-1<line_sep># the COND_CALL_VALUE is constant-folded away by optimizeopt.py
self.check_resops({'int_sub':2 'int_gt':2 'guard_true':2 'jump':1})<block_end><def_stmt>test_cond_call_constant_in_optimizer_2 self<block_start>myjitdriver=jit.JitDriver(greens=['m'] reds=['n' 'p'])<def_stmt>externfn x<block_start><return>2<block_end><def_stmt>f n m p<block_start><while_stmt>n<g>0<block_start>myjitdriver.can_enter_jit(n=n p=p m=m)<line_sep>myjitdriver.jit_merge_point(n=n p=p m=m)<assert_stmt>p<g>-1<assert_stmt>p<l>1<line_sep>n<augsub>jit.conditional_call_elidable(p externfn n)<block_end><return>n<block_end>res=self.meta_interp(f [21 5 0])<assert_stmt>res<eq>-1<line_sep># optimizer: the COND_CALL_VALUE is turned into a regular
# CALL_PURE, which itself becomes a CALL
self.check_resops(call_pure_i=0 cond_call_value_i=0 call_i=2 int_sub=2)<block_end><def_stmt>test_cond_call_constant_in_optimizer_3 self<block_start>myjitdriver=jit.JitDriver(greens=['m'] reds=['n' 'p'])<def_stmt>externfn x<block_start><return>1<block_end><def_stmt>f n m p<block_start><while_stmt>n<g>0<block_start>myjitdriver.can_enter_jit(n=n p=p m=m)<line_sep>myjitdriver.jit_merge_point(n=n p=p m=m)<assert_stmt>p<g>-1<assert_stmt>p<l>1<line_sep>n0=n<line_sep>n<augsub>jit.conditional_call_elidable(p externfn n0)<line_sep>n<augsub>jit.conditional_call_elidable(p externfn n0)<block_end><return>n<block_end>res=self.meta_interp(f [21 5 0])<assert_stmt>res<eq>-1<line_sep># same as test_cond_call_constant_in_optimizer_2, but the two
# intermediate CALL_PUREs are replaced with only one, because
# they are called with the same arguments
self.check_resops(call_pure_i=0 cond_call_value_i=0 call_i=2 int_sub=4)<block_end><def_stmt>test_cond_call_constant_in_optimizer_4 self<block_start><class_stmt>X<block_start><def_stmt>__init__ self value<block_start>self.value=value<line_sep>self.triple=0<block_end><def_stmt>_compute_triple self<block_start>self.triple=self.value<times>3<line_sep><return>self.triple<block_end><def_stmt>get_triple self<block_start><return>jit.conditional_call_elidable(self.triple X._compute_triple self)<block_end><block_end>myjitdriver=jit.JitDriver(greens=[] reds='auto')<def_stmt>main n<block_start>total=0<while_stmt>n<g>1<block_start>myjitdriver.jit_merge_point()<line_sep>x=X(n)<line_sep>total<augadd>x.get_triple()+x.get_triple()+x.get_triple()<line_sep>n<augsub>10<block_end><return>total<block_end>res=self.meta_interp(main [100])<assert_stmt>res<eq>main(100)<line_sep># remaining: only the first call to get_triple(), as a call_i
# because we know that x.triple == 0 here. The remaining calls
# are removed because equal to the first one.
self.check_resops(call_i=2 cond_call_value_i=0 new_with_vtable=2)<block_end># escapes: _compute_triple(self)
<def_stmt>test_cond_call_constant_in_optimizer_5 self<block_start><def_stmt>_compute_triple value<block_start><return>value<times>3<block_end><class_stmt>X<block_start><def_stmt>__init__ self value<block_start>self.value=value<line_sep>self.triple=0<block_end><def_stmt>get_triple self<block_start>res=jit.conditional_call_elidable(self.triple _compute_triple self.value)<line_sep>self.triple=res<line_sep><return>res<block_end><block_end>myjitdriver=jit.JitDriver(greens=[] reds='auto')<def_stmt>main n<block_start>total=0<while_stmt>n<g>1<block_start>myjitdriver.jit_merge_point()<line_sep>x=X(n)<line_sep>total<augadd>x.get_triple()+x.get_triple()+x.get_triple()<line_sep>n<augsub>10<block_end><return>total<block_end>res=self.meta_interp(main [100])<assert_stmt>res<eq>main(100)<line_sep># remaining: only the first call to get_triple(), as a call_i
# because we know that x.triple == 0 here. The remaining calls
# are removed because equal to the first one.
self.check_resops(call_i=2 cond_call_value_i=0 new_with_vtable=0)<block_end># all virtual
<def_stmt>test_cond_call_multiple_in_optimizer_1 self# test called several times with the same arguments, but
# the condition is not available to the short preamble.
# This means that the second cond_call_value after unrolling
# can't be removed.
<block_start>myjitdriver=jit.JitDriver(greens=[] reds=['n' 'p' 'm'])<def_stmt>externfn x<block_start><return>2000# never actually called
<block_end>@jit.dont_look_inside<def_stmt>randomish p<block_start><return>p+1<block_end><def_stmt>f n m p<block_start><while_stmt>n<g>0<block_start>myjitdriver.can_enter_jit(n=n p=p m=m)<line_sep>myjitdriver.jit_merge_point(n=n p=p m=m)<line_sep>n<augsub>jit.conditional_call_elidable(randomish(p) externfn m)<block_end><return>n<block_end><assert_stmt>f(21 5 1)<eq>-1<line_sep>res=self.meta_interp(f [21 5 1])<assert_stmt>res<eq>-1<line_sep>self.check_resops(call_pure_i=0 cond_call_value_i=2 call_i=2 # randomish()
int_sub=2)<block_end><def_stmt>test_cond_call_multiple_in_optimizer_2 self# test called several times with the same arguments. Ideally
# we would like them to be consolidated into one call even if
# the 'value' are different but available from the short
# preamble. We don't do it so far---it's a mess, because the
# short preamble is supposed to depend only on loop-invariant
# things, and 'value' is (most of the time) not loop-invariant.
<block_start>myjitdriver=jit.JitDriver(greens=[] reds=['n' 'p' 'm'])<def_stmt>externfn x<block_start><return>2# called only the first time
<block_end><def_stmt>f n m p<block_start><while_stmt>n<g>0<block_start>myjitdriver.can_enter_jit(n=n p=p m=m)<line_sep>myjitdriver.jit_merge_point(n=n p=p m=m)<line_sep>p=jit.conditional_call_elidable(p externfn m)<line_sep>n<augsub>p<block_end><return>n<block_end><assert_stmt>f(21 5 0)<eq>-1<line_sep>res=self.meta_interp(f [21 5 0])<assert_stmt>res<eq>-1<line_sep>self.check_resops(call_pure_i=0 cond_call_value_i=2 # ideally 1, but see above
int_sub=2)<block_end><def_stmt>test_cond_call_in_blackhole self<block_start>myjitdriver=jit.JitDriver(greens=[] reds=['n' 'p' 'm'])<def_stmt>externfn x<block_start><return>2<block_end><def_stmt>f n m p<block_start><while_stmt>n<g>0<block_start>myjitdriver.can_enter_jit(n=n p=p m=m)<line_sep>myjitdriver.jit_merge_point(n=n p=p m=m)<if_stmt>n<g>6# will fail and finish in the blackhole
<block_start><pass><block_end><if_stmt>jit.we_are_jitted()# manually inline here
<block_start>p=jit._jit_conditional_call_value(p externfn m)<block_end><else_stmt><block_start>p=jit.conditional_call_elidable(p externfn m)<block_end>n<augsub>p<block_end><return>n<block_end><assert_stmt>f(21 5 0)<eq>-1<line_sep>res=self.meta_interp(f [21 5 0])<assert_stmt>res<eq>-1<block_end><def_stmt>test_cond_call_raises self<block_start>myjitdriver=jit.JitDriver(greens=[] reds=['n' 'p' 'm'])<def_stmt>externfn x m<block_start><if_stmt>m<eq>1<or>m<eq>1008<block_start><raise>ValueError<block_end><return>x+m<block_end><def_stmt>f n m p<block_start><while_stmt>n<g>0<block_start>myjitdriver.can_enter_jit(n=n p=p m=m)<line_sep>myjitdriver.jit_merge_point(n=n p=p m=m)<try_stmt><block_start>p=jit.conditional_call_elidable(p externfn n m)<line_sep>p<augsub>(n+m)# => zero again
<block_end><except_stmt>ValueError<block_start>m<augadd>1000<block_end>m<augadd>1<line_sep>n<augsub>2<block_end><return>n<times>m<block_end><assert_stmt>f(21 0 0)<eq>-2011<line_sep>res=self.meta_interp(f [21 0 0])<assert_stmt>res<eq>-2011<block_end><block_end><class_stmt>TestCall(LLJitMixin CallTest)<block_start><pass><block_end> |
BEHIND_PROXY=<true><line_sep>SWAGGER_BASEPATH=""<line_sep>DEFAULT_DATABASE="dev"<line_sep>DATABASES=["test"]<line_sep>ENV="development"<line_sep>DEBUG=<true><line_sep> |
"""
@author: ArcREST Team
@contact: www.github.com/Esri/ArcREST
@company: Esri
@version: 1.0.0
@description: deletes a group on the AGOL site.
@requirements: Python 2.7.x, ArcGIS 10.2.2, ArcREST 2.0
@copyright: Esri, 2015
"""<import_stmt>os<import_from_stmt>arcpy env<import_from_stmt>arcpy mapping<import_from_stmt>arcpy da<import_stmt>arcpy<import_stmt>ConfigParser<import_stmt>arcrest<line_sep>#--------------------------------------------------------------------------
<class_stmt>FunctionError(Exception)<block_start>""" raised when a function fails to run """<line_sep><pass><block_end>#--------------------------------------------------------------------------
<def_stmt>trace <block_start>"""
trace finds the line, the filename
and error message and returns it
to the user
"""<import_stmt>traceback<import_stmt>sys<line_sep>tb=sys.exc_info()[2]<line_sep>tbinfo=traceback.format_tb(tb)[0]<line_sep># script name + line number
line=tbinfo.split(", ")[1]<line_sep># Get Python syntax error
#
synerror=traceback.format_exc().splitlines()[-1]<line_sep><return>line __file__ synerror<block_end>#--------------------------------------------------------------------------
<def_stmt>main *argv<block_start>""" main driver of program """<try_stmt># Inputs
#
<block_start>adminUsername=argv[0]<line_sep>adminPassword=argv[1]<line_sep>siteURL=argv[2]<line_sep>groupName=argv[3]<line_sep># Logic
#
sh=arcrest.AGOLTokenSecurityHandler(adminUsername adminPassword)<line_sep>admin=arcrest.manageorg.Administration(securityHandler=sh)<line_sep>community=admin.community<line_sep>g=community.getGroupIDs(groupNames=[groupName])<if_stmt>len(g)<eq>0<block_start>arcpy.AddWarning("No Group Exists with That Name %s"%groupName)<line_sep>arcpy.SetParameterAsText(4 <false>)<block_end><elif_stmt>len(g)<eq>1<block_start>groups=community.groups<line_sep>groups.deleteGroup(groupId=g[0])<line_sep>arcpy.AddWarning("%s was erased."%groupName)<line_sep>arcpy.SetParameterAsText(4 <true>)<block_end><else_stmt><block_start>arcpy.AddError("Multiple group names found, please manually delete!")<line_sep>arcpy.SetParameterAsText(4 <false>)<block_end><block_end><except_stmt>arcpy.ExecuteError<block_start>line,filename,synerror=trace()<line_sep>arcpy.AddError("error on line: %s"%line)<line_sep>arcpy.AddError("error in file name: %s"%filename)<line_sep>arcpy.AddError("with error message: %s"%synerror)<line_sep>arcpy.AddError("ArcPy Error Message: %s"%arcpy.GetMessages(2))<block_end><except_stmt>FunctionError f_e<block_start>messages=f_e.args[0]<line_sep>arcpy.AddError("error in function: %s"%messages["function"])<line_sep>arcpy.AddError("error on line: %s"%messages["line"])<line_sep>arcpy.AddError("error in file name: %s"%messages["filename"])<line_sep>arcpy.AddError("with error message: %s"%messages["synerror"])<line_sep>arcpy.AddError("ArcPy Error Message: %s"%messages["arc"])<block_end><except_stmt><block_start>line,filename,synerror=trace()<line_sep>arcpy.AddError("error on line: %s"%line)<line_sep>arcpy.AddError("error in file name: %s"%filename)<line_sep>arcpy.AddError("with error message: %s"%synerror)<block_end><block_end>#--------------------------------------------------------------------------
<if_stmt>__name__<eq>"__main__"<block_start>env.overwriteOutput=<true><line_sep>argv=tuple(str(arcpy.GetParameterAsText(i))<for>i xrange(arcpy.GetArgumentCount()))<line_sep>main(*argv)<block_end> |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-function-docstring
"""These examples should be handle by the classicalfunction compiler"""<import_from_stmt>qiskit.circuit Int1<def_stmt>identity a:Int1<arrow>Int1<block_start><return>a<block_end><def_stmt>bit_and a:Int1 b:Int1<arrow>Int1<block_start><return>a&b<block_end><def_stmt>bit_or a:Int1 b:Int1<arrow>Int1<block_start><return>a|b<block_end><def_stmt>bool_or a:Int1 b:Int1<arrow>Int1<block_start><return>a<or>b<block_end><def_stmt>bool_not a:Int1<arrow>Int1<block_start><return><not>a<block_end><def_stmt>and_and a:Int1 b:Int1 c:Int1<arrow>Int1<block_start><return>a<and>b<and>c<block_end><def_stmt>multiple_binop a:Int1 b:Int1<arrow>Int1<block_start><return>(a<or>b)|(b&a)<and>(a&b)<block_end><def_stmt>id_assing a:Int1<arrow>Int1<block_start>b=a<line_sep><return>b<block_end><def_stmt>example1 a:Int1 b:Int1<arrow>Int1<block_start>c=a&b<line_sep>d=b|a<line_sep><return>c^a|d<block_end><def_stmt>grover_oracle a:Int1 b:Int1 c:Int1 d:Int1<arrow>Int1<block_start><return><not>a<and>b<and><not>c<and>d<block_end> |
<import_from_stmt>libs.utils humanize_date<import_from_stmt>libs NAP_DATE_FORMAT NAP_DIGIT_PRECISION<import_stmt>os<import_stmt>csv<import_stmt>decimal<line_sep>decimal.getcontext().rounding=decimal.ROUND_HALF_UP<def_stmt>export_to_csv list_object csv_file fieldnames<block_start>csv_list_object=humanize_date(list_object)<with_stmt>open(csv_file "w")<as>fd<block_start>writer=csv.DictWriter(fd fieldnames=fieldnames quotechar='"' quoting=csv.QUOTE_ALL )<line_sep>header={fieldname:fieldname.replace("_" " ").title()<for>fieldname fieldnames}<line_sep>writer.writerow(header)<for_stmt>elements csv_list_object<block_start>writer.writerow(elements)<block_end><block_end><block_end><def_stmt>export_statements file_path statements<block_start>export_to_csv(statements file_path ["trade_date" "settle_date" "currency" "activity_type" "company" "symbol_description" "symbol" "quantity" "price" "amount" ] )<block_end><def_stmt>export_app8_part1 file_path purchases<block_start>export_purchases=[]<for_stmt>stock_symbol,stock_queue purchases.items()<block_start><for_stmt>purchase stock_queue<block_start>count=purchase["quantity"].quantize(decimal.Decimal(NAP_DIGIT_PRECISION))<if_stmt>count<g>0<block_start>export_purchases.append({**{"stock_symbol":stock_symbol "count":str(count) "acquire_date":purchase["trade_date"].strftime(NAP_DATE_FORMAT) "purchase_price_in_currency":purchase["price_in_currency"] "purchase_price_in_lev":purchase["price"] } })<block_end><block_end><block_end>export_to_csv(export_purchases file_path ["stock_symbol" "count" "acquire_date" "purchase_price_in_currency" "purchase_price_in_lev"] )<block_end><def_stmt>export_app5_table2 file_path sales<block_start>sales=[{**{k:v<for>k,v sale.items()<if>k<not><in>["symbol" "avg_purchase_price" "sell_exchange_rate" "profit_in_currency" "loss_in_currency"]} **{"code":508} }<for>sale sales]<line_sep>export_to_csv(sales file_path ["code" "trade_date" "sell_price" "purchase_price" "profit" "loss"] )<block_end><def_stmt>export_app8_part4_1 file_path dividend_taxes<block_start>dividends=[{**{k:v<for>k,v dividend_tax.items()<if>k<not><in>["symbol"]} **{"profit_code":8141 "tax_code":1} **{"gross_profit_amount":dividend_tax["gross_profit_amount"].quantize(decimal.Decimal(NAP_DIGIT_PRECISION)) "paid_tax_amount":dividend_tax["paid_tax_amount"].quantize(decimal.Decimal(NAP_DIGIT_PRECISION)) "owe_tax":dividend_tax["owe_tax"].quantize(decimal.Decimal(NAP_DIGIT_PRECISION)) } }<for>dividend_tax dividend_taxes]<line_sep>export_to_csv(dividends file_path ["stock_symbol" "company" "profit_code" "tax_code" "gross_profit_amount" "paid_tax_amount" "owe_tax"] )<block_end> |
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing the done callbacks mechanism."""<import_stmt>asyncio<import_stmt>logging<import_stmt>unittest<import_stmt>grpc<import_from_stmt>grpc.experimental aio<import_from_stmt>src.proto.grpc.testing messages_pb2<import_from_stmt>src.proto.grpc.testing test_pb2_grpc<import_from_stmt>tests_aio.unit._common inject_callbacks<import_from_stmt>tests_aio.unit._test_base AioTestBase<import_from_stmt>tests_aio.unit._test_server start_test_server<line_sep>_NUM_STREAM_RESPONSES=5<line_sep>_REQUEST_PAYLOAD_SIZE=7<line_sep>_RESPONSE_PAYLOAD_SIZE=42<line_sep>_REQUEST=b'\x01\x02\x03'<line_sep>_RESPONSE=b'\x04\x05\x06'<line_sep>_TEST_METHOD='/test/Test'<line_sep>_FAKE_METHOD='/test/Fake'<class_stmt>TestClientSideDoneCallback(AioTestBase)<block_start><async_keyword><def_stmt>setUp self<block_start>address,self._server=<await>start_test_server()<line_sep>self._channel=aio.insecure_channel(address)<line_sep>self._stub=test_pb2_grpc.TestServiceStub(self._channel)<block_end><async_keyword><def_stmt>tearDown self<block_start><await>self._channel.close()<line_sep><await>self._server.stop(<none>)<block_end><async_keyword><def_stmt>test_add_after_done self<block_start>call=self._stub.UnaryCall(messages_pb2.SimpleRequest())<line_sep>self.assertEqual(grpc.StatusCode.OK <await>call.code())<line_sep>validation=inject_callbacks(call)<line_sep><await>validation<block_end><async_keyword><def_stmt>test_unary_unary self<block_start>call=self._stub.UnaryCall(messages_pb2.SimpleRequest())<line_sep>validation=inject_callbacks(call)<line_sep>self.assertEqual(grpc.StatusCode.OK <await>call.code())<line_sep><await>validation<block_end><async_keyword><def_stmt>test_unary_stream self<block_start>request=messages_pb2.StreamingOutputCallRequest()<for_stmt>_ range(_NUM_STREAM_RESPONSES)<block_start>request.response_parameters.append(messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE))<block_end>call=self._stub.StreamingOutputCall(request)<line_sep>validation=inject_callbacks(call)<line_sep>response_cnt=0<async_keyword><for_stmt>response call<block_start>response_cnt<augadd>1<line_sep>self.assertIsInstance(response messages_pb2.StreamingOutputCallResponse)<line_sep>self.assertEqual(_RESPONSE_PAYLOAD_SIZE len(response.payload.body))<block_end>self.assertEqual(_NUM_STREAM_RESPONSES response_cnt)<line_sep>self.assertEqual(grpc.StatusCode.OK <await>call.code())<line_sep><await>validation<block_end><async_keyword><def_stmt>test_stream_unary self<block_start>payload=messages_pb2.Payload(body=b'\0'<times>_REQUEST_PAYLOAD_SIZE)<line_sep>request=messages_pb2.StreamingInputCallRequest(payload=payload)<async_keyword><def_stmt>gen <block_start><for_stmt>_ range(_NUM_STREAM_RESPONSES)<block_start><yield>request<block_end><block_end>call=self._stub.StreamingInputCall(gen())<line_sep>validation=inject_callbacks(call)<line_sep>response=<await>call<line_sep>self.assertIsInstance(response messages_pb2.StreamingInputCallResponse)<line_sep>self.assertEqual(_NUM_STREAM_RESPONSES<times>_REQUEST_PAYLOAD_SIZE response.aggregated_payload_size)<line_sep>self.assertEqual(grpc.StatusCode.OK <await>call.code())<line_sep><await>validation<block_end><async_keyword><def_stmt>test_stream_stream self<block_start>call=self._stub.FullDuplexCall()<line_sep>validation=inject_callbacks(call)<line_sep>request=messages_pb2.StreamingOutputCallRequest()<line_sep>request.response_parameters.append(messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE))<for_stmt>_ 
range(_NUM_STREAM_RESPONSES)<block_start><await>call.write(request)<line_sep>response=<await>call.read()<line_sep>self.assertIsInstance(response messages_pb2.StreamingOutputCallResponse)<line_sep>self.assertEqual(_RESPONSE_PAYLOAD_SIZE len(response.payload.body))<block_end><await>call.done_writing()<line_sep>self.assertEqual(grpc.StatusCode.OK <await>call.code())<line_sep><await>validation<block_end><block_end><class_stmt>TestServerSideDoneCallback(AioTestBase)<block_start><async_keyword><def_stmt>setUp self<block_start>self._server=aio.server()<line_sep>port=self._server.add_insecure_port('[::]:0')<line_sep>self._channel=aio.insecure_channel('localhost:%d'%port)<block_end><async_keyword><def_stmt>tearDown self<block_start><await>self._channel.close()<line_sep><await>self._server.stop(<none>)<block_end><async_keyword><def_stmt>_register_method_handler self method_handler<block_start>"""Registers method handler and starts the server"""<line_sep>generic_handler=grpc.method_handlers_generic_handler('test' dict(Test=method_handler) )<line_sep>self._server.add_generic_rpc_handlers((generic_handler ))<line_sep><await>self._server.start()<block_end><async_keyword><def_stmt>test_unary_unary self<block_start>validation_future=self.loop.create_future()<async_keyword><def_stmt>test_handler request:bytes context:aio.ServicerContext<block_start>self.assertEqual(_REQUEST request)<line_sep>validation_future.set_result(inject_callbacks(context))<line_sep><return>_RESPONSE<block_end><await>self._register_method_handler(grpc.unary_unary_rpc_method_handler(test_handler))<line_sep>response=<await>self._channel.unary_unary(_TEST_METHOD)(_REQUEST)<line_sep>self.assertEqual(_RESPONSE response)<line_sep>validation=<await>validation_future<line_sep><await>validation<block_end><async_keyword><def_stmt>test_unary_stream self<block_start>validation_future=self.loop.create_future()<async_keyword><def_stmt>test_handler request:bytes context:aio.ServicerContext<block_start>self.assertEqual(_REQUEST request)<line_sep>validation_future.set_result(inject_callbacks(context))<for_stmt>_ range(_NUM_STREAM_RESPONSES)<block_start><yield>_RESPONSE<block_end><block_end><await>self._register_method_handler(grpc.unary_stream_rpc_method_handler(test_handler))<line_sep>call=self._channel.unary_stream(_TEST_METHOD)(_REQUEST)<async_keyword><for_stmt>response call<block_start>self.assertEqual(_RESPONSE response)<block_end>validation=<await>validation_future<line_sep><await>validation<block_end><async_keyword><def_stmt>test_stream_unary self<block_start>validation_future=self.loop.create_future()<async_keyword><def_stmt>test_handler request_iterator context:aio.ServicerContext<block_start>validation_future.set_result(inject_callbacks(context))<async_keyword><for_stmt>request request_iterator<block_start>self.assertEqual(_REQUEST request)<block_end><return>_RESPONSE<block_end><await>self._register_method_handler(grpc.stream_unary_rpc_method_handler(test_handler))<line_sep>call=self._channel.stream_unary(_TEST_METHOD)()<for_stmt>_ range(_NUM_STREAM_RESPONSES)<block_start><await>call.write(_REQUEST)<block_end><await>call.done_writing()<line_sep>self.assertEqual(_RESPONSE <await>call)<line_sep>validation=<await>validation_future<line_sep><await>validation<block_end><async_keyword><def_stmt>test_stream_stream self<block_start>validation_future=self.loop.create_future()<async_keyword><def_stmt>test_handler request_iterator 
context:aio.ServicerContext<block_start>validation_future.set_result(inject_callbacks(context))<async_keyword><for_stmt>request request_iterator<block_start>self.assertEqual(_REQUEST request)<block_end><return>_RESPONSE<block_end><await>self._register_method_handler(grpc.stream_stream_rpc_method_handler(test_handler))<line_sep>call=self._channel.stream_stream(_TEST_METHOD)()<for_stmt>_ range(_NUM_STREAM_RESPONSES)<block_start><await>call.write(_REQUEST)<block_end><await>call.done_writing()<async_keyword><for_stmt>response call<block_start>self.assertEqual(_RESPONSE response)<block_end>validation=<await>validation_future<line_sep><await>validation<block_end><async_keyword><def_stmt>test_error_in_handler self<block_start>"""Errors in the handler still triggers callbacks."""<line_sep>validation_future=self.loop.create_future()<async_keyword><def_stmt>test_handler request:bytes context:aio.ServicerContext<block_start>self.assertEqual(_REQUEST request)<line_sep>validation_future.set_result(inject_callbacks(context))<line_sep><raise>RuntimeError('A test RuntimeError')<block_end><await>self._register_method_handler(grpc.unary_unary_rpc_method_handler(test_handler))<with_stmt>self.assertRaises(aio.AioRpcError)<as>exception_context<block_start><await>self._channel.unary_unary(_TEST_METHOD)(_REQUEST)<block_end>rpc_error=exception_context.exception<line_sep>self.assertEqual(grpc.StatusCode.UNKNOWN rpc_error.code())<line_sep>validation=<await>validation_future<line_sep><await>validation<block_end><async_keyword><def_stmt>test_error_in_callback self<block_start>"""Errors in the callback won't be propagated to client."""<line_sep>validation_future=self.loop.create_future()<async_keyword><def_stmt>test_handler request:bytes context:aio.ServicerContext<block_start>self.assertEqual(_REQUEST request)<def_stmt>exception_raiser unused_context<block_start><raise>RuntimeError('A test RuntimeError')<block_end>context.add_done_callback(exception_raiser)<line_sep>validation_future.set_result(inject_callbacks(context))<line_sep><return>_RESPONSE<block_end><await>self._register_method_handler(grpc.unary_unary_rpc_method_handler(test_handler))<line_sep>response=<await>self._channel.unary_unary(_TEST_METHOD)(_REQUEST)<line_sep>self.assertEqual(_RESPONSE response)<line_sep># Following callbacks won't be invoked, if one of the callback crashed.
validation=<await>validation_future<with_stmt>self.assertRaises(asyncio.TimeoutError)<block_start><await>validation<block_end># Invoke RPC one more time to ensure the toxic callback won't break the
# server.
<with_stmt>self.assertRaises(aio.AioRpcError)<as>exception_context<block_start><await>self._channel.unary_unary(_FAKE_METHOD)(_REQUEST)<block_end>rpc_error=exception_context.exception<line_sep>self.assertEqual(grpc.StatusCode.UNIMPLEMENTED rpc_error.code())<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>logging.basicConfig(level=logging.DEBUG)<line_sep>unittest.main(verbosity=2)<block_end> |
<import_from_stmt>include *<line_sep>#https://github.com/marvis/pytorch-yolo2/blob/master/FocalLoss.py
#https://github.com/unsky/focal-loss
<class_stmt>FocalLoss2d(nn.Module)<block_start><def_stmt>__init__ self gamma=2 size_average=<true><block_start>super(FocalLoss2d self).__init__()<line_sep>self.gamma=gamma<line_sep>self.size_average=size_average<block_end><def_stmt>forward self logit target class_weight=<none> type='softmax'<block_start>target=target.view(-1 1).long()<if_stmt>type<eq>'sigmoid'<block_start><if_stmt>class_weight<is><none><block_start>class_weight=[1]<times>2<block_end>#[0.5, 0.5]
prob=F.sigmoid(logit)<line_sep>prob=prob.view(-1 1)<line_sep>prob=torch.cat((1-prob prob) 1)<line_sep>select=torch.FloatTensor(len(prob) 2).zero_().cuda()<line_sep>select.scatter_(1 target 1.)<block_end><elif_stmt>type<eq>'softmax'<block_start>B,C,H,W=logit.size()<if_stmt>class_weight<is><none><block_start>class_weight=[1]<times>C<block_end>#[1/C]*C
logit=logit.permute(0 2 3 1).contiguous().view(-1 C)<line_sep>prob=F.softmax(logit 1)<line_sep>select=torch.FloatTensor(len(prob) C).zero_().cuda()<line_sep>select.scatter_(1 target 1.)<block_end>class_weight=torch.FloatTensor(class_weight).cuda().view(-1 1)<line_sep>class_weight=torch.gather(class_weight 0 target)<line_sep>prob=(prob<times>select).sum(1).view(-1 1)<line_sep>prob=torch.clamp(prob 1e-8 1-1e-8)<line_sep>batch_loss=-class_weight<times>(torch.pow((1-prob) self.gamma))<times>prob.log()<if_stmt>self.size_average<block_start>loss=batch_loss.mean()<block_end><else_stmt><block_start>loss=batch_loss<block_end><return>loss<block_end><block_end>##------------
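# A minimal usage sketch (not part of the original file): tensor shapes are assumptions
# inferred from forward() above, which expects raw [B, C, H, W] class scores, a target of
# B*H*W integer class ids, and CUDA tensors (the select/class_weight buffers are created
# with .cuda()). RobustFocalLoss2d below takes the same arguments.
#
#   criterion = FocalLoss2d(gamma=2)
#   logit  = torch.randn(4, 2, 128, 128).cuda()          # [B, C, H, W]
#   target = torch.randint(0, 2, (4, 128, 128)).cuda()   # [B, H, W] class ids
#   loss   = criterion(logit, target, type='softmax')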
<class_stmt>RobustFocalLoss2d(nn.Module)#assume top 10% is outliers
<block_start><def_stmt>__init__ self gamma=2 size_average=<true><block_start>super(RobustFocalLoss2d self).__init__()<line_sep>self.gamma=gamma<line_sep>self.size_average=size_average<block_end><def_stmt>forward self logit target class_weight=<none> type='softmax'<block_start>target=target.view(-1 1).long()<if_stmt>type<eq>'sigmoid'<block_start><if_stmt>class_weight<is><none><block_start>class_weight=[1]<times>2<block_end>#[0.5, 0.5]
prob=F.sigmoid(logit)<line_sep>prob=prob.view(-1 1)<line_sep>prob=torch.cat((1-prob prob) 1)<line_sep>select=torch.FloatTensor(len(prob) 2).zero_().cuda()<line_sep>select.scatter_(1 target 1.)<block_end><elif_stmt>type<eq>'softmax'<block_start>B,C,H,W=logit.size()<if_stmt>class_weight<is><none><block_start>class_weight=[1]<times>C<block_end>#[1/C]*C
logit=logit.permute(0 2 3 1).contiguous().view(-1 C)<line_sep>prob=F.softmax(logit 1)<line_sep>select=torch.FloatTensor(len(prob) C).zero_().cuda()<line_sep>select.scatter_(1 target 1.)<block_end>class_weight=torch.FloatTensor(class_weight).cuda().view(-1 1)<line_sep>class_weight=torch.gather(class_weight 0 target)<line_sep>prob=(prob<times>select).sum(1).view(-1 1)<line_sep>prob=torch.clamp(prob 1e-8 1-1e-8)<line_sep>focus=torch.pow((1-prob) self.gamma)<line_sep>#focus = torch.where(focus < 2.0, focus, torch.zeros(prob.size()).cuda())
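# the clamp below caps the focal weight (1 - p)^gamma at 2; this is the only difference from FocalLoss2d above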
focus=torch.clamp(focus 0 2)<line_sep>batch_loss=-class_weight<times>focus<times>prob.log()<if_stmt>self.size_average<block_start>loss=batch_loss.mean()<block_end><else_stmt><block_start>loss=batch_loss<block_end><return>loss<block_end><block_end>##------------
## http://geek.csdn.net/news/detail/126833
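# numerically stable binary cross-entropy on raw logits:
# loss = max(z, 0) - z*t + log(1 + exp(-|z|)), averaged over all elements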
<class_stmt>PseudoBCELoss2d(nn.Module)<block_start><def_stmt>__init__ self<block_start>super(PseudoBCELoss2d self).__init__()<block_end><def_stmt>forward self logit truth<block_start>z=logit.view(-1)<line_sep>t=truth.view(-1)<line_sep>loss=z.clamp(min=0)-z<times>t+torch.log(1+torch.exp(-z.abs()))<line_sep>loss=loss.sum()/len(t)#w.sum()
<return>loss<block_end><block_end>#
# # https://github.com/bermanmaxim/jaccardSegment/blob/master/losses.py
# # https://discuss.pytorch.org/t/solved-what-is-the-correct-way-to-implement-custom-loss-function/3568/4
# class CrossEntropyLoss2d(nn.Module):
# def __init__(self, weight=None, size_average=True):
# super(CrossEntropyLoss2d, self).__init__()
# self.nll_loss = nn.NLLLoss2d(weight, size_average)
#
# def forward(self, logits, targets):
# return self.nll_loss(F.log_softmax(logits), targets)
#
# class BCELoss2d(nn.Module):
# def __init__(self, weight=None, size_average=True):
# super(BCELoss2d, self).__init__()
# self.bce_loss = nn.BCELoss(weight, size_average)
#
# def forward(self, logits, targets):
# probs = F.sigmoid(logits)
# probs_flat = probs.view (-1)
# targets_flat = targets.view(-1)
# return self.bce_loss(probs_flat, targets_flat)
#
#
# class SoftDiceLoss(nn.Module):
# def __init__(self): #weight=None, size_average=True):
# super(SoftDiceLoss, self).__init__()
#
#
# def forward(self, logits, targets):
#
# probs = F.sigmoid(logits)
# num = targets.size(0)
# m1 = probs.view(num,-1)
# m2 = targets.view(num,-1)
# intersection = (m1 * m2)
# score = 2. * (intersection.sum(1)+1) / (m1.sum(1) + m2.sum(1)+1)
# score = 1- score.sum()/num
# return score
#
#
#
# ## http://geek.csdn.net/news/detail/126833
# class WeightedBCELoss2d(nn.Module):
# def __init__(self):
# super(WeightedBCELoss2d, self).__init__()
#
# def forward(self, logits, labels, weights):
# w = weights.view(-1)
# z = logits.view (-1)
# t = labels.view (-1)
# loss = w*z.clamp(min=0) - w*z*t + w*torch.log(1 + torch.exp(-z.abs()))
# loss = loss.sum()/(w.sum()+ 1e-12)
# return loss
#
# class WeightedSoftDiceLoss(nn.Module):
# def __init__(self):
# super(WeightedSoftDiceLoss, self).__init__()
#
# def forward(self, logits, labels, weights):
# probs = F.sigmoid(logits)
# num = labels.size(0)
# w = (weights).view(num,-1)
# w2 = w*w
# m1 = (probs ).view(num,-1)
# m2 = (labels ).view(num,-1)
# intersection = (m1 * m2)
# score = 2. * ((w2*intersection).sum(1)+1) / ((w2*m1).sum(1) + (w2*m2).sum(1)+1)
# score = 1 - score.sum()/num
# return score
#
#
#
#
#
# def multi_loss(logits, labels):
# #l = BCELoss2d()(logits, labels)
#
#
# if 0:
# l = BCELoss2d()(logits, labels) + SoftDiceLoss()(logits, labels)
#
# #compute weights
# else:
# batch_size,C,H,W = labels.size()
# weights = Variable(torch.tensor.torch.ones(labels.size())).cuda()
#
# if 1: #use weights
# kernel_size = 5
# avg = F.avg_pool2d(labels,kernel_size=kernel_size,padding=kernel_size//2,stride=1)
# boundary = avg.ge(0.01) * avg.le(0.99)
# boundary = boundary.float()
#
# w0 = weights.sum()
# weights = weights + boundary*2
# w1 = weights.sum()
# weights = weights/w1*w0
#
# l = WeightedBCELoss2d()(logits, labels, weights) + \
# WeightedSoftDiceLoss()(logits, labels, weights)
#
# return l
#
#
# #
# #
# #
# #
# #
# #
# #
# # class SoftCrossEntroyLoss(nn.Module):
# # def __init__(self):
# # super(SoftCrossEntroyLoss, self).__init__()
# #
# # def forward(self, logits, soft_labels):
# # #batch_size, num_classes = logits.size()
# # # soft_labels = labels.view(-1,num_classes)
# # # logits = logits.view(-1,num_classes)
# #
# # logits = logits - logits.max()
# # log_sum_exp = torch.log(torch.sum(torch.exp(logits), 1))
# # loss = - (soft_labels*logits).sum(1) + log_sum_exp
# # loss = loss.mean()
# #
# # return loss
# #
# #
# #
# # # loss, accuracy -------------------------
# # def top_accuracy(probs, labels, top_k=(1,)):
# # """Computes the precision@k for the specified values of k"""
# #
# # probs = probs.data
# # labels = labels.data
# #
# # max_k = max(top_k)
# # batch_size = labels.size(0)
# #
# # values, indices = probs.topk(max_k, dim=1, largest=True, sorted=True)
# # indices = indices.t()
# # corrects = indices.eq(labels.view(1, -1).expand_as(indices))
# #
# # accuracy = []
# # for k in top_k:
# # # https://stackoverflow.com/questions/509211/explain-slice-notation
# # # a[:end] # items from the beginning through end-1
# # c = corrects[:k].view(-1).float().sum(0, keepdim=True)
# # accuracy.append(c.mul_(1. / batch_size))
# # return accuracy
# #
# #
# # ## focal loss ## ---------------------------------------------------
# # class CrossEntroyLoss(nn.Module):
# # def __init__(self):
# # super(CrossEntroyLoss, self).__init__()
# #
# # def forward(self, logits, labels):
# # #batch_size, num_classes = logits.size()
# # # labels = labels.view(-1,1)
# # # logits = logits.view(-1,num_classes)
# #
# # max_logits = logits.max()
# # log_sum_exp = torch.log(torch.sum(torch.exp(logits-max_logits), 1))
# # loss = log_sum_exp - logits.gather(dim=1, index=labels.view(-1,1)).view(-1) + max_logits
# # loss = loss.mean()
# #
# # return loss
# #
# # ## https://github.com/unsky/focal-loss
# # ## https://github.com/sciencefans/Focal-Loss
# # ## https://www.kaggle.com/c/carvana-image-masking-challenge/discussion/39951
# #
# # # https://raberrytv.wordpress.com/2017/07/01/pytorch-kludges-to-ensure-numerical-stability/
# # # https://github.com/pytorch/pytorch/issues/1620
# # class FocalLoss(nn.Module):
# # def __init__(self,gamma = 2, alpha=1.2):
# # super(FocalLoss, self).__init__()
# # self.gamma = gamma
# # self.alpha = alpha
# #
# #
# # def forward(self, logits, labels):
# # eps = 1e-7
# #
# # # loss = - np.power(1 - p, gamma) * np.log(p))
# # probs = F.softmax(logits)
# # probs = probs.gather(dim=1, index=labels.view(-1,1)).view(-1)
# # probs = torch.clamp(probs, min=eps, max=1-eps)
# #
# # loss = -torch.pow(1-probs, self.gamma) *torch.log(probs)
# # loss = loss.mean()*self.alpha
# #
# # return loss
# #
# #
# #
# #
# # # https://arxiv.org/pdf/1511.05042.pdf
# # class TalyorCrossEntroyLoss(nn.Module):
# # def __init__(self):
# # super(TalyorCrossEntroyLoss, self).__init__()
# #
# # def forward(self, logits, labels):
# # #batch_size, num_classes = logits.size()
# # # labels = labels.view(-1,1)
# # # logits = logits.view(-1,num_classes)
# #
# # talyor_exp = 1 + logits + logits**2
# # loss = talyor_exp.gather(dim=1, index=labels.view(-1,1)).view(-1) /talyor_exp.sum(dim=1)
# # loss = loss.mean()
# #
# # return loss
# #
# # # check #################################################################
# # def run_check_focal_loss():
# # batch_size = 64
# # num_classes = 15
# #
# # logits = np.random.uniform(-2,2,size=(batch_size,num_classes))
# # labels = np.random.choice(num_classes,size=(batch_size))
# #
# # logits = Variable(torch.from_numpy(logits)).cuda()
# # labels = Variable(torch.from_numpy(labels)).cuda()
# #
# # focal_loss = FocalLoss(gamma = 2)
# # loss = focal_loss(logits, labels)
# # print (loss)
# #
# #
# # def run_check_soft_cross_entropy_loss():
# # batch_size = 64
# # num_classes = 15
# #
# # logits = np.random.uniform(-2,2,size=(batch_size,num_classes))
# # soft_labels = np.random.uniform(-2,2,size=(batch_size,num_classes))
# #
# # logits = Variable(torch.from_numpy(logits)).cuda()
# # soft_labels = Variable(torch.from_numpy(soft_labels)).cuda()
# # soft_labels = F.softmax(soft_labels,1)
# #
# # soft_cross_entropy_loss = SoftCrossEntroyLoss()
# # loss = soft_cross_entropy_loss(logits, soft_labels)
# # print (loss)
#
# main #################################################################
<if_stmt>__name__<eq>'__main__'<block_start>print('%s: calling main function ... '%os.path.basename(__file__))<line_sep>print('\nsuccess!')<block_end>