"""passlib.handlers.postgres_md5 - MD5-based algorithm used by Postgres for pg_shadow table"""<line_sep>#=============================================================================
# imports
#=============================================================================
# core
<import_from_stmt>hashlib md5<import_stmt>re<import_stmt>logging<line_sep>log=logging.getLogger(__name__)<import_from_stmt>warnings warn<line_sep># site
# pkg
<import_from_stmt>passlib.utils to_bytes<import_from_stmt>passlib.utils.compat b bytes str_to_uascii unicode u<import_stmt>passlib.utils.handlers<as>uh<line_sep># local
__all__=["postgres_md5" ]<line_sep>#=============================================================================
# handler
#=============================================================================
<class_stmt>postgres_md5(uh.HasUserContext uh.StaticHandler)<block_start>"""This class implements the Postgres MD5 Password hash, and follows the :ref:`password-hash-api`.
It does a single round of hashing, and relies on the username as the salt.
The :meth:`~passlib.ifc.PasswordHash.encrypt`, :meth:`~passlib.ifc.PasswordHash.genhash`, and :meth:`~passlib.ifc.PasswordHash.verify` methods all require the
following additional contextual keywords:
:type user: str
:param user: name of postgres user account this password is associated with.
"""<line_sep>#===================================================================
# algorithm information
#===================================================================
name="postgres_md5"<line_sep>_hash_prefix=u("md5")<line_sep>checksum_chars=uh.HEX_CHARS<line_sep>checksum_size=32<line_sep>#===================================================================
# primary interface
#===================================================================
<def_stmt>_calc_checksum self secret<block_start><if_stmt>isinstance(secret unicode)<block_start>secret=secret.encode("utf-8")<block_end>user=to_bytes(self.user "utf-8" param="user")<line_sep><return>str_to_uascii(md5(secret+user).hexdigest())<block_end>#===================================================================
# eoc
#===================================================================
<block_end>#=============================================================================
# eof
#=============================================================================
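# --- Illustrative sketch (added for clarity; not part of the passlib source above) ---
# The handler computes md5(password + username) and prepends the "md5" prefix declared
# via _hash_prefix, which is the format Postgres keeps in the pg_shadow table. The
# username and password below are made-up example values.
from hashlib import md5

def pg_md5_hash(password, user):
    return "md5" + md5(password.encode("utf-8") + user.encode("utf-8")).hexdigest()

assert pg_md5_hash("secret", "postgres") == "md5" + md5(b"secretpostgres").hexdigest()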
|
# Copyright 2021 The Waymo Open Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module to load and run a Faster R-CNN model."""<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torchvision<line_sep>model=<none><line_sep>DATA_FIELDS=['FRONT_IMAGE']<def_stmt>initialize_model <block_start><global>model<line_sep>model=torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=<true>)<line_sep>model.eval()<block_end><def_stmt>translate_label_to_wod label<block_start>"""Translate a single COCO class to its corresponding WOD class.
Note: Returns -1 if this COCO class has no corresponding class in WOD.
Args:
label: int COCO class label
Returns:
Int WOD class label, or -1.
"""<line_sep>label_conversion_map={1:2 # Person is ped
2:4 # Bicycle is bicycle
3:1 # Car is vehicle
4:1 # Motorcycle is vehicle
6:1 # Bus is vehicle
8:1 # Truck is vehicle
13:3 # Stop sign is sign
}<line_sep><return>label_conversion_map.get(label -1)<block_end># BEGIN-INTERNAL
# pylint: disable=invalid-name
# END-INTERNAL
<def_stmt>run_model FRONT_IMAGE<block_start>"""Run the model on the RGB image.
Args:
FRONT_IMAGE: H x W x 3 numpy ndarray.
Returns:
Dict from string to numpy ndarray.
"""<line_sep># Convert the image to a PyTorch-friendly format by casting it from uint8 to
# float32 (and dividing by 255 to take it from [0, 255] to [0, 1]) and
# transposing it from H x W x C to C x H x W.
transposed_float_img=np.transpose(FRONT_IMAGE.astype(np.float32)/255.0 [2 0 1])<line_sep>outputs=model([torch.from_numpy(transposed_float_img)])<line_sep>corners=outputs[0]['boxes'][0 <ellipsis>].detach().numpy()<line_sep>scores=outputs[0]['scores'][0 <ellipsis>].detach().numpy()<line_sep>coco_classes=outputs[0]['labels'][0 <ellipsis>].detach().numpy()<line_sep># Convert the classes from COCO classes to WOD classes, and only keep
# detections that belong to a WOD class.
wod_classes=np.vectorize(translate_label_to_wod)(coco_classes)<line_sep>corners=corners[wod_classes<g>0]<line_sep>scores=scores[wod_classes<g>0]<line_sep>classes=wod_classes[wod_classes<g>0]<line_sep># Note: Torchvision's pretrained models returns boxes in the format
# (ymin, xmin, ymax, xmax). Thus, this function needs to convert them to the
# format expected by WOD, namely (center_x, center_y, width, height).
boxes=np.zeros_like(corners)<line_sep>boxes[: 0]=(corners[: 3]+corners[: 1])/2.0<line_sep>boxes[: 1]=(corners[: 2]+corners[: 0])/2.0<line_sep>boxes[: 2]=(corners[: 3]-corners[: 1])<line_sep>boxes[: 3]=(corners[: 2]-corners[: 0])<line_sep><return>{'boxes':boxes 'scores':scores 'classes':classes }<block_end># BEGIN-INTERNAL
# pylint: disable=invalid-name
# END-INTERNAL
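# --- Illustrative check (added for clarity; not part of the Waymo tutorial file above) ---
# It exercises the COCO-to-WOD label map and mirrors the corner-to-center conversion that
# run_model() performs, using the (ymin, xmin, ymax, xmax) corner ordering the code above
# assumes. All numbers are made up.
assert translate_label_to_wod(1) == 2   # COCO person -> WOD pedestrian
assert translate_label_to_wod(7) == -1  # COCO train has no WOD counterpart
ymin, xmin, ymax, xmax = 10.0, 20.0, 50.0, 100.0
center_x, center_y = (xmax + xmin) / 2.0, (ymax + ymin) / 2.0
width, height = xmax - xmin, ymax - ymin
assert (center_x, center_y, width, height) == (60.0, 30.0, 80.0, 40.0)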
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Coordination and locking utilities."""<import_stmt>inspect<import_stmt>decorator<import_from_stmt>oslo_concurrency lockutils<import_from_stmt>oslo_log log<import_from_stmt>oslo_utils timeutils<line_sep>LOG=log.getLogger(__name__)<def_stmt>synchronized lock_name<block_start>"""Synchronization decorator.
:param str lock_name: Lock name.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one process will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
Lock name can be formatted using Python format string syntax::
@synchronized('{f_name}-{resource.id}-{snap[name]}')
def foo(self, resource, snap):
...
Available field names are: decorated function parameters and
`f_name` as a decorated function name.
"""<line_sep>@decorator.decorator<def_stmt>_synchronized f *a **k<block_start>sig=inspect.signature(f).bind(*a **k)<line_sep>sig.apply_defaults()<line_sep>call_args=sig.arguments<line_sep>call_args['f_name']=f.__name__<line_sep>lock_format_name=lock_name.format(**call_args)<line_sep>t1=timeutils.now()<line_sep>t2=<none><try_stmt><block_start><with_stmt>lockutils.lock(lock_format_name)<block_start>t2=timeutils.now()<line_sep>LOG.debug('Lock "%(name)s" acquired by "%(function)s" :: '<concat>'waited %(wait_secs)0.3fs' {'name':lock_format_name 'function':f.__name__ 'wait_secs':(t2-t1)})<line_sep><return>f(*a **k)<block_end><block_end><finally_stmt><block_start>t3=timeutils.now()<if_stmt>t2<is><none><block_start>held_secs="N/A"<block_end><else_stmt><block_start>held_secs="%0.3fs"%(t3-t2)<block_end>LOG.debug('Lock "%(name)s" released by "%(function)s" :: held '<concat>'%(held_secs)s' {'name':lock_format_name 'function':f.__name__ 'held_secs':held_secs})<block_end><block_end><return>_synchronized<block_end>
|
<import_from_stmt>os PathLike<import_from_stmt>typing Dict Iterator Union Optional<import_from_stmt>allennlp.data.instance Instance<import_from_stmt>allennlp.data.dataset_readers.dataset_reader DatasetReader WorkerInfo DatasetReaderInput <line_sep>@DatasetReader.register("multitask")<class_stmt>MultiTaskDatasetReader(DatasetReader)<block_start>"""
This `DatasetReader` simply collects a dictionary of other `DatasetReaders`. It is designed for
a different class (the `MultiTaskDataLoader`) to actually read from each of the underlying
dataset readers, and so this really is just a glorified dictionary that we can construct as a
`DatasetReader`. We throw an error if you try to actually call `read()`, because you should be
doing that differently.
Registered as a `DatasetReader` with name "multitask".
# Parameters
readers : `Dict[str, DatasetReader]`
A mapping from dataset name to `DatasetReader` objects for reading that dataset. You can
use whatever names you want for the datasets, but they have to match the keys you use for
data files and in other places in the `MultiTaskDataLoader` and `MultiTaskScheduler`.
"""<def_stmt>__init__ self readers:Dict[str DatasetReader]<arrow><none><block_start>self.readers={task:_MultitaskDatasetReaderShim(reader task)<for>task,reader readers.items()}<block_end><def_stmt>read # type: ignore
self file_paths:Union[PathLike str Dict[str Union[PathLike str]]] * force_task:Optional[str]=<none><arrow>Union[Iterator[Instance] Dict[str Iterator[Instance]]]<block_start><if_stmt>force_task<is><none><block_start><raise>RuntimeError("This class is not designed to be called like this.")<block_end><return>self.readers[force_task].read(file_paths)<block_end><block_end>@DatasetReader.register("multitask_shim")<class_stmt>_MultitaskDatasetReaderShim(DatasetReader)<block_start>"""This dataset reader wraps another dataset reader and adds the name of the "task" into
each instance as a metadata field. You should not have to use this yourself."""<def_stmt>__init__ self inner:DatasetReader head:str **kwargs<block_start>super().__init__(**kwargs)<line_sep>self.inner=inner<line_sep>self.head=head<block_end><def_stmt>_set_worker_info self info:Optional[WorkerInfo]<arrow><none><block_start>"""
Should only be used internally.
"""<line_sep>super()._set_worker_info(info)<line_sep>self.inner._set_worker_info(info)<block_end><def_stmt>read self file_path:DatasetReaderInput<arrow>Iterator[Instance]<block_start><import_from_stmt>allennlp.data.fields MetadataField<for_stmt>instance self.inner.read(file_path)<block_start>instance.add_field("task" MetadataField(self.head))<line_sep><yield>instance<block_end><block_end><def_stmt>text_to_instance self *inputs<arrow>Instance<block_start><import_from_stmt>allennlp.data.fields MetadataField<line_sep>instance=self.inner.text_to_instance(*inputs)<line_sep>instance.add_field("task" MetadataField(self.head))<line_sep><return>instance<block_end><def_stmt>apply_token_indexers self instance:Instance<arrow><none><block_start>self.inner.apply_token_indexers(instance)<block_end><block_end>
|
"""
Functions to smooth out interpolations performed in Z-space. These are based on
<NAME>'s corresponding functions in manim. These are only used for
the purpose of visualization; they do not affect training.
"""<import_stmt>torch<def_stmt>smooth t<block_start>error=torch.sigmoid(torch.tensor(-5.))<line_sep><return>torch.clamp(torch.sigmoid(10<times>(t-0.5)-error)/(1-2<times>error) 0 1)<block_end><def_stmt>there_and_back t<block_start>new_t=torch.where(t<l>0.5 2<times>t 2<times>(1-t))<line_sep><return>smooth(new_t)<block_end><def_stmt>mid_right_mid_left_mid steps round=<false><block_start>t=torch.linspace(0.0 1.0 steps)<line_sep>ltr=there_and_back(t)<line_sep>left_to_mid_to_left=ltr/2<line_sep>mid_to_right_to_mid=left_to_mid_to_left+0.5<line_sep>mid_to_left=torch.flip(left_to_mid_to_left[:steps<floordiv>2] (0 ))<line_sep>mid_to_left_to_mid=torch.cat([mid_to_left torch.flip(mid_to_left (0 ))])<line_sep>out=torch.flip(torch.cat([mid_to_right_to_mid mid_to_left_to_mid]) (0 ))<if_stmt>round# [0, steps-1]
<block_start>out=out.mul(steps-1).round().long()<block_end><else_stmt># [-1, 1]
<block_start>out=out.add(-0.5).mul(2)<block_end><return>out<block_end><def_stmt>left_to_right steps round=<false><block_start>t=torch.linspace(0.0 1.0 steps)<line_sep>out=there_and_back(t)<if_stmt>round<block_start>out=out.mul(steps-1).round().long()<block_end><else_stmt><block_start>out.add_(-0.5).mul_(2)<block_end><return>out<block_end>
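# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Builds one mid -> right -> mid -> left -> mid interpolation path, both as values mapped
# to roughly [-1, 1] and as rounded integer indices into a `steps`-frame sequence, the two
# forms a visualization loop would typically consume. `steps=8` is an arbitrary choice.
if __name__ == "__main__":
    path_values = mid_right_mid_left_mid(steps=8)
    frame_indices = mid_right_mid_left_mid(steps=8, round=True)
    print(path_values)
    print(frame_indices)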
|
<import_stmt>unittest<import_from_stmt>django http<import_from_stmt>django.contrib.messages.middleware MessageMiddleware<class_stmt>MiddlewareTests(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.middleware=MessageMiddleware()<block_end><def_stmt>test_response_without_messages self<block_start>"""
MessageMiddleware is tolerant of messages not existing on request.
"""<line_sep>request=http.HttpRequest()<line_sep>response=http.HttpResponse()<line_sep>self.middleware.process_response(request response)<block_end><block_end>
|
<import_from_future_stmt> absolute_import division print_function<import_stmt>sys<import_stmt>os<import_stmt>time<import_stmt>requests<import_stmt>json<import_stmt>logging<import_stmt>numpy<as>np<if_stmt>sys.version_info<l>(3 0)<block_start><import_stmt>subprocess32<as>subprocess<block_end><else_stmt><block_start><import_stmt>subprocess<block_end>cur_dir=os.path.dirname(os.path.abspath(__file__))<line_sep>sys.path.insert(0 os.path.abspath("%s/../../clipper_admin"%cur_dir))<line_sep>sys.path.insert(0 os.path.abspath("%s/.."%cur_dir))<import_from_stmt>test_utils create_docker_connection headers BenchmarkException<line_sep>logging.basicConfig(format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s' datefmt='%y-%m-%d:%H:%M:%S' level=logging.INFO)<line_sep>logger=logging.getLogger(__name__)<line_sep>APP_NAME="rtest-app"<line_sep>APP_DEFAULT_VALUE="NONE"<line_sep>APP_SLO=1000000<line_sep>INPUT_TYPE="doubles"<line_sep>MODEL_NAME="rtest-model"<line_sep>MODEL_VERSION=1<line_sep>MODEL_IMAGE_NAME="default-cluster-rtest-model:1"<def_stmt>create_application clipper_conn<block_start>clipper_conn.register_application(APP_NAME INPUT_TYPE APP_DEFAULT_VALUE APP_SLO)<line_sep>time.sleep(1)<block_end><def_stmt>deploy_and_link_model clipper_conn<block_start>subprocess.check_call(["Rscript" "build_test_model.R"])<line_sep>clipper_conn.deploy_model(MODEL_NAME MODEL_VERSION INPUT_TYPE MODEL_IMAGE_NAME)<line_sep>clipper_conn.link_model_to_app(app_name=APP_NAME model_name=MODEL_NAME)<block_end><def_stmt>send_requests clipper_conn<block_start>success=<false><line_sep>num_tries=0<while_stmt><not>success<and>num_tries<l>5<block_start>time.sleep(30)<line_sep>num_preds=25<line_sep>num_success=0<line_sep>addr=clipper_conn.get_query_addr()<line_sep>logger.info("ADDR: {}".format(addr))<for_stmt>i range(num_preds)<block_start>response=requests.post("http://%s/%s/predict"%(addr APP_NAME) headers=headers data=json.dumps({'input':list(np.random.random(30))}))<line_sep>result=response.json()<if_stmt>response.status_code<eq>requests.codes.ok<and><not>result["default"]<block_start>num_success<augadd>1<block_end><else_stmt><block_start>logger.warning(result)<block_end><block_end><if_stmt>num_success<l>num_preds<block_start>logger.error("Error: %d/%d predictions were default or unsuccessful"%(num_preds-num_success num_preds))<block_end><if_stmt>num_success<g>num_preds/2.0<block_start>success=<true><block_end>num_tries<augadd>1<block_end><if_stmt><not>success<block_start><raise>BenchmarkException("Error querying R model")<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><try_stmt><block_start>clipper_conn=create_docker_connection(cleanup=<true> start_clipper=<true>)<line_sep>time.sleep(10)<try_stmt><block_start>logger.info("Running R model deployment test")<line_sep>create_application(clipper_conn)<line_sep>deploy_and_link_model(clipper_conn)<line_sep>time.sleep(5)<line_sep>send_requests(clipper_conn)<line_sep>logger.info("R model deployment completed SUCCESSFULLY!")<block_end><except_stmt>BenchmarkException<as>e<block_start>logger.exception("BenchmarkException in R model deployment test")<line_sep>create_docker_connection(cleanup=<true> start_clipper=<false>)<line_sep>sys.exit(1)<block_end><else_stmt><block_start>create_docker_connection(cleanup=<true> start_clipper=<false>)<block_end><block_end><except_stmt>Exception<as>e<block_start>logger.exception("Exception")<line_sep>create_docker_connection(cleanup=<true> start_clipper=<false>)<line_sep>sys.exit(1)<block_end><block_end>
|
<import_stmt>os<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>tqdm tqdm<import_stmt>bf3s.algorithms.fewshot.fewshot<as>fewshot<import_stmt>bf3s.utils<as>utils<def_stmt>compute_top1_and_top5_accuracy scores labels<block_start>_,topk_labels=scores.topk(5 1 <true> <true>)<line_sep>label_ind=labels.cpu().numpy()<line_sep>topk_ind=topk_labels.cpu().numpy()<line_sep>top1_correct=topk_ind[: 0]<eq>label_ind<line_sep>top5_correct=np.sum(topk_ind<eq>label_ind.reshape((-1 1)) axis=1)<line_sep><return>top1_correct.astype(float) top5_correct.astype(float)<block_end><def_stmt>softmax_with_novel_prior scores novel_inds base_inds prior_m<block_start>scores=torch.exp(scores)<line_sep>scores_novel=scores[: novel_inds]<line_sep>scores_base=scores[: base_inds]<line_sep>tol=0.0000001<line_sep>scores_novel<augmul>prior_m/(tol+torch.sum(scores_novel dim=1 keepdim=<true>).expand_as(scores_novel))<line_sep>scores_base<augmul>(1.0-prior_m)/(tol+torch.sum(scores_base dim=1 keepdim=<true>).expand_as(scores_base))<line_sep>scores[: novel_inds]=scores_novel<line_sep>scores[: base_inds]=scores_base<line_sep><return>scores<block_end><class_stmt>ImageNetLowShot(fewshot.FewShot)<block_start>"""Routines for evaluating a few-shot model on the ImageNet-FS benchmark."""<def_stmt>__init__ self opt _run=<none> _log=<none><block_start>super().__init__(opt _run _log)<line_sep>self.keep_best_model_metric_name="top5_novel"<block_end><def_stmt>preprocess_novel_training_data self training_data<block_start>"""Preprocess the novel training data."""<line_sep>images_train,labels_train,Kids,num_base,num_novel=training_data<line_sep>self.num_base=num_base<line_sep>self.num_novel=num_novel<line_sep># Insert an extra singleton dimension.
images_train=images_train.unsqueeze(dim=0)<line_sep>labels_train=labels_train.unsqueeze(dim=0)<line_sep>Kids=Kids.unsqueeze(dim=0)<line_sep>self.tensors["images_train"].resize_(images_train.size()).copy_(images_train)<line_sep>self.tensors["labels_train"].resize_(labels_train.size()).copy_(labels_train)<line_sep>self.tensors["Kids"].resize_(Kids.size()).copy_(Kids)<line_sep>labels_train=self.tensors["labels_train"]<line_sep>labels_train_1hot_size=list(labels_train.size())+[num_novel ]<line_sep>dim=len(labels_train_1hot_size)-1<line_sep>labels_train=labels_train.unsqueeze(dim=labels_train.dim())<line_sep>self.tensors["labels_train_1hot"].resize_(labels_train_1hot_size).fill_(0).scatter_(dim labels_train-num_base 1)<block_end><def_stmt>add_novel_categories self nove_cat_training_data<block_start>"""Add the training data of the novel categories to the model."""<line_sep>feature_extractor=self.networks["feature_extractor"]<line_sep>classifier=self.networks["classifier"]<line_sep>feature_extractor.eval()<line_sep>classifier.eval()<line_sep>self.preprocess_novel_training_data(nove_cat_training_data)<line_sep>images=self.tensors["images_train"].detach()<line_sep>labels_train_1hot=self.tensors["labels_train_1hot"].detach()<line_sep>Kids=self.tensors["Kids"].detach()<line_sep>base_ids=<none><if>(self.num_base<eq>0)<else>Kids[: :self.num_base].contiguous()<with_stmt>torch.no_grad()# *******************************************************************
# ****************** EXTRACT FEATS FROM EXEMPLARS *******************
<block_start>meta_batch_size=images.size(0)<line_sep>images=utils.convert_from_5d_to_4d(images)<line_sep>features_train=feature_extractor(images)<line_sep>features_train=utils.add_dimension(features_train meta_batch_size)<line_sep># *******************************************************************
# ****************** GET CLASSIFICATION WEIGHTS *********************
# The following routine returns the classification weight vectors of
# both the base and the novel categories. For the novel categories,
# the classification weight vectors are generated using the training
# features for those novel categories.
clsWeights=classifier.get_classification_weights(base_ids=base_ids features_train=features_train labels_train=labels_train_1hot )<line_sep># *******************************************************************
<block_end>self.tensors["clsWeights"]=clsWeights.clone().detach()<block_end><def_stmt>evaluate_model_on_test_images self data_loader base_classes novel_classes exp_id="" prior_m=0.8<block_start>"""Evaluate the model.
It is assumed that the user has already called the routine
add_novel_categories() before calling this function.
Args:
data_loader: data loader that feeds test images and labels in order
to evaluate the model.
base_classes: A list with the labels of the base categories that
will be used for evaluation.
novel_classes: A list with the labels of the novel categories that
will be used for evaluation.
exp_id: A string with the id of the experiment.
prior_m: A scalar in the range [0, 1.0] that represents the prior
for whether a test image comes from the novel or base classes.
"""<line_sep>feature_extractor=self.networks["feature_extractor"]<line_sep>classifier=self.networks["classifier"]<line_sep>feature_extractor.eval()<line_sep>classifier.eval()<line_sep>clsWeights=self.tensors["clsWeights"]<line_sep>both_classes=base_classes+novel_classes<line_sep># Invalid classes are those that belong neither to the base
# nor to the novel classes.
nKall=self.num_base+self.num_novel<line_sep>not_valid_classes=list(set(range(nKall)).difference(set(both_classes)))<line_sep>device=self.device<line_sep>not_valid_classes_torch=torch.LongTensor(not_valid_classes).to(device)<line_sep>base_classes_torch=torch.LongTensor(base_classes).to(device)<line_sep>novel_classes_torch=torch.LongTensor(novel_classes).to(device)<line_sep>top1,top1_novel,top1_base,top1_prior=<none> <none> <none> <none><line_sep>top5,top5_novel,top5_base,top5_prior=<none> <none> <none> <none><line_sep>all_labels=<none><for_stmt>idx,batch enumerate(tqdm(data_loader(0)))<block_start>images_test,labels_test=batch<line_sep>self.tensors["images_test"].resize_(images_test.size()).copy_(images_test)<line_sep>self.tensors["labels_test"].resize_(labels_test.size()).copy_(labels_test)<line_sep>images_test=self.tensors["images_test"].detach()<line_sep>labels_test=self.tensors["labels_test"].detach()<line_sep>num_test_examples=images_test.size(0)<with_stmt>torch.no_grad()<block_start>features=feature_extractor(images_test)<line_sep>features=features.view(1 num_test_examples -1)<line_sep>scores=classifier.apply_classification_weights(features clsWeights)<line_sep>scores=scores.view(num_test_examples -1)<line_sep>scores_prior=softmax_with_novel_prior(scores.clone() novel_classes_torch base_classes_torch prior_m)<line_sep>scores[: not_valid_classes_torch]=-1000<line_sep>top1_this,top5_this=compute_top1_and_top5_accuracy(scores labels_test)<line_sep>top1=top1_this<if>top1<is><none><else>np.concatenate((top1 top1_this))<line_sep>top5=top5_this<if>top5<is><none><else>np.concatenate((top5 top5_this))<line_sep>scores_prior[: not_valid_classes_torch]=-1000<line_sep>top1_this,top5_this=compute_top1_and_top5_accuracy(scores_prior labels_test)<line_sep>top1_prior=(top1_this<if>top1_prior<is><none><else>np.concatenate((top1_prior top1_this)))<line_sep>top5_prior=(top5_this<if>top5_prior<is><none><else>np.concatenate((top5_prior top5_this)))<line_sep>scores_novel=scores.clone()<line_sep>scores_novel[: base_classes_torch]=-1000<line_sep>top1_this,top5_this=compute_top1_and_top5_accuracy(scores_novel labels_test)<line_sep>top1_novel=(top1_this<if>(top1_novel<is><none>)<else>np.concatenate((top1_novel top1_this)))<line_sep>top5_novel=(top5_this<if>(top5_novel<is><none>)<else>np.concatenate((top5_novel top5_this)))<line_sep>scores_base=scores.clone()<line_sep>scores_base[: novel_classes_torch]=-1000<line_sep>top1_this,top5_this=compute_top1_and_top5_accuracy(scores_base labels_test)<line_sep>top1_base=(top1_this<if>(top1_base<is><none>)<else>np.concatenate((top1_base top1_this)))<line_sep>top5_base=(top5_this<if>(top5_base<is><none>)<else>np.concatenate((top5_base top5_this)))<line_sep>labels_test_np=labels_test.cpu().numpy()<line_sep>all_labels=(labels_test_np<if>(all_labels<is><none>)<else>np.concatenate((all_labels labels_test_np)))<block_end><block_end>is_novel=np.in1d(all_labels np.array(novel_classes))<line_sep>is_base=np.in1d(all_labels 
np.array(base_classes))<line_sep>is_either=is_novel|is_base<line_sep>top1_novel=100<times>np.mean(top1_novel[is_novel])<line_sep>top1_novel_all=100<times>np.mean(top1[is_novel])<line_sep>top1_base=100<times>np.mean(top1_base[is_base])<line_sep>top1_base_all=100<times>np.mean(top1[is_base])<line_sep>top1_all=100<times>np.mean(top1[is_either])<line_sep>top1_all_prior=100<times>np.mean(top1_prior[is_either])<line_sep>top5_novel=100<times>np.mean(top5_novel[is_novel])<line_sep>top5_novel_all=100<times>np.mean(top5[is_novel])<line_sep>top5_base=100<times>np.mean(top5_base[is_base])<line_sep>top5_base_all=100<times>np.mean(top5[is_base])<line_sep>top5_all=100<times>np.mean(top5[is_either])<line_sep>top5_all_prior=100<times>np.mean(top5_prior[is_either])<line_sep>self.logger.info(f"Experiment {exp_id}")<line_sep>self.logger.info("==> Top 5 Accuracies: [Novel: {:3.2f} | Base: {:3.2f} | All {:3.2f} | Novel vs All {:3.2f} | Base vs All {:3.2f} | All prior {:3.2f}]".format(top5_novel top5_base top5_all top5_novel_all top5_base_all top5_all_prior ))<line_sep>self.logger.info("==> Top 1 Accuracies: [Novel: {:3.2f} | Base: {:3.2f} | All {:3.2f} | Novel vs All {:3.2f} | Base vs All {:3.2f} | All prior {:3.2f}]".format(top1_novel top1_base top1_all top1_novel_all top1_base_all top1_all_prior ))<line_sep>results_array=np.array([top5_novel top5_base top5_all top5_novel_all top5_base_all top5_all_prior top1_novel top1_base top1_all top1_novel_all top1_base_all top1_all_prior ]).reshape(1 -1)<line_sep><return>results_array<block_end><def_stmt>lowshot_avg_results self results_all exp_id=""<block_start>results_all=np.concatenate(results_all axis=0)<line_sep>num_eval_experiments=results_all.shape[0]<line_sep>mu_results=results_all.mean(axis=0)<line_sep>top5_novel=mu_results[0]<line_sep>top5_base=mu_results[1]<line_sep>top5_all=mu_results[2]<line_sep>top5_novel_all=mu_results[3]<line_sep>top5_base_all=mu_results[4]<line_sep>top5_all_prior=mu_results[5]<line_sep>top1_novel=mu_results[6]<line_sep>top1_base=mu_results[7]<line_sep>top1_all=mu_results[8]<line_sep>top1_novel_all=mu_results[9]<line_sep>top1_base_all=mu_results[10]<line_sep>top1_all_prior=mu_results[11]<line_sep>std_results=results_all.std(axis=0)<line_sep>ci95_results=1.96<times>std_results/np.sqrt(results_all.shape[0])<line_sep>top5_novel_ci95=ci95_results[0]<line_sep>top5_base_ci95=ci95_results[1]<line_sep>top5_all_ci95=ci95_results[2]<line_sep>top5_novel_all_ci95=ci95_results[3]<line_sep>top5_base_all_ci95=ci95_results[4]<line_sep>top5_all_prior_ci95=ci95_results[5]<line_sep>top1_novel_ci95=ci95_results[6]<line_sep>top1_base_ci95=ci95_results[7]<line_sep>top1_all_ci95=ci95_results[8]<line_sep>top1_novel_all_ci95=ci95_results[9]<line_sep>top1_base_all_ci95=ci95_results[10]<line_sep>top1_all_prior_ci95=ci95_results[11]<line_sep>self.logger.info("----------------------------------------------------------------")<line_sep>self.logger.info(f"Average results of {num_eval_experiments} experiments: {exp_id}")<line_sep>self.logger.info("==> Top 5 Accuracies: [Novel: {:3.2f} | Base: {:3.2f} | All {:3.2f} | Novel vs All {:3.2f} | Base vs All {:3.2f} | All prior {:3.2f}]".format(top5_novel top5_base top5_all top5_novel_all top5_base_all top5_all_prior ))<line_sep>self.logger.info("==> Top 5 conf. 
intervals: [Novel: {:3.2f} | Base: {:3.2f} | All {:3.2f} | Novel vs All {:3.2f} | Base vs All {:3.2f} | All prior {:3.2f}]".format(top5_novel_ci95 top5_base_ci95 top5_all_ci95 top5_novel_all_ci95 top5_base_all_ci95 top5_all_prior_ci95 ))<line_sep>self.logger.info("----------------------------------------------------------------")<line_sep>self.logger.info("==> Top 1 Accuracies: [Novel: {:3.2f} | Base: {:3.2f} | All {:3.2f} | Novel vs All {:3.2f} | Base vs All {:3.2f} | All prior {:3.2f}]".format(top1_novel top1_base top1_all top1_novel_all top1_base_all top1_all_prior ))<line_sep>self.logger.info("==> Top 1 conf. intervals: [Novel: {:3.2f} | Base: {:3.2f} | All {:3.2f} | Novel vs All {:3.2f} | Base vs All {:3.2f} | All prior {:3.2f}]".format(top1_novel_ci95 top1_base_ci95 top1_all_ci95 top1_novel_all_ci95 top1_base_all_ci95 top1_all_prior_ci95 ))<line_sep>self.logger.info("----------------------------------------------------------------")<line_sep>results={"top5_novel":round(top5_novel 2) "top5_base":round(top5_base 2) "top5_all":round(top5_all 2) "top5_novel_all":round(top5_novel_all 2) "top5_base_all":round(top5_base_all 2) "top5_all_prior":round(top5_all_prior 2) "top5_novel_ci95":round(top5_novel_ci95 2) "top5_base_ci95":round(top5_base_ci95 2) "top5_all_ci95":round(top5_all_ci95 2) "top5_novel_all_ci95":round(top5_novel_all_ci95 2) "top5_base_all_ci95":round(top5_base_all_ci95 2) "top5_all_prior_ci95":round(top5_all_prior_ci95 2) }<line_sep><return>results<block_end><def_stmt>evaluate self dloader num_eval_exp=20 prior=0.8 suffix=""<block_start>self.logger.info("Evaluating: %s"%os.path.basename(self.exp_dir))<line_sep>self.logger.info("Num exemplars: %d"%dloader.n_exemplars)<line_sep>self.logger.info("Num evaluation experiments: %d"%num_eval_exp)<line_sep>self.logger.info("Prior: %f"%prior)<line_sep>results=[]<line_sep># Run args_opt.num_exp different number of evaluation experiments (each
# time sampling a different set of training images for the novel
# categories).
<for_stmt>exp_id range(num_eval_exp)# Sample training data for the novel categories from the training
# set of ImageNet.
<block_start>nove_cat_data=dloader.sample_training_data_for_novel_categories(exp_id=exp_id)<line_sep># Feed the training data of the novel categories to the algorithm.
self.add_novel_categories(nove_cat_data)<line_sep># Evaluate on the validation images of ImageNet.
results_this=self.evaluate_model_on_test_images(data_loader=dloader base_classes=dloader.base_category_label_indices() novel_classes=dloader.novel_category_label_indices() exp_id="Exp_id = "+str(exp_id) prior_m=prior )<line_sep>results.append(results_this)<block_end># Print the average results.
self.logger.info("Evaluating: %s"%os.path.basename(self.exp_dir))<line_sep>avg_results=self.lowshot_avg_results(results exp_id="")<line_sep>eval_stats=utils.DAverageMeter("eval" self._run)<line_sep>eval_stats.update(avg_results)<line_sep>eval_stats.log()<line_sep>self.add_stats_to_tensorboard_writer(eval_stats.average() "test_")<line_sep><return>eval_stats<block_end><block_end>
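# --- Illustrative numeric check (added for clarity; not part of the original module) ---
# softmax_with_novel_prior() above redistributes the probability mass so that, per row,
# the novel columns sum to prior_m and the base columns to 1 - prior_m. The class indices
# and scores below are made up.
if __name__ == "__main__":
    scores = torch.tensor([[2.0, 1.0, 0.5, 0.2]])
    base_inds = torch.tensor([0, 1])
    novel_inds = torch.tensor([2, 3])
    probs = softmax_with_novel_prior(scores.clone(), novel_inds, base_inds, prior_m=0.8)
    print(probs[:, novel_inds].sum().item())  # ~0.8
    print(probs[:, base_inds].sum().item())   # ~0.2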
|
# -*- coding: utf-8 -*-
<import_stmt>sys os importlib pdb random datetime collections pickle cv2 requests<import_stmt>matplotlib.pyplot<as>plt numpy<as>np scipy.spatial.distance<import_from_stmt>sklearn svm metrics calibration<import_from_stmt>PIL Image ExifTags<line_sep>random.seed(0)<line_sep>################################
# ImageInfo class and helpers
################################
<class_stmt>ImageInfo(object)<block_start>allFeatures=[]<def_stmt>__init__ self fname subdir parent=<none><block_start>self.fname=fname<line_sep>self.subdir=subdir<line_sep>self.children=[]<line_sep>self.parent=parent<if_stmt>parent<block_start>self.parent=self.shallowCopy(parent)<block_end><block_end><def_stmt>getFeat self<block_start><if_stmt>self.allFeatures<eq>[]<block_start><raise>Exception("Need to set/load DNN features first using e.g. this line 'ImageInfo.allFeatures = loadFromPickle(featuresPath)'")<block_end>key=self.subdir+"/"+self.fname<line_sep>feat=np.array(self.allFeatures[key] np.float32)<assert_stmt>(len(feat)<eq>4096<or>len(feat)<eq>2048<or>len(feat)<eq>512<or>len(feat)<eq>25088)<line_sep><return>feat<block_end><def_stmt>getImg self rootDir<block_start>imgPath=self.getImgPath(rootDir)<line_sep><return>imread(imgPath)<block_end><def_stmt>getImgPath self rootDir<block_start><return>rootDir+self.subdir+"/"+self.fname<block_end><def_stmt>addChild self node<block_start>node.parent=self<line_sep>self.children.append(node)<block_end><def_stmt>isSameClassAsParent self<block_start><return>self.subdir<eq>self.parent.subdir<block_end><def_stmt>shallowCopy self node<block_start><return>ImageInfo(node.fname node.subdir node.parent)<block_end><def_stmt>display self<block_start>print("Parent: "+self.node2Str(self))<for_stmt>childIndex,child enumerate(self.children)<block_start>print(" Child {:4} : {}".format(childIndex self.node2Str(child)))<block_end><block_end><def_stmt>node2Str self node<block_start><return>("fname = {}, subdir={}".format(node.fname node.subdir))<block_end><block_end>#, node.parent)
<def_stmt>getImgPaths imgInfos rootDir=""<block_start>paths=set()<for_stmt>imgInfo imgInfos<block_start>paths.add(rootDir+"/"+imgInfo.subdir+"/"+imgInfo.fname)<for_stmt>child imgInfo.children<block_start>paths.add(rootDir+"/"+child.subdir+"/"+child.fname)<block_end><block_end><return>paths<block_end><def_stmt>getRandomImgInfo imgFilenames subdirToExclude=<none><block_start>subdirs=list(imgFilenames.keys())<line_sep>subdir=getRandomListElement(subdirs)<while_stmt>subdir<eq>subdirToExclude<block_start>subdir=getRandomListElement(subdirs)<block_end>imgFilename=getRandomListElement(imgFilenames[subdir])<line_sep><return>ImageInfo(imgFilename subdir)<block_end>################################
# helper functions - svm
################################
<def_stmt>getImgPairsFeatures imgInfos metric boL2Normalize<block_start>feats=[]<line_sep>labels=[]<for_stmt>queryImgIndex,queryImgInfo enumerate(imgInfos)<block_start>queryFeat=queryImgInfo.getFeat()<if_stmt>boL2Normalize<block_start>queryFeat<augdiv>np.linalg.norm(queryFeat 2)<block_end><for_stmt>refImgInfo queryImgInfo.children<block_start>refFeat=refImgInfo.getFeat()<if_stmt>boL2Normalize<block_start>refFeat<augdiv>np.linalg.norm(refFeat 2)<block_end># Evaluate difference between the two images
featDiff=queryFeat-refFeat<if_stmt>metric.lower()<eq>'diff'<block_start>feat=featDiff<block_end><elif_stmt>metric.lower()<eq>'l1'<block_start>feat=abs(featDiff)<block_end><elif_stmt>metric.lower()<eq>'l2'<block_start>feat=featDiff<power>2<block_end><else_stmt><block_start><raise>Exception("Unknown metric: "+metric)<block_end>feats.append(np.float32(feat))<line_sep>labels.append(int(refImgInfo.isSameClassAsParent()))<block_end><block_end><return>feats labels<block_end><def_stmt>mineHardNegatives learner imgFilenames nrAddPerIter featureDifferenceMetric boL2Normalize maxNrRounds initialThreshold=1<block_start>hardNegatives=[]<line_sep>roundCounterHardNegFound=0<line_sep>hardNegThreshold=initialThreshold<line_sep># Hard negative mining by repeatedly selecting a pair of images and adding to the
# training set if they are misclassified by at least a certain threshold.
<for_stmt>roundCounter range(maxNrRounds)<block_start>roundCounterHardNegFound<augadd>1<if_stmt>len(hardNegatives)<ge>nrAddPerIter<block_start><break><block_end># Reduce threshold if no hard negative found after 1000 rounds
<if_stmt>roundCounterHardNegFound<g>1000<block_start>hardNegThreshold<augdiv>2.0<line_sep>roundCounterHardNegFound=0<line_sep>print(" Hard negative mining sampling round {:6d}: found {:4d} number of hard negatives; reducing hard negative threshold to {:3.3f}.".format(roundCounter len(hardNegatives) hardNegThreshold))<block_end># Sample two images from different ground truth class
ImageInfo1=getRandomImgInfo(imgFilenames)<line_sep>ImageInfo2=getRandomImgInfo(imgFilenames ImageInfo1.subdir)<line_sep>ImageInfo1.addChild(ImageInfo2)<line_sep># Evaluate svm
featCandidate,labelCandidate=getImgPairsFeatures([ImageInfo1] featureDifferenceMetric boL2Normalize)<assert_stmt>(len(labelCandidate)<eq>1<and>labelCandidate[0]<eq>0<and>ImageInfo1.subdir<ne>ImageInfo2.subdir)<line_sep>score=learner.decision_function(featCandidate)<line_sep># If confidence is sufficiently high then add to list of hard negatives
<if_stmt>score<g>hardNegThreshold<block_start>hardNegatives.append(featCandidate[0])<line_sep>roundCounterHardNegFound=0<block_end><block_end>print(" Hard negatives found: {}, after {} sampling rounds".format(len(hardNegatives) roundCounter+1))<line_sep><return>hardNegatives<block_end><def_stmt>getSampleWeights labels negPosRatio=1<block_start>indsNegatives=np.where(np.array(labels)<eq>0)[0]<line_sep>indsPositives=np.where(np.array(labels)<ne>0)[0]<line_sep>negWeight=float(negPosRatio)<times>len(indsPositives)/len(indsNegatives)<line_sep>weights=np.array([1.0]<times>len(labels))<line_sep>weights[indsNegatives]=negWeight<assert_stmt>(abs(sum(weights[indsNegatives])-negPosRatio<times>sum(weights[indsPositives]))<l>10<power>-3)<line_sep><return>weights<block_end><def_stmt>plotScoreVsProbability learner feats_train feats_test<block_start>probsTest=learner.predict_proba(feats_test)[: 1]<line_sep>probsTrain=learner.predict_proba(feats_train)[: 1]<line_sep>scoresTest=learner.base_estimator.decision_function(feats_test)<line_sep>scoresTrain=learner.base_estimator.decision_function(feats_train)<line_sep>plt.scatter(scoresTrain probsTrain c='r' label='train')<line_sep>plt.scatter(scoresTest probsTest c='b' label='test')<line_sep>plt.ylim([-0.02 1.02])<line_sep>plt.xlabel('SVM score')<line_sep>plt.ylabel('Probability')<line_sep>plt.title('Calibrated SVM - training set (red), test set (blue)')<line_sep><return>plt<block_end>################################
# helper functions - general
################################
<def_stmt>getImagePairs imgFilenames maxQueryImgsPerSubdir maxNegImgsPerQueryImg# Get sub-directories with at least two images in them
<block_start>querySubdirs=[s<for>s imgFilenames.keys()<if>len(imgFilenames[s])<g>1]<line_sep># Generate pos and neg pairs for each subdir
imgInfos=[]<for_stmt>querySubdir querySubdirs<block_start>queryFilenames=randomizeList(imgFilenames[querySubdir])<line_sep># Pick at most 'maxQueryImgsPerSubdir' query images at random
<for_stmt>queryFilename queryFilenames[:maxQueryImgsPerSubdir]<block_start>queryInfo=ImageInfo(queryFilename querySubdir)<line_sep># Add one positive example at random
refFilename=getRandomListElement(list(set(queryFilenames)-set([queryFilename])))<line_sep>queryInfo.children.append(ImageInfo(refFilename querySubdir queryInfo))<assert_stmt>(refFilename<ne>queryFilename)<line_sep># Add multiple negative examples at random
<for_stmt>_ range(maxNegImgsPerQueryImg)<block_start>refSubdir=getRandomListElement(list(set(querySubdirs)-set([querySubdir])))<line_sep>refFilename=getRandomListElement(imgFilenames[refSubdir])<line_sep>queryInfo.children.append(ImageInfo(refFilename refSubdir queryInfo))<assert_stmt>(refSubdir<ne>querySubdir)<block_end># Store
queryInfo.children=randomizeList(queryInfo.children)<line_sep>imgInfos.append(queryInfo)<block_end><block_end>print("Generated image pairs for {} query images, each with 1 positive image pair and {} negative image pairs.".format(len(imgInfos) maxNegImgsPerQueryImg))<line_sep><return>imgInfos<block_end><def_stmt>getImgLabelMap imgFilenames imgDir lut=<none><block_start>table=[]<for_stmt>label imgFilenames.keys()<block_start><for_stmt>imgFilename imgFilenames[label]<block_start>imgPath=imgDir+"/"+str(label)+"/"+imgFilename<if_stmt>lut<ne><none><block_start>table.append((imgPath lut[label]))<block_end><else_stmt><block_start>table.append((imgPath label))<block_end><block_end><block_end><return>table<block_end><def_stmt>balanceDatasetUsingDuplicates data<block_start>duplicates=[]<line_sep>counts=collections.Counter(getColumn(data 1))<line_sep>print("Before balancing of training set:")<for_stmt>item counts.items()<block_start>print(" Class {:3}: {:5} exmples".format(*item))<block_end># Get duplicates to balance dataset
targetCount=max(getColumn(counts.items() 1))<while_stmt>min(getColumn(counts.items() 1))<l>targetCount<block_start><for_stmt>imgPath,label data<block_start><if_stmt>counts[label]<l>targetCount<block_start>duplicates.append((imgPath label))<line_sep>counts[label]<augadd>1<block_end><block_end><block_end># Add duplicates to original dataset
print("After balancing: all classes now have {} images; added {} duplicates to the {} original images.".format(targetCount len(duplicates) len(data)))<line_sep>data<augadd>duplicates<line_sep>counts=collections.Counter(getColumn(data 1))<assert_stmt>(min(counts.values())<eq>max(counts.values())<eq>targetCount)<line_sep><return>data<block_end><def_stmt>printFeatLabelInfo title feats labels preString=" "<block_start>print(title)<line_sep>print(preString+"Number of examples: {}".format(len(feats)))<line_sep>print(preString+"Number of positive examples: {}".format(sum(np.array(labels)<eq>1)))<line_sep>print(preString+"Number of negative examples: {}".format(sum(np.array(labels)<eq>0)))<line_sep>print(preString+"Dimension of each example: {}".format(len(feats[0])))<block_end><def_stmt>sklearnAccuracy learner feats gtLabels<block_start>estimatedLabels=learner.predict(feats)<line_sep>confusionMatrix=metrics.confusion_matrix(gtLabels estimatedLabels)<line_sep><return>accsConfusionMatrix(confusionMatrix)<block_end>####################################
# Subset of helper library
# used in image similarity tutorial
####################################
# Typical meaning of variable names -- Computer Vision:
# pt = 2D point (column,row)
# img = image
# width,height (or w/h) = image dimensions
# bbox = bbox object (stores: left, top,right,bottom co-ordinates)
# rect = rectangle (order: left, top, right, bottom)
# angle = rotation angle in degree
# scale = image up/downscaling factor
# Typical meaning of variable names -- general:
# lines,strings = list of strings
# line,string = single string
# xmlString = string with xml tags
# table = 2D row/column matrix implemented using a list of lists
# row,list1D = single row in a table, i.e. single 1D-list
# rowItem = single item in a row
# list1D = list of items, not necessarily strings
# item = single item of a list1D
# slotValue = e.g. "terminator" in: play <movie> terminator </movie>
# slotTag = e.g. "<movie>" or "</movie>" in: play <movie> terminator </movie>
# slotName = e.g. "movie" in: play <movie> terminator </movie>
# slot = e.g. "<movie> terminator </movie>" in: play <movie> terminator </movie>
<def_stmt>readFile inputFile# Reading as binary, to avoid problems with end-of-text characters.
# Note that readlines() does not remove the line ending characters
<block_start><with_stmt>open(inputFile 'rb')<as>f<block_start>lines=f.readlines()<block_end><for_stmt>i,s enumerate(lines)<block_start>removeLineEndCharacters(s.decode('utf8'))<block_end><return>[removeLineEndCharacters(s.decode('utf8'))<for>s lines]<line_sep><block_end><def_stmt>writeFile outputFile lines header=<none><block_start><with_stmt>open(outputFile 'w')<as>f<block_start><if_stmt>header<ne><none><block_start>f.write("%s\n"%header)<block_end><for_stmt>line lines<block_start>f.write("%s\n"%line)<block_end><block_end><block_end><def_stmt>writeBinaryFile outputFile data<block_start><with_stmt>open(outputFile 'wb')<as>f<block_start>bytes=f.write(data)<block_end><return>bytes<block_end><def_stmt>readTable inputFile delimiter='\t'<block_start>lines=readFile(inputFile)<line_sep><return>splitStrings(lines delimiter)<block_end><def_stmt>writeTable outputFile table header=<none><block_start>lines=tableToList1D(table)<line_sep>writeFile(outputFile lines header)<block_end><def_stmt>loadFromPickle inputFile<block_start><with_stmt>open(inputFile 'rb')<as>filePointer<block_start>data=pickle.load(filePointer)<block_end><return>data<block_end><def_stmt>saveToPickle outputFile data<block_start>p=pickle.Pickler(open(outputFile "wb"))<line_sep>p.fast=<true><line_sep>p.dump(data)<block_end><def_stmt>makeDirectory directory<block_start><if_stmt><not>os.path.exists(directory)<block_start>os.makedirs(directory)<block_end><block_end><def_stmt>getFilesInDirectory directory postfix=""<block_start><if_stmt><not>os.path.exists(directory)<block_start><return>[]<block_end>fileNames=[s<for>s os.listdir(directory)<if><not>os.path.isdir(directory+"/"+s)]<if_stmt><not>postfix<or>postfix<eq>""<block_start><return>fileNames<block_end><else_stmt><block_start><return>[s<for>s fileNames<if>s.lower().endswith(postfix)]<block_end><block_end><def_stmt>getDirectoriesInDirectory directory<block_start><return>[s<for>s os.listdir(directory)<if>os.path.isdir(directory+"/"+s)]<block_end><def_stmt>downloadFromUrl url boVerbose=<true><block_start>data=[]<line_sep>url=url.strip()<try_stmt><block_start>r=requests.get(url timeout=1)<line_sep>data=r.content<block_end><except_stmt><block_start><if_stmt>boVerbose<block_start>print('Error downloading url {0}'.format(url))<block_end><block_end><if_stmt>boVerbose<and>data<eq>[]# and r.status_code != 200:
<block_start>print('Error {} downloading url {}'.format(r.status_code url))<block_end><return>data<block_end><def_stmt>removeLineEndCharacters line<block_start><if_stmt>line.endswith('\r\n')<block_start><return>line[:-2]<block_end><elif_stmt>line.endswith('\n')<block_start><return>line[:-1]<block_end><else_stmt><block_start><return>line<block_end><block_end><def_stmt>splitString string delimiter='\t' columnsToKeepIndices=<none><block_start><if_stmt>string<eq><none><block_start><return><none><block_end>items=string.split(delimiter)<if_stmt>columnsToKeepIndices<ne><none><block_start>items=getColumns([items] columnsToKeepIndices)<line_sep>items=items[0]<block_end><return>items<block_end><def_stmt>splitStrings strings delimiter columnsToKeepIndices=<none><block_start>table=[splitString(string delimiter columnsToKeepIndices)<for>string strings]<line_sep><return>table<block_end><def_stmt>getColumn table columnIndex<block_start>column=[]<for_stmt>row table<block_start>column.append(row[columnIndex])<block_end><return>column<block_end><def_stmt>tableToList1D table delimiter='\t'<block_start><return>[delimiter.join([str(s)<for>s row])<for>row table]<block_end><def_stmt>ToIntegers list1D<block_start><return>[int(float(x))<for>x list1D]<block_end><def_stmt>mergeDictionaries dict1 dict2<block_start>tmp=dict1.copy()<line_sep>tmp.update(dict2)<line_sep><return>tmp<block_end><def_stmt>getRandomNumber low high<block_start>randomNumber=random.randint(low high)<line_sep><return>randomNumber<block_end><def_stmt>randomizeList listND containsHeader=<false><block_start><if_stmt>containsHeader<block_start>header=listND[0]<line_sep>listND=listND[1:]<block_end>random.shuffle(listND)<if_stmt>containsHeader<block_start>listND.insert(0 header)<block_end><return>listND<block_end><def_stmt>getRandomListElement listND containsHeader=<false><block_start><if_stmt>containsHeader<block_start>index=getRandomNumber(1 len(listND)-1)<block_end><else_stmt><block_start>index=getRandomNumber(0 len(listND)-1)<block_end><return>listND[index]<block_end><def_stmt>accsConfusionMatrix confMatrix<block_start>perClassAccs=[(1.0<times>row[rowIndex]/sum(row))<for>rowIndex,row enumerate(confMatrix)]<line_sep><return>perClassAccs<block_end><def_stmt>computeVectorDistance vec1 vec2 method boL2Normalize weights=[] bias=[] learner=[]# Pre-processing
<block_start><if_stmt>boL2Normalize<block_start>vec1=vec1/np.linalg.norm(vec1 2)<line_sep>vec2=vec2/np.linalg.norm(vec2 2)<block_end><assert_stmt>(len(vec1)<eq>len(vec2))<line_sep># Distance computation
vecDiff=vec1-vec2<line_sep>method=method.lower()<if_stmt>method<eq>'random'<block_start>dist=random.random()<block_end><elif_stmt>method<eq>'l1'<block_start>dist=sum(abs(vecDiff))<block_end><elif_stmt>method<eq>'l2'<block_start>dist=np.linalg.norm(vecDiff 2)<block_end><elif_stmt>method<eq>'normalizedl2'<block_start>a=vec1/np.linalg.norm(vec1 2)<line_sep>b=vec2/np.linalg.norm(vec2 2)<line_sep>dist=np.linalg.norm(a-b 2)<block_end><elif_stmt>method<eq>"cosine"<block_start>dist=scipy.spatial.distance.cosine(vec1 vec2)<block_end><elif_stmt>method<eq>"correlation"<block_start>dist=scipy.spatial.distance.correlation(vec1 vec2)<block_end><elif_stmt>method<eq>"chisquared"<block_start>dist=chiSquared(vec1 vec2)<block_end><elif_stmt>method<eq>"normalizedchisquared"<block_start>a=vec1/sum(vec1)<line_sep>b=vec2/sum(vec2)<line_sep>dist=chiSquared(a b)<block_end><elif_stmt>method<eq>"hamming"<block_start>dist=scipy.spatial.distance.hamming(vec1<g>0 vec2<g>0)<block_end><elif_stmt>method<eq>"mahalanobis"#assumes covariance matric is provided, e..g. using: sampleCovMat = np.cov(np.transpose(np.array(feats)))
<block_start>dist=scipy.spatial.distance.mahalanobis(vec1 vec2 sampleCovMat)<block_end><elif_stmt>method<eq>'weightedl1'<block_start>feat=np.float32(abs(vecDiff))<line_sep>dist=np.dot(weights feat)+bias<line_sep>dist=-float(dist)<line_sep># assert(abs(dist - learnerL1.decision_function([feat])) < 0.000001)
<block_end><elif_stmt>method<eq>'weightedl2'<block_start>feat=(vecDiff)<power>2<line_sep>dist=np.dot(weights feat)+bias<line_sep>dist=-float(dist)<block_end><elif_stmt>method<eq>'weightedl2prob'<block_start>feat=(vecDiff)<power>2<line_sep>dist=learner.predict_proba([feat])[0][1]<line_sep>dist=float(dist)<block_end># elif method == 'learnerscore':
# feat = (vecDiff) ** 2
# dist = learner.base_estimator.decision_function([feat])[0]
# dist = -float(dist)
<else_stmt><block_start><raise>Exception("Distance method unknown: "+method)<block_end><assert_stmt>(<not>np.isnan(dist))<line_sep><return>dist<block_end><def_stmt>rotationFromExifTag imgPath<block_start>TAGSinverted={v:k<for>k,v list(ExifTags.TAGS.items())}<line_sep>orientationExifId=TAGSinverted['Orientation']<try_stmt><block_start>imageExifTags=Image.open(imgPath)._getexif()<block_end><except_stmt><block_start>imageExifTags=<none><block_end>#rotate the image if orientation exif tag is present
rotation=0<if_stmt>imageExifTags<ne><none><and>orientationExifId<ne><none><and>orientationExifId<in>imageExifTags<block_start>orientation=imageExifTags[orientationExifId]<if_stmt>orientation<eq>1<or>orientation<eq>0<block_start>rotation=0#no need to do anything
<block_end><elif_stmt>orientation<eq>6<block_start>rotation=-90<block_end><elif_stmt>orientation<eq>8<block_start>rotation=90<block_end><else_stmt><block_start><raise>Exception("ERROR: orientation = "+str(orientation)+" not_supported!")<block_end><block_end><return>rotation<block_end><def_stmt>imread imgPath boThrowErrorIfExifRotationTagSet=<true><block_start><if_stmt><not>os.path.exists(imgPath)<block_start><raise>Exception("ERROR: image path does not exist.")<block_end>rotation=rotationFromExifTag(imgPath)<if_stmt>boThrowErrorIfExifRotationTagSet<and>rotation<ne>0<block_start>print("Error: exif roation tag set, image needs to be rotated by %d degrees."%rotation)<block_end>img=cv2.imread(imgPath)<if_stmt>img<is><none><block_start><raise>Exception("ERROR: cannot load image "+imgPath)<block_end><if_stmt>rotation<ne>0<block_start>img=imrotate(img -90).copy()# To avoid occassional error: "TypeError: Layout of the output array img is incompatible with cv::Mat"
<block_end><return>img<block_end><def_stmt>imWidth input<block_start><return>imWidthHeight(input)[0]<block_end><def_stmt>imHeight input<block_start><return>imWidthHeight(input)[1]<block_end><def_stmt>imWidthHeight input<block_start><if_stmt>type(input)<is>str#or type(input) is unicode:
<block_start>width,height=Image.open(input).size# This does not load the full image
<block_end><else_stmt><block_start>width=input.shape[1]<line_sep>height=input.shape[0]<block_end><return>width height<block_end><def_stmt>imconvertCv2Numpy img<block_start>(b g r)=cv2.split(img)<line_sep><return>cv2.merge([r g b])<block_end><def_stmt>imconvertCv2Pil img<block_start>cv2_im=cv2.cvtColor(img cv2.COLOR_BGR2RGB)<line_sep>pil_im=Image.fromarray(cv2_im)<line_sep><return>pil_im<block_end><def_stmt>imconvertPil2Cv pilImg<block_start><return>imconvertPil2Numpy(pilImg)[: : ::-1]<block_end><def_stmt>imconvertPil2Numpy pilImg<block_start>rgb=pilImg.convert('RGB')<line_sep><return>np.array(rgb).copy()<block_end><def_stmt>imresize img scale interpolation=cv2.INTER_LINEAR<block_start><return>cv2.resize(img (0 0) fx=scale fy=scale interpolation=interpolation)<block_end><def_stmt>imresizeMaxDim img maxDim boUpscale=<false> interpolation=cv2.INTER_LINEAR<block_start>scale=1.0<times>maxDim/max(img.shape[:2])<if_stmt>scale<l>1<or>boUpscale<block_start>img=imresize(img scale interpolation)<block_end><else_stmt><block_start>scale=1.0<block_end><return>img scale<block_end><def_stmt>imresizeAndPad img width height padColor# resize image
<block_start>imgWidth,imgHeight=imWidthHeight(img)<line_sep>scale=min(float(width)/float(imgWidth) float(height)/float(imgHeight))<line_sep>imgResized=imresize(img scale)#, interpolation=cv2.INTER_NEAREST)
resizedWidth,resizedHeight=imWidthHeight(imgResized)<line_sep># pad image
top=int(max(0 np.round((height-resizedHeight)/2)))<line_sep>left=int(max(0 np.round((width-resizedWidth)/2)))<line_sep>bottom=height-top-resizedHeight<line_sep>right=width-left-resizedWidth<line_sep><return>cv2.copyMakeBorder(imgResized top bottom left right cv2.BORDER_CONSTANT value=padColor)<block_end><def_stmt>imrotate img angle<block_start>imgPil=imconvertCv2Pil(img)<line_sep>imgPil=imgPil.rotate(angle expand=<true>)<line_sep><return>imconvertPil2Cv(imgPil)<block_end><def_stmt>imshow img waitDuration=0 maxDim=<none> windowName='img'<block_start><if_stmt>isinstance(img str)# Test if 'img' is a string
<block_start>img=cv2.imread(img)<block_end><if_stmt>maxDim<is><not><none><block_start>scaleVal=1.0<times>maxDim/max(img.shape[:2])<if_stmt>scaleVal<l>1<block_start>img=imresize(img scaleVal)<block_end><block_end>cv2.imshow(windowName img)<line_sep>cv2.waitKey(waitDuration)<block_end>
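# --- Illustrative usage sketch (added for clarity; not part of the original helpers) ---
# Compares two random 512-dimensional feature vectors with a few of the metrics that
# computeVectorDistance() supports; the weighted/learner-based metrics are omitted here
# because they need a trained SVM.
if __name__ == "__main__":
    vec1 = np.random.rand(512)
    vec2 = np.random.rand(512)
    for metric in ["l1", "l2", "cosine", "normalizedl2"]:
        dist = computeVectorDistance(vec1, vec2, metric, boL2Normalize=True)
        print("{:>14}: {:.4f}".format(metric, dist))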
|
# -*- coding: utf-8 -*-
# @author: 丛戎
# @target: preprocessing logic for the time series prediction algorithm
<import_stmt>sys<import_from_stmt>ai_lib.time_series_prediction.algorithm.preprocess.data_preprocess_utils DataPreprocessUtils<line_sep>PY2=sys.version_info[0]<eq>2<class_stmt>DataPreprocessor(object)<block_start><def_stmt>data_preprocess self kpidata colname interval=<none> period=<none> process_method={} user_config={}<block_start>"""
:param kpidata:
:param colname:
:param interval: interval between data points, in seconds
:param period: number of data points in one period
:param process_method: parameters of the preprocessing logic, e.g. {'fillna':{'startdate': 0, 'enddate':0, 'interval' : '5min', 'fillvalue' :True}}
:param user_config: advanced configuration set by the user on the page
:return:
"""<line_sep>preprocessor=DataPreprocessUtils()<line_sep># First, remove duplicate rows from the data
kpidata=kpidata.drop_duplicates(['ts'])<line_sep># Then sort by ts
kpidata=kpidata.sort_values(by=['ts'] ascending=[1])<line_sep># Fill in missing values
<if_stmt>'fillna'<in>process_method.keys()# Compute the interpolation frequency from the data granularity
<block_start>interval_seconds=str(interval)+'S'<line_sep>fillvalue=process_method['fillna']['fillvalue']<if_stmt>"fillna_withzero"<in>user_config['advanced_config_map'].keys()<block_start>fillvalue=<false><block_end>kpidata=preprocessor.ts_fill_na(kpidata=kpidata startdate=kpidata['ts'].min() enddate=kpidata['ts'].max() freq=interval_seconds fillvalue=fillvalue)<block_end><return>kpidata<block_end><block_end>
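# --- Illustrative sketch of the gap-filling step (added for clarity) ---
# This uses plain pandas instead of the ai_lib DataPreprocessUtils.ts_fill_na() that the
# real code delegates to, so the exact fill policy behind the fillvalue flag is not claimed
# here; zeros are used only to make the example concrete and all timestamps/values are made
# up. It mirrors the surrounding steps: drop duplicate ts rows, sort by ts, then reindex to
# a fixed frequency derived from `interval` so that gaps become explicit rows.
if __name__ == "__main__":
    import pandas as pd

    kpidata = pd.DataFrame({
        "ts": pd.to_datetime(["2024-01-01 00:00", "2024-01-01 00:00", "2024-01-01 00:10"]),
        "value": [1.0, 1.0, 3.0],
    })
    interval = 300  # seconds, i.e. 5-minute granularity
    kpidata = kpidata.drop_duplicates(["ts"]).sort_values(by=["ts"])
    full_index = pd.date_range(kpidata["ts"].min(), kpidata["ts"].max(), freq=str(interval) + "S")
    kpidata = (kpidata.set_index("ts")
               .reindex(full_index, fill_value=0.0)
               .rename_axis("ts")
               .reset_index())
    print(kpidata)  # the missing 00:05 row now exists, filled with 0.0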
|
<import_from_future_stmt> absolute_import unicode_literals<import_from_stmt>django.db models<import_from_stmt>django.utils.functional cached_property<import_from_stmt>django.utils.translation ugettext<as>_<import_from_stmt>wagtail.core.models Page<import_from_stmt>wagtail.core.fields StreamField<import_from_stmt>wagtail.admin.edit_handlers StreamFieldPanel<import_from_stmt>wagtail.core blocks<import_from_stmt>wagtail.admin.edit_handlers FieldPanel MultiFieldPanel<import_from_stmt>wagtailgeowidget.edit_handlers GeoPanel<import_from_stmt>wagtailgeowidget.blocks GeoBlock<class_stmt>StandardPage(Page)<block_start>address=models.CharField(max_length=250 blank=<true> null=<true>)<line_sep>location=models.CharField(max_length=250 blank=<true> null=<true>)<line_sep>content_panels=Page.content_panels+[MultiFieldPanel([FieldPanel('address') GeoPanel('location' address_field='address') ] _('Geo details')) ]<def_stmt>get_context self request<block_start>data=super(StandardPage self).get_context(request)<line_sep><return>data<block_end>@cached_property<def_stmt>point self<block_start><import_from_stmt>wagtailgeowidget.helpers geosgeometry_str_to_struct<line_sep><return>geosgeometry_str_to_struct(self.location)<block_end>@property<def_stmt>lat self<block_start><return>self.point['y']<block_end>@property<def_stmt>lng self<block_start><return>self.point['x']<block_end><block_end><class_stmt>StreamPage(Page)<block_start>body=StreamField([('map' GeoBlock()) ('map_struct' blocks.StructBlock([('address' blocks.CharBlock(required=<true>)) # ('map', GeoBlock(address_field='address')),
] icon='user'))])<line_sep>content_panels=Page.content_panels+[StreamFieldPanel('body') ]<block_end>
|
<import_stmt>logging<import_stmt>colorlog<line_sep>fmt="{log_color}{levelname} {name}: {message}"<line_sep>colorlog.basicConfig(level=logging.DEBUG style="{" format=fmt stream=<none>)<line_sep>log=logging.getLogger()<line_sep>log.warning("hello")<line_sep>
|
current=[0 1]<line_sep>someList=[]<while_stmt><true><block_start><for_stmt>n range(0 2)<block_start>current[n]<augadd>1<block_end>print(current)<line_sep>someList.append(current[:])#here a copy of the list is made and appended, so the stored reference points to this copy, not to the original
<if_stmt>current<eq>[2 3]<block_start><break><block_end><block_end>print(someList)<line_sep>#https://pt.stackoverflow.com/q/425908/101
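The distinction the comment describes can be shown in isolation: appending the list object itself stores a reference, while appending a slice stores an independent copy.

current = [0, 1]
by_reference = []
by_copy = []
by_reference.append(current)   # stores a reference to the same list object
by_copy.append(current[:])     # stores an independent copy
current[0] = 99
print(by_reference)  # [[99, 1]] -> later mutation is visible
print(by_copy)       # [[0, 1]]  -> the copy is unaffected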
|
# Generated by Django 2.2.5 on 2019-09-12 13:35
<import_from_stmt>django.conf settings<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<import_stmt>django.utils.timezone<import_stmt>olympia.amo.fields<import_stmt>olympia.amo.models<import_stmt>olympia.translations.fields<class_stmt>Migration(migrations.Migration)<block_start>initial=<true><line_sep>dependencies=[('translations' '__first__') ('addons' '0001_initial') migrations.swappable_dependency(settings.AUTH_USER_MODEL) ]<line_sep>operations=[migrations.CreateModel(name='Collection' fields=[('created' models.DateTimeField(blank=<true> default=django.utils.timezone.now editable=<false>)) ('modified' models.DateTimeField(auto_now=<true>)) ('id' olympia.amo.fields.PositiveAutoField(primary_key=<true> serialize=<false>)) ('uuid' models.UUIDField(blank=<true> null=<true> unique=<true>)) ('nickname' models.CharField(blank=<true> max_length=30 null=<true> unique=<true>)) ('slug' models.CharField(blank=<true> max_length=30 null=<true>)) ('default_locale' models.CharField(db_column='defaultlocale' default='en-US' max_length=10)) ('type' models.PositiveIntegerField(choices=[(0 'Normal') (1 'Synchronized') (2 'Featured') (3 'Generated Recommendations') (4 'Favorites') (5 'Mobile') (6 'Anonymous')] db_column='collection_type' default=0)) ('listed' models.BooleanField(default=<true> help_text='Collections are either listed or private.')) ('application' models.PositiveIntegerField(blank=<true> choices=[(1 'Firefox') (61 'Firefox for Android')] db_column='application_id' null=<true>)) ('addon_count' models.PositiveIntegerField(db_column='addonCount' default=0)) ] options={'db_table':'collections' 'get_latest_by':'created' 'abstract':<false> 'base_manager_name':'objects' } bases=(olympia.amo.models.SearchMixin olympia.amo.models.SaveUpdateMixin models.Model) ) migrations.CreateModel(name='FeaturedCollection' fields=[('created' models.DateTimeField(blank=<true> default=django.utils.timezone.now editable=<false>)) ('modified' models.DateTimeField(auto_now=<true>)) ('id' olympia.amo.fields.PositiveAutoField(primary_key=<true> serialize=<false>)) ('application' models.PositiveIntegerField(choices=[(1 'Firefox') (61 'Firefox for Android')] db_column='application_id')) ('locale' models.CharField(max_length=10 null=<true>)) ('collection' models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to='bandwagon.Collection')) ] options={'db_table':'featured_collections' } bases=(olympia.amo.models.SearchMixin olympia.amo.models.SaveUpdateMixin models.Model) ) migrations.CreateModel(name='CollectionAddon' fields=[('created' models.DateTimeField(blank=<true> default=django.utils.timezone.now editable=<false>)) ('modified' models.DateTimeField(auto_now=<true>)) ('id' olympia.amo.fields.PositiveAutoField(primary_key=<true> serialize=<false>)) ('ordering' models.PositiveIntegerField(default=0 help_text='Add-ons are displayed in ascending order based on this field.')) ('addon' models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to='addons.Addon')) ('collection' models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to='bandwagon.Collection')) ('comments' olympia.translations.fields.LinkifiedField(blank=<true> db_column='comments' null=<true> on_delete=django.db.models.deletion.SET_NULL related_name='CollectionAddon_comments_set+' require_locale=<true> short=<true> to='translations.LinkifiedTranslation' to_field='id' unique=<true>)) ('user' models.ForeignKey(null=<true> on_delete=django.db.models.deletion.CASCADE to=settings.AUTH_USER_MODEL)) ] 
options={'db_table':'addons_collections' 'get_latest_by':'created' 'abstract':<false> 'base_manager_name':'objects' } bases=(olympia.amo.models.SearchMixin olympia.amo.models.SaveUpdateMixin models.Model) ) migrations.AddField(model_name='collection' name='addons' field=models.ManyToManyField(related_name='collections' through='bandwagon.CollectionAddon' to='addons.Addon') ) migrations.AddField(model_name='collection' name='author' field=models.ForeignKey(null=<true> on_delete=django.db.models.deletion.CASCADE related_name='collections' to=settings.AUTH_USER_MODEL) ) migrations.AddField(model_name='collection' name='description' field=olympia.translations.fields.NoURLsField(blank=<true> db_column='description' null=<true> on_delete=django.db.models.deletion.SET_NULL related_name='Collection_description_set+' require_locale=<false> short=<true> to='translations.NoURLsTranslation' to_field='id' unique=<true>) ) migrations.AddField(model_name='collection' name='name' field=olympia.translations.fields.TranslatedField(blank=<true> db_column='name' null=<true> on_delete=django.db.models.deletion.SET_NULL related_name='Collection_name_set+' require_locale=<false> short=<true> to='translations.Translation' to_field='id' unique=<true>) ) migrations.AddIndex(model_name='featuredcollection' index=models.Index(fields=['application'] name='application_id_idx') ) migrations.AddIndex(model_name='collectionaddon' index=models.Index(fields=['collection' 'created'] name='created_idx') ) migrations.AddIndex(model_name='collectionaddon' index=models.Index(fields=['addon'] name='addon_id') ) migrations.AddIndex(model_name='collectionaddon' index=models.Index(fields=['collection'] name='collection_id') ) migrations.AddIndex(model_name='collectionaddon' index=models.Index(fields=['user'] name='user_id') ) migrations.AddConstraint(model_name='collectionaddon' constraint=models.UniqueConstraint(fields=('addon' 'collection') name='addon_id_2') ) migrations.AddIndex(model_name='collection' index=models.Index(fields=['application'] name='application_id') ) migrations.AddIndex(model_name='collection' index=models.Index(fields=['created'] name='created_idx') ) migrations.AddIndex(model_name='collection' index=models.Index(fields=['listed'] name='listed') ) migrations.AddIndex(model_name='collection' index=models.Index(fields=['slug'] name='slug_idx') ) migrations.AddIndex(model_name='collection' index=models.Index(fields=['type'] name='type_idx') ) migrations.AddConstraint(model_name='collection' constraint=models.UniqueConstraint(fields=('author' 'slug') name='author_id') ) ]<block_end>
|
<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>sklearn.model_selection BaseCrossValidator<class_stmt>OrderedCV(BaseCrossValidator)<block_start>"""Cross-validation procedure with order of indexes taken into account.
Say you have an interval [a, b] and you want to make n splits with d test
indexes at each split -- for example 7 days. Then OrderedCV will
return the n following splits:
- [a, b - d], [b - d, b]
- [a, b - 2*d], [b - 2*d, b - d]
- ...
- [a, b - (n-1)*d], [b - (n-1)*d, b - (n-2)*d]
- [a, b - n*d], [b - n*d, b - (n-1)*d]
Attributes:
n_splits (int): the number of desired splits.
delta (int or datetime.timedelta): the step to increase folds by.
"""<def_stmt>__init__ self n_splits delta<block_start>super().__init__()<line_sep>self.n_splits=n_splits<line_sep>self.delta=delta<block_end><def_stmt>split self X y=<none> groups=<none><block_start>"""
Args:
X (pd.DataFrame): a pd.DataFrame.
"""<if_stmt><not>isinstance(X pd.DataFrame)<block_start><raise>ValueError('X is not a pandas.DataFrame')<block_end>min_dt=X.index.min()<line_sep>max_dt=X.index.max()<line_sep>indices=np.arange(len(X))<for_stmt>i range(self.n_splits)<block_start>t0=min_dt<line_sep>t1=max_dt-self.delta<times>(i+1)<line_sep>t2=max_dt-self.delta<times>i<line_sep>train_idxs=indices[(X.index<ge>t0)&(X.index<le>t1)]<line_sep>test_idxs=indices[(X.index<g>t1)&(X.index<le>t2)]<if_stmt>train_idxs.size<eq>0<block_start><raise>ValueError('No data found in [{}, {}]'.format(t0 t1))<block_end><if_stmt>test_idxs.size<eq>0<block_start><raise>ValueError('No data found in ({}, {}]'.format(t1 t2))<block_end><yield>train_idxs test_idxs<block_end><block_end><def_stmt>get_n_splits self X=<none> y=<none> groups=<none><block_start>"""Returns the number of splitting iterations in the cross-validator"""<line_sep><return>self.n_splits<block_end><block_end>
|
<import_from_future_stmt> print_function<import_stmt>sys<import_stmt>math<def_stmt>lin_interp x1 y1 x2 y2 x<block_start>y=(((y2-y1)/(x2-x1))<times>(x-x1))+y1<line_sep><return>y<block_end><def_stmt>indInSeq arr v<block_start><for_stmt>i range(len(arr)-1)<block_start><if_stmt>((arr[i]<le>v)<and>(v<le>arr[i+1]))<block_start><return>i<block_end><block_end><raise>Exception("Index not found")<block_end><def_stmt>matrixCol mat c<block_start>col=[]<for_stmt>i range(len(mat))<block_start>col.append(mat[i][c])<block_end><return>col<block_end><def_stmt>interpY x_array y_array z_array x z<block_start>i=indInSeq(z_array z)<line_sep>x_z1=matrixCol(x_array i)<line_sep>y_z1=matrixCol(y_array i)<line_sep>x_z2=matrixCol(x_array i+1)<line_sep>y_z2=matrixCol(y_array i+1)<try_stmt><block_start>j=indInSeq(x_z1 x)<line_sep>k=indInSeq(x_z2 x)<block_end><except_stmt>Exception<as>exc<block_start><raise>Exception("Interpolation of y failed.")<block_end>y1=lin_interp(x_z1[j] y_z1[j] x_z1[j+1] y_z1[j+1] x)<line_sep>y2=lin_interp(x_z2[k] y_z2[k] x_z2[k+1] y_z2[k+1] x)<line_sep><return>lin_interp(z_array[i] y1 z_array[i+1] y2 z)<block_end><def_stmt>interpZ x_array y_array z_array x y<block_start><for_stmt>i range(len(z_array)-1)<block_start>x_z1=matrixCol(x_array i)<line_sep>y_z1=matrixCol(y_array i)<line_sep>x_z2=matrixCol(x_array i+1)<line_sep>y_z2=matrixCol(y_array i+1)<try_stmt><block_start>j=indInSeq(x_z1 x)<line_sep>k=indInSeq(x_z2 x)<block_end><except_stmt>Exception<as>exc<block_start><continue><block_end>y_lower=lin_interp(x_z1[j] y_z1[j] x_z1[j+1] y_z1[j+1] x)<line_sep>y_upper=lin_interp(x_z2[k] y_z2[k] x_z2[k+1] y_z2[k+1] x)<if_stmt>((y_lower<le>y)<and>(y<le>y_upper))<block_start><return>lin_interp(y_lower z_array[i] y_upper z_array[i+1] y)<block_end><block_end><raise>Exception("Interpolation of z failed.")<block_end>
|
<import_stmt>angr<import_stmt>logging<line_sep>l=logging.getLogger(name=__name__)<class_stmt>brk(angr.SimProcedure)<block_start>"""
This implements the brk system call.
"""<line_sep>#pylint:disable=arguments-differ
<def_stmt>run self new_brk<block_start>r=self.state.posix.set_brk(new_brk)<line_sep>l.debug('brk(%s) = %s' new_brk r)<line_sep><return>r<block_end><block_end>
|
<import_stmt>six<import_stmt>pytest<import_from_stmt>flex.exceptions ValidationError<import_from_stmt>flex.constants EMPTY<import_from_stmt>flex.error_messages MESSAGES<import_from_stmt>tests.utils generate_validator_from_schema assert_error_message_equal <line_sep>#
# enum validation tests
#
@pytest.mark.parametrize('letters' ('a' 'b' <true> 1 2) )<def_stmt>test_enum_with_valid_array letters<block_start>schema={'enum':[2 1 'a' 'b' 'c' <true> <false>] }<line_sep>validator=generate_validator_from_schema(schema)<line_sep>validator(letters)<block_end>@pytest.mark.parametrize('letters' (<none> 1 0 2 'a') )<def_stmt>test_enum_with_invalid_items letters<block_start>schema={'enum':[<true> <false> 1.0 2.0 'A'] }<line_sep>validator=generate_validator_from_schema(schema)<with_stmt>pytest.raises(ValidationError)<as>err<block_start>validator(letters)<block_end>assert_error_message_equal(err.value.messages[0]['enum'][0] MESSAGES['enum']['invalid'] )<block_end><def_stmt>test_enum_noop_when_not_required_and_field_not_present <block_start>schema={'enum':[<true> <false> 1.0 2.0 'A'] }<line_sep>validator=generate_validator_from_schema(schema)<line_sep>validator(EMPTY)<block_end>@pytest.mark.parametrize('enum_value,value' ((six.text_type('test') six.text_type('test')) (six.text_type('test') b'test') (b'test' six.text_type('test')) (b'test' b'test') ))<def_stmt>test_enum_disperate_text_types enum_value value<block_start>schema={'enum':[enum_value] }<line_sep>validator=generate_validator_from_schema(schema)<line_sep>validator(value)<block_end>
|
<import_from_future_stmt> print_function<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_stmt>numpy<as>np<import_stmt>unittest<import_from_stmt>scipy.constants mu_0<import_from_stmt>SimPEG.electromagnetics natural_source<as>nsem<import_from_stmt>SimPEG maps<line_sep>TOL=1e-4<line_sep>FLR=1e-20# "zero", so if residual below this --> pass regardless of order
CONDUCTIVITY=1e1<line_sep>MU=mu_0<def_stmt>JvecAdjointTest sigmaHalf formulation="PrimSec"<block_start>forType="PrimSec"<not><in>formulation<line_sep>survey,sigma,sigBG,m1d=nsem.utils.test_utils.setup1DSurvey(sigmaHalf tD=forType structure=<false>)<line_sep>print("Adjoint test of e formulation for {:s} comp \n".format(formulation))<if_stmt>"PrimSec"<in>formulation<block_start>problem=nsem.Simulation1DPrimarySecondary(m1d survey=survey sigmaPrimary=sigBG sigmaMap=maps.IdentityMap(m1d))<block_end><else_stmt><block_start><raise>NotImplementedError("Only {} formulations are implemented.".format(formulation))<block_end>m=sigma<line_sep>u=problem.fields(m)<line_sep>np.random.seed(1983)<line_sep>v=np.random.rand(survey.nD )<line_sep># print problem.PropMap.PropModel.nP
w=np.random.rand(problem.mesh.nC )<line_sep>vJw=v.ravel().dot(problem.Jvec(m w u))<line_sep>wJtv=w.ravel().dot(problem.Jtvec(m v u))<line_sep>tol=np.max([TOL<times>(10<power>int(np.log10(np.abs(vJw)))) FLR])<line_sep>print(" vJw wJtv vJw - wJtv tol abs(vJw - wJtv) < tol")<line_sep>print(vJw wJtv vJw-wJtv tol np.abs(vJw-wJtv)<l>tol)<line_sep><return>np.abs(vJw-wJtv)<l>tol<block_end><class_stmt>NSEM_1D_AdjointTests(unittest.TestCase)<block_start><def_stmt>setUp self<block_start><pass><block_end># Test the adjoint of Jvec and Jtvec
# def test_JvecAdjoint_zxxr(self):self.assertTrue(JvecAdjointTest(random(1e-2),'zxxr',.1))
# def test_JvecAdjoint_zxxi(self):self.assertTrue(JvecAdjointTest(random(1e-2),'zxxi',.1))
# def test_JvecAdjoint_zxyr(self):self.assertTrue(JvecAdjointTest(random(1e-2),'zxyr',.1))
# def test_JvecAdjoint_zxyi(self):self.assertTrue(JvecAdjointTest(random(1e-2),'zxyi',.1))
# def test_JvecAdjoint_zyxr(self):self.assertTrue(JvecAdjointTest(random(1e-2),'zyxr',.1))
# def test_JvecAdjoint_zyxi(self):self.assertTrue(JvecAdjointTest(random(1e-2),'zyxi',.1))
# def test_JvecAdjoint_zyyr(self):self.assertTrue(JvecAdjointTest(random(1e-2),'zyyr',.1))
# def test_JvecAdjoint_zyyi(self):self.assertTrue(JvecAdjointTest(random(1e-2),'zyyi',.1))
<def_stmt>test_JvecAdjoint_All self<block_start>self.assertTrue(JvecAdjointTest(1e-2))<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
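The quantity being checked above is the adjoint identity <v, J w> = <J^T v, w>; a self-contained illustration with an explicit random matrix standing in for the SimPEG sensitivity operator (not the actual simulation):

import numpy as np

rng = np.random.default_rng(0)
J = rng.standard_normal((5, 3))  # stand-in for the sensitivity J
v = rng.standard_normal(5)
w = rng.standard_normal(3)
vJw = v @ (J @ w)
wJtv = w @ (J.T @ v)
assert abs(vJw - wJtv) < 1e-12   # equal up to floating-point round-off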
|
# Standard Library
<import_stmt>os<import_stmt>time<import_from_stmt>datetime datetime<line_sep># Third Party
<import_stmt>numpy<as>np<import_stmt>pytest<import_stmt>xgboost<import_from_stmt>tests.core.utils check_tf_events delete_local_trials verify_files<line_sep># First Party
<import_from_stmt>smdebug.core.modes ModeKeys<import_from_stmt>smdebug.core.save_config SaveConfig SaveConfigMode<import_from_stmt>smdebug.xgboost Hook<as>XG_Hook<line_sep>SMDEBUG_XG_HOOK_TESTS_DIR="/tmp/test_output/smdebug_xg/tests/"<def_stmt>simple_xg_model hook num_round=10 seed=42 with_timestamp=<false><block_start>np.random.seed(seed)<line_sep>train_data=np.random.rand(5 10)<line_sep>train_label=np.random.randint(2 size=5)<line_sep>dtrain=xgboost.DMatrix(train_data label=train_label)<line_sep>test_data=np.random.rand(5 10)<line_sep>test_label=np.random.randint(2 size=5)<line_sep>dtest=xgboost.DMatrix(test_data label=test_label)<line_sep>params={}<line_sep>scalars_to_be_saved=dict()<line_sep>ts=time.time()<line_sep>hook.save_scalar("xg_num_steps" num_round sm_metric=<true> timestamp=ts<if>with_timestamp<else><none>)<line_sep>scalars_to_be_saved["scalar/xg_num_steps"]=(ts num_round)<line_sep>ts=time.time()<line_sep>hook.save_scalar("xg_before_train" 1 sm_metric=<false> timestamp=ts<if>with_timestamp<else><none>)<line_sep>scalars_to_be_saved["scalar/xg_before_train"]=(ts 1)<line_sep>hook.set_mode(ModeKeys.TRAIN)<line_sep>xgboost.train(params dtrain evals=[(dtrain "train") (dtest "test")] num_boost_round=num_round callbacks=[hook] )<line_sep>ts=time.time()<line_sep>hook.save_scalar("xg_after_train" 1 sm_metric=<false> timestamp=ts<if>with_timestamp<else><none>)<line_sep>scalars_to_be_saved["scalar/xg_after_train"]=(ts 1)<line_sep><return>scalars_to_be_saved<block_end><def_stmt>helper_xgboost_tests collection save_config with_timestamp<block_start>coll_name,coll_regex=collection<line_sep>run_id="trial_"+coll_name+"-"+datetime.now().strftime("%Y%m%d-%H%M%S%f")<line_sep>trial_dir=os.path.join(SMDEBUG_XG_HOOK_TESTS_DIR run_id)<line_sep>hook=XG_Hook(out_dir=trial_dir include_collections=[coll_name] save_config=save_config export_tensorboard=<true> )<line_sep>saved_scalars=simple_xg_model(hook with_timestamp=with_timestamp)<line_sep>hook.close()<line_sep>verify_files(trial_dir save_config saved_scalars)<if_stmt>with_timestamp<block_start>check_tf_events(trial_dir saved_scalars)<block_end><block_end>@pytest.mark.parametrize("collection" [("all" ".*") ("scalars" "^scalar")])@pytest.mark.parametrize("save_config" [SaveConfig(save_steps=[0 2 4 6 8]) SaveConfig({ModeKeys.TRAIN:SaveConfigMode(save_interval=2) ModeKeys.GLOBAL:SaveConfigMode(save_interval=3) ModeKeys.EVAL:SaveConfigMode(save_interval=1) }) ] )@pytest.mark.parametrize("with_timestamp" [<true> <false>])<def_stmt>test_xgboost_save_scalar collection save_config with_timestamp<block_start>helper_xgboost_tests(collection save_config with_timestamp)<line_sep>delete_local_trials([SMDEBUG_XG_HOOK_TESTS_DIR])<block_end>
|
<import_stmt>lldb<import_from_stmt>lldbsuite.test.lldbtest *<import_from_stmt>lldbsuite.test.decorators *<import_stmt>lldbsuite.test.lldbutil<as>lldbutil<import_stmt>os<import_stmt>unittest2<class_stmt>TestSwiftFoundationValueTypeGlobal(TestBase)<block_start>mydir=TestBase.compute_mydir(__file__)<def_stmt>setUp self<block_start>TestBase.setUp(self)<block_end>@swiftTest@skipUnlessFoundation<def_stmt>test self<block_start>self.build()<line_sep>target=self.dbg.CreateTarget(self.getBuildArtifact())<line_sep>self.assertTrue(target VALID_TARGET)<line_sep># Target variable without a process.
# This is not actually expected to work, but also shouldn't crash.
self.expect("target variable g_url")<block_end><block_end>
|
<import_stmt>enum<import_stmt>warnings<import_from_stmt>functools partial<import_from_stmt>promise Promise is_thenable<import_from_stmt>sqlalchemy.orm.query Query<import_from_stmt>graphene NonNull<import_from_stmt>graphene.relay Connection ConnectionField<import_from_stmt>graphene.relay.connection connection_adapter page_info_adapter<import_from_stmt>graphql_relay.connection.arrayconnection connection_from_array_slice<import_from_stmt>.batching get_batch_resolver<import_from_stmt>.utils EnumValue get_query<class_stmt>UnsortedSQLAlchemyConnectionField(ConnectionField)<block_start>@property<def_stmt>type self<block_start><import_from_stmt>.types SQLAlchemyObjectType<line_sep>type_=super(ConnectionField self).type<line_sep>nullable_type=get_nullable_type(type_)<if_stmt>issubclass(nullable_type Connection)<block_start><return>type_<block_end><assert_stmt>issubclass(nullable_type SQLAlchemyObjectType) ("SQLALchemyConnectionField only accepts SQLAlchemyObjectType types, not {}").format(nullable_type.__name__)<assert_stmt>(nullable_type.connection) "The type {} doesn't have a connection".format(nullable_type.__name__)<assert_stmt>type_<eq>nullable_type ("Passing a SQLAlchemyObjectType instance is deprecated. "<concat>"Pass the connection type instead accessible via SQLAlchemyObjectType.connection")<line_sep><return>nullable_type.connection<block_end>@property<def_stmt>model self<block_start><return>get_nullable_type(self.type)._meta.node._meta.model<block_end>@classmethod<def_stmt>get_query cls model info **args<block_start><return>get_query(model info.context)<block_end>@classmethod<def_stmt>resolve_connection cls connection_type model info args resolved<block_start><if_stmt>resolved<is><none><block_start>resolved=cls.get_query(model info **args)<block_end><if_stmt>isinstance(resolved Query)<block_start>_len=resolved.count()<block_end><else_stmt><block_start>_len=len(resolved)<block_end><def_stmt>adjusted_connection_adapter edges pageInfo<block_start><return>connection_adapter(connection_type edges pageInfo)<block_end>connection=connection_from_array_slice(array_slice=resolved args=args slice_start=0 array_length=_len array_slice_length=_len connection_type=adjusted_connection_adapter edge_type=connection_type.Edge page_info_type=page_info_adapter )<line_sep>connection.iterable=resolved<line_sep>connection.length=_len<line_sep><return>connection<block_end>@classmethod<def_stmt>connection_resolver cls resolver connection_type model root info **args<block_start>resolved=resolver(root info **args)<line_sep>on_resolve=partial(cls.resolve_connection connection_type model info args)<if_stmt>is_thenable(resolved)<block_start><return>Promise.resolve(resolved).then(on_resolve)<block_end><return>on_resolve(resolved)<block_end><def_stmt>wrap_resolve self parent_resolver<block_start><return>partial(self.connection_resolver parent_resolver get_nullable_type(self.type) self.model )<block_end><block_end># TODO Rename this to SortableSQLAlchemyConnectionField
<class_stmt>SQLAlchemyConnectionField(UnsortedSQLAlchemyConnectionField)<block_start><def_stmt>__init__ self type_ *args **kwargs<block_start>nullable_type=get_nullable_type(type_)<if_stmt>"sort"<not><in>kwargs<and>issubclass(nullable_type Connection)# Let super class raise if type is not a Connection
<block_start><try_stmt><block_start>kwargs.setdefault("sort" nullable_type.Edge.node._type.sort_argument())<block_end><except_stmt>(AttributeError TypeError)<block_start><raise>TypeError('Cannot create sort argument for {}. A model is required. Set the "sort" argument'<concat>" to None to disable the creation of the sort query argument".format(nullable_type.__name__))<block_end><block_end><elif_stmt>"sort"<in>kwargs<and>kwargs["sort"]<is><none><block_start><del_stmt>kwargs["sort"]<block_end>super(SQLAlchemyConnectionField self).__init__(type_ *args **kwargs)<block_end>@classmethod<def_stmt>get_query cls model info sort=<none> **args<block_start>query=get_query(model info.context)<if_stmt>sort<is><not><none><block_start><if_stmt><not>isinstance(sort list)<block_start>sort=[sort]<block_end>sort_args=[]<line_sep># ensure consistent handling of graphene Enums, enum values and
# plain strings
<for_stmt>item sort<block_start><if_stmt>isinstance(item enum.Enum)<block_start>sort_args.append(item.value.value)<block_end><elif_stmt>isinstance(item EnumValue)<block_start>sort_args.append(item.value)<block_end><else_stmt><block_start>sort_args.append(item)<block_end><block_end>query=query.order_by(*sort_args)<block_end><return>query<block_end><block_end><class_stmt>BatchSQLAlchemyConnectionField(UnsortedSQLAlchemyConnectionField)<block_start>"""
This is currently experimental.
The API and behavior may change in future versions.
Use at your own risk.
"""<def_stmt>wrap_resolve self parent_resolver<block_start><return>partial(self.connection_resolver self.resolver get_nullable_type(self.type) self.model )<block_end>@classmethod<def_stmt>from_relationship cls relationship registry **field_kwargs<block_start>model=relationship.mapper.entity<line_sep>model_type=registry.get_type_for_model(model)<line_sep><return>cls(model_type.connection resolver=get_batch_resolver(relationship) **field_kwargs)<block_end><block_end><def_stmt>default_connection_field_factory relationship registry **field_kwargs<block_start>model=relationship.mapper.entity<line_sep>model_type=registry.get_type_for_model(model)<line_sep><return>__connectionFactory(model_type **field_kwargs)<block_end># TODO Remove in next major version
__connectionFactory=UnsortedSQLAlchemyConnectionField<def_stmt>createConnectionField type_ **field_kwargs<block_start>warnings.warn('createConnectionField is deprecated and will be removed in the next '<concat>'major version. Use SQLAlchemyObjectType.Meta.connection_field_factory instead.' DeprecationWarning )<line_sep><return>__connectionFactory(type_ **field_kwargs)<block_end><def_stmt>registerConnectionFieldFactory factoryMethod<block_start>warnings.warn('registerConnectionFieldFactory is deprecated and will be removed in the next '<concat>'major version. Use SQLAlchemyObjectType.Meta.connection_field_factory instead.' DeprecationWarning )<line_sep><global>__connectionFactory<line_sep>__connectionFactory=factoryMethod<block_end><def_stmt>unregisterConnectionFieldFactory <block_start>warnings.warn('registerConnectionFieldFactory is deprecated and will be removed in the next '<concat>'major version. Use SQLAlchemyObjectType.Meta.connection_field_factory instead.' DeprecationWarning )<line_sep><global>__connectionFactory<line_sep>__connectionFactory=UnsortedSQLAlchemyConnectionField<block_end><def_stmt>get_nullable_type _type<block_start><if_stmt>isinstance(_type NonNull)<block_start><return>_type.of_type<block_end><return>_type<block_end>
|
"""
An example showing the plot_calibration_curve method
used with scikit-learn classifiers
"""<import_from_stmt>sklearn.ensemble RandomForestClassifier<import_from_stmt>sklearn.naive_bayes GaussianNB<import_from_stmt>sklearn.linear_model LogisticRegression<import_from_stmt>sklearn.svm LinearSVC<import_from_stmt>sklearn.datasets make_classification<import_stmt>matplotlib.pyplot<as>plt<import_stmt>scikitplot<as>skplt<line_sep>X,y=make_classification(n_samples=100000 n_features=20 n_informative=2 n_redundant=2 random_state=20)<line_sep>X_train,y_train,X_test,y_test=X[:1000] y[:1000] X[1000:] y[1000:]<line_sep>rf_probas=RandomForestClassifier().fit(X_train y_train).predict_proba(X_test)<line_sep>lr_probas=LogisticRegression().fit(X_train y_train).predict_proba(X_test)<line_sep>nb_probas=GaussianNB().fit(X_train y_train).predict_proba(X_test)<line_sep>sv_scores=LinearSVC().fit(X_train y_train).decision_function(X_test)<line_sep>probas_list=[rf_probas lr_probas nb_probas sv_scores]<line_sep>clf_names=['Random Forest' 'Logistic Regression' 'Gaussian Naive Bayes' 'Support Vector Machine']<line_sep>skplt.metrics.plot_calibration_curve(y_test probas_list=probas_list clf_names=clf_names n_bins=10)<line_sep>plt.show()<line_sep>
|
<import_from_stmt>rpython.memory.test test_semispace_gc<class_stmt>TestGenerationalGC(test_semispace_gc.TestSemiSpaceGC)<block_start><import_from_stmt>rpython.memory.gc.generation GenerationGC<as>GCClass<block_end>
|
<import_stmt>sys<import_stmt>subprocess<import_stmt>tempfile<import_stmt>os<import_stmt>platform<import_stmt>pkg_resources<import_stmt>xml.etree.ElementTree<as>ET<line_sep># Determine version of tensorflow-directml
installed_packages=pkg_resources.working_set<line_sep>tfdml_version=[p.version<for>p installed_packages<if>p.key<eq>"tensorflow-directml"]<if_stmt>tfdml_version<block_start>tfdml_version=tfdml_version[0]<block_end><else_stmt><block_start>tfdml_version="Not Installed"<block_end># Collect info from dxdiag.exe in Windows.
# NOTE: NamedTemporaryFile in a 'with' statement leaves the file open, which prevents dxdiag.exe
# from opening it a second time for writing on Windows. We must manually delete it without leaving
# the file open.
dxdiag_path=tempfile.NamedTemporaryFile(suffix=".xml" delete=<false>).name<try_stmt><block_start><if_stmt>os.name<eq>"nt"<block_start>subprocess.run(['dxdiag.exe' '/x' dxdiag_path] check=<true>)<block_end><else_stmt><block_start>dxdiag_path_windows=subprocess.run("wslpath -w {}".format(dxdiag_path) shell=<true> check=<true> capture_output=<true> text=<true>).stdout.rstrip().replace('\\' '\\\\')<line_sep>subprocess.run('dxdiag.exe /x {}'.format(dxdiag_path_windows) shell=<true> check=<true>)<block_end><with_stmt>open(dxdiag_path "r")<as>dxdiag_log<block_start>dxdiag=ET.parse(dxdiag_log).getroot()<block_end><block_end><finally_stmt><block_start><if_stmt>os.path.exists(dxdiag_path)<block_start>os.remove(dxdiag_path)<block_end><block_end>print("Host System\n{}".format('-'<times>80))<line_sep>print("Windows 10 Version : {}".format(dxdiag.find("./SystemInformation/OperatingSystem").text))<line_sep>print("Processor : {}".format(dxdiag.find("./SystemInformation/Processor").text))<line_sep>print("Memory : {}".format(dxdiag.find("./SystemInformation/Memory").text))<line_sep>print("DirectX Version : {}".format(dxdiag.find("./SystemInformation/DirectXVersion").text))<if_stmt>os.name<ne>"nt"<block_start><import_stmt>distro<line_sep>print("\nWindows Subsystem for Linux\n{}".format('-'<times>80))<line_sep>print("WSL Name : {}".format(os.environ["WSL_DISTRO_NAME"]))<line_sep>print("WSL Distribution : {}".format(" ".join(distro.linux_distribution())))<line_sep>print("WSL Kernel : {}".format(platform.release()))<block_end>print("\nPython Environment\n{}".format('-'<times>80))<line_sep>print("Python Version : {}".format(platform.python_version()))<line_sep>print("TensorFlow-DirectML : {}".format(tfdml_version))<for_stmt>device dxdiag.findall("./DisplayDevices/DisplayDevice")<block_start>print("\nDirectX Device\n{}".format('-'<times>80))<line_sep>print("Description : {}".format(device.find("./CardName").text))<line_sep>print("Manufacturer : {}".format(device.find("./Manufacturer").text))<line_sep>print("Chip Type : {}".format(device.find("./ChipType").text))<line_sep>print("Dedicated Memory : {}".format(device.find("./DedicatedMemory").text))<line_sep>print("Driver Version : {}".format(device.find("./DriverVersion").text))<line_sep>print("Driver Model : {}".format(device.find("./DriverModel").text))<line_sep>print("Driver Date : {}".format(device.find("./DriverDate").text))<line_sep>print("Feature Levels : {}".format(device.find("./FeatureLevels").text))<block_end>
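The temporary-file pattern described in the comments above, shown in isolation (hypothetical file content; the point is delete=False plus manual removal so another process can reopen the closed file by name):

import os
import tempfile

path = tempfile.NamedTemporaryFile(suffix=".xml", delete=False).name
try:
    with open(path, "w") as f:   # a second writer can open the file by name
        f.write("<placeholder/>")
finally:
    if os.path.exists(path):
        os.remove(path)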
|
__author__='alex'<line_sep>
|
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>numpy<as>np<import_from_stmt>torch.nn functional<as>F<import_stmt>math<import_from_stmt>utils.tools make_positions<def_stmt>Embedding num_embeddings embedding_dim padding_idx=<none><block_start>m=nn.Embedding(num_embeddings embedding_dim padding_idx=padding_idx)<line_sep>nn.init.normal_(m.weight mean=0 std=embedding_dim<power>-0.5)<if_stmt>padding_idx<is><not><none><block_start>nn.init.constant_(m.weight[padding_idx] 0)<block_end><return>m<block_end><def_stmt>Linear in_features out_features bias=<true><block_start>m=nn.Linear(in_features out_features bias)<line_sep>nn.init.xavier_uniform_(m.weight)<if_stmt>bias<block_start>nn.init.constant_(m.bias 0.)<block_end><return>m<block_end><def_stmt>get_sinusoid_encoding_table n_position d_hid padding_idx=<none><block_start>""" Sinusoid position encoding table """<def_stmt>cal_angle position hid_idx<block_start><return>position/np.power(10000 2<times>(hid_idx<floordiv>2)/d_hid)<block_end><def_stmt>get_posi_angle_vec position<block_start><return>[cal_angle(position hid_j)<for>hid_j range(d_hid)]<block_end>sinusoid_table=np.array([get_posi_angle_vec(pos_i)<for>pos_i range(n_position)])<line_sep>sinusoid_table[: 0::2]=np.sin(sinusoid_table[: 0::2])# dim 2i
sinusoid_table[: 1::2]=np.cos(sinusoid_table[: 1::2])# dim 2i+1
<if_stmt>padding_idx<is><not><none># zero vector for padding dimension
<block_start>sinusoid_table[padding_idx]=0.0<block_end><return>torch.FloatTensor(sinusoid_table)<block_end><class_stmt>SinusoidalPositionalEmbedding(nn.Module)<block_start>"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""<def_stmt>__init__ self embedding_dim padding_idx init_size=1024<block_start>super().__init__()<line_sep>self.embedding_dim=embedding_dim<line_sep>self.padding_idx=padding_idx<line_sep>self.weights=SinusoidalPositionalEmbedding.get_embedding(init_size embedding_dim padding_idx )<line_sep>self.register_buffer("_float_tensor" torch.FloatTensor(1))<block_end>@staticmethod<def_stmt>get_embedding num_embeddings embedding_dim padding_idx=<none><block_start>"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""<line_sep>half_dim=embedding_dim<floordiv>2<line_sep>emb=math.log(10000)/(half_dim-1)<line_sep>emb=torch.exp(torch.arange(half_dim dtype=torch.float)<times>-emb)<line_sep>emb=torch.arange(num_embeddings dtype=torch.float).unsqueeze(1)<times>emb.unsqueeze(0)<line_sep>emb=torch.cat([torch.sin(emb) torch.cos(emb)] dim=1).view(num_embeddings -1)<if_stmt>embedding_dim%2<eq>1# zero pad
<block_start>emb=torch.cat([emb torch.zeros(num_embeddings 1)] dim=1)<block_end><if_stmt>padding_idx<is><not><none><block_start>emb[padding_idx :]=0<block_end><return>emb<block_end><def_stmt>forward self input incremental_state=<none> timestep=<none> positions=<none> **kwargs<block_start>"""Input is expected to be of size [bsz x seqlen]."""<line_sep>bsz,seq_len=input.shape[:2]<line_sep>max_pos=self.padding_idx+1+seq_len<if_stmt>self.weights<is><none><or>max_pos<g>self.weights.size(0)# recompute/expand embeddings if needed
<block_start>self.weights=SinusoidalPositionalEmbedding.get_embedding(max_pos self.embedding_dim self.padding_idx )<block_end>self.weights=self.weights.to(self._float_tensor)<if_stmt>incremental_state<is><not><none># positions is the same for every token when decoding a single step
<block_start>pos=timestep.view(-1)[0]+1<if>timestep<is><not><none><else>seq_len<line_sep><return>self.weights[self.padding_idx+pos :].expand(bsz 1 -1)<block_end>positions=make_positions(input self.padding_idx)<if>positions<is><none><else>positions<line_sep><return>self.weights.index_select(0 positions.view(-1)).view(bsz seq_len -1).detach()<block_end><def_stmt>max_positions self<block_start>"""Maximum number of supported positions."""<line_sep><return>int(1e5)<block_end><block_end># an arbitrary large number
<class_stmt>Swish(nn.Module)<block_start>"""
Swish is a smooth, non-monotonic function that consistently matches or outperforms ReLU on deep networks applied
to a variety of challenging domains such as Image classification and Machine translation.
"""<def_stmt>__init__ self<block_start>super(Swish self).__init__()<block_end><def_stmt>forward self inputs<block_start><return>inputs<times>inputs.sigmoid()<block_end><block_end><class_stmt>GLU(nn.Module)<block_start>"""
The gating mechanism is called Gated Linear Units (GLU), which was first introduced for natural language processing
in the paper “Language Modeling with Gated Convolutional Networks”
"""<def_stmt>__init__ self dim:int<arrow><none><block_start>super(GLU self).__init__()<line_sep>self.dim=dim<block_end><def_stmt>forward self inputs<block_start>outputs,gate=inputs.chunk(2 dim=self.dim)<line_sep><return>outputs<times>gate.sigmoid()<block_end><block_end><class_stmt>LayerNorm(torch.nn.LayerNorm)<block_start>"""Layer normalization module.
:param int nout: output dim size
:param int dim: dimension to be normalized
"""<def_stmt>__init__ self nout dim=-1<block_start>"""Construct an LayerNorm object."""<line_sep>super(LayerNorm self).__init__(nout eps=1e-12)<line_sep>self.dim=dim<block_end><def_stmt>forward self x<block_start>"""Apply layer normalization.
:param torch.Tensor x: input tensor
:return: layer normalized tensor
:rtype torch.Tensor
"""<if_stmt>self.dim<eq>-1<block_start><return>super(LayerNorm self).forward(x)<block_end><return>super(LayerNorm self).forward(x.transpose(1 -1)).transpose(1 -1)<block_end><block_end><class_stmt>LinearNorm(nn.Module)<block_start>""" LinearNorm Projection """<def_stmt>__init__ self in_features out_features bias=<false><block_start>super(LinearNorm self).__init__()<line_sep>self.linear=nn.Linear(in_features out_features bias)<line_sep>nn.init.xavier_uniform_(self.linear.weight)<if_stmt>bias<block_start>nn.init.constant_(self.linear.bias 0.0)<block_end><block_end><def_stmt>forward self x<block_start>x=self.linear(x)<line_sep><return>x<block_end><block_end><class_stmt>ConvBlock(nn.Module)<block_start>""" 1D Convolutional Block """<def_stmt>__init__ self in_channels out_channels kernel_size dropout=<none> normalization=nn.BatchNorm1d activation=nn.ReLU transpose=<false><block_start>super(ConvBlock self).__init__()<line_sep>self.conv_layer=nn.Sequential(ConvNorm(in_channels out_channels kernel_size=kernel_size stride=1 padding=int((kernel_size-1)/2) dilation=1 w_init_gain="tanh" transpose=transpose) normalization(out_channels) activation() )<line_sep>self.dropout=dropout<if>dropout<is><not><none><else><none><line_sep>self.transpose=transpose<block_end><def_stmt>forward self enc_input mask=<none><block_start><if_stmt><not>self.transpose<block_start>enc_input=enc_input.contiguous().transpose(1 2)<block_end>enc_output=self.conv_layer(enc_input)<if_stmt>self.dropout<is><not><none><block_start>enc_output=F.dropout(enc_output self.dropout training=<true>)<block_end># self.training)
<if_stmt><not>self.transpose<block_start>enc_output=enc_output.contiguous().transpose(1 2)<block_end><if_stmt>mask<is><not><none><block_start>enc_output=enc_output.masked_fill(mask.unsqueeze(-1) 0)<block_end><return>enc_output<block_end><block_end><class_stmt>ConvBlock2D(nn.Module)<block_start>""" 2D Convolutional Block """<def_stmt>__init__ self in_channels out_channels kernel_size dropout=<none> normalization=nn.BatchNorm2d activation=nn.ReLU transpose=<false><block_start>super(ConvBlock2D self).__init__()<line_sep>self.conv_layer=nn.Sequential(ConvNorm2D(in_channels out_channels kernel_size=(1 kernel_size) stride=1 padding=(0 int((kernel_size-1)/2)) bias=<false> w_init_gain="tanh" transpose=transpose ) normalization(out_channels) activation() )<line_sep>self.dropout=dropout<if>dropout<is><not><none><else><none><line_sep>self.transpose=transpose<block_end><def_stmt>forward self enc_input mask=<none><block_start>"""
enc_input -- [B, H, W, C_in]
mask -- [B, H]
"""<if_stmt><not>self.transpose<block_start>enc_input=enc_input.contiguous().permute(0 3 1 2)# [B, C_in, H, W]
<block_end>enc_output=self.conv_layer(enc_input)<if_stmt>self.dropout<is><not><none><block_start>enc_output=F.dropout(enc_output self.dropout self.training)<block_end><if_stmt><not>self.transpose<block_start>enc_output=enc_output.contiguous().permute(0 2 3 1)# [B, H, W, C_out]
<block_end><if_stmt>mask<is><not><none><block_start>enc_output=enc_output.masked_fill(mask.unsqueeze(-1).unsqueeze(-1) 0)<block_end><return>enc_output<block_end><block_end><class_stmt>ConvNorm(nn.Module)<block_start>""" 1D Convolution """<def_stmt>__init__ self in_channels out_channels kernel_size=1 stride=1 padding=<none> dilation=1 bias=<true> w_init_gain="linear" transpose=<false> <block_start>super(ConvNorm self).__init__()<if_stmt>padding<is><none><block_start><assert_stmt>kernel_size%2<eq>1<line_sep>padding=int(dilation<times>(kernel_size-1)/2)<block_end>self.conv=nn.Conv1d(in_channels out_channels kernel_size=kernel_size stride=stride padding=padding dilation=dilation bias=bias )<line_sep>torch.nn.init.xavier_uniform_(self.conv.weight gain=torch.nn.init.calculate_gain(w_init_gain))<line_sep>self.transpose=transpose<block_end><def_stmt>forward self x<block_start><if_stmt>self.transpose<block_start>x=x.contiguous().transpose(1 2)<block_end>x=self.conv(x)<if_stmt>self.transpose<block_start>x=x.contiguous().transpose(1 2)<block_end><return>x<block_end><block_end><class_stmt>ConvNorm2D(nn.Module)<block_start>""" 2D Convolution """<def_stmt>__init__ self in_channels out_channels kernel_size=1 stride=1 padding=<none> dilation=1 bias=<true> w_init_gain="linear" transpose=<false> <block_start>super(ConvNorm2D self).__init__()<if_stmt>padding<is><none><block_start><assert_stmt>kernel_size%2<eq>1<line_sep>padding=int(dilation<times>(kernel_size-1)/2)<block_end>self.conv=nn.Conv2d(in_channels out_channels kernel_size=kernel_size stride=stride padding=padding dilation=dilation bias=bias )<line_sep>torch.nn.init.xavier_uniform_(self.conv.weight gain=torch.nn.init.calculate_gain(w_init_gain))<line_sep>self.transpose=transpose<block_end><def_stmt>forward self x<block_start>"""
x -- [B, H, W, C] or [B, C, H, W]
"""<if_stmt>self.transpose<block_start>x=x.contiguous().permute(0 3 1 2)# [B, C, H, W]
<block_end>x=self.conv(x)<if_stmt>self.transpose<block_start>x=x.contiguous().permute(0 2 3 1)<block_end># [B, H, W, C]
<return>x<block_end><block_end>
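For reference, get_sinusoid_encoding_table above fills PE[pos, 2i] = sin(pos / 10000^(2i/d)) and PE[pos, 2i+1] = cos(pos / 10000^(2i/d)); a quick spot check of a single entry, assuming the definitions above are in scope:

import numpy as np

table = get_sinusoid_encoding_table(n_position=4, d_hid=6)
pos, i = 2, 1
expected = np.sin(pos / np.power(10000, (2 * i) / 6))
assert np.isclose(table[pos, 2 * i].item(), expected)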
|
<import_stmt>string<import_stmt>timeit<class_stmt>BadHash(str)<block_start><def_stmt>__hash__ self<block_start><return>42<block_end><block_end><class_stmt>GoodHash(str)<block_start><def_stmt>__hash__ self<block_start>"""
This is a slightly optimized version of twoletter_hash
"""<line_sep><return>ord(self[1])+26<times>ord(self[0])-2619<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>baddict=set()<line_sep>gooddict=set()<for_stmt>i string.ascii_lowercase<block_start><for_stmt>j string.ascii_lowercase<block_start>key=i+j<line_sep>baddict.add(BadHash(key))<line_sep>gooddict.add(GoodHash(key))<block_end><block_end>badtime=timeit.repeat("key in baddict" setup="from __main__ import baddict, BadHash; key = BadHash('zz')" repeat=3 number=100_000 )<line_sep>goodtime=timeit.repeat("key in gooddict" setup="from __main__ import gooddict, GoodHash; key = GoodHash('zz')" repeat=3 number=100_000 )<line_sep>print(f"Min lookup time for baddict: {min(badtime)}")<line_sep>print(f"Min lookup time for gooddict: {min(goodtime)}")<line_sep># Results:
# Min lookup time for baddict: 17.719061855008476
# Min lookup time for gooddict: 0.42408075400453527
<block_end>
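The closed form in GoodHash works because ord(key[1]) + 26*ord(key[0]) - 2619 maps the 676 two-letter lowercase keys onto the distinct integers 0..675 (2619 = 27*97, the offset that sends 'aa' to 0), so every key lands in its own hash bucket:

# Worked check of the GoodHash formula (illustrative):
assert ord('a') + 26 * ord('a') - 2619 == 0     # 'aa' -> 0
assert ord('b') + 26 * ord('a') - 2619 == 1     # 'ab' -> 1
assert ord('z') + 26 * ord('z') - 2619 == 675   # 'zz' -> 675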
|
<import_from_stmt>textwrap dedent<import_from_stmt>peru plugin<import_stmt>shared<def_stmt>assert_parallel n# The plugin module keeps a global counter of all the jobs that run in
# parallel, so that we can write these tests.
<block_start><if_stmt>plugin.DEBUG_PARALLEL_MAX<ne>n<block_start><raise>AssertionError('Expected {} parallel {}. Counted {}.'.format(n 'job'<if>n<eq>1<else>'jobs' plugin.DEBUG_PARALLEL_MAX))<block_end><block_end><class_stmt>ParallelismTest(shared.PeruTest)<block_start><def_stmt>setUp self# Make sure nothing is fishy with the jobs counter, and reset the max.
<block_start>plugin.debug_assert_clean_parallel_count()<line_sep>plugin.DEBUG_PARALLEL_MAX=0<block_end><def_stmt>tearDown self# Make sure nothing is fishy with the jobs counter. No sense in
# resetting the max here, because the rest of our tests don't know to
# reset it anyway.
<block_start>plugin.debug_assert_clean_parallel_count()<block_end><def_stmt>test_two_jobs_in_parallel self# This just checks that two different modules can actually be fetched
# in parallel.
<block_start>foo=shared.create_dir()<line_sep>bar=shared.create_dir()<line_sep>peru_yaml=dedent('''\
imports:
foo: ./
bar: ./
cp module foo:
path: {}
cp module bar:
path: {}
'''.format(foo bar))<line_sep>test_dir=shared.create_dir({'peru.yaml':peru_yaml})<line_sep>shared.run_peru_command(['sync'] test_dir)<line_sep>assert_parallel(2)<block_end><def_stmt>test_jobs_flag self# This checks that the --jobs flag is respected, even when two modules
# could have been fetched in parallel.
<block_start>foo=shared.create_dir()<line_sep>bar=shared.create_dir()<line_sep>peru_yaml=dedent('''\
imports:
foo: ./
bar: ./
cp module foo:
path: {}
cp module bar:
path: {}
'''.format(foo bar))<line_sep>test_dir=shared.create_dir({'peru.yaml':peru_yaml})<line_sep>shared.run_peru_command(['sync' '-j1'] test_dir)<line_sep>assert_parallel(1)<block_end><def_stmt>test_identical_fields self# This checks that modules with identical fields are not fetched in
# parallel. This is the same logic that protects us from fetching a
# given module twice, like when it's imported with two different named
# rules.
<block_start>foo=shared.create_dir()<line_sep>peru_yaml=dedent('''\
imports:
foo1: ./
foo2: ./
cp module foo1:
path: {}
cp module foo2:
path: {}
'''.format(foo foo))<line_sep>test_dir=shared.create_dir({'peru.yaml':peru_yaml})<line_sep>shared.run_peru_command(['sync'] test_dir)<line_sep>assert_parallel(1)<block_end><def_stmt>test_identical_plugin_cache_fields self# Plugins that use caching also need to avoid running in parallel, if
# their cache directories are the same. The noop_cache plugin (created
# for this test) uses the path field (but not the nonce field) in its
# plugin cache key. Check that these two modules are not fetched in
# parallel, even though their module fields aren't exactly the same.
<block_start>foo=shared.create_dir()<line_sep>peru_yaml=dedent('''\
imports:
foo1: ./
foo2: ./
noop_cache module foo1:
path: {}
# nonce is ignored, but it makes foo1 different from foo2 as
# far as the module cache is concerned
nonce: '1'
noop_cache module foo2:
path: {}
nonce: '2'
'''.format(foo foo))<line_sep>test_dir=shared.create_dir({'peru.yaml':peru_yaml})<line_sep>shared.run_peru_command(['sync'] test_dir)<line_sep>assert_parallel(1)<block_end><block_end>
|
<import_from_future_stmt> with_statement<import_from_stmt>alembic context<import_from_stmt>sqlalchemy create_engine pool<import_from_stmt>logging.config fileConfig<import_stmt>os<import_stmt>sys<line_sep>kp_path=os.path.dirname(os.path.abspath(__file__))<line_sep>kp_path=os.path.normpath(os.path.join(kp_path '..' '..' '..' '..'))<line_sep>sys.path.insert(1 kp_path)<import_from_stmt>king_phisher.server.database manager<import_from_stmt>king_phisher.server.database models<import_stmt>yaml<line_sep># this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config=context.config<line_sep># Interpret the config file for Python logging.
# This line sets up loggers basically.
<if_stmt><not>config.get_main_option('skip_logger_config')<block_start>fileConfig(config.config_file_name)<block_end># add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata=models.Base.metadata<line_sep># other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
database_connection_url=config.get_main_option('sqlalchemy.url')<if_stmt><not>database_connection_url# consume the x arguments provided on the command line
<block_start>x_arguments=context.get_x_argument(as_dictionary=<true>)<if_stmt>'config'<in>x_arguments<block_start>server_config=yaml.load(open(x_arguments['config']))<line_sep>database_connection_url=server_config['server']['database']<block_end><elif_stmt>'database'<in>x_arguments<block_start>database_connection_url=x_arguments['database']<block_end><else_stmt><block_start>print('[-] the database connection string has not been specified, either')<line_sep>print('[-] \'config\' or \'database\' must be specified via the -x option')<line_sep>print('[-] for example:')<line_sep>print(' -x database=driver://user:pass@localhost/dbname')<line_sep>print(' -x config=/path/to/server/config/file')<line_sep>os._exit(os.EX_USAGE)<block_end>database_connection_url=manager.normalize_connection_url(database_connection_url)<block_end><def_stmt>run_migrations_offline <block_start>"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""<line_sep>context.configure(url=database_connection_url target_metadata=target_metadata)<with_stmt>context.begin_transaction()<block_start>context.run_migrations()<block_end><block_end><def_stmt>run_migrations_online <block_start>"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""<line_sep>engine=create_engine(database_connection_url poolclass=pool.NullPool)<line_sep>connection=engine.connect()<line_sep>context.configure(connection=connection target_metadata=target_metadata)<try_stmt><block_start><with_stmt>context.begin_transaction()<block_start>context.run_migrations()<block_end><block_end><finally_stmt><block_start>connection.close()<block_end><block_end><if_stmt>context.is_offline_mode()<block_start>run_migrations_offline()<block_end><else_stmt><block_start>run_migrations_online()<block_end>
|
<import_stmt>numpy<as>np<import_from_stmt>nltk wordpunct_tokenize<import_stmt>nltk<import_stmt>itertools<import_stmt>operator<import_stmt>sklearn<import_stmt>re string<import_stmt>math<line_sep>SENTENCE_START_TOKEN="sentence_<PASSWORD>"<line_sep>SENTENCE_END_TOKEN="sentence_<PASSWORD>"<line_sep>UNKNOWN_TOKEN="<PASSWORD>"<def_stmt>load_data loc='./data/'<block_start>trainloc=loc+'20_news_group_sentences.txt'<line_sep>sentences=[]<with_stmt>open(trainloc 'r' encoding='utf8')<as>f<block_start><for_stmt>line f<block_start>sentences.append("%s %s %s"%(SENTENCE_START_TOKEN line SENTENCE_END_TOKEN))<block_end><block_end><return>sentences<block_end><def_stmt>build_dictionary loc='./data/' vocabulary_size=-1<block_start>trainloc=loc+'20_news_group_sentences.txt'<line_sep>document_frequency={}<line_sep>total_document=0<with_stmt>open(trainloc 'r' encoding='utf8')<as>f<block_start><for_stmt>line f<block_start>sentence=my_tokenizer(line)<for_stmt>token set(sentence)<block_start><if_stmt>token<in>document_frequency<block_start>document_frequency[token]<augadd>1<block_end><else_stmt><block_start>document_frequency[token]=1<block_end><block_end>total_document<augadd>1<block_end><block_end><for_stmt>key,value document_frequency.items()<block_start>document_frequency[key]=math.log(total_document/document_frequency[key])<block_end>vocab=sorted(document_frequency.items() key=operator.itemgetter(1) reverse=<true>)<line_sep>word_to_index={}<line_sep>index_to_word={}<line_sep>word_to_index[SENTENCE_START_TOKEN]=0<line_sep>word_to_index[SENTENCE_END_TOKEN]=1<line_sep>word_to_index[UNKNOWN_TOKEN]=2<line_sep>index_to_word[0]=SENTENCE_START_TOKEN<line_sep>index_to_word[1]=SENTENCE_END_TOKEN<line_sep>index_to_word[2]=UNKNOWN_TOKEN<line_sep>counter=3<for_stmt>key,value vocab<block_start><if_stmt>len(key)<l>4<block_start><continue><block_end><elif_stmt>counter<eq>vocabulary_size<block_start><break><block_end>word_to_index[key]=counter<line_sep>index_to_word[counter]=key<line_sep>counter<augadd>1<block_end><return>word_to_index index_to_word<block_end><def_stmt>my_tokenizer input<block_start>token_list=[]<line_sep>tokens=wordpunct_tokenize(input.lower())<line_sep>token_list.extend([x<for>x tokens<if><not>re.fullmatch('['+string.punctuation+']+' x)])<line_sep><return>token_list<block_end><def_stmt>get_train_data vocabulary_size<block_start>word_to_index,index_to_word=build_dictionary(vocabulary_size=vocabulary_size)<line_sep>sentences=load_data()<line_sep>sentences_tokenized=[my_tokenizer(sent)<for>sent sentences]<for_stmt>i,sent enumerate(sentences_tokenized)<block_start>sentences_tokenized[i]=[w<if>w<in>word_to_index<else>UNKNOWN_TOKEN<for>w sent]<block_end>sentences_indices=[]<for_stmt>sentence sentences_tokenized<block_start>sentences_indices.append([word_to_index[word]<for>word sentence])<block_end><return>sentences_indices word_to_index index_to_word<block_end><def_stmt>get_train_data_reversed vocabulary_size<block_start>sentences_indices,word_to_index,index_to_word=get_train_data(vocabulary_size)<line_sep>sentences_indices_reversed=[]<for_stmt>index_list sentences_indices<block_start>temp=[]<line_sep>temp.extend(index_list)<line_sep>temp.reverse()<line_sep>sentences_indices_reversed.append(temp)<block_end><return>sentences_indices_reversed word_to_index index_to_word<block_end><def_stmt>get_train_sentences 
vocabulary_size<block_start>sentences_indices,word_to_index,index_to_word=get_train_data(vocabulary_size)<line_sep>all_sentences=[]<line_sep>all_sentences.extend(sentences_indices)<line_sep>x_train=np.asarray([[w<for>w sentence[:-1]]<for>sentence all_sentences])<line_sep>y_train=np.asarray([[w<for>w sentence[1:]]<for>sentence all_sentences])<line_sep><return>x_train y_train word_to_index index_to_word<block_end><def_stmt>get_train_sentences_reversed vocabulary_size<block_start>sentences_indices_reversed,word_to_index,index_to_word=get_train_data_reversed(vocabulary_size)<line_sep>all_sentences=[]<line_sep>all_sentences.extend(sentences_indices_reversed)<line_sep>x_train=np.asarray([[w<for>w sentence[:-1]]<for>sentence all_sentences])<line_sep>y_train=np.asarray([[w<for>w sentence[1:]]<for>sentence all_sentences])<line_sep><return>x_train y_train word_to_index index_to_word<block_end>
|
# Time: O(k * n^2)
# Space: O(n^2)
<class_stmt>Solution(object)<block_start><def_stmt>knightProbability self N K r c<block_start>"""
:type N: int
:type K: int
:type r: int
:type c: int
:rtype: float
"""<line_sep>directions=[[1 2] [1 -2] [2 1] [2 -1] [-1 2] [-1 -2] [-2 1] [-2 -1]]<line_sep>dp=[[[1<for>_ xrange(N)]<for>_ xrange(N)]<for>_ xrange(2)]<for_stmt>step xrange(1 K+1)<block_start><for_stmt>i xrange(N)<block_start><for_stmt>j xrange(N)<block_start>dp[step%2][i][j]=0<for_stmt>direction directions<block_start>rr,cc=i+direction[0] j+direction[1]<if_stmt>0<le>cc<l>N<and>0<le>rr<l>N<block_start>dp[step%2][i][j]<augadd>0.125<times>dp[(step-1)%2][rr][cc]<block_end><block_end><block_end><block_end><block_end><return>dp[K%2][r][c]<block_end><block_end>
|
<import_stmt>io<import_stmt>time<import_stmt>cv2<import_stmt>numpy<as>np<import_stmt>pyglet<import_stmt>tensorflow<as>tf<import_stmt>tensorflow.keras.backend<as>K<import_from_stmt>matplotlib.backends.backend_agg FigureCanvasAgg<import_from_stmt>matplotlib.figure Figure<import_from_stmt>pyglet.gl *<import_from_stmt>sklearn.decomposition PCA<import_from_stmt>config BATCH_SIZE CLIP_REWARD DISCOUNT_FACTOR ENV_NAME EVAL_LENGTH FRAMES_BETWEEN_EVAL INPUT_SHAPE LEARNING_RATE LOAD_FROM MAX_EPISODE_LENGTH MAX_NOOP_STEPS MEM_SIZE MIN_REPLAY_BUFFER_SIZE PRIORITY_SCALE SAVE_PATH TOTAL_FRAMES UPDATE_FREQ WRITE_TENSORBOARD <import_from_stmt>train_dqn Agent GameWrapper ReplayBuffer build_q_network process_frame <line_sep># My installations require I run this to avoid errors with cuDNN.
# You can remove it if your system doesn't require it.
# (it shouldn't mess anything up if you keep it in)
gpus=tf.config.experimental.list_physical_devices('GPU')<if_stmt>gpus<block_start><try_stmt><block_start><for_stmt>gpu gpus<block_start>tf.config.experimental.set_memory_growth(gpu <true>)<block_end><block_end><except_stmt>RuntimeError<as>e<block_start>print(e)<block_end><block_end># Change this to the path of the model you would like to visualize
RESTORE_PATH=<none><if_stmt>RESTORE_PATH<is><none><block_start><raise>UserWarning('Please change the variable `RESTORE_PATH` to where you would like to load the model from. If you haven\'t trained a model, try \'example-save\'')<block_end>ENV_NAME='BreakoutDeterministic-v4'<line_sep>DISPLAY_FPS=<false><line_sep>DISPLAY_HUMAN_RENDERED=<true><line_sep>DISPLAY_MACHINE_RENDERED=<true><line_sep>DISPLAY_Q_VALUES=<true><line_sep>DISPLAY_VAL_CHART=<true><line_sep>DISPLAY_HEATMAP=<true><line_sep># Create environment
game_wrapper=GameWrapper(ENV_NAME MAX_NOOP_STEPS)<line_sep>print("The environment has the following {} actions: {}".format(game_wrapper.env.action_space.n game_wrapper.env.unwrapped.get_action_meanings()))<line_sep># Create agent
MAIN_DQN=build_q_network(game_wrapper.env.action_space.n LEARNING_RATE input_shape=INPUT_SHAPE)<line_sep>TARGET_DQN=build_q_network(game_wrapper.env.action_space.n input_shape=INPUT_SHAPE)<line_sep>replay_buffer=ReplayBuffer(size=MEM_SIZE input_shape=INPUT_SHAPE)<line_sep>agent=Agent(MAIN_DQN TARGET_DQN replay_buffer game_wrapper.env.action_space.n input_shape=INPUT_SHAPE)<line_sep>print('Loading agent...')<line_sep>agent.load(RESTORE_PATH)<def_stmt>display_nparray arr maxwidth=500<block_start><assert_stmt>len(arr.shape)<eq>3<line_sep>height,width,_channels=arr.shape<if_stmt>width<g>maxwidth<block_start>scale=maxwidth/width<line_sep>width=int(scale<times>width)<line_sep>height=int(scale<times>height)<block_end>image=pyglet.image.ImageData(arr.shape[1] arr.shape[0] 'RGB' arr.tobytes() pitch=arr.shape[1]<times>-3)<line_sep>glTexParameteri(GL_TEXTURE_2D GL_TEXTURE_MAG_FILTER GL_NEAREST)<line_sep>texture=image.get_texture()<line_sep>texture.width=width<line_sep>texture.height=height<line_sep><return>texture<block_end><def_stmt>generate_heatmap frame model<block_start><with_stmt>tf.GradientTape()<as>tape<block_start>last_conv_layer=model.get_layer('conv2d_2')<line_sep>iterate=tf.keras.models.Model([model.inputs] [model.output last_conv_layer.output])<line_sep>model_out,last_conv_layer=iterate(frame[np.newaxis : : :])<line_sep>class_out=model_out[: np.argmax(model_out[0])]<line_sep>grads=tape.gradient(class_out last_conv_layer)<line_sep>pooled_grads=K.mean(grads axis=(0 1 2))<block_end>heatmap=tf.reduce_mean(tf.multiply(pooled_grads last_conv_layer) axis=-1)<line_sep>heatmap=np.maximum(heatmap 0)<line_sep>heatmap<augdiv>np.max(heatmap)<line_sep>heatmap=heatmap.reshape((7 7))<line_sep>heatmap=cv2.resize(heatmap (frame.shape[1] frame.shape[0]))<line_sep>heatmap=cv2.applyColorMap(np.uint8(255<times>heatmap) cv2.COLORMAP_JET)/255<line_sep><return>heatmap<block_end><class_stmt>VisWindow(pyglet.window.Window)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<line_sep>self.set_minimum_size(400 300)<line_sep>self.frame_rate=1/60<line_sep>self.max_q_val=0.1<line_sep>self.min_q_val=-0.1<line_sep>self.fps_display=pyglet.window.FPSDisplay(self)<line_sep>self.fps_display.label.x=self.width-100<line_sep>self.fps_display.label.y=self.height-50<line_sep># For drawing screens
self.game_image=np.ones((210 160 3))<line_sep>self.state_image=np.ones((84 84 4))<line_sep># For keeping track of the simulated game
self.terminal=<true><line_sep>self.eval_rewards=[]<line_sep>self.evaluate_frame_number=0<line_sep>self.episode_reward_sum=0<line_sep>self.life_lost=<true><line_sep>self.q_vals=[0]<times>game_wrapper.env.action_space.n<line_sep>self.values=[]<line_sep># Text
self.human_title=pyglet.text.Label('Human-Rendered Game Screen' font_size=20 color=(0 0 0 255) x=10 y=self.height-20 anchor_y='center')<line_sep>self.q_val_title=pyglet.text.Label('Q-Values' font_size=20 color=(0 0 0 255) x=500 y=self.height-20 anchor_y='center')<line_sep>self.agent_title=pyglet.text.Label('Agent-Rendered Game Screen' font_size=20 color=(0 0 0 255) x=10 y=235 anchor_y='center')<line_sep>self.heatmap_title=pyglet.text.Label('Attention Heatmap' font_size=20 color=(0 0 0 255) x=1000 y=self.height-140 anchor_y='center')<line_sep>self.action_titles=[]<for_stmt>i,action enumerate(game_wrapper.env.unwrapped.get_action_meanings())<block_start>self.action_titles.append(pyglet.text.Label(action font_size=20 color=(0 0 0 255) x=0 y=0 anchor_x='center'))<block_end><block_end><def_stmt>on_draw self<block_start>self.clear()<line_sep>glClearColor(1. 1. 1. 1.)<line_sep>glTexParameteri(GL_TEXTURE_2D GL_TEXTURE_MAG_FILTER GL_NEAREST)<line_sep>glTexParameteri(GL_TEXTURE_2D GL_TEXTURE_MIN_FILTER GL_NEAREST)<line_sep>self.switch_to()<line_sep>self.dispatch_events()<line_sep># Draw FPS counter
<if_stmt>DISPLAY_FPS<block_start>self.fps_display.draw()<block_end># Display RGB "human" version of the game state
<if_stmt>DISPLAY_HUMAN_RENDERED<block_start>self.human_title.draw()<line_sep>base_dimensions=(210 160)<line_sep>scale=2<line_sep>display_nparray(cv2.resize(self.game_image dsize=(int(base_dimensions[1]<times>scale) int(base_dimensions[0]<times>scale)) interpolation=cv2.INTER_CUBIC)).blit(50 self.height-base_dimensions[0]<times>scale-50)<block_end># Display grayscale "machine" version of the game state (this is what the agent really sees)
<if_stmt>DISPLAY_MACHINE_RENDERED<block_start>self.agent_title.draw()<line_sep>base_dimensions=(84 84)<line_sep>scale=2.5<line_sep># For some strange reason, we must render this in RGB mode (not 'L' mode as implied in the pyglet docs)
# Because of this, we must repeat each frame 3 times to simulate RGB, despite being grayscale
state_images=[np.repeat(self.state_image[: : i np.newaxis] 3 axis=2)<for>i range(self.state_image.shape[-1])]<for_stmt>i,state_image enumerate(state_images)<block_start>display_nparray(cv2.resize(state_image dsize=(int(base_dimensions[1]<times>scale) int(base_dimensions[0]<times>scale)) interpolation=cv2.INTER_CUBIC)).blit(10+i<times>(84<times>scale+5) 10)<block_end><block_end># Display q-values
<if_stmt>DISPLAY_Q_VALUES<block_start>self.q_val_title.draw()<line_sep>LENGTH=80<line_sep>STARTING_X=400<for_stmt>i,(q_val label) enumerate(zip(self.q_vals[::-1] self.action_titles[::-1]))<block_start><if_stmt>q_val<g>self.max_q_val<block_start>self.max_q_val=q_val<block_end><elif_stmt>q_val<l>self.min_q_val<block_start>self.min_q_val=q_val<block_end># Draw square representing the q-val
x_value=STARTING_X+i<times>(LENGTH+10)# x-coordinate to draw square
color=(150<power>(q_val<times>2))/(sum([150<power>(q<times>2)<for>q self.q_vals])+0.0001)<line_sep>pyglet.graphics.draw(4 GL_QUADS ('v2f' (x_value self.height-50 x_value+LENGTH self.height-50 x_value+LENGTH self.height-LENGTH-50 x_value self.height-LENGTH-50)) ('c3f' (color color color color color color color color color color color color)))<line_sep># Draw action label
glTranslatef(x_value+LENGTH/2 self.height-100-LENGTH 0.0)<line_sep>glRotatef(-90.0 0.0 0.0 1.0)<line_sep>label.draw()<line_sep>glRotatef(90.0 0.0 0.0 1.0)<line_sep>glTranslatef(-(x_value+LENGTH/2) -(self.height-100-LENGTH) 0.0)<block_end><block_end># Display value history (adapted from https://learning.oreilly.com/library/view/matplotlib-plotting-cookbook/9781849513265/ch08s06.html)
<if_stmt>DISPLAY_VAL_CHART<block_start>dpi_res=min(self.width self.height)/10<line_sep>fig=Figure((500/dpi_res 230/dpi_res) dpi=dpi_res)<line_sep>ax=fig.add_subplot(111)<line_sep># Set up plot
ax.set_title('Estimated Value over Time' fontsize=20)<line_sep>ax.set_xticklabels([])<line_sep>ax.set_ylabel('V(s)')<line_sep>ax.plot(self.values[max(len(self.values)-200 0):])# plot values
w,h=fig.get_size_inches()<line_sep>dpi_res=fig.get_dpi()<line_sep>w,h=int(np.ceil(w<times>dpi_res)) int(np.ceil(h<times>dpi_res))<line_sep>canvas=FigureCanvasAgg(fig)<line_sep>pic_data=io.BytesIO()<line_sep>canvas.print_raw(pic_data dpi=dpi_res)<line_sep>img=pyglet.image.ImageData(w h 'RGBA' pic_data.getvalue() -4<times>w)<line_sep>img.blit(375 265)<block_end># Display heatmap
<if_stmt>DISPLAY_HEATMAP<and>self.evaluate_frame_number<g>1<block_start>self.heatmap_title.draw()<line_sep>base_dimensions=(84 84)<line_sep>INTENSITY=0.1<line_sep>scale=10<line_sep>processed_frame=np.repeat(self.state_image[: : 3 np.newaxis] 3 axis=2)<line_sep>heatmap=generate_heatmap(game_wrapper.state agent.DQN)<line_sep>img=(heatmap<times>255<times>INTENSITY+processed_frame<times>0.8).astype(np.uint8)<line_sep>display_nparray(cv2.resize(img+(heatmap<times>255<times>INTENSITY).astype(np.uint8) dsize=(int(base_dimensions[1]<times>scale) int(base_dimensions[0]<times>scale)) interpolation=cv2.INTER_CUBIC)).blit(880 60)<block_end>self.flip()<block_end><def_stmt>update self dt<block_start><if_stmt>self.terminal<block_start>game_wrapper.reset(evaluation=<true>)<line_sep>self.life_lost=<true><line_sep>self.episode_reward_sum=0<line_sep>self.terminal=<false><block_end>self.q_vals,value=agent.get_intermediate_representation(game_wrapper.state ['add' 'dense'] stack_state=<false>)<line_sep>self.q_vals,value=self.q_vals[0] value[0]<line_sep>action=1<if>self.life_lost<else>self.q_vals.argmax()<line_sep>self.values.append(value)<line_sep>_,reward,self.terminal,self.life_lost,self.game_image=game_wrapper.step(action render_mode='rgb_array')<line_sep>self.evaluate_frame_number<augadd>1<line_sep>self.episode_reward_sum<augadd>reward<line_sep>self.state_image=game_wrapper.state<if_stmt>self.terminal<block_start>self.eval_rewards.append(self.episode_reward_sum)<line_sep>self.values=[]<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>print('Finished setup. Visualizing...')<line_sep>window=VisWindow(1400 720 "RL Visualizer" resizable=<true>)<line_sep>pyglet.clock.schedule_interval(window.update window.frame_rate)<line_sep>pyglet.app.run()<block_end>
|
"""
Given the root of a binary tree, return the sum of every tree node's tilt.
The tilt of a tree node is the absolute difference between the sum of all left subtree node values and all right subtree node values. If a node does not have a left child, then the sum of the left subtree node values is treated as 0. The rule is similar if the node does not have a right child.
Example 1:
Input: root = [1,2,3]
Output: 1
Explanation:
Tilt of node 2 : |0-0| = 0 (no children)
Tilt of node 3 : |0-0| = 0 (no children)
Tilt of node 1 : |2-3| = 1 (left subtree is just left child, so sum is 2; right subtree is just right child, so sum is 3)
Sum of every tilt : 0 + 0 + 1 = 1
Example 2:
Input: root = [4,2,9,3,5,null,7]
Output: 15
Explanation:
Tilt of node 3 : |0-0| = 0 (no children)
Tilt of node 5 : |0-0| = 0 (no children)
Tilt of node 7 : |0-0| = 0 (no children)
Tilt of node 2 : |3-5| = 2 (left subtree is just left child, so sum is 3; right subtree is just right child, so sum is 5)
Tilt of node 9 : |0-7| = 7 (no left child, so sum is 0; right subtree is just right child, so sum is 7)
Tilt of node 4 : |(3+5+2)-(9+7)| = |10-16| = 6 (left subtree values are 3, 5, and 2, which sum to 10; right subtree values are 9 and 7, which sum to 16)
Sum of every tilt : 0 + 0 + 0 + 2 + 7 + 6 = 15
Example 3:
Input: root = [21,7,14,1,1,2,2,3,3]
Output: 9
Constraints:
The number of nodes in the tree is in the range [0, 10^4].
-1000 <= Node.val <= 1000
"""<line_sep># Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
<class_stmt>Solution(object)<block_start><def_stmt>findTilt self root<block_start>"""
:type root: TreeNode
:rtype: int
"""<def_stmt>find_tilt root<block_start><if_stmt>root<is><none><block_start><return>0 0<block_end>lstilt,ls=find_tilt(root.left)<line_sep>rstilt,rs=find_tilt(root.right)<line_sep><return>abs(ls-rs)+lstilt+rstilt ls+rs+root.val<block_end>stilt,s=find_tilt(root)<line_sep><return>stilt<block_end><block_end>
|
<import_stmt>logging<import_from_stmt>. Backend register_backend<import_from_stmt>..errors CLEError<try_stmt><block_start><import_stmt>arpy<block_end><except_stmt>ImportError<block_start>arpy=<none><block_end>l=logging.getLogger(__name__)<class_stmt>StaticArchive(Backend)<block_start>@classmethod<def_stmt>is_compatible cls stream<block_start>stream.seek(0)<line_sep><return>stream.read(8)<eq>b'!<arch>\n'<block_end>is_default=<true><def_stmt>__init__ self *args **kwargs<block_start><if_stmt>arpy<is><none><block_start><raise>CLEError("run `pip install arpy==1.1.1` to load archive files")<block_end>super().__init__(*args **kwargs)<line_sep># hack: we are using a loader internal method in a non-kosher way which will cause our children to be
# marked as the main binary if we are also the main binary
# work around this by setting ourself here:
<if_stmt>self.loader.main_object<is><none><block_start>self.loader.main_object=self<block_end>ar=arpy.Archive(fileobj=self._binary_stream)<line_sep>ar.read_all_headers()<for_stmt>name,stream ar.archived_files.items()<block_start>child=self.loader._load_object_isolated(stream)<line_sep>child.binary=child.binary_basename=name.decode()<line_sep>child.parent_object=self<line_sep>self.child_objects.append(child)<block_end><if_stmt>self.child_objects<block_start>self.arch=self.child_objects[0].arch<block_end><else_stmt><block_start>l.warning("Loaded empty static archive?")<block_end>self.has_memory=<false><line_sep>self.pic=<true><line_sep># hack pt. 2
<if_stmt>self.loader.main_object<is>self<block_start>self.loader.main_object=<none><block_end><block_end><block_end>register_backend('AR' StaticArchive)<line_sep>
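A rough usage sketch (the archive path below is a placeholder, and this assumes CLE is installed alongside arpy): loading a `.a` file selects this backend via its `!<arch>` magic check, and the archive members are exposed as child objects of the loaded archive.

# Rough sketch -- 'libfoo.a' is a placeholder path.
import cle

ld = cle.Loader('libfoo.a')
for child in ld.main_object.child_objects:
    print(child.binary_basename, child.arch)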
|
<import_stmt>sys<line_sep>pl_data=[]<with_stmt>open(sys.argv[1] "r")<as>f<block_start><for_stmt>line f<block_start>pl_data.append(line.strip())<block_end><block_end>pl_data=set(pl_data)<with_stmt>open(sys.argv[1]+".unique" "w")<as>f<block_start><for_stmt>elem pl_data<block_start>f.write(elem+"\n")<block_end><block_end>
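A brief usage sketch (the script and input file names here are placeholders): the script takes one argument, the file whose lines should be de-duplicated, and writes the result next to it.

# Hypothetical invocation:
#   python unique_lines.py wordlist.txt
# produces wordlist.txt.unique; line order is not preserved, since a set is used.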
|
<import_from_stmt>elastichq.globals scheduler<line_sep># TODO: rename this to Metrics Service and move to service package
<class_stmt>JobPool()<block_start>app=<none><def_stmt>init_app self app<block_start>self.app=app<line_sep><return>self<block_end><def_stmt>blah self<block_start>JOB={'trigger':'interval' 'seconds':3# ,
# 'args': (app, 'in')
}<line_sep>scheduler.add_job('job1' self.do_task **JOB)<block_end><def_stmt>do_task self<block_start><import_from_stmt>elastichq.service ClusterService<line_sep>clusters=ClusterService().get_clusters(create_if_missing=<false>)<line_sep><return>clusters<block_end><block_end>
|
<import_from_stmt>mindsdb.api.mysql.mysql_proxy.datahub.datahub init_datahub<line_sep>
|
# Copyright 2021 Kyoto University (<NAME>)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Chunkwise attention in MoChA at test time."""<import_stmt>logging<import_stmt>numpy<as>np<import_stmt>torch<line_sep>logger=logging.getLogger(__name__)<def_stmt>hard_chunkwise_attention alpha u mask chunk_size H_ca sharpening_factor share_chunkwise_attention<block_start>"""Chunkwise attention in MoChA at test time.
Args:
alpha (FloatTensor): `[B, H_ma, qlen, klen]`
u (FloatTensor): `[B, (H_ma*)H_ca, qlen, klen]`
mask (ByteTensor): `[B, qlen, klen]`
chunk_size (int): window size for chunkwise attention
H_ca (int): number of chunkwise attention heads
sharpening_factor (float): sharpening factor for beta calculation
share_chunkwise_attention (bool): share CA heads among MA heads
Returns:
beta (FloatTensor): `[B, H_ma * H_ca, qlen, klen]`
"""<line_sep>bs,H_ma,qlen,klen=alpha.size()<assert_stmt>(u.size(2)<eq>qlen)<and>(u.size(3)<eq>klen) (u.size() alpha.size())<line_sep>alpha=alpha.unsqueeze(2)# `[B, H_ma, 1, qlen, klen]`
u=u.unsqueeze(1)# `[B, 1, (H_ma*)H_ca, qlen, klen]`
<if_stmt>H_ca<g>1<block_start>alpha=alpha.repeat([1 1 H_ca 1 1])<block_end><if_stmt>H_ma<g>1<block_start><if_stmt>share_chunkwise_attention<block_start>u=u.repeat([1 H_ma 1 1 1])<block_end><else_stmt><block_start>u=u.view(bs H_ma H_ca qlen klen)<block_end><block_end>mask=alpha.clone().byte()# `[B, H_ma, H_ca, qlen, klen]`
<for_stmt>b range(bs)<block_start><for_stmt>h range(H_ma)<block_start><if_stmt>alpha[b h 0 0].sum()<g>0<block_start>boundary=alpha[b h 0 0].nonzero()[: -1].min().item()<if_stmt>chunk_size<eq>-1# infinite lookback attention
<block_start>mask[b h : 0 0:boundary+1]=1<block_end><else_stmt><block_start>mask[b h : 0 max(0 boundary-chunk_size+1):boundary+1]=1<block_end><block_end><block_end><block_end>NEG_INF=float(np.finfo(torch.tensor(0 dtype=u.dtype).numpy().dtype).min)<line_sep>u=u.masked_fill(mask<eq>0 NEG_INF)<line_sep>beta=torch.softmax(u dim=-1)<line_sep><return>beta.view(bs -1 qlen klen)<block_end>
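As an illustration of the shapes involved, here is a minimal sketch (dummy tensors, values chosen only for the example) that feeds a hard monotonic attention distribution with one boundary per utterance into the function; the returned beta is restricted to a window of chunk_size frames ending at each boundary. Note that the mask argument is re-derived from alpha inside the function, so None is passed here.

# Minimal usage sketch with dummy tensors (B=2, H_ma=1, H_ca=1, qlen=1, klen=8).
if __name__ == '__main__':
    B, H_ma, H_ca, qlen, klen = 2, 1, 1, 1, 8
    alpha = torch.zeros(B, H_ma, qlen, klen)
    alpha[0, 0, 0, 5] = 1.  # boundary at frame 5 for the first utterance
    alpha[1, 0, 0, 2] = 1.  # boundary at frame 2 for the second utterance
    u = torch.randn(B, H_ma * H_ca, qlen, klen)  # chunk energies
    beta = hard_chunkwise_attention(alpha, u, mask=None, chunk_size=4,
                                    H_ca=H_ca, sharpening_factor=1.0,
                                    share_chunkwise_attention=True)
    # beta is `[B, H_ma * H_ca, qlen, klen]`; each row sums to 1 and is
    # non-zero only within the chunk window ending at the detected boundary.
    print(beta.shape)
    print(beta.sum(-1))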
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Roboterclub Aachen e.V.
# All rights reserved.
#
# The file is part of the xpcc library and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
# -----------------------------------------------------------------------------
<import_stmt>itertools<import_from_stmt>logger Logger<import_from_stmt>..writer XMLDeviceWriter<import_from_stmt>. avr_io<class_stmt>AVRDeviceWriter(XMLDeviceWriter)<block_start>""" AVRDeviceWriter
Translates the Device to a XPCC specific format.
"""<def_stmt>__init__ self device logger=<none><block_start>XMLDeviceWriter.__init__(self device logger)<line_sep>self.root.removeAttribute('size_id')<line_sep>self.log.info(("Generating Device File for '%s'."%self.device.ids.string))<line_sep>self.types=self.device.ids.getAttribute('type')<line_sep>self.pin_ids=self.device.ids.getAttribute('pin_id')<line_sep>self.names=self.device.ids.getAttribute('name')<line_sep>self.family=self.device.ids.intersection.family<line_sep># search the io dictionary for this device
# we only need one pin name to identify the device group
pin_name=self.device.getProperty('pin-name').values[0].value<line_sep>self.io=[a<for>a avr_io.pins<if>pin_name<in>a['devices']]<if_stmt>len(self.io)<g>0<block_start>self.io=self.io[0]<block_end><else_stmt><block_start>self.io={}<if_stmt>self.device.id.family<ne>'xmega'<block_start>self.log.warn("AvrWriter: IO not found for device '%s' with pin-name: '%s'"%(self.device.id.string pin_name))<block_end><block_end>self.addDeviceAttributesToNode(self.root 'flash')<line_sep>self.addDeviceAttributesToNode(self.root 'ram')<line_sep>self.addDeviceAttributesToNode(self.root 'eeprom')<line_sep>self.addDeviceAttributesToNode(self.root 'core')<line_sep>self.addDeviceAttributesToNode(self.root 'mcu')<line_sep>pin_count_child=self.root.addChild('pin-count')<if_stmt>self.family<eq>'xmega'# the int in the type is the package device_id
# i.e. A1, B1 = 100 pins, A3, C3 = 64 pins, etc...
<block_start>pins=[0 100 0 64 44 32]<line_sep>pin_count_child.setValue(pins[int(self.types[0][1:])])<block_end><else_stmt># the AT90, ATtiny and ATmega have very weird pin counts, with so many different packages
<block_start>pin_count_child.setValue(0)<block_end><for_stmt>header ['avr/io.h' 'avr/interrupt.h']<block_start>header_child=self.root.addChild('header')<line_sep>header_child.setValue(header)<block_end># self.addDeviceAttributesToNode(self.root, 'define')
core_child=self.root.addChild('driver')<line_sep>core_child.setAttributes({'type':'core' 'name':'avr'})<line_sep>ram_sizes=self.device.getProperty('ram')<for_stmt>ram_size ram_sizes.values<block_start>size=ram_size.value<line_sep># for large RAM sizes, reserve 1kB for stack
# for small RAM sizes, reserve half of entire size for stack
<if_stmt>size<g>2048<block_start>size<augsub>1024<block_end><else_stmt><block_start>size<augdiv>2<block_end><for_stmt>device_id ram_size.ids.differenceFromIds(self.device.ids)<block_start>attr=self._getAttributeDictionaryFromId(device_id)<line_sep>attr['name']='ram_length'<line_sep>ram_size_child=core_child.addChild('parameter')<line_sep>ram_size_child.setAttributes(attr)<line_sep>ram_size_child.setValue(size)<line_sep>attr2=self._getAttributeDictionaryFromId(device_id)<line_sep>attr2['name']='ram_block_length'<line_sep>block_size=4<while_stmt>(size/block_size<g>127)<block_start>block_size<augmul>2<block_end>ram_block_child=core_child.addChild('parameter')<line_sep>ram_block_child.setAttributes(attr2)<line_sep>ram_block_child.setValue(block_size)<block_end><block_end># ADC
self.addAdcToNode(self.root)<line_sep># Clock
clock_child=self.root.addChild('driver')<line_sep>clock_child.setAttributes({'type':'clock' 'name':'avr'})<line_sep># DAC
self.addDacToNode(self.root)<line_sep># I2C aka TWI
self.addI2cToNode(self.root)<line_sep># SPI
self.addSpiToNode(self.root)<line_sep># Timer
self.addTimerToNode(self.root)<line_sep># UART
self.addUartToNode(self.root)<line_sep># USI can be used to emulate UART, SPI and I2C, so there should not be a seperate driver for it.
# self.addUsiToNode(self.root)
# GPIO
self.addGpioToNode(self.root)<block_end><def_stmt>addDeviceAttributesToNode self node name<block_start>properties=self.device.getProperty(name)<if_stmt>properties<eq><none><block_start><return><block_end><for_stmt>prop properties.values<block_start><for_stmt>device_id prop.ids.differenceFromIds(self.device.ids)<block_start>attr=self._getAttributeDictionaryFromId(device_id)<line_sep>child=node.addChild(name)<line_sep>child.setAttributes(attr)<line_sep>child.setValue(prop.value)<block_end><block_end><block_end><def_stmt>addModuleAttributesToNode self node peripheral name family=<none><block_start><if_stmt>family<eq><none><block_start>family=self.family<block_end>modules=self.device.getProperty('modules')<for_stmt>prop modules.values<block_start><if_stmt>any(m<for>m prop.value<if>m.startswith(peripheral))<block_start><for_stmt>device_id prop.ids.differenceFromIds(self.device.ids)<block_start>attr=self._getAttributeDictionaryFromId(device_id)<line_sep>driver=node.addChild('driver')<line_sep>driver.setAttributes(attr)<line_sep>driver.setAttributes({'type':name 'name':family})<block_end><block_end><block_end><block_end><def_stmt>addModuleInstancesAttributesToNode self node peripheral name family=<none><block_start><if_stmt>family<eq><none><block_start>family=self.family<block_end>modules=self.device.getProperty('modules')<for_stmt>prop modules.values<block_start>instances=[]<for_stmt>module [m<for>m prop.value<if>m.startswith(peripheral)]<block_start>instances.append(module[len(peripheral):])<block_end><if_stmt>len(instances)<eq>0<block_start><continue><block_end>instances.sort()<for_stmt>device_id prop.ids.differenceFromIds(self.device.ids)<block_start>attr=self._getAttributeDictionaryFromId(device_id)<line_sep>driver=node.addChild('driver')<line_sep>driver.setAttributes(attr)<line_sep>driver.setAttributes({'type':name 'name':family})<if_stmt>len(instances)<g>0<block_start>driver.setAttribute('instances' ",".join(instances))<block_end><if_stmt>name<in>self.io<block_start><for_stmt>io self.io[name]<block_start>ch=driver.addChild('gpio')<line_sep>ch.setAttributes(io)<block_end><block_end><block_end><block_end><block_end><def_stmt>addI2cToNode self node<block_start>family='at90_tiny_mega'<if>(self.family<in>['at90' 'attiny' 'atmega'])<else>self.family<if_stmt>self.family<eq>'xmega'<block_start>self.addModuleInstancesAttributesToNode(node 'TWI' 'i2c' family)<block_end><else_stmt><block_start>self.addModuleAttributesToNode(node 'TWI' 'i2c' family)<block_end><block_end><def_stmt>addSpiToNode self node<block_start>family='at90_tiny_mega'<if>(self.family<in>['at90' 'attiny' 'atmega'])<else>self.family<if_stmt>self.family<eq>'xmega'<block_start>self.addModuleInstancesAttributesToNode(node 'SPI' 'spi' family)<block_end><else_stmt><block_start>self.addModuleAttributesToNode(node 'SPI' 'spi' family)<block_end><block_end><def_stmt>addAdcToNode self node<block_start><if_stmt>self.family<eq>'at90'<and>self.types[0]<in>['usb' 'can' 'pwm']<block_start>family='at90'<block_end><else_stmt><block_start>family='at90_tiny_mega'<if>(self.family<in>['at90' 'attiny' 'atmega'])<else>self.family<block_end><if_stmt>self.family<eq>'xmega'<block_start>self.addModuleInstancesAttributesToNode(node 'ADC' 'adc' family)<block_end><else_stmt><block_start>self.addModuleAttributesToNode(node 'AD_CONVERTER' 'adc' family)<block_end><block_end><def_stmt>addDacToNode self node<block_start><if_stmt>self.family<eq>'xmega'<block_start>self.addModuleInstancesAttributesToNode(node 'DAC' 
'dac')<block_end><else_stmt><block_start>self.addModuleAttributesToNode(node 'DA_CONVERTER' 'dac')<block_end><block_end><def_stmt>addUsiToNode self node<block_start><if_stmt>self.family<ne>'xmega'<block_start>family='at90_tiny_mega'<if>(self.family<in>['at90' 'attiny' 'atmega'])<else>self.family<line_sep>self.addModuleAttributesToNode(node 'USI' 'usi' family)<block_end><block_end><def_stmt>addTimerToNode self node<block_start><if_stmt>self.family<eq>'xmega'<block_start>self.addModuleInstancesAttributesToNode(node 'TC' 'timer')<block_end><else_stmt><block_start>self.addModuleInstancesAttributesToNode(node 'TIMER_COUNTER_' 'timer')<block_end><block_end><def_stmt>addUartToNode self node<block_start>family='at90_tiny_mega'<if>(self.family<in>['at90' 'attiny' 'atmega'])<else>self.family<line_sep># this is special, some AT90_Tiny_Megas can put their USART into SPI mode
# we have to parse this specially.
uartSpi='uartspi'<in>self.io<or>self.family<eq>'xmega'<line_sep>modules=self.device.getProperty('modules')<for_stmt>prop modules.values<block_start>instances=[]<for_stmt>module [m<for>m prop.value<if>m.startswith('USART')]<block_start><if_stmt>self.family<eq>'xmega'<block_start>instances.append(module[5:7])<block_end><else_stmt># some device only have a 'USART', but we want 'USART0'
<block_start>mod=module+'0'<line_sep>instances.append(mod[5:6])<block_end><block_end><if_stmt>instances<ne>[]<block_start>instances=list(set(instances))<line_sep>instances.sort()<for_stmt>device_id prop.ids.differenceFromIds(self.device.ids)<block_start>attr=self._getAttributeDictionaryFromId(device_id)<line_sep>driver=node.addChild('driver')<line_sep>driver.setAttributes(attr)<line_sep>driver.setAttributes({'type':'uart' 'name':family})<if_stmt>uartSpi<block_start>spiDriver=node.addChild('driver')<line_sep>spiDriver.setAttributes(attr)<line_sep>spiDriver.setAttributes({'type':'spi' 'name':family+"_uart"})<block_end>driver.setAttribute('instances' ",".join(instances))<if_stmt>uartSpi<block_start>spiDriver.setAttribute('instances' ",".join(instances))<block_end>ram_sizes=self.device.getProperty('ram')<for_stmt>ram_size ram_sizes.values<block_start>size=ram_size.value<line_sep># for small RAM sizes, reserve only 16 bytes for the tx buffer
<if_stmt>size<l>1024<or>size<g>1024<times>4<block_start><for_stmt>ram_id ram_size.ids.differenceFromIds(self.device.ids)<block_start>attr=self._getAttributeDictionaryFromId(ram_id)<line_sep>attr['name']='tx_buffer'<line_sep>ram_size_child=driver.addChild('parameter')<line_sep>ram_size_child.setAttributes(attr)<line_sep>ram_size_child.setValue(16<if>size<l>1024<else>250)<block_end><block_end><block_end><block_end><block_end><block_end><block_end><def_stmt>addGpioToNode self node<block_start>family='at90_tiny_mega'<if>(self.family<in>['at90' 'attiny' 'atmega'])<else>self.family<line_sep>props=self.device.getProperty('gpios')<line_sep>driver=node.addChild('driver')<line_sep>driver.setAttributes({'type':'gpio' 'name':family})<for_stmt>prop props.values<block_start>gpios=prop.value<line_sep>gpios.sort(key=<lambda>k:(k['port'] k['id']))<for_stmt>device_id prop.ids.differenceFromIds(self.device.ids)<block_start>device_dict=self._getAttributeDictionaryFromId(device_id)<for_stmt>gpio gpios<block_start>gpio_child=driver.addChild('gpio')<line_sep>gpio_child.setAttributes(device_dict)<for_stmt>name ['port' 'id' 'pcint' 'extint']<block_start><if_stmt>name<in>gpio<block_start>gpio_child.setAttribute(name gpio[name])<block_end><block_end><for_stmt>af gpio['af']<block_start>af_child=gpio_child.addChild('af')<line_sep>af_child.setAttributes(af)<block_end><block_end><block_end><block_end><block_end><def_stmt>_getAttributeDictionaryFromId self device_id<block_start>target=device_id.properties<line_sep>device_dict={}<for_stmt>attr target<block_start><if_stmt>target[attr]<ne><none><block_start><if_stmt>attr<eq>'type'<block_start>device_dict['device-type']=target[attr]<block_end><if_stmt>attr<eq>'name'<block_start>device_dict['device-name']=target[attr]<block_end><if_stmt>attr<eq>'pin_id'<block_start>device_dict['device-pin-id']=target[attr]<block_end><block_end><block_end><return>device_dict<block_end><def_stmt>_addNamingSchema self<block_start><if_stmt>self.family<eq>'xmega'<block_start>naming_schema='at{{ family }}{{ name }}{{ type }}{{ pin_id }}'<line_sep>identifiers=list(itertools.product(("at" ) (self.family ) self.names self.types self.pin_ids))<line_sep>devices=['at'+d.string.replace('none' '')<for>d self.device.ids]<block_end><elif_stmt>self.family<eq>'at90'<block_start>naming_schema='{{ family }}{{ type }}{{ name }}'<line_sep>identifiers=list(itertools.product((self.family ) self.types self.names))<line_sep>devices=[d.string.replace('none' '')<for>d self.device.ids]<block_end><else_stmt><block_start>naming_schema='{{ family }}{{ name }}{{ type }}'<line_sep>identifiers=list(itertools.product((self.family ) self.names self.types))<line_sep>devices=[d.string.replace('none' '')<for>d self.device.ids]<block_end><for_stmt>identifier_parts identifiers<block_start>identifier=''.join(identifier_parts).replace('none' '')<if_stmt>identifier<not><in>devices<block_start>child=self.root.prependChild('invalid-device')<line_sep>child.setValue(identifier)<block_end><else_stmt><block_start>devices.remove(identifier)<block_end><block_end><for_stmt>device devices<block_start>self.log.error("Found device not matching naming schema: '{}'".format(device))<block_end>child=self.root.prependChild('naming-schema')<line_sep>child.setValue(naming_schema)<block_end><def_stmt>write self folder<block_start>self._addNamingSchema()<line_sep>names=self.names<line_sep>names.sort(key=int)<line_sep>types=self.types<line_sep>name=self.family+"-".join(["_".join(names) 
"_".join(types)])+".xml"<if_stmt>self.family<eq>'xmega'<block_start>name=name[:-4]+"-"+"_".join(self.pin_ids)+".xml"<block_end>self.writeToFolder(folder name)<block_end><def_stmt>__repr__ self<block_start><return>self.__str__()<block_end><def_stmt>__str__ self<block_start><return>"AVRDeviceWriter(\n"+self.toString()+")"<block_end><block_end>
|
# coding: utf-8
<import_from_stmt>flask_admin.babel Translations<import_from_stmt>flask_admin.form rules# noqa
<import_from_stmt>flask_admin.form.fields DateTimeField JSONField Select2Field Select2TagsField TimeField <import_from_stmt>flask_admin.form.widgets Select2TagsWidget<import_from_stmt>flask_admin.model.fields InlineFieldList InlineFormField<import_from_stmt>flask_wtf FlaskForm<import_from_stmt>quokka.admin.fields SmartSelect2Field<import_from_stmt>quokka.admin.wtforms_html5 AutoAttrMeta<import_from_stmt>wtforms fields<as>_fields<import_from_stmt>wtforms widgets<as>_widgets<import_from_stmt>wtforms validators# noqa
<import_from_stmt>wtforms.validators ValidationError<line_sep># from wtforms_components import read_only # noqa
# from wtforms_components import ReadOnlyWidgetProxy # noqa
<class_stmt>PassiveField(object)<block_start>"""
Passive field that does not populate obj values.
"""<def_stmt>populate_obj self obj name<block_start><pass><block_end><block_end><class_stmt>PassiveHiddenField(PassiveField _fields.HiddenField)<block_start><pass><block_end><class_stmt>PassiveStringField(PassiveField _fields.StringField)<block_start><pass><block_end>fields=_fields# noqa
fields.SmartSelect2Field=SmartSelect2Field<line_sep>fields.DateTimeField=DateTimeField<line_sep>fields.TimeField=TimeField<line_sep>fields.Select2Field=Select2Field<line_sep>fields.Select2TagsField=Select2TagsField<line_sep>fields.JSONField=JSONField<line_sep>fields.InlineFieldList=InlineFieldList<line_sep>fields.InlineFormField=InlineFormField<line_sep>fields.PassiveHiddenField=PassiveHiddenField<line_sep>fields.PassiveStringField=PassiveStringField<line_sep>widgets=_widgets<line_sep>widgets.Select2TagsWidget=Select2TagsWidget<line_sep>READ_ONLY={'readonly':<true>}<class_stmt>Form(FlaskForm)<block_start>"""Base class to customize wtforms"""<line_sep>_translations=Translations()<line_sep>Meta=AutoAttrMeta<def_stmt>_get_translations self<block_start><return>self._translations<block_end><block_end><class_stmt>CallableValidator(object)<block_start>"""
Takes a callable and validates using it
"""<def_stmt>__init__ self function message=<none><block_start>self.function=function<line_sep>self.message=message<block_end><def_stmt>__call__ self form field<block_start>validation=self.function(form field)<if_stmt>validation<is><not><none><block_start><raise>ValidationError(self.message<or>validation)<block_end><block_end><block_end>validators.CallableValidator=CallableValidator<line_sep>rules.csrf_token=rules.Field('csrf_token' render_field='quokka_macros.render_hidden_field')<line_sep>
|
<import_stmt>os<line_sep>PKG_BASE=os.path.dirname(__file__)<line_sep>RESOURCES=os.path.join(PKG_BASE "resources")<line_sep>
|
<import_stmt>argparse<import_stmt>os.path<as>Path<import_stmt>warnings<import_stmt>custom_transforms<import_stmt>time<import_stmt>torch.backends.cudnn<as>cudnn<import_stmt>torch.nn.functional<as>F<import_stmt>torch.optim<import_stmt>torch.utils.data<import_from_stmt>logger AverageMeter<import_from_stmt>transforms3d.axangles mat2axangle<import_from_stmt>convert *<import_from_stmt>demon_metrics compute_motion_errors<import_from_stmt>models PoseNet<import_from_stmt>pose_sequence_folders SequenceFolder<line_sep>parser=argparse.ArgumentParser(description='DeepSFM pose subnet test script' formatter_class=argparse.ArgumentDefaultsHelpFormatter)<line_sep>parser.add_argument('data' metavar='DIR' help='path to dataset')<line_sep>parser.add_argument('-j' '--workers' default=4 type=int metavar='N' help='number of data loading workers')<line_sep>parser.add_argument('--sequence-length' type=int metavar='N' help='sequence length for training' default=2)<line_sep>parser.add_argument('-b' '--batch-size' default=1 type=int # 1
metavar='N' help='mini-batch size')<line_sep>parser.add_argument('--geo' '--geo-cost' default=<true> type=bool metavar='GC' help='whether add geometry cost')<line_sep>parser.add_argument('--pretrained-dps' dest='pretrained_dps' default='pose_checkpoint.pth.tar' metavar='PATH' help='path to pre-trained model')<line_sep>parser.add_argument('--seed' default=0 type=int help='seed for random functions, and network initialization')<line_sep>parser.add_argument('--save' default="I0" type=str help='save prefix')<line_sep>parser.add_argument('--ttype' default='test.txt' type=str help='Text file indicates input data')<line_sep>parser.add_argument('-f' '--training-output-freq' type=int help='frequence for outputting dispnet outputs and warped imgs at training for all scales if 0 will not output' metavar='N' default=100)<line_sep>parser.add_argument('--nlabel' type=int default=10 help='number of label')<line_sep>parser.add_argument('--std_tr' type=float default=0.27 help='translation')<line_sep>parser.add_argument('--std_rot' type=float default=0.12 help='rotation')<line_sep>parser.add_argument('--pose_init' default='demon' help='path to init pose')<line_sep>parser.add_argument('--depth_init' default='demon' help='path to init depth')<line_sep>n_iter=0<line_sep># NOTE: test set for testing
<def_stmt>main <block_start><global>n_iter<line_sep>args=parser.parse_args()<line_sep># Data loading code
normalize=custom_transforms.Normalize(mean=[0.5 0.5 0.5] std=[0.5 0.5 0.5])<line_sep>train_transform=custom_transforms.Compose([# custom_transforms.RandomScaleCrop(),
custom_transforms.ArrayToTensor() normalize])<line_sep>print("=> fetching scenes in '{}'".format(args.data))<line_sep>train_set=SequenceFolder(args.data transform=train_transform seed=args.seed ttype=args.ttype add_geo=args.geo depth_source=args.depth_init sequence_length=args.sequence_length gt_source='g' std=args.std_tr pose_init=args.pose_init dataset="" get_path=<true>)<line_sep>print('{} samples found in {} train scenes'.format(len(train_set) len(train_set.scenes)))<line_sep>val_loader=torch.utils.data.DataLoader(train_set batch_size=args.batch_size shuffle=<false> num_workers=args.workers pin_memory=<true>)<line_sep># create model
print("=> creating model")<line_sep>pose_net=PoseNet(args.nlabel args.std_tr args.std_rot add_geo_cost=args.geo depth_augment=<false>).cuda()<if_stmt>args.pretrained_dps# freeze feature extra layers
# for param in pose_net.feature_extraction.parameters():
# param.requires_grad = False
<block_start>print("=> using pre-trained weights for DPSNet")<line_sep>model_dict=pose_net.state_dict()<line_sep>weights=torch.load(args.pretrained_dps)['state_dict']<line_sep>pretrained_dict={k:v<for>k,v weights.items()<if>k<in>model_dict<and>weights[k].shape<eq>model_dict[k].shape}<line_sep>model_dict.update(pretrained_dict)<line_sep>pose_net.load_state_dict(model_dict)<block_end><else_stmt><block_start>pose_net.init_weights()<block_end>cudnn.benchmark=<true><line_sep>pose_net=torch.nn.DataParallel(pose_net)<line_sep><global>n_iter<line_sep>data_time=AverageMeter()<line_sep>pose_net.eval()<line_sep>end=time.time()<line_sep>errors=np.zeros((2 2 int(np.ceil(len(val_loader)))) np.float32)<with_stmt>torch.no_grad()<block_start><for_stmt>i,(tgt_img ref_imgs ref_poses intrinsics intrinsics_inv tgt_depth ref_depths ref_noise_poses initial_pose tgt_path ref_paths) enumerate(val_loader)<block_start>data_time.update(time.time()-end)<line_sep>tgt_img_var=Variable(tgt_img.cuda())<line_sep>ref_imgs_var=[Variable(img.cuda())<for>img ref_imgs]<line_sep>ref_poses_var=[Variable(pose.cuda())<for>pose ref_poses]<line_sep>ref_noise_poses_var=[Variable(pose.cuda())<for>pose ref_noise_poses]<line_sep>initial_pose_var=Variable(initial_pose.cuda())<line_sep>ref_depths_var=[Variable(dep.cuda())<for>dep ref_depths]<line_sep>intrinsics_var=Variable(intrinsics.cuda())<line_sep>intrinsics_inv_var=Variable(intrinsics_inv.cuda())<line_sep>tgt_depth_var=Variable(tgt_depth.cuda())<line_sep>pose=torch.cat(ref_poses_var 1)<line_sep>noise_pose=torch.cat(ref_noise_poses_var 1)<line_sep>pose_norm=torch.norm(noise_pose[: : :3 3] dim=-1 keepdim=<true>)# b * n* 1
p_angle,p_trans,rot_c,trans_c=pose_net(tgt_img_var ref_imgs_var initial_pose_var noise_pose intrinsics_var intrinsics_inv_var tgt_depth_var ref_depths_var trans_norm=pose_norm)<line_sep>batch_size=p_angle.shape[0]<line_sep>p_angle_v=torch.sum(F.softmax(p_angle dim=1).view(batch_size -1 1)<times>rot_c dim=1)<line_sep>p_trans_v=torch.sum(F.softmax(p_trans dim=1).view(batch_size -1 1)<times>trans_c dim=1)<line_sep>p_matrix=Variable(torch.zeros((batch_size 4 4)).float()).cuda()<line_sep>p_matrix[: 3 3]=1<line_sep>p_matrix[: :3 :]=torch.cat([angle2matrix(p_angle_v) p_trans_v.unsqueeze(-1)] dim=-1)# 2*3*4
p_rel_pose=torch.ones_like(noise_pose)<for_stmt>bat range(batch_size)<block_start>path=tgt_path[bat]<line_sep>dirname=Path.dirname(path)<line_sep>orig_poses=np.genfromtxt(Path.join(dirname args.pose_init+"_poses.txt"))<for_stmt>j range(len(ref_imgs))<block_start>p_rel_pose[: j]=torch.matmul(noise_pose[: j] inv(p_matrix))<line_sep>seq_num=int(Path.basename(ref_paths[bat][j])[:-4])<line_sep>orig_poses[seq_num]=p_rel_pose[bat j :3 :].data.cpu().numpy().reshape(12 )<line_sep>p_aa=mat2axangle(p_rel_pose[bat j :3 :3].data.cpu().numpy())<line_sep>gt_aa=mat2axangle(pose[bat j :3 :3].data.cpu().numpy() unit_thresh=1e-2)<line_sep>n_aa=mat2axangle(noise_pose[bat j :3 :3].data.cpu().numpy() unit_thresh=1e-2)<line_sep>p_t=p_rel_pose[bat j :3 3].data.cpu().numpy()<line_sep>gt_t=pose[bat j :3 3].data.cpu().numpy()<line_sep>n_t=noise_pose[bat j :3 3].data.cpu().numpy()<line_sep>p_aa=p_aa[0]<times>p_aa[1]<line_sep>n_aa=n_aa[0]<times>n_aa[1]<line_sep>gt_aa=gt_aa[0]<times>gt_aa[1]<line_sep>error=compute_motion_errors(np.concatenate([n_aa n_t]) np.concatenate([gt_aa gt_t]) <true>)<line_sep>error_p=compute_motion_errors(np.concatenate([p_aa p_t]) np.concatenate([gt_aa gt_t]) <true>)<line_sep>print("%d n r%.6f, t%.6f"%(i error[0] error[2]))<line_sep>print("%d p r%.6f, t%.6f"%(i error_p[0] error_p[2]))<line_sep>errors[0 0 i]<augadd>error[0]<line_sep>errors[0 1 i]<augadd>error[2]<line_sep>errors[1 0 i]<augadd>error_p[0]<line_sep>errors[1 1 i]<augadd>error_p[2]<block_end>errors[: : i]<augdiv>len(ref_imgs)<if_stmt>args.save<and><not>Path.exists(Path.join(dirname args.save+"_poses.txt"))<block_start>np.savetxt(Path.join(dirname args.save+"_poses.txt") orig_poses)<block_end><block_end><block_end>mean_error=errors.mean(2)<line_sep>error_names=['rot' 'trans']<line_sep>print("%s Results : "%args.pose_init)<line_sep>print("{:>10}, {:>10}".format(*error_names))<line_sep>print("{:10.4f}, {:10.4f}".format(*mean_error[0]))<line_sep>print("new Results : ")<line_sep>print("{:>10}, {:>10}".format(*error_names))<line_sep>print("{:10.4f}, {:10.4f}".format(*mean_error[1]))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
|
<import_from_stmt>pyhf.tensor BackendRetriever<as>tensor<import_from_stmt>pyhf.optimize OptimizerRetriever<as>optimize<import_from_stmt>pyhf._version version<as>__version__<import_from_stmt>pyhf.exceptions InvalidBackend InvalidOptimizer Unsupported<import_from_stmt>pyhf events<line_sep>tensorlib=<none><line_sep>optimizer=<none><def_stmt>get_backend <block_start>"""
Get the current backend and the associated optimizer
Example:
>>> import pyhf
>>> backend, optimizer = pyhf.get_backend()
>>> backend
<pyhf.tensor.numpy_backend.numpy_backend object at 0x...>
>>> optimizer
<pyhf.optimize.scipy_optimizer object at 0x...>
Returns:
backend, optimizer
"""<line_sep><global>tensorlib<line_sep><global>optimizer<line_sep><return>tensorlib optimizer<block_end>tensorlib=tensor.numpy_backend()<line_sep>default_backend=tensorlib<line_sep>optimizer=optimize.scipy_optimizer()<line_sep>default_optimizer=optimizer<line_sep>@events.register('change_backend')<def_stmt>set_backend backend custom_optimizer=<none> precision=<none><block_start>"""
Set the backend and the associated optimizer
Example:
>>> import pyhf
>>> pyhf.set_backend("tensorflow")
>>> pyhf.tensorlib.name
'tensorflow'
>>> pyhf.tensorlib.precision
'64b'
>>> pyhf.set_backend(b"pytorch", precision="32b")
>>> pyhf.tensorlib.name
'pytorch'
>>> pyhf.tensorlib.precision
'32b'
>>> pyhf.set_backend(pyhf.tensor.numpy_backend())
>>> pyhf.tensorlib.name
'numpy'
>>> pyhf.tensorlib.precision
'64b'
Args:
backend (:obj:`str` or `pyhf.tensor` backend): One of the supported pyhf backends: NumPy, TensorFlow, PyTorch, and JAX
custom_optimizer (`pyhf.optimize` optimizer): Optional custom optimizer defined by the user
precision (:obj:`str`): Floating point precision to use in the backend: ``64b`` or ``32b``. Default is backend dependent.
Returns:
None
"""<line_sep><global>tensorlib<line_sep><global>optimizer<line_sep>_supported_precisions=["32b" "64b"]<line_sep>backend_kwargs={}<if_stmt>isinstance(precision (str bytes))<block_start><if_stmt>isinstance(precision bytes)<block_start>precision=precision.decode("utf-8")<block_end>precision=precision.lower()<block_end><if_stmt>isinstance(backend (str bytes))<block_start><if_stmt>isinstance(backend bytes)<block_start>backend=backend.decode("utf-8")<block_end>backend=backend.lower()<if_stmt>precision<is><not><none><block_start>backend_kwargs["precision"]=precision<block_end><try_stmt><block_start>backend=getattr(tensor f"{backend:s}_backend")(**backend_kwargs)<block_end><except_stmt>TypeError<block_start><raise>InvalidBackend(f"The backend provided is not supported: {backend:s}. Select from one of the supported backends: numpy, tensorflow, pytorch")<block_end><block_end>_name_supported=getattr(tensor f"{backend.name:s}_backend")<if_stmt>_name_supported<block_start><if_stmt><not>isinstance(backend _name_supported)<block_start><raise>AttributeError(f"'{backend.name:s}' is not a valid name attribute for backend type {type(backend)}\n Custom backends must have names unique from supported backends")<block_end><if_stmt>backend.precision<not><in>_supported_precisions<block_start><raise>Unsupported(f"The backend precision provided is not supported: {backend.precision:s}. Select from one of the supported precisions: {', '.join([str(v)<for>v _supported_precisions])}")<block_end><block_end># If "precision" arg passed, it should always win
# If no "precision" arg, defer to tensor backend object API if set there
<if_stmt>precision<is><not><none><block_start><if_stmt>backend.precision<ne>precision<block_start>backend_kwargs["precision"]=precision<line_sep>backend=getattr(tensor f"{backend.name:s}_backend")(**backend_kwargs)<block_end><block_end># need to determine if the tensorlib changed or the optimizer changed for events
tensorlib_changed=bool((backend.name<ne>tensorlib.name)|(backend.precision<ne>tensorlib.precision))<line_sep>optimizer_changed=<false><if_stmt>custom_optimizer<block_start><if_stmt>isinstance(custom_optimizer (str bytes))<block_start><if_stmt>isinstance(custom_optimizer bytes)<block_start>custom_optimizer=custom_optimizer.decode("utf-8")<block_end><try_stmt><block_start>new_optimizer=getattr(optimize f"{custom_optimizer.lower()}_optimizer")()<block_end><except_stmt>TypeError<block_start><raise>InvalidOptimizer(f"The optimizer provided is not supported: {custom_optimizer}. Select from one of the supported optimizers: scipy, minuit")<block_end><block_end><else_stmt><block_start>_name_supported=getattr(optimize f"{custom_optimizer.name:s}_optimizer")<if_stmt>_name_supported<block_start><if_stmt><not>isinstance(custom_optimizer _name_supported)<block_start><raise>AttributeError(f"'{custom_optimizer.name}' is not a valid name attribute for optimizer type {type(custom_optimizer)}\n Custom optimizers must have names unique from supported optimizers")<block_end><block_end>new_optimizer=custom_optimizer<block_end><block_end><else_stmt><block_start>new_optimizer=optimize.scipy_optimizer()<block_end>optimizer_changed=bool(optimizer<ne>new_optimizer)<line_sep># set new backend
tensorlib=backend<line_sep>optimizer=new_optimizer<line_sep># trigger events
<if_stmt>tensorlib_changed<block_start>events.trigger("tensorlib_changed")()<block_end><if_stmt>optimizer_changed<block_start>events.trigger("optimizer_changed")()<block_end># set up any other globals for backend
tensorlib._setup()<block_end><import_from_stmt>pyhf.pdf Model<import_from_stmt>pyhf.workspace Workspace<import_from_stmt>pyhf simplemodels<import_from_stmt>pyhf infer<import_from_stmt>pyhf compat<import_from_stmt>pyhf.patchset PatchSet<line_sep>__all__=["Model" "PatchSet" "Workspace" "__version__" "compat" "exceptions" "get_backend" "infer" "interpolators" "modifiers" "optimizer" "parameters" "patchset" "pdf" "probability" "set_backend" "simplemodels" "tensor" "tensorlib" "utils" "workspace" ]<def_stmt>__dir__ <block_start><return>__all__<block_end>
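As a short addition to the examples in the set_backend docstring above, the custom_optimizer argument also accepts an optimizer name; the sketch below assumes the optional iminuit dependency is installed.

# Sketch: select the MINUIT optimizer by name (requires iminuit).
import pyhf
pyhf.set_backend("numpy", "minuit")
print(pyhf.optimizer)  # <pyhf.optimize.minuit_optimizer object at 0x...>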
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
<import_from_stmt>oci.util formatted_flat_dict NONE_SENTINEL value_allowed_none_or_none_sentinel# noqa: F401
<import_from_stmt>oci.decorators init_model_state_from_kwargs<line_sep>@init_model_state_from_kwargs<class_stmt>RecommendationSummary(object)<block_start>"""
Recommendation Definition.
"""<line_sep>#: A constant which can be used with the type property of a RecommendationSummary.
#: This constant has a value of "DETECTOR_PROBLEMS"
TYPE_DETECTOR_PROBLEMS="DETECTOR_PROBLEMS"<line_sep>#: A constant which can be used with the type property of a RecommendationSummary.
#: This constant has a value of "RESOLVED_PROBLEMS"
TYPE_RESOLVED_PROBLEMS="RESOLVED_PROBLEMS"<line_sep>#: A constant which can be used with the risk_level property of a RecommendationSummary.
#: This constant has a value of "CRITICAL"
RISK_LEVEL_CRITICAL="CRITICAL"<line_sep>#: A constant which can be used with the risk_level property of a RecommendationSummary.
#: This constant has a value of "HIGH"
RISK_LEVEL_HIGH="HIGH"<line_sep>#: A constant which can be used with the risk_level property of a RecommendationSummary.
#: This constant has a value of "MEDIUM"
RISK_LEVEL_MEDIUM="MEDIUM"<line_sep>#: A constant which can be used with the risk_level property of a RecommendationSummary.
#: This constant has a value of "LOW"
RISK_LEVEL_LOW="LOW"<line_sep>#: A constant which can be used with the risk_level property of a RecommendationSummary.
#: This constant has a value of "MINOR"
RISK_LEVEL_MINOR="MINOR"<line_sep>#: A constant which can be used with the lifecycle_state property of a RecommendationSummary.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING="CREATING"<line_sep>#: A constant which can be used with the lifecycle_state property of a RecommendationSummary.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING="UPDATING"<line_sep>#: A constant which can be used with the lifecycle_state property of a RecommendationSummary.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE="ACTIVE"<line_sep>#: A constant which can be used with the lifecycle_state property of a RecommendationSummary.
#: This constant has a value of "INACTIVE"
LIFECYCLE_STATE_INACTIVE="INACTIVE"<line_sep>#: A constant which can be used with the lifecycle_state property of a RecommendationSummary.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING="DELETING"<line_sep>#: A constant which can be used with the lifecycle_state property of a RecommendationSummary.
#: This constant has a value of "DELETED"
LIFECYCLE_STATE_DELETED="DELETED"<line_sep>#: A constant which can be used with the lifecycle_state property of a RecommendationSummary.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED="FAILED"<line_sep>#: A constant which can be used with the lifecycle_detail property of a RecommendationSummary.
#: This constant has a value of "OPEN"
LIFECYCLE_DETAIL_OPEN="OPEN"<line_sep>#: A constant which can be used with the lifecycle_detail property of a RecommendationSummary.
#: This constant has a value of "RESOLVED"
LIFECYCLE_DETAIL_RESOLVED="RESOLVED"<line_sep>#: A constant which can be used with the lifecycle_detail property of a RecommendationSummary.
#: This constant has a value of "DISMISSED"
LIFECYCLE_DETAIL_DISMISSED="DISMISSED"<def_stmt>__init__ self **kwargs<block_start>"""
Initializes a new RecommendationSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this RecommendationSummary.
:type id: str
:param type:
The value to assign to the type property of this RecommendationSummary.
Allowed values for this property are: "DETECTOR_PROBLEMS", "RESOLVED_PROBLEMS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type type: str
:param tenant_id:
The value to assign to the tenant_id property of this RecommendationSummary.
:type tenant_id: str
:param compartment_id:
The value to assign to the compartment_id property of this RecommendationSummary.
:type compartment_id: str
:param target_id:
The value to assign to the target_id property of this RecommendationSummary.
:type target_id: str
:param details:
The value to assign to the details property of this RecommendationSummary.
:type details: dict(str, str)
:param risk_level:
The value to assign to the risk_level property of this RecommendationSummary.
Allowed values for this property are: "CRITICAL", "HIGH", "MEDIUM", "LOW", "MINOR", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type risk_level: str
:param problem_count:
The value to assign to the problem_count property of this RecommendationSummary.
:type problem_count: int
:param lifecycle_state:
The value to assign to the lifecycle_state property of this RecommendationSummary.
Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param lifecycle_detail:
The value to assign to the lifecycle_detail property of this RecommendationSummary.
Allowed values for this property are: "OPEN", "RESOLVED", "DISMISSED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_detail: str
:param time_created:
The value to assign to the time_created property of this RecommendationSummary.
:type time_created: datetime
:param time_updated:
The value to assign to the time_updated property of this RecommendationSummary.
:type time_updated: datetime
:param name:
The value to assign to the name property of this RecommendationSummary.
:type name: str
:param description:
The value to assign to the description property of this RecommendationSummary.
:type description: str
"""<line_sep>self.swagger_types={'id':'str' 'type':'str' 'tenant_id':'str' 'compartment_id':'str' 'target_id':'str' 'details':'dict(str, str)' 'risk_level':'str' 'problem_count':'int' 'lifecycle_state':'str' 'lifecycle_detail':'str' 'time_created':'datetime' 'time_updated':'datetime' 'name':'str' 'description':'str'}<line_sep>self.attribute_map={'id':'id' 'type':'type' 'tenant_id':'tenantId' 'compartment_id':'compartmentId' 'target_id':'targetId' 'details':'details' 'risk_level':'riskLevel' 'problem_count':'problemCount' 'lifecycle_state':'lifecycleState' 'lifecycle_detail':'lifecycleDetail' 'time_created':'timeCreated' 'time_updated':'timeUpdated' 'name':'name' 'description':'description'}<line_sep>self._id=<none><line_sep>self._type=<none><line_sep>self._tenant_id=<none><line_sep>self._compartment_id=<none><line_sep>self._target_id=<none><line_sep>self._details=<none><line_sep>self._risk_level=<none><line_sep>self._problem_count=<none><line_sep>self._lifecycle_state=<none><line_sep>self._lifecycle_detail=<none><line_sep>self._time_created=<none><line_sep>self._time_updated=<none><line_sep>self._name=<none><line_sep>self._description=<none><block_end>@property<def_stmt>id self<block_start>"""
**[Required]** Gets the id of this RecommendationSummary.
Unique identifier for Recommendation
:return: The id of this RecommendationSummary.
:rtype: str
"""<line_sep><return>self._id<block_end>@id.setter<def_stmt>id self id<block_start>"""
Sets the id of this RecommendationSummary.
Unique identifier for Recommendation
:param id: The id of this RecommendationSummary.
:type: str
"""<line_sep>self._id=id<block_end>@property<def_stmt>type self<block_start>"""
Gets the type of this RecommendationSummary.
Recommendation type
Allowed values for this property are: "DETECTOR_PROBLEMS", "RESOLVED_PROBLEMS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The type of this RecommendationSummary.
:rtype: str
"""<line_sep><return>self._type<block_end>@type.setter<def_stmt>type self type<block_start>"""
Sets the type of this RecommendationSummary.
Recommendation type
:param type: The type of this RecommendationSummary.
:type: str
"""<line_sep>allowed_values=["DETECTOR_PROBLEMS" "RESOLVED_PROBLEMS"]<if_stmt><not>value_allowed_none_or_none_sentinel(type allowed_values)<block_start>type='UNKNOWN_ENUM_VALUE'<block_end>self._type=type<block_end>@property<def_stmt>tenant_id self<block_start>"""
Gets the tenant_id of this RecommendationSummary.
Tenant Identifier
:return: The tenant_id of this RecommendationSummary.
:rtype: str
"""<line_sep><return>self._tenant_id<block_end>@tenant_id.setter<def_stmt>tenant_id self tenant_id<block_start>"""
Sets the tenant_id of this RecommendationSummary.
Tenant Identifier
:param tenant_id: The tenant_id of this RecommendationSummary.
:type: str
"""<line_sep>self._tenant_id=tenant_id<block_end>@property<def_stmt>compartment_id self<block_start>"""
**[Required]** Gets the compartment_id of this RecommendationSummary.
Compartment Identifier
:return: The compartment_id of this RecommendationSummary.
:rtype: str
"""<line_sep><return>self._compartment_id<block_end>@compartment_id.setter<def_stmt>compartment_id self compartment_id<block_start>"""
Sets the compartment_id of this RecommendationSummary.
Compartment Identifier
:param compartment_id: The compartment_id of this RecommendationSummary.
:type: str
"""<line_sep>self._compartment_id=compartment_id<block_end>@property<def_stmt>target_id self<block_start>"""
**[Required]** Gets the target_id of this RecommendationSummary.
targetId associated with the problem
:return: The target_id of this RecommendationSummary.
:rtype: str
"""<line_sep><return>self._target_id<block_end>@target_id.setter<def_stmt>target_id self target_id<block_start>"""
Sets the target_id of this RecommendationSummary.
targetId associated with the problem
:param target_id: The target_id of this RecommendationSummary.
:type: str
"""<line_sep>self._target_id=target_id<block_end>@property<def_stmt>details self<block_start>"""
**[Required]** Gets the details of this RecommendationSummary.
Recommendation details
:return: The details of this RecommendationSummary.
:rtype: dict(str, str)
"""<line_sep><return>self._details<block_end>@details.setter<def_stmt>details self details<block_start>"""
Sets the details of this RecommendationSummary.
Recommendation details
:param details: The details of this RecommendationSummary.
:type: dict(str, str)
"""<line_sep>self._details=details<block_end>@property<def_stmt>risk_level self<block_start>"""
Gets the risk_level of this RecommendationSummary.
The Risk Level
Allowed values for this property are: "CRITICAL", "HIGH", "MEDIUM", "LOW", "MINOR", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The risk_level of this RecommendationSummary.
:rtype: str
"""<line_sep><return>self._risk_level<block_end>@risk_level.setter<def_stmt>risk_level self risk_level<block_start>"""
Sets the risk_level of this RecommendationSummary.
The Risk Level
:param risk_level: The risk_level of this RecommendationSummary.
:type: str
"""<line_sep>allowed_values=["CRITICAL" "HIGH" "MEDIUM" "LOW" "MINOR"]<if_stmt><not>value_allowed_none_or_none_sentinel(risk_level allowed_values)<block_start>risk_level='UNKNOWN_ENUM_VALUE'<block_end>self._risk_level=risk_level<block_end>@property<def_stmt>problem_count self<block_start>"""
**[Required]** Gets the problem_count of this RecommendationSummary.
Count of problems associated with the recommendation
:return: The problem_count of this RecommendationSummary.
:rtype: int
"""<line_sep><return>self._problem_count<block_end>@problem_count.setter<def_stmt>problem_count self problem_count<block_start>"""
Sets the problem_count of this RecommendationSummary.
Count of problems associated with the recommendation
:param problem_count: The problem_count of this RecommendationSummary.
:type: int
"""<line_sep>self._problem_count=problem_count<block_end>@property<def_stmt>lifecycle_state self<block_start>"""
**[Required]** Gets the lifecycle_state of this RecommendationSummary.
The current state of the Recommendation.
Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this RecommendationSummary.
:rtype: str
"""<line_sep><return>self._lifecycle_state<block_end>@lifecycle_state.setter<def_stmt>lifecycle_state self lifecycle_state<block_start>"""
Sets the lifecycle_state of this RecommendationSummary.
The current state of the Recommendation.
:param lifecycle_state: The lifecycle_state of this RecommendationSummary.
:type: str
"""<line_sep>allowed_values=["CREATING" "UPDATING" "ACTIVE" "INACTIVE" "DELETING" "DELETED" "FAILED"]<if_stmt><not>value_allowed_none_or_none_sentinel(lifecycle_state allowed_values)<block_start>lifecycle_state='UNKNOWN_ENUM_VALUE'<block_end>self._lifecycle_state=lifecycle_state<block_end>@property<def_stmt>lifecycle_detail self<block_start>"""
**[Required]** Gets the lifecycle_detail of this RecommendationSummary.
The lifecycleDetail will give more detail on the substate of the lifecycleState.
Allowed values for this property are: "OPEN", "RESOLVED", "DISMISSED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_detail of this RecommendationSummary.
:rtype: str
"""<line_sep><return>self._lifecycle_detail<block_end>@lifecycle_detail.setter<def_stmt>lifecycle_detail self lifecycle_detail<block_start>"""
Sets the lifecycle_detail of this RecommendationSummary.
The lifecycleDetail will give more detail on the substate of the lifecycleState.
:param lifecycle_detail: The lifecycle_detail of this RecommendationSummary.
:type: str
"""<line_sep>allowed_values=["OPEN" "RESOLVED" "DISMISSED"]<if_stmt><not>value_allowed_none_or_none_sentinel(lifecycle_detail allowed_values)<block_start>lifecycle_detail='UNKNOWN_ENUM_VALUE'<block_end>self._lifecycle_detail=lifecycle_detail<block_end>@property<def_stmt>time_created self<block_start>"""
Gets the time_created of this RecommendationSummary.
The time the problem was created.
:return: The time_created of this RecommendationSummary.
:rtype: datetime
"""<line_sep><return>self._time_created<block_end>@time_created.setter<def_stmt>time_created self time_created<block_start>"""
Sets the time_created of this RecommendationSummary.
The time the problem was created.
:param time_created: The time_created of this RecommendationSummary.
:type: datetime
"""<line_sep>self._time_created=time_created<block_end>@property<def_stmt>time_updated self<block_start>"""
Gets the time_updated of this RecommendationSummary.
The time the problem was last updated.
:return: The time_updated of this RecommendationSummary.
:rtype: datetime
"""<line_sep><return>self._time_updated<block_end>@time_updated.setter<def_stmt>time_updated self time_updated<block_start>"""
Sets the time_updated of this RecommendationSummary.
The time the problem was last updated.
:param time_updated: The time_updated of this RecommendationSummary.
:type: datetime
"""<line_sep>self._time_updated=time_updated<block_end>@property<def_stmt>name self<block_start>"""
**[Required]** Gets the name of this RecommendationSummary.
The recommendation string shown in the UX.
:return: The name of this RecommendationSummary.
:rtype: str
"""<line_sep><return>self._name<block_end>@name.setter<def_stmt>name self name<block_start>"""
Sets the name of this RecommendationSummary.
The recommendation string shown in the UX.
:param name: The name of this RecommendationSummary.
:type: str
"""<line_sep>self._name=name<block_end>@property<def_stmt>description self<block_start>"""
**[Required]** Gets the description of this RecommendationSummary.
description of the recommendation
:return: The description of this RecommendationSummary.
:rtype: str
"""<line_sep><return>self._description<block_end>@description.setter<def_stmt>description self description<block_start>"""
Sets the description of this RecommendationSummary.
description of the recommendation
:param description: The description of this RecommendationSummary.
:type: str
"""<line_sep>self._description=description<block_end><def_stmt>__repr__ self<block_start><return>formatted_flat_dict(self)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>other<is><none><block_start><return><false><block_end><return>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end><block_end>
|
<import_stmt>radon.cli.colors<as>colors<def_stmt>test_color_enabled_yes monkeypatch<block_start>monkeypatch.setenv("COLOR" "yes")<assert_stmt>colors.color_enabled()<block_end><def_stmt>test_color_enabled_no monkeypatch<block_start>monkeypatch.setenv("COLOR" "no")<assert_stmt><not>colors.color_enabled()<block_end><def_stmt>test_color_enabled_auto monkeypatch mocker<block_start>monkeypatch.setenv("COLOR" "auto")<line_sep>isatty_mock=mocker.patch('sys.stdout.isatty')<line_sep>isatty_mock.return_value=<true><assert_stmt>colors.color_enabled()<line_sep>isatty_mock.return_value=<false><assert_stmt><not>colors.color_enabled()<block_end>
|
<import_stmt>os<import_stmt>tempfile<import_stmt>requests<def_stmt>download_mnist_libsvm mnist_data_dir<block_start>mnist_data_path=os.path.join(mnist_data_dir "mnist.bz2")<line_sep>data_url="https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/mnist.bz2"<line_sep>r=requests.get(data_url)<with_stmt>open(mnist_data_path "wb")<as>f<block_start>f.write(r.content)<block_end><block_end><def_stmt>get_mnist_dir # This folder is baked into the docker image
<block_start>MNIST_DATA_DIR="/data/mnist/"<if_stmt>os.path.isdir(MNIST_DATA_DIR)<and>os.path.isfile(os.path.join(MNIST_DATA_DIR 'mnist.bz2'))<block_start><return>MNIST_DATA_DIR<block_end>mnist_dir=tempfile.mkdtemp('_mnist_data')<line_sep>download_mnist_libsvm(mnist_dir)<line_sep><return>mnist_dir<block_end>
|
# Python Standard Library Imports
<import_stmt>json<line_sep># Django Imports
<import_from_stmt>django.http Http404<import_from_stmt>django.http HttpResponse<import_from_stmt>django.views.decorators.csrf csrf_exempt<import_from_stmt>django.views.decorators.http require_POST<line_sep># HTK Imports
<import_from_stmt>htk.lib.zuora.utils get_event_handler<line_sep>@require_POST@csrf_exempt<def_stmt>zuora_webhook_view request<block_start>payload=json.loads(request.body)<line_sep>event_type=request.GET.get('event')<if_stmt>event_type<is><none><block_start><raise>Http404<block_end>event_handler=get_event_handler(event_type)<if_stmt>event_handler<block_start>event_handler(event_type payload)<block_end>response=HttpResponse(status=200)<line_sep><return>response<block_end>
|
# python3.7
"""Collects all runners."""<import_from_stmt>.stylegan_runner StyleGANRunner<import_from_stmt>.encoder_runner EncoderRunner<line_sep>__all__=['StyleGANRunner' 'EncoderRunner']<line_sep>
|
"""
Generic DAO object for safe access to the MongoDB.
Issue: https://github.com/chovanecm/sacredboard/issues/61
"""<import_stmt>pymongo<import_from_stmt>pymongo.errors InvalidName<import_from_stmt>sacredboard.app.data DataSourceError<import_from_stmt>.mongocursor MongoDbCursor<class_stmt>GenericDAO<block_start>"""
Generic DAO object for safe access to the MongoDB.
Issue: https://github.com/chovanecm/sacredboard/issues/61
"""<def_stmt>__init__ self pymongo_client database_name<block_start>"""
Create a new GenericDAO object that will work on the given database.
:param pymongo_client: PyMongo client that is connected to MongoDB.
:param database_name: Name of the database this GenericDAO works with.
:raise DataSourceError
"""<line_sep>self._client=pymongo_client<line_sep>self._database=self._get_database(database_name)<block_end><def_stmt>find_record self collection_name query<block_start>"""
Return the first record matching the given Mongo query.
:param collection_name: Name of the collection to search in.
:param query: MongoDB Query, e.g. {_id: 123}
:return: A single MongoDB record or None if not found.
:raise DataSourceError
"""<line_sep>cursor=self._get_collection(collection_name).find(query)<for_stmt>record cursor# Return the first record found.
<block_start><return>record<block_end># Return None if nothing found.
<return><none><block_end><def_stmt>find_records self collection_name query={} sort_by=<none> sort_direction=<none> start=0 limit=<none><block_start>"""
Return a cursor of records from the given MongoDB collection.
:param collection_name: Name of the MongoDB collection to query.
:param query: Standard MongoDB query. By default no restriction.
:param sort_by: Name of a single field to sort by.
:param sort_direction: The direction to sort, "asc" or "desc".
:param start: Skip first n results.
:param limit: The maximum number of results to return.
:return: Cursor -- An iterable with results.
:raise DataSourceError
"""<line_sep>cursor=self._get_collection(collection_name).find(query)<if_stmt>sort_by<is><not><none><block_start>cursor=self._apply_sort(cursor sort_by sort_direction)<block_end>cursor=cursor.skip(start)<if_stmt>limit<is><not><none><block_start>cursor=cursor.limit(limit)<block_end><return>MongoDbCursor(cursor)<block_end><def_stmt>delete_record self collection_name query<block_start>"""Delete record matching the given MongoDB query."""<line_sep><return>self._get_collection(collection_name).remove(query)<block_end><def_stmt>_get_database self database_name<block_start>"""
Get PyMongo client pointing to the current database.
:return: MongoDB client of the current database.
:raise DataSourceError
"""<try_stmt><block_start><return>self._client[database_name]<block_end><except_stmt>InvalidName<as>ex<block_start><raise>DataSourceError("Cannot connect to database %s!"%self._database)<from>ex<block_end><block_end><def_stmt>_get_collection self collection_name<block_start>"""
Get PyMongo client pointing to the current DB and the given collection.
:return: MongoDB client of the current database and given collection.
:raise DataSourceError
"""<try_stmt><block_start><return>self._database[collection_name]<block_end><except_stmt>InvalidName<as>ex<block_start><raise>DataSourceError("Cannot access MongoDB collection %s!"%collection_name)<from>ex<block_end><except_stmt>Exception<as>ex<block_start><raise>DataSourceError("Unexpected error when accessing MongoDB"<concat>"collection %s!"%collection_name)<from>ex<block_end><block_end><def_stmt>_apply_sort self cursor sort_by sort_direction<block_start>"""
Apply sort to a cursor.
:param cursor: The cursor to apply sort on.
:param sort_by: The field name to sort by.
:param sort_direction: The direction to sort, "asc" or "desc".
:return:
"""<if_stmt>sort_direction<is><not><none><and>sort_direction.lower()<eq>"desc"<block_start>sort=pymongo.DESCENDING<block_end><else_stmt><block_start>sort=pymongo.ASCENDING<block_end><return>cursor.sort(sort_by sort)<block_end><block_end>
|
# -*- coding: utf-8 -*-
# @Author : DevinYang(<EMAIL>)
<import_stmt>torch<import_from_stmt>torchtoolbox.tools summary<import_from_stmt>torchvision.models.resnet resnet50<import_from_stmt>torchvision.models.mobilenet mobilenet_v2<line_sep>model1=resnet50()<line_sep>model2=mobilenet_v2()<def_stmt>test_summary <block_start>summary(model1 torch.rand((1 3 224 224)) <true>)<line_sep>print(summary(model2 torch.rand((1 3 224 224))))<block_end>
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
<import_stmt>logging<import_stmt>pathlib<import_stmt>re<import_from_stmt>datetime datetime<import_from_stmt>shutil copyfile<import_from_stmt>constants OSU_BENCHMARK_VERSION<import_from_stmt>utils render_jinja_template<line_sep>OSU_COMMON_DATADIR=pathlib.Path(__file__).parent/"data/osu/"<line_sep>SUPPORTED_MPIS=["openmpi" "intelmpi"]<def_stmt>compile_osu mpi_variant remote_command_executor<block_start>init_script=render_jinja_template(template_file_path=OSU_COMMON_DATADIR/"init_osu_benchmarks.sh" osu_benchmark_version=OSU_BENCHMARK_VERSION)<line_sep>remote_command_executor.run_remote_script(str(init_script) args=[mpi_variant] hide=<true> additional_files=[str(OSU_COMMON_DATADIR/f"osu-micro-benchmarks-{OSU_BENCHMARK_VERSION}.tgz") str(OSU_COMMON_DATADIR/"config.guess") str(OSU_COMMON_DATADIR/"config.sub") ] )<block_end><def_stmt>run_individual_osu_benchmark mpi_version benchmark_group benchmark_name partition remote_command_executor scheduler_commands num_instances slots_per_instance test_datadir submission_script_template_path=<none> rendered_template_path=<none> timeout=<none> <block_start>"""
Run the given OSU benchmark.
:param mpi_version: string, should be one of SUPPORTED_MPIS
:param benchmark_group: string, which of the MPI benchmarks to run. As of 5.7.1 this includes collective, one-sided,
pt2pt, and startup
:param benchmark_name: string, name of the benchmark to run from the given group
:param partition: string, partition on which to benchmark job (assumes the use of Slurm scheduler)
:param remote_command_executor: RemoteCommandExecutor instance, used to submit jobs
:param scheduler_commands: SchedulerCommands instance, used to submit jobs
:param num_instances: int, number of instances to run benchmark across
:param slots_per_instance: int, number of processes to run on each node
:param test_datadir: Path, used to construct default output path when rendering submission script template
:param submission_script_template_path: string, override default path for source submission script template
:param rendered_template_path: string, override destination path when rendering submission script template
:param timeout: int, maximum number of minutes to wait for job to complete
:return: string, stdout of the benchmark job
"""<line_sep>logging.info(f"Running OSU benchmark {OSU_BENCHMARK_VERSION}: {benchmark_name} for {mpi_version}")<if_stmt>mpi_version<not><in>SUPPORTED_MPIS<block_start><raise>Exception(f"Unsupported MPI: '{mpi_version}'. Must be one of {' '.join(SUPPORTED_MPIS)}")<block_end>compile_osu(mpi_version remote_command_executor)<line_sep># Prepare submission script and pass to the scheduler for the job submission
<if_stmt><not>submission_script_template_path<block_start>submission_script_template_path=OSU_COMMON_DATADIR/f"osu_{benchmark_group}_submit_{mpi_version}.sh"<block_end><if_stmt><not>rendered_template_path<block_start>rendered_template_path=test_datadir/f"osu_{benchmark_group}_submit_{mpi_version}_{benchmark_name}.sh"<block_end>copyfile(submission_script_template_path rendered_template_path)<line_sep>slots=num_instances<times>slots_per_instance<line_sep>submission_script=render_jinja_template(template_file_path=rendered_template_path benchmark_name=benchmark_name osu_benchmark_version=OSU_BENCHMARK_VERSION num_of_processes=slots )<if_stmt>partition<block_start>result=scheduler_commands.submit_script(str(submission_script) slots=slots partition=partition nodes=num_instances)<block_end><else_stmt><block_start>result=scheduler_commands.submit_script(str(submission_script) slots=slots nodes=num_instances)<block_end>job_id=scheduler_commands.assert_job_submitted(result.stdout)<line_sep>scheduler_commands.wait_job_completed(job_id timeout=timeout)<line_sep>scheduler_commands.assert_job_succeeded(job_id)<line_sep>output=remote_command_executor.run_remote_command(f"cat /shared/{benchmark_name}.out").stdout<line_sep><return>job_id output<block_end><def_stmt>run_osu_benchmarks osu_benchmarks mpi_variant partition remote_command_executor scheduler_commands num_instances slots_per_instance region instance test_datadir dimensions <block_start><for_stmt>osu_benchmark_group,osu_benchmark_names osu_benchmarks.items()<block_start><for_stmt>osu_benchmark_name osu_benchmark_names<block_start>dimensions_copy=dimensions.copy()<line_sep>logging.info("Running benchmark %s" osu_benchmark_name)<line_sep>job_id,output=run_individual_osu_benchmark(mpi_version=mpi_variant benchmark_group=osu_benchmark_group benchmark_name=osu_benchmark_name partition=partition remote_command_executor=remote_command_executor scheduler_commands=scheduler_commands num_instances=num_instances slots_per_instance=slots_per_instance test_datadir=test_datadir timeout=40 )<line_sep>logging.info("Preparing benchmarks %s metrics" osu_benchmark_name)<line_sep>metric_data=[]<line_sep>submit_time=datetime.strptime(scheduler_commands.get_job_submit_time(job_id) "%Y-%m-%dT%H:%M:%S")<line_sep>start_time=datetime.strptime(scheduler_commands.get_job_start_time(job_id) "%Y-%m-%dT%H:%M:%S")<line_sep>wait_seconds=(start_time-submit_time).total_seconds()<if_stmt>wait_seconds<ge>15# After submission, if job waited more than 15 seconds before running, the job was probably
# waiting for compute nodes to be launched. Therefore, the wait time is pushed to CloudWatch
# as an indicator of how fast the compute nodes were launched.
<block_start>metric_data.append({"MetricName":"JobWaitTime" "Dimensions":[{"Name":name "Value":str(value)}<for>name,value dimensions_copy.items()] "Value":wait_seconds "Unit":"Seconds" })<block_end><for_stmt>packet_size,latency re.findall(r"(\d+)\s+(\d+)\." output)<block_start>dimensions_copy.update({"OsuBenchmarkGroup":osu_benchmark_group "OsuBenchmarkName":osu_benchmark_name "PacketSize":packet_size })<line_sep>metric_data.append({"MetricName":"Latency" "Dimensions":[{"Name":name "Value":str(value)}<for>name,value dimensions_copy.items()] "Value":int(latency) "Unit":"Microseconds" })<block_end><yield>metric_data<block_end><block_end><block_end>
|
<import_from_stmt>pathlib Path<import_from_stmt>typing Text<import_stmt>rasa.shared.utils.io<import_from_stmt>rasa.shared.core.domain Domain<import_from_stmt>rasa.shared.core.events ActionExecuted SlotSet UserUttered<import_from_stmt>rasa.shared.core.training_data visualization<import_stmt>rasa.utils.io<import_from_stmt>rasa.shared.nlu.constants TEXT INTENT<import_from_stmt>rasa.shared.nlu.training_data.message Message<import_from_stmt>rasa.shared.nlu.training_data.training_data TrainingData<def_stmt>test_style_transfer <block_start>r=visualization._transfer_style({"class":"dashed great"} {"class":"myclass"})<assert_stmt>r["class"]<eq>"myclass dashed"<block_end><def_stmt>test_style_transfer_empty <block_start>r=visualization._transfer_style({"class":"dashed great"} {"something":"else"})<assert_stmt>r["class"]<eq>"dashed"<block_end><def_stmt>test_common_action_prefix <block_start>this=[ActionExecuted("action_listen") ActionExecuted("greet") UserUttered("hey") ActionExecuted("amazing") # until this point they are the same
SlotSet("my_slot" "a") ActionExecuted("a") ActionExecuted("after_a") ]<line_sep>other=[ActionExecuted("action_listen") ActionExecuted("greet") UserUttered("hey") ActionExecuted("amazing") # until this point they are the same
SlotSet("my_slot" "b") ActionExecuted("b") ActionExecuted("after_b") ]<line_sep>num_common=visualization._length_of_common_action_prefix(this other)<assert_stmt>num_common<eq>3<block_end><def_stmt>test_common_action_prefix_equal <block_start>this=[ActionExecuted("action_listen") ActionExecuted("greet") UserUttered("hey") ActionExecuted("amazing") ]<line_sep>other=[ActionExecuted("action_listen") ActionExecuted("greet") UserUttered("hey") ActionExecuted("amazing") ]<line_sep>num_common=visualization._length_of_common_action_prefix(this other)<assert_stmt>num_common<eq>3<block_end><def_stmt>test_common_action_prefix_unequal <block_start>this=[ActionExecuted("action_listen") ActionExecuted("greet") UserUttered("hey") ]<line_sep>other=[ActionExecuted("greet") ActionExecuted("action_listen") UserUttered("hey") ]<line_sep>num_common=visualization._length_of_common_action_prefix(this other)<assert_stmt>num_common<eq>0<block_end><def_stmt>test_graph_persistence domain:Domain tmp_path:Path<block_start><import_from_stmt>os.path isfile<import_from_stmt>networkx.drawing nx_pydot<import_stmt>rasa.shared.core.training_data.loading<as>core_loading<line_sep>story_steps=core_loading.load_data_from_resource("data/test_yaml_stories/stories.yml" domain)<line_sep>out_file=str(tmp_path/"graph.html")<line_sep>generated_graph=visualization.visualize_stories(story_steps domain output_file=out_file max_history=3 should_merge_nodes=<false> )<line_sep>generated_graph=nx_pydot.to_pydot(generated_graph)<assert_stmt>isfile(out_file)<line_sep>content=rasa.shared.utils.io.read_file(out_file)<assert_stmt>"isClient = true"<in>content<assert_stmt>"graph = `{}`".format(generated_graph.to_string())<in>content<block_end><def_stmt>test_merge_nodes domain:Domain tmp_path:Path<block_start><import_from_stmt>os.path isfile<import_stmt>rasa.shared.core.training_data.loading<as>core_loading<line_sep>story_steps=core_loading.load_data_from_resource("data/test_yaml_stories/stories.yml" domain)<line_sep>out_file=str(tmp_path/"graph.html")<line_sep>visualization.visualize_stories(story_steps domain output_file=out_file max_history=3 should_merge_nodes=<true> )<assert_stmt>isfile(out_file)<block_end><def_stmt>test_story_visualization domain:Domain tmp_path:Path<block_start><import_stmt>rasa.shared.core.training_data.loading<as>core_loading<line_sep>story_steps=core_loading.load_data_from_resource("data/test_yaml_stories/stories.yml" domain)<line_sep>out_file=tmp_path/"graph.html"<line_sep>generated_graph=visualization.visualize_stories(story_steps domain output_file=str(out_file) max_history=3 should_merge_nodes=<false> )<assert_stmt>str(<none>)<not><in>out_file.read_text()<assert_stmt>"/affirm"<in>out_file.read_text()<assert_stmt>len(generated_graph.nodes())<eq>51<assert_stmt>len(generated_graph.edges())<eq>56<block_end><def_stmt>test_story_visualization_with_training_data domain:Domain tmp_path:Path nlu_data_path:Text<block_start><import_stmt>rasa.shared.core.training_data.loading<as>core_loading<line_sep>story_steps=core_loading.load_data_from_resource("data/test_yaml_stories/stories.yml" domain)<line_sep>out_file=tmp_path/"graph.html"<line_sep>test_text="test text"<line_sep>test_intent="affirm"<line_sep>generated_graph=visualization.visualize_stories(story_steps domain output_file=str(out_file) max_history=3 should_merge_nodes=<false> nlu_training_data=TrainingData([Message({TEXT:test_text INTENT:test_intent})]) 
)<assert_stmt>test_text<in>out_file.read_text()<assert_stmt>test_intent<not><in>out_file.read_text()<assert_stmt>len(generated_graph.nodes())<eq>51<assert_stmt>len(generated_graph.edges())<eq>56<block_end><def_stmt>test_story_visualization_with_merging domain:Domain<block_start><import_stmt>rasa.shared.core.training_data.loading<as>core_loading<line_sep>story_steps=core_loading.load_data_from_resource("data/test_yaml_stories/stories.yml" domain)<line_sep>generated_graph=visualization.visualize_stories(story_steps domain output_file=<none> max_history=3 should_merge_nodes=<true>)<assert_stmt>15<l>len(generated_graph.nodes())<l>33<assert_stmt>20<l>len(generated_graph.edges())<l>33<block_end>
|
default_app_config='machina.apps.forum_conversation.forum_polls.apps.ForumPollsAppConfig'<line_sep>
|
"""
src/zero_args.py
"""<import_from_stmt>datasets load_dataset<import_from_stmt>torch.optim Adam<import_from_stmt>torch.utils.data DataLoader<import_from_stmt>transformers GPT2LMHeadModel GPT2Tokenizer<import_stmt>deepspeed<import_stmt>torch.distributed<as>dist<line_sep>model=GPT2LMHeadModel.from_pretrained("gpt2")<line_sep>tokenizer=GPT2Tokenizer.from_pretrained("gpt2")<line_sep>tokenizer.pad_token=tokenizer.eos_token<line_sep>optimizer=Adam(model.parameters() lr=3e-5 weight_decay=3e-7)<line_sep>engine,optimizer,_,scheduler=deepspeed.initialize(optimizer=optimizer model=model config={"train_batch_size":16 "gradient_accumulation_steps":1 "scheduler":{"type":"WarmupDecayLR" "params":{"total_num_steps":300 "warmup_min_lr":0 "warmup_max_lr":3e-5 "warmup_num_steps":30 } } "fp16":{"enabled":<true> "initial_scale_power":32 "loss_scale_window":1000 "hysteresis":2 "min_loss_scale":1 } "zero_optimization":{"stage":1 "allgather_partitions":<true> "allgather_bucket_size":5e8 "overlap_comm":<false> "reduce_scatter":<true> "reduce_bucket_size":5e8 "contiguous_gradients":<true> } "zero_allow_untested_optimizer":<true> "wall_clock_breakdown":<false> "steps_per_print":9999999999 } )<line_sep>datasets=load_dataset("squad").data["train"]["context"]<line_sep>datasets=[str(sample)<for>sample datasets]<line_sep>data_loader=DataLoader(datasets batch_size=8 num_workers=8)<for_stmt>i,data enumerate(data_loader)<block_start>tokens=tokenizer(data return_tensors="pt" truncation=<true> padding=<true> max_length=1024 )<line_sep>loss=engine(input_ids=tokens.input_ids.cuda() attention_mask=tokens.attention_mask.cuda() labels=tokens.input_ids.cuda() ).loss<line_sep>engine.backward(loss)<line_sep>engine.step()<if_stmt>i%10<eq>0<and>dist.get_rank()<eq>0<block_start>print(f"step:{i}, loss:{loss}")<block_end><if_stmt>i<ge>300<block_start><break><block_end><block_end>
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
<import_from_stmt>typing Mapping Optional Union Tuple<import_stmt>copy<import_stmt>torch<import_from_stmt>torch.utils.data DataLoader<import_from_stmt>torch Tensor nn autograd<import_from_stmt>torch.nn.modules.loss _Loss<import_from_stmt>torch.optim.optimizer Optimizer<import_from_stmt>torch.optim.lr_scheduler _LRScheduler<import_from_stmt>overrides overrides<import_from_stmt>archai.common.config Config<import_from_stmt>archai.nas.arch_trainer ArchTrainer<import_from_stmt>archai.common utils ml_utils<import_from_stmt>archai.nas.model Model<import_from_stmt>archai.common.checkpoint CheckPoint<import_from_stmt>archai.common.common logger<import_from_stmt>archai.common.multi_optim MultiOptim OptimSched<class_stmt>DidartsArchTrainer(ArchTrainer)<block_start>"""Train network using different optimizers for alphas and other parameters"""<def_stmt>__init__ self conf_train:Config model:Model checkpoint:Optional[CheckPoint]<arrow><none><block_start>super().__init__(conf_train model checkpoint)<line_sep>self._conf_alpha_optim=conf_train['alpha_optimizer']<line_sep>self._conf_alpha_sched=conf_train['alpha_lr_schedule']<block_end>@overrides<def_stmt>create_multi_optim self train_len:int<arrow>MultiOptim# optimizers, schedulers needs to be recreated for each fit call
# as they have state specific to each run
<block_start>optim=self.create_optimizer(self.conf_optim self.model.nonarch_params(recurse=<true>))<line_sep># create scheduler for optim before applying amp
sched,sched_on_epoch=self.create_scheduler(self.conf_sched optim train_len)<line_sep>alpha_optim=self.create_optimizer(self._conf_alpha_optim self.model.all_owned().param_by_kind(<none>))<line_sep>alpha_sched,alpha_sched_on_epoch=self.create_scheduler(self._conf_alpha_sched alpha_optim train_len)<line_sep>multi_optim=MultiOptim()<line_sep>multi_optim.append(OptimSched(optim sched sched_on_epoch))<line_sep>multi_optim.append(OptimSched(alpha_optim alpha_sched alpha_sched_on_epoch))<line_sep>logger.info({'multi_optim_len':len(multi_optim)})<line_sep><return>multi_optim<block_end><block_end>
|
"""
Unit tests for Network runner
"""<import_stmt>logging<import_stmt>pytest<import_stmt>salt.runners.network<as>network<import_from_stmt>tests.support.mock MagicMock patch<line_sep>log=logging.getLogger(__name__)<line_sep>@pytest.fixture<def_stmt>mac_addr_list <block_start>test_list_mac_addresses=["08:00:27:82:b2:ca" "52:54:00:ee:eb:e1" "52:54:00:ee:eb:e1" ]<line_sep><return>test_list_mac_addresses.sort()<block_end>@pytest.fixture<def_stmt>id_minion <block_start><return>"test-host"<block_end>@pytest.fixture<def_stmt>cache_grain_data id_minion<block_start><return>{id_minion:{"cwd":"/" "ip_gw":<true> "ip4_gw":"192.168.0.1" "ip6_gw":<false> "dns":{"nameservers":["192.168.0.1"] "ip4_nameservers":["192.168.0.1"] "ip6_nameservers":[] "sortlist":[] "domain":"" "search":["example.org"] "options":[] } "fqdns":["Unknown.example.org"] "machine_id":"ae886ddffbcc4f0da1e72769adfe0171" "master":"192.168.0.109" "server_id":644891398 "localhost":"Unknown.example.org" "fqdn":"Unknown.example.org" "host":"Unknown" "domain":"example.org" "hwaddr_interfaces":{"lo":"00:00:00:00:00:00" "enp0s3":"08:00:27:82:b2:ca" "virbr0":"52:54:00:ee:eb:e1" "virbr0-nic":"52:54:00:ee:eb:e1" } "id":"test-host" "ip4_interfaces":{"lo":["127.0.0.1"] "enp0s3":["192.168.0.124"] "virbr0":["192.168.122.1"] "virbr0-nic":[] } "ip6_interfaces":{"lo":["::1"] "enp0s3":["fe80::a00:27ff:fe82:b2ca"] "virbr0":[] "virbr0-nic":[] } "ipv4":["127.0.0.1" "192.168.0.124" "192.168.122.1"] "ipv6":["::1" "fe80::a00:27ff:fe82:b2ca"] "fqdn_ip4":["192.168.0.70"] "fqdn_ip6":[] "ip_interfaces":{"lo":["127.0.0.1" "::1"] "enp0s3":["192.168.0.124" "fe80::a00:27ff:fe82:b2ca"] "virbr0":["192.168.122.1"] "virbr0-nic":[] } "kernelparams":[["BOOT_IMAGE" "/vmlinuz-3.10.0-1127.18.2.el7.x86_64"] ["root" "/dev/mapper/centos-root"] ["ro" <none>] ["rd.lvm.lv" "centos/root"] ["rd.lvm.lv" "centos/swap"] ["rhgb" <none>] ["quiet" <none>] ["LANG" "en_US.UTF-8"] ] "locale_info":{"defaultlanguage":"en_US" "defaultencoding":"UTF-8" "detectedencoding":"UTF-8" "timezone":"unknown" } "num_gpus":1 "gpus":[{"vendor":"vmware" "model":"SVGA II Adapter"}] "kernel":"Linux" "nodename":"Unknown.example.org" "kernelrelease":"3.10.0-1127.18.2.el7.x86_64" "kernelversion":"#1 SMP Sun Jul 26 15:27:06 UTC 2020" "cpuarch":"x86_64" "selinux":{"enabled":<false> "enforced":"Disabled"} "systemd":{"version":"219" "features":("+PAM +AUDIT +SELINUX +IMA -APPARMOR +SMACK +SYSVINIT +UTMP"<concat>" +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ +LZ4 -SECCOMP +BLKID"<concat>" +ELFUTILS +KMOD +IDN") } "init":"systemd" "lsb_distrib_id":"CentOS Linux" "lsb_distrib_codename":"CentOS Linux 7 (Core)" "osfullname":"CentOS Linux" "osrelease":"7.8.2003" "oscodename":"CentOS Linux 7 (Core)" "os":"CentOS" "num_cpus":1 "cpu_model":"Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz" "cpu_flags":["fpu" "vme" "de" "pse" "tsc" "msr" "pae" "mce" "cx8" "apic" "sep" "mtrr " "pge" "mca" "cmov" "pat" "pse36" "clflush" "mmx" "fxsr" "sse" "sse2" "ht" "syscall" "nx" "rdtscp" "lm" "constant_tsc" "rep_good" "nopl" "xtopology" "nonstop_tsc" "eagerfpu" "pni" "pclmulqdq" "monitor" "ssse3" "cx16" "pcid" "sse4_1" "sse4_2" "x2apic" "movbe" "popcnt" "aes" "xsave" "avx" "rdrand" "hypervisor" "lahf_lm" "abm" "3dnowprefetch" "invpcid_single" "fsgsbase" "avx2" "inv pcid" "rdseed" "clflushopt" "md_clear" "flush_l1d" ] "os_family":"RedHat" "osarch":"x86_64" "mem_total":1998 "swap_total":2047 "biosversion":"VirtualBox" "productname":"VirtualBox" "manufacturer":"innotek GmbH" "biosreleasedate":"12/01/2006" "uuid":"dd95fedd-1a2b-5e48-86a7-7e339f9f02a1" "serialnumber":"0" "virtual":"VirtualBox" 
"ps":"ps -efHww" "osrelease_info":[7 8 2003] "osmajorrelease":7 "osfinger":"CentOS Linux-7" "path":"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin" "systempath":["/usr/local/sbin" "/usr/local/bin" "/usr/sbin" "/usr/bin" ] "pythonexecutable":"/usr/bin/python3" "pythonpath":["/usr/bin" "/usr/lib64/python36.zip" "/usr/lib64/python3.6" "/usr/lib64/python3.6/lib-dynload" "/usr/lib64/python3.6/site-packages" "/usr/lib/python3.6/site-packages" ] "pythonversion":[3 6 8 "final" 0] "saltpath":"/usr/lib/python3.6/site-packages/salt" "saltversion":"3003" "saltversioninfo":[3003] "zmqversion":"4.1.4" "disks":["sda" "sr0"] "ssds":[] "shell":"/bin/sh" "lvm":{"centos":["root" "swap"]} "mdadm":[] "username":"root" "groupname":"root" "pid":2469 "gid":0 "uid":0 "zfs_support":<false> "zfs_feature_flags":<false> }}<block_end>@pytest.fixture<def_stmt>configure_loader_modules <block_start><return>{network:{"__grains__":{"osarch":"x86_64" "os_family":"Redhat" "osmajorrelease":7 "kernelrelease":"3.10.0-1127.18.2.el7.x86_64" } } }<block_end><def_stmt>test_wolmatch cache_grain_data id_minion mac_addr_list<block_start>"""
Test wolmatch
"""<line_sep>cache_mock=MagicMock(return_value=cache_grain_data)<line_sep>patches={"cache.grains":cache_mock }<line_sep>wol_out=MagicMock(return_value=mac_addr_list)<with_stmt>patch.dict(network.__salt__ patches)<block_start><with_stmt>patch("salt.runners.network.wol" wol_out)<block_start>added=network.wolmatch(id_minion)<assert_stmt>added.sort()<eq>mac_addr_list<block_end><block_end><block_end>
|
# Generated by Django 2.2.17 on 2020-11-24 17:16
<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('projects' '0066_make_imported_file_slug_nullable') ]<line_sep>operations=[migrations.AlterField(model_name='feature' name='feature_id' field=models.CharField(max_length=255 unique=<true> verbose_name='Feature identifier') ) ]<block_end>
|
# -*- coding:utf-8 -*-
# @project: GPT2-NewsTitle
# @filename: train.py
# @author: 刘聪NLP
# @contact: <EMAIL>
# @time: 2020/12/16 16:28
"""
File description:
Training script for the GPT2 model that generates news titles from news article bodies.
"""<import_stmt>torch<import_stmt>os<import_stmt>random<import_stmt>numpy<as>np<import_stmt>argparse<import_stmt>logging<import_from_stmt>transformers.modeling_gpt2 GPT2Config<import_from_stmt>model GPT2LMHeadModel<import_from_stmt>transformers BertTokenizer<import_from_stmt>data_set GPT2NewsTitleDataSet collate_func<import_from_stmt>torch.utils.data DataLoader RandomSampler SequentialSampler<import_from_stmt>transformers AdamW get_linear_schedule_with_warmup<import_from_stmt>tqdm tqdm trange<try_stmt><block_start><import_from_stmt>torch.utils.tensorboard SummaryWriter<block_end><except_stmt>ImportError<block_start><import_from_stmt>tensorboardX SummaryWriter<block_end>logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' datefmt='%m/%d/%Y %H:%M:%S' level=logging.INFO)<line_sep>logger=logging.getLogger(__name__)<def_stmt>train model device train_data test_data args<block_start>"""
Train the model.
Args:
model: the model to train
device: device to run on
train_data: training dataset object
test_data: test dataset object
args: training configuration arguments
Returns:
"""<line_sep>tb_write=SummaryWriter()<if_stmt>args.gradient_accumulation_steps<l>1<block_start><raise>ValueError("gradient_accumulation_steps参数无效,必须大于等于1")<block_end># 计算真实的训练batch_size大小
train_batch_size=int(args.train_batch_size/args.gradient_accumulation_steps)<line_sep>train_sampler=RandomSampler(train_data)<line_sep>train_data_loader=DataLoader(train_data sampler=train_sampler batch_size=train_batch_size collate_fn=collate_func)<line_sep>total_steps=int(len(train_data_loader)<times>args.num_train_epochs/args.gradient_accumulation_steps)<line_sep>logger.info("总训练步数为:{}".format(total_steps))<line_sep>model.to(device)<line_sep># 获取模型所有参数
param_optimizer=list(model.named_parameters())<line_sep>no_decay=['bias' 'LayerNorm.bias' 'LayerNorm.weight']<line_sep>optimizer_grouped_parameters=[{'params':[p<for>n,p param_optimizer<if><not>any(nd<in>n<for>nd no_decay)] 'weight_decay':0.01} {'params':[p<for>n,p param_optimizer<if>any(nd<in>n<for>nd no_decay)] 'weight_decay':0.0}]<line_sep># 设置优化器
optimizer=AdamW(optimizer_grouped_parameters lr=args.learning_rate eps=args.adam_epsilon)<line_sep>scheduler=get_linear_schedule_with_warmup(optimizer num_warmup_steps=int(args.warmup_proportion<times>total_steps) num_training_steps=total_steps)<line_sep># 清空cuda缓存
torch.cuda.empty_cache()<line_sep># 将模型调至训练状态
model.train()<line_sep>title_id=train_data.title_id<line_sep>tr_loss,logging_loss,min_loss=0.0 0.0 0.0<line_sep>global_step=0<line_sep># 开始训练模型
<for_stmt>iepoch trange(0 int(args.num_train_epochs) desc="Epoch" disable=<false>)<block_start>iter_bar=tqdm(train_data_loader desc="Iter (loss=X.XXX)" disable=<false>)<for_stmt>step,batch enumerate(iter_bar)<block_start>input_ids=batch["input_ids"].to(device)<line_sep>token_type_ids=batch["token_type_ids"].to(device)<line_sep># run the forward pass
outputs=model.forward(input_ids=input_ids token_type_ids=token_type_ids labels=input_ids title_id=title_id)<line_sep>loss=outputs[0]<line_sep>tr_loss<augadd>loss.item()<line_sep># show the loss value in the progress bar for easy monitoring
iter_bar.set_description("Iter (loss=%5.3f)"%loss.item())<line_sep># if gradient accumulation is enabled, divide the loss by the number of accumulation steps
<if_stmt>args.gradient_accumulation_steps<g>1<block_start>loss=loss/args.gradient_accumulation_steps<block_end># backpropagate the loss
loss.backward()<line_sep>torch.nn.utils.clip_grad_norm_(model.parameters() args.max_grad_norm)<line_sep># update the parameters once the step count is a multiple of the accumulation steps
<if_stmt>(step+1)%args.gradient_accumulation_steps<eq>0<block_start>optimizer.step()<line_sep>scheduler.step()<line_sep>optimizer.zero_grad()<line_sep>global_step<augadd>1<line_sep># every logging_steps steps, log the learning rate and the training loss
<if_stmt>args.logging_steps<g>0<and>global_step%args.logging_steps<eq>0<block_start>tb_write.add_scalar("lr" scheduler.get_lr()[0] global_step)<line_sep>tb_write.add_scalar("train_loss" (tr_loss-logging_loss)/(args.logging_steps<times>args.gradient_accumulation_steps) global_step)<line_sep>logging_loss=tr_loss<block_end># every eval_steps steps, evaluate the model and log the test loss
<if_stmt>args.eval_steps<g>0<and>global_step%args.eval_steps<eq>0<block_start>eval_loss=evaluate(model device test_data args)<line_sep>tb_write.add_scalar("test_loss" eval_loss global_step)<line_sep>model.train()<block_end><block_end><block_end># save the model at the end of every epoch
output_dir=os.path.join(args.output_dir "checkpoint-{}".format(global_step))<line_sep>model_to_save=model.module<if>hasattr(model "module")<else>model<line_sep>model_to_save.save_pretrained(output_dir)<line_sep># clear the CUDA cache
torch.cuda.empty_cache()<block_end><block_end><def_stmt>evaluate model device test_data args<block_start>"""
Evaluate the model on the test dataset.
Args:
model: the model
device: device to run on
test_data: test dataset object
args: training configuration arguments
Returns:
"""<line_sep># 构造测试集的DataLoader
test_sampler=SequentialSampler(test_data)<line_sep>test_data_loader=DataLoader(test_data sampler=test_sampler batch_size=args.test_batch_size collate_fn=collate_func)<line_sep>iter_bar=tqdm(test_data_loader desc="iter" disable=<false>)<line_sep>title_id=test_data.title_id<line_sep>total_loss,total=0.0 0.0<line_sep># 进行测试
<for_stmt>step,batch enumerate(iter_bar)# 模型设为eval
<block_start>model.eval()<with_stmt>torch.no_grad()<block_start>input_ids=batch["input_ids"].to(device)<line_sep>token_type_ids=batch["token_type_ids"].to(device)<line_sep># 获取预测结果
outputs=model.forward(input_ids=input_ids token_type_ids=token_type_ids labels=input_ids title_id=title_id)<line_sep>loss=outputs[0]<line_sep>loss=loss.item()<line_sep># 对loss进行累加
total_loss<augadd>loss<times>len(batch["input_ids"])<line_sep>total<augadd>len(batch["input_ids"])<block_end><block_end># 计算最终测试集的loss结果
test_loss=total_loss/total<line_sep><return>test_loss<block_end><def_stmt>set_args <block_start>"""Set up the arguments needed for model training"""<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--device' default='0' type=str help='GPU to use for training or testing')<line_sep>parser.add_argument('--config_path' default='./config/config.json' type=str help='model configuration file')<line_sep>parser.add_argument('--vocab_path' default='./vocab/vocab.txt' type=str help='vocabulary file; a small vocabulary with some new tokens added')<line_sep>parser.add_argument('--train_file_path' default='./data_dir/train_data.json' type=str help='training data for news title generation')<line_sep>parser.add_argument('--test_file_path' default='./data_dir/test_data.json' type=str help='test data for news title generation')<line_sep>parser.add_argument('--pretrained_model_path' default=<none> type=str help='path to a pretrained GPT2 model')<line_sep>parser.add_argument('--data_dir' default='./data_dir' type=str help='directory where generated cache data is stored')<line_sep>parser.add_argument('--num_train_epochs' default=5 type=int help='number of training epochs')<line_sep>parser.add_argument('--train_batch_size' default=16 type=int help='batch size during training')<line_sep>parser.add_argument('--test_batch_size' default=8 type=int help='batch size during testing')<line_sep>parser.add_argument('--learning_rate' default=1e-4 type=float help='learning rate for model training')<line_sep>parser.add_argument('--warmup_proportion' default=0.1 type=float help='warm-up proportion, i.e. the fraction of total training steps used for warm-up')<line_sep>parser.add_argument('--adam_epsilon' default=1e-8 type=float help='epsilon value for the Adam optimizer')<line_sep>parser.add_argument('--logging_steps' default=20 type=int help='number of steps between training log writes')<line_sep>parser.add_argument('--eval_steps' default=4000 type=int help='number of training steps between evaluations')<line_sep>parser.add_argument('--gradient_accumulation_steps' default=4 type=int help='gradient accumulation steps')<line_sep>parser.add_argument('--max_grad_norm' default=1.0 type=float help='')<line_sep>parser.add_argument('--output_dir' default='output_dir/' type=str help='model output directory')<line_sep>parser.add_argument('--seed' type=int default=2020 help='random seed')<line_sep>parser.add_argument('--max_len' type=int default=512 help='maximum model input length; must be smaller than n_ctx in the config')<line_sep>parser.add_argument('--title_max_len' type=int default=32 help='maximum length of the generated title; must be smaller than max_len')<line_sep><return>parser.parse_args()<block_end><def_stmt>main # set up model training arguments
<block_start>args=set_args()<line_sep># set up GPU information
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"<line_sep>os.environ["CUDA_VISIBLE_DEVICES"]=args.device<line_sep># get the device used for model training
device=torch.device("cuda"<if>torch.cuda.is_available()<and>int(args.device)<ge>0<else>"cpu")<line_sep># set the random seed for reproducibility
<if_stmt>args.seed<block_start>torch.manual_seed(args.seed)<line_sep>random.seed(args.seed)<line_sep>np.random.seed(args.seed)<block_end># load the model config
model_config=GPT2Config.from_json_file(args.config_path)<line_sep># instantiate the GPT2LMHeadModel; here we do not load a pretrained model but train from scratch.
# Why train from scratch? We use a small model with only 6 layers, the vocabulary was modified, and no suitable pretrained model was found. (Honestly: limited budget, the GPUs could not handle more.)
# decide whether to use a pretrained GPT2 model
<if_stmt>args.pretrained_model_path<block_start>model=GPT2LMHeadModel.from_pretrained(args.pretrained_model_path)<block_end><else_stmt># if no pretrained model is specified, initialize the model from scratch
<block_start>model=GPT2LMHeadModel(config=model_config)<block_end># model = GPT2LMHeadModel(config=model_config)
# instantiate the tokenizer
tokenizer=BertTokenizer.from_pretrained(args.vocab_path do_lower_case=<true>)<line_sep># treat [Space] as a single unit, e.g. for "我爱[Space]中国。" the original tokenizer produces "['我', '爱', '[', 'Space', ']', '中', '国', '。']";
# after adding the special token the result is "['我', '爱', '[Space]', '中', '国', '。']"
tokenizer.add_tokens("[Space]" special_tokens=<true>)<line_sep># create the model output directory
<if_stmt><not>os.path.exists(args.output_dir)<block_start>os.mkdir(args.output_dir)<block_end># load the training and test data
train_data=GPT2NewsTitleDataSet(tokenizer args.max_len args.title_max_len args.data_dir "train" args.train_file_path)<line_sep>test_data=GPT2NewsTitleDataSet(tokenizer args.max_len args.title_max_len args.data_dir "test" args.test_file_path)<line_sep># start training
train(model device train_data test_data args)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
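The gradient-accumulation logic in train() above follows the standard PyTorch pattern; a self-contained toy sketch is shown below (the linear model and random data are made up purely for illustration and are not part of this project).

import torch
from torch import nn

model = nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
data = [(torch.randn(4, 10), torch.randn(4, 1)) for _ in range(8)]  # 8 micro-batches
accumulation_steps = 4   # plays the role of args.gradient_accumulation_steps

optimizer.zero_grad()
for step, (x, y) in enumerate(data):
    loss = nn.functional.mse_loss(model(x), y) / accumulation_steps  # scale so accumulated gradients average out
    loss.backward()                                                  # gradients add up across micro-batches
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()        # one parameter update per accumulation window
        optimizer.zero_grad()

With the argument defaults above, each DataLoader batch holds train_batch_size / gradient_accumulation_steps samples, so the effective batch size per optimizer step stays at args.train_batch_size.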
|
<import_from_stmt>django.db.models Sum<import_from_stmt>rest_framework.response Response<import_from_stmt>rest_framework.views APIView<import_from_stmt>usaspending_api.common.cache_decorator cache_response<import_from_stmt>usaspending_api.common.exceptions InvalidParameterException<import_from_stmt>usaspending_api.common.validator.tinyshield TinyShield<import_from_stmt>usaspending_api.references.models.gtas_sf133_balances GTASSF133Balances<import_from_stmt>usaspending_api.common.helpers.generic_helper get_account_data_time_period_message<class_stmt>TotalBudgetaryResources(APIView)<block_start>"""
This route sends a request to the backend to retrieve GTAS totals by FY/FP.
"""<line_sep>endpoint_doc="usaspending_api/api_contracts/contracts/v2/references/total_budgetary_resources.md"<line_sep>@cache_response()<def_stmt>get self request<block_start>model=[{"key":"fiscal_year" "name":"fiscal_year" "type":"integer" "min":2017 "optional":<true> "default":<none> "allow_nulls":<true> } {"key":"fiscal_period" "name":"fiscal_period" "type":"integer" "min":2 "max":12 "optional":<true> "default":<none> "allow_nulls":<true> } ]<line_sep>validated=TinyShield(model).block(request.query_params)<line_sep>fiscal_year=validated.get("fiscal_year" <none>)<line_sep>fiscal_period=validated.get("fiscal_period" <none>)<line_sep>gtas_queryset=GTASSF133Balances.objects.values("fiscal_year" "fiscal_period")<if_stmt>fiscal_period<block_start><if_stmt><not>fiscal_year<block_start><raise>InvalidParameterException("fiscal_period was provided without fiscal_year.")<block_end><else_stmt><block_start>gtas_queryset=gtas_queryset.filter(fiscal_year=fiscal_year fiscal_period=fiscal_period)<block_end><block_end><elif_stmt>fiscal_year<block_start>gtas_queryset=gtas_queryset.filter(fiscal_year=fiscal_year)<block_end>results=gtas_queryset.annotate(total_budgetary_resources=Sum("total_budgetary_resources_cpe")).order_by("-fiscal_year" "-fiscal_period")<line_sep><return>Response({"results":list(results) "messages":[get_account_data_time_period_message()]<if><not>fiscal_year<or>fiscal_year<l>2017<else>[] })<block_end><block_end>
|
<import_from_stmt>nose.plugins.attrib attr<import_from_stmt>indra.sources.tas process_from_web<line_sep>@attr('slow')<def_stmt>test_processor <block_start>tp=process_from_web(affinity_class_limit=10)<assert_stmt>tp<assert_stmt>tp.statements<line_sep>num_stmts=len(tp.statements)<line_sep># This is the total number of statements about human genes
<assert_stmt>num_stmts<eq>1123724 num_stmts<assert_stmt>all(len(s.evidence)<ge>1<for>s tp.statements) 'Some statements lack any evidence'<block_end>
|
<import_from_stmt>typing Optional<import_from_stmt>promnesia.common Visit<import_from_stmt>promnesia.sources.org extract_from_file<import_from_stmt>common tdata throw<def_stmt>declrf s:Optional[str]<arrow>Optional[str]<block_start><if_stmt>s<is><none><block_start><return><none><block_end># meh.. not sure how to handle this properly, ideally should be via pytest?
# not sure if should just do it in the indexer? e.g. extension might not like it
<return>s.replace('\r' '')<block_end><def_stmt>test_org_indexer <arrow><none><block_start>[_ cpp cozy]=[v<if>isinstance(v Visit)<else>throw(v)<for>v extract_from_file(tdata('auto/orgs/file.org'))]<assert_stmt>cpp.url<eq>'https://www.youtube.com/watch?v=rHIkrotSwcc'<line_sep># TODO not sure about filetags?
exp='''
xxx /r/cpp :cpp:programming:
I've enjoyed [<NAME>'s _There Are No Zero-cost Abstractions_](
https://www.youtube.com/watch?v=rHIkrotSwcc) very much.
'''.lstrip()<assert_stmt>declrf(cpp.context)<eq>exp<assert_stmt>cozy.url<eq>'https://twitter.com/Mappletons/status/1255221220263563269'<block_end><def_stmt>test_org_indexer_2 <arrow><none><block_start>items=[v<if>isinstance(v Visit)<else>throw(v)<for>v extract_from_file(tdata('auto/orgs/file3.org'))]<assert_stmt>len(items)<eq>6<assert_stmt>items[0].url<eq>'https://www.reddit.com/r/androidapps/comments/4i36z9/how_you_use_your_android_to_the_maximum/d2uq24i'<assert_stmt>items[1].url<eq>'https://link.com'<assert_stmt>items[-2].url<eq>'https://en.wikipedia.org/wiki/Resilio_Sync'<line_sep># TODO shit def need org specific url extractor (and then extract from everything remaining)
# assert results[-1].url == 'https://en.wikipedia.org/wiki/InterPlanetary_File_System'
<block_end><def_stmt>test_heading <arrow><none><block_start>items=[v<if>isinstance(v Visit)<else>throw(v)<for>v extract_from_file(tdata('auto/orgs/file2.org'))]<assert_stmt>{i.url<for>i items}<eq>{'https://en.wikipedia.org/wiki/Computational_topology' 'http://graphics.stanford.edu/courses/cs468-09-fall/' 'https://en.wikipedia.org/wiki/Triangulation_(topology)' 'https://en.wikipedia.org/wiki/Digital_manifold' }<block_end>
|
'''
Multi-class Naive Bayes implementation
'''<import_stmt>random<import_stmt>re<import_stmt>traceback<import_stmt>jieba<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_from_stmt>pylab mpl<import_from_stmt>sklearn.externals joblib<import_from_stmt>sklearn.naive_bayes MultinomialNB<line_sep>jieba.load_userdict("../train/word.txt")<line_sep>stop=[line.strip()<for>line open('../ad/stop.txt' 'r' encoding='utf-8').readlines()]# stop words
<def_stmt>build_key_word path# generate features from word frequency
<block_start>d={}<with_stmt>open(path encoding="utf-8")<as>fp<block_start><for_stmt>line fp<block_start><for_stmt>word jieba.cut(line.strip())<block_start>p=re.compile(b'\w' re.L)<line_sep>result=p.sub(b"" bytes(word encoding="utf-8")).decode("utf-8")<if_stmt><not>result<or>result<eq>' '# empty string
<block_start><continue><block_end><if_stmt>len(word)<g>1# avoid counting large numbers of meaningless single-character words
<block_start>d[word]=d.get(word 0)+1<block_end><block_end><block_end><block_end>kw_list=sorted(d key=<lambda>x:d[x] reverse=<true>)<line_sep>size=int(len(kw_list)<times>0.2)# keep the top 20% most frequent words
mood=set(kw_list[:size])<line_sep><return>list(mood-set(stop))<block_end><def_stmt>loadDataSet path# return the tokens and label of each Weibo post
<block_start>line_cut=[]<line_sep>label=[]<with_stmt>open(path encoding="utf-8")<as>fp<block_start><for_stmt>line fp<block_start>temp=line.strip()<try_stmt><block_start>sentence=temp[2:].lstrip()# the Weibo post text
label.append(int(temp[:2]))# read the label
word_list=[]<line_sep>sentence=str(sentence).replace('\u200b' '')<for_stmt>word jieba.cut(sentence.strip())<block_start>p=re.compile(b'\w' re.L)<line_sep>result=p.sub(b"" bytes(word encoding="utf-8")).decode("utf-8")<if_stmt><not>result<or>result<eq>' '# empty string
<block_start><continue><block_end>word_list.append(word)<block_end>word_list=list(set(word_list)-set(stop)-set('\u200b')-set(' ')-set('\u3000')-set('️'))<line_sep>line_cut.append(word_list)<block_end><except_stmt>Exception<block_start><continue><block_end><block_end><block_end><return>line_cut label<block_end># return the tokens and labels of each Weibo post
<def_stmt>setOfWordsToVecTor vocabularyList moodWords# vectorize a single Weibo post
<block_start>vocabMarked=[0]<times>len(vocabularyList)<for_stmt>smsWord moodWords<block_start><if_stmt>smsWord<in>vocabularyList<block_start>vocabMarked[vocabularyList.index(smsWord)]<augadd>1<block_end><block_end><return>np.array(vocabMarked)<block_end><def_stmt>setOfWordsListToVecTor vocabularyList train_mood_array# vectorize all Weibo posts
<block_start>vocabMarkedList=[]<for_stmt>i range(len(train_mood_array))<block_start>vocabMarked=setOfWordsToVecTor(vocabularyList train_mood_array[i])<line_sep>vocabMarkedList.append(vocabMarked)<block_end><return>vocabMarkedList<block_end><def_stmt>trainingNaiveBayes train_mood_array label# compute the prior probabilities
<block_start>numTrainDoc=len(train_mood_array)<line_sep>numWords=len(train_mood_array[0])<line_sep>prior_Pos,prior_Neg,prior_Neutral=0.0 0.0 0.0<for_stmt>i label<block_start><if_stmt>i<eq>1<block_start>prior_Pos=prior_Pos+1<block_end><elif_stmt>i<eq>2<block_start>prior_Neg=prior_Neg+1<block_end><else_stmt><block_start>prior_Neutral=prior_Neutral+1<block_end><block_end>prior_Pos=prior_Pos/float(numTrainDoc)<line_sep>prior_Neg=prior_Neg/float(numTrainDoc)<line_sep>prior_Neutral=prior_Neutral/float(numTrainDoc)<line_sep>wordsInPosNum=np.ones(numWords)<line_sep>wordsInNegNum=np.ones(numWords)<line_sep>wordsInNeutralNum=np.ones(numWords)<line_sep>PosWordsNum=2.0# Laplace smoothing: if any probability were 0 the whole product would be 0, so counts start at 1 and denominators at 2
NegWordsNum=2.0<line_sep>NeutralWordsNum=2.0<for_stmt>i range(0 numTrainDoc)<block_start><try_stmt><block_start><if_stmt>label[i]<eq>1<block_start>wordsInPosNum<augadd>train_mood_array[i]<line_sep>PosWordsNum<augadd>sum(train_mood_array[i])# total number of word occurrences in the positive corpus
<block_end><elif_stmt>label[i]<eq>2<block_start>wordsInNegNum<augadd>train_mood_array[i]<line_sep>NegWordsNum<augadd>sum(train_mood_array[i])<block_end><else_stmt><block_start>wordsInNeutralNum<augadd>train_mood_array[i]<line_sep>NeutralWordsNum<augadd>sum(train_mood_array[i])<block_end><block_end><except_stmt>Exception<as>e<block_start>traceback.print_exc(e)<block_end><block_end>pWordsPosicity=np.log(wordsInPosNum/PosWordsNum)<line_sep>pWordsNegy=np.log(wordsInNegNum/NegWordsNum)<line_sep>pWordsNeutral=np.log(wordsInNeutralNum/NeutralWordsNum)<line_sep><return>pWordsPosicity pWordsNegy pWordsNeutral prior_Pos prior_Neg prior_Neutral<block_end><def_stmt>classify pWordsPosicity pWordsNegy pWordsNeutral prior_Pos prior_Neg prior_Neutral test_word_arrayMarkedArray<block_start>pP=sum(test_word_arrayMarkedArray<times>pWordsPosicity)+np.log(prior_Pos)<line_sep>pN=sum(test_word_arrayMarkedArray<times>pWordsNegy)+np.log(prior_Neg)<line_sep>pNeu=sum(test_word_arrayMarkedArray<times>pWordsNeutral)+np.log(prior_Neutral)<if_stmt>pP<g>pN<g>pNeu<or>pP<g>pNeu<g>pN<block_start><return>pP pN pNeu 1<block_end><elif_stmt>pN<g>pP<g>pNeu<or>pN<g>pNeu<g>pP<block_start><return>pP pN pNeu 2<block_end><else_stmt><block_start><return>pP pN pNeu 3<block_end><block_end><def_stmt>predict test_word_array test_word_arrayLabel testCount PosWords NegWords NeutralWords prior_Pos prior_Neg prior_Neutral<block_start>errorCount=0<for_stmt>j range(testCount)<block_start><try_stmt><block_start>pP,pN,pNeu,smsType=classify(PosWords NegWords NeutralWords prior_Pos prior_Neg prior_Neutral test_word_array[j])<if_stmt>smsType<ne>test_word_arrayLabel[j]<block_start>errorCount<augadd>1<block_end><block_end><except_stmt>Exception<as>e<block_start>traceback.print_exc(e)<block_end><block_end>print("Bayes" errorCount/testCount)<line_sep><return>errorCount/testCount<block_end><if_stmt>__name__<eq>'__main__'<block_start>multi_nb=[]<line_sep>bayes_nb=[]<for_stmt>m range(1 51)<block_start>vocabList=build_key_word("../train/train.txt")<line_sep>line_cut,label=loadDataSet("../train/train.txt")<line_sep>train_mood_array=setOfWordsListToVecTor(vocabList line_cut)<line_sep>test_word_array=[]<line_sep>test_word_arrayLabel=[]<line_sep>testCount=100# 从中随机选取100条用来测试,并删除原来的位置
<for_stmt>i range(testCount)<block_start><try_stmt><block_start>randomIndex=int(random.uniform(0 len(train_mood_array)))<line_sep>test_word_arrayLabel.append(label[randomIndex])<line_sep>test_word_array.append(train_mood_array[randomIndex])<del_stmt>(train_mood_array[randomIndex])<del_stmt>(label[randomIndex])<block_end><except_stmt>Exception<as>e<block_start>print(e)<block_end><block_end>multi=MultinomialNB()<line_sep>multi=multi.fit(train_mood_array label)<line_sep>joblib.dump(multi 'model/gnb.model')<line_sep>muljob=joblib.load('model/gnb.model')<line_sep>result=muljob.predict(test_word_array)<line_sep>count=0<for_stmt>i range(len(test_word_array))<block_start>type=result[i]<if_stmt>type<ne>test_word_arrayLabel[i]<block_start>count=count+1<line_sep># print(test_word_array[i], "----", result[i])
<block_end><block_end>print("MultinomialNB" count/float(testCount))<line_sep>multi_nb.append(count/float(testCount))<line_sep>PosWords,NegWords,NeutralWords,prior_Pos,prior_Neg,prior_Neutral=trainingNaiveBayes(train_mood_array label)<line_sep>accuracy=predict(test_word_array test_word_arrayLabel testCount PosWords NegWords NeutralWords prior_Pos prior_Neg prior_Neutral)<line_sep>bayes_nb.append(accuracy)<block_end># 画图
mpl.rcParams['font.sans-serif']=['SimHei']<line_sep>fig=plt.figure()<line_sep>ax=fig.add_subplot(111)<line_sep>ax.plot([x<for>x range(1 51)] multi_nb label='sklearn库' color='orange')<line_sep>ax.plot([x<for>x range(1 51)] bayes_nb label='实现' color='green')<line_sep>ax.set_xlabel('次数')<line_sep>ax.set_ylabel('准确率')<line_sep>plt.xlim([1 50])<line_sep>leg=ax.legend(loc='upper right' fancybox=<true>)<line_sep>leg.get_frame().set_alpha(0.7)<line_sep>plt.title("对比")<line_sep>plt.show()<block_end>
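For reference, classify() above picks the class with the largest log-space score sum(counts * log P(word | class)) + log P(class); a minimal sketch with made-up numbers, showing only two of the three classes:

import numpy as np

counts = np.array([2, 0, 1, 0])                        # word counts of one vectorized post
log_p_pos = np.log(np.array([0.4, 0.2, 0.3, 0.1]))     # log P(word | positive)
log_p_neg = np.log(np.array([0.1, 0.4, 0.1, 0.4]))     # log P(word | negative)
prior_pos, prior_neg = 0.5, 0.5

score_pos = np.sum(counts * log_p_pos) + np.log(prior_pos)
score_neg = np.sum(counts * log_p_neg) + np.log(prior_neg)
print("positive" if score_pos > score_neg else "negative")   # -> positive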
|
# Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>sys<import_stmt>requests<import_stmt>datetime<class_stmt>Task<block_start><def_stmt>__init__ self start end name parameters<block_start>self.start=start<line_sep>self.end=end<line_sep>self.name=name<line_sep>self.parameters=parameters<line_sep>self.started=<false><line_sep>self.ended=<false><block_end><block_end><class_stmt>Api<block_start>__API_URL="http://10.66.128.50:9999/pipeline/restapi/"<line_sep>__LOG_URL='run/{}/logs'<line_sep>__RESPONSE_STATUS_OK='OK'<line_sep>__DEFAULT_HEADER={'content-type':'application/json'}<line_sep>__DATE_FORMAT="%Y-%m-%d %H:%M:%S.%f"<def_stmt>__init__ self<block_start><pass><block_end><def_stmt>get_logs self run_id<block_start>result=requests.get(self.__API_URL+self.__LOG_URL.format(run_id) headers=self.__DEFAULT_HEADER)<if_stmt>hasattr(result.json() 'error')<or>result.json()['status']<ne>self.__RESPONSE_STATUS_OK<block_start><raise>RuntimeError('Failed to load run {} logs. API response: {}'.format(run_id result.json()['message']))<block_end>logs=result.json()['payload']<line_sep>tasks={}<for_stmt>log logs<block_start>id=log['task']['name']<line_sep>name=id<line_sep>parameters=''<if_stmt>'parameters'<in>log['task']<block_start>id<augadd>' '+log['task']['parameters']<line_sep>parameters=log['task']['parameters']<block_end><else_stmt><block_start><continue><block_end>date=datetime.datetime.strptime(log['date'] self.__DATE_FORMAT)<if_stmt>id<not><in>tasks<block_start>task=Task(date date name parameters)<line_sep>tasks[id]=task<block_end><else_stmt><block_start>task=tasks[id]<if_stmt>'logText'<in>log<and>'Kubernetes pod state: Running'<in>log['logText']<and><not>task.started<block_start>task.start=date<line_sep>task.started=<true><block_end><elif_stmt>log['status']<eq>"FAILURE"<or>log['status']<eq>"STOPPED"<or>log['status']<eq>"SUCCESS"<and><not>task.ended<block_start>task.end=date<line_sep>task.ended=<true><block_end><block_end><block_end>total_time=0<for_stmt>id tasks<block_start>task=tasks[id]<line_sep>task_time=(task.end-task.start).seconds<line_sep>minutes=task_time/60<line_sep>seconds=task_time%60<line_sep>print('{}\t{}\t{} min {} s'.format(task.name task.parameters minutes seconds))<line_sep>total_time<augadd>task_time<block_end>print<line_sep>print('Whole pipeline ran for {} s.'.format(total_time))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><if_stmt>len(sys.argv)<l>2<block_start><raise>RuntimeError('Run ID is required for script')<block_end>run_id=sys.argv[1]<line_sep>api=Api()<line_sep>api.get_logs(run_id)<block_end>
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# CAM signature/authentication error.
AUTHFAILURE='AuthFailure'<line_sep># Operation failed.
FAILEDOPERATION='FailedOperation'<line_sep># Internal error.
INTERNALERROR='InternalError'<line_sep># Failed to create the private zone.
INTERNALERROR_CREATEPRIVATEZONE='InternalError.CreatePrivateZone'<line_sep># Failed to create the private zone record.
INTERNALERROR_CREATEPRIVATEZONERECORD='InternalError.CreatePrivateZoneRecord'<line_sep># Database error.
INTERNALERROR_DBERROR='InternalError.DbError'<line_sep># Failed to delete the private zone record.
INTERNALERROR_DELETEPRIVATEZONERECORD='InternalError.DeletePrivateZoneRecord'<line_sep># Failed to query the VPC private DNS resolution status.
INTERNALERROR_DESCRIBEINTERNALENDPOINTDNSSTATUS='InternalError.DescribeInternalEndpointDnsStatus'<line_sep># Failed to query the private zone list.
INTERNALERROR_DESCRIBEPRIVATEZONELIST='InternalError.DescribePrivateZoneList'<line_sep># Failed to query the private zone record list.
INTERNALERROR_DESCRIBEPRIVATEZONERECORDLIST='InternalError.DescribePrivateZoneRecordList'<line_sep># Failed to query the list of allowlisted VPCs.
INTERNALERROR_DESCRIBEPRIVATEZONESERVICELIST='InternalError.DescribePrivateZoneServiceList'<line_sep># Target conflict.
INTERNALERROR_ERRCONFLICT='InternalError.ErrConflict'<line_sep># Target does not exist.
INTERNALERROR_ERRNOTEXIST='InternalError.ErrNotExist'<line_sep># Authentication failed.
INTERNALERROR_ERRUNAUTHORIZED='InternalError.ErrUnauthorized'<line_sep># Resource already exists.
INTERNALERROR_ERRORCONFLICT='InternalError.ErrorConflict'<line_sep># Resource exceeds the quota.
INTERNALERROR_ERROROVERLIMIT='InternalError.ErrorOverLimit'<line_sep># Internal error of the TCR instance.
INTERNALERROR_ERRORTCRINTERNAL='InternalError.ErrorTcrInternal'<line_sep># Invalid header type in the TCR instance request.
INTERNALERROR_ERRORTCRINVALIDMEDIATYPE='InternalError.ErrorTcrInvalidMediaType'<line_sep># TCR instance resource conflict.
INTERNALERROR_ERRORTCRRESOURCECONFLICT='InternalError.ErrorTcrResourceConflict'<line_sep># No permission for this TCR operation.
INTERNALERROR_ERRORTCRUNAUTHORIZED='InternalError.ErrorTcrUnauthorized'<line_sep># Failed to modify the association between the VPC and the private zone.
INTERNALERROR_MODIFYPRIVATEZONEVPC='InternalError.ModifyPrivateZoneVpc'<line_sep># Unknown error.
INTERNALERROR_UNKNOWN='InternalError.Unknown'<line_sep># Parameter error.
INVALIDPARAMETER='InvalidParameter'<line_sep># The information in the user request does not match its namespace.
INVALIDPARAMETER_ERRNSMISMATCH='InvalidParameter.ErrNSMisMatch'<line_sep># The namespace name already exists.
INVALIDPARAMETER_ERRNAMESPACEEXIST='InvalidParameter.ErrNamespaceExist'<line_sep># The namespace is already taken.
INVALIDPARAMETER_ERRNAMESPACERESERVED='InvalidParameter.ErrNamespaceReserved'<line_sep># Invalid parameter: the repository already exists.
INVALIDPARAMETER_ERRREPOEXIST='InvalidParameter.ErrRepoExist'<line_sep># The trigger name already exists.
INVALIDPARAMETER_ERRTRIGGEREXIST='InvalidParameter.ErrTriggerExist'<line_sep># The user already exists.
INVALIDPARAMETER_ERRUSEREXIST='InvalidParameter.ErrUserExist'<line_sep># The instance name already exists.
INVALIDPARAMETER_ERRORNAMEEXISTS='InvalidParameter.ErrorNameExists'<line_sep># The instance name is invalid.
INVALIDPARAMETER_ERRORNAMEILLEGAL='InvalidParameter.ErrorNameIllegal'<line_sep># The instance name is reserved.
INVALIDPARAMETER_ERRORNAMERESERVED='InvalidParameter.ErrorNameReserved'<line_sep># The instance name is invalid: malformed or reserved.
INVALIDPARAMETER_ERRORREGISTRYNAME='InvalidParameter.ErrorRegistryName'<line_sep># The number of cloud tags exceeds the upper limit of 10.
INVALIDPARAMETER_ERRORTAGOVERLIMIT='InvalidParameter.ErrorTagOverLimit'<line_sep># Invalid TCR request.
INVALIDPARAMETER_ERRORTCRINVALIDPARAMETER='InvalidParameter.ErrorTcrInvalidParameter'<line_sep># Creating instances is not supported in this region.
INVALIDPARAMETER_UNSUPPORTEDREGION='InvalidParameter.UnsupportedRegion'<line_sep># The user namespace quota has been reached.
LIMITEXCEEDED_ERRNAMESPACEMAXLIMIT='LimitExceeded.ErrNamespaceMaxLimit'<line_sep># The user repository quota has been reached.
LIMITEXCEEDED_ERRREPOMAXLIMIT='LimitExceeded.ErrRepoMaxLimit'<line_sep># The trigger quota has been reached.
LIMITEXCEEDED_ERRTRIGGERMAXLIMIT='LimitExceeded.ErrTriggerMaxLimit'<line_sep># Missing parameter error.
MISSINGPARAMETER='MissingParameter'<line_sep># Missing parameter.
MISSINGPARAMETER_MISSINGPARAMETER='MissingParameter.MissingParameter'<line_sep># Operation denied.
OPERATIONDENIED='OperationDenied'<line_sep># The instance is in an abnormal state.
RESOURCEINSUFFICIENT_ERRORINSTANCENOTRUNNING='ResourceInsufficient.ErrorInstanceNotRunning'<line_sep># The VPC DNS resolution status is abnormal or has not been deleted.
RESOURCEINSUFFICIENT_ERRORVPCDNSSTATUS='ResourceInsufficient.ErrorVpcDnsStatus'<line_sep># The resource does not exist.
RESOURCENOTFOUND='ResourceNotFound'<line_sep># The user has not created a namespace.
RESOURCENOTFOUND_ERRNONAMESPACE='ResourceNotFound.ErrNoNamespace'<line_sep># The repository does not exist.
RESOURCENOTFOUND_ERRNOREPO='ResourceNotFound.ErrNoRepo'<line_sep># The tag does not exist.
RESOURCENOTFOUND_ERRNOTAG='ResourceNotFound.ErrNoTag'<line_sep># The trigger does not exist.
RESOURCENOTFOUND_ERRNOTRIGGER='ResourceNotFound.ErrNoTrigger'<line_sep># The user does not exist (not registered).
RESOURCENOTFOUND_ERRNOUSER='ResourceNotFound.ErrNoUser'<line_sep># The resource in the TCR instance was not found.
RESOURCENOTFOUND_TCRRESOURCENOTFOUND='ResourceNotFound.TcrResourceNotFound'<line_sep># Unauthorized operation.
UNAUTHORIZEDOPERATION='UnauthorizedOperation'<line_sep># Unknown parameter error.
UNKNOWNPARAMETER='UnknownParameter'<line_sep># Operation not supported.
UNSUPPORTEDOPERATION='UnsupportedOperation'<line_sep>
|
<import_from_stmt>.noamopt *<line_sep>
|
<import_stmt>re<import_stmt>pytest<import_from_stmt>sunpy.net base_client dataretriever jsoc vso<import_from_stmt>sunpy.net.base_client QueryResponseTable convert_row_to_table<import_from_stmt>sunpy.net.dataretriever.sources.norh NoRHClient<line_sep>_REGEX=re.compile(r"Client")<line_sep>CLIENT_LIST=[]<for_stmt>a_import [vso jsoc dataretriever]<block_start><for_stmt>item dir(a_import)<block_start><if_stmt>_REGEX.search(item)<block_start>CLIENT_LIST.append(getattr(a_import item))<block_end><block_end><block_end>CLIENT_LIST.remove(dataretriever.client.GenericClient)<line_sep># We can access the registry directly
CLIENT_NAMES=base_client.BaseClient._registry.keys()<line_sep>CLIENTS_REG=base_client.BaseClient._registry.items()<line_sep>@pytest.mark.parametrize("client" CLIENT_LIST)<def_stmt>test_registry client<block_start>"""
Check if each client has been registered.
"""<assert_stmt>client<in>CLIENT_NAMES<assert_stmt>(client client._can_handle_query)<in>CLIENTS_REG<block_end>@pytest.fixture<def_stmt>dummy_response <block_start><return>QueryResponseTable([{'hello':1}] client=NoRHClient())<block_end><def_stmt>test_slice dummy_response<block_start><assert_stmt>len(dummy_response)<eq>1<line_sep>row=dummy_response[0]<line_sep>table=row.as_table()<assert_stmt>len(table)<eq>1<assert_stmt>isinstance(table.client NoRHClient)<line_sep>col=dummy_response['hello']<line_sep>table=col.as_table()<assert_stmt>len(table)<eq>1<assert_stmt>isinstance(table.client NoRHClient)<block_end><def_stmt>test_path_format_keys dummy_response<block_start><assert_stmt>dummy_response.path_format_keys()<eq>{'hello'}<block_end><def_stmt>test_convert_row_to_table dummy_response<block_start>@convert_row_to_table<def_stmt>example self query_results **kwargs<block_start><return>query_results<block_end><assert_stmt>example(<none> dummy_response)<is>dummy_response<line_sep># This is a single row table anyway
<assert_stmt>example(<none> dummy_response[0])<eq>dummy_response<block_end>
|
# Copyright (c) OpenMMLab. All rights reserved.
<import_stmt>torch.nn<as>nn<import_from_stmt>mmcv.runner BaseModule Sequential<import_stmt>mmocr.utils<as>utils<import_from_stmt>mmocr.models.builder BACKBONES<import_from_stmt>mmocr.models.textrecog.layers BasicBlock<line_sep>@BACKBONES.register_module()<class_stmt>ResNetABI(BaseModule)<block_start>"""Implement ResNet backbone for text recognition, modified from `ResNet.
<https://arxiv.org/pdf/1512.03385.pdf>`_ and
`<https://github.com/FangShancheng/ABINet>`_
Args:
in_channels (int): Number of channels of input image tensor.
stem_channels (int): Number of stem channels.
base_channels (int): Number of base channels.
arch_settings (list[int]): List of BasicBlock number for each stage.
strides (Sequence[int]): Strides of the first block of each stage.
out_indices (None | Sequence[int]): Indices of output stages. If not
specified, only the last stage will be returned.
last_stage_pool (bool): If True, add `MaxPool2d` layer to last stage.
"""<def_stmt>__init__ self in_channels=3 stem_channels=32 base_channels=32 arch_settings=[3 4 6 6 3] strides=[2 1 2 1 1] out_indices=<none> last_stage_pool=<false> init_cfg=[dict(type='Xavier' layer='Conv2d') dict(type='Constant' val=1 layer='BatchNorm2d')]<block_start>super().__init__(init_cfg=init_cfg)<assert_stmt>isinstance(in_channels int)<assert_stmt>isinstance(stem_channels int)<assert_stmt>utils.is_type_list(arch_settings int)<assert_stmt>utils.is_type_list(strides int)<assert_stmt>len(arch_settings)<eq>len(strides)<assert_stmt>out_indices<is><none><or>isinstance(out_indices (list tuple))<assert_stmt>isinstance(last_stage_pool bool)<line_sep>self.out_indices=out_indices<line_sep>self.last_stage_pool=last_stage_pool<line_sep>self.block=BasicBlock<line_sep>self.inplanes=stem_channels<line_sep>self._make_stem_layer(in_channels stem_channels)<line_sep>self.res_layers=[]<line_sep>planes=base_channels<for_stmt>i,num_blocks enumerate(arch_settings)<block_start>stride=strides[i]<line_sep>res_layer=self._make_layer(block=self.block inplanes=self.inplanes planes=planes blocks=num_blocks stride=stride)<line_sep>self.inplanes=planes<times>self.block.expansion<line_sep>planes<augmul>2<line_sep>layer_name=f'layer{i+1}'<line_sep>self.add_module(layer_name res_layer)<line_sep>self.res_layers.append(layer_name)<block_end><block_end><def_stmt>_make_layer self block inplanes planes blocks stride=1<block_start>layers=[]<line_sep>downsample=<none><if_stmt>stride<ne>1<or>inplanes<ne>planes<block_start>downsample=nn.Sequential(nn.Conv2d(inplanes planes 1 stride bias=<false>) nn.BatchNorm2d(planes) )<block_end>layers.append(block(inplanes planes use_conv1x1=<true> stride=stride downsample=downsample))<line_sep>inplanes=planes<for_stmt>_ range(1 blocks)<block_start>layers.append(block(inplanes planes use_conv1x1=<true>))<block_end><return>Sequential(*layers)<block_end><def_stmt>_make_stem_layer self in_channels stem_channels<block_start>self.conv1=nn.Conv2d(in_channels stem_channels kernel_size=3 stride=1 padding=1)<line_sep>self.bn1=nn.BatchNorm2d(stem_channels)<line_sep>self.relu1=nn.ReLU(inplace=<true>)<block_end><def_stmt>forward self x<block_start>"""
Args:
x (Tensor): Image tensor of shape :math:`(N, 3, H, W)`.
Returns:
Tensor or list[Tensor]: Feature tensor. Its shape depends on
ResNetABI's config. It can be a list of feature outputs at specific
layers if ``out_indices`` is specified.
"""<line_sep>x=self.conv1(x)<line_sep>x=self.bn1(x)<line_sep>x=self.relu1(x)<line_sep>outs=[]<for_stmt>i,layer_name enumerate(self.res_layers)<block_start>res_layer=getattr(self layer_name)<line_sep>x=res_layer(x)<if_stmt>self.out_indices<and>i<in>self.out_indices<block_start>outs.append(x)<block_end><block_end><return>tuple(outs)<if>self.out_indices<else>x<block_end><block_end>
|
<import_stmt>sys<import_stmt>os.path<import_stmt>xml.etree.ElementTree<as>ET<if_stmt>__name__<eq>'__main__'<block_start>aiml_dir=sys.argv[1]<line_sep>csv_file=sys.argv[2]<line_sep>print("aiml_dir:" aiml_dir)<line_sep>print("csv_file:" csv_file)<line_sep>questions=[]<line_sep>files=0<for_stmt>dirpath,dirnames,filenames os.walk(aiml_dir)<block_start><for_stmt>filename filenames<block_start>files<augadd>1<line_sep>aiml_file=os.path.join(dirpath filename)<line_sep>print(aiml_file)<try_stmt><block_start>tree=ET.parse(aiml_file)<line_sep>aiml=tree.getroot()<line_sep>categories=aiml.findall('category')<for_stmt>category categories<block_start>pattern_text=""<line_sep>pattern=category.find("pattern")<for_stmt>elt pattern.iter()<block_start>comma=<false><if_stmt>elt.tag<eq>"pattern"<block_start><if_stmt>elt.text<is><not><none><block_start>text=elt.text.strip().upper()<line_sep>pattern_text<augadd>" ".join(text.split())<line_sep>comma=<true><block_end><block_end><elif_stmt>elt.tag<eq>"set"<block_start><if_stmt>'name'<in>elt.attrib<block_start>name=elt.attrib['name']<block_end><else_stmt><block_start>name=elt.text.strip()<block_end><if_stmt>comma<is><true><block_start>pattern_text<augadd>" "<block_end>pattern_text<augadd>" SET[%s]"%name<if_stmt>text<block_start>pattern_text<augadd>" "<line_sep>pattern_text<augadd>" ".join(text.split())<block_end>comma=<true><block_end><elif_stmt>elt.tag<eq>"bot"<block_start><if_stmt>'name'<in>elt.attrib<block_start>name=elt.attrib['name']<block_end><else_stmt><block_start>name=elt.text.strip()<block_end><if_stmt>comma<is><true><block_start>pattern_text<augadd>" "<block_end>pattern_text<augadd>" BOT[%s]"%name<if_stmt>text<block_start>pattern_text<augadd>" "<line_sep>pattern_text<augadd>" ".join(text.split())<block_end>comma=<true><block_end><if_stmt>elt.tail<is><not><none><and>elt.tail.strip()<ne>""<block_start><if_stmt>comma<is><true><block_start>pattern_text<augadd>" "<block_end>text=elt.tail.strip().upper()<if_stmt>text<block_start>pattern_text<augadd>" "<line_sep>pattern_text<augadd>" ".join(text.split())<block_end>comma=<true><block_end><if_stmt>pattern_text<is><not><none><block_start>pattern_text=pattern_text.strip()<if_stmt>len(pattern_text)<g>0<block_start>questions.append([aiml_file pattern_text])<block_end><block_end><block_end><block_end><block_end><except_stmt>Exception<as>e<block_start>print(e)<line_sep><raise>e<block_end><block_end><block_end>questions.sort(key=<lambda>x:x[1])<with_stmt>open(csv_file "w+")<as>output_file<block_start><for_stmt>line questions<block_start>new_line=", ".join(line[1].split())<line_sep>output_file.write(line[0])<line_sep>output_file.write(", ")<line_sep>output_file.write(new_line)<line_sep>output_file.write("\n")<block_end><block_end>print("Files: %d"%files)<line_sep>print("Patterns: %d"%len(questions))<block_end>
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""<import_from_stmt>gcloud.iam_auth IAMMeta<import_from_stmt>gcloud.iam_auth.intercept ViewInterceptor<import_from_stmt>gcloud.iam_auth.utils iam_resource_auth_or_raise iam_multi_resource_auth_or_raise<class_stmt>YamlImportInterceptor(ViewInterceptor)<block_start><def_stmt>process self request *args **kwargs<block_start>data=request.data<line_sep>template_type=data["template_type"]<line_sep>template_ids=list(data.get("override_mappings" {}).values())<line_sep>username=request.user.username<if_stmt>template_type<eq>"project"<block_start>project_resource_id=data["project_id"]<line_sep>project_get_resource_func="resources_for_project"<line_sep>project_action=IAMMeta.FLOW_CREATE_ACTION<line_sep>template_action=IAMMeta.FLOW_EDIT_ACTION<line_sep>template_get_resource_func="resources_list_for_flows"<block_end><else_stmt><block_start>project_resource_id=<none><line_sep>project_get_resource_func=<none><line_sep>project_action=IAMMeta.COMMON_FLOW_CREATE_ACTION<line_sep>template_action=IAMMeta.COMMON_FLOW_EDIT_ACTION<line_sep>template_get_resource_func="resources_list_for_common_flows"<block_end>iam_resource_auth_or_raise(username project_action project_resource_id project_get_resource_func)<if_stmt>template_ids<block_start>iam_multi_resource_auth_or_raise(username template_action template_ids template_get_resource_func)<block_end><block_end><block_end><class_stmt>YamlExportInterceptor(ViewInterceptor)<block_start><def_stmt>process self request *args **kwargs<block_start>data=request.data<line_sep>template_type=data["template_type"]<line_sep>template_ids=data["template_id_list"]<if_stmt>template_type<eq>"project"<block_start>template_action=IAMMeta.FLOW_VIEW_ACTION<line_sep>template_get_resource_func="resources_list_for_flows"<block_end><else_stmt><block_start>template_action=IAMMeta.COMMON_FLOW_VIEW_ACTION<line_sep>template_get_resource_func="resources_list_for_common_flows"<block_end>iam_multi_resource_auth_or_raise(request.user.username template_action template_ids template_get_resource_func)<block_end><block_end>
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
<import_from_future_stmt> absolute_import<import_from_stmt>pex.enum Enum<class_stmt>BinPath(Enum["BinPath.Value"])<block_start><class_stmt>Value(Enum.Value)<block_start><pass><block_end>FALSE=Value("false")<line_sep>PREPEND=Value("prepend")<line_sep>APPEND=Value("append")<block_end>
|
# Generated by Django 2.2 on 2019-06-21 09:29
<import_from_stmt>django.conf settings<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[migrations.swappable_dependency(settings.AUTH_USER_MODEL) ("projects" "0022_availableauditparameters_is_active") ]<line_sep>operations=[migrations.AddField(model_name="project" name="admins" field=models.ManyToManyField(blank=<true> related_name="admin_of" to=settings.AUTH_USER_MODEL) ) migrations.AlterField(model_name="project" name="members" field=models.ManyToManyField(blank=<true> related_name="member_of" to=settings.AUTH_USER_MODEL) ) ]<block_end>
|
<import_stmt>copy<import_stmt>tensorflow<as>tf<import_from_stmt>onnx_tf.handlers.backend_handler BackendHandler<import_from_stmt>onnx_tf.handlers.handler onnx_op<import_from_stmt>onnx_tf.handlers.handler tf_func<line_sep>@onnx_op("Dropout")@tf_func(tf.nn.dropout)<class_stmt>Dropout(BackendHandler)<block_start>@classmethod<def_stmt>_common cls node **kwargs<block_start>tensor_dict=kwargs["tensor_dict"]<line_sep>x=tensor_dict[node.inputs[0]]<line_sep>attrs=copy.deepcopy(node.attrs)<if_stmt>cls.SINCE_VERSION<l>7<and>attrs.pop("is_test" 0)<eq>0<block_start>attrs["keep_prob"]=1-attrs.pop("ratio" 0.5)<line_sep><return>[cls.make_tensor_from_onnx_node(node attrs=attrs **kwargs)]<block_end><elif_stmt>cls.SINCE_VERSION<l>12# for Opset 7, 10
# at inference mode, is_test attribute is always set to 1
# dropout at inference mode is a no-op
<block_start><return>[x]<block_end><else_stmt># for Opset 12, 13
# ratio and training_mode are optional and passed as inputs
<block_start>ratio=0.5# default ratio
<if_stmt>len(node.inputs)<g>1<block_start>ratio=tensor_dict[node.inputs[1]]<block_end>training_mode=<false># default is false
<if_stmt>len(node.inputs)<eq>3<block_start>training_mode=tensor_dict[node.inputs[2]]<block_end>return_mask=len(node.outputs)<eq>2# if there are 2 outputs, mask is requested
<if_stmt>ratio<eq>0<or>training_mode<is><false># Inferencing
<block_start><if_stmt>return_mask<is><true><block_start><return>x tf.ones(x.shape dtype=tf.bool)<block_end><else_stmt><block_start><return>[x]<block_end><block_end><else_stmt># Training
# seed is passed in as an attribute
<block_start>seed=attrs.pop("seed" <none>)<line_sep>noise_shape=<none># noise_shape is not passed in so default to None
dropout_result=cls.make_tensor_from_onnx_node(node inputs=[x ratio noise_shape seed] attrs=attrs **kwargs)<if_stmt>return_mask<is><true># Create the mask based on the result of the Dropout
<block_start>mask=tf.dtypes.cast(dropout_result tf.bool)<line_sep><return>dropout_result mask<block_end><else_stmt><block_start><return>[dropout_result]<block_end><block_end><block_end><block_end>@classmethod<def_stmt>version_1 cls node **kwargs<block_start><return>cls._common(node **kwargs)<block_end>@classmethod<def_stmt>version_6 cls node **kwargs<block_start><return>cls._common(node **kwargs)<block_end>@classmethod<def_stmt>version_7 cls node **kwargs<block_start><return>cls._common(node **kwargs)<block_end>@classmethod<def_stmt>version_10 cls node **kwargs<block_start><return>cls._common(node **kwargs)<block_end>@classmethod<def_stmt>version_12 cls node **kwargs<block_start><return>cls._common(node **kwargs)<block_end>@classmethod<def_stmt>version_13 cls node **kwargs<block_start><return>cls._common(node **kwargs)<block_end><block_end>
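# --- Added reference sketch (illustration only, not part of onnx-tf) ---
# Restates the opset >= 12 Dropout semantics handled above with plain numpy: when
# training_mode is False or ratio == 0 the input passes through unchanged; otherwise
# kept elements are scaled by 1/(1 - ratio) and the optional mask marks the survivors.
def _dropout_reference(x, ratio=0.5, training_mode=False, rng=None):
    import numpy as np

    if not training_mode or ratio == 0:
        return x, np.ones_like(x, dtype=bool)
    rng = rng if rng is not None else np.random.default_rng()
    mask = rng.random(x.shape) >= ratio          # keep each element with probability 1 - ratio
    return np.where(mask, x / (1.0 - ratio), 0.0), mask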
|
<import_stmt>numpy<as>np<import_from_stmt>bokeh.io curdoc<import_from_stmt>bokeh.plotting figure<line_sep>N=4000<line_sep>x=np.random.random(size=N)<times>100<line_sep>y=np.random.random(size=N)<times>100<line_sep>radii=np.random.random(size=N)<times>1.5<line_sep>colors=["#%02x%02x%02x"%(int(r) int(g) 150)<for>r,g zip(50+2<times>x 30+2<times>y)]<line_sep>p=figure(tools="" toolbar_location=<none>)<line_sep>p.circle(x y radius=radii fill_color=colors fill_alpha=0.6 line_color=<none>)<line_sep>curdoc().add_root(p)<line_sep>
|
# Generated by Django 3.0.7 on 2020-11-09 12:46
<import_from_stmt>django.db migrations models<import_stmt>part.settings<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('part' '0052_partrelated') ]<line_sep>operations=[migrations.AlterField(model_name='part' name='active' field=models.BooleanField(default=<true> help_text='Is this part active?' verbose_name='Active') ) migrations.AlterField(model_name='part' name='component' field=models.BooleanField(default=part.settings.part_component_default help_text='Can this part be used to build other parts?' verbose_name='Component') ) migrations.AlterField(model_name='part' name='purchaseable' field=models.BooleanField(default=part.settings.part_purchaseable_default help_text='Can this part be purchased from external suppliers?' verbose_name='Purchaseable') ) migrations.AlterField(model_name='part' name='salable' field=models.BooleanField(default=part.settings.part_salable_default help_text='Can this part be sold to customers?' verbose_name='Salable') ) migrations.AlterField(model_name='part' name='trackable' field=models.BooleanField(default=part.settings.part_trackable_default help_text='Does this part have tracking for unique items?' verbose_name='Trackable') ) migrations.AlterField(model_name='part' name='virtual' field=models.BooleanField(default=<false> help_text='Is this a virtual part, such as a software product or license?' verbose_name='Virtual') ) ]<block_end>
|
<import_stmt>pandas<as>pd<line_sep>df=pd.read_csv('data/src/sample_pandas_normal.csv' index_col=0)<line_sep>print(df)<line_sep># age state point
# name
# Alice 24 NY 64
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# Ellen 24 CA 88
# Frank 30 NY 57
print(df['age'])<line_sep>print(type(df['age']))<line_sep># name
# Alice 24
# Bob 42
# Charlie 18
# Dave 68
# Ellen 24
# Frank 30
# Name: age, dtype: int64
# <class 'pandas.core.series.Series'>
print(df.age)<line_sep>print(type(df.age))<line_sep># name
# Alice 24
# Bob 42
# Charlie 18
# Dave 68
# Ellen 24
# Frank 30
# Name: age, dtype: int64
# <class 'pandas.core.series.Series'>
print(df[['age' 'point']])<line_sep>print(type(df[['age' 'point']]))<line_sep># age point
# name
# Alice 24 64
# Bob 42 92
# Charlie 18 70
# Dave 68 70
# Ellen 24 88
# Frank 30 57
# <class 'pandas.core.frame.DataFrame'>
print(df[['age']])<line_sep>print(type(df[['age']]))<line_sep># age
# name
# Alice 24
# Bob 42
# Charlie 18
# Dave 68
# Ellen 24
# Frank 30
# <class 'pandas.core.frame.DataFrame'>
print(df['age':'point'])<line_sep># Empty DataFrame
# Columns: [age, state, point]
# Index: []
print(df.loc[: 'age':'point'])<line_sep>print(type(df.loc[: 'age':'point']))<line_sep># age state point
# name
# Alice 24 NY 64
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# Ellen 24 CA 88
# Frank 30 NY 57
# <class 'pandas.core.frame.DataFrame'>
print(df.iloc[: [0 2]])<line_sep>print(type(df.iloc[: [0 2]]))<line_sep># age point
# name
# Alice 24 64
# Bob 42 92
# Charlie 18 70
# Dave 68 70
# Ellen 24 88
# Frank 30 57
# <class 'pandas.core.frame.DataFrame'>
print(df[1:4])<line_sep>print(type(df[1:4]))<line_sep># age state point
# name
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# <class 'pandas.core.frame.DataFrame'>
print(df[:-3])<line_sep>print(type(df[:-3]))<line_sep># age state point
# name
# Alice 24 NY 64
# Bob 42 CA 92
# Charlie 18 CA 70
# <class 'pandas.core.frame.DataFrame'>
print(df[::2])<line_sep>print(type(df[::2]))<line_sep># age state point
# name
# Alice 24 NY 64
# Charlie 18 CA 70
# Ellen 24 CA 88
# <class 'pandas.core.frame.DataFrame'>
print(df[1::2])<line_sep>print(type(df[1::2]))<line_sep># age state point
# name
# Bob 42 CA 92
# Dave 68 TX 70
# Frank 30 NY 57
# <class 'pandas.core.frame.DataFrame'>
# print(df[1])
# KeyError: 1
print(df[1:2])<line_sep>print(type(df[1:2]))<line_sep># age state point
# name
# Bob 42 CA 92
# <class 'pandas.core.frame.DataFrame'>
print(df['Bob':'Ellen'])<line_sep>print(type(df['Bob':'Ellen']))<line_sep># age state point
# name
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# Ellen 24 CA 88
# <class 'pandas.core.frame.DataFrame'>
print(df.loc['Bob'])<line_sep>print(type(df.loc['Bob']))<line_sep># age 42
# state CA
# point 92
# Name: Bob, dtype: object
# <class 'pandas.core.series.Series'>
print(df.loc[['Bob' 'Ellen']])<line_sep>print(type(df.loc[['Bob' 'Ellen']]))<line_sep># age state point
# name
# Bob 42 CA 92
# Ellen 24 CA 88
# <class 'pandas.core.frame.DataFrame'>
print(df.iloc[[1 4]])<line_sep>print(type(df.iloc[[1 4]]))<line_sep># age state point
# name
# Bob 42 CA 92
# Ellen 24 CA 88
# <class 'pandas.core.frame.DataFrame'>
print(df['age']['Alice'])<line_sep># 24
print(df['Bob':'Dave'][['age' 'point']])<line_sep># age point
# name
# Bob 42 92
# Charlie 18 70
# Dave 68 70
print(df.at['Alice' 'age'])<line_sep># 24
print(df.loc['Bob':'Dave' ['age' 'point']])<line_sep># age point
# name
# Bob 42 92
# Charlie 18 70
# Dave 68 70
|
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Helpers for testing our test code.
Only put stuff here that is specific to testing code about unit testing.
"""<import_from_stmt>hypothesis.strategies sampled_from<import_stmt>unittest<import_from_stmt>testtools.matchers AfterPreprocessing Equals MatchesStructure <import_from_stmt>._base AsyncTestCase TestCase<line_sep>base_test_cases=sampled_from([AsyncTestCase TestCase])<def_stmt>throw exception<block_start>"""
Raise 'exception'.
"""<line_sep><raise>exception<block_end><def_stmt>only_skips tests_run reasons<block_start>"""
Matches results that only had skips, and only for the given reasons.
"""<line_sep><return>has_results(tests_run=Equals(tests_run) skipped=AfterPreprocessing(<lambda>xs:list(unicode(x[1])<for>x xs) Equals(reasons)) )<block_end><def_stmt>has_results errors=<none> failures=<none> skipped=<none> expected_failures=<none> unexpected_successes=<none> tests_run=<none><block_start>"""
Return a matcher on test results.
By default, will match a result that has no tests run.
"""<if_stmt>errors<is><none><block_start>errors=Equals([])<block_end><if_stmt>failures<is><none><block_start>failures=Equals([])<block_end><if_stmt>skipped<is><none><block_start>skipped=Equals([])<block_end><if_stmt>expected_failures<is><none><block_start>expected_failures=Equals([])<block_end><if_stmt>unexpected_successes<is><none><block_start>unexpected_successes=Equals([])<block_end><if_stmt>tests_run<is><none><block_start>tests_run=Equals(0)<block_end><return>MatchesStructure(errors=errors failures=failures skipped=skipped expectedFailures=expected_failures unexpectedSuccesses=unexpected_successes testsRun=tests_run )<block_end><def_stmt>run_test case<block_start>"""
Run a test and return its results.
"""<line_sep># XXX: How many times have I written something like this?
result=unittest.TestResult()<line_sep>case.run(result)<line_sep><return>result<block_end><def_stmt>make_test_case base_case<block_start>"""
Make a single test that subclasses ``base_case`` and passes.
:param type base_case: A ``TestCase`` class.
:rtype: ``base_case``
"""<class_stmt>FooTests(base_case)<block_start><def_stmt>test_something self<block_start><pass><block_end><block_end><return>FooTests('test_something')<block_end>
|
<import_from_stmt>django.core.management.base BaseCommand<import_from_stmt>data_refinery_common.models Sample<import_from_stmt>data_refinery_common.performant_pagination.pagination PAGE_SIZE PerformantPaginator<class_stmt>Command(BaseCommand)<block_start><def_stmt>handle self *args **options<block_start>samples=Sample.processed_objects.all()<line_sep>paginator=PerformantPaginator(samples PAGE_SIZE)<line_sep>page=paginator.page()<line_sep>counter=0<while_stmt><true><block_start><for_stmt>sample page.object_list<block_start>counter<augadd>1<if_stmt>sample.results.count()<eq>0<block_start>print(sample.accession_code)<block_end><block_end><if_stmt><not>page.has_next()<block_start><break><block_end><else_stmt><block_start>page=paginator.page(page.next_page_number())<block_end><if_stmt>counter%10000<eq>0<block_start>print("Checked another 10000k samples.")<block_end><block_end><block_end><block_end>
|
# Copyright (C) 2019-2021 Ruhr West University of Applied Sciences, Bottrop, Germany
# AND Elektronische Fahrwerksysteme GmbH, Gaimersheim Germany
#
# This Source Code Form is subject to the terms of the Apache License 2.0
# If a copy of the APL2 was not distributed with this
# file, You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.txt.
"""
Methods for measuring miscalibration. The common methods are given with the
'Average Calibration Error (ACE)', 'Expected Calibration Error (ECE)' and 'Maximum Calibration Error (MCE)'.
Each method bins the samples by their confidence and measures the accuracy in each bin. The ECE gives the
mean gap between confidence and observed accuracy in each bin weighted by the number of samples.
The MCE returns the highest observed deviation. The ACE is similar to the ECE but weights each bin equally.
Available classes
=================
.. autosummary::
:toctree: _autosummary_metric
:template: custom_class.rst
ACE
ECE
MCE
MMCE
PICP
"""<import_from_stmt>.ACE ACE<import_from_stmt>.ECE ECE<import_from_stmt>.MCE MCE<import_from_stmt>.Miscalibration _Miscalibration<import_from_stmt>.PICP PICP<import_from_stmt>.MMCE MMCE<line_sep>
|
<import_stmt>os<import_stmt>lintreview.docker<as>docker<import_from_stmt>lintreview.review IssueComment<import_from_stmt>lintreview.tools Tool process_quickfix extract_version<class_stmt>Yamllint(Tool)<block_start>name='yamllint'<def_stmt>version self<block_start>output=docker.run('python2' ['yamllint' '--version'] self.base_path)<line_sep><return>extract_version(output)<block_end><def_stmt>check_dependencies self<block_start>"""
See if python2 image is installed
"""<line_sep><return>docker.image_exists('python2')<block_end><def_stmt>match_file self filename<block_start>base=os.path.basename(filename)<line_sep>name,ext=os.path.splitext(base)<line_sep><return>ext<in>['.yml' '.yaml']<block_end><def_stmt>process_files self files<block_start>"""
Run code checks with yamllint.
Only a single process is made for all files
to save resources.
A custom configuration file can be provided via the ``config`` option.
"""<line_sep>command=['yamllint' '--format=parsable']<line_sep># Add config file if it's present
<if_stmt>self.options.get('config')<block_start>command<augadd>['-c' docker.apply_base(self.options['config'])]<block_end>command<augadd>files<line_sep>output=docker.run('python2' command self.base_path)<if_stmt><not>output<block_start><return><false><block_end><if_stmt>'No such file'<in>output<and>'Traceback'<in>output<block_start>error=output.strip().split("\n")[-1]<line_sep>msg=(u'`yamllint` failed with the following error:\n'<concat>'```\n'<concat>'{}\n'<concat>'```\n')<line_sep><return>self.problems.add(IssueComment(msg.format(error)))<block_end>output=output.split("\n")<line_sep>process_quickfix(self.problems output docker.strip_base)<block_end><block_end>
|
<import_stmt>cv2<import_stmt>numpy<as>np<import_stmt>params<line_sep>METERS_PER_ENCODER_TICK=params.WHEEL_TICK_LENGTH<def_stmt>draw_steering bgr steering servo center=(320 420)# make steering wheel, lower center
#servo = 128*(servo - 125)/70.0
<block_start>servo=steering<line_sep># sdeg = steering # just 1:1 i guess?
sdeg=params.STEER_DIRECTION<times>servo# just 1:1 i guess?
srad=sdeg<times>np.pi/180.0<line_sep>S,C=16<times>30<times>np.sin(srad) 16<times>30<times>np.cos(srad)<line_sep>cv2.circle(bgr center 30 (255 255 255) 1 cv2.LINE_AA)<line_sep>scenter=(center[0]<times>16 center[1]<times>16)<line_sep>cv2.line(bgr (int(scenter[0]-C) int(scenter[1]+S)) (int(scenter[0]+C) int(scenter[1]-S)) (255 255 255) 1 cv2.LINE_AA 4)<line_sep>cv2.ellipse(bgr center (30 30) 0 -90 -90+steering (255 180 180) 5 cv2.LINE_AA)<line_sep>cv2.ellipse(bgr center (30 30) 0 -90 -90+servo (0 180 255) 2 cv2.LINE_AA)<block_end>last_ts=<none><line_sep>last_wheels=<none><def_stmt>draw_speed bgr tstamp wheels periods center=(40 420) radius=30# draw a little spedometer in the lower left
# just draw the needle for each period now
<block_start><global>last_ts last_wheels<line_sep>av=np.mean(periods[:params.NUM_ENCODERS])<if_stmt>av<ne>0<block_start>av=METERS_PER_ENCODER_TICK<times>1e6/av<block_end># cv2.putText(bgr, "%0.1f %0.1f %0.1f %0.1f m/s" % tuple(v), (10, 470),
# cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1,
# cv2.LINE_AA)
<if_stmt>last_ts<is><none><block_start>last_ts=tstamp<line_sep>last_wheels=wheels<line_sep><return><block_end>dw=wheels-last_wheels<if_stmt>np.all(dw<eq>0)<block_start>last_ts=tstamp<line_sep>last_wheels=wheels<line_sep><return><block_end># vv = METERS_PER_ENCODER_TICK * np.float32(dw) / (tstamp - last_ts)
# av = 0.5 * np.mean(v[dw != 0] + vv[dw != 0])
mph=2.23694<times>av<line_sep># draw ticks
<for_stmt>i range(13)<block_start>phi=(i-6)<times>0.4<line_sep>C,S=radius<times>np.cos(phi) radius<times>np.sin(phi)<line_sep>cv2.line(bgr (int(center[0]+S) int(center[1]-C)) (int(center[0]+0.8<times>S) int(center[1]-0.8<times>C)) (255 255 255) 1 cv2.LINE_AA)<block_end>phi=(mph-6)<times>0.4<line_sep>C,S=radius<times>np.cos(phi) radius<times>np.sin(phi)<line_sep>cv2.line(bgr (int(center[0]+S) int(center[1]-C)) (int(center[0]) int(center[1])) (180 255 180) 2 cv2.LINE_AA)<line_sep>cv2.putText(bgr "%0.1f mph"%(mph) (center[0]-10 center[1]+40) cv2.FONT_HERSHEY_PLAIN 1 (255 255 255) 1 cv2.LINE_AA)<line_sep>last_ts=tstamp<line_sep>last_wheels=wheels<block_end><def_stmt>draw_throttle img throttle center=(320 470)<block_start>cv2.line(img center (center[0]+throttle center[1]) throttle<g>0<and>(0 255 0)<or>(0 95 255) 5)<block_end><def_stmt>draw_accelerometer bgr accel gyro center=(470 470)<block_start>cv2.circle(bgr center 30 (255 255 255) 1 cv2.LINE_AA)<line_sep>cv2.ellipse(bgr center (30 30) 0 -90 -90-180<times>gyro[2]/np.pi (100 255 180) 3 cv2.LINE_AA)<line_sep>cv2.line(bgr center (int(center[0]-accel[1]<times>30) int(center[1]+accel[0]<times>30)) (100 255 100) 2 cv2.LINE_AA)<block_end>
|
#
# This file is part of LiteX.
#
# Copyright (c) 2019 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
<import_from_stmt>migen *<import_from_stmt>migen.fhdl.specials Tristate<import_from_stmt>litex.soc.interconnect.csr *<line_sep># I2C Master Bit-Banging ---------------------------------------------------------------------------
<class_stmt>I2CMaster(Module AutoCSR)<block_start>"""I2C Master Bit-Banging
Provides the minimal hardware to do software I2C Master bit banging.
On the same write CSRStorage (_w), software can control:
- SCL (I2C_SCL).
- SDA direction and value (I2C_OE, I2C_W).
Software gets back the SDA value with the read CSRStatus (_r).
"""<line_sep>pads_layout=[("scl" 1) ("sda" 1)]<def_stmt>__init__ self pads=<none><block_start><if_stmt>pads<is><none><block_start>pads=Record(self.pads_layout)<block_end>self.pads=pads<line_sep>self._w=CSRStorage(fields=[CSRField("scl" size=1 offset=0) CSRField("oe" size=1 offset=1) CSRField("sda" size=1 offset=2)] name="w")<line_sep>self._r=CSRStatus(fields=[CSRField("sda" size=1 offset=0)] name="r")<line_sep>self.connect(pads)<block_end><def_stmt>connect self pads# SCL
<block_start>self.specials<augadd>Tristate(pads.scl o=0 # I2C uses Pull-ups, only drive low.
oe=~self._w.fields.scl# Drive when scl is low.
)<line_sep># SDA
self.specials<augadd>Tristate(pads.sda o=0 # I2C uses Pull-ups, only drive low.
oe=self._w.fields.oe&~self._w.fields.sda # Drive when oe and sda is low.
i=self._r.fields.sda)<block_end><block_end><class_stmt>I2CMasterSim(I2CMaster)<block_start>"""I2C Master Bit-Banging for Verilator simulation
Uses separate pads for SDA IN/OUT as Verilator does not support tristate pins well.
"""<line_sep>pads_layout=[("scl" 1) ("sda_in" 1) ("sda_out" 1)]<def_stmt>connect self pads<block_start>_sda_w=Signal()<line_sep>_sda_oe=Signal()<line_sep>_sda_r=Signal()<line_sep>_sda_in=Signal()<line_sep>self.comb<augadd>[pads.scl.eq(self._w.fields.scl) _sda_oe.eq(self._w.fields.oe) _sda_w.eq(self._w.fields.sda) If(_sda_oe pads.sda_out.eq(_sda_w) self._r.fields.sda.eq(_sda_w) ).Else(pads.sda_out.eq(1) self._r.fields.sda.eq(pads.sda_in) )]<block_end><block_end># SPI Master Bit-Banging ---------------------------------------------------------------------------
<class_stmt>SPIMaster(Module AutoCSR)<block_start>"""3/4-wire SPI Master Bit-Banging
Provides the minimal hardware to do software 3/4-wire SPI Master bit banging.
On the same write CSRStorage (_w), software can control CLK (SPI_CLK), MOSI (SPI_MOSI), MOSI
direction (SPI_OE) in the case 3-wire SPI and up to 4 Chip Selects (SPI_CS). Software get back
MISO (SPI_MISO) with the read CSRStatus (_r).
"""<line_sep>pads_layout=[("clk" 1) ("cs_n" 4) ("mosi" 1) ("miso" 1)]<def_stmt>__init__ self pads=<none><block_start><if_stmt>pads<is><none><block_start>pads=Record(self.pads_layout)<block_end>self.pads=pads<assert_stmt>len(pads.cs_n)<le>4<line_sep>self._w=CSRStorage(fields=[CSRField("clk" size=1 offset=0) CSRField("mosi" size=1 offset=1) CSRField("oe" size=1 offset=2) CSRField("cs" size=1 offset=4)] name="w")<line_sep>self._r=CSRStatus(fields=[CSRField("miso" size=1 offset=0) CSRField("mosi" size=1 offset=1)] name="r")<line_sep># # #
_mosi_w=Signal()<line_sep>_mosi_oe=Signal()<line_sep>_mosi_r=Signal()<line_sep>_cs=Signal(4)<line_sep>self.comb<augadd>[pads.clk.eq(self._w.fields.clk) _mosi_w.eq(self._w.fields.mosi) _mosi_oe.eq(self._w.fields.oe) pads.cs_n.eq(~self._w.fields.cs) self._r.fields.mosi.eq(_mosi_r) ]<if_stmt>hasattr(pads "miso")<block_start>self.comb<augadd>self._r.fields.miso.eq(pads.miso)<block_end>self.specials<augadd>Tristate(pads.mosi _mosi_w _mosi_oe _mosi_r)<block_end><block_end>
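# --- Added host-side sketch (illustration only, not part of the gateware) ---
# Shows how software could bit-bang an I2C START condition through the I2CMaster CSR
# layout documented above (write fields: scl=bit0, oe=bit1, sda=bit2). `write_csr` is
# a hypothetical register-access helper and "i2c_w" a placeholder register name; the
# real accessor and CSR name depend on how the core is integrated into a SoC.
_SCL, _OE, _SDA = 1 << 0, 1 << 1, 1 << 2

def _i2c_start(write_csr):
    write_csr("i2c_w", _SCL)        # oe=0 releases SDA, scl=1 releases SCL: idle bus (both high)
    write_csr("i2c_w", _SCL | _OE)  # drive SDA low while SCL stays high -> START condition
    write_csr("i2c_w", _OE)         # pull SCL low, ready to clock out the first data bit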
|
# -*- coding: utf-8 -*-
"""
This package implements various parameterisations of properties from the
literature with relevance in chemistry.
"""<line_sep>
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
<import_from_stmt>typing Dict List Optional Union<import_from_stmt>azure.core.exceptions HttpResponseError<import_stmt>msrest.serialization<import_from_stmt>._machine_learning_compute_management_client_enums *<class_stmt>AcsClusterProperties(msrest.serialization.Model)<block_start>"""Information about the container service backing the cluster.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar cluster_fqdn: The FQDN of the cluster.
:vartype cluster_fqdn: str
:param orchestrator_type: Required. Type of orchestrator. It cannot be changed once the cluster
is created. Possible values include: "Kubernetes", "None".
:type orchestrator_type: str or ~azure.mgmt.machinelearningcompute.models.OrchestratorType
:param orchestrator_properties: Orchestrator specific properties.
:type orchestrator_properties:
~azure.mgmt.machinelearningcompute.models.KubernetesClusterProperties
:param system_services: The system services deployed to the cluster.
:type system_services: list[~azure.mgmt.machinelearningcompute.models.SystemService]
:param master_count: The number of master nodes in the container service.
:type master_count: int
:param agent_count: The number of agent nodes in the Container Service. This can be changed to
scale the cluster.
:type agent_count: int
:param agent_vm_size: The Azure VM size of the agent VM nodes. This cannot be changed once the
cluster is created. This list is non exhaustive; refer to
https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes for the possible VM
sizes. Possible values include: "Standard_A0", "Standard_A1", "Standard_A2", "Standard_A3",
"Standard_A4", "Standard_A5", "Standard_A6", "Standard_A7", "Standard_A8", "Standard_A9",
"Standard_A10", "Standard_A11", "Standard_D1", "Standard_D2", "Standard_D3", "Standard_D4",
"Standard_D11", "Standard_D12", "Standard_D13", "Standard_D14", "Standard_D1_v2",
"Standard_D2_v2", "Standard_D3_v2", "Standard_D4_v2", "Standard_D5_v2", "Standard_D11_v2",
"Standard_D12_v2", "Standard_D13_v2", "Standard_D14_v2", "Standard_G1", "Standard_G2",
"Standard_G3", "Standard_G4", "Standard_G5", "Standard_DS1", "Standard_DS2", "Standard_DS3",
"Standard_DS4", "Standard_DS11", "Standard_DS12", "Standard_DS13", "Standard_DS14",
"Standard_GS1", "Standard_GS2", "Standard_GS3", "Standard_GS4", "Standard_GS5". Default value:
"Standard_D3_v2".
:type agent_vm_size: str or ~azure.mgmt.machinelearningcompute.models.AgentVMSizeTypes
"""<line_sep>_validation={'cluster_fqdn':{'readonly':<true>} 'orchestrator_type':{'required':<true>} 'master_count':{'maximum':5 'minimum':1} 'agent_count':{'maximum':100 'minimum':1} }<line_sep>_attribute_map={'cluster_fqdn':{'key':'clusterFqdn' 'type':'str'} 'orchestrator_type':{'key':'orchestratorType' 'type':'str'} 'orchestrator_properties':{'key':'orchestratorProperties' 'type':'KubernetesClusterProperties'} 'system_services':{'key':'systemServices' 'type':'[SystemService]'} 'master_count':{'key':'masterCount' 'type':'int'} 'agent_count':{'key':'agentCount' 'type':'int'} 'agent_vm_size':{'key':'agentVmSize' 'type':'str'} }<def_stmt>__init__ self * orchestrator_type:Union[str "OrchestratorType"] orchestrator_properties:Optional["KubernetesClusterProperties"]=<none> system_services:Optional[List["SystemService"]]=<none> master_count:Optional[int]=1 agent_count:Optional[int]=2 agent_vm_size:Optional[Union[str "AgentVMSizeTypes"]]="Standard_D3_v2" **kwargs<block_start>super(AcsClusterProperties self).__init__(**kwargs)<line_sep>self.cluster_fqdn=<none><line_sep>self.orchestrator_type=orchestrator_type<line_sep>self.orchestrator_properties=orchestrator_properties<line_sep>self.system_services=system_services<line_sep>self.master_count=master_count<line_sep>self.agent_count=agent_count<line_sep>self.agent_vm_size=agent_vm_size<block_end><block_end><class_stmt>AppInsightsCredentials(msrest.serialization.Model)<block_start>"""AppInsights credentials.
:param app_id: The AppInsights application ID.
:type app_id: str
:param instrumentation_key: The AppInsights instrumentation key. This is not returned in
response of GET/PUT on the resource. To see this please call listKeys API.
:type instrumentation_key: str
"""<line_sep>_attribute_map={'app_id':{'key':'appId' 'type':'str'} 'instrumentation_key':{'key':'instrumentationKey' 'type':'str'} }<def_stmt>__init__ self * app_id:Optional[str]=<none> instrumentation_key:Optional[str]=<none> **kwargs<block_start>super(AppInsightsCredentials self).__init__(**kwargs)<line_sep>self.app_id=app_id<line_sep>self.instrumentation_key=instrumentation_key<block_end><block_end><class_stmt>AppInsightsProperties(msrest.serialization.Model)<block_start>"""Properties of App Insights.
:param resource_id: ARM resource ID of the App Insights.
:type resource_id: str
"""<line_sep>_attribute_map={'resource_id':{'key':'resourceId' 'type':'str'} }<def_stmt>__init__ self * resource_id:Optional[str]=<none> **kwargs<block_start>super(AppInsightsProperties self).__init__(**kwargs)<line_sep>self.resource_id=resource_id<block_end><block_end><class_stmt>AutoScaleConfiguration(msrest.serialization.Model)<block_start>"""AutoScale configuration properties.
:param status: If auto-scale is enabled for all services. Each service can turn it off
individually. Possible values include: "Enabled", "Disabled".
:type status: str or ~azure.mgmt.machinelearningcompute.models.Status
:param min_replicas: The minimum number of replicas for each service.
:type min_replicas: int
:param max_replicas: The maximum number of replicas for each service.
:type max_replicas: int
:param target_utilization: The target utilization.
:type target_utilization: float
:param refresh_period_in_seconds: Refresh period in seconds.
:type refresh_period_in_seconds: int
"""<line_sep>_validation={'min_replicas':{'minimum':1} 'max_replicas':{'minimum':1} }<line_sep>_attribute_map={'status':{'key':'status' 'type':'str'} 'min_replicas':{'key':'minReplicas' 'type':'int'} 'max_replicas':{'key':'maxReplicas' 'type':'int'} 'target_utilization':{'key':'targetUtilization' 'type':'float'} 'refresh_period_in_seconds':{'key':'refreshPeriodInSeconds' 'type':'int'} }<def_stmt>__init__ self * status:Optional[Union[str "Status"]]=<none> min_replicas:Optional[int]=1 max_replicas:Optional[int]=100 target_utilization:Optional[float]=<none> refresh_period_in_seconds:Optional[int]=<none> **kwargs<block_start>super(AutoScaleConfiguration self).__init__(**kwargs)<line_sep>self.status=status<line_sep>self.min_replicas=min_replicas<line_sep>self.max_replicas=max_replicas<line_sep>self.target_utilization=target_utilization<line_sep>self.refresh_period_in_seconds=refresh_period_in_seconds<block_end><block_end><class_stmt>AvailableOperations(msrest.serialization.Model)<block_start>"""Available operation list.
:param value: An array of available operations.
:type value: list[~azure.mgmt.machinelearningcompute.models.ResourceOperation]
"""<line_sep>_attribute_map={'value':{'key':'value' 'type':'[ResourceOperation]'} }<def_stmt>__init__ self * value:Optional[List["ResourceOperation"]]=<none> **kwargs<block_start>super(AvailableOperations self).__init__(**kwargs)<line_sep>self.value=value<block_end><block_end><class_stmt>CheckSystemServicesUpdatesAvailableResponse(msrest.serialization.Model)<block_start>"""Information about updates available for system services in a cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar updates_available: Yes if updates are available for the system services, No if not.
Possible values include: "Yes", "No".
:vartype updates_available: str or ~azure.mgmt.machinelearningcompute.models.UpdatesAvailable
"""<line_sep>_validation={'updates_available':{'readonly':<true>} }<line_sep>_attribute_map={'updates_available':{'key':'updatesAvailable' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(CheckSystemServicesUpdatesAvailableResponse self).__init__(**kwargs)<line_sep>self.updates_available=<none><block_end><block_end><class_stmt>ContainerRegistryCredentials(msrest.serialization.Model)<block_start>"""Information about the Azure Container Registry which contains the images deployed to the cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar login_server: The ACR login server name. User name is the first part of the FQDN.
:vartype login_server: str
:ivar password: The ACR primary password.
:vartype password: str
:ivar password2: The ACR secondary password.
:vartype password2: str
:ivar username: The ACR login username.
:vartype username: str
"""<line_sep>_validation={'login_server':{'readonly':<true>} 'password':{'readonly':<true>} 'password2':{'readonly':<true>} 'username':{'readonly':<true>} }<line_sep>_attribute_map={'login_server':{'key':'loginServer' 'type':'str'} 'password':{'key':'password' 'type':'str'} 'password2':{'key':'password2' 'type':'str'} 'username':{'key':'username' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ContainerRegistryCredentials self).__init__(**kwargs)<line_sep>self.login_server=<none><line_sep>self.password=<none><line_sep>self.password2=<none><line_sep>self.username=<none><block_end><block_end><class_stmt>ContainerRegistryProperties(msrest.serialization.Model)<block_start>"""Properties of Azure Container Registry.
:param resource_id: ARM resource ID of the Azure Container Registry used to store Docker images
for web services in the cluster. If not provided one will be created. This cannot be changed
once the cluster is created.
:type resource_id: str
"""<line_sep>_attribute_map={'resource_id':{'key':'resourceId' 'type':'str'} }<def_stmt>__init__ self * resource_id:Optional[str]=<none> **kwargs<block_start>super(ContainerRegistryProperties self).__init__(**kwargs)<line_sep>self.resource_id=resource_id<block_end><block_end><class_stmt>ContainerServiceCredentials(msrest.serialization.Model)<block_start>"""Information about the Azure Container Registry which contains the images deployed to the cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar acs_kube_config: The ACS kube config file.
:vartype acs_kube_config: str
:ivar service_principal_configuration: Service principal configuration used by Kubernetes.
:vartype service_principal_configuration:
~azure.mgmt.machinelearningcompute.models.ServicePrincipalProperties
:ivar image_pull_secret_name: The ACR image pull secret name which was created in Kubernetes.
:vartype image_pull_secret_name: str
"""<line_sep>_validation={'acs_kube_config':{'readonly':<true>} 'service_principal_configuration':{'readonly':<true>} 'image_pull_secret_name':{'readonly':<true>} }<line_sep>_attribute_map={'acs_kube_config':{'key':'acsKubeConfig' 'type':'str'} 'service_principal_configuration':{'key':'servicePrincipalConfiguration' 'type':'ServicePrincipalProperties'} 'image_pull_secret_name':{'key':'imagePullSecretName' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ContainerServiceCredentials self).__init__(**kwargs)<line_sep>self.acs_kube_config=<none><line_sep>self.service_principal_configuration=<none><line_sep>self.image_pull_secret_name=<none><block_end><block_end><class_stmt>ErrorDetail(msrest.serialization.Model)<block_start>"""Error detail information.
All required parameters must be populated in order to send to Azure.
:param code: Required. Error code.
:type code: str
:param message: Required. Error message.
:type message: str
"""<line_sep>_validation={'code':{'required':<true>} 'message':{'required':<true>} }<line_sep>_attribute_map={'code':{'key':'code' 'type':'str'} 'message':{'key':'message' 'type':'str'} }<def_stmt>__init__ self * code:str message:str **kwargs<block_start>super(ErrorDetail self).__init__(**kwargs)<line_sep>self.code=code<line_sep>self.message=message<block_end><block_end><class_stmt>ErrorResponse(msrest.serialization.Model)<block_start>"""Error response information.
All required parameters must be populated in order to send to Azure.
:param code: Required. Error code.
:type code: str
:param message: Required. Error message.
:type message: str
:param details: An array of error detail objects.
:type details: list[~azure.mgmt.machinelearningcompute.models.ErrorDetail]
"""<line_sep>_validation={'code':{'required':<true>} 'message':{'required':<true>} }<line_sep>_attribute_map={'code':{'key':'code' 'type':'str'} 'message':{'key':'message' 'type':'str'} 'details':{'key':'details' 'type':'[ErrorDetail]'} }<def_stmt>__init__ self * code:str message:str details:Optional[List["ErrorDetail"]]=<none> **kwargs<block_start>super(ErrorResponse self).__init__(**kwargs)<line_sep>self.code=code<line_sep>self.message=message<line_sep>self.details=details<block_end><block_end><class_stmt>ErrorResponseWrapper(msrest.serialization.Model)<block_start>"""Wrapper for error response to follow ARM guidelines.
:param error: The error response.
:type error: ~azure.mgmt.machinelearningcompute.models.ErrorResponse
"""<line_sep>_attribute_map={'error':{'key':'error' 'type':'ErrorResponse'} }<def_stmt>__init__ self * error:Optional["ErrorResponse"]=<none> **kwargs<block_start>super(ErrorResponseWrapper self).__init__(**kwargs)<line_sep>self.error=error<block_end><block_end><class_stmt>GlobalServiceConfiguration(msrest.serialization.Model)<block_start>"""Global configuration for services in the cluster.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, str]
:param etag: The configuration ETag for updates.
:type etag: str
:param ssl: The SSL configuration properties.
:type ssl: ~azure.mgmt.machinelearningcompute.models.SslConfiguration
:param service_auth: Optional global authorization keys for all user services deployed in
cluster. These are used if the service does not have auth keys.
:type service_auth: ~azure.mgmt.machinelearningcompute.models.ServiceAuthConfiguration
:param auto_scale: The auto-scale configuration.
:type auto_scale: ~azure.mgmt.machinelearningcompute.models.AutoScaleConfiguration
"""<line_sep>_attribute_map={'additional_properties':{'key':'' 'type':'{str}'} 'etag':{'key':'etag' 'type':'str'} 'ssl':{'key':'ssl' 'type':'SslConfiguration'} 'service_auth':{'key':'serviceAuth' 'type':'ServiceAuthConfiguration'} 'auto_scale':{'key':'autoScale' 'type':'AutoScaleConfiguration'} }<def_stmt>__init__ self * additional_properties:Optional[Dict[str str]]=<none> etag:Optional[str]=<none> ssl:Optional["SslConfiguration"]=<none> service_auth:Optional["ServiceAuthConfiguration"]=<none> auto_scale:Optional["AutoScaleConfiguration"]=<none> **kwargs<block_start>super(GlobalServiceConfiguration self).__init__(**kwargs)<line_sep>self.additional_properties=additional_properties<line_sep>self.etag=etag<line_sep>self.ssl=ssl<line_sep>self.service_auth=service_auth<line_sep>self.auto_scale=auto_scale<block_end><block_end><class_stmt>KubernetesClusterProperties(msrest.serialization.Model)<block_start>"""Kubernetes cluster specific properties.
:param service_principal: The Azure Service Principal used by Kubernetes.
:type service_principal: ~azure.mgmt.machinelearningcompute.models.ServicePrincipalProperties
"""<line_sep>_attribute_map={'service_principal':{'key':'servicePrincipal' 'type':'ServicePrincipalProperties'} }<def_stmt>__init__ self * service_principal:Optional["ServicePrincipalProperties"]=<none> **kwargs<block_start>super(KubernetesClusterProperties self).__init__(**kwargs)<line_sep>self.service_principal=service_principal<block_end><block_end><class_stmt>Resource(msrest.serialization.Model)<block_start>"""Azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar name: Specifies the name of the resource.
:vartype name: str
:param location: Required. Specifies the location of the resource.
:type location: str
:ivar type: Specifies the type of the resource.
:vartype type: str
:param tags: A set of tags. Contains resource tags defined as key/value pairs.
:type tags: dict[str, str]
"""<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'location':{'required':<true>} 'type':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'location':{'key':'location' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'tags':{'key':'tags' 'type':'{str}'} }<def_stmt>__init__ self * location:str tags:Optional[Dict[str str]]=<none> **kwargs<block_start>super(Resource self).__init__(**kwargs)<line_sep>self.id=<none><line_sep>self.name=<none><line_sep>self.location=location<line_sep>self.type=<none><line_sep>self.tags=tags<block_end><block_end><class_stmt>OperationalizationCluster(Resource)<block_start>"""Instance of an Azure ML Operationalization Cluster resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar name: Specifies the name of the resource.
:vartype name: str
:param location: Required. Specifies the location of the resource.
:type location: str
:ivar type: Specifies the type of the resource.
:vartype type: str
:param tags: A set of tags. Contains resource tags defined as key/value pairs.
:type tags: dict[str, str]
:param description: The description of the cluster.
:type description: str
:ivar created_on: The date and time when the cluster was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the cluster was last modified.
:vartype modified_on: ~datetime.datetime
:ivar provisioning_state: The provisioning state of the cluster. Possible values include:
"Unknown", "Updating", "Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or ~azure.mgmt.machinelearningcompute.models.OperationStatus
:ivar provisioning_errors: List of provisioning errors reported by the resource provider.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningcompute.models.ErrorResponseWrapper]
:param cluster_type: The cluster type. Possible values include: "ACS", "Local".
:type cluster_type: str or ~azure.mgmt.machinelearningcompute.models.ClusterType
:param storage_account: Storage Account properties.
:type storage_account: ~azure.mgmt.machinelearningcompute.models.StorageAccountProperties
:param container_registry: Container Registry properties.
:type container_registry: ~azure.mgmt.machinelearningcompute.models.ContainerRegistryProperties
:param container_service: Parameters for the Azure Container Service cluster.
:type container_service: ~azure.mgmt.machinelearningcompute.models.AcsClusterProperties
:param app_insights: AppInsights configuration.
:type app_insights: ~azure.mgmt.machinelearningcompute.models.AppInsightsProperties
:param global_service_configuration: Contains global configuration for the web services in the
cluster.
:type global_service_configuration:
~azure.mgmt.machinelearningcompute.models.GlobalServiceConfiguration
"""<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'location':{'required':<true>} 'type':{'readonly':<true>} 'created_on':{'readonly':<true>} 'modified_on':{'readonly':<true>} 'provisioning_state':{'readonly':<true>} 'provisioning_errors':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'location':{'key':'location' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'tags':{'key':'tags' 'type':'{str}'} 'description':{'key':'properties.description' 'type':'str'} 'created_on':{'key':'properties.createdOn' 'type':'iso-8601'} 'modified_on':{'key':'properties.modifiedOn' 'type':'iso-8601'} 'provisioning_state':{'key':'properties.provisioningState' 'type':'str'} 'provisioning_errors':{'key':'properties.provisioningErrors' 'type':'[ErrorResponseWrapper]'} 'cluster_type':{'key':'properties.clusterType' 'type':'str'} 'storage_account':{'key':'properties.storageAccount' 'type':'StorageAccountProperties'} 'container_registry':{'key':'properties.containerRegistry' 'type':'ContainerRegistryProperties'} 'container_service':{'key':'properties.containerService' 'type':'AcsClusterProperties'} 'app_insights':{'key':'properties.appInsights' 'type':'AppInsightsProperties'} 'global_service_configuration':{'key':'properties.globalServiceConfiguration' 'type':'GlobalServiceConfiguration'} }<def_stmt>__init__ self * location:str tags:Optional[Dict[str str]]=<none> description:Optional[str]=<none> cluster_type:Optional[Union[str "ClusterType"]]=<none> storage_account:Optional["StorageAccountProperties"]=<none> container_registry:Optional["ContainerRegistryProperties"]=<none> container_service:Optional["AcsClusterProperties"]=<none> app_insights:Optional["AppInsightsProperties"]=<none> global_service_configuration:Optional["GlobalServiceConfiguration"]=<none> **kwargs<block_start>super(OperationalizationCluster self).__init__(location=location tags=tags **kwargs)<line_sep>self.description=description<line_sep>self.created_on=<none><line_sep>self.modified_on=<none><line_sep>self.provisioning_state=<none><line_sep>self.provisioning_errors=<none><line_sep>self.cluster_type=cluster_type<line_sep>self.storage_account=storage_account<line_sep>self.container_registry=container_registry<line_sep>self.container_service=container_service<line_sep>self.app_insights=app_insights<line_sep>self.global_service_configuration=global_service_configuration<block_end><block_end><class_stmt>OperationalizationClusterCredentials(msrest.serialization.Model)<block_start>"""Credentials to resources in the cluster.
:param storage_account: Credentials for the Storage Account.
:type storage_account: ~azure.mgmt.machinelearningcompute.models.StorageAccountCredentials
:param container_registry: Credentials for Azure Container Registry.
:type container_registry:
~azure.mgmt.machinelearningcompute.models.ContainerRegistryCredentials
:param container_service: Credentials for Azure Container Service.
:type container_service: ~azure.mgmt.machinelearningcompute.models.ContainerServiceCredentials
:param app_insights: Credentials for Azure AppInsights.
:type app_insights: ~azure.mgmt.machinelearningcompute.models.AppInsightsCredentials
:param service_auth_configuration: Global authorization keys for all user services deployed in
the cluster. These are used if the service does not have auth keys.
:type service_auth_configuration:
~azure.mgmt.machinelearningcompute.models.ServiceAuthConfiguration
:param ssl_configuration: The SSL configuration for the services.
:type ssl_configuration: ~azure.mgmt.machinelearningcompute.models.SslConfiguration
"""<line_sep>_attribute_map={'storage_account':{'key':'storageAccount' 'type':'StorageAccountCredentials'} 'container_registry':{'key':'containerRegistry' 'type':'ContainerRegistryCredentials'} 'container_service':{'key':'containerService' 'type':'ContainerServiceCredentials'} 'app_insights':{'key':'appInsights' 'type':'AppInsightsCredentials'} 'service_auth_configuration':{'key':'serviceAuthConfiguration' 'type':'ServiceAuthConfiguration'} 'ssl_configuration':{'key':'sslConfiguration' 'type':'SslConfiguration'} }<def_stmt>__init__ self * storage_account:Optional["StorageAccountCredentials"]=<none> container_registry:Optional["ContainerRegistryCredentials"]=<none> container_service:Optional["ContainerServiceCredentials"]=<none> app_insights:Optional["AppInsightsCredentials"]=<none> service_auth_configuration:Optional["ServiceAuthConfiguration"]=<none> ssl_configuration:Optional["SslConfiguration"]=<none> **kwargs<block_start>super(OperationalizationClusterCredentials self).__init__(**kwargs)<line_sep>self.storage_account=storage_account<line_sep>self.container_registry=container_registry<line_sep>self.container_service=container_service<line_sep>self.app_insights=app_insights<line_sep>self.service_auth_configuration=service_auth_configuration<line_sep>self.ssl_configuration=ssl_configuration<block_end><block_end><class_stmt>OperationalizationClusterUpdateParameters(msrest.serialization.Model)<block_start>"""Parameters for PATCH operation on an operationalization cluster.
:param tags: A set of tags. Gets or sets a list of key/value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no longer than
128 characters and a value no longer than 256 characters.
:type tags: dict[str, str]
"""<line_sep>_attribute_map={'tags':{'key':'tags' 'type':'{str}'} }<def_stmt>__init__ self * tags:Optional[Dict[str str]]=<none> **kwargs<block_start>super(OperationalizationClusterUpdateParameters self).__init__(**kwargs)<line_sep>self.tags=tags<block_end><block_end><class_stmt>PaginatedOperationalizationClustersList(msrest.serialization.Model)<block_start>"""Paginated list of operationalization clusters.
:param value: An array of cluster objects.
:type value: list[~azure.mgmt.machinelearningcompute.models.OperationalizationCluster]
:param next_link: A continuation link (absolute URI) to the next page of results in the list.
:type next_link: str
"""<line_sep>_attribute_map={'value':{'key':'value' 'type':'[OperationalizationCluster]'} 'next_link':{'key':'nextLink' 'type':'str'} }<def_stmt>__init__ self * value:Optional[List["OperationalizationCluster"]]=<none> next_link:Optional[str]=<none> **kwargs<block_start>super(PaginatedOperationalizationClustersList self).__init__(**kwargs)<line_sep>self.value=value<line_sep>self.next_link=next_link<block_end><block_end><class_stmt>ResourceOperation(msrest.serialization.Model)<block_start>"""Resource operation.
:param name: Name of this operation.
:type name: str
:param display: Display of the operation.
:type display: ~azure.mgmt.machinelearningcompute.models.ResourceOperationDisplay
:param origin: The operation origin.
:type origin: str
"""<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'display':{'key':'display' 'type':'ResourceOperationDisplay'} 'origin':{'key':'origin' 'type':'str'} }<def_stmt>__init__ self * name:Optional[str]=<none> display:Optional["ResourceOperationDisplay"]=<none> origin:Optional[str]=<none> **kwargs<block_start>super(ResourceOperation self).__init__(**kwargs)<line_sep>self.name=name<line_sep>self.display=display<line_sep>self.origin=origin<block_end><block_end><class_stmt>ResourceOperationDisplay(msrest.serialization.Model)<block_start>"""Display of the operation.
:param provider: The resource provider name.
:type provider: str
:param resource: The resource name.
:type resource: str
:param operation: The operation.
:type operation: str
:param description: The description of the operation.
:type description: str
"""<line_sep>_attribute_map={'provider':{'key':'provider' 'type':'str'} 'resource':{'key':'resource' 'type':'str'} 'operation':{'key':'operation' 'type':'str'} 'description':{'key':'description' 'type':'str'} }<def_stmt>__init__ self * provider:Optional[str]=<none> resource:Optional[str]=<none> operation:Optional[str]=<none> description:Optional[str]=<none> **kwargs<block_start>super(ResourceOperationDisplay self).__init__(**kwargs)<line_sep>self.provider=provider<line_sep>self.resource=resource<line_sep>self.operation=operation<line_sep>self.description=description<block_end><block_end><class_stmt>ServiceAuthConfiguration(msrest.serialization.Model)<block_start>"""Global service auth configuration properties. These are the data-plane authorization keys and are used if a service doesn't define it's own.
All required parameters must be populated in order to send to Azure.
:param primary_auth_key_hash: Required. The primary auth key hash. This is not returned in the
response of GET/PUT on the resource. To see it, call the listKeys API.
:type primary_auth_key_hash: str
:param secondary_auth_key_hash: Required. The secondary auth key hash. This is not returned in the
response of GET/PUT on the resource. To see it, call the listKeys API.
:type secondary_auth_key_hash: str
"""<line_sep>_validation={'primary_auth_key_hash':{'required':<true>} 'secondary_auth_key_hash':{'required':<true>} }<line_sep>_attribute_map={'primary_auth_key_hash':{'key':'primaryAuthKeyHash' 'type':'str'} 'secondary_auth_key_hash':{'key':'secondaryAuthKeyHash' 'type':'str'} }<def_stmt>__init__ self * primary_auth_key_hash:str secondary_auth_key_hash:str **kwargs<block_start>super(ServiceAuthConfiguration self).__init__(**kwargs)<line_sep>self.primary_auth_key_hash=primary_auth_key_hash<line_sep>self.secondary_auth_key_hash=secondary_auth_key_hash<block_end><block_end><class_stmt>ServicePrincipalProperties(msrest.serialization.Model)<block_start>"""The Azure service principal used by Kubernetes for configuring load balancers.
All required parameters must be populated in order to send to Azure.
:param client_id: Required. The service principal client ID.
:type client_id: str
:param secret: Required. The service principal secret. This is not returned in the response of
GET/PUT on the resource. To see it, call the listKeys API.
:type secret: str
"""<line_sep>_validation={'client_id':{'required':<true>} 'secret':{'required':<true>} }<line_sep>_attribute_map={'client_id':{'key':'clientId' 'type':'str'} 'secret':{'key':'secret' 'type':'str'} }<def_stmt>__init__ self * client_id:str secret:str **kwargs<block_start>super(ServicePrincipalProperties self).__init__(**kwargs)<line_sep>self.client_id=client_id<line_sep>self.secret=secret<block_end><block_end><class_stmt>SslConfiguration(msrest.serialization.Model)<block_start>"""SSL configuration. If configured data-plane calls to user services will be exposed over SSL only.
:param status: SSL status. Allowed values are Enabled and Disabled. Possible values include:
"Enabled", "Disabled".
:type status: str or ~azure.mgmt.machinelearningcompute.models.Status
:param cert: The SSL cert data in PEM format.
:type cert: str
:param key: The SSL key data in PEM format. This is not returned in the response of GET/PUT on
the resource. To see it, call the listKeys API.
:type key: str
:param cname: The CName of the certificate.
:type cname: str
"""<line_sep>_attribute_map={'status':{'key':'status' 'type':'str'} 'cert':{'key':'cert' 'type':'str'} 'key':{'key':'key' 'type':'str'} 'cname':{'key':'cname' 'type':'str'} }<def_stmt>__init__ self * status:Optional[Union[str "Status"]]=<none> cert:Optional[str]=<none> key:Optional[str]=<none> cname:Optional[str]=<none> **kwargs<block_start>super(SslConfiguration self).__init__(**kwargs)<line_sep>self.status=status<line_sep>self.cert=cert<line_sep>self.key=key<line_sep>self.cname=cname<block_end><block_end><class_stmt>StorageAccountCredentials(msrest.serialization.Model)<block_start>"""Access information for the storage account.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar resource_id: The ARM resource ID of the storage account.
:vartype resource_id: str
:ivar primary_key: The primary key of the storage account.
:vartype primary_key: str
:ivar secondary_key: The secondary key of the storage account.
:vartype secondary_key: str
"""<line_sep>_validation={'resource_id':{'readonly':<true>} 'primary_key':{'readonly':<true>} 'secondary_key':{'readonly':<true>} }<line_sep>_attribute_map={'resource_id':{'key':'resourceId' 'type':'str'} 'primary_key':{'key':'primaryKey' 'type':'str'} 'secondary_key':{'key':'secondaryKey' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(StorageAccountCredentials self).__init__(**kwargs)<line_sep>self.resource_id=<none><line_sep>self.primary_key=<none><line_sep>self.secondary_key=<none><block_end><block_end><class_stmt>StorageAccountProperties(msrest.serialization.Model)<block_start>"""Properties of Storage Account.
:param resource_id: ARM resource ID of the Azure Storage Account to store CLI specific files.
If not provided, one will be created. This cannot be changed once the cluster is created.
:type resource_id: str
"""<line_sep>_attribute_map={'resource_id':{'key':'resourceId' 'type':'str'} }<def_stmt>__init__ self * resource_id:Optional[str]=<none> **kwargs<block_start>super(StorageAccountProperties self).__init__(**kwargs)<line_sep>self.resource_id=resource_id<block_end><block_end><class_stmt>SystemService(msrest.serialization.Model)<block_start>"""Information about a system service deployed in the cluster.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param system_service_type: Required. The system service type. Possible values include: "None",
"ScoringFrontEnd", "BatchFrontEnd".
:type system_service_type: str or ~azure.mgmt.machinelearningcompute.models.SystemServiceType
:ivar public_ip_address: The public IP address of the system service.
:vartype public_ip_address: str
:ivar version: The version of the system service.
:vartype version: str
"""<line_sep>_validation={'system_service_type':{'required':<true>} 'public_ip_address':{'readonly':<true>} 'version':{'readonly':<true>} }<line_sep>_attribute_map={'system_service_type':{'key':'systemServiceType' 'type':'str'} 'public_ip_address':{'key':'publicIpAddress' 'type':'str'} 'version':{'key':'version' 'type':'str'} }<def_stmt>__init__ self * system_service_type:Union[str "SystemServiceType"] **kwargs<block_start>super(SystemService self).__init__(**kwargs)<line_sep>self.system_service_type=system_service_type<line_sep>self.public_ip_address=<none><line_sep>self.version=<none><block_end><block_end><class_stmt>UpdateSystemServicesResponse(msrest.serialization.Model)<block_start>"""Response of the update system services API.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar update_status: Update status. Possible values include: "Unknown", "Updating", "Creating",
"Deleting", "Succeeded", "Failed", "Canceled".
:vartype update_status: str or ~azure.mgmt.machinelearningcompute.models.OperationStatus
:ivar update_started_on: The date and time when the last system services update was started.
:vartype update_started_on: ~datetime.datetime
:ivar update_completed_on: The date and time when the last system services update completed.
:vartype update_completed_on: ~datetime.datetime
"""<line_sep>_validation={'update_status':{'readonly':<true>} 'update_started_on':{'readonly':<true>} 'update_completed_on':{'readonly':<true>} }<line_sep>_attribute_map={'update_status':{'key':'updateStatus' 'type':'str'} 'update_started_on':{'key':'updateStartedOn' 'type':'iso-8601'} 'update_completed_on':{'key':'updateCompletedOn' 'type':'iso-8601'} }<def_stmt>__init__ self **kwargs<block_start>super(UpdateSystemServicesResponse self).__init__(**kwargs)<line_sep>self.update_status=<none><line_sep>self.update_started_on=<none><line_sep>self.update_completed_on=<none><block_end><block_end>
|
<import_stmt>torch<import_from_stmt>torch.nn functional<as>F<import_from_stmt>importlib import_module<import_from_stmt>torch.optim AdamW<import_from_stmt>torch.distributions.kl kl_divergence<import_from_stmt>torch.distributions Normal<import_from_stmt>agoge AbstractSolver<import_from_stmt>.utils sigmoidal_annealing<class_stmt>DX7VAE(AbstractSolver)<block_start>"""
Solver used to train the DX7VAE model
"""<def_stmt>__init__ self model Optim=AdamW optim_opts=dict(lr=1e-4) max_beta=0.5 beta_temp=1e-4 **kwargs<block_start><if_stmt>isinstance(Optim str)<block_start>Optim=import_module(Optim)<block_end>self.optim=Optim(params=model.parameters() **optim_opts)<line_sep>self.max_beta=max_beta<line_sep>self.model=model<line_sep>self.iter=0<line_sep>self.beta_temp=beta_temp<block_end><def_stmt>loss self X X_hat flow<block_start>"""
Computes the VAE loss objective and collects some training statistics
X - data tensor, torch.LongTensor(batch_size, num_parameters=155)
X_hat - data tensor, torch.FloatTensor(batch_size, num_parameters=155, max_value=128)
flow - the namedtuple returned by TriangularSylvesterFlow
for reference, the namedtuple is ('Flow', ('q_z', 'log_det', 'z_0', 'z_k', 'flow'))
"""<line_sep>p_z_k=Normal(0 1).log_prob(flow.z_k).sum(-1)<line_sep>q_z_0=flow.q_z.log_prob(flow.z_0).sum(-1)<line_sep>kl=(q_z_0-p_z_k-flow.log_det).mean()/flow.z_k.shape[-1]<line_sep>beta=sigmoidal_annealing(self.iter self.beta_temp).item()<line_sep>reconstruction_loss=F.cross_entropy(X_hat.transpose(-1 -2) X)<line_sep>accuracy=(X_hat.argmax(-1)<eq>X).float().mean()<line_sep>loss=reconstruction_loss+self.max_beta<times>beta<times>kl<line_sep><return>loss {'accuracy':accuracy 'reconstruction_loss':reconstruction_loss 'kl':kl 'beta':beta 'log_det':flow.log_det.mean() 'p_z_k':p_z_k.mean() 'q_z_0':q_z_0.mean() # 'iter': self.iter // self.
}<block_end><def_stmt>solve self X **kwargs<block_start>"""
Take a gradient step given an input X
X - data tensor, torch.LongTensor(batch_size, num_parameters=155)
"""<line_sep>Y=self.model(**X)<line_sep>loss,L=self.loss(**X **Y)<if_stmt>loss<ne>loss<block_start><raise>ValueError('Nan Values detected')<block_end><if_stmt>self.model.training<block_start>self.iter<augadd>1<line_sep>self.optim.zero_grad()<line_sep>loss.backward()<line_sep>self.optim.step()<block_end><return>L<block_end><def_stmt>step self<block_start><pass><block_end><def_stmt>state_dict self<block_start>state_dict={'optim':self.optim.state_dict() 'iter':self.iter}<line_sep><return>state_dict<block_end><def_stmt>load_state_dict self state_dict<block_start>self.optim.load_state_dict(state_dict['optim'])<line_sep>self.iter=state_dict['iter']<block_end><block_end>
|
<import_stmt>unittest<import_stmt>numpy<import_stmt>chainer<import_from_stmt>chainer functions<import_from_stmt>chainer testing<import_from_stmt>chainer utils<line_sep>@testing.parameterize(*testing.product({'function_name':['max' 'min'] 'shape':[(3 2 4)] 'dtype':[numpy.float32] 'axis':[<none> 0 1 2 # axis
-1 # negative_axis
(0 1) # multi_axis
(1 0) # multi_axis_invert
(0 -1) # negative_multi_axis
(-2 0) # negative_multi_axis_invert
] 'keepdims':[<true> <false>] }))@testing.fix_random()@testing.inject_backend_tests(<none> # CPU tests
[{} ]# GPU tests
+testing.product({'use_cuda':[<true>] 'cuda_device':[0 1] })# ChainerX tests
+testing.product({'use_chainerx':[<true>] 'chainerx_device':['native:0' 'cuda:0' 'cuda:1'] }))<class_stmt>TestMinMax(testing.FunctionTestCase)<block_start><def_stmt>setUp self<block_start>self.check_backward_options.update({'eps':1e-5 'atol':1e-3 'rtol':1e-2})<line_sep>self.check_double_backward_options.update({'eps':1e-5 'atol':1e-3 'rtol':1e-2})<block_end><def_stmt>generate_inputs self<block_start>eps=1e-5<line_sep># Sample x with single maximum/minimum value
<while_stmt><true><block_start>x=numpy.random.uniform(-1 1 self.shape).astype(self.dtype)<if_stmt>self.function_name<eq>'max'<block_start>y=x.max(axis=self.axis keepdims=<true>)<if_stmt><not>numpy.all((x<g>y-2<times>eps).sum(axis=self.axis)<eq>1)<block_start><continue><block_end><block_end><elif_stmt>self.function_name<eq>'min'<block_start>y=x.min(axis=self.axis keepdims=<true>)<if_stmt><not>numpy.all((x<l>y+2<times>eps).sum(axis=self.axis)<eq>1)<block_start><continue><block_end><block_end><return>x <block_end><block_end><def_stmt>forward self inputs device<block_start>x,=inputs<line_sep>function=getattr(functions self.function_name)<line_sep>y=function(x axis=self.axis keepdims=self.keepdims)<line_sep><return>y <block_end><def_stmt>forward_expected self inputs<block_start>x,=inputs<line_sep>function=getattr(numpy 'a'+self.function_name)<line_sep>expected=function(x axis=self.axis keepdims=self.keepdims)<line_sep>expected=utils.force_array(expected)<line_sep><return>expected <block_end><block_end>@testing.parameterize(*testing.product({'function_name':['max' 'min'] }))<class_stmt>TestMinMaxInvalid(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.function=getattr(functions self.function_name)<line_sep>self.x=numpy.array([1] dtype=numpy.float32)<block_end><def_stmt>test_invalid_axis_type self<block_start><with_stmt>self.assertRaises(TypeError)<block_start>self.function(self.x [0])<block_end><block_end><def_stmt>test_invalid_axis_type_in_tuple self<block_start><with_stmt>self.assertRaises(TypeError)<block_start>self.function(self.x (1 'x'))<block_end><block_end><def_stmt>test_duplicate_axis self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>self.function(self.x (0 0))<block_end><block_end><def_stmt>test_pos_neg_duplicate_axis self<block_start>x_data=numpy.random.uniform(-1 1 (3 2 4)).astype(numpy.float32)<line_sep>x=chainer.Variable(x_data)<with_stmt>self.assertRaises(ValueError)<block_start>self.function(x axis=(1 -2))<block_end><block_end><block_end>@testing.parameterize(*testing.product({'function_name':['argmax' 'argmin'] 'axis':[<none> 0 1 2 -1 -2 -3] 'dtype':[numpy.float16 numpy.float32 numpy.float64] 'shape':[(3 2 4)] }))@testing.fix_random()@testing.inject_backend_tests(<none> # CPU tests
[{} ]# GPU tests
+testing.product({'use_cuda':[<true>] 'cuda_device':[0 1] })# ChainerX tests
+testing.product({'use_chainerx':[<true>] 'chainerx_device':['native:0' 'cuda:0' 'cuda:1'] }))<class_stmt>TestArgMinMax(testing.FunctionTestCase)<block_start>skip_backward_test=<true><line_sep>skip_double_backward_test=<true><def_stmt>generate_inputs self<block_start>x=numpy.random.uniform(-1 1 self.shape).astype(self.dtype)<line_sep><return>x <block_end><def_stmt>forward self inputs device<block_start>x,=inputs<line_sep>function=getattr(functions self.function_name)<line_sep>y=function(x axis=self.axis)<line_sep>y=functions.cast(y numpy.int64)<line_sep><return>y <block_end><def_stmt>forward_expected self inputs<block_start>x,=inputs<line_sep>function=getattr(numpy self.function_name)<line_sep>expected=function(x axis=self.axis)<line_sep>expected=utils.force_array(expected)<line_sep><return>expected <block_end><block_end>@testing.parameterize(*testing.product({'function_name':['argmax' 'argmin'] }))<class_stmt>TestArgMinMaxInvalid(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.function=getattr(functions self.function_name)<line_sep>self.x=numpy.random.uniform(-1 1 (3 2 4)).astype(numpy.float32)<block_end><def_stmt>test_invalid_axis_type self<block_start><with_stmt>self.assertRaises(TypeError)<block_start>self.function(self.x [0])<block_end><block_end><def_stmt>test_invalid_axis_type_in_tuple self<block_start><with_stmt>self.assertRaises(TypeError)<block_start>self.function(self.x (1 'x'))<block_end><block_end><block_end>testing.run_module(__name__ __file__)<line_sep>
|
<import_from_stmt>unittest TestCase<import_stmt>textwrap<import_stmt>simplejson<as>json<import_from_stmt>simplejson.compat StringIO<class_stmt>TestIndent(TestCase)<block_start><def_stmt>test_indent self<block_start>h=[['blorpie'] ['whoops'] [] 'd-shtaeou' 'd-nthiouh' 'i-vhbjkhnth' {'nifty':87} {'field':'yes' 'morefield':<false>}]<line_sep>expect=textwrap.dedent("""\
[
\t[
\t\t"blorpie"
\t],
\t[
\t\t"whoops"
\t],
\t[],
\t"d-shtaeou",
\t"d-nthiouh",
\t"i-vhbjkhnth",
\t{
\t\t"nifty": 87
\t},
\t{
\t\t"field": "yes",
\t\t"morefield": false
\t}
]""")<line_sep>d1=json.dumps(h)<line_sep>d2=json.dumps(h indent='\t' sort_keys=<true> separators=(',' ': '))<line_sep>d3=json.dumps(h indent=' ' sort_keys=<true> separators=(',' ': '))<line_sep>d4=json.dumps(h indent=2 sort_keys=<true> separators=(',' ': '))<line_sep>h1=json.loads(d1)<line_sep>h2=json.loads(d2)<line_sep>h3=json.loads(d3)<line_sep>h4=json.loads(d4)<line_sep>self.assertEqual(h1 h)<line_sep>self.assertEqual(h2 h)<line_sep>self.assertEqual(h3 h)<line_sep>self.assertEqual(h4 h)<line_sep>self.assertEqual(d3 expect.replace('\t' ' '))<line_sep>self.assertEqual(d4 expect.replace('\t' ' '))<line_sep># NOTE: Python 2.4 textwrap.dedent converts tabs to spaces,
# so the following is expected to fail. Python 2.4 is not a
# supported platform in simplejson 2.1.0+.
self.assertEqual(d2 expect)<block_end><def_stmt>test_indent0 self<block_start>h={3:1}<def_stmt>check indent expected<block_start>d1=json.dumps(h indent=indent)<line_sep>self.assertEqual(d1 expected)<line_sep>sio=StringIO()<line_sep>json.dump(h sio indent=indent)<line_sep>self.assertEqual(sio.getvalue() expected)<block_end># indent=0 should emit newlines
check(0 '{\n"3": 1\n}')<line_sep># indent=None is more compact
check(<none> '{"3": 1}')<block_end><def_stmt>test_separators self<block_start>lst=[1 2 3 4]<line_sep>expect='[\n1,\n2,\n3,\n4\n]'<line_sep>expect_spaces='[\n1, \n2, \n3, \n4\n]'<line_sep># Ensure that separators still works
self.assertEqual(expect_spaces json.dumps(lst indent=0 separators=(', ' ': ')))<line_sep># Force the new defaults
self.assertEqual(expect json.dumps(lst indent=0 separators=(',' ': ')))<line_sep># Added in 2.1.4
self.assertEqual(expect json.dumps(lst indent=0))<block_end><block_end>
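Restating the behaviour these tests pin down as a compact usage sketch (same inputs and expected strings as the assertions above):

import simplejson as json

# indent=None (the default) keeps the output on a single line.
assert json.dumps({3: 1}) == '{"3": 1}'
# indent=0 still emits newlines between items, just with zero leading spaces.
assert json.dumps({3: 1}, indent=0) == '{\n"3": 1\n}'
# Since 2.1.4 the item separator defaults to ',' (no trailing space) when indent is given.
assert json.dumps([1, 2, 3, 4], indent=0) == '[\n1,\n2,\n3,\n4\n]'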
|
<import_from_stmt>timesformer_pytorch.timesformer_pytorch TimeSformer<line_sep>
|
# @File : lr_scheduler.py
# @Author: X.Yang
# @Contact : <EMAIL>
# @Date : 18-12-27
<import_from_future_stmt> division<import_from_stmt>math pi cos<import_from_stmt>mxnet lr_scheduler<class_stmt>IterLRScheduler(lr_scheduler.LRScheduler)<block_start>r"""Learning Rate Scheduler
For mode='step', we multiply lr with `step_factor` at each iteration listed in `step`.
For mode='poly'::
lr = targetlr + (baselr - targetlr) * (1 - iter / maxiter) ^ power
For mode='cosine'::
lr = targetlr + (baselr - targetlr) * (1 + cos(pi * iter / maxiter)) / 2
If warmup_iters > 0, a warmup stage will be inserted before the main lr scheduler.
For warmup_mode='linear'::
lr = warmup_lr + (baselr - warmup_lr) * iter / warmup_iters
For warmup_mode='constant'::
lr = warmup_lr
Parameters
----------
mode : str
Modes for learning rate scheduler.
Currently it supports 'step', 'poly' and 'cosine'.
baselr : float
Base learning rate, i.e. the starting learning rate.
niters : int
Number of iterations in training.
step : list
A list of iterations at which to decay the learning rate.
step_factor : float
Learning rate decay factor.
targetlr : float
Target learning rate for poly and cosine, as the ending learning rate.
power : float
Power of poly function.
warmup_iters : int
Number of iterations for the warmup stage.
warmup_lr : float
The base learning rate for the warmup stage.
warmup_mode : str
Modes for the warmup stage.
Currently it supports 'linear' and 'constant'.
"""<def_stmt>__init__ self mode baselr niters step=(30e3 60e3 90e3) step_factor=0.1 targetlr=0 power=0.9 warmup_iters=0 warmup_lr=0 warmup_mode='linear'<block_start>super(IterLRScheduler self).__init__()<assert_stmt>(mode<in>['step' 'poly' 'cosine'])<assert_stmt>(warmup_mode<in>['linear' 'constant'])<line_sep>self.mode=mode<line_sep>self.baselr=baselr<line_sep>self.learning_rate=self.baselr<line_sep>self.niters=niters<line_sep>self.step=step<line_sep>self.step_factor=step_factor<line_sep>self.targetlr=targetlr<line_sep>self.power=power<line_sep>self.warmup_iters=warmup_iters<line_sep>self.warmup_lr=warmup_lr<line_sep>self.warmup_mode=warmup_mode<block_end><def_stmt>__call__ self num_update<block_start><if_stmt>self.warmup_iters<g>num_update<block_start><if_stmt>self.warmup_mode<eq>'linear'<block_start>self.learning_rate=self.warmup_lr+(self.baselr-self.warmup_lr)<times>num_update/self.warmup_iters<block_end><elif_stmt>self.warmup_mode<eq>'constant'<block_start>self.learning_rate=self.warmup_lr<block_end><else_stmt><block_start><raise>NotImplementedError<block_end><block_end><else_stmt><block_start><if_stmt>self.mode<eq>'step'<block_start>count=sum([1<for>s self.step<if>s<le>num_update])<line_sep>self.learning_rate=self.baselr<times>pow(self.step_factor count)<block_end><elif_stmt>self.mode<eq>'poly'<block_start>self.learning_rate=self.targetlr+(self.baselr-self.targetlr)<times>pow(1-(num_update-self.warmup_iters)/(self.niters-self.warmup_iters) self.power)<block_end><elif_stmt>self.mode<eq>'cosine'<block_start>self.learning_rate=self.targetlr+(self.baselr-self.targetlr)<times>(1+cos(pi<times>(num_update-self.warmup_iters)/(self.niters-self.warmup_iters)))/2<block_end><else_stmt><block_start><raise>NotImplementedError<block_end><block_end><return>self.learning_rate<block_end><block_end>
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
<import_stmt>numpy<as>np<import_from_stmt>._cutils is_symmetric_and_hollow_cy<import_from_stmt>._cutils distmat_reorder_cy distmat_reorder_condensed_cy<def_stmt>is_symmetric_and_hollow mat<block_start>"""
Check if a Distance Matrix is symmetric and hollow.
Equivalent to [not (mat.T != mat).any(), np.trace(mat) == 0]
Parameters
----------
mat : 2D array_like
Distance matrix.
Returns
-------
is_symmetric: Boolean
not (mat.T != mat).any()
is_hollow: Boolean
np.trace(mat) == 0
"""<line_sep># is_symmetric_and_hollow_cy is optimized
# for the common case of c_contiguous.
# For all other cases, make a copy.
<if_stmt><not>mat.flags.c_contiguous<block_start>mat=np.asarray(mat order='C')<block_end><return>is_symmetric_and_hollow_cy(mat)<block_end><def_stmt>is_symmetric mat<block_start>"""
Check if a Distance Matrix is symmetric.
Equivalent to not (mat.T != mat).any()
Parameters
----------
mat : 2D array_like
Distance matrix.
Returns
-------
is_symmetric: Boolean
not (mat.T != mat).any()
"""<line_sep># the is_hollow check is really cheap,
# so can reuse is_symmetric_and_hollow
<return>is_symmetric_and_hollow(mat)[0]<block_end><def_stmt>is_hollow mat<block_start>"""
Check if a Distance Matrix is hollow.
Equivalent to np.trace(mat) == 0
Parameters
----------
mat : 2D array_like
Distance matrix.
Returns
-------
is_hollow: Boolean
np.trace(mat) == 0
"""<line_sep># is_symmetric_and_hollow_cy spends most
# of its time in the symmetry check, just use numpy
<return>(np.trace(mat)<eq>0)<block_end><def_stmt>distmat_reorder_buf in_mat reorder_vec out_mat validate=<false><block_start>"""
Reorder the rows and columns of a distance matrix
given a reorder vector.
Not all of the columns need to be used.
For example:
[ [0, 1, 2, 3] ,
[1, 0, 4, 5] ,
[2, 4, 0, 6] ,
[3, 5, 6, 0] ]
with
[1,0,3,2]
will result in
[ [0, 1, 5, 4] ,
[1, 0, 3, 2] ,
[5, 3, 0, 6] ,
[4, 2, 6, 0] ]
Parameters
----------
in_mat : 2D array_like
Distance matrix
reorder_vec : 1D_array_like
List of permutation indexes
out_mat : 2D array_like
Output, Distance matrix,
must be in c_order and same size as reorder_vec
validate: boolean
Optional, if True, validate reorder_vec content, defaults to False
"""<line_sep>np_reorder=np.asarray(reorder_vec dtype=np.long)<if_stmt>validate<block_start>maxsize=in_mat.shape[0]<line_sep>bad_cnt=np.where((np_reorder<l>0)<or>(np_reorder<ge>maxsize))[0].size<if_stmt>bad_cnt<g>0<block_start><raise>ValueError("Invalid reorder_vec")<block_end><block_end><if_stmt><not>in_mat.flags.c_contiguous<block_start>in_mat=np.asarray(in_mat order='C')<block_end>distmat_reorder_cy(in_mat np_reorder out_mat)<block_end><def_stmt>distmat_reorder in_mat reorder_vec validate=<false><block_start>"""
Reorder the rows and columns of a distance matrix
given a reorder vector.
Not all of the columns need to be used.
For example:
[ [0, 1, 2, 3] ,
[1, 0, 4, 5] ,
[2, 4, 0, 6] ,
[3, 5, 6, 0] ]
with
[1,0,3,2]
will result in
[ [0, 1, 5, 4] ,
[1, 0, 3, 2] ,
[5, 3, 0, 6] ,
[4, 2, 6, 0] ]
Parameters
----------
in_mat : 2D array_like
Distance matrix, must be in c_order
reorder_vec : 1D_array_like
List of permutation indexes
validate: boolean
Optional, if True, validate reorder_vec content, defaults to False
Returns
-------
out_mat : 2D array_like
Distance matrix
"""<line_sep>np_reorder=np.asarray(reorder_vec dtype=np.long)<if_stmt>validate<block_start>maxsize=in_mat.shape[0]<line_sep>bad_cnt=np.where((np_reorder<l>0)<or>(np_reorder<ge>maxsize))[0].size<if_stmt>bad_cnt<g>0<block_start><raise>ValueError("Invalid reorder_vec")<block_end><block_end><if_stmt><not>in_mat.flags.c_contiguous<block_start>in_mat=np.asarray(in_mat order='C')<block_end>out_mat=np.empty([np_reorder.size np_reorder.size] in_mat.dtype)<line_sep>distmat_reorder_cy(in_mat np_reorder out_mat)<line_sep><return>out_mat<block_end><def_stmt>distmat_reorder_condensed in_mat reorder_vec validate=<false><block_start>"""
Reorder the rows and columns of a distance matrix
given a reorder vector.
Not all of the columns need to be used.
For example:
[ [0, 1, 2, 3] ,
[1, 0, 4, 5] ,
[2, 4, 0, 6] ,
[3, 5, 6, 0] ]
with
[1,0,3,2]
will result in
[ 1, 5, 4 , 3, 2, 6 ]
Parameters
----------
in_mat : 2D array_like
Distance matrix, must be in c_order
reorder_vec : 1D_array_like
List of permutation indexes
validate: boolean
Optional, if True, validate reorder_vec content, defaults to False
Returns
-------
out_mat_condensed : 1D array_like
Condensed distance matrix
"""<line_sep>np_reorder=np.asarray(reorder_vec dtype=np.long)<if_stmt>validate<block_start>maxsize=in_mat.shape[0]<line_sep>bad_cnt=np.where((np_reorder<l>0)<or>(np_reorder<ge>maxsize))[0].size<if_stmt>bad_cnt<g>0<block_start><raise>ValueError("Invalid reorder_vec")<block_end><block_end><if_stmt><not>in_mat.flags.c_contiguous<block_start>in_mat=np.asarray(in_mat order='C')<block_end>csize=np.long(((np_reorder.size-1)<times>np_reorder.size)/2)<line_sep>out_mat_condensed=np.empty([csize] in_mat.dtype)<line_sep>distmat_reorder_condensed_cy(in_mat np_reorder out_mat_condensed)<line_sep><return>out_mat_condensed<block_end>
|
<import_from_stmt>.utils.setup_musescore setup_musescore<line_sep>setup_musescore()<line_sep>
|
<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>..registry LOSSES<line_sep>@LOSSES.register_module<class_stmt>MSELoss(nn.Module)<block_start><def_stmt>__init__ self ratio=1 size_average=<none> reduce=<none> reduction='mean'<block_start>super(MSELoss self).__init__()<line_sep>self.ratio=ratio<line_sep>self.size_average=size_average<line_sep>self.reduce=reduce<line_sep>self.reduction=reduction<block_end><def_stmt>forward self input target avg_factor=<none><block_start><return>self.ratio<times>F.mse_loss(input target reduction=self.reduction)<block_end><block_end>
|
<import_from_stmt>dataloader.paths PathsDataset<import_from_stmt>dataloader indoor_scenes<import_from_stmt>active_selection.vote_entropy VoteEntropySelector<import_from_stmt>utils.misc turn_on_dropout visualize_entropy visualize_spx_dataset<import_stmt>constants<import_stmt>torch<import_from_stmt>torch.utils.data DataLoader<import_from_stmt>tqdm tqdm<import_stmt>numpy<as>np<import_from_stmt>collections OrderedDict defaultdict<class_stmt>RegionalVoteEntropySelector<block_start><def_stmt>__init__ self dataset lmdb_handle superpixel_dir base_size batch_size num_classes region_size overlap_handler mode<block_start>self.lmdb_handle=lmdb_handle<line_sep>self.base_size=base_size<line_sep>self.batch_size=batch_size<line_sep>self.dataset=dataset<line_sep>self.superpixel_dir=superpixel_dir<line_sep>self.overlap_handler=overlap_handler<line_sep>self.vote_entropy_selector=VoteEntropySelector(dataset lmdb_handle base_size batch_size num_classes)<line_sep>self.region_size=region_size<if_stmt>mode<eq>'window'<block_start>self.select_next_batch=self.select_next_batch_with_windows<block_end><elif_stmt>mode<eq>'superpixel'<block_start>self.select_next_batch=self.select_next_batch_with_superpixels<block_end><else_stmt><block_start><raise>NotImplementedError<block_end><block_end># superpixel based selection methods
<def_stmt>select_next_batch_with_superpixels self model training_set selection_count<block_start>model.eval()<line_sep>model.apply(turn_on_dropout)<line_sep>loader=DataLoader(indoor_scenes.IndoorScenesWithAllInfo(self.dataset self.lmdb_handle self.superpixel_dir self.base_size training_set.all_train_paths) batch_size=self.batch_size shuffle=<false> num_workers=0)<line_sep>scores=[]<line_sep>superpixel_masks=[]<line_sep>#visualize_entropy.max_weight = 96*96
<for_stmt>sample tqdm(loader desc='Entropy')<block_start>image_batch=sample['image'].cuda()<line_sep>label_batch=sample['label'].cuda()<line_sep>superpixel_batch=sample['superpixel']<line_sep>superpixel_masks.extend([superpixel_batch[i : :]<for>i range(superpixel_batch.shape[0])])<line_sep>scores.extend(self.vote_entropy_selector.batch_entropy_func(model image_batch label_batch superpixel_batch.numpy()))<block_end>all_train_scenes=sorted(list(set([indoor_scenes.IndoorScenesWithAllInfo.get_scene_id_from_image_path(self.dataset x)<for>x training_set.all_train_paths])))<line_sep>scene_indices=[all_train_scenes.index(indoor_scenes.IndoorScenesWithAllInfo.get_scene_id_from_image_path(self.dataset im_path))<for>im_path training_set.all_train_paths]<line_sep>superpixel_ids=[]<line_sep>superpixel_scores_expanded=[]<for_stmt>image_score_idx,superpixel_scores enumerate(scores)<block_start><for_stmt>superpixel_idx superpixel_scores.keys()<block_start>superpixel_ids.append((scene_indices[image_score_idx] image_score_idx superpixel_idx))<line_sep>superpixel_scores_expanded.append(superpixel_scores[superpixel_idx])<block_end><block_end>_sorted_scores=np.array(list(list(zip(*sorted(zip(superpixel_ids superpixel_scores_expanded) key=<lambda>x:x[1] reverse=<true>)))[0]))<line_sep>sorted_scores=np.zeros((_sorted_scores.shape[0] _sorted_scores.shape[1]+1) dtype=np.int32)<line_sep>sorted_scores[: 0:_sorted_scores.shape[1]]=_sorted_scores<line_sep>total_pixels_selected=0<line_sep>selected_regions=OrderedDict()<line_sep>image_superpixels=defaultdict(list)<line_sep>ctr=0<line_sep>print('Selecting superpixels...')<line_sep>pbar=tqdm(total=selection_count)<while_stmt>total_pixels_selected<l>selection_count<times>self.base_size[0]<times>self.base_size[1]<and>ctr<l>sorted_scores.shape[0]<block_start><if_stmt>sorted_scores[ctr 2]<not><in>training_set.image_superpixels[training_set.all_train_paths[sorted_scores[ctr 1]]]<and><not>(sorted_scores[ctr 3]<eq>1)<block_start>mask=(superpixel_masks[sorted_scores[ctr 1]]<eq>sorted_scores[ctr 2]).numpy().astype(np.uint8)<if_stmt>training_set.all_train_paths[sorted_scores[ctr 1]]<in>selected_regions<block_start>selected_regions[training_set.all_train_paths[sorted_scores[ctr 1]]]=selected_regions[training_set.all_train_paths[sorted_scores[ctr 1]]]|mask<block_end><else_stmt><block_start>selected_regions[training_set.all_train_paths[sorted_scores[ctr 1]]]=mask<block_end>image_superpixels[training_set.all_train_paths[sorted_scores[ctr 1]]].append(sorted_scores[ctr 2])<line_sep>valid_pixels=mask.sum()<line_sep>total_pixels_selected<augadd>valid_pixels<line_sep>pbar.update(valid_pixels/(self.base_size[0]<times>self.base_size[1]))<if_stmt><not>self.overlap_handler<is><none><block_start>overlapping_indices=[]<line_sep>tgt_scene_id=indoor_scenes.IndoorScenesWithAllInfo.get_scene_id_from_image_path(self.dataset training_set.all_train_paths[sorted_scores[ctr 1]])<line_sep>overlap_dict=self.overlap_handler.get_overlap_dict_for_scene(tgt_scene_id)<line_sep>tgt_scene_list_index=all_train_scenes.index(tgt_scene_id)<line_sep>sorted_scores_view_mask=sorted_scores[: 0]<eq>tgt_scene_list_index<line_sep>sorted_scores_view=sorted_scores[sorted_scores_view_mask]<for_stmt>sc_idx range(sorted_scores_view.shape[0])<block_start>src_scene_id=indoor_scenes.IndoorScenesWithAllInfo.get_scene_id_from_image_path(self.dataset training_set.all_train_paths[sorted_scores_view[sc_idx 1]])<if_stmt>sorted_scores[ctr 1]<in>overlap_dict<and>(sorted_scores[ctr 2] sorted_scores_view[sc_idx 1] sorted_scores_view[sc_idx 
2])<in>overlap_dict[sorted_scores[ctr 1]]<block_start><if_stmt>overlap_dict[sorted_scores[ctr 1]][(sorted_scores[ctr 2] sorted_scores_view[sc_idx 1] sorted_scores_view[sc_idx 2])]<g>self.overlap_handler.superpixel_overlap<block_start>sorted_scores_view[sc_idx 3]=1<block_end><block_end><block_end>sorted_scores[sorted_scores_view_mask]=sorted_scores_view<block_end><block_end>ctr<augadd>1<block_end>pbar.close()<line_sep>print('Selected ' total_pixels_selected/(self.base_size[0]<times>self.base_size[1]) 'images')<line_sep>model.eval()<line_sep>training_set.expand_training_set(selected_regions image_superpixels)<block_end># window based selection methods
<def_stmt>nms self img_idx score_map<block_start>selected_score_map_pts=[]<for_stmt>i range((score_map.shape[0]<times>score_map.shape[1])<floordiv>(self.region_size<times>self.region_size))<block_start>argmax=score_map.view(-1).argmax()<line_sep>r,c=argmax<floordiv>score_map.shape[1] argmax%score_map.shape[1]<line_sep>selected_score_map_pts.append((img_idx r.cpu().item() c.cpu().item() score_map[r c].cpu().item()))<line_sep>score_map[max(0 r-self.region_size):min(score_map.shape[0] r+self.region_size) max(0 c-self.region_size):min(score_map.shape[1] c+self.region_size)]=0<block_end><return>selected_score_map_pts<block_end><def_stmt>select_next_batch_with_windows self model training_set selection_count<block_start>model.eval()<line_sep>model.apply(turn_on_dropout)<line_sep>weights=torch.cuda.FloatTensor(self.region_size self.region_size).fill_(1.)<line_sep>loader=DataLoader(PathsDataset(self.lmdb_handle self.base_size training_set.all_train_paths) batch_size=self.batch_size shuffle=<false> num_workers=0)<line_sep>map_ctr=0<line_sep>scores=[]<for_stmt>sample tqdm(loader desc='Entropy')<block_start>image_batch=sample['image'].cuda()<line_sep>label_batch=sample['label'].cuda()<for_stmt>batch_idx,entropy_map enumerate(self.vote_entropy_selector.batch_entropy_func(model image_batch label_batch))<block_start><if_stmt>training_set.all_train_paths[map_ctr]<in>training_set.get_selections()<block_start>entropy_map[training_set.get_selections()[training_set.all_train_paths[map_ctr]]<eq>1]=0<block_end>convolution_output=torch.nn.functional.conv2d(torch.cuda.FloatTensor(entropy_map).unsqueeze(0).unsqueeze(0) weights.unsqueeze(0).unsqueeze(0)).squeeze().squeeze()<line_sep>scores.extend(self.nms(map_ctr convolution_output))<line_sep>map_ctr<augadd>1<block_end><block_end>selected_samples=sorted(scores key=<lambda>x:x[3] reverse=<true>)[:int(0.5+selection_count<times>self.base_size[0]<times>self.base_size[1]/(self.region_size<times>self.region_size))]<line_sep>print('Last selected sample: ' selected_samples[-1])<line_sep>selected_regions=OrderedDict()<line_sep>total_pixels_selected=0<for_stmt>ss selected_samples<block_start>mask=np.zeros(self.base_size dtype=np.int)<eq>1<line_sep>mask[ss[1]:ss[1]+self.region_size ss[2]:ss[2]+self.region_size]=<true><line_sep>valid_pixels=mask.sum()<line_sep>total_pixels_selected<augadd>valid_pixels<if_stmt>training_set.all_train_paths[ss[0]]<in>selected_regions<block_start>selected_regions[training_set.all_train_paths[ss[0]]]=selected_regions[training_set.all_train_paths[ss[0]]]|mask<block_end><else_stmt><block_start>selected_regions[training_set.all_train_paths[ss[0]]]=mask<block_end><block_end>model.eval()<line_sep>print('Selected ' total_pixels_selected/(self.base_size[0]<times>self.base_size[1]) 'images')<line_sep>training_set.expand_training_set(selected_regions [])<block_end><block_end>
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
<import_from_stmt>starthinker.util.bigquery query_to_view<import_from_stmt>starthinker.util.bigquery table_create<import_from_stmt>starthinker.util.data get_rows<import_from_stmt>starthinker.util.data put_rows<import_from_stmt>starthinker.util.google_api API_DV360<import_from_stmt>starthinker.util.discovery_to_bigquery Discovery_To_BigQuery<import_from_stmt>starthinker.util.regexp lookup_id<import_from_stmt>starthinker.util.sheets sheets_clear<import_from_stmt>starthinker.task.dv_targeter.edit edit_log<import_from_stmt>starthinker.task.dv_targeter.edit edit_preview<import_from_stmt>starthinker.util.dv_targeting Assigned_Targeting<line_sep>TARGETING_TYPES=['TARGETING_TYPE_EXCHANGE' 'TARGETING_TYPE_SUB_EXCHANGE' 'TARGETING_TYPE_BROWSER' 'TARGETING_TYPE_LANGUAGE' 'TARGETING_TYPE_DEVICE_MAKE_MODEL' 'TARGETING_TYPE_OPERATING_SYSTEM' 'TARGETING_TYPE_LANGUAGE' 'TARGETING_TYPE_CARRIER_AND_ISP' 'TARGETING_TYPE_CATEGORY' 'TARGETING_TYPE_APP_CATEGORY' ]<def_stmt>targeting_clear config task<block_start>table_create(config task['auth_bigquery'] config.project task['dataset'] 'DV_Targeting_Options' Discovery_To_BigQuery('displayvideo' 'v1').resource_schema('TargetingOption'))<line_sep>sheets_clear(config task['auth_sheets'] task['sheet'] 'Targeting Options' 'A2:Q')<line_sep>table_create(config task['auth_bigquery'] config.project task['dataset'] 'DV_Targeting_Assigned' Discovery_To_BigQuery('displayvideo' 'v1').resource_schema('AssignedTargetingOption'))<block_end><def_stmt>targeting_clear_changes config task<block_start>sheets_clear(config task['auth_sheets'] task['sheet'] 'Destination Targeting' 'A2:Z')<line_sep>sheets_clear(config task['auth_sheets'] task['sheet'] 'Brand Safety Targeting' 'A2:Z')<line_sep>sheets_clear(config task['auth_sheets'] task['sheet'] 'Demographic Targeting' 'A2:Z')<line_sep>sheets_clear(config task['auth_sheets'] task['sheet'] 'Audience Targeting' 'A2:Z')<line_sep>sheets_clear(config task['auth_sheets'] task['sheet'] 'Device Targeting' 'A2:Z')<line_sep>sheets_clear(config task['auth_sheets'] task['sheet'] 'Geography Targeting' 'A2:Z')<line_sep>sheets_clear(config task['auth_sheets'] task['sheet'] 'Viewability Targeting' 'A2:Z')<block_end><def_stmt>targeting_load config task# load multiple from user defined sheet
<block_start><def_stmt>load_multiple <block_start>advertisers=get_rows(config task['auth_sheets'] {'sheets':{'sheet':task['sheet'] 'tab':'Advertisers' "header":<false> 'range':'A2:A'}})<for_stmt>advertiser advertisers<block_start><for_stmt>targeting_type TARGETING_TYPES<block_start><yield><from>API_DV360(config task['auth_dv'] iterate=<true>).targetingTypes().targetingOptions().list(advertiserId=str(lookup_id(advertiser[0])) targetingType=targeting_type).execute()<block_end><block_end><block_end>targeting_clear(config task)<line_sep># write to database
put_rows(config task['auth_bigquery'] {'bigquery':{'dataset':task['dataset'] 'table':'DV_Targeting_Options' 'schema':Discovery_To_BigQuery('displayvideo' 'v1').method_schema('targetingTypes.targetingOptions.list') 'format':'JSON'}} load_multiple())<line_sep># write app category
put_rows(config task['auth_sheets'] {'sheets':{'sheet':task['sheet'] 'tab':'Targeting Options' "header":<false> 'range':'A2:A'}} get_rows(config task['auth_bigquery'] {'bigquery':{'dataset':task['dataset'] 'query':"""SELECT
DISTINCT(appCategoryDetails.displayName)
FROM `{dataset}.DV_Targeting_Options`
ORDER BY 1
""".format(**task) 'legacy':<false>}}))<line_sep># write exchange
put_rows(config task['auth_sheets'] {'sheets':{'sheet':task['sheet'] 'tab':'Targeting Options' "header":<false> 'range':'B2:B'}} get_rows(config task['auth_bigquery'] {'bigquery':{'dataset':task['dataset'] 'query':"""SELECT
DISTINCT(subExchangeDetails.displayName)
FROM `{dataset}.DV_Targeting_Options`
ORDER BY 1
""".format(**task) 'legacy':<false>}}))<line_sep># write browser
put_rows(config task['auth_sheets'] {'sheets':{'sheet':task['sheet'] 'tab':'Targeting Options' "header":<false> 'range':'C2:C'}} get_rows(config task['auth_bigquery'] {'bigquery':{'dataset':task['dataset'] 'query':"""SELECT
DISTINCT(browserDetails.displayName)
FROM `{dataset}.DV_Targeting_Options`
ORDER BY 1
""".format(**task) 'legacy':<false>}}))<line_sep># write make / model
put_rows(config task['auth_sheets'] {'sheets':{'sheet':task['sheet'] 'tab':'Targeting Options' "header":<false> 'range':'D2:D'}} get_rows(config task['auth_bigquery'] {'bigquery':{'dataset':task['dataset'] 'query':"""SELECT
DISTINCT(deviceMakeModelDetails.displayName)
FROM `{dataset}.DV_Targeting_Options`
ORDER BY 1
""".format(**task) 'legacy':<false>}}))<line_sep># write category
put_rows(config task['auth_sheets'] {'sheets':{'sheet':task['sheet'] 'tab':'Targeting Options' "header":<false> 'range':'E2:E'}} get_rows(config task['auth_bigquery'] {'bigquery':{'dataset':task['dataset'] 'query':"""SELECT
DISTINCT(categoryDetails.displayName)
FROM `{dataset}.DV_Targeting_Options`
ORDER BY 1
""".format(**task) 'legacy':<false>}}))<line_sep># write language
put_rows(config task['auth_sheets'] {'sheets':{'sheet':task['sheet'] 'tab':'Targeting Options' "header":<false> 'range':'F2:F'}} get_rows(config task['auth_bigquery'] {'bigquery':{'dataset':task['dataset'] 'query':"""SELECT
DISTINCT(languageDetails.displayName)
FROM `{dataset}.DV_Targeting_Options`
ORDER BY 1
""".format(**task) 'legacy':<false>}}))<line_sep># write operating system
put_rows(config task['auth_sheets'] {'sheets':{'sheet':task['sheet'] 'tab':'Targeting Options' "header":<false> 'range':'G2:G'}} get_rows(config task['auth_bigquery'] {'bigquery':{'dataset':task['dataset'] 'query':"""SELECT
DISTINCT(operatingSystemDetails.displayName)
FROM `{dataset}.DV_Targeting_Options`
ORDER BY 1
""".format(**task) 'legacy':<false>}}))<line_sep># write carrier and isp
put_rows(config task['auth_sheets'] {'sheets':{'sheet':task['sheet'] 'tab':'Targeting Options' "header":<false> 'range':'H2:H'}} get_rows(config task['auth_bigquery'] {'bigquery':{'dataset':task['dataset'] 'query':"""SELECT
CONCAT(carrierAndIspDetails.displayName, ' - ', SUBSTR(carrierAndIspDetails.type, 22))
FROM `{dataset}.DV_Targeting_Options`
GROUP BY 1
ORDER BY 1
""".format(**task) 'legacy':<false>}}))<block_end><def_stmt>targeting_combine config task# read destination targeting
<block_start>put_rows(config task["auth_bigquery"] {"bigquery":{"dataset":task["dataset"] "table":"SHEET_Destination_Targeting" "schema":[{"name":"Action" "type":"STRING"} {"name":"Partner" "type":"STRING"} {"name":"Advertiser" "type":"STRING"} {"name":"LineItem" "type":"STRING"} {"name":"Authorized_Seller" "type":"STRING"} {"name":"User_Rewarded_Content" "type":"STRING"} {"name":"Exchange" "type":"STRING"} {"name":"Sub_Exchange" "type":"STRING"} {"name":"Channel" "type":"STRING"} {"name":"Channel_Negative" "type":"BOOLEAN"} {"name":"Inventory_Source" "type":"STRING"} {"name":"Inventory_Group" "type":"STRING"} {"name":"URL" "type":"STRING"} {"name":"URL_Negative" "type":"BOOLEAN"} {"name":"App" "type":"STRING"} {"name":"App_Negative" "type":"BOOLEAN"} {"name":"App_Category" "type":"STRING"} {"name":"App_Category_Negative" "type":"BOOLEAN"} ] "format":"CSV"}} get_rows(config task["auth_sheets"] {"sheets":{"sheet":task["sheet"] "tab":"Destination Targeting" "header":<false> "range":"A2:Z"}}))<line_sep># read brand safety targeting
put_rows(config task["auth_bigquery"] {"bigquery":{"dataset":task["dataset"] "table":"SHEET_Brand_Safety_Targeting" "schema":[{"name":"Action" "type":"STRING"} {"name":"Partner" "type":"STRING"} {"name":"Advertiser" "type":"STRING"} {"name":"LineItem" "type":"STRING"} {"name":"Content_Label" "type":"STRING"} {"name":"Sensitive_Category" "type":"STRING"} {"name":"Negative_Keyword_List" "type":"STRING"} {"name":"Category" "type":"STRING"} {"name":"Category_Negative" "type":"BOOLEAN"} {"name":"Keyword" "type":"STRING"} {"name":"Keyword_Negative" "type":"BOOLEAN"} ] "format":"CSV"}} get_rows(config task["auth_sheets"] {"sheets":{"sheet":task["sheet"] "tab":"Brand Safety Targeting" "header":<false> "range":"A2:Z"}}))<line_sep># read demographic targeting
put_rows(config task["auth_bigquery"] {"bigquery":{"dataset":task["dataset"] "table":"SHEET_Demographic_Targeting" "schema":[{"name":"Action" "type":"STRING"} {"name":"Partner" "type":"STRING"} {"name":"Advertiser" "type":"STRING"} {"name":"LineItem" "type":"STRING"} {"name":"Age_Range" "type":"STRING"} {"name":"Gender" "type":"STRING"} {"name":"Parental_Status" "type":"STRING"} {"name":"Household_Income" "type":"STRING"} {"name":"Language" "type":"STRING"} {"name":"Language_Negative" "type":"BOOLEAN"} ] "format":"CSV"}} get_rows(config task["auth_sheets"] {"sheets":{"sheet":task["sheet"] "tab":"Demographic Targeting" "header":<false> "range":"A2:Z"}}))<line_sep># read audience targeting
put_rows(config task["auth_bigquery"] {"bigquery":{"dataset":task["dataset"] "table":"SHEET_Audience_Targeting" "schema":[{"name":"Action" "type":"STRING"} {"name":"Partner" "type":"STRING"} {"name":"Advertiser" "type":"STRING"} {"name":"LineItem" "type":"STRING"} {"name":"Included_1P_And_3P_Group" "type":"INTEGER"} {"name":"Included_1P_And_3P" "type":"STRING"} {"name":"Included_1P_And_3P_Recency" "type":"STRING"} {"name":"Excluded_1P_And_3P" "type":"STRING"} {"name":"Excluded_1P_And_3P_Recency" "type":"STRING"} {"name":"Included_Google" "type":"STRING"} {"name":"Excluded_Google" "type":"STRING"} {"name":"Included_Custom" "type":"STRING"} {"name":"Included_Combined" "type":"STRING"} ] "format":"CSV"}} get_rows(config task["auth_sheets"] {"sheets":{"sheet":task["sheet"] "tab":"Audience Targeting" "header":<false> "range":"A2:Z"}}))<line_sep># read device targeting
put_rows(config task["auth_bigquery"] {"bigquery":{"dataset":task["dataset"] "table":"SHEET_Device_Targeting" "schema":[{"name":"Action" "type":"STRING"} {"name":"Partner" "type":"STRING"} {"name":"Advertiser" "type":"STRING"} {"name":"LineItem" "type":"STRING"} {"name":"Device_Type" "type":"STRING"} {"name":"Make_Model" "type":"STRING"} {"name":"Make_Model_Negative" "type":"BOOLEAN"} {"name":"Operating_System" "type":"STRING"} {"name":"Operating_System_Negative" "type":"BOOLEAN"} {"name":"Browser" "type":"STRING"} {"name":"Browser_Negative" "type":"BOOLEAN"} {"name":"Environment" "type":"STRING"} {"name":"Carrier_And_ISP" "type":"STRING"} {"name":"Carrier_And_ISP_Negative" "type":"BOOLEAN"} ] "format":"CSV"}} get_rows(config task["auth_sheets"] {"sheets":{"sheet":task["sheet"] "tab":"Device Targeting" "header":<false> "range":"A2:Z"}}))<line_sep># read geography targeting
put_rows(config task["auth_bigquery"] {"bigquery":{"dataset":task["dataset"] "table":"SHEET_Geography_Targeting" "schema":[{"name":"Action" "type":"STRING"} {"name":"Partner" "type":"STRING"} {"name":"Advertiser" "type":"STRING"} {"name":"LineItem" "type":"STRING"} {"name":"Day_Of_Week" "type":"STRING"} {"name":"Hour_Start" "type":"INTEGER"} {"name":"Hour_End" "type":"INTEGER"} {"name":"Timezone" "type":"STRING"} {"name":"Geo_Region" "type":"STRING"} {"name":"Geo_Region_Type" "type":"STRING"} {"name":"Geo_Region_Negative" "type":"BOOLEAN"} {"name":"Proximity_Location_List" "type":"STRING"} {"name":"Proximity_Location_List_Radius_Range" "type":"STRING"} {"name":"Regional_Location_List" "type":"STRING"} {"name":"Regional_Location_List_Negative" "type":"BOOLEAN"} ] "format":"CSV"}} get_rows(config task["auth_sheets"] {"sheets":{"sheet":task["sheet"] "tab":"Geography Targeting" "header":<false> "range":"A2:Z"}}))<line_sep># read viewability targeting
put_rows(config task["auth_bigquery"] {"bigquery":{"dataset":task["dataset"] "table":"SHEET_Viewability_Targeting" "schema":[{"name":"Action" "type":"STRING"} {"name":"Partner" "type":"STRING"} {"name":"Advertiser" "type":"STRING"} {"name":"LineItem" "type":"STRING"} {"name":"Video_Player_Size" "type":"STRING"} {"name":"In_Stream_Position" "type":"STRING"} {"name":"Out_Stream_Position" "type":"BOOLEAN"} {"name":"On_Screen_Position" "type":"STRING"} {"name":"Viewability" "type":"STRING"} ] "format":"CSV"}} get_rows(config task["auth_sheets"] {"sheets":{"sheet":task["sheet"] "tab":"Viewability Targeting" "header":<false> "range":"A2:Z"}}))<line_sep>query_to_view(config task["auth_bigquery"] config.project task["dataset"] "SHEET_Combined_Targeting" """SELECT
COALESCE(
L.advertiserId,
A.advertiserId,
CAST(REGEXP_EXTRACT(Advertiser, r' - (\d+)$') AS INT64)
) AS Advertiser_Lookup,
T.*
FROM (
SELECT
COALESCE(A.Action,B.Action,C.Action,D.Action,E.Action,F.Action,G.Action) AS Action,
COALESCE(A.partner,B.Partner,C.Partner,D.partner,E.Partner,F.Partner,G.Partner) AS Partner,
COALESCE(A.Advertiser,B.Advertiser,C.Advertiser,D.Advertiser,E.Advertiser,F.Advertiser,G.Advertiser) AS Advertiser,
COALESCE(A.LineItem,B.LineItem,C.LineItem,D.LineItem,E.LineItem,F.LineItem,G.LineItem) AS LineItem,
* EXCEPT (Action, Partner, Advertiser, LineItem)
FROM `{dataset}.SHEET_Destination_Targeting` AS A
FULL OUTER JOIN `{dataset}.SHEET_Brand_Safety_Targeting` AS B
ON A.Action=B.Action
AND A.Partner=B.Partner
AND A.Advertiser=B.Advertiser
AND A.LineItem=B.LineItem
FULL OUTER JOIN `{dataset}.SHEET_Demographic_Targeting` AS C
ON A.Action=C.Action
AND A.Partner=C.Partner
AND A.Advertiser=C.Advertiser
AND A.LineItem=C.LineItem
FULL OUTER JOIN `{dataset}.SHEET_Audience_Targeting` AS D
ON A.Action=D.Action
AND A.Partner=D.Partner
AND A.Advertiser=D.Advertiser
AND A.LineItem=D.LineItem
FULL OUTER JOIN `{dataset}.SHEET_Device_Targeting` AS E
ON A.Action=E.Action
AND A.Partner=E.Partner
AND A.Advertiser=E.Advertiser
AND A.LineItem=E.LineItem
FULL OUTER JOIN `{dataset}.SHEET_Geography_Targeting` AS F
ON A.Action=F.Action
AND A.Partner=F.Partner
AND A.Advertiser=F.Advertiser
AND A.LineItem=F.LineItem
FULL OUTER JOIN `{dataset}.SHEET_Viewability_Targeting` AS G
ON A.Action=G.Action
AND A.Partner=G.Partner
AND A.Advertiser=G.Advertiser
AND A.LineItem=G.LineItem
) AS T
LEFT JOIN `{dataset}.DV_LineItems` AS L
ON CAST(REGEXP_EXTRACT(T.LineItem, r' - (\d+)$') AS INT64)=L.lineItemId
LEFT JOIN (
SELECT partnerId, advertiserId
FROM `{dataset}.DV_Advertisers`
GROUP BY 1,2
) AS A
ON CAST(REGEXP_EXTRACT(T.Partner, r' - (\d+)$') AS INT64)=A.partnerId
""".format(**task) legacy=<false>)<block_end><def_stmt>targeting_edit config task commit=<false><block_start>edits=[]<line_sep>targetings={}<line_sep>targeting_combine(config task)<for_stmt>row get_rows(config task["auth_bigquery"] {"bigquery":{"dataset":task["dataset"] "table":"SHEET_Combined_Targeting" }} as_object=<true>)# check if settings are applied at this layer
<block_start><if_stmt><not>row['Action']<block_start><continue><block_end># create new batch of candidates
candidates=[]<line_sep># check partner ID from sheet
<if_stmt>row['Partner']# if action is at Advertiser layer, translate partner into list of advertisers
<block_start><if_stmt>'ADVERTISERS'<in>row['Action'].upper()<block_start><for_stmt>advertiserId get_rows(config task['auth_bigquery'] {'bigquery':{'dataset':task['dataset'] 'query':"SELECT advertiserId FROM `{dataset}.DV_Advertisers` WHERE partnerId={partnerId};".format(dataset=task['dataset'] partnerId=lookup_id(row['Partner'])) 'legacy':<false>}} unnest=<true>)<block_start>candidates.append(targetings.setdefault(('Advertiser' 'Partner {0} : {1}'.format(row['Partner'] advertiserId)) Assigned_Targeting(config task["auth_dv"] <none> advertiserId <none>)))<block_end><block_end># if action is at LineItem layer, translate partner into list of lineitems
<elif_stmt>'LINEITEMS'<in>row['Action'].upper()<block_start>print("NOT IMPLEMENTED UNTIL FURTHER EVALUATION")<block_end># if action is directly on Partner, only add it to the list
<else_stmt><block_start>candidates.append(targetings.setdefault(('Partner' row['Partner']) Assigned_Targeting(config task["auth_dv"] lookup_id(row['Partner']) row['Advertiser_Lookup'] # required by API for lookup of values ( not for targeting )
<none>)))<block_end><block_end># check advertiser ID from sheet
<if_stmt>row['Advertiser']# if action is at LineItem layer, translate advertiser into list of lineitems
<block_start><if_stmt>'LINEITEMS'<in>row['Action'].upper()<block_start><for_stmt>lineItemId get_rows(config task['auth_bigquery'] {'bigquery':{'dataset':task['dataset'] 'query':"SELECT lineItemId FROM `{dataset}.DV_LineItems` WHERE advertiserId={advertiserId};".format(dataset=task['dataset'] advertiserId=lookup_id(row['Advertiser'])) 'legacy':<false>}} unnest=<true>)<block_start>candidates.append(targetings.setdefault(('LineItem' 'Advertiser {0} : {1}'.format(row['Advertiser'] lineItemId)) Assigned_Targeting(config task["auth_dv"] <none> lookup_id(row['Advertiser']) lineItemId)))<block_end><block_end># if action is directly on Advertiser, only add it to the list
<else_stmt><block_start>candidates.append(targetings.setdefault(('Advertiser' row['Advertiser']) Assigned_Targeting(config task["auth_dv"] <none> lookup_id(row['Advertiser']) <none>)))<block_end><block_end># check lineitem ID from sheet
<if_stmt>row['LineItem']<block_start>candidates.append(targetings.setdefault(('LineItem' row['LineItem']) Assigned_Targeting(config task["auth_dv"] <none> row['Advertiser_Lookup'] lookup_id(row['LineItem']))))<block_end># attempt targeting changes for each candidate
<for_stmt>targeting candidates<block_start><if_stmt>row['Authorized_Seller']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_authorized_seller(row['Authorized_Seller'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_authorized_seller(row['Authorized_Seller'])<block_end><block_end><if_stmt>row['User_Rewarded_Content']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_user_rewarded_content(row['User_Rewarded_Content'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_user_rewarded_content(row['User_Rewarded_Content'])<block_end><block_end><if_stmt>row['Exchange']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_exchange(row['Exchange'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_exchange(row['Exchange'])<block_end><block_end><if_stmt>row['Sub_Exchange']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_sub_exchange(row['Sub_Exchange'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_sub_exchange(row['Sub_Exchange'])<block_end><block_end><if_stmt>row['Channel']<block_start>identifier=lookup_id(row['Channel'])<if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_channel(identifier row['Channel_Negative']<or><false>)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_channel(identifier)<block_end><block_end><if_stmt>row['Inventory_Source']<block_start>identifier=lookup_id(row['Inventory_Source'])<if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_inventory_source(identifier)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_inventory_source(identifier)<block_end><block_end><if_stmt>row['Inventory_Group']<block_start>identifier=lookup_id(row['Inventory_Group'])<if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_inventory_source_group(identifier)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_inventory_source_group(identifier)<block_end><block_end><if_stmt>row['URL']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_url(row['URL'] row['URL_Negative']<or><false>)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_url(row['URL'])<block_end><block_end><if_stmt>row['App']<block_start>identifier=lookup_id(row['App'])<if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_app(identifier row['App_Negative']<or><false>)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_app(identifier)<block_end><block_end><if_stmt>row['App_Category']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_app_category(row['App_Category'] 
row['App_Category_Negative']<or><false>)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_app_category(row['App_Category'])<block_end><block_end><if_stmt>row['Content_Label']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_content_label(row['Content_Label'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_content_label(row['Content_Label'])<block_end><block_end><if_stmt>row['Sensitive_Category']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_sensitive_category(row['Sensitive_Category'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_sensitive_category(row['Sensitive_Category'])<block_end><block_end><if_stmt>row['Negative_Keyword_List']<block_start>identifier=lookup_id(row['Negative_Keyword_List'])<if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_negative_keyword_list(identifier)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_negative_keyword_list(identifier)<block_end><block_end><if_stmt>row['Keyword']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_keyword(row['Keyword'] row['Keyword_Negative']<or><false>)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_keyword(row['Keyword'])<block_end><block_end><if_stmt>row['Category']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_category(row['Category'] row['Category_Negative']<or><false>)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_category(row['Category'])<block_end><block_end><if_stmt>row['Age_Range']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_age_range(row['Age_Range'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_age_range(row['Age_Range'])<block_end><block_end><if_stmt>row['Gender']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_gender(row['Gender'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_gender(row['Gender'])<block_end><block_end><if_stmt>row['Parental_Status']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_parental_status(row['Parental_Status'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_parental_status(row['Parental_Status'])<block_end><block_end><if_stmt>row['Geo_Region']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_geo_region(row['Geo_Region'] row['Geo_Region_Type'] row['Geo_Region_Negative']<or><false>)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_geo_region(row['Geo_Region'])<block_end><block_end><if_stmt>row['Proximity_Location_List']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_proximity_location_list(row['Proximity_Location_List'] row['Proximity_Location_List_Radius_Range'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_proximity_location_list(row['Proximity_Location_List'])<block_end><block_end><if_stmt>row['Regional_Location_List']<block_start>identifier=lookup_id(row['Regional_Location_List'])<if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_regional_location_list(identifier 
row['Regional_Location_List_Negative']<or><false>)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_regional_location_list(identifier)<block_end><block_end><if_stmt>row['Household_Income']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_household_income(row['Household_Income'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_household_income(row['Household_Income'])<block_end><block_end><if_stmt>row['Language']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_language(row['Language'] row['Language_Negative']<or><false>)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_language(row['Language'])<block_end><block_end><if_stmt>row['Included_1P_And_3P']<block_start>identifier=lookup_id(row['Included_1P_And_3P'])<if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_included_1p_and_3p_audience(identifier row['Included_1P_And_3P_Recency'] row['Included_1P_And_3P_Group'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_included_1p_and_3p_audience(identifier row['Included_1P_And_3P_Recency'] row['Included_1P_And_3P_Group'])<block_end><block_end><if_stmt>row['Excluded_1P_And_3P']<block_start>identifier=lookup_id(row['Excluded_1P_And_3P'])<if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_excluded_1p_and_3p_audience(identifier row['Excluded_1P_And_3P_Recency'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_excluded_1p_and_3p_audience(identifier row['Excluded_1P_And_3P_Recency'])<block_end><block_end><if_stmt>row['Included_Google']<block_start>identifier=lookup_id(row['Included_Google'])<if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_included_google_audience(identifier)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_included_google_audience(identifier)<block_end><block_end><if_stmt>row['Excluded_Google']<block_start>identifier=lookup_id(row['Excluded_Google'])<if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_excluded_google_audience(identifier)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_excluded_google_audience(identifier)<block_end><block_end><if_stmt>row['Included_Custom']<block_start>identifier=lookup_id(row['Included_Custom'])<if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_included_custom_audience(identifier)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_included_custom_audience(identifier)<block_end><block_end><if_stmt>row['Included_Combined']<block_start>identifier=lookup_id(row['Included_Combined'])<if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_included_combined_audience(identifier)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_included_combined_audience(identifier)<block_end><block_end><if_stmt>row['Device_Type']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_device_type(row['Device_Type'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_device_type(row['Device_Type'])<block_end><block_end><if_stmt>row['Make_Model']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_make_model(row['Make_Model'] 
row['Make_Model_Negative']<or><false>)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_make_model(row['Make_Model'])<block_end><block_end><if_stmt>row['Operating_System']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_operating_system(row['Operating_System'] row['Operating_System_Negative']<or><false>)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_operating_system(row['Operating_System'])<block_end><block_end><if_stmt>row['Browser']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_browser(row['Browser'] row['Browser_Negative']<or><false>)<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_browser(row['Browser'])<block_end><block_end><if_stmt>row['Environment']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_environment(row['Environment'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_environment(row['Environment'])<block_end><block_end><if_stmt>row['Carrier_And_ISP']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_carrier_and_isp(row['Carrier_And_ISP'] row['Carrier_And_ISP_Negative'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_carrier_and_isp(row['Carrier_And_ISP'])<block_end><block_end><if_stmt>row['Day_Of_Week']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_day_and_time(row['Day_Of_Week'] row['Hour_Start'] row['Hour_End'] row['Timezone'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_day_and_time(row['Day_Of_Week'] row['Hour_Start'] row['Hour_End'] row['Timezone'])<block_end><block_end><if_stmt>row['Video_Player_Size']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_video_player_size(row['Video_Player_Size'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_video_player_size(row['Video_Player_Size'])<block_end><block_end><if_stmt>row['In_Stream_Position']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_instream_position(row['In_Stream_Position'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_instream_position(row['In_Stream_Position'])<block_end><block_end><if_stmt>row['Out_Stream_Position']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_outstream_position(row['Out_Stream_Position'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_outstream_position()<block_end><block_end><if_stmt>row['On_Screen_Position']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_on_screen_position(row['On_Screen_Position'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_on_screen_position(row['On_Screen_Position'])<block_end><block_end><if_stmt>row['Viewability']<block_start><if_stmt>'ADD'<in>row['Action'].upper()<block_start>targeting.add_viewability(row['Viewability'])<block_end><elif_stmt>'DELETE'<in>row['Action'].upper()<block_start>targeting.delete_viewability(row['Viewability'])<block_end><block_end><block_end><block_end><for_stmt>layer_and_name,targeting 
targetings.items()<block_start>layer,name=layer_and_name<line_sep>body=targeting.get_body()<line_sep>warnings=targeting.get_warnings()<if_stmt>body<block_start>parameters={'body':body}<if_stmt>layer<eq>'Partner'<block_start>parameters['partnerId']=str(targeting.partner)<block_end><elif_stmt>layer<eq>'Advertiser'<block_start>parameters['advertiserId']=str(targeting.advertiser)<block_end><elif_stmt>layer<eq>'LineItem'<block_start>parameters['advertiserId']=str(targeting.advertiser)<line_sep>parameters['lineItemId']=str(targeting.lineitem)<block_end>edits.append({"layer":layer "partner":name<if>layer<eq>'Partner'<else>'' "advertiser":name<if>layer<eq>'Advertiser'<else>'' "line_item":name<if>layer<eq>'LineItem'<else>'' "parameters":parameters})<block_end><if_stmt>warnings<block_start>edit_log(config task {"layer":layer "partner":name<if>layer<eq>'Partner'<else>'' "advertiser":name<if>layer<eq>'Advertiser'<else>'' "line_item":name<if>layer<eq>'LineItem'<else>'' "warning":"\n".join(warnings)})<block_end><block_end>edit_preview(config task edits)<if_stmt>commit<block_start>targeting_commit(config task edits)<block_end><block_end><def_stmt>targeting_commit config task edits<block_start><for_stmt>edit edits<block_start><try_stmt><block_start><if_stmt>edit.get("line_item")<block_start>print("API LINE ITEM:" edit["line_item"])<line_sep>response=API_DV360(config task["auth_dv"]).advertisers().lineItems().bulkEditLineItemAssignedTargetingOptions(**edit["parameters"]).execute()<line_sep>edit["success"]=len(response.get("createdAssignedTargetingOptions" []))<block_end><elif_stmt>edit.get("advertiser")<block_start>print("API ADVERTISER:" edit["advertiser"])<line_sep>response=API_DV360(config task["auth_dv"]).advertisers().bulkEditAdvertiserAssignedTargetingOptions(**edit["parameters"]).execute()<line_sep>edit["success"]=len(response.get("createdAssignedTargetingOptions" []))<block_end><elif_stmt>edit.get("partner")<block_start>print("API PARTNER:" edit["partner"])<line_sep>response=API_DV360(config task["auth_dv"]).partners().bulkEditPartnerAssignedTargetingOptions(**edit["parameters"]).execute()<line_sep>edit["success"]=len(response.get("createdAssignedTargetingOptions" []))<block_end><block_end><except_stmt>Exception<as>e<block_start>edit["error"]=str(e)<block_end><finally_stmt><block_start>edit_log(config task edit)<block_end><block_end>edit_log(config task)<block_end>
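# Note on the lookup_id() helper used throughout this task: the sheet cells are
# expected to hold values formatted as "Display Name - 12345", matching the
# REGEXP_EXTRACT(r' - (\d+)$') pattern in the SQL above. A minimal standalone
# sketch of that behavior (illustrative only -- the real StarThinker helper may
# differ in parsing details and return type):
#
#   import re
#
#   def lookup_id(value):
#       """Return the trailing numeric ID from a 'Name - 12345' sheet value."""
#       match = re.search(r' - (\d+)$', str(value))
#       return int(match.group(1)) if match else None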
|
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""<line_sep># FileName [ example_extract_finetune.py ]
# Synopsis [ example code showing how to use the wrapper class for downstream feature extraction or finetuning ]
# Author [ <NAME> (Andi611) ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""<line_sep>###############
# IMPORTATION #
###############
<import_stmt>torch<import_from_stmt>transformer.nn_transformer TRANSFORMER<import_from_stmt>downstream.model example_classifier<import_from_stmt>downstream.solver get_optimizer<line_sep>################
# EXAMPLE CODE #
################
# setup the transformer model
"""
`options`: a python dictionary containing the following keys:
ckpt_file: str, a path specifying the pre-trained ckpt file
load_pretrain: str, ['True', 'False'], whether to load pre-trained weights
no_grad: str, ['True', 'False'], whether to have gradient flow over this class
dropout: float/str, use float to modify dropout value during downstream finetune, or use the str `default` for pre-train default values
spec_aug: str, ['True', 'False'], whether to apply SpecAugment on inputs (used for ASR training)
spec_aug_prev: str, ['True', 'False'], apply spec augment on input acoustic features if True, else apply on output representations (used for ASR training)
weighted_sum: str, ['True', 'False'], whether to use a learnable weighted sum to integrate hidden representations from all layers; if False, only the last layer is used
select_layer: int, select from all hidden representations, set to -1 to select the last (will only be used when weighted_sum is False)
permute_input: str, ['True', 'False'], this attribute is for the forward method. If True then input and output are in the shape of (T, B, D), if False then in (B, T, D)
"""<line_sep>options={'ckpt_file':'./result/result_transformer/tera/fmllrBase960-F-N-K-libri/states-1000000.ckpt' 'load_pretrain':'True' 'no_grad':'True' 'dropout':'default' 'spec_aug':'False' 'spec_aug_prev':'True' 'weighted_sum':'False' 'select_layer':-1 'permute_input':'False' }<line_sep>transformer=TRANSFORMER(options=options inp_dim=40)<line_sep># setup your downstream class model
classifier=example_classifier(input_dim=768 hidden_dim=128 class_num=2).cuda()<line_sep># construct the optimizer
params=list(transformer.named_parameters())+list(classifier.named_parameters())<line_sep>optimizer=get_optimizer(params=params lr=4e-3 warmup_proportion=0.7 training_steps=50000)<line_sep># forward
example_inputs=torch.zeros(3 1200 40)# A batch of spectrograms: (batch_size, time_step, feature_size)
reps=transformer(example_inputs)# returns: (batch_size, time_step, hidden_size); here 768, matching the classifier's input_dim
labels=torch.LongTensor([0 1 0]).cuda()<line_sep>loss=classifier(reps labels)<line_sep># update
loss.backward()<line_sep>optimizer.step()<line_sep># save
PATH_TO_SAVE_YOUR_MODEL='example.ckpt'<line_sep>states={'Classifier':classifier.state_dict() 'Transformer':transformer.state_dict()}<line_sep># torch.save(states, PATH_TO_SAVE_YOUR_MODEL)
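# to restore the checkpoint later, load it back with the standard torch API
# (a minimal sketch, assuming the same classifier/transformer setup as above):
# states=torch.load(PATH_TO_SAVE_YOUR_MODEL)
# classifier.load_state_dict(states['Classifier'])
# transformer.load_state_dict(states['Transformer'])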
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A helper library for coverage_test.py - coverage is added to this library."""<def_stmt>simple_func a<block_start><return>2<times>a<block_end><def_stmt>if_func a<block_start>x=a<if_stmt>x<block_start><return>2<block_end><else_stmt><block_start><return>3<block_end><block_end><def_stmt>cmp_less a b<block_start><return>a<l>b<block_end><def_stmt>cmp_greater a b<block_start><return>a<g>b<block_end><def_stmt>cmp_const_less a<block_start><return>1<l>a<block_end><def_stmt>cmp_const_less_inverted a<block_start><return>a<l>1<block_end><def_stmt>regex_match re_obj a<block_start>re_obj.match(a)<block_end>
|
<import_from_stmt>neuraxle.pipeline Pipeline<import_from_stmt>neuraxle.base BaseStep MetaStepMixin<import_from_stmt>neuraxle.union Identity<import_from_stmt>testing.mocks.step_mocks SomeMetaStepWithHyperparams<class_stmt>SomeMetaStep(MetaStepMixin BaseStep)<block_start><def_stmt>__init__ self wrapped:BaseStep<block_start>BaseStep.__init__(self)<line_sep>MetaStepMixin.__init__(self wrapped)<block_end><def_stmt>transform self data_inputs<block_start>self.wrapped.transform(data_inputs)<block_end><block_end><def_stmt>test_metastepmixin_set_train_should_set_train_to_false <block_start>p=SomeMetaStep(Pipeline([Identity()]))<line_sep>p.set_train(<false>)<assert_stmt><not>p.is_train<assert_stmt><not>p.wrapped[0].is_train<assert_stmt><not>p.wrapped.is_train<block_end><def_stmt>test_metastepmixin_set_train_should_set_train_to_true <block_start>p=SomeMetaStep(Pipeline([Identity()]))<assert_stmt>p.is_train<assert_stmt>p.wrapped[0].is_train<assert_stmt>p.wrapped.is_train<block_end><def_stmt>test_basestep_str_representation_works_correctly <block_start>output=str(SomeMetaStepWithHyperparams())<assert_stmt>output<eq>"SomeMetaStepWithHyperparams(SomeStepWithHyperparams(name='MockStep'), name='SomeMetaStepWithHyperparams')"<block_end>
|
<import_from_stmt>.lda_gibbs GibbsLDA<import_from_stmt>.lda_vb vbLDA<import_from_stmt>.slda_gibbs GibbsSupervisedLDA<import_from_stmt>.collabotm CollaborativeTopicModel<import_from_stmt>.rtm RelationalTopicModel<import_from_stmt>.diln DILN<import_from_stmt>.hmm_lda HMM_LDA<import_from_stmt>.at_model AuthorTopicModel<line_sep>
|
<import_stmt>unittest<import_from_stmt>abrvalg ast<import_from_stmt>abrvalg.lexer Lexer TokenStream<import_from_stmt>abrvalg.parser Parser<class_stmt>ParserTest(unittest.TestCase)<block_start><def_stmt>_parse self s<block_start><return>Parser().parse(TokenStream(Lexer().tokenize(s))).body<block_end><def_stmt>_assertNodesEq self s nodes<block_start><return>self.assertEqual(self._parse(s) nodes)<block_end><def_stmt>test_simple self<block_start>self._assertNodesEq('1' [ast.Number(1)])<block_end><block_end>
|
<import_stmt>pytest<import_from_stmt>helpers.cluster ClickHouseCluster<line_sep>cluster=ClickHouseCluster(__file__)<line_sep>instance=cluster.add_instance("instance" clickhouse_path_dir="clickhouse_path")<line_sep>@pytest.fixture(scope="module")<def_stmt>started_cluster <block_start><try_stmt><block_start>cluster.start()<line_sep>instance.query("CREATE DATABASE test")<line_sep><yield>cluster<block_end><finally_stmt><block_start>cluster.shutdown()<block_end><block_end><def_stmt>create_simple_table <block_start>instance.query("DROP TABLE IF EXISTS test.simple")<line_sep>instance.query("""
CREATE TABLE test.simple (key UInt64, value String)
ENGINE = MergeTree ORDER BY tuple();
""")<block_end><def_stmt>test_protobuf_format_input started_cluster<block_start>create_simple_table()<line_sep>instance.http_query("INSERT INTO test.simple SETTINGS format_schema='simple:KeyValuePair' FORMAT Protobuf" "\x07\x08\x01\x12\x03abc\x07\x08\x02\x12\x03def" )<assert_stmt>instance.query("SELECT * from test.simple")<eq>"1\tabc\n2\tdef\n"<block_end><def_stmt>test_protobuf_format_output started_cluster<block_start>create_simple_table()<line_sep>instance.query("INSERT INTO test.simple VALUES (1, 'abc'), (2, 'def')")<assert_stmt>(instance.http_query("SELECT * FROM test.simple FORMAT Protobuf SETTINGS format_schema='simple:KeyValuePair'")<eq>"\x07\x08\x01\x12\x03abc\x07\x08\x02\x12\x03def")<block_end>
|