<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>DQMServices.Core.DQMEDAnalyzer DQMEDAnalyzer<line_sep>digiValid=DQMEDAnalyzer('Phase2TrackerValidateDigi' Verbosity=cms.bool(<false>) TopFolderName=cms.string("Ph2TkPixelDigi") PixelPlotFillingFlag=cms.bool(<false>) OuterTrackerDigiSource=cms.InputTag("mix" "Tracker") OuterTrackerDigiSimLinkSource=cms.InputTag("simSiPixelDigis" "Tracker") InnerPixelDigiSource=cms.InputTag("simSiPixelDigis" "Pixel") InnerPixelDigiSimLinkSource=cms.InputTag("simSiPixelDigis" "Pixel") PSimHitSource=cms.VInputTag('g4SimHits:TrackerHitsPixelBarrelLowTof' 'g4SimHits:TrackerHitsPixelBarrelHighTof' 'g4SimHits:TrackerHitsPixelEndcapLowTof' 'g4SimHits:TrackerHitsPixelEndcapHighTof' 'g4SimHits:TrackerHitsTIBLowTof' 'g4SimHits:TrackerHitsTIBHighTof' 'g4SimHits:TrackerHitsTIDLowTof' 'g4SimHits:TrackerHitsTIDHighTof' 'g4SimHits:TrackerHitsTOBLowTof' 'g4SimHits:TrackerHitsTOBHighTof' 'g4SimHits:TrackerHitsTECLowTof' 'g4SimHits:TrackerHitsTECHighTof') SimTrackSource=cms.InputTag("g4SimHits") SimVertexSource=cms.InputTag("g4SimHits") GeometryType=cms.string('idealForDigi') PtCutOff=cms.double(2.0) #9.5 EtaCutOff=cms.double(3.5) TOFLowerCutOff=cms.double(-12.5) TOFUpperCutOff=cms.double(12.5) TrackPtH=cms.PSet(Nbins=cms.int32(50) xmin=cms.double(0.0) xmax=cms.double(100.0) switch=cms.bool(<true>)) TrackEtaH=cms.PSet(Nbins=cms.int32(45) xmin=cms.double(-4.5) xmax=cms.double(4.5) switch=cms.bool(<true>)) TrackPhiH=cms.PSet(Nbins=cms.int32(64) xmin=cms.double(-3.2) xmax=cms.double(3.2) switch=cms.bool(<true>)) SimHitElossH=cms.PSet(Nbins=cms.int32(100) xmin=cms.double(0.0) xmax=cms.double(100000.0) switch=cms.bool(<true>)) SimHitDxH=cms.PSet(Nbins=cms.int32(1000) xmin=cms.double(0.0) xmax=cms.double(0.1) switch=cms.bool(<true>)) SimHitDyH=cms.PSet(Nbins=cms.int32(1000) xmin=cms.double(0.0) xmax=cms.double(0.1) switch=cms.bool(<true>)) SimHitDzH=cms.PSet(Nbins=cms.int32(150) xmin=cms.double(0.0) xmax=cms.double(0.03) switch=cms.bool(<true>)) XYPositionMapH=cms.PSet(Nxbins=cms.int32(1250) xmin=cms.double(-1250.) xmax=cms.double(1250.) Nybins=cms.int32(1250) ymin=cms.double(-1250.) ymax=cms.double(1250.) switch=cms.bool(<false>)) RZPositionMapH=cms.PSet(Nxbins=cms.int32(3000) xmin=cms.double(-3000.) xmax=cms.double(3000.) Nybins=cms.int32(1250) ymin=cms.double(0.) ymax=cms.double(1250.) switch=cms.bool(<false>)) TOFEtaMapH=cms.PSet(Nxbins=cms.int32(45) xmin=cms.double(-4.5) xmax=cms.double(4.5) Nybins=cms.int32(100) ymin=cms.double(0.) ymax=cms.double(50.) switch=cms.bool(<false>)) TOFPhiMapH=cms.PSet(Nxbins=cms.int32(64) xmin=cms.double(-3.2) xmax=cms.double(3.2) Nybins=cms.int32(100) ymin=cms.double(0.) ymax=cms.double(50.) switch=cms.bool(<false>)) TOFZMapH=cms.PSet(Nxbins=cms.int32(3000) xmin=cms.double(-300.) xmax=cms.double(300.) Nybins=cms.int32(100) ymin=cms.double(0.) ymax=cms.double(50.) switch=cms.bool(<false>)) TOFRMapH=cms.PSet(Nxbins=cms.int32(1200) xmin=cms.double(0.) xmax=cms.double(120.) Nybins=cms.int32(100) ymin=cms.double(0.) ymax=cms.double(50.) switch=cms.bool(<false>)))<import_from_stmt>Configuration.ProcessModifiers.premix_stage2_cff premix_stage2<line_sep>premix_stage2.toModify(digiValid InnerPixelDigiSource="mixData:Pixel" OuterTrackerDigiSource="mixData:Tracker" OuterTrackerDigiSimLinkSource="mixData:Phase2OTDigiSimLink" InnerPixelDigiSimLinkSource="mixData:PixelDigiSimLink" )<line_sep>
<import_stmt>copy<import_stmt>datetime<import_stmt>errno<import_stmt>hashlib<import_stmt>os<import_stmt>time<import_from_stmt>collections defaultdict deque OrderedDict<import_stmt>torch<import_stmt>torch.distributed<as>dist<class_stmt>SmoothedValue<block_start>"""Track a series of values and provide access to smoothed values over a window or the global series average. """<def_stmt>__init__ self window_size=20 fmt=<none><block_start><if_stmt>fmt<is><none><block_start>fmt="{median:.4f} ({global_avg:.4f})"<block_end>self.deque=deque(maxlen=window_size)<line_sep>self.total=0.0<line_sep>self.count=0<line_sep>self.fmt=fmt<block_end><def_stmt>update self value n=1<block_start>self.deque.append(value)<line_sep>self.count<augadd>n<line_sep>self.total<augadd>value<times>n<block_end><def_stmt>synchronize_between_processes self<block_start>""" Warning: does not synchronize the deque! """<line_sep>t=reduce_across_processes([self.count self.total])<line_sep>t=t.tolist()<line_sep>self.count=int(t[0])<line_sep>self.total=t[1]<block_end>@property<def_stmt>median self<block_start>d=torch.tensor(list(self.deque))<line_sep><return>d.median().item()<block_end>@property<def_stmt>avg self<block_start>d=torch.tensor(list(self.deque) dtype=torch.float32)<line_sep><return>d.mean().item()<block_end>@property<def_stmt>global_avg self<block_start><return>self.total/self.count<block_end>@property<def_stmt>max self<block_start><return>max(self.deque)<block_end>@property<def_stmt>value self<block_start><return>self.deque[-1]<block_end><def_stmt>__str__ self<block_start><return>self.fmt.format(median=self.median avg=self.avg global_avg=self.global_avg max=self.max value=self.value)<block_end><block_end><class_stmt>MetricLogger<block_start><def_stmt>__init__ self delimiter="\t"<block_start>self.meters=defaultdict(SmoothedValue)<line_sep>self.delimiter=delimiter<block_end><def_stmt>update self **kwargs<block_start><for_stmt>k,v kwargs.items()<block_start><if_stmt>isinstance(v torch.Tensor)<block_start>v=v.item()<block_end><assert_stmt>isinstance(v (float int))<line_sep>self.meters[k].update(v)<block_end><block_end><def_stmt>__getattr__ self attr<block_start><if_stmt>attr<in>self.meters<block_start><return>self.meters[attr]<block_end><if_stmt>attr<in>self.__dict__<block_start><return>self.__dict__[attr]<block_end><raise>AttributeError(f"'{type(self).__name__}' object has no attribute '{attr}'")<block_end><def_stmt>__str__ self<block_start>loss_str=[]<for_stmt>name,meter self.meters.items()<block_start>loss_str.append(f"{name}: {str(meter)}")<block_end><return>self.delimiter.join(loss_str)<block_end><def_stmt>synchronize_between_processes self<block_start><for_stmt>meter self.meters.values()<block_start>meter.synchronize_between_processes()<block_end><block_end><def_stmt>add_meter self name meter<block_start>self.meters[name]=meter<block_end><def_stmt>log_every self iterable print_freq header=<none><block_start>i=0<if_stmt><not>header<block_start>header=""<block_end>start_time=time.time()<line_sep>end=time.time()<line_sep>iter_time=SmoothedValue(fmt="{avg:.4f}")<line_sep>data_time=SmoothedValue(fmt="{avg:.4f}")<line_sep>space_fmt=":"+str(len(str(len(iterable))))+"d"<if_stmt>torch.cuda.is_available()<block_start>log_msg=self.delimiter.join([header "[{0"+space_fmt+"}/{1}]" "eta: {eta}" "{meters}" "time: {time}" "data: {data}" "max mem: {memory:.0f}" ])<block_end><else_stmt><block_start>log_msg=self.delimiter.join([header "[{0"+space_fmt+"}/{1}]" "eta: {eta}" "{meters}" "time: {time}" "data: 
{data}"])<block_end>MB=1024.0<times>1024.0<for_stmt>obj iterable<block_start>data_time.update(time.time()-end)<line_sep><yield>obj<line_sep>iter_time.update(time.time()-end)<if_stmt>i%print_freq<eq>0<block_start>eta_seconds=iter_time.global_avg<times>(len(iterable)-i)<line_sep>eta_string=str(datetime.timedelta(seconds=int(eta_seconds)))<if_stmt>torch.cuda.is_available()<block_start>print(log_msg.format(i len(iterable) eta=eta_string meters=str(self) time=str(iter_time) data=str(data_time) memory=torch.cuda.max_memory_allocated()/MB ))<block_end><else_stmt><block_start>print(log_msg.format(i len(iterable) eta=eta_string meters=str(self) time=str(iter_time) data=str(data_time)))<block_end><block_end>i<augadd>1<line_sep>end=time.time()<block_end>total_time=time.time()-start_time<line_sep>total_time_str=str(datetime.timedelta(seconds=int(total_time)))<line_sep>print(f"{header} Total time: {total_time_str}")<block_end><block_end><class_stmt>ExponentialMovingAverage(torch.optim.swa_utils.AveragedModel)<block_start>"""Maintains moving averages of model parameters using an exponential decay. ``ema_avg = decay * avg_model_param + (1 - decay) * model_param`` `torch.optim.swa_utils.AveragedModel <https://pytorch.org/docs/stable/optim.html#custom-averaging-strategies>`_ is used to compute the EMA. """<def_stmt>__init__ self model decay device="cpu"<block_start><def_stmt>ema_avg avg_model_param model_param num_averaged<block_start><return>decay<times>avg_model_param+(1-decay)<times>model_param<block_end>super().__init__(model device ema_avg)<block_end><def_stmt>update_parameters self model<block_start><for_stmt>p_swa,p_model zip(self.module.state_dict().values() model.state_dict().values())<block_start>device=p_swa.device<line_sep>p_model_=p_model.detach().to(device)<if_stmt>self.n_averaged<eq>0<block_start>p_swa.detach().copy_(p_model_)<block_end><else_stmt><block_start>p_swa.detach().copy_(self.avg_fn(p_swa.detach() p_model_ self.n_averaged.to(device)))<block_end><block_end>self.n_averaged<augadd>1<block_end><block_end><def_stmt>accuracy output target topk=(1 )<block_start>"""Computes the accuracy over the k top predictions for the specified values of k"""<with_stmt>torch.inference_mode()<block_start>maxk=max(topk)<line_sep>batch_size=target.size(0)<if_stmt>target.ndim<eq>2<block_start>target=target.max(dim=1)[1]<block_end>_,pred=output.topk(maxk 1 <true> <true>)<line_sep>pred=pred.t()<line_sep>correct=pred.eq(target[<none>])<line_sep>res=[]<for_stmt>k topk<block_start>correct_k=correct[:k].flatten().sum(dtype=torch.float32)<line_sep>res.append(correct_k<times>(100.0/batch_size))<block_end><return>res<block_end><block_end><def_stmt>mkdir path<block_start><try_stmt><block_start>os.makedirs(path)<block_end><except_stmt>OSError<as>e<block_start><if_stmt>e.errno<ne>errno.EEXIST<block_start><raise><block_end><block_end><block_end><def_stmt>setup_for_distributed is_master<block_start>""" This function disables printing when not in master process """<import_stmt>builtins<as>__builtin__<line_sep>builtin_print=__builtin__.print<def_stmt>print *args **kwargs<block_start>force=kwargs.pop("force" <false>)<if_stmt>is_master<or>force<block_start>builtin_print(*args **kwargs)<block_end><block_end>__builtin__.print=print<block_end><def_stmt>is_dist_avail_and_initialized <block_start><if_stmt><not>dist.is_available()<block_start><return><false><block_end><if_stmt><not>dist.is_initialized()<block_start><return><false><block_end><return><true><block_end><def_stmt>get_world_size 
<block_start><if_stmt><not>is_dist_avail_and_initialized()<block_start><return>1<block_end><return>dist.get_world_size()<block_end><def_stmt>get_rank <block_start><if_stmt><not>is_dist_avail_and_initialized()<block_start><return>0<block_end><return>dist.get_rank()<block_end><def_stmt>is_main_process <block_start><return>get_rank()<eq>0<block_end><def_stmt>save_on_master *args **kwargs<block_start><if_stmt>is_main_process()<block_start>torch.save(*args **kwargs)<block_end><block_end><def_stmt>init_distributed_mode args<block_start><if_stmt>"RANK"<in>os.environ<and>"WORLD_SIZE"<in>os.environ<block_start>args.rank=int(os.environ["RANK"])<line_sep>args.world_size=int(os.environ["WORLD_SIZE"])<line_sep>args.gpu=int(os.environ["LOCAL_RANK"])<block_end><elif_stmt>"SLURM_PROCID"<in>os.environ<block_start>args.rank=int(os.environ["SLURM_PROCID"])<line_sep>args.gpu=args.rank%torch.cuda.device_count()<block_end><elif_stmt>hasattr(args "rank")<block_start><pass><block_end><else_stmt><block_start>print("Not using distributed mode")<line_sep>args.distributed=<false><line_sep><return><block_end>args.distributed=<true><line_sep>torch.cuda.set_device(args.gpu)<line_sep>args.dist_backend="nccl"<line_sep>print(f"| distributed init (rank {args.rank}): {args.dist_url}" flush=<true>)<line_sep>torch.distributed.init_process_group(backend=args.dist_backend init_method=args.dist_url world_size=args.world_size rank=args.rank)<line_sep>setup_for_distributed(args.rank<eq>0)<block_end><def_stmt>average_checkpoints inputs<block_start>"""Loads checkpoints from inputs and returns a model with averaged weights. Original implementation taken from: https://github.com/pytorch/fairseq/blob/a48f235636557b8d3bc4922a6fa90f3a0fa57955/scripts/average_checkpoints.py#L16 Args: inputs (List[str]): An iterable of string paths of checkpoints to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors. 
"""<line_sep>params_dict=OrderedDict()<line_sep>params_keys=<none><line_sep>new_state=<none><line_sep>num_models=len(inputs)<for_stmt>fpath inputs<block_start><with_stmt>open(fpath "rb")<as>f<block_start>state=torch.load(f map_location=(<lambda>s _:torch.serialization.default_restore_location(s "cpu")) )<block_end># Copies over the settings from the first checkpoint <if_stmt>new_state<is><none><block_start>new_state=state<block_end>model_params=state["model"]<line_sep>model_params_keys=list(model_params.keys())<if_stmt>params_keys<is><none><block_start>params_keys=model_params_keys<block_end><elif_stmt>params_keys<ne>model_params_keys<block_start><raise>KeyError(f"For checkpoint {f}, expected list of params: {params_keys}, but found: {model_params_keys}")<block_end><for_stmt>k params_keys<block_start>p=model_params[k]<if_stmt>isinstance(p torch.HalfTensor)<block_start>p=p.float()<block_end><if_stmt>k<not><in>params_dict<block_start>params_dict[k]=p.clone()<line_sep># NOTE: clone() is needed in case of p is a shared parameter <block_end><else_stmt><block_start>params_dict[k]<augadd>p<block_end><block_end><block_end>averaged_params=OrderedDict()<for_stmt>k,v params_dict.items()<block_start>averaged_params[k]=v<if_stmt>averaged_params[k].is_floating_point()<block_start>averaged_params[k].div_(num_models)<block_end><else_stmt><block_start>averaged_params[k]<augfloordiv>num_models<block_end><block_end>new_state["model"]=averaged_params<line_sep><return>new_state<block_end><def_stmt>store_model_weights model checkpoint_path checkpoint_key="model" strict=<true><block_start>""" This method can be used to prepare weights files for new models. It receives as input a model architecture and a checkpoint from the training script and produces a file with the weights ready for release. Examples: from torchvision import models as M # Classification model = M.mobilenet_v3_large(pretrained=False) print(store_model_weights(model, './class.pth')) # Quantized Classification model = M.quantization.mobilenet_v3_large(pretrained=False, quantize=False) model.fuse_model() model.qconfig = torch.ao.quantization.get_default_qat_qconfig('qnnpack') _ = torch.ao.quantization.prepare_qat(model, inplace=True) print(store_model_weights(model, './qat.pth')) # Object Detection model = M.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=False, pretrained_backbone=False) print(store_model_weights(model, './obj.pth')) # Segmentation model = M.segmentation.deeplabv3_mobilenet_v3_large(pretrained=False, pretrained_backbone=False, aux_loss=True) print(store_model_weights(model, './segm.pth', strict=False)) Args: model (pytorch.nn.Module): The model on which the weights will be loaded for validation purposes. checkpoint_path (str): The path of the checkpoint we will load. checkpoint_key (str, optional): The key of the checkpoint where the model weights are stored. Default: "model". strict (bool): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` Returns: output_path (str): The location where the weights are saved. """<line_sep># Store the new model next to the checkpoint_path checkpoint_path=os.path.abspath(checkpoint_path)<line_sep>output_dir=os.path.dirname(checkpoint_path)<line_sep># Deep copy to avoid side-effects on the model object. 
model=copy.deepcopy(model)<line_sep>checkpoint=torch.load(checkpoint_path map_location="cpu")<line_sep># Load the weights to the model to validate that everything works # and remove unnecessary weights (such as auxiliaries, etc) <if_stmt>checkpoint_key<eq>"model_ema"<block_start><del_stmt>checkpoint[checkpoint_key]["n_averaged"]<line_sep>torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(checkpoint[checkpoint_key] "module.")<block_end>model.load_state_dict(checkpoint[checkpoint_key] strict=strict)<line_sep>tmp_path=os.path.join(output_dir str(model.__hash__()))<line_sep>torch.save(model.state_dict() tmp_path)<line_sep>sha256_hash=hashlib.sha256()<with_stmt>open(tmp_path "rb")<as>f# Read and update hash string value in blocks of 4K <block_start><for_stmt>byte_block iter(<lambda>:f.read(4096) b"")<block_start>sha256_hash.update(byte_block)<block_end>hh=sha256_hash.hexdigest()<block_end>output_path=os.path.join(output_dir "weights-"+str(hh[:8])+".pth")<line_sep>os.replace(tmp_path output_path)<line_sep><return>output_path<block_end><def_stmt>reduce_across_processes val<block_start><if_stmt><not>is_dist_avail_and_initialized()# nothing to sync, but we still convert to tensor for consistency with the distributed case. <block_start><return>torch.tensor(val)<block_end>t=torch.tensor(val device="cuda")<line_sep>dist.barrier()<line_sep>dist.all_reduce(t)<line_sep><return>t<block_end>
"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support. """<line_sep>#----------------------------------------------------------------------------- # Copyright (C) 2010-2011 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING.txt, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # stdlib <import_stmt>sys<line_sep># Third-party <import_stmt>gobject<import_stmt>gtk<line_sep>#----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- <class_stmt>GTKEmbed(object)<block_start>"""A class to embed a kernel into the GTK main event loop. """<def_stmt>__init__ self kernel<block_start>self.kernel=kernel<line_sep># These two will later store the real gtk functions when we hijack them self.gtk_main=<none><line_sep>self.gtk_main_quit=<none><block_end><def_stmt>start self<block_start>"""Starts the GTK main event loop and sets our kernel startup routine. """<line_sep># Register our function to initiate the kernel and start gtk gobject.idle_add(self._wire_kernel)<line_sep>gtk.main()<block_end><def_stmt>_wire_kernel self<block_start>"""Initializes the kernel inside GTK. This is meant to run only once at startup, so it does its job and returns False to ensure it doesn't get run again by GTK. """<line_sep>self.gtk_main,self.gtk_main_quit=self._hijack_gtk()<line_sep>gobject.timeout_add(int(1000<times>self.kernel._poll_interval) self.iterate_kernel)<line_sep><return><false><block_end><def_stmt>iterate_kernel self<block_start>"""Run one iteration of the kernel and return True. GTK timer functions must return True to be called again, so we make the call to :meth:`do_one_iteration` and then return True for GTK. """<line_sep>self.kernel.do_one_iteration()<line_sep><return><true><block_end><def_stmt>stop self# FIXME: this one isn't getting called because we have no reliable # kernel shutdown. We need to fix that: once the kernel has a # shutdown mechanism, it can call this. <block_start>self.gtk_main_quit()<line_sep>sys.exit()<block_end><def_stmt>_hijack_gtk self<block_start>"""Hijack a few key functions in GTK for IPython integration. Modifies pyGTK's main and main_quit with a dummy so user code does not block IPython. This allows us to use %run to run arbitrary pygtk scripts from a long-lived IPython session, and when they attempt to start or stop Returns ------- The original functions that have been hijacked: - gtk.main - gtk.main_quit """<def_stmt>dummy *args **kw<block_start><pass><block_end># save and trap main and main_quit from gtk orig_main,gtk.main=gtk.main dummy<line_sep>orig_main_quit,gtk.main_quit=gtk.main_quit dummy<line_sep><return>orig_main orig_main_quit<block_end><block_end>
<import_from_stmt>flask Blueprint<line_sep>ivr=Blueprint('ivr' __name__)<import_from_stmt>. views<line_sep>
<import_from_stmt>pudzu.charts *<line_sep>df=pd.read_csv("datasets/flagssubdivisions.csv")<line_sep>FONT=sans<line_sep>fg,bg="black" "#EEEEEE"<line_sep>default_img="https://upload.wikimedia.org/wikipedia/commons/thumb/b/b0/No_flag.svg/1024px-No_flag.svg.png"<def_stmt>process d<block_start><if_stmt><not>d<block_start><return><none><block_end>description=get_non(d 'description')<line_sep>description="{}".format(description)<if>description<else>" "<line_sep>size=get_non(d 'size')<line_sep>size="{} km²".format(size)<if>size<else>" "<line_sep>flag=Image.from_url_with_cache(get_non(d 'image' default_img)).to_rgba()<line_sep>flag=flag.resize_fixed_aspect(width=318)<if>flag.width/flag.height<g>1.8<else>flag.resize((318 198))<line_sep>flag=flag.pad(1<if>"Ohio"<not><in>d["name"]<else>0 "grey")<line_sep><return>Image.from_column([Image.from_text(d['name'].replace(r'\n' '\n') FONT(28 bold=<true>) align="center" beard_line=<true> fg=fg) Image.from_text(description FONT(24 italics=<true>) fg=fg beard_line=<true>) Image.from_text(size FONT(24 italics=<true>) fg=fg beard_line=<true>) flag] padding=2 bg=bg equal_widths=<true>)<block_end>title=Image.from_text("Flags of the largest country subdivisions".upper() FONT(68 bold=<true>) fg=fg bg=bg align="center").pad(30 bg).pad((0 0 0 10) bg)<line_sep>footer=Image.from_text("*Antarctic territorial claims are not recognised widely internationally, though the UK, France, Australia, New Zealand and Norway\nall recognize each other's claims. "<concat>"Some claims (specficially those of the UK, Argentina and Chile) overlap." FONT(28) fg=fg bg=bg).pad(10 bg)<line_sep>groups=list(remove_duplicates(df.group))<line_sep>groups1=[g<for>g groups<if>g<not><in>["_E"]]<line_sep>array=[[dict(r)<for>_,r df.iterrows()<if>r.group<eq>g]<for>g groups1]<line_sep>data=pd.DataFrame(array index=groups1)<line_sep>grid1=grid_chart(data process padding=(10 20) fg=fg bg=bg yalign=0 row_label=<lambda>r:Image.from_text("{}".format(data.index[r]).upper() FONT(32 bold=<true>) align="center" line_spacing=3)<if><not>data.index[r].startswith("_")<else><none>).pad((10 0) bg)<line_sep>title2=Image.from_text("Bonus: some proposed and secessionist flags".upper() FONT(40 italics=<true>) fg=fg bg=bg align="center").pad(30 bg).pad(10 bg)<line_sep>groups2=[g<for>g groups<if>g<in>["_E"]]<line_sep>array=[[dict(r)<for>_,r df.iterrows()<if>r.group<eq>g]<for>g groups2]<line_sep>data=pd.DataFrame(array index=groups2)<line_sep>grid2=grid_chart(data process padding=(10 20) fg=fg bg=bg yalign=0 row_label=<lambda>r:Image.from_text("{}".format(data.index[r]).upper() FONT(32 bold=<true>) align="center" line_spacing=3)<if><not>data.index[r].startswith("_")<else><none>).pad((10 0) bg)<line_sep>img=Image.from_column([title grid1 footer title2 grid2 Rectangle((0 20))] bg=bg)<line_sep>img.place(Image.from_text("/u/Udzu" FONT(24) fg=fg bg=bg padding=5).pad((1 1 0 0) fg) align=1 padding=5 copy=<false>)<line_sep>img.save("output/flagssubdivisions.png")<line_sep>
<import_stmt>torch<import_stmt>torchvision.transforms<as>TT<import_from_stmt>wetectron.config cfg<import_from_stmt>wetectron.data transforms<as>T<import_from_stmt>wetectron.structures.image_list to_image_list<import_from_stmt>wetectron.structures.bounding_box BoxList<import_from_stmt>wetectron.modeling.roi_heads.box_head.inference make_roi_box_post_processor<def_stmt>im_detect_bbox_aug model images device rois=<none># Collect detections computed under different transformations <block_start>boxlists_ts=[]<for_stmt>_ range(len(images))<block_start>boxlists_ts.append([])<block_end><def_stmt>add_preds_t boxlists_t<block_start><for_stmt>i,boxlist_t enumerate(boxlists_t)<block_start><if_stmt>len(boxlists_ts[i])<eq>0# The first one is identity transform, no need to resize the boxlist <block_start>boxlists_ts[i].append(boxlist_t)<block_end><else_stmt># Resize the boxlist as the first one <block_start>boxlists_ts[i].append(boxlist_t.resize(boxlists_ts[i][0].size))<block_end><block_end><block_end># Compute detections for the original image (identity transform) boxlists_i=im_detect_bbox(model images cfg.INPUT.MIN_SIZE_TEST cfg.INPUT.MAX_SIZE_TEST device rois=rois)<line_sep>add_preds_t(boxlists_i)<line_sep># Perform detection on the horizontally flipped image <if_stmt>cfg.TEST.BBOX_AUG.H_FLIP<block_start>boxlists_hf=im_detect_bbox_hflip(model images cfg.INPUT.MIN_SIZE_TEST cfg.INPUT.MAX_SIZE_TEST device rois=rois)<line_sep>add_preds_t(boxlists_hf)<block_end># Compute detections at different scales <for_stmt>scale cfg.TEST.BBOX_AUG.SCALES<block_start>max_size=cfg.TEST.BBOX_AUG.MAX_SIZE<line_sep>boxlists_scl=im_detect_bbox_scale(model images scale max_size device rois=rois)<line_sep>add_preds_t(boxlists_scl)<if_stmt>cfg.TEST.BBOX_AUG.SCALE_H_FLIP<block_start>boxlists_scl_hf=im_detect_bbox_scale(model images scale max_size device hflip=<true> rois=rois)<line_sep>add_preds_t(boxlists_scl_hf)<block_end><block_end># Merge boxlists detected by different bbox aug params boxlists=[]<for_stmt>i,boxlist_ts enumerate(boxlists_ts)<block_start><if_stmt>cfg.TEST.BBOX_AUG.HEUR<eq>'UNION'<block_start>bbox=torch.cat([boxlist_t.bbox<for>boxlist_t boxlist_ts])<line_sep>scores=torch.cat([boxlist_t.get_field('scores')<for>boxlist_t boxlist_ts])<block_end><elif_stmt>cfg.TEST.BBOX_AUG.HEUR<eq>'AVG'<block_start>bbox=torch.mean(torch.stack([boxlist_t.bbox<for>boxlist_t boxlist_ts]) dim=0)<line_sep>scores=torch.mean(torch.stack([boxlist_t.get_field('scores')<for>boxlist_t boxlist_ts]) dim=0)<block_end><else_stmt><block_start><raise>ValueError('please use proper BBOX_AUG.HEUR ')<block_end>boxlist=BoxList(bbox boxlist_ts[0].size boxlist_ts[0].mode)<line_sep>boxlist.add_field('scores' scores)<line_sep>boxlists.append(boxlist)<block_end># Apply NMS and limit the final detections results=[]<line_sep>post_processor=make_roi_box_post_processor(cfg)<for_stmt>boxlist boxlists<block_start>results.append(post_processor.filter_results(boxlist cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES))<block_end><return>results<block_end><def_stmt>im_detect_bbox model images target_scale target_max_size device rois=<none><block_start>""" Performs bbox detection on the original image. 
"""<line_sep>transform=T.Compose([T.Resize(target_scale target_max_size) T.ToTensor() T.Normalize(mean=cfg.INPUT.PIXEL_MEAN std=cfg.INPUT.PIXEL_STD to_bgr255=cfg.INPUT.TO_BGR255)])<line_sep>t_images=[]<line_sep>t_rois=[]<for_stmt>image,roi zip(images rois)<block_start>t_img,_,t_roi=transform(image rois=roi)<line_sep>t_images.append(t_img)<line_sep>t_rois.append(t_roi)<block_end>t_images=to_image_list(t_images cfg.DATALOADER.SIZE_DIVISIBILITY)<line_sep>t_rois=[r.to(device)<if>r<is><not><none><else><none><for>r t_rois]<line_sep><return>model(t_images.to(device) rois=t_rois)<block_end><def_stmt>im_detect_bbox_hflip model images target_scale target_max_size device rois=<none><block_start>""" Performs bbox detection on the horizontally flipped image. Function signature is the same as for im_detect_bbox. """<line_sep>transform=T.Compose([T.Resize(target_scale target_max_size) T.RandomHorizontalFlip(1.0) T.ToTensor() T.Normalize(mean=cfg.INPUT.PIXEL_MEAN std=cfg.INPUT.PIXEL_STD to_bgr255=cfg.INPUT.TO_BGR255)])<line_sep>t_images=[]<line_sep>t_rois=[]<for_stmt>image,roi zip(images rois)<block_start>t_img,_,t_roi=transform(image rois=roi)<line_sep>t_images.append(t_img)<line_sep>t_rois.append(t_roi)<block_end>t_images=to_image_list(t_images cfg.DATALOADER.SIZE_DIVISIBILITY)<line_sep>t_rois=[r.to(device)<if>r<is><not><none><else><none><for>r t_rois]<line_sep>boxlists=model(t_images.to(device) rois=t_rois)<line_sep># Invert the detections computed on the flipped image boxlists_inv=[boxlist.transpose(0)<for>boxlist boxlists]<line_sep><return>boxlists_inv<block_end><def_stmt>im_detect_bbox_scale model images target_scale target_max_size device hflip=<false> rois=<none><block_start>""" Computes bbox detections at the given scale. Returns predictions in the scaled image space. """<if_stmt>hflip<block_start>boxlists_scl=im_detect_bbox_hflip(model images target_scale target_max_size device rois=rois)<block_end><else_stmt><block_start>boxlists_scl=im_detect_bbox(model images target_scale target_max_size device rois=rois)<block_end><return>boxlists_scl<block_end>
<import_from_stmt>.Algorithm *<import_from_stmt>.UnsupervisedModel UnsupervisedModel<import_from_stmt>.FeatureClassificationModel FeatureClassificationModel<line_sep>
<import_stmt>argparse<import_stmt>os<import_from_stmt>random seed<import_stmt>torch<import_from_stmt>allennlp.data.iterators BucketIterator<import_from_stmt>allennlp.data.vocabulary DEFAULT_OOV_TOKEN DEFAULT_PADDING_TOKEN<import_from_stmt>allennlp.data.vocabulary Vocabulary<import_from_stmt>allennlp.modules.text_field_embedders BasicTextFieldEmbedder<import_from_stmt>gector.bert_token_embedder PretrainedBertEmbedder<import_from_stmt>gector.datareader Seq2LabelsDatasetReader<import_from_stmt>gector.seq2labels_model Seq2Labels<import_from_stmt>gector.trainer Trainer<import_from_stmt>gector.wordpiece_indexer PretrainedBertIndexer<import_from_stmt>utils.helpers get_weights_name<def_stmt>fix_seed <block_start>torch.manual_seed(1)<line_sep>torch.backends.cudnn.enabled=<false><line_sep>torch.backends.cudnn.benchmark=<false><line_sep>torch.backends.cudnn.deterministic=<true><line_sep>seed(43)<block_end><def_stmt>get_token_indexers model_name max_pieces_per_token=5 lowercase_tokens=<true> special_tokens_fix=0 is_test=<false><block_start>bert_token_indexer=PretrainedBertIndexer(pretrained_model=model_name max_pieces_per_token=max_pieces_per_token do_lowercase=lowercase_tokens use_starting_offsets=<true> special_tokens_fix=special_tokens_fix is_test=is_test)<line_sep><return>{'bert':bert_token_indexer}<block_end><def_stmt>get_token_embedders model_name tune_bert=<false> special_tokens_fix=0<block_start>take_grads=<true><if>tune_bert<g>0<else><false><line_sep>bert_token_emb=PretrainedBertEmbedder(pretrained_model=model_name top_layer_only=<true> requires_grad=take_grads special_tokens_fix=special_tokens_fix)<line_sep>token_embedders={'bert':bert_token_emb}<line_sep>embedder_to_indexer_map={"bert":["bert" "bert-offsets"]}<line_sep>text_filed_emd=BasicTextFieldEmbedder(token_embedders=token_embedders embedder_to_indexer_map=embedder_to_indexer_map allow_unmatched_keys=<true>)<line_sep><return>text_filed_emd<block_end><def_stmt>get_data_reader model_name max_len skip_correct=<false> skip_complex=0 test_mode=<false> tag_strategy="keep_one" broken_dot_strategy="keep" lowercase_tokens=<true> max_pieces_per_token=3 tn_prob=0 tp_prob=1 special_tokens_fix=0 <block_start>token_indexers=get_token_indexers(model_name max_pieces_per_token=max_pieces_per_token lowercase_tokens=lowercase_tokens special_tokens_fix=special_tokens_fix is_test=test_mode)<line_sep>reader=Seq2LabelsDatasetReader(token_indexers=token_indexers max_len=max_len skip_correct=skip_correct skip_complex=skip_complex test_mode=test_mode tag_strategy=tag_strategy broken_dot_strategy=broken_dot_strategy lazy=<true> tn_prob=tn_prob tp_prob=tp_prob)<line_sep><return>reader<block_end><def_stmt>get_model model_name vocab tune_bert=<false> predictor_dropout=0 label_smoothing=0.0 confidence=0 special_tokens_fix=0<block_start>token_embs=get_token_embedders(model_name tune_bert=tune_bert special_tokens_fix=special_tokens_fix)<line_sep>model=Seq2Labels(vocab=vocab text_field_embedder=token_embs predictor_dropout=predictor_dropout label_smoothing=label_smoothing confidence=confidence)<line_sep><return>model<block_end><def_stmt>main args<block_start>fix_seed()<if_stmt><not>os.path.exists(args.model_dir)<block_start>os.mkdir(args.model_dir)<block_end>weights_name=get_weights_name(args.transformer_model args.lowercase_tokens)<line_sep># read datasets reader=get_data_reader(weights_name args.max_len skip_correct=bool(args.skip_correct) skip_complex=args.skip_complex test_mode=<false> tag_strategy=args.tag_strategy lowercase_tokens=args.lowercase_tokens 
max_pieces_per_token=args.pieces_per_token tn_prob=args.tn_prob tp_prob=args.tp_prob special_tokens_fix=args.special_tokens_fix)<line_sep>train_data=reader.read(args.train_set)<line_sep>dev_data=reader.read(args.dev_set)<line_sep>default_tokens=[DEFAULT_OOV_TOKEN DEFAULT_PADDING_TOKEN]<line_sep>namespaces=['labels' 'd_tags']<line_sep>tokens_to_add={x:default_tokens<for>x namespaces}<line_sep># build vocab <if_stmt>args.vocab_path<block_start>vocab=Vocabulary.from_files(args.vocab_path)<block_end><else_stmt><block_start>vocab=Vocabulary.from_instances(train_data max_vocab_size={'tokens':30000 'labels':args.target_vocab_size 'd_tags':2} tokens_to_add=tokens_to_add)<block_end>vocab.save_to_files(os.path.join(args.model_dir 'vocabulary'))<line_sep>print("Data is loaded")<line_sep>model=get_model(weights_name vocab tune_bert=args.tune_bert predictor_dropout=args.predictor_dropout label_smoothing=args.label_smoothing special_tokens_fix=args.special_tokens_fix)<line_sep>device=torch.device("cuda:0"<if>torch.cuda.is_available()<else>"cpu")<if_stmt>torch.cuda.is_available()<block_start><if_stmt>torch.cuda.device_count()<g>1<block_start>cuda_device=list(range(torch.cuda.device_count()))<block_end><else_stmt><block_start>cuda_device=0<block_end><block_end><else_stmt><block_start>cuda_device=-1<block_end><if_stmt>args.pretrain<block_start>model.load_state_dict(torch.load(os.path.join(args.pretrain_folder args.pretrain+'.th')))<block_end>model=model.to(device)<line_sep>print("Model is set")<line_sep>optimizer=torch.optim.Adam(model.parameters() lr=args.lr)<line_sep>scheduler=torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer factor=0.1 patience=10)<line_sep>instances_per_epoch=<none><if><not>args.updates_per_epoch<else>int(args.updates_per_epoch<times>args.batch_size<times>args.accumulation_size)<line_sep>iterator=BucketIterator(batch_size=args.batch_size sorting_keys=[("tokens" "num_tokens")] biggest_batch_first=<true> max_instances_in_memory=args.batch_size<times>20000 instances_per_epoch=instances_per_epoch )<line_sep>iterator.index_with(vocab)<line_sep>trainer=Trainer(model=model optimizer=optimizer scheduler=scheduler iterator=iterator train_dataset=train_data validation_dataset=dev_data serialization_dir=args.model_dir patience=args.patience num_epochs=args.n_epoch cuda_device=cuda_device shuffle=<false> accumulated_batch_count=args.accumulation_size cold_step_count=args.cold_steps_count cold_lr=args.cold_lr cuda_verbose_step=int(args.cuda_verbose_steps)<if>args.cuda_verbose_steps<else><none>)<line_sep>print("Start training")<line_sep>trainer.train()<line_sep># Here's how to save the model. out_model=os.path.join(args.model_dir 'model.th')<with_stmt>open(out_model 'wb')<as>f<block_start>torch.save(model.state_dict() f)<block_end>print("Model is dumped")<block_end><if_stmt>__name__<eq>'__main__'# read parameters <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--train_set' help='Path to the train data' required=<true>)<line_sep>parser.add_argument('--dev_set' help='Path to the dev data' required=<true>)<line_sep>parser.add_argument('--model_dir' help='Path to the model dir' required=<true>)<line_sep>parser.add_argument('--vocab_path' help='Path to the model vocabulary directory.'<concat>'If not set then build vocab from data' default='')<line_sep>parser.add_argument('--batch_size' type=int help='The size of the batch.' 
default=32)<line_sep>parser.add_argument('--max_len' type=int help='The max sentence length'<concat>'(all longer will be truncated)' default=50)<line_sep>parser.add_argument('--target_vocab_size' type=int help='The size of target vocabularies.' default=1000)<line_sep>parser.add_argument('--n_epoch' type=int help='The number of epoch for training model.' default=20)<line_sep>parser.add_argument('--patience' type=int help='The number of epoch with any improvements'<concat>' on validation set.' default=3)<line_sep>parser.add_argument('--skip_correct' type=int help='If set than correct sentences will be skipped '<concat>'by data reader.' default=1)<line_sep>parser.add_argument('--skip_complex' type=int help='If set than complex corrections will be skipped '<concat>'by data reader.' choices=[0 1 2 3 4 5] default=0)<line_sep>parser.add_argument('--tune_bert' type=int help='If more then 0 then fine tune bert.' default=1)<line_sep>parser.add_argument('--tag_strategy' choices=['keep_one' 'merge_all'] help='The type of the data reader behaviour.' default='keep_one')<line_sep>parser.add_argument('--accumulation_size' type=int help='How many batches do you want accumulate.' default=4)<line_sep>parser.add_argument('--lr' type=float help='Set initial learning rate.' default=1e-5)<line_sep>parser.add_argument('--cold_steps_count' type=int help='Whether to train only classifier layers first.' default=4)<line_sep>parser.add_argument('--cold_lr' type=float help='Learning rate during cold_steps.' default=1e-3)<line_sep>parser.add_argument('--predictor_dropout' type=float help='The value of dropout for predictor.' default=0.0)<line_sep>parser.add_argument('--lowercase_tokens' type=int help='Whether to lowercase tokens.' default=0)<line_sep>parser.add_argument('--pieces_per_token' type=int help='The max number for pieces per token.' default=5)<line_sep>parser.add_argument('--cuda_verbose_steps' help='Number of steps after which CUDA memory information is printed. '<concat>'Makes sense for local testing. Usually about 1000.' default=<none>)<line_sep>parser.add_argument('--label_smoothing' type=float help='The value of parameter alpha for label smoothing.' default=0.0)<line_sep>parser.add_argument('--tn_prob' type=float help='The probability to take TN from data.' default=0)<line_sep>parser.add_argument('--tp_prob' type=float help='The probability to take TP from data.' default=1)<line_sep>parser.add_argument('--updates_per_epoch' type=int help='If set then each epoch will contain the exact amount of updates.' default=0)<line_sep>parser.add_argument('--pretrain_folder' help='The name of the pretrain folder.')<line_sep>parser.add_argument('--pretrain' help='The name of the pretrain weights in pretrain_folder param.' default='')<line_sep>parser.add_argument('--transformer_model' choices=['bert' 'distilbert' 'gpt2' 'roberta' 'transformerxl' 'xlnet' 'albert'] help='Name of the transformer model.' default='roberta')<line_sep>parser.add_argument('--special_tokens_fix' type=int help='Whether to fix problem with [CLS], [SEP] tokens tokenization.' default=1)<line_sep>args=parser.parse_args()<line_sep>main(args)<block_end>
<import_stmt>unittest<import_from_stmt>unittest TestCase<import_from_stmt>e2cnn.nn *<import_from_stmt>e2cnn.gspaces *<import_stmt>numpy<as>np<class_stmt>TestUpsampling(TestCase)<block_start><def_stmt>test_cyclic_even_bilinear self<block_start>g=Rot2dOnR2(8)<line_sep>self.check_upsampling(g "bilinear")<block_end><def_stmt>test_cyclic_odd_bilinear self<block_start>g=Rot2dOnR2(9)<line_sep>self.check_upsampling(g "bilinear")<block_end><def_stmt>test_dihedral_even_bilinear self<block_start>g=FlipRot2dOnR2(8)<line_sep>self.check_upsampling(g "bilinear")<block_end><def_stmt>test_dihedral_odd_bilinear self<block_start>g=Rot2dOnR2(9)<line_sep>self.check_upsampling(g "bilinear")<block_end><def_stmt>test_so2_bilinear self<block_start>g=Rot2dOnR2(8)<line_sep>self.check_upsampling(g "bilinear")<block_end><def_stmt>test_o2_bilinear self<block_start>g=Rot2dOnR2(8)<line_sep>self.check_upsampling(g "bilinear")<block_end># "NEAREST" method is not equivariant!! As a result, all the following tests fail <def_stmt>test_cyclic_even_nearest self<block_start>g=Rot2dOnR2(8)<line_sep>self.check_upsampling(g "nearest")<block_end><def_stmt>test_cyclic_odd_nearest self<block_start>g=Rot2dOnR2(9)<line_sep>self.check_upsampling(g "nearest")<block_end><def_stmt>test_dihedral_even_nearest self<block_start>g=FlipRot2dOnR2(8)<line_sep>self.check_upsampling(g "nearest")<block_end><def_stmt>test_dihedral_odd_nearest self<block_start>g=Rot2dOnR2(9)<line_sep>self.check_upsampling(g "nearest")<block_end><def_stmt>test_so2_nearest self<block_start>g=Rot2dOnR2(8)<line_sep>self.check_upsampling(g "nearest")<block_end><def_stmt>test_o2_nearest self<block_start>g=Rot2dOnR2(8)<line_sep>self.check_upsampling(g "nearest")<block_end><def_stmt>check_upsampling self g mode<block_start><for_stmt>s [2 3 5]<block_start>print(f"\nScale: {s}\n")<for_stmt>r g.representations.values()<block_start>r1=FieldType(g [r])<line_sep>ul=R2Upsampling(r1 mode=mode scale_factor=s)<line_sep>ul.check_equivariance()<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# Copyright 2017 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_stmt>json5_generator<def_stmt>sort_keyword_properties_by_canonical_order css_properties css_value_keywords_file json5_file_parameters<block_start>"""Sort all keyword CSS properties by the order of the keyword in css_value_keywords.json5 Args: css_properties: css_properties excluding extra fields. css_value_keywords_file: file containing all css keywords. json5_file_parameters: current context json5 parameters. Returns: New css_properties object with sorted keywords. """<line_sep>css_values_dictionary=json5_generator.Json5File.load_from_files([css_value_keywords_file] default_parameters=json5_file_parameters).name_dictionaries<line_sep>css_values_dictionary=[x['name'].original<for>x css_values_dictionary]<line_sep>name_to_position_dictionary=dict(zip(css_values_dictionary range(len(css_values_dictionary))))<for_stmt>css_property css_properties<block_start><if_stmt>css_property['field_template']<eq>'keyword'<and>len(css_property['include_paths'])<eq>0<block_start>css_property['keywords']=sorted(css_property['keywords'] key=<lambda>x:name_to_position_dictionary[x])<block_end><block_end><return>css_properties<block_end>
<import_from_stmt>. geotiff<import_from_stmt>.known BaseKnownVLR<import_from_stmt>.vlr VLR<line_sep>
# --- # jupyter: # jupytext: # cell_markers: region,endregion # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- 1+2+3<line_sep># region active="" # This is a raw cell # endregion # This is a markdown cell
<import_from_stmt>numpy inf nan<import_from_stmt>sklearn.decomposition KernelPCA<as>Op<import_from_stmt>lale.docstrings set_docstrings<import_from_stmt>lale.operators make_operator<class_stmt>_KernelPCAImpl<block_start><def_stmt>__init__ self **hyperparams<block_start>self._hyperparams=hyperparams<line_sep>self._wrapped_model=Op(**self._hyperparams)<block_end><def_stmt>fit self X y=<none><block_start><if_stmt>y<is><not><none><block_start>self._wrapped_model.fit(X y)<block_end><else_stmt><block_start>self._wrapped_model.fit(X)<block_end><return>self<block_end><def_stmt>transform self X<block_start><return>self._wrapped_model.transform(X)<block_end><block_end>_hyperparams_schema={"$schema":"http://json-schema.org/draft-04/schema#" "description":"inherited docstring for KernelPCA Kernel Principal component analysis (KPCA)" "allOf":[{"type":"object" "required":["n_components" "kernel" "gamma" "degree" "coef0" "kernel_params" "alpha" "fit_inverse_transform" "eigen_solver" "tol" "max_iter" "remove_zero_eig" "random_state" "copy_X" "n_jobs" ] "relevantToOptimizer":["n_components" "kernel" "degree" "coef0" "alpha" "eigen_solver" "tol" "max_iter" "remove_zero_eig" "copy_X" ] "additionalProperties":<false> "properties":{"n_components":{"anyOf":[{"type":"integer" "minimumForOptimizer":2 "maximumForOptimizer":256 "distribution":"uniform" } {"enum":[<none>]} ] "default":<none> "description":"Number of components" } "kernel":{"enum":["linear" "poly" "rbf" "sigmoid" "cosine" "precomputed" ] "default":"linear" "description":"Kernel" } "gamma":{"XXX TODO XXX":"float, default=1/n_features" "description":"Kernel coefficient for rbf, poly and sigmoid kernels" "enum":[<none>] "default":<none> } "degree":{"type":"integer" "minimumForOptimizer":2 "maximumForOptimizer":3 "distribution":"uniform" "default":3 "description":"Degree for poly kernels" } "coef0":{"type":"number" "minimumForOptimizer":0.0 "maximumForOptimizer":1.0 "distribution":"uniform" "default":1 "description":"Independent term in poly and sigmoid kernels" } "kernel_params":{"XXX TODO XXX":"mapping of string to any, default=None" "description":"Parameters (keyword arguments) and values for kernel passed as callable object" "enum":[<none>] "default":<none> } "alpha":{"anyOf":[{"type":"integer" "forOptimizer":<false>} {"type":"number" "minimumForOptimizer":1e-10 "maximumForOptimizer":1.0 "distribution":"loguniform" } ] "default":1.0 "description":"Hyperparameter of the ridge regression that learns the inverse transform (when fit_inverse_transform=True)." 
} "fit_inverse_transform":{"type":"boolean" "default":<false> "description":"Learn the inverse transform for non-precomputed kernels" } "eigen_solver":{"enum":["auto" "dense" "arpack"] "default":"auto" "description":"Select eigensolver to use" } "tol":{"type":"number" "minimumForOptimizer":1e-08 "maximumForOptimizer":0.01 "distribution":"loguniform" "default":0 "description":"Convergence tolerance for arpack" } "max_iter":{"anyOf":[{"type":"integer" "minimumForOptimizer":10 "maximumForOptimizer":1000 "distribution":"uniform" } {"enum":[<none>]} ] "default":<none> "description":"Maximum number of iterations for arpack" } "remove_zero_eig":{"type":"boolean" "default":<false> "description":"If True, then all components with zero eigenvalues are removed, so that the number of components in the output may be < n_components (and sometimes even zero due to numerical instability)" } "random_state":{"anyOf":[{"type":"integer"} {"laleType":"numpy.random.RandomState"} {"enum":[<none>]} ] "default":<none> "description":"If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`" } "copy_X":{"type":"boolean" "default":<true> "description":"If True, input X is copied and stored by the model in the `X_fit_` attribute" } "n_jobs":{"anyOf":[{"type":"integer"} {"enum":[<none>]}] "default":1 "description":"The number of parallel jobs to run" } } }] }<line_sep>_input_fit_schema={"$schema":"http://json-schema.org/draft-04/schema#" "description":"Fit the model from data in X." "type":"object" "required":["X"] "properties":{"X":{"type":"array" "items":{"type":"array" "items":{"type":"number"}} "description":"Training vector, where n_samples in the number of samples and n_features is the number of features." }} }<line_sep>_input_transform_schema={"$schema":"http://json-schema.org/draft-04/schema#" "description":"Transform X." "type":"object" "required":["X"] "properties":{"X":{"type":"array" "items":{"type":"array" "items":{"type":"number"}}}} }<line_sep>_output_transform_schema={"$schema":"http://json-schema.org/draft-04/schema#" "description":"Transform X." "type":"array" "items":{"type":"array" "items":{"type":"number"}} }<line_sep>_combined_schemas={"$schema":"http://json-schema.org/draft-04/schema#" "description":"Combined schema for expected data and hyperparameters." "documentation_url":"https://scikit-learn.org/0.20/modules/generated/sklearn.decomposition.KernelPCA#sklearn-decomposition-kernelpca" "import_from":"sklearn.decomposition" "type":"object" "tags":{"pre":[] "op":["transformer"] "post":[]} "properties":{"hyperparams":_hyperparams_schema "input_fit":_input_fit_schema "input_transform":_input_transform_schema "output_transform":_output_transform_schema } }<line_sep>KernelPCA=make_operator(_KernelPCAImpl _combined_schemas)<line_sep>set_docstrings(KernelPCA)<line_sep>
# -*- coding: utf-8 -*- <import_stmt>logging<import_from_stmt>rest_framework.views APIView<import_from_stmt>rest_framework.authentication SessionAuthentication<import_from_stmt>rest_framework.permissions IsAuthenticatedOrReadOnly<import_from_stmt>rest_framework.response Response<import_from_stmt>rest_framework status<import_from_stmt>seaserv seafile_api<import_from_stmt>seahub.api2.authentication TokenAuthentication<import_from_stmt>seahub.api2.throttling UserRateThrottle<import_from_stmt>seahub.api2.utils api_error<import_from_stmt>seahub.utils.repo is_valid_repo_id_format<import_from_stmt>seahub.utils HAS_FILE_SEARCH<import_from_stmt>seahub.wiki.models Wiki<if_stmt>HAS_FILE_SEARCH<block_start><import_from_stmt>seahub.search.utils search_files<block_end>logger=logging.getLogger('seafes')<class_stmt>PublishedRepoSearchView(APIView)<block_start>""" Search public repos """<line_sep>authentication_classes=(TokenAuthentication SessionAuthentication)<line_sep>permission_classes=(IsAuthenticatedOrReadOnly )<line_sep>throttle_classes=(UserRateThrottle )<def_stmt>get self request# is search supported <block_start><if_stmt><not>HAS_FILE_SEARCH<block_start>error_msg='Search not supported.'<line_sep><return>api_error(status.HTTP_404_NOT_FOUND error_msg)<block_end># argument check keyword=request.GET.get('q' <none>)<if_stmt><not>keyword<block_start>error_msg='q invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end>repo_id=request.GET.get('repo_id' <none>)<if_stmt><not>is_valid_repo_id_format(repo_id)<block_start>error_msg='repo_id invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end># recourse check repo=seafile_api.get_repo(repo_id)<if_stmt><not>repo<block_start>error_msg='Library %s not found.'%repo_id<line_sep><return>api_error(status.HTTP_404_NOT_FOUND error_msg)<block_end># permission check wiki=Wiki.objects.filter(repo_id=repo_id)[0]<if_stmt><not>wiki.has_read_perm(request)<block_start>error_msg='Permission denied.'<line_sep><return>api_error(status.HTTP_403_FORBIDDEN error_msg)<block_end><try_stmt><block_start>current_page=int(request.GET.get('page' '1'))<line_sep>per_page=int(request.GET.get('per_page' '10'))<if_stmt>per_page<g>100<block_start>per_page=100<block_end><block_end><except_stmt>ValueError<block_start>current_page=1<line_sep>per_page=10<block_end>start=(current_page-1)<times>per_page<line_sep>size=per_page<if_stmt>start<l>0<or>size<l>0<block_start>error_msg='page or per_page invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end>repo_id_map={}<line_sep>map_id=repo.origin_repo_id<if>repo.origin_repo_id<else>repo_id<line_sep>repo_id_map[map_id]=repo<line_sep># search file <try_stmt><block_start>results,total=search_files(repo_id_map <none> keyword <none> start size org_id=<none>)<block_end><except_stmt>Exception<as>e<block_start>logger.error(e)<line_sep>error_msg='Internal Server Error'<line_sep><return>api_error(status.HTTP_500_INTERNAL_SERVER_ERROR error_msg)<block_end><for_stmt>result results<block_start>result.pop('repo' <none>)<line_sep>result.pop('exists' <none>)<line_sep>result.pop('last_modified_by' <none>)<line_sep>result.pop('name_highlight' <none>)<line_sep>result.pop('score' <none>)<line_sep>result['repo_type']='public'<block_end>has_more=<true><if>total<g>current_page<times>per_page<else><false><line_sep><return>Response({"total":total "results":results "has_more":has_more})<block_end><block_end>
<import_from_stmt>threading Lock<import_from_stmt>flask Flask render_template session request<import_from_stmt>flask_socketio SocketIO Namespace emit join_room leave_room close_room rooms disconnect<line_sep># Set this variable to "threading", "eventlet" or "gevent" to test the # different async modes, or leave it set to None for the application to choose # the best option based on installed packages. async_mode=<none><line_sep>app=Flask(__name__)<line_sep>app.config['SECRET_KEY']='secret!'<line_sep>socketio=SocketIO(app async_mode=async_mode)<line_sep>thread=<none><line_sep>thread_lock=Lock()<def_stmt>background_thread <block_start>"""Example of how to send server generated events to clients."""<line_sep>count=0<while_stmt><true><block_start>socketio.sleep(10)<line_sep>count<augadd>1<line_sep>socketio.emit('my_response' {'data':'Server generated event' 'count':count})<block_end><block_end>@app.route('/')<def_stmt>index <block_start><return>render_template('index.html' async_mode=socketio.async_mode)<block_end><class_stmt>MyNamespace(Namespace)<block_start><def_stmt>on_my_event self message<block_start>session['receive_count']=session.get('receive_count' 0)+1<line_sep>emit('my_response' {'data':message['data'] 'count':session['receive_count']})<block_end><def_stmt>on_my_broadcast_event self message<block_start>session['receive_count']=session.get('receive_count' 0)+1<line_sep>emit('my_response' {'data':message['data'] 'count':session['receive_count']} broadcast=<true>)<block_end><def_stmt>on_join self message<block_start>join_room(message['room'])<line_sep>session['receive_count']=session.get('receive_count' 0)+1<line_sep>emit('my_response' {'data':'In rooms: '+', '.join(rooms()) 'count':session['receive_count']})<block_end><def_stmt>on_leave self message<block_start>leave_room(message['room'])<line_sep>session['receive_count']=session.get('receive_count' 0)+1<line_sep>emit('my_response' {'data':'In rooms: '+', '.join(rooms()) 'count':session['receive_count']})<block_end><def_stmt>on_close_room self message<block_start>session['receive_count']=session.get('receive_count' 0)+1<line_sep>emit('my_response' {'data':'Room '+message['room']+' is closing.' 'count':session['receive_count']} room=message['room'])<line_sep>close_room(message['room'])<block_end><def_stmt>on_my_room_event self message<block_start>session['receive_count']=session.get('receive_count' 0)+1<line_sep>emit('my_response' {'data':message['data'] 'count':session['receive_count']} room=message['room'])<block_end><def_stmt>on_disconnect_request self<block_start>session['receive_count']=session.get('receive_count' 0)+1<line_sep>emit('my_response' {'data':'Disconnected!' 'count':session['receive_count']})<line_sep>disconnect()<block_end><def_stmt>on_my_ping self<block_start>emit('my_pong')<block_end><def_stmt>on_connect self<block_start><global>thread<with_stmt>thread_lock<block_start><if_stmt>thread<is><none><block_start>thread=socketio.start_background_task(background_thread)<block_end><block_end>emit('my_response' {'data':'Connected' 'count':0})<block_end><def_stmt>on_disconnect self<block_start>print('Client disconnected' request.sid)<block_end><block_end>socketio.on_namespace(MyNamespace('/'))<if_stmt>__name__<eq>'__main__'<block_start>socketio.run(app)<block_end>
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 2001 # the Initial Developer. All Rights Reserved. # # Contributor(s): # <NAME> - port to Python # <NAME> - original C code # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### """ Module containing the UniversalDetector detector class, which is the primary class a user of ``chardet`` should use. :author: <NAME> (initial port to Python) :author: <NAME> (original C code) :author: <NAME> (major refactoring for 3.0) :author: <NAME> """<import_stmt>codecs<import_stmt>logging<import_stmt>re<import_from_stmt>.charsetgroupprober CharSetGroupProber<import_from_stmt>.enums InputState LanguageFilter ProbingState<import_from_stmt>.escprober EscCharSetProber<import_from_stmt>.latin1prober Latin1Prober<import_from_stmt>.mbcsgroupprober MBCSGroupProber<import_from_stmt>.sbcsgroupprober SBCSGroupProber<class_stmt>UniversalDetector(object)<block_start>""" The ``UniversalDetector`` class underlies the ``chardet.detect`` function and coordinates all of the different charset probers. To get a ``dict`` containing an encoding and its confidence, you can simply run: .. code:: u = UniversalDetector() u.feed(some_bytes) u.close() detected = u.result """<line_sep>MINIMUM_THRESHOLD=0.20<line_sep>HIGH_BYTE_DETECTOR=re.compile(b'[\x80-\xFF]')<line_sep>ESC_DETECTOR=re.compile(b'(\033|~{)')<line_sep>WIN_BYTE_DETECTOR=re.compile(b'[\x80-\x9F]')<line_sep>ISO_WIN_MAP={'iso-8859-1':'Windows-1252' 'iso-8859-2':'Windows-1250' 'iso-8859-5':'Windows-1251' 'iso-8859-6':'Windows-1256' 'iso-8859-7':'Windows-1253' 'iso-8859-8':'Windows-1255' 'iso-8859-9':'Windows-1254' 'iso-8859-13':'Windows-1257'}<def_stmt>__init__ self lang_filter=LanguageFilter.ALL<block_start>self._esc_charset_prober=<none><line_sep>self._charset_probers=[]<line_sep>self.result=<none><line_sep>self.done=<none><line_sep>self._got_data=<none><line_sep>self._input_state=<none><line_sep>self._last_char=<none><line_sep>self.lang_filter=lang_filter<line_sep>self.logger=logging.getLogger(__name__)<line_sep>self._has_win_bytes=<none><line_sep>self.reset()<block_end><def_stmt>reset self<block_start>""" Reset the UniversalDetector and all of its probers back to their initial states. This is called by ``__init__``, so you only need to call this directly in between analyses of different documents. 
"""<line_sep>self.result={'encoding':<none> 'confidence':0.0 'language':<none>}<line_sep>self.done=<false><line_sep>self._got_data=<false><line_sep>self._has_win_bytes=<false><line_sep>self._input_state=InputState.PURE_ASCII<line_sep>self._last_char=b''<if_stmt>self._esc_charset_prober<block_start>self._esc_charset_prober.reset()<block_end><for_stmt>prober self._charset_probers<block_start>prober.reset()<block_end><block_end><def_stmt>feed self byte_str<block_start>""" Takes a chunk of a document and feeds it through all of the relevant charset probers. After calling ``feed``, you can check the value of the ``done`` attribute to see if you need to continue feeding the ``UniversalDetector`` more data, or if it has made a prediction (in the ``result`` attribute). .. note:: You should always call ``close`` when you're done feeding in your document if ``done`` is not already ``True``. """<if_stmt>self.done<block_start><return><block_end><if_stmt><not>len(byte_str)<block_start><return><block_end><if_stmt><not>isinstance(byte_str bytearray)<block_start>byte_str=bytearray(byte_str)<block_end># First check for known BOMs, since these are guaranteed to be correct <if_stmt><not>self._got_data# If the data starts with BOM, we know it is UTF <block_start><if_stmt>byte_str.startswith(codecs.BOM_UTF8)# EF BB BF UTF-8 with BOM <block_start>self.result={'encoding':"UTF-8-SIG" 'confidence':1.0 'language':''}<block_end><elif_stmt>byte_str.startswith((codecs.BOM_UTF32_LE codecs.BOM_UTF32_BE))# FF FE 00 00 UTF-32, little-endian BOM # 00 00 FE FF UTF-32, big-endian BOM <block_start>self.result={'encoding':"UTF-32" 'confidence':1.0 'language':''}<block_end><elif_stmt>byte_str.startswith(b'\xFE\xFF\x00\x00')# FE FF 00 00 UCS-4, unusual octet order BOM (3412) <block_start>self.result={'encoding':"X-ISO-10646-UCS-4-3412" 'confidence':1.0 'language':''}<block_end><elif_stmt>byte_str.startswith(b'\x00\x00\xFF\xFE')# 00 00 FF FE UCS-4, unusual octet order BOM (2143) <block_start>self.result={'encoding':"X-ISO-10646-UCS-4-2143" 'confidence':1.0 'language':''}<block_end><elif_stmt>byte_str.startswith((codecs.BOM_LE codecs.BOM_BE))# FF FE UTF-16, little endian BOM # FE FF UTF-16, big endian BOM <block_start>self.result={'encoding':"UTF-16" 'confidence':1.0 'language':''}<block_end>self._got_data=<true><if_stmt>self.result['encoding']<is><not><none><block_start>self.done=<true><line_sep><return><block_end><block_end># If none of those matched and we've only see ASCII so far, check # for high bytes and escape sequences <if_stmt>self._input_state<eq>InputState.PURE_ASCII<block_start><if_stmt>self.HIGH_BYTE_DETECTOR.search(byte_str)<block_start>self._input_state=InputState.HIGH_BYTE<block_end><elif_stmt>self._input_state<eq>InputState.PURE_ASCII<and>self.ESC_DETECTOR.search(self._last_char+byte_str)<block_start>self._input_state=InputState.ESC_ASCII<block_end><block_end>self._last_char=byte_str[-1:]<line_sep># If we've seen escape sequences, use the EscCharSetProber, which # uses a simple state machine to check for known escape sequences in # HZ and ISO-2022 encodings, since those are the only encodings that # use such sequences. 
<if_stmt>self._input_state<eq>InputState.ESC_ASCII<block_start><if_stmt><not>self._esc_charset_prober<block_start>self._esc_charset_prober=EscCharSetProber(self.lang_filter)<block_end><if_stmt>self._esc_charset_prober.feed(byte_str)<eq>ProbingState.FOUND_IT<block_start>self.result={'encoding':self._esc_charset_prober.charset_name 'confidence':self._esc_charset_prober.get_confidence() 'language':self._esc_charset_prober.language}<line_sep>self.done=<true><block_end><block_end># If we've seen high bytes (i.e., those with values greater than 127), # we need to do more complicated checks using all our multi-byte and # single-byte probers that are left. The single-byte probers # use character bigram distributions to determine the encoding, whereas # the multi-byte probers use a combination of character unigram and # bigram distributions. <elif_stmt>self._input_state<eq>InputState.HIGH_BYTE<block_start><if_stmt><not>self._charset_probers<block_start>self._charset_probers=[MBCSGroupProber(self.lang_filter)]<line_sep># If we're checking non-CJK encodings, use single-byte prober <if_stmt>self.lang_filter&LanguageFilter.NON_CJK<block_start>self._charset_probers.append(SBCSGroupProber())<block_end>self._charset_probers.append(Latin1Prober())<block_end><for_stmt>prober self._charset_probers<block_start><if_stmt>prober.feed(byte_str)<eq>ProbingState.FOUND_IT<block_start>self.result={'encoding':prober.charset_name 'confidence':prober.get_confidence() 'language':prober.language}<line_sep>self.done=<true><line_sep><break><block_end><block_end><if_stmt>self.WIN_BYTE_DETECTOR.search(byte_str)<block_start>self._has_win_bytes=<true><block_end><block_end><block_end><def_stmt>close self<block_start>""" Stop analyzing the current document and come up with a final prediction. :returns: The ``result`` attribute, a ``dict`` with the keys `encoding`, `confidence`, and `language`. 
"""<line_sep># Don't bother with checks if we're already done <if_stmt>self.done<block_start><return>self.result<block_end>self.done=<true><if_stmt><not>self._got_data<block_start>self.logger.debug('no data received!')<block_end># Default to ASCII if it is all we've seen so far <elif_stmt>self._input_state<eq>InputState.PURE_ASCII<block_start>self.result={'encoding':'ascii' 'confidence':1.0 'language':''}<block_end># If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD <elif_stmt>self._input_state<eq>InputState.HIGH_BYTE<block_start>prober_confidence=<none><line_sep>max_prober_confidence=0.0<line_sep>max_prober=<none><for_stmt>prober self._charset_probers<block_start><if_stmt><not>prober<block_start><continue><block_end>prober_confidence=prober.get_confidence()<if_stmt>prober_confidence<g>max_prober_confidence<block_start>max_prober_confidence=prober_confidence<line_sep>max_prober=prober<block_end><block_end><if_stmt>max_prober<and>(max_prober_confidence<g>self.MINIMUM_THRESHOLD)<block_start>charset_name=max_prober.charset_name<line_sep>lower_charset_name=max_prober.charset_name.lower()<line_sep>confidence=max_prober.get_confidence()<line_sep># Use Windows encoding name instead of ISO-8859 if we saw any # extra Windows-specific bytes <if_stmt>lower_charset_name.startswith('iso-8859')<block_start><if_stmt>self._has_win_bytes<block_start>charset_name=self.ISO_WIN_MAP.get(lower_charset_name charset_name)<block_end><block_end>self.result={'encoding':charset_name 'confidence':confidence 'language':max_prober.language}<block_end><block_end># Log all prober confidences if none met MINIMUM_THRESHOLD <if_stmt>self.logger.getEffectiveLevel()<eq>logging.DEBUG<block_start><if_stmt>self.result['encoding']<is><none><block_start>self.logger.debug('no probers hit minimum threshold')<for_stmt>group_prober self._charset_probers<block_start><if_stmt><not>group_prober<block_start><continue><block_end><if_stmt>isinstance(group_prober CharSetGroupProber)<block_start><for_stmt>prober group_prober.probers<block_start>self.logger.debug('%s %s confidence = %s' prober.charset_name prober.language prober.get_confidence())<block_end><block_end><else_stmt><block_start>self.logger.debug('%s %s confidence = %s' prober.charset_name prober.language prober.get_confidence())<block_end><block_end><block_end><block_end><return>self.result<block_end><block_end>
# Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """ This file is used to define the node of graph and associated base types. """<import_from_stmt>mindinsight.utils.exceptions ParamValueError<import_from_stmt>mindinsight.datavisual.common.log logger<as>log<class_stmt>NodeTree<block_start>"""A class for building a node tree."""<def_stmt>__init__ self node_name='' node_type=<none><block_start>self.node_name=node_name<line_sep>self._node_type=node_type<line_sep>self._children={}<block_end>@property<def_stmt>node_type self<block_start>"""The property of node type."""<line_sep><return>self._node_type<block_end>@node_type.setter<def_stmt>node_type self value<block_start>"""Set the node type."""<line_sep>self._node_type=value<block_end><def_stmt>add self name node_type=<none><block_start>"""Add sub node."""<line_sep>sub_name='/'.join([self.node_name name])<if>self.node_name<else>name<line_sep>sub_node=NodeTree(sub_name node_type)<line_sep>self._children[name]=sub_node<line_sep><return>sub_node<block_end><def_stmt>get self sub_name<block_start>"""Get sub node."""<line_sep><return>self._children.get(sub_name)<block_end><def_stmt>get_children self<block_start>"""Get all childrens."""<for_stmt>name_scope,sub_node self._children.items()<block_start><yield>name_scope sub_node<block_end><block_end><def_stmt>remove self sub_name<block_start>"""Remove sub node."""<try_stmt><block_start>self._children.pop(sub_name)<block_end><except_stmt>KeyError<as>err<block_start>log.error("Failed to find node %s. %s" sub_name err)<line_sep><raise>ParamValueError("Failed to find node {}".format(sub_name))<block_end><block_end><block_end>
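A minimal usage sketch for the NodeTree class above. It only exercises the methods defined here, so the mindinsight imports are not required for the snippet itself; the scope and type strings ('Default', 'conv1', 'Conv2D') are purely illustrative.

# Build a small name-scope tree rooted at an unnamed node.
root = NodeTree()
default_scope = root.add('Default')
default_scope.add('conv1', node_type='Conv2D')
default_scope.add('conv2', node_type='Conv2D')

# get_children() yields (name_scope, sub_node) pairs in insertion order.
for name_scope, sub_node in default_scope.get_children():
    print(name_scope, sub_node.node_name, sub_node.node_type)
# prints:
#   conv1 Default/conv1 Conv2D
#   conv2 Default/conv2 Conv2D

default_scope.remove('conv1')          # raises ParamValueError if the name is unknown
assert default_scope.get('conv1') is None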
"""Add FileCoverage unique constraint Revision ID: 15bd4b7e6622 Revises: <PASSWORD> Create Date: 2014-05-09 11:06:50.845168 """<line_sep># revision identifiers, used by Alembic. revision='15bd4b7e6622'<line_sep>down_revision='3d8177ef<PASSWORD>'<import_from_stmt>alembic op<def_stmt>upgrade <block_start>op.create_unique_constraint('unq_job_filname' 'filecoverage' ['job_id' 'filename'])<block_end><def_stmt>downgrade <block_start>op.drop_constraint('unq_job_filname' 'filecoverage')<block_end>
# Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. """ Implementation of Cross-Covariance Image Transformer (XCiT) Based on timm and DeiT code bases https://github.com/rwightman/pytorch-image-models/tree/master/timm https://github.com/facebookresearch/deit/ """<import_stmt>math<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>functools partial<import_from_stmt>timm.models.vision_transformer _cfg Mlp<import_from_stmt>timm.models.registry register_model<import_from_stmt>timm.models.layers DropPath trunc_normal_ to_2tuple<class_stmt>PositionalEncodingFourier(nn.Module)<block_start>""" Positional encoding relying on a fourier kernel matching the one used in the "Attention is all of Need" paper. The implementation builds on DeTR code https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py """<def_stmt>__init__ self hidden_dim=32 dim=768 temperature=10000<block_start>super().__init__()<line_sep>self.token_projection=nn.Conv2d(hidden_dim<times>2 dim kernel_size=1)<line_sep>self.scale=2<times>math.pi<line_sep>self.temperature=temperature<line_sep>self.hidden_dim=hidden_dim<line_sep>self.dim=dim<block_end><def_stmt>forward self B H W<block_start>mask=torch.zeros(B H W).bool().to(self.token_projection.weight.device)<line_sep>not_mask=~mask<line_sep>y_embed=not_mask.cumsum(1 dtype=torch.float32)<line_sep>x_embed=not_mask.cumsum(2 dtype=torch.float32)<line_sep>eps=1e-6<line_sep>y_embed=y_embed/(y_embed[: -1: :]+eps)<times>self.scale<line_sep>x_embed=x_embed/(x_embed[: : -1:]+eps)<times>self.scale<line_sep>dim_t=torch.arange(self.hidden_dim dtype=torch.float32 device=mask.device)<line_sep>dim_t=self.temperature<power>(2<times>(dim_t<floordiv>2)/self.hidden_dim)<line_sep>pos_x=x_embed[: : : <none>]/dim_t<line_sep>pos_y=y_embed[: : : <none>]/dim_t<line_sep>pos_x=torch.stack((pos_x[: : : 0::2].sin() pos_x[: : : 1::2].cos()) dim=4).flatten(3)<line_sep>pos_y=torch.stack((pos_y[: : : 0::2].sin() pos_y[: : : 1::2].cos()) dim=4).flatten(3)<line_sep>pos=torch.cat((pos_y pos_x) dim=3).permute(0 3 1 2)<line_sep>pos=self.token_projection(pos)<line_sep><return>pos<block_end><block_end><def_stmt>conv3x3 in_planes out_planes stride=1<block_start>"""3x3 convolution with padding"""<line_sep><return>torch.nn.Sequential(nn.Conv2d(in_planes out_planes kernel_size=3 stride=stride padding=1 bias=<false>) nn.SyncBatchNorm(out_planes))<block_end><class_stmt>ConvPatchEmbed(nn.Module)<block_start>""" Image to Patch Embedding using multiple convolutional layers """<def_stmt>__init__ self img_size=224 patch_size=16 in_chans=3 embed_dim=768<block_start>super().__init__()<line_sep>img_size=to_2tuple(img_size)<line_sep>patch_size=to_2tuple(patch_size)<line_sep>num_patches=(img_size[1]<floordiv>patch_size[1])<times>(img_size[0]<floordiv>patch_size[0])<line_sep>self.img_size=img_size<line_sep>self.patch_size=patch_size<line_sep>self.num_patches=num_patches<if_stmt>patch_size[0]<eq>16<block_start>self.proj=torch.nn.Sequential(conv3x3(3 embed_dim<floordiv>8 2) nn.GELU() conv3x3(embed_dim<floordiv>8 embed_dim<floordiv>4 2) nn.GELU() conv3x3(embed_dim<floordiv>4 embed_dim<floordiv>2 2) nn.GELU() conv3x3(embed_dim<floordiv>2 embed_dim 2) )<block_end><elif_stmt>patch_size[0]<eq>8<block_start>self.proj=torch.nn.Sequential(conv3x3(3 embed_dim<floordiv>4 2) nn.GELU() conv3x3(embed_dim<floordiv>4 embed_dim<floordiv>2 2) nn.GELU() conv3x3(embed_dim<floordiv>2 embed_dim 2) )<block_end><else_stmt><block_start><raise>("For convolutional projection, patch size has to be in [8, 
16]")<block_end><block_end><def_stmt>forward self x padding_size=<none><block_start>B,C,H,W=x.shape<line_sep>x=self.proj(x)<line_sep>Hp,Wp=x.shape[2] x.shape[3]<line_sep>x=x.flatten(2).transpose(1 2)<line_sep><return>x (Hp Wp)<block_end><block_end><class_stmt>LPI(nn.Module)<block_start>""" Local Patch Interaction module that allows explicit communication between tokens in 3x3 windows to augment the implicit communcation performed by the block diagonal scatter attention. Implemented using 2 layers of separable 3x3 convolutions with GeLU and BatchNorm2d """<def_stmt>__init__ self in_features hidden_features=<none> out_features=<none> act_layer=nn.GELU drop=0. kernel_size=3<block_start>super().__init__()<line_sep>out_features=out_features<or>in_features<line_sep>padding=kernel_size<floordiv>2<line_sep>self.conv1=torch.nn.Conv2d(in_features out_features kernel_size=kernel_size padding=padding groups=out_features)<line_sep>self.act=act_layer()<line_sep>self.bn=nn.SyncBatchNorm(in_features)<line_sep>self.conv2=torch.nn.Conv2d(in_features out_features kernel_size=kernel_size padding=padding groups=out_features)<block_end><def_stmt>forward self x H W<block_start>B,N,C=x.shape<line_sep>x=x.permute(0 2 1).reshape(B C H W)<line_sep>x=self.conv1(x)<line_sep>x=self.act(x)<line_sep>x=self.bn(x)<line_sep>x=self.conv2(x)<line_sep>x=x.reshape(B C N).permute(0 2 1)<line_sep><return>x<block_end><block_end><class_stmt>ClassAttention(nn.Module)<block_start>"""Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239 """<def_stmt>__init__ self dim num_heads=8 qkv_bias=<false> qk_scale=<none> attn_drop=0. proj_drop=0.<block_start>super().__init__()<line_sep>self.num_heads=num_heads<line_sep>head_dim=dim<floordiv>num_heads<line_sep>self.scale=qk_scale<or>head_dim<power>-0.5<line_sep>self.qkv=nn.Linear(dim dim<times>3 bias=qkv_bias)<line_sep>self.attn_drop=nn.Dropout(attn_drop)<line_sep>self.proj=nn.Linear(dim dim)<line_sep>self.proj_drop=nn.Dropout(proj_drop)<block_end><def_stmt>forward self x<block_start>B,N,C=x.shape<line_sep>qkv=self.qkv(x).reshape(B N 3 self.num_heads C<floordiv>self.num_heads)<line_sep>qkv=qkv.permute(2 0 3 1 4)<line_sep>q,k,v=qkv[0] qkv[1] qkv[2]# make torchscript happy (cannot use tensor as tuple) qc=q[: : 0:1]# CLS token attn_cls=(qc<times>k).sum(dim=-1)<times>self.scale<line_sep>attn_cls=attn_cls.softmax(dim=-1)<line_sep>attn_cls=self.attn_drop(attn_cls)<line_sep>cls_tkn=(attn_cls.unsqueeze(2)@v).transpose(1 2).reshape(B 1 C)<line_sep>cls_tkn=self.proj(cls_tkn)<line_sep>x=torch.cat([self.proj_drop(cls_tkn) x[: 1:]] dim=1)<line_sep><return>x<block_end><block_end><class_stmt>ClassAttentionBlock(nn.Module)<block_start>"""Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239 """<def_stmt>__init__ self dim num_heads mlp_ratio=4. qkv_bias=<false> qk_scale=<none> drop=0. attn_drop=0. drop_path=0. 
act_layer=nn.GELU norm_layer=nn.LayerNorm eta=<none> tokens_norm=<false><block_start>super().__init__()<line_sep>self.norm1=norm_layer(dim)<line_sep>self.attn=ClassAttention(dim num_heads=num_heads qkv_bias=qkv_bias qk_scale=qk_scale attn_drop=attn_drop proj_drop=drop)<line_sep>self.drop_path=DropPath(drop_path)<if>drop_path<g>0.<else>nn.Identity()<line_sep>self.norm2=norm_layer(dim)<line_sep>mlp_hidden_dim=int(dim<times>mlp_ratio)<line_sep>self.mlp=Mlp(in_features=dim hidden_features=mlp_hidden_dim act_layer=act_layer drop=drop)<if_stmt>eta<is><not><none># LayerScale Initialization (no layerscale when None) <block_start>self.gamma1=nn.Parameter(eta<times>torch.ones(dim) requires_grad=<true>)<line_sep>self.gamma2=nn.Parameter(eta<times>torch.ones(dim) requires_grad=<true>)<block_end><else_stmt><block_start>self.gamma1,self.gamma2=1.0 1.0<block_end># FIXME: A hack for models pre-trained with layernorm over all the tokens not just the CLS self.tokens_norm=tokens_norm<block_end><def_stmt>forward self x H W mask=<none><block_start>x=x+self.drop_path(self.gamma1<times>self.attn(self.norm1(x)))<if_stmt>self.tokens_norm<block_start>x=self.norm2(x)<block_end><else_stmt><block_start>x[: 0:1]=self.norm2(x[: 0:1])<block_end>x_res=x<line_sep>cls_token=x[: 0:1]<line_sep>cls_token=self.gamma2<times>self.mlp(cls_token)<line_sep>x=torch.cat([cls_token x[: 1:]] dim=1)<line_sep>x=x_res+self.drop_path(x)<line_sep><return>x<block_end><block_end><class_stmt>XCA(nn.Module)<block_start>""" Cross-Covariance Attention (XCA) operation where the channels are updated using a weighted sum. The weights are obtained from the (softmax normalized) Cross-covariance matrix (Q^T K \\in d_h \\times d_h) """<def_stmt>__init__ self dim num_heads=8 qkv_bias=<false> qk_scale=<none> attn_drop=0. proj_drop=0.<block_start>super().__init__()<line_sep>self.num_heads=num_heads<line_sep>self.temperature=nn.Parameter(torch.ones(num_heads 1 1))<line_sep>self.qkv=nn.Linear(dim dim<times>3 bias=qkv_bias)<line_sep>self.attn_drop=nn.Dropout(attn_drop)<line_sep>self.proj=nn.Linear(dim dim)<line_sep>self.proj_drop=nn.Dropout(proj_drop)<block_end><def_stmt>forward self x<block_start>B,N,C=x.shape<line_sep>qkv=self.qkv(x).reshape(B N 3 self.num_heads C<floordiv>self.num_heads)<line_sep>qkv=qkv.permute(2 0 3 1 4)<line_sep>q,k,v=qkv[0] qkv[1] qkv[2]# make torchscript happy (cannot use tensor as tuple) q=q.transpose(-2 -1)<line_sep>k=k.transpose(-2 -1)<line_sep>v=v.transpose(-2 -1)<line_sep>q=torch.nn.functional.normalize(q dim=-1)<line_sep>k=torch.nn.functional.normalize(k dim=-1)<line_sep>attn=([email protected](-2 -1))<times>self.temperature<line_sep>attn=attn.softmax(dim=-1)<line_sep>attn=self.attn_drop(attn)<line_sep>x=(attn@v).permute(0 3 1 2).reshape(B N C)<line_sep>x=self.proj(x)<line_sep>x=self.proj_drop(x)<line_sep><return>x<block_end>@torch.jit.ignore<def_stmt>no_weight_decay self<block_start><return>{'temperature'}<block_end><block_end><class_stmt>XCABlock(nn.Module)<block_start><def_stmt>__init__ self dim num_heads mlp_ratio=4. qkv_bias=<false> qk_scale=<none> drop=0. attn_drop=0. drop_path=0. 
act_layer=nn.GELU norm_layer=nn.LayerNorm num_tokens=196 eta=<none><block_start>super().__init__()<line_sep>self.norm1=norm_layer(dim)<line_sep>self.attn=XCA(dim num_heads=num_heads qkv_bias=qkv_bias qk_scale=qk_scale attn_drop=attn_drop proj_drop=drop)<line_sep>self.drop_path=DropPath(drop_path)<if>drop_path<g>0.<else>nn.Identity()<line_sep>self.norm2=norm_layer(dim)<line_sep>mlp_hidden_dim=int(dim<times>mlp_ratio)<line_sep>self.mlp=Mlp(in_features=dim hidden_features=mlp_hidden_dim act_layer=act_layer drop=drop)<line_sep>self.norm3=norm_layer(dim)<line_sep>self.local_mp=LPI(in_features=dim act_layer=act_layer)<line_sep>self.gamma1=nn.Parameter(eta<times>torch.ones(dim) requires_grad=<true>)<line_sep>self.gamma2=nn.Parameter(eta<times>torch.ones(dim) requires_grad=<true>)<line_sep>self.gamma3=nn.Parameter(eta<times>torch.ones(dim) requires_grad=<true>)<block_end><def_stmt>forward self x H W<block_start>x=x+self.drop_path(self.gamma1<times>self.attn(self.norm1(x)))<line_sep>x=x+self.drop_path(self.gamma3<times>self.local_mp(self.norm3(x) H W))<line_sep>x=x+self.drop_path(self.gamma2<times>self.mlp(self.norm2(x)))<line_sep><return>x<block_end><block_end><class_stmt>XCiT(nn.Module)<block_start>""" Based on timm and DeiT code bases https://github.com/rwightman/pytorch-image-models/tree/master/timm https://github.com/facebookresearch/deit/ """<def_stmt>__init__ self img_size=224 patch_size=16 in_chans=3 num_classes=1000 embed_dim=768 depth=12 num_heads=12 mlp_ratio=4. qkv_bias=<true> qk_scale=<none> drop_rate=0. attn_drop_rate=0. drop_path_rate=0. norm_layer=<none> cls_attn_layers=2 use_pos=<true> patch_proj='linear' eta=<none> tokens_norm=<false><block_start>""" Args: img_size (int, tuple): input image size patch_size (int, tuple): patch size in_chans (int): number of input channels num_classes (int): number of classes for classification head embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True qk_scale (float): override default qk scale of head_dim ** -0.5 if set drop_rate (float): dropout rate attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate norm_layer: (nn.Module): normalization layer cls_attn_layers: (int) Depth of Class attention layers use_pos: (bool) whether to use positional encoding eta: (float) layerscale initialization value tokens_norm: (bool) Whether to normalize all tokens or just the cls_token in the CA """<line_sep>super().__init__()<line_sep>self.num_classes=num_classes<line_sep>self.num_features=self.embed_dim=embed_dim<line_sep>norm_layer=norm_layer<or>partial(nn.LayerNorm eps=1e-6)<line_sep>self.patch_embed=ConvPatchEmbed(img_size=img_size embed_dim=embed_dim patch_size=patch_size)<line_sep>num_patches=self.patch_embed.num_patches<line_sep>self.cls_token=nn.Parameter(torch.zeros(1 1 embed_dim))<line_sep>self.pos_drop=nn.Dropout(p=drop_rate)<line_sep>dpr=[drop_path_rate<for>i range(depth)]<line_sep>self.blocks=nn.ModuleList([XCABlock(dim=embed_dim num_heads=num_heads mlp_ratio=mlp_ratio qkv_bias=qkv_bias qk_scale=qk_scale drop=drop_rate attn_drop=attn_drop_rate drop_path=dpr[i] norm_layer=norm_layer num_tokens=num_patches eta=eta)<for>i range(depth)])<line_sep>self.cls_attn_blocks=nn.ModuleList([ClassAttentionBlock(dim=embed_dim num_heads=num_heads mlp_ratio=mlp_ratio qkv_bias=qkv_bias qk_scale=qk_scale drop=drop_rate attn_drop=attn_drop_rate norm_layer=norm_layer 
eta=eta tokens_norm=tokens_norm)<for>i range(cls_attn_layers)])<line_sep>self.norm=norm_layer(embed_dim)<line_sep>self.head=nn.Linear(self.num_features num_classes)<if>num_classes<g>0<else>nn.Identity()<line_sep>self.pos_embeder=PositionalEncodingFourier(dim=embed_dim)<line_sep>self.use_pos=use_pos<line_sep># Classifier head trunc_normal_(self.cls_token std=.02)<line_sep>self.apply(self._init_weights)<block_end><def_stmt>_init_weights self m<block_start><if_stmt>isinstance(m nn.Linear)<block_start>trunc_normal_(m.weight std=.02)<if_stmt>isinstance(m nn.Linear)<and>m.bias<is><not><none><block_start>nn.init.constant_(m.bias 0)<block_end><block_end><elif_stmt>isinstance(m nn.LayerNorm)<block_start>nn.init.constant_(m.bias 0)<line_sep>nn.init.constant_(m.weight 1.0)<block_end><block_end>@torch.jit.ignore<def_stmt>no_weight_decay self<block_start><return>{'pos_embed' 'cls_token' 'dist_token'}<block_end><def_stmt>forward_features self x<block_start>B,C,H,W=x.shape<line_sep>x,(Hp Wp)=self.patch_embed(x)<if_stmt>self.use_pos<block_start>pos_encoding=self.pos_embeder(B Hp Wp).reshape(B -1 x.shape[1]).permute(0 2 1)<line_sep>x=x+pos_encoding<block_end>x=self.pos_drop(x)<for_stmt>blk self.blocks<block_start>x=blk(x Hp Wp)<block_end>cls_tokens=self.cls_token.expand(B -1 -1)<line_sep>x=torch.cat((cls_tokens x) dim=1)<for_stmt>blk self.cls_attn_blocks<block_start>x=blk(x Hp Wp)<block_end>x=self.norm(x)[: 0]<line_sep><return>x<block_end><def_stmt>forward self x<block_start>x=self.forward_features(x)<line_sep>x=self.head(x)<if_stmt>self.training<block_start><return>x x<block_end><else_stmt><block_start><return>x<block_end><block_end><block_end># Patch size 16x16 models @register_model<def_stmt>xcit_nano_12_p16 pretrained=<false> **kwargs<block_start>model=XCiT(patch_size=16 embed_dim=128 depth=12 num_heads=4 mlp_ratio=4 qkv_bias=<true> norm_layer=partial(nn.LayerNorm eps=1e-6) eta=1.0 tokens_norm=<false> **kwargs)<line_sep>model.default_cfg=_cfg()<line_sep><return>model<block_end>@register_model<def_stmt>xcit_tiny_12_p16 pretrained=<false> **kwargs<block_start>model=XCiT(patch_size=16 embed_dim=192 depth=12 num_heads=4 mlp_ratio=4 qkv_bias=<true> norm_layer=partial(nn.LayerNorm eps=1e-6) eta=1.0 tokens_norm=<true> **kwargs)<line_sep>model.default_cfg=_cfg()<line_sep><return>model<block_end>@register_model<def_stmt>xcit_small_12_p16 pretrained=<false> **kwargs<block_start>model=XCiT(patch_size=16 embed_dim=384 depth=12 num_heads=8 mlp_ratio=4 qkv_bias=<true> norm_layer=partial(nn.LayerNorm eps=1e-6) eta=1.0 tokens_norm=<true> **kwargs)<line_sep>model.default_cfg=_cfg()<line_sep><return>model<block_end>@register_model<def_stmt>xcit_tiny_24_p16 pretrained=<false> **kwargs<block_start>model=XCiT(patch_size=16 embed_dim=192 depth=24 num_heads=4 mlp_ratio=4 qkv_bias=<true> norm_layer=partial(nn.LayerNorm eps=1e-6) eta=1e-5 tokens_norm=<true> **kwargs)<line_sep>model.default_cfg=_cfg()<line_sep><return>model<block_end>@register_model<def_stmt>xcit_small_24_p16 pretrained=<false> **kwargs<block_start>model=XCiT(patch_size=16 embed_dim=384 depth=24 num_heads=8 mlp_ratio=4 qkv_bias=<true> norm_layer=partial(nn.LayerNorm eps=1e-6) eta=1e-5 tokens_norm=<true> **kwargs)<line_sep>model.default_cfg=_cfg()<line_sep><return>model<block_end>@register_model<def_stmt>xcit_medium_24_p16 pretrained=<false> **kwargs<block_start>model=XCiT(patch_size=16 embed_dim=512 depth=24 num_heads=8 mlp_ratio=4 qkv_bias=<true> norm_layer=partial(nn.LayerNorm eps=1e-6) eta=1e-5 tokens_norm=<true> 
**kwargs)<line_sep>model.default_cfg=_cfg()<line_sep><return>model<block_end>@register_model<def_stmt>xcit_large_24_p16 pretrained=<false> **kwargs<block_start>model=XCiT(patch_size=16 embed_dim=768 depth=24 num_heads=16 mlp_ratio=4 qkv_bias=<true> norm_layer=partial(nn.LayerNorm eps=1e-6) eta=1e-5 tokens_norm=<true> **kwargs)<line_sep>model.default_cfg=_cfg()<line_sep><return>model<block_end># Patch size 8x8 models @register_model<def_stmt>xcit_nano_12_p8 pretrained=<false> **kwargs<block_start>model=XCiT(patch_size=8 embed_dim=128 depth=12 num_heads=4 mlp_ratio=4 qkv_bias=<true> norm_layer=partial(nn.LayerNorm eps=1e-6) eta=1.0 tokens_norm=<false> **kwargs)<line_sep>model.default_cfg=_cfg()<line_sep><return>model<block_end>@register_model<def_stmt>xcit_tiny_12_p8 pretrained=<false> **kwargs<block_start>model=XCiT(patch_size=8 embed_dim=192 depth=12 num_heads=4 mlp_ratio=4 qkv_bias=<true> norm_layer=partial(nn.LayerNorm eps=1e-6) eta=1.0 tokens_norm=<true> **kwargs)<line_sep>model.default_cfg=_cfg()<line_sep><return>model<block_end>@register_model<def_stmt>xcit_small_12_p8 pretrained=<false> **kwargs<block_start>model=XCiT(patch_size=8 embed_dim=384 depth=12 num_heads=8 mlp_ratio=4 qkv_bias=<true> norm_layer=partial(nn.LayerNorm eps=1e-6) eta=1.0 tokens_norm=<true> **kwargs)<line_sep>model.default_cfg=_cfg()<line_sep><return>model<block_end>@register_model<def_stmt>xcit_tiny_24_p8 pretrained=<false> **kwargs<block_start>model=XCiT(patch_size=8 embed_dim=192 depth=24 num_heads=4 mlp_ratio=4 qkv_bias=<true> norm_layer=partial(nn.LayerNorm eps=1e-6) eta=1e-5 tokens_norm=<true> **kwargs)<line_sep>model.default_cfg=_cfg()<line_sep><return>model<block_end>@register_model<def_stmt>xcit_small_24_p8 pretrained=<false> **kwargs<block_start>model=XCiT(patch_size=8 embed_dim=384 depth=24 num_heads=8 mlp_ratio=4 qkv_bias=<true> norm_layer=partial(nn.LayerNorm eps=1e-6) eta=1e-5 tokens_norm=<true> **kwargs)<line_sep>model.default_cfg=_cfg()<line_sep><return>model<block_end>@register_model<def_stmt>xcit_medium_24_p8 pretrained=<false> **kwargs<block_start>model=XCiT(patch_size=8 embed_dim=512 depth=24 num_heads=8 mlp_ratio=4 qkv_bias=<true> norm_layer=partial(nn.LayerNorm eps=1e-6) eta=1e-5 tokens_norm=<true> **kwargs)<line_sep>model.default_cfg=_cfg()<line_sep><return>model<block_end>@register_model<def_stmt>xcit_large_24_p8 pretrained=<false> **kwargs<block_start>model=XCiT(patch_size=8 embed_dim=768 depth=24 num_heads=16 mlp_ratio=4 qkv_bias=<true> norm_layer=partial(nn.LayerNorm eps=1e-6) eta=1e-5 tokens_norm=<true> **kwargs)<line_sep>model.default_cfg=_cfg()<line_sep><return>model<block_end>
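The registered variants above share one constructor and differ only in patch size, width, depth, and LayerScale/token-norm settings. As a rough illustration (assuming this module's timm imports resolve with the installed timm version), the smallest configuration can be built and inspected directly without running a forward pass:

# Instantiate the smallest registered configuration; no pretrained weights are loaded here.
model = xcit_nano_12_p16(num_classes=1000)
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"xcit_nano_12_p16: {n_params / 1e6:.1f}M trainable parameters")

# A forward pass would be `model(torch.randn(1, 3, 224, 224))`, but it is omitted here
# because the SyncBatchNorm layers in ConvPatchEmbed/LPI are intended for (multi-)GPU training.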
<import_stmt>os<import_from_stmt>shutil rmtree<import_from_stmt>uuid uuid4<import_from_stmt>mezzanine.conf settings<import_from_stmt>mezzanine.core.templatetags.mezzanine_tags thumbnail<import_from_stmt>mezzanine.galleries.models GALLERIES_UPLOAD_DIR Gallery<import_from_stmt>mezzanine.utils.tests TestCase copy_test_to_media<class_stmt>GalleriesTests(TestCase)<block_start><def_stmt>test_gallery_import self<block_start>""" Test that a gallery creates images when given a zip file to import, and that descriptions are created. """<line_sep>zip_name="gallery.zip"<line_sep>copy_test_to_media("mezzanine.core" zip_name)<line_sep>title=str(uuid4())<line_sep>gallery=Gallery.objects.create(title=title zip_import=zip_name)<line_sep>images=list(gallery.images.all())<line_sep>self.assertTrue(images)<line_sep>self.assertTrue(all([image.description<for>image images]))<line_sep># Clean up. rmtree(os.path.join(settings.MEDIA_ROOT GALLERIES_UPLOAD_DIR title))<block_end><def_stmt>test_thumbnail_generation self<block_start>""" Test that a thumbnail is created and resized. """<try_stmt><block_start><import_from_stmt>PIL Image<block_end><except_stmt>ImportError<block_start><return><block_end>image_name="image.jpg"<line_sep>size=(24 24)<line_sep>copy_test_to_media("mezzanine.core" image_name)<line_sep>thumb_name=os.path.join(settings.THUMBNAILS_DIR_NAME image_name image_name.replace("." "-%sx%s."%size) )<line_sep>thumb_path=os.path.join(settings.MEDIA_ROOT thumb_name)<line_sep>thumb_image=thumbnail(image_name *size)<line_sep>self.assertEqual(os.path.normpath(thumb_image.lstrip("/")) thumb_name)<line_sep>self.assertNotEqual(os.path.getsize(thumb_path) 0)<line_sep>thumb=Image.open(thumb_path)<line_sep>self.assertEqual(thumb.size size)<line_sep># Clean up. <del_stmt>thumb<line_sep>os.remove(os.path.join(settings.MEDIA_ROOT image_name))<line_sep>os.remove(os.path.join(thumb_path))<line_sep>rmtree(os.path.join(os.path.dirname(thumb_path)))<block_end><block_end>
<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>....ops.iou3d_nms iou3d_nms_utils<class_stmt>ProposalTargetLayer(nn.Module)<block_start><def_stmt>__init__ self roi_sampler_cfg<block_start>super().__init__()<line_sep>self.roi_sampler_cfg=roi_sampler_cfg<block_end><def_stmt>forward self batch_dict<block_start>""" Args: batch_dict: batch_size: rois: (B, num_rois, 7 + C) roi_scores: (B, num_rois) gt_boxes: (B, N, 7 + C + 1) roi_labels: (B, num_rois) Returns: batch_dict: rois: (B, M, 7 + C) gt_of_rois: (B, M, 7 + C) gt_iou_of_rois: (B, M) roi_scores: (B, M) roi_labels: (B, M) reg_valid_mask: (B, M) rcnn_cls_labels: (B, M) """<line_sep>batch_rois,batch_gt_of_rois,batch_roi_ious,batch_roi_scores,batch_roi_labels=self.sample_rois_for_rcnn(batch_dict=batch_dict)<line_sep># regression valid mask reg_valid_mask=(batch_roi_ious<g>self.roi_sampler_cfg.REG_FG_THRESH).long()<line_sep># classification label <if_stmt>self.roi_sampler_cfg.CLS_SCORE_TYPE<eq>'cls'<block_start>batch_cls_labels=(batch_roi_ious<g>self.roi_sampler_cfg.CLS_FG_THRESH).long()<line_sep>ignore_mask=(batch_roi_ious<g>self.roi_sampler_cfg.CLS_BG_THRESH)&(batch_roi_ious<l>self.roi_sampler_cfg.CLS_FG_THRESH)<line_sep>batch_cls_labels[ignore_mask<g>0]=-1<block_end><elif_stmt>self.roi_sampler_cfg.CLS_SCORE_TYPE<eq>'roi_iou'<block_start>iou_bg_thresh=self.roi_sampler_cfg.CLS_BG_THRESH<line_sep>iou_fg_thresh=self.roi_sampler_cfg.CLS_FG_THRESH<line_sep>fg_mask=batch_roi_ious<g>iou_fg_thresh<line_sep>bg_mask=batch_roi_ious<l>iou_bg_thresh<line_sep>interval_mask=(fg_mask<eq>0)&(bg_mask<eq>0)<line_sep>batch_cls_labels=(fg_mask<g>0).float()<line_sep>batch_cls_labels[interval_mask]=(batch_roi_ious[interval_mask]-iou_bg_thresh)/(iou_fg_thresh-iou_bg_thresh)<block_end><elif_stmt>self.roi_sampler_cfg.CLS_SCORE_TYPE<eq>'raw_roi_iou'<block_start>batch_cls_labels=batch_roi_ious<block_end><else_stmt><block_start><raise>NotImplementedError<block_end>targets_dict={'rois':batch_rois 'gt_of_rois':batch_gt_of_rois 'gt_iou_of_rois':batch_roi_ious 'roi_scores':batch_roi_scores 'roi_labels':batch_roi_labels 'reg_valid_mask':reg_valid_mask 'rcnn_cls_labels':batch_cls_labels}<line_sep><return>targets_dict<block_end><def_stmt>sample_rois_for_rcnn self batch_dict<block_start>""" Args: batch_dict: batch_size: rois: (B, num_rois, 7 + C) roi_scores: (B, num_rois) gt_boxes: (B, N, 7 + C + 1) roi_labels: (B, num_rois) Returns: """<line_sep>batch_size=batch_dict['batch_size']<line_sep>rois=batch_dict['rois']<line_sep>roi_scores=batch_dict['roi_scores']<line_sep>roi_labels=batch_dict['roi_labels']<line_sep>gt_boxes=batch_dict['gt_boxes']<line_sep>code_size=rois.shape[-1]<line_sep>batch_rois=rois.new_zeros(batch_size self.roi_sampler_cfg.ROI_PER_IMAGE code_size)<line_sep>batch_gt_of_rois=rois.new_zeros(batch_size self.roi_sampler_cfg.ROI_PER_IMAGE code_size+1)<line_sep>batch_roi_ious=rois.new_zeros(batch_size self.roi_sampler_cfg.ROI_PER_IMAGE)<line_sep>batch_roi_scores=rois.new_zeros(batch_size self.roi_sampler_cfg.ROI_PER_IMAGE)<line_sep>batch_roi_labels=rois.new_zeros((batch_size self.roi_sampler_cfg.ROI_PER_IMAGE) dtype=torch.long)<for_stmt>index range(batch_size)<block_start>cur_roi,cur_gt,cur_roi_labels,cur_roi_scores=rois[index] gt_boxes[index] roi_labels[index] roi_scores[index]<line_sep>k=cur_gt.__len__()-1<while_stmt>k<g>0<and>cur_gt[k].sum()<eq>0<block_start>k<augsub>1<block_end>cur_gt=cur_gt[:k+1]<line_sep>cur_gt=cur_gt.new_zeros((1 
cur_gt.shape[1]))<if>len(cur_gt)<eq>0<else>cur_gt<if_stmt>self.roi_sampler_cfg.get('SAMPLE_ROI_BY_EACH_CLASS' <false>)<block_start>max_overlaps,gt_assignment=self.get_max_iou_with_same_class(rois=cur_roi roi_labels=cur_roi_labels gt_boxes=cur_gt[: 0:7] gt_labels=cur_gt[: -1].long())<block_end><else_stmt><block_start>iou3d=iou3d_nms_utils.boxes_iou3d_gpu(cur_roi cur_gt[: 0:7])# (M, N) max_overlaps,gt_assignment=torch.max(iou3d dim=1)<block_end>sampled_inds=self.subsample_rois(max_overlaps=max_overlaps)<line_sep>batch_rois[index]=cur_roi[sampled_inds]<line_sep>batch_roi_labels[index]=cur_roi_labels[sampled_inds]<line_sep>batch_roi_ious[index]=max_overlaps[sampled_inds]<line_sep>batch_roi_scores[index]=cur_roi_scores[sampled_inds]<line_sep>batch_gt_of_rois[index]=cur_gt[gt_assignment[sampled_inds]]<block_end><return>batch_rois batch_gt_of_rois batch_roi_ious batch_roi_scores batch_roi_labels<block_end><def_stmt>subsample_rois self max_overlaps# sample fg, easy_bg, hard_bg <block_start>fg_rois_per_image=int(np.round(self.roi_sampler_cfg.FG_RATIO<times>self.roi_sampler_cfg.ROI_PER_IMAGE))<line_sep>fg_thresh=min(self.roi_sampler_cfg.REG_FG_THRESH self.roi_sampler_cfg.CLS_FG_THRESH)<line_sep>fg_inds=torch.nonzero((max_overlaps<ge>fg_thresh)).view(-1)<line_sep>easy_bg_inds=torch.nonzero((max_overlaps<l>self.roi_sampler_cfg.CLS_BG_THRESH_LO)).view(-1)<line_sep>hard_bg_inds=torch.nonzero((max_overlaps<l>self.roi_sampler_cfg.REG_FG_THRESH)&(max_overlaps<ge>self.roi_sampler_cfg.CLS_BG_THRESH_LO)).view(-1)<line_sep>fg_num_rois=fg_inds.numel()<line_sep>bg_num_rois=hard_bg_inds.numel()+easy_bg_inds.numel()<if_stmt>fg_num_rois<g>0<and>bg_num_rois<g>0# sampling fg <block_start>fg_rois_per_this_image=min(fg_rois_per_image fg_num_rois)<line_sep>rand_num=torch.from_numpy(np.random.permutation(fg_num_rois)).type_as(max_overlaps).long()<line_sep>fg_inds=fg_inds[rand_num[:fg_rois_per_this_image]]<line_sep># sampling bg bg_rois_per_this_image=self.roi_sampler_cfg.ROI_PER_IMAGE-fg_rois_per_this_image<line_sep>bg_inds=self.sample_bg_inds(hard_bg_inds easy_bg_inds bg_rois_per_this_image self.roi_sampler_cfg.HARD_BG_RATIO)<block_end><elif_stmt>fg_num_rois<g>0<and>bg_num_rois<eq>0# sampling fg <block_start>rand_num=np.floor(np.random.rand(self.roi_sampler_cfg.ROI_PER_IMAGE)<times>fg_num_rois)<line_sep>rand_num=torch.from_numpy(rand_num).type_as(max_overlaps).long()<line_sep>fg_inds=fg_inds[rand_num]<line_sep>bg_inds=[]<block_end><elif_stmt>bg_num_rois<g>0<and>fg_num_rois<eq>0# sampling bg <block_start>bg_rois_per_this_image=self.roi_sampler_cfg.ROI_PER_IMAGE<line_sep>bg_inds=self.sample_bg_inds(hard_bg_inds easy_bg_inds bg_rois_per_this_image self.roi_sampler_cfg.HARD_BG_RATIO)<block_end><else_stmt><block_start>print('maxoverlaps:(min=%f, max=%f)'%(max_overlaps.min().item() max_overlaps.max().item()))<line_sep>print('ERROR: FG=%d, BG=%d'%(fg_num_rois bg_num_rois))<line_sep><raise>NotImplementedError<block_end>sampled_inds=torch.cat((fg_inds bg_inds) dim=0)<line_sep><return>sampled_inds<block_end>@staticmethod<def_stmt>sample_bg_inds hard_bg_inds easy_bg_inds bg_rois_per_this_image hard_bg_ratio<block_start><if_stmt>hard_bg_inds.numel()<g>0<and>easy_bg_inds.numel()<g>0<block_start>hard_bg_rois_num=min(int(bg_rois_per_this_image<times>hard_bg_ratio) len(hard_bg_inds))<line_sep>easy_bg_rois_num=bg_rois_per_this_image-hard_bg_rois_num<line_sep># sampling hard bg rand_idx=torch.randint(low=0 high=hard_bg_inds.numel() size=(hard_bg_rois_num )).long()<line_sep>hard_bg_inds=hard_bg_inds[rand_idx]<line_sep># sampling easy bg 
rand_idx=torch.randint(low=0 high=easy_bg_inds.numel() size=(easy_bg_rois_num )).long()<line_sep>easy_bg_inds=easy_bg_inds[rand_idx]<line_sep>bg_inds=torch.cat([hard_bg_inds easy_bg_inds] dim=0)<block_end><elif_stmt>hard_bg_inds.numel()<g>0<and>easy_bg_inds.numel()<eq>0<block_start>hard_bg_rois_num=bg_rois_per_this_image<line_sep># sampling hard bg rand_idx=torch.randint(low=0 high=hard_bg_inds.numel() size=(hard_bg_rois_num )).long()<line_sep>bg_inds=hard_bg_inds[rand_idx]<block_end><elif_stmt>hard_bg_inds.numel()<eq>0<and>easy_bg_inds.numel()<g>0<block_start>easy_bg_rois_num=bg_rois_per_this_image<line_sep># sampling easy bg rand_idx=torch.randint(low=0 high=easy_bg_inds.numel() size=(easy_bg_rois_num )).long()<line_sep>bg_inds=easy_bg_inds[rand_idx]<block_end><else_stmt><block_start><raise>NotImplementedError<block_end><return>bg_inds<block_end>@staticmethod<def_stmt>get_max_iou_with_same_class rois roi_labels gt_boxes gt_labels<block_start>""" Args: rois: (N, 7) roi_labels: (N) gt_boxes: (N, ) gt_labels: Returns: """<line_sep>""" :param rois: (N, 7) :param roi_labels: (N) :param gt_boxes: (N, 8) :return: """<line_sep>max_overlaps=rois.new_zeros(rois.shape[0])<line_sep>gt_assignment=roi_labels.new_zeros(roi_labels.shape[0])<for_stmt>k range(gt_labels.min().item() gt_labels.max().item()+1)<block_start>roi_mask=(roi_labels<eq>k)<line_sep>gt_mask=(gt_labels<eq>k)<if_stmt>roi_mask.sum()<g>0<and>gt_mask.sum()<g>0<block_start>cur_roi=rois[roi_mask]<line_sep>cur_gt=gt_boxes[gt_mask]<line_sep>original_gt_assignment=gt_mask.nonzero().view(-1)<line_sep>iou3d=iou3d_nms_utils.boxes_iou3d_gpu(cur_roi cur_gt)# (M, N) cur_max_overlaps,cur_gt_assignment=torch.max(iou3d dim=1)<line_sep>max_overlaps[roi_mask]=cur_max_overlaps<line_sep>gt_assignment[roi_mask]=original_gt_assignment[cur_gt_assignment]<block_end><block_end><return>max_overlaps gt_assignment<block_end><block_end>
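A rough sketch of exercising `subsample_rois` in isolation. The config values below are illustrative stand-ins for what would normally come from the detector's sampler config, and only this method is called because `forward` additionally needs the GPU IoU kernels from `iou3d_nms_utils`.

from types import SimpleNamespace

import torch

# Illustrative sampler settings; the attribute names match those read in subsample_rois.
dummy_cfg = SimpleNamespace(
    ROI_PER_IMAGE=128, FG_RATIO=0.5,
    REG_FG_THRESH=0.55, CLS_FG_THRESH=0.75,
    CLS_BG_THRESH=0.25, CLS_BG_THRESH_LO=0.1,
    HARD_BG_RATIO=0.8,
)
layer = ProposalTargetLayer(roi_sampler_cfg=dummy_cfg)

# Pretend IoUs between 512 proposals and their best-matching ground-truth boxes.
max_overlaps = torch.rand(512)
sampled_inds = layer.subsample_rois(max_overlaps=max_overlaps)
print(sampled_inds.shape)  # torch.Size([128]) with these settings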
<class_stmt>PlayerInfo<block_start><def_stmt>__init__ self manager<block_start>self.window=manager.window<line_sep>self.game=manager.game<line_sep>self.manager=manager<line_sep>self._bottom_index=0<line_sep>self._top_index=0<line_sep>self.bottom_num_pages=3<line_sep>self.top_num_pages=2<block_end><def_stmt>__call__ self<block_start>self.setup_ui()<line_sep>self.refresh_page()<block_end>@property<def_stmt>bottom_index self<block_start><return>self._bottom_index<block_end>@bottom_index.setter<def_stmt>bottom_index self value<block_start><if_stmt>value<l>0<block_start>self._bottom_index=self.bottom_num_pages-1<block_end><elif_stmt>value<g>self.bottom_num_pages-1<block_start>self._bottom_index=0<block_end><else_stmt><block_start>self._bottom_index=value<block_end><block_end>@property<def_stmt>top_index self<block_start><return>self._top_index<block_end>@top_index.setter<def_stmt>top_index self value<block_start><if_stmt>value<l>0<block_start>self._top_index=self.top_num_pages-1<block_end><elif_stmt>value<g>self.top_num_pages-1<block_start>self._top_index=0<block_end><else_stmt><block_start>self._top_index=value<block_end><block_end><def_stmt>setup_ui self<block_start>self.window.print(self.game.player.name (50 1))<line_sep>self.window.print(self.game.player.job.name.capitalize() (49 2))<line_sep>self.window.print(f"Level - {self.game.player.level}" (67 2))<line_sep>self.window.print(f'XP {self.game.player.experience}/{self.game.player.xp_to_next_level}' (49 3))<line_sep>self.window.print(f'Health {self.game.player.health}/{self.game.player.max_health}' (49 4))<line_sep>self.window.print(f'Mana {self.game.player.mana}/{self.game.player.max_mana}' (49 5))<block_end><def_stmt>setup_equipmnt self<block_start>self.window.print(' EQUIPMNT ' (57 7))<line_sep>i=0<for_stmt>slot_name,slot_item self.game.player.inventory.equipped.as_dict.items()<block_start><if_stmt><not>isinstance(slot_item list)<block_start>self.window.print(f'{slot_name.upper()} : {slot_item.capitalize()}' (49 8+i))<block_end><else_stmt><block_start>self.window.print(f'{slot_name.upper()} : '<concat>f'{", ".join([s.capitalize()<for>s slot_item])}' (49 8+i))<block_end>i<augadd>1<block_end><block_end><def_stmt>setup_commands self<block_start>self.window.print(' COMMANDS ' (57 7))<line_sep><pass><block_end><def_stmt>clear_page self<block_start>self.window.print(' '<times>10 (57 14))<line_sep>self.window.print(' '<times>10 (57 7))<for_stmt>i range(8)<block_start>self.window.print(' '<times>29 (48 15+i))<block_end><for_stmt>i range(6)<block_start>self.window.print(' '<times>29 (48 8+i))<block_end><block_end><def_stmt>setup_stats self<block_start>self.window.print('STATS' (59 14))<line_sep>i=0<for_stmt>key,value self.game.player.stats.as_dict.items()<block_start>self.window.print(f'{key.upper()} - {value}' (49 15+i))<line_sep>i<augadd>1<block_end><block_end><def_stmt>setup_saving_throws self<block_start>self.window.print('SAV.THROWS' (57 14))<line_sep>i=0<for_stmt>key,value self.game.player.job.saving_throws.as_dict.items()<block_start>self.window.print(f'{key.upper()} - {value}' (49 15+i))<line_sep>i<augadd>1<block_end><block_end><def_stmt>setup_money self<block_start>self.window.print(' MONEY ' (57 14))<line_sep>i=0<for_stmt>key,value self.game.player.inventory.money.coins.items()<block_start>self.window.print(f'{key.upper()} : {value}' (49 15+i))<line_sep>i<augadd>1<block_end>self.window.print(f'GEMS : {self.game.player.inventory.money.gems_value} GC' (49 15+i))<line_sep>self.window.print(f'JEWELS : 
{self.game.player.inventory.money.jewels_value} GC' (49 16+i))<line_sep>self.window.print(f'TOTAL : {self.game.player.inventory.money.value:02} GC' (49 17+i))<block_end><def_stmt>on_bottom_page_left self event<block_start>self.bottom_index<augsub>1<line_sep>self.refresh_page()<block_end><def_stmt>on_bottom_page_right self event<block_start>self.bottom_index<augadd>1<line_sep>self.refresh_page()<block_end><def_stmt>on_top_page_left self event<block_start>self.top_index<augsub>1<line_sep>self.refresh_page()<block_end><def_stmt>on_top_page_right self event<block_start>self.top_index<augadd>1<line_sep>self.refresh_page()<block_end><def_stmt>refresh_page self<block_start>self.clear_page()<line_sep>[self.setup_stats self.setup_saving_throws self.setup_money][self.bottom_index]()<line_sep>[self.setup_equipmnt self.setup_commands][self.top_index]()<line_sep>self.window.button('<' (56 14) self.on_bottom_page_left)<line_sep>self.window.button('<' (56 7) self.on_top_page_left)<line_sep>self.window.button('>' (67 14) self.on_bottom_page_right)<line_sep>self.window.button('>' (67 7) self.on_top_page_right)<block_end><block_end>
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Functionality related to describing availability and information of sparsification algorithms to models within in the ML frameworks. The file is executable and will get the sparsification info for a given framework: ########## Command help: usage: info.py [-h] [--path PATH] framework Compile the available setup and information for the sparsification of a model in a given framework. positional arguments: framework the ML framework or path to a framework file to load the sparsification info for optional arguments: -h, --help show this help message and exit --path PATH A full file path to save the sparsification info to. If not supplied, will print out the sparsification info to the console. ######### EXAMPLES ######### ########## Example command for getting the sparsification info for pytorch. python src/sparseml/sparsification/info.py pytorch """<import_stmt>argparse<import_stmt>logging<import_stmt>os<import_from_stmt>enum Enum<import_from_stmt>typing Any List Optional<import_from_stmt>pydantic BaseModel Field<import_from_stmt>sparseml.base execute_in_sparseml_framework<import_from_stmt>sparseml.utils clean_path create_parent_dirs<line_sep>__all__=["ModifierType" "ModifierPropInfo" "ModifierInfo" "SparsificationInfo" "sparsification_info" "save_sparsification_info" "load_sparsification_info" ]<line_sep>_LOGGER=logging.getLogger(__name__)<class_stmt>ModifierType(Enum)<block_start>""" Types of modifiers for grouping what functionality a Modifier falls under. """<line_sep>general="general"<line_sep>training="training"<line_sep>pruning="pruning"<line_sep>quantization="quantization"<line_sep>act_sparsity="act_sparsity"<line_sep>misc="misc"<block_end><class_stmt>ModifierPropInfo(BaseModel)<block_start>""" Class for storing information and associated metadata for a property on a given Modifier. Extends pydantics BaseModel class for serialization to and from json in addition to proper type checking on construction. """<line_sep>name:str=Field(title="name" description=("Name of the property for a Modifier. "<concat>"It can be accessed by this name on the modifier instance.") )<line_sep>description:str=Field(title="description" description="Description and information for the property for a Modifier." )<line_sep>type_:str=Field(title="type_" description=("The format type for the property for a Modifier such as "<concat>"int, float, str, etc.") )<line_sep>restrictions:Optional[List[Any]]=Field(default=<none> title="restrictions" description=("Value restrictions for the property for a Modifier. "<concat>"If set, restrict the set value to one of the contained restrictions.") )<block_end><class_stmt>ModifierInfo(BaseModel)<block_start>""" Class for storing information and associated metadata for a given Modifier. Extends pydantics BaseModel class for serialization to and from json in addition to proper type checking on construction. 
"""<line_sep>name:str=Field(title="name" description=("Name/class of the Modifier to be used for construction and identification.") )<line_sep>description:str=Field(title="description" description="Description and info for the Modifier and what its used for." )<line_sep>type_:ModifierType=Field(default=ModifierType.misc title="type_" description=("The type the given Modifier is for grouping by similar functionality.") )<line_sep>props:List[ModifierPropInfo]=Field(default=[] title="props" description="The properties for the Modifier that can be set and controlled." )<line_sep>warnings:Optional[List[str]]=Field(default=<none> title="warnings" description=("Any warnings that apply for the Modifier and using it within a system") )<block_end><class_stmt>SparsificationInfo(BaseModel)<block_start>""" Class for storing the information for sparsifying in a given framework. Extends pydantics BaseModel class for serialization to and from json in addition to proper type checking on construction. """<line_sep>modifiers:List[ModifierInfo]=Field(default=[] title="modifiers" description="A list of the information for the available modifiers" )<def_stmt>type_modifiers self type_:ModifierType<arrow>List[ModifierInfo]<block_start>""" Get the contained Modifiers for a specific ModifierType. :param type_: The ModifierType to filter the returned list of Modifiers by. :type type_: ModifierType :return: The filtered list of Modifiers that match the given type_. :rtype: List[ModifierInfo] """<line_sep>modifiers=[]<for_stmt>mod self.modifiers<block_start><if_stmt>mod.type_<eq>type_<block_start>modifiers.append(mod)<block_end><block_end><return>modifiers<block_end><block_end><def_stmt>sparsification_info framework:Any<arrow>SparsificationInfo<block_start>""" Get the available setup for sparsifying model in the given framework. :param framework: The item to detect the ML framework for. See :func:`detect_framework` for more information. :type framework: Any :return: The sparsification info for the given framework :rtype: SparsificationInfo """<line_sep>_LOGGER.debug("getting sparsification info for framework %s" framework)<line_sep>info:SparsificationInfo=execute_in_sparseml_framework(framework "sparsification_info")<line_sep>_LOGGER.info("retrieved sparsification info for framework %s: %s" framework info)<line_sep><return>info<block_end><def_stmt>save_sparsification_info framework:Any path:Optional[str]=<none><block_start>""" Save the sparsification info for a given framework. If path is provided, will save to a json file at that path. If path is not provided, will print out the info. :param framework: The item to detect the ML framework for. See :func:`detect_framework` for more information. :type framework: Any :param path: The path, if any, to save the info to in json format. If not provided will print out the info. 
:type path: Optional[str] """<line_sep>_LOGGER.debug("saving sparsification info for framework %s to %s" framework path<if>path<else>"sys.out" )<line_sep>info=(sparsification_info(framework)<if><not>isinstance(framework SparsificationInfo)<else>framework)<if_stmt>path<block_start>path=clean_path(path)<line_sep>create_parent_dirs(path)<with_stmt>open(path "w")<as>file<block_start>file.write(info.json())<block_end>_LOGGER.info("saved sparsification info for framework %s in file at %s" framework path) <block_end><else_stmt><block_start>print(info.json(indent=4))<line_sep>_LOGGER.info("printed out sparsification info for framework %s" framework)<block_end><block_end><def_stmt>load_sparsification_info load:str<arrow>SparsificationInfo<block_start>""" Load the sparsification info from a file or raw json. If load exists as a path, will read from the file and use that. Otherwise will try to parse the input as a raw json str. :param load: Either a file path to a json file or a raw json string. :type load: str :return: The loaded sparsification info. :rtype: SparsificationInfo """<line_sep>load_path=clean_path(load)<if_stmt>os.path.exists(load_path)<block_start><with_stmt>open(load_path "r")<as>file<block_start>load=file.read()<block_end><block_end>info=SparsificationInfo.parse_raw(load)<line_sep><return>info<block_end><def_stmt>_parse_args <block_start>parser=argparse.ArgumentParser(description=("Compile the available setup and information for the sparsification "<concat>"of a model in a given framework."))<line_sep>parser.add_argument("framework" type=str help=("the ML framework or path to a framework file to load the "<concat>"sparsification info for") )<line_sep>parser.add_argument("--path" type=str default=<none> help=("A full file path to save the sparsification info to. "<concat>"If not supplied, will print out the sparsification info to the console.") )<line_sep><return>parser.parse_args()<block_end><def_stmt>_main <block_start>args=_parse_args()<line_sep>save_sparsification_info(args.framework args.path)<block_end><if_stmt>__name__<eq>"__main__"<block_start>_main()<block_end>
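A short sketch of building a `SparsificationInfo` by hand and filtering it; the modifier name, description, and property below are made up for illustration, whereas in normal use the object comes from `sparsification_info(framework)` for an installed framework.

info = SparsificationInfo(
    modifiers=[
        ModifierInfo(
            name="ExamplePruningModifier",
            description="Illustrative modifier that prunes weights to a target sparsity.",
            type_=ModifierType.pruning,
            props=[
                ModifierPropInfo(
                    name="final_sparsity",
                    description="Target fraction of weights set to zero.",
                    type_="float",
                ),
            ],
        ),
    ]
)

# Filter the contained modifiers by type.
print([mod.name for mod in info.type_modifiers(ModifierType.pruning)])
# -> ['ExamplePruningModifier']

# Round-trip through json, mirroring save_sparsification_info/load_sparsification_info.
restored = SparsificationInfo.parse_raw(info.json())
assert restored.modifiers[0].name == "ExamplePruningModifier"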
<import_stmt>keras.backend<as>K<import_from_stmt>keras.layers AtrousConvolution1D<import_from_stmt>keras.utils.np_utils conv_output_length<def_stmt>categorical_mean_squared_error y_true y_pred<block_start>"""MSE for categorical variables."""<line_sep><return>K.mean(K.square(K.argmax(y_true axis=-1)-K.argmax(y_pred axis=-1)))<block_end><class_stmt>CausalAtrousConvolution1D(AtrousConvolution1D)<block_start><def_stmt>__init__ self nb_filter filter_length init='glorot_uniform' activation=<none> weights=<none> border_mode='valid' subsample_length=1 atrous_rate=1 W_regularizer=<none> b_regularizer=<none> activity_regularizer=<none> W_constraint=<none> b_constraint=<none> bias=<true> causal=<false> **kwargs<block_start>super(CausalAtrousConvolution1D self).__init__(nb_filter filter_length init activation weights border_mode subsample_length atrous_rate W_regularizer b_regularizer activity_regularizer W_constraint b_constraint bias **kwargs)<line_sep>self.causal=causal<if_stmt>self.causal<and>border_mode<ne>'valid'<block_start><raise>ValueError("Causal mode dictates border_mode=valid.")<block_end><block_end><def_stmt>get_output_shape_for self input_shape<block_start>input_length=input_shape[1]<if_stmt>self.causal<block_start>input_length<augadd>self.atrous_rate<times>(self.filter_length-1)<block_end>length=conv_output_length(input_length self.filter_length self.border_mode self.subsample[0] dilation=self.atrous_rate)<line_sep><return>(input_shape[0] length self.nb_filter)<block_end><def_stmt>call self x mask=<none><block_start><if_stmt>self.causal<block_start>x=K.asymmetric_temporal_padding(x self.atrous_rate<times>(self.filter_length-1) 0)<block_end><return>super(CausalAtrousConvolution1D self).call(x mask)<block_end><block_end>
"""import_state.py A rough implementation of PEP 405. This module centers on manipulating the normal Python import machinery through its defined state. Any other approach, such as replacing builtins.__import__ is certainly legal, but not supported here. """<line_sep>__all__=['ImportState' 'default_import_state' 'globalstate']<import_stmt>sys<import_stmt>builtins<import_stmt>site<import_stmt>importlib<import_stmt>_imp<import_from_stmt>collections namedtuple<class_stmt>GlobalImportLock# no need for a generic ImportLock type, since all import states # use the same lock <block_start>@property<def_stmt>acquire self<block_start>_imp.acquire_lock()<block_end>@property<def_stmt>release self<block_start>_imp.release_lock()<block_end>@property<def_stmt>lock_held self<block_start>_imp.lock_held()<block_end><block_end>_ImportState=namedtuple('_ImportState' ('modules' 'meta_path' 'path' 'path_hooks' 'path_importer_cache' ))<class_stmt>ImportState(_ImportState)<block_start>"""A container for the import state (a la PEP 406). The dictionary in sys.modules is a special case, since it is part of the CPython interpreter state. Binding a different dict there is problematic, since the import machinery may use the internal reference to the original dict, rather than looking up sys.modules. The consequence is that the _contents_ of sys.modules must be swapped in and out, rather than simply binding something else there. ImportState objects may be used as context managers, to activate the state temporarily. During a with statement the dict in self.modules may not reflect the actual state. However, it _will_ be correct before and after the with statement. """<line_sep># all import states use the same lock lock=GlobalImportLock()<def_stmt>__init__ self *args **kwargs<block_start>self._saved=<none><block_end><def_stmt>__enter__ self<block_start>self.lock.acquire()<line_sep>self.activate()<block_end><def_stmt>__exit__ self *args **kwargs<block_start>self.deactivate()<line_sep>self.lock.release()<block_end><def_stmt>copy self<block_start>"""Return a shallow copy of the import state."""<line_sep><return>type(self)(self.modules.copy() self.meta_path[:] self.path[:] self.path_hooks[:] self.path_importer_cache.copy())<block_end><def_stmt>activate self force=<false><block_start>"""Have the interpreter use this import state, saving the old."""<if_stmt>self._saved<is><not><none><and><not>force<block_start><raise>TypeError("Already activated; try using a copy")<block_end>self._saved=_ImportState(sys.modules.copy() # saving away the contents sys.meta_path sys.path sys.path_hooks sys.path_importer_cache )<line_sep>#sys.modules = self.modules sys.meta_path=self.meta_path<line_sep>sys.path=self.path<line_sep>sys.path_hooks=self.meta_path<line_sep>sys.path_importer_cache=self.path_importer_cache<line_sep># accommodate sys.module's quirkiness sys.modules.clear()<line_sep>sys.modules.update(self.modules)<block_end><def_stmt>deactivate self<block_start>"""Restore the import state saved when this one activated."""<if_stmt><not>self._saved<block_start><raise>TypeError("Not activated yet")<block_end># sys.modules = self.modules sys.meta_path=self._saved.meta_path<line_sep>sys.path=self._saved.path<line_sep>sys.path_hooks=self._saved.path_hooks<line_sep>sys.path_importer_cache=self._saved.path_importer_cache<line_sep># accommodate sys.module's quirkiness 
self.modules.clear()<line_sep>self.modules.update(sys.modules)<line_sep>sys.modules.clear()<line_sep>sys.modules.update(self._saved.modules)<line_sep>self._saved=<none><block_end><block_end><def_stmt>default_import_state **overrides<block_start>"""Return an ImportState with defaults to the initial import state."""<line_sep>state={'modules':{} 'meta_path':[] 'path':site.getsitepackages() 'path_hooks':[] 'path_importer_cache':{} }<line_sep>state.update(overrides)<line_sep><return>ImportState(**state)<block_end><class_stmt>GlobalImportState(ImportState)<block_start>"""An ImportState that wraps the current state"""<line_sep># The underlying ImportState values will be ignored. <def_stmt>__new__ cls<block_start><return>super(GlobalImportState cls).__new__(cls *([<none>]<times>5))<block_end>@property<def_stmt>modules self<block_start>"""The cache of modules that have already been imported."""<line_sep><return>sys.modules<block_end>@property<def_stmt>meta_path self<block_start>"""The PEP 302 finders queried before 'path' is traversed."""<line_sep><return>sys.meta_path<block_end>@property<def_stmt>path self<block_start>"""The directories in which top-level packages are located."""<line_sep><return>sys.path<block_end>@property<def_stmt>path_hooks self<block_start>"""The PEP 302 path importers that are queried for a path."""<line_sep><return>sys.path_hooks<block_end>@property<def_stmt>path_importer_cache self<block_start>"""The cache of finders previously found through path_hooks."""<line_sep><return>sys.path_importer_cache<block_end><block_end>globalstate=GlobalImportState()<line_sep>
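A short, hedged sketch of how the module above is intended to be used; the override passed to default_import_state is illustrative and relies on `sys`, which the module itself already imports.
# Hedged usage sketch for ImportState / default_import_state / globalstate.
state = default_import_state(meta_path=list(sys.meta_path))   # reuse the live finders, fresh everything else
with state:                                                    # __enter__ acquires the lock and calls activate()
    pass                                                       # imports performed here land in state's modules dict
# on exit, deactivate() restores the previously saved sys.path / sys.meta_path / sys.modules contents
print(globalstate.path is sys.path)                            # True: globalstate always mirrors the live state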
<import_stmt>pytest<import_from_stmt>regexploit.ast.at EndOfString<import_from_stmt>regexploit.ast.sre SreOpParser<def_stmt>from_regex pattern:str<block_start><return>SreOpParser().parse_sre(pattern)<block_end>@pytest.mark.parametrize("r" [r".*b*" r".*\w*b*" r".+b*" ] )<def_stmt>test_cannot_backtrack r<block_start>dollar=EndOfString()<line_sep>dollar.set_character(from_regex(r).elements)<assert_stmt>dollar.character.is_any<block_end>@pytest.mark.parametrize("r" [r"x[ab]*b*" r"x+[ab]*" r"x+a*[ab]*a*b*" ] )<def_stmt>test_dollar_simple r<block_start>dollar=EndOfString()<line_sep>dollar.set_character(from_regex(r).elements)<assert_stmt>dollar.character<eq>from_regex("[ab]")<block_end>@pytest.mark.parametrize("r" [r"\w*b*" r"x\w*\w*b*" r"\w+b*" ] )<def_stmt>test_dollar_optionals_contained_by_mandatory r<block_start>dollar=EndOfString()<line_sep>dollar.set_character(from_regex(r).elements)<assert_stmt>dollar.character<eq>from_regex(r"[\w]").expand_categories()<block_end><def_stmt>test_whole_string <block_start>dollar=EndOfString()<line_sep>dollar.set_character(from_regex(r"a*a*").elements)<assert_stmt>dollar.character<eq>from_regex(r"[a]")<block_end><def_stmt>test_real <block_start>dollar=EndOfString()<line_sep>dollar.set_character(from_regex(r"-\d+(\s*\s*\s*)").elements)<assert_stmt>dollar.character<eq>from_regex(r"[\s]")<block_end>
<import_stmt>glob<import_stmt>logging<import_stmt>os<import_from_stmt>easyprocess EasyProcess<import_from_stmt>entrypoint2 entrypoint<line_sep># (cmd,grab,background) commands=["python3 -m pyscreenshot.check.versions" "python3 -m pyscreenshot.examples.virtdisp" "python3 -m pyscreenshot.check.speedtest" "python3 -m pyscreenshot.check.speedtest --childprocess 0" ]<def_stmt>empty_dir dir<block_start>files=glob.glob(os.path.join(dir "*"))<for_stmt>f files<block_start>os.remove(f)<block_end><block_end>@entrypoint<def_stmt>main <block_start>gendir=os.path.join(os.path.dirname(os.path.abspath(__file__)) "gen")<line_sep>logging.info("gendir: %s" gendir)<line_sep>os.makedirs(gendir exist_ok=<true>)<line_sep>empty_dir(gendir)<line_sep>pls=[]<try_stmt><block_start>os.chdir("gen")<for_stmt>cmd commands<block_start>logging.info("cmd: %s" cmd)<line_sep>fname_base=cmd.replace(" " "_")<line_sep>fname=fname_base+".txt"<line_sep>logging.info("cmd: %s" cmd)<line_sep>print("file name: %s"%fname)<with_stmt>open(fname "w")<as>f<block_start>f.write("$ "+cmd+"\n")<line_sep>p=EasyProcess(cmd).call()<line_sep>f.write(p.stdout)<line_sep>f.write(p.stderr)<line_sep>pls<augadd>[p]<block_end><block_end><block_end><finally_stmt><block_start>os.chdir("..")<for_stmt>p pls<block_start>p.stop()<block_end><block_end>embedme=EasyProcess(["npx" "embedme" "../README.md"])<line_sep>embedme.call()<line_sep>print(embedme.stdout)<assert_stmt>embedme.return_code<eq>0<assert_stmt><not>"but file does not exist"<in>embedme.stdout<block_end>
""" Doc string """<def_stmt>asdf <block_start><pass><block_end>
<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.losses Reduction<def_stmt>distance_func name x1 x2 eps:float=0.0<block_start><if_stmt>name<eq>"l1"<block_start>ax=1<line_sep><return>l1_dist(x1 x2 ax eps)<block_end><if_stmt>name<eq>"l2"<block_start>ax=1<line_sep><return>l2_dist(x1 x2 ax eps)<block_end><if_stmt>name<eq>"cosine"<block_start>ax=-1<line_sep><return>cosine_dist(x1 x2 ax eps)<block_end><block_end><def_stmt>l1_dist x1 x2 ax:int eps:float=0.0# sum over |x| + eps, i.e. L1 norm <block_start>x=x1-x2<line_sep><return>tf.reduce_sum(tf.abs(x) axis=ax)+eps<block_end><def_stmt>l2_dist x1 x2 ax:int eps:float=0.0# sqrt((sum over x^2) + eps), i.e. L2 norm <block_start>x=x1-x2<line_sep><return>(tf.reduce_sum(x<power>2 axis=ax)+eps)<power>0.5<block_end><def_stmt>cosine_dist x1 x2 ax:int eps:float=0.0# normalize by sqrt(max(sum(x**2), 1e-12)) <block_start>normalize_x1=tf.nn.l2_normalize(x1 axis=1)<line_sep>normalize_x2=tf.nn.l2_normalize(x2 axis=1)<line_sep>dist=(tf.losses.cosine_distance(normalize_x1 normalize_x2 axis=ax reduction=Reduction.NONE)+eps)<line_sep>dist=tf.squeeze(dist)<line_sep>dist=tf.cast(dist tf.float64)<line_sep><return>dist<block_end>
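A small, hedged sketch of calling distance_func in TF 1.x graph mode (consistent with the tf.losses import above); the toy tensors are made up.
# Hedged sketch: one L2 distance per row of a batch.
import numpy as np
a = tf.constant(np.random.rand(4, 8), dtype=tf.float32)
b = tf.constant(np.random.rand(4, 8), dtype=tf.float32)
d = distance_func("l2", a, b)          # shape (4,): sum of squares over axis 1, then square root
with tf.Session() as sess:             # tf.Session is TF 1.x only
    print(sess.run(d))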
''' /* * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* OpenFlow: protocol between controller and datapath. */ Created on 2015/7/13 :author: hubo '''<import_from_stmt>.common *<import_from_stmt>. common<import_from_stmt>namedstruct.namedstruct StructDefWarning<import_stmt>warnings<as>_warnings<with_stmt>_warnings.catch_warnings()<block_start>_warnings.filterwarnings('ignore' '^padding' StructDefWarning)<line_sep>''' /* Port number(s) meaning * --------------- -------------------------------------- * 0x0000 not assigned a meaning by OpenFlow 1.0 * 0x0001...0xfeff "physical" ports * 0xff00...0xfff7 "reserved" but not assigned a meaning by OpenFlow 1.0 * 0xfff8...0xffff "reserved" OFPP_* ports with assigned meanings */ /* Ranges. */ '''<line_sep>ofp_port=enum('ofp_port' globals() uint16 OFPP_MAX=0xff00 # /* Max # of switch ports. */ # /* Reserved output "ports". */ OFPP_IN_PORT=0xfff8 # /* Where the packet came in. */ OFPP_TABLE=0xfff9 # /* Perform actions in flow table. */ OFPP_NORMAL=0xfffa # /* Process with normal L2/L3. */ OFPP_FLOOD=0xfffb # /* All ports except input port and # * ports disabled by STP. */ OFPP_ALL=0xfffc # /* All ports except input port. */ OFPP_CONTROLLER=0xfffd # /* Send to controller. */ OFPP_LOCAL=0xfffe # /* Local openflow "port". */ OFPP_NONE=0xffff# /* Not associated with any port. */ )<line_sep>ofp_port_no=ofp_port<line_sep>OFPP_FIRST_RESV=0xfff8 # /* First assigned reserved port. */ OFPP_LAST_RESV=0xffff # /* Last assigned reserved port. 
*/ ofp_type=ofp_type.extend(globals() OFPT_VENDOR=4 OFPT_FEATURES_REQUEST=5 #/* Controller/switch message */ OFPT_FEATURES_REPLY=6 #/* Controller/switch message */ OFPT_GET_CONFIG_REQUEST=7 #/* Controller/switch message */ OFPT_GET_CONFIG_REPLY=8 #/* Controller/switch message */ OFPT_SET_CONFIG=9 #/* Controller/switch message */ OFPT_PACKET_IN=10 #/* Async message */ OFPT_FLOW_REMOVED=11 #/* Async message */ OFPT_PORT_STATUS=12 #/* Async message */ OFPT_PACKET_OUT=13 #/* Controller/switch message */ OFPT_FLOW_MOD=14 #/* Controller/switch message */ OFPT_PORT_MOD=15 #/* Controller/switch message */ OFPT_STATS_REQUEST=16 #/* Controller/switch message */ OFPT_STATS_REPLY=17 #/* Controller/switch message */ OFPT_BARRIER_REQUEST=18 #/* Controller/switch message */ OFPT_BARRIER_REPLY=19 #/* Controller/switch message */ OFPT_QUEUE_GET_CONFIG_REQUEST=20 #/* Controller/switch message */ OFPT_QUEUE_GET_CONFIG_REPLY=21#/* Controller/switch message */ )<line_sep>ofp_type_reply_set=set([OFPT_ECHO_REPLY OFPT_FEATURES_REPLY OFPT_GET_CONFIG_REPLY OFPT_STATS_REPLY OFPT_BARRIER_REPLY OFPT_QUEUE_GET_CONFIG_REPLY])<line_sep>ofp_type_asyncmessage_set=set([OFPT_PACKET_IN OFPT_FLOW_REMOVED OFPT_PORT_STATUS])<line_sep>OFP_VERSION=OFP10_VERSION<line_sep>ofp_msg=nstruct(name='ofp_msg' base=common.ofp_msg_mutable criteria=<lambda>x:x.header.version<eq>OFP_VERSION init=packvalue(OFP_VERSION 'header' 'version') classifyby=(OFP_VERSION ) classifier=<lambda>x:x.header.type extend={('header' 'type'):ofp_type})<line_sep>ofp_vendor=nstruct((experimenter_ids 'vendor') name='ofp_vendor' base=ofp_msg criteria=<lambda>x:x.header.type<eq>OFPT_VENDOR classifyby=(OFPT_VENDOR ) init=packvalue(OFPT_VENDOR 'header' 'type'))<line_sep>ofp_error_type=ofp_error_type.extend(globals() OFPET_FLOW_MOD_FAILED=3 OFPET_PORT_MOD_FAILED=4 OFPET_QUEUE_OP_FAILED=5)<line_sep>''' /* ofp_error_msg 'code' values for OFPET_FLOW_MOD_FAILED. 'data' contains * at least the first 64 bytes of the failed request. */ '''<line_sep>ofp_flow_mod_failed_code=enum('ofp_flow_mod_failed_code' globals() OFPFMFC_ALL_TABLES_FULL=0 # /* Flow not added because of full tables. */ OFPFMFC_OVERLAP=1 # /* Attempted to add overlapping flow with # * CHECK_OVERLAP flag set. */ OFPFMFC_EPERM=2 # /* Permissions error. */ OFPFMFC_BAD_EMERG_TIMEOUT=3 # /* Flow not added because of non-zero idle/hard # * timeout. */ OFPFMFC_BAD_COMMAND=4 # /* Unknown command. */ OFPFMFC_UNSUPPORTED=5 # /* Unsupported action list - cannot process in # * the order specified. */ )<line_sep>''' /* ofp_error_msg 'code' values for OFPET_PORT_MOD_FAILED. 'data' contains * at least the first 64 bytes of the failed request. */ '''<line_sep>ofp_port_mod_failed_code=enum('ofp_port_mod_failed_code' globals() OFPPMFC_BAD_PORT=0 #/* Specified port does not exist. */ OFPPMFC_BAD_HW_ADDR=1 #/* Specified hardware address is wrong. */ )<line_sep>''' /* ofp_error msg 'code' values for OFPET_QUEUE_OP_FAILED. 'data' contains * at least the first 64 bytes of the failed request */ '''<line_sep>ofp_queue_op_failed_code=enum('ofp_queue_op_failed_code' globals() OFPQOFC_BAD_PORT=0 # /* Invalid port (or port does not exist). */ OFPQOFC_BAD_QUEUE=1 # /* Queue does not exist. */ OFPQOFC_EPERM=2# /* Permissions error. 
*/ )<line_sep>ofp_error_types=dict(ofp_error_types)<line_sep>ofp_error_types.update({OFPET_FLOW_MOD_FAILED:ofp_error_typedef(OFPET_FLOW_MOD_FAILED ofp_flow_mod_failed_code OFP_VERSION ofp_error_type) OFPET_PORT_MOD_FAILED:ofp_error_typedef(OFPET_PORT_MOD_FAILED ofp_port_mod_failed_code OFP_VERSION ofp_error_type) OFPET_QUEUE_OP_FAILED:ofp_error_typedef(OFPET_QUEUE_OP_FAILED ofp_queue_op_failed_code OFP_VERSION ofp_error_type)})<line_sep>ofp_switch_config=nstruct((ofp_config_flags 'flags') (uint16 'miss_send_len') name='ofp_switch_config' base=ofp_msg criteria=<lambda>x:x.header.type<eq>OFPT_GET_CONFIG_REPLY<or>x.header.type<eq>OFPT_SET_CONFIG classifyby=(OFPT_GET_CONFIG_REPLY OFPT_SET_CONFIG) init=packvalue(OFPT_SET_CONFIG 'header' 'type'))<line_sep>''' /* OpenFlow 1.0 specific capabilities supported by the datapath (struct * ofp_switch_features, member capabilities). */ '''<line_sep>ofp_capabilities=ofp_capabilities.extend(globals() OFPC_STP=1<lshift>3 #/* 802.1d spanning tree. */ OFPC_RESERVED=1<lshift>4)<line_sep>#/* Reserved, must not be set. */ ''' /* OpenFlow 1.0 specific current state of the physical port. These are not * configurable from the controller. */ /* The OFPPS10_STP_* bits have no effect on switch operation. The * controller must adjust OFPPC_NO_RECV, OFPPC_NO_FWD, and * OFPPC_NO_PACKET_IN appropriately to fully implement an 802.1D spanning * tree. */ '''<line_sep>ofp_port_state=ofp_port_state.extend(globals() OFPPS_STP_LISTEN=0<lshift>8 # /* Not learning or relaying frames. */ OFPPS_STP_LEARN=1<lshift>8 # /* Learning but not relaying frames. */ OFPPS_STP_FORWARD=2<lshift>8 # /* Learning and relaying frames. */ OFPPS_STP_BLOCK=3<lshift>8# /* Not part of spanning tree. */ )<line_sep># /* Bit mask for OFPPS10_STP_* values. */ OFPPS_STP_MASK=3<lshift>8<line_sep>OFPPS_ALL=OFPPS_LINK_DOWN|OFPPS_STP_MASK<line_sep>ofp_action_type=enum('ofp_action_type' globals() uint16 OFPAT_OUTPUT=0 #/* Output to switch port. */ OFPAT_SET_VLAN_VID=1 #/* Set the 802.1q VLAN id. */ OFPAT_SET_VLAN_PCP=2 #/* Set the 802.1q priority. */ OFPAT_STRIP_VLAN=3 #/* Strip the 802.1q header. */ OFPAT_SET_DL_SRC=4 #/* Ethernet source address. */ OFPAT_SET_DL_DST=5 #/* Ethernet destination address. */ OFPAT_SET_NW_SRC=6 #/* IP source address. */ OFPAT_SET_NW_DST=7 #/* IP destination address. */ OFPAT_SET_NW_TOS=8 #/* IP ToS (DSCP field, 6 bits). */ OFPAT_SET_TP_SRC=9 #/* TCP/UDP source port. */ OFPAT_SET_TP_DST=10 #/* TCP/UDP destination port. */ OFPAT_ENQUEUE=11 #/* Output to queue. */ OFPAT_VENDOR=0xffff)<line_sep>ofp_action=nstruct((ofp_action_type 'type') (uint16 'len') name='ofp_action' size=<lambda>x:x.len prepack=packsize('len') classifier=<lambda>x:x.type)<line_sep>ofp_action_vendor=nstruct((experimenter_ids 'vendor') name='ofp_action_vendor' base=ofp_action criteria=<lambda>x:x.type<eq>OFPAT_VENDOR classifyby=(OFPAT_VENDOR ) init=packvalue(OFPAT_VENDOR 'type'))<line_sep>''' /* Action structure for OFPAT10_OUTPUT, which sends packets out 'port'. * When the 'port' is the OFPP_CONTROLLER, 'max_len' indicates the max * number of bytes to send. A 'max_len' of zero means no bytes of the * packet should be sent. */ '''<line_sep>ofp_action_output=nstruct((ofp_port 'port') (uint16 'max_len') name='ofp_action_output' base=ofp_action criteria=<lambda>x:x.type<eq>OFPAT_OUTPUT classifyby=(OFPAT_OUTPUT ) init=packvalue(OFPAT_OUTPUT 'type'))<line_sep>''' /* Action structure for OFPAT10_SET_VLAN_VID and OFPAT11_SET_VLAN_VID. */ '''<line_sep>ofp_action_vlan_vid=nstruct((uint16 'vlan_vid') # /* VLAN id. 
*/ (uint8[2] ) name='ofp_action_vlan_vid' base=ofp_action criteria=<lambda>x:x.type<eq>OFPAT_SET_VLAN_VID classifyby=(OFPAT_SET_VLAN_VID ) init=packvalue(OFPAT_SET_VLAN_VID 'type'))<line_sep>''' /* Action structure for OFPAT10_SET_VLAN_PCP and OFPAT11_SET_VLAN_PCP. */ '''<line_sep>ofp_action_vlan_pcp=nstruct((uint8 'vlan_pcp') # /* VLAN priority. */ (uint8[3] ) name='ofp_action_vlan_pcp' base=ofp_action criteria=<lambda>x:x.type<eq>OFPAT_SET_VLAN_PCP classifyby=(OFPAT_SET_VLAN_PCP ) init=packvalue(OFPAT_SET_VLAN_PCP 'type'))<line_sep>''' /* Action structure for OFPAT10_SET_DL_SRC/DST and OFPAT11_SET_DL_SRC/DST. */ '''<line_sep>ofp_action_dl_addr=nstruct((mac_addr 'dl_addr') # /* Ethernet address. */ (uint8[6] ) name='ofp_action_dl_addr' base=ofp_action criteria=<lambda>x:x.type<eq>OFPAT_SET_DL_SRC<or>x.type<eq>OFPAT_SET_DL_DST classifyby=(OFPAT_SET_DL_SRC OFPAT_SET_DL_DST) init=packvalue(OFPAT_SET_DL_SRC 'type'))<line_sep>''' /* Action structure for OFPAT10_SET_NW_SRC/DST and OFPAT11_SET_NW_SRC/DST. */ '''<line_sep>ofp_action_nw_addr=nstruct((ip4_addr 'nw_addr') # /* IP address. */ name='ofp_action_nw_addr' base=ofp_action criteria=<lambda>x:x.type<eq>OFPAT_SET_NW_SRC<or>x.type<eq>OFPAT_SET_NW_DST classifyby=(OFPAT_SET_NW_SRC OFPAT_SET_NW_DST) init=packvalue(OFPAT_SET_NW_SRC 'type'))<line_sep>''' /* Action structure for OFPAT10_SET_NW_TOS and OFPAT11_SET_NW_TOS. */ '''<line_sep>ofp_action_nw_tos=nstruct((uint8 'nw_tos') # /* DSCP in high 6 bits, rest ignored. */ (uint8[3] ) name='ofp_action_nw_tos' base=ofp_action criteria=<lambda>x:x.type<eq>OFPAT_SET_NW_TOS classifyby=(OFPAT_SET_NW_TOS ) init=packvalue(OFPAT_SET_NW_TOS 'type'))<line_sep>''' /* Action structure for OFPAT10_SET_TP_SRC/DST and OFPAT11_SET_TP_SRC/DST. */ '''<line_sep>ofp_action_tp_port=nstruct((uint16 'tp_port') # /* TCP/UDP port. */ (uint8[2] ) name='ofp_action_tp_port' base=ofp_action criteria=<lambda>x:x.type<eq>OFPAT_SET_TP_SRC<or>x.type<eq>OFPAT_SET_TP_DST classifyby=(OFPAT_SET_TP_SRC OFPAT_SET_TP_DST) init=packvalue(OFPAT_SET_TP_SRC 'type'))<line_sep>''' /* OpenFlow 1.0 specific features of physical ports available in a datapath. */ '''<line_sep>ofp_port_features=ofp_port_features.extend(globals() OFPPF_COPPER=1<lshift>7 #/* Copper medium. */ OFPPF_FIBER=1<lshift>8 #/* Fiber medium. */ OFPPF_AUTONEG=1<lshift>9 #/* Auto-negotiation. */ OFPPF_PAUSE=1<lshift>10 #/* Pause. */ OFPPF_PAUSE_ASYM=1<lshift>11#/* Asymmetric pause. */ )<line_sep>''' /* Description of a physical port */ '''<line_sep>ofp_phy_port=nstruct((ofp_port 'port_no') (mac_addr 'hw_addr') (char[OFP_MAX_PORT_NAME_LEN] 'name') #/* Null-terminated */ (ofp_port_config 'config') # /* Bitmap of OFPPC_* and OFPPC10_* flags. */ (ofp_port_state 'state') # /* Bitmap of OFPPS_* and OFPPS10_* flags. */ #/* Bitmaps of OFPPF_* and OFPPF10_* that describe features. All bits # * zeroed if unsupported or unavailable. */ (ofp_port_features 'curr') # /* Current features. */ (ofp_port_features 'advertised') # /* Features being advertised by the port. */ (ofp_port_features 'supported') # /* Features supported by the port. */ (ofp_port_features 'peer') # /* Features advertised by peer. 
*/ name='ofp_phy_port')<line_sep>ofp_action_type_bitwise=enum('ofp_action_type_bitwise' <none> uint32 <true> **dict((k 1<lshift>v)<for>(k v) ofp_action_type.getDict().items()<if>v<l>32))<line_sep>ofp_switch_features=nstruct((uint64 'datapath_id') (uint32 'n_buffers') (uint8 'n_tables') (uint8[3] ) (ofp_capabilities 'capabilities') (ofp_action_type_bitwise 'actions') (ofp_phy_port[0] 'ports') name='ofp_switch_features' base=ofp_msg criteria=<lambda>x:x.header.type<eq>OFPT_FEATURES_REPLY classifyby=(OFPT_FEATURES_REPLY ) init=packvalue(OFPT_FEATURES_REPLY 'header' 'type'))<line_sep>''' /* Modify behavior of the physical port */ '''<line_sep>ofp_port_mod=nstruct((ofp_port 'port_no') (mac_addr 'hw_addr') (ofp_port_config 'config') # /* Bitmap of OFPPC_* flags. */ (ofp_port_config 'mask') # /* Bitmap of OFPPC_* flags to be changed. */ (ofp_port_features 'advertise') # /* Bitmap of "ofp_port_features"s. Zero all bits to prevent any action taking place. */ (uint8[4] ) # /* Pad to 64-bits. */ name='ofp_port_mod' base=ofp_msg criteria=<lambda>x:x.header.type<eq>OFPT_PORT_MOD classifyby=(OFPT_PORT_MOD ) init=packvalue(OFPT_PORT_MOD 'header' 'type'))<line_sep>ofp_queue_prop_header=nstruct((ofp_queue_properties 'property') (uint16 'len') (uint8[4] ) name='ofp_queue_prop_header')<line_sep>ofp_queue_prop=nstruct((ofp_queue_prop_header 'prop_header') name='ofp_queue_prop' size=<lambda>x:x.prop_header.len prepack=packrealsize('prop_header' 'len') classifier=<lambda>x:x.prop_header.property)<line_sep>ofp_queue_prop_min_rate=nstruct((uint16 'rate') (uint8[6] ) base=ofp_queue_prop criteria=<lambda>x:x.prop_header.property<eq>OFPQT_MIN_RATE classifyby=(OFPQT_MIN_RATE ) init=packvalue(OFPQT_MIN_RATE 'prop_header' 'property') name='ofp_queue_prop_min_rate')<line_sep>ofp_packet_queue=nstruct((uint32 'queue_id') # /* id for the specific queue. */ (uint16 'len') # /* Length in bytes of this queue desc. */ (uint8[2] ) # /* 64-bit alignment. */ (ofp_queue_prop[0] 'properties') name='ofp_packet_queue' size=<lambda>x:x.len prepack=packsize('len'))<line_sep>''' /* Query for port queue configuration. */ '''<line_sep>ofp_queue_get_config_request=nstruct((uint16 'port') # /* Port to be queried. Should refer to a valid physical port (i.e. < OFPP_MAX) */ (uint8[2] ) name='ofp_queue_get_config_request' base=ofp_msg criteria=<lambda>x:x.header.type<eq>OFPT_QUEUE_GET_CONFIG_REQUEST classifyby=(OFPT_QUEUE_GET_CONFIG_REQUEST ) init=packvalue(OFPT_QUEUE_GET_CONFIG_REQUEST 'header' 'type'))<line_sep>''' /* Queue configuration for a given port. */ '''<line_sep>ofp_queue_get_config_reply=nstruct((uint16 'port') (uint8[6] ) (ofp_packet_queue[0] 'queues') # /* List of configured queues. */ base=ofp_msg name='ofp_queue_get_config_reply' criteria=<lambda>x:x.header.type<eq>OFPT_QUEUE_GET_CONFIG_REPLY classifyby=(OFPT_QUEUE_GET_CONFIG_REPLY ) init=packvalue(OFPT_QUEUE_GET_CONFIG_REPLY 'header' 'type'))<line_sep>''' /* Packet received on port (datapath -> controller). */ '''<line_sep>ofp_packet_in=nstruct((uint32 'buffer_id') # /* ID assigned by datapath. */ (uint16 'total_len') # /* Full length of frame. */ (ofp_port 'in_port') # /* Port on which frame was received. */ (ofp_packet_in_reason 'reason') # /* Reason packet is being sent (one of OFPR_*) */ (uint8 ) (raw 'data') base=ofp_msg name='ofp_packet_in' criteria=<lambda>x:x.header.type<eq>OFPT_PACKET_IN classifyby=(OFPT_PACKET_IN ) init=packvalue(OFPT_PACKET_IN 'header' 'type'))<line_sep>''' /* OFPAT10_ENQUEUE action struct: send packets to given queue on port. 
*/ '''<line_sep>ofp_action_enqueue=nstruct((uint16 'port') # /* Port that queue belongs. Should (uint8[6] ) # /* Pad for 64-bit alignment. */ (uint32 'queue_id') # /* Where to enqueue the packets. */ name='ofp_action_enqueue' base=ofp_action criteria=<lambda>x:x.type<eq>OFPAT_ENQUEUE classifyby=(OFPAT_ENQUEUE ) init=packvalue(OFPAT_ENQUEUE 'type'))<line_sep>''' /* Send packet (controller -> datapath). */ '''<def_stmt>_ofp_packet_out_actions_packsize x<block_start>x.actions_len=x._realsize()-2<block_end>ofp_packet_out_actions=nstruct((uint16 'actions_len') (ofp_action[0] 'actions') name='ofp_packet_out_actions' size=<lambda>x:x.actions_len+2 prepack=_ofp_packet_out_actions_packsize padding=1)<line_sep>ofp_packet_out=nstruct((uint32 'buffer_id') # /* ID assigned by datapath or UINT32_MAX. */ (ofp_port 'in_port') # /* Packet's input port (OFPP_NONE if none). */ (ofp_packet_out_actions ) (raw 'data') name='ofp_packet_out' base=ofp_msg criteria=<lambda>x:x.header.type<eq>OFPT_PACKET_OUT classifyby=(OFPT_PACKET_OUT ) init=packvalue(OFPT_PACKET_OUT 'header' 'type'))<line_sep>''' /* Flow wildcards. */ '''<line_sep>OFPFW_NW_SRC_SHIFT=8<line_sep>OFPFW_NW_SRC_BITS=6<line_sep>OFPFW_NW_DST_SHIFT=14<line_sep>OFPFW_NW_DST_BITS=6<line_sep>ofp_flow_wildcards=enum('ofp_flow_wildcards' globals() uint32 <true> OFPFW_IN_PORT=1<lshift>0 #/* Switch input port. */ OFPFW_DL_VLAN=1<lshift>1 #/* VLAN vid. */ OFPFW_DL_SRC=1<lshift>2 #/* Ethernet source address. */ OFPFW_DL_DST=1<lshift>3 #/* Ethernet destination address. */ OFPFW_DL_TYPE=1<lshift>4 #/* Ethernet frame type. */ OFPFW_NW_PROTO=1<lshift>5 #/* IP protocol. */ OFPFW_TP_SRC=1<lshift>6 #/* TCP/UDP source port. */ OFPFW_TP_DST=1<lshift>7 #/* TCP/UDP destination port. */ #/* IP source address wildcard bit count. 0 is exact match, 1 ignores the #* LSB, 2 ignores the 2 least-significant bits, ..., 32 and higher wildcard #* the entire field. This is the *opposite* of the usual convention where #* e.g. /24 indicates that 8 bits (not 24 bits) are wildcarded. */ OFPFW_NW_SRC_MASK=(((1<lshift>OFPFW_NW_SRC_BITS)-1)<lshift>OFPFW_NW_SRC_SHIFT) OFPFW_NW_SRC_ALL=32<lshift>OFPFW_NW_SRC_SHIFT # /* IP destination address wildcard bit count. Same format as source. */ OFPFW_NW_DST_MASK=(((1<lshift>OFPFW_NW_DST_BITS)-1)<lshift>OFPFW_NW_DST_SHIFT) OFPFW_NW_DST_ALL=32<lshift>OFPFW_NW_DST_SHIFT OFPFW_DL_VLAN_PCP=1<lshift>20 # /* VLAN priority. */ OFPFW_NW_TOS=1<lshift>21 # /* IP ToS (DSCP field, 6 bits). */ # /* Wildcard all fields. */ OFPFW_ALL=((1<lshift>22)-1))<line_sep>#/* The wildcards for ICMP type and code fields use the transport source # * and destination port fields, respectively. */ OFPFW_ICMP_TYPE=OFPFW_TP_SRC<line_sep>OFPFW_ICMP_CODE=OFPFW_TP_DST<line_sep>#/* The VLAN id is 12-bits, so we can use the entire 16 bits to indicate # * special conditions. All ones indicates that 802.1Q header is not present. # */ OFP_VLAN_NONE=0xffff<line_sep>''' /* Fields to match against flows */ '''<line_sep>ofp_match=nstruct((ofp_flow_wildcards 'wildcards') # /* Wildcard fields. */ (ofp_port 'in_port') # /* Input switch port. */ (mac_addr 'dl_src') # /* Ethernet source address. */ (mac_addr 'dl_dst') # /* Ethernet destination address. */ (uint16 'dl_vlan') # /* Input VLAN. */ (uint8 'dl_vlan_pcp') # /* Input VLAN priority. */ (uint8[1] ) # /* Align to 64-bits. */ (ethertype 'dl_type') # /* Ethernet frame type. */ (uint8 'nw_tos') # /* IP ToS (DSCP field, 6 bits). */ (uint8 'nw_proto') # /* IP protocol or lower 8 bits of ARP opcode. */ (uint8[2] ) # /* Align to 64-bits. 
*/ (ip4_addr 'nw_src') # /* IP source address. */ (ip4_addr 'nw_dst') # /* IP destination address. */ (uint16 'tp_src') # /* TCP/UDP source port. */ (uint16 'tp_dst') # /* TCP/UDP destination port. */ name='ofp_match')<line_sep>ofp_flow_mod_flags=ofp_flow_mod_flags.extend(globals() OFPFF_EMERG=1<lshift>2#/* Part of "emergency flow cache". */ )<line_sep>''' /* Flow setup and teardown (controller -> datapath). */ '''<line_sep>ofp_flow_mod=nstruct((ofp_match 'match') # /* Fields to match */ (uint64 'cookie') # /* Opaque controller-issued identifier. */ # /* Flow actions. */ (ofp_flow_mod_command 'command') # /* One of OFPFC_*. */ (uint16 'idle_timeout') # /* Idle time before discarding (seconds). */ (uint16 'hard_timeout') # /* Max time before discarding (seconds). */ (uint16 'priority') # /* Priority level of flow entry. */ (uint32 'buffer_id') # /* Buffered packet to apply to (or -1). Not meaningful for OFPFC_DELETE*. */ #/* For OFPFC_DELETE* commands, require matching entries to include this as an # output port. A value of OFPP_NONE indicates no restriction. */ (ofp_port 'out_port') (ofp_flow_mod_flags 'flags') # /* One of OFPFF_*. */ (ofp_action[0] 'actions') # /* The action length is inferred from the length field in the header. */ base=ofp_msg criteria=<lambda>x:x.header.type<eq>OFPT_FLOW_MOD classifyby=(OFPT_FLOW_MOD ) init=packvalue(OFPT_FLOW_MOD 'header' 'type') name='ofp_flow_mod')<line_sep>''' /* Flow removed (datapath -> controller). */ '''<line_sep>ofp_flow_removed=nstruct((ofp_match 'match') # /* Description of fields. */ (uint64 'cookie') # /* Opaque controller-issued identifier. */ (uint16 'priority') # /* Priority level of flow entry. */ (ofp_flow_removed_reason 'reason') # /* One of OFPRR_*. */ (uint8[1] ) # /* Align to 32-bits. */ (uint32 'duration_sec') # /* Time flow was alive in seconds. */ (uint32 'duration_nsec') # /* Time flow was alive in nanoseconds beyond duration_sec. */ (uint16 'idle_timeout') # /* Idle timeout from original flow mod. */ (uint8[2] ) # /* Align to 64-bits. */ (uint64 'packet_count') (uint64 'byte_count') name='ofp_flow_removed' base=ofp_msg criteria=<lambda>x:x.header.type<eq>OFPT_FLOW_REMOVED classifyby=(OFPT_FLOW_REMOVED ) init=packvalue(OFPT_FLOW_REMOVED 'header' 'type'))<line_sep>ofp_port_status=nstruct((ofp_port_reason 'reason') (uint8[7] ) (ofp_phy_port 'desc') name='ofp_port_status' base=ofp_msg criteria=<lambda>x:x.header.type<eq>OFPT_PORT_STATUS classifyby=(OFPT_PORT_STATUS ) init=packvalue(OFPT_PORT_STATUS 'header' 'type'))<line_sep>''' /* Statistics request or reply message. */ '''<line_sep>ofp_stats_types=enum('ofp_stats_types' globals() uint16 #/* Description of this OpenFlow switch. #* The request body is empty. #* The reply body is struct ofp_desc_stats. */ OFPST_DESC=0 #/* Individual flow statistics. #* The request body is struct ofp_flow_stats_request. #* The reply body is an array of struct ofp_flow_stats. */ OFPST_FLOW=1 #/* Aggregate flow statistics. #* The request body is struct ofp_aggregate_stats_request. #* The reply body is struct ofp_aggregate_stats_reply. */ OFPST_AGGREGATE=2 #/* Flow table statistics. #* The request body is empty. #* The reply body is an array of struct ofp_table_stats. */ OFPST_TABLE=3 #/* Physical port statistics. #* The request body is struct ofp_port_stats_request. #* The reply body is an array of struct ofp_port_stats. */ OFPST_PORT=4 #/* Queue statistics for a port #* The request body defines the port #* The reply body is an array of struct ofp_queue_stats */ OFPST_QUEUE=5 #/* Vendor extension. 
#* The request and reply bodies begin with a 32-bit vendor ID, which takes #* the same form as in "struct ofp_vendor_header". The request and reply #* bodies are otherwise vendor-defined. */ OFPST_VENDOR=0xffff)<line_sep>ofp_stats_msg=nstruct((ofp_stats_types 'type') # /* One of the OFPST_* constants. */ (ofp_stats_reply_flags 'flags') # /* Requests: always 0. # * Replies: 0 or OFPSF_REPLY_MORE. */ name='ofp_stats_msg' base=ofp_msg criteria=<lambda>x:x.header.type<eq>OFPT_STATS_REQUEST<or>x.header.type<eq>OFPT_STATS_REPLY classifyby=(OFPT_STATS_REQUEST OFPT_STATS_REPLY) init=packvalue(OFPT_STATS_REQUEST 'header' 'type'))<line_sep>ofp_stats_request=nstruct(name='ofp_stats_request' base=ofp_stats_msg criteria=<lambda>x:x.header.type<eq>OFPT_STATS_REQUEST classifier=<lambda>x:x.type init=packvalue(OFPT_STATS_REQUEST 'header' 'type'))<line_sep>ofp_stats_reply=nstruct(name='ofp_stats_request' base=ofp_stats_msg criteria=<lambda>x:x.header.type<eq>OFPT_STATS_REPLY classifier=<lambda>x:x.type init=packvalue(OFPT_STATS_REPLY 'header' 'type'))<line_sep>DESC_STR_LEN=256<line_sep>SERIAL_NUM_LEN=32<line_sep>ofp_desc_stats=nstruct((char[DESC_STR_LEN] 'mfr_desc') (char[DESC_STR_LEN] 'hw_desc') (char[DESC_STR_LEN] 'sw_desc') (char[SERIAL_NUM_LEN] 'serial_num') (char[DESC_STR_LEN] 'dp_desc') name='ofp_desc_stats')<line_sep>ofp_desc_stats_reply=nstruct((ofp_desc_stats ) name='ofp_desc_stats_reply' base=ofp_stats_reply criteria=<lambda>x:x.type<eq>OFPST_DESC classifyby=(OFPST_DESC ) init=packvalue(OFPST_DESC 'type'))<line_sep>''' /* Stats request of type OFPST_AGGREGATE or OFPST_FLOW. */ '''<line_sep>ofp_flow_stats_request=nstruct((ofp_match 'match') # /* Fields to match. */ (ofp_table 'table_id') # /* ID of table to read (from ofp_table_stats) or 0xff for all tables. */ (uint8 ) # /* Align to 32 bits. */ (ofp_port 'out_port') # /* Require matching entries to include this as an output port. A value of OFPP_NONE indicates no restriction. */ name='ofp_flow_stats_request' base=ofp_stats_request criteria=<lambda>x:x.type<eq>OFPST_FLOW<or>x.type<eq>OFPST_AGGREGATE classifyby=(OFPST_FLOW OFPST_AGGREGATE) init=packvalue(OFPST_FLOW 'type'))<line_sep>''' /* Body of reply to OFPST_FLOW request. */ '''<line_sep>ofp_flow_stats=nstruct((uint16 'length') #/* Length of this entry. */ (uint8 'table_id') #/* ID of table flow came from. */ (uint8 ) (ofp_match 'match') #/* Description of fields. */ (uint32 'duration_sec') #/* Time flow has been alive in seconds. */ (uint32 'duration_nsec') #/* Time flow has been alive in nanoseconds beyond duration_sec. */ (uint16 'priority') #/* Priority of the entry. Only meaningful when this is not an exact-match entry. */ (uint16 'idle_timeout') #/* Number of seconds idle before expiration. */ (uint16 'hard_timeout') #/* Number of seconds before expiration. */ (uint8[6] ) #/* Align to 64 bits. */ (uint64 'cookie') #/* Opaque controller-issued identifier. */ (uint64 'packet_count') #/* Number of packets in flow. */ (uint64 'byte_count') #/* Number of bytes in flow. */ (ofp_action[0] 'actions') #/* Actions. */ name='ofp_flow_stats' size=<lambda>x:x.length prepack=packsize('length'))<line_sep>ofp_flow_stats_reply=nstruct((ofp_flow_stats[0] 'stats') name='ofp_flow_stats_reply' base=ofp_stats_reply criteria=<lambda>x:x.type<eq>OFPST_FLOW classifyby=(OFPST_FLOW ) init=packvalue(OFPST_FLOW 'type'))<line_sep>ofp_table=enum('ofp_table' globals() uint8 OFPTT_ALL=0xff)<line_sep>''' /* Body for ofp_stats_request of type OFPST_AGGREGATE. 
*/ '''<line_sep>ofp_aggregate_stats_request=nstruct((ofp_match 'match') # /* Fields to match. */ (ofp_table 'table_id') # /* ID of table to read (from ofp_table_stats) # 0xff for all tables or 0xfe for emergency. */ (uint8 ) # /* Align to 32 bits. */ (ofp_port 'out_port') # /* Require matching entries to include this # as an output port. A value of OFPP_NONE # indicates no restriction. */ base=ofp_stats_request criteria=<lambda>x:x.type<eq>OFPST_AGGREGATE classifyby=(OFPST_AGGREGATE ) init=packvalue(OFPST_AGGREGATE 'type') name='ofp_aggregate_stats_request')<line_sep>''' /* Body of reply to OFPST_AGGREGATE request. */ '''<line_sep>ofp_aggregate_stats_reply=nstruct((uint64 'packet_count') # /* Number of packets in flows. */ (uint64 'byte_count') # /* Number of bytes in flows. */ (uint32 'flow_count') # /* Number of flows. */ (uint8[4] ) base=ofp_stats_reply criteria=<lambda>x:x.type<eq>OFPST_AGGREGATE classifyby=(OFPST_AGGREGATE ) init=packvalue(OFPST_AGGREGATE 'type') name='ofp_aggregate_stats_reply')<line_sep>''' /* Body of reply to OFPST_TABLE request. */ '''<line_sep>ofp_table_stats=nstruct((uint8 'table_id') # /* Identifier of table. Lower numbered tables are consulted first. */ (uint8[3] ) # /* Align to 32-bits. */ (char[OFP_MAX_TABLE_NAME_LEN] 'name') (ofp_flow_wildcards 'wildcards') # /* Bitmap of OFPFW_* wildcards that are supported by the table. */ (uint32 'max_entries') # /* Max number of entries supported. */ (uint32 'active_count') # /* Number of active entries. */ (uint64 'lookup_count') # /* # of packets looked up in table. */ (uint64 'matched_count') # /* Number of packets that hit table. */ name='ofp_table_stats')<line_sep>ofp_table_stats_reply=nstruct((ofp_table_stats[0] 'stats') name='ofp_table_stats_reply' base=ofp_stats_reply criteria=<lambda>x:x.type<eq>OFPST_TABLE classifyby=(OFPST_TABLE ) init=packvalue(OFPST_TABLE 'type'))<line_sep>''' /* Stats request of type OFPST_PORT. */ '''<line_sep>ofp_port_stats_request=nstruct((ofp_port 'port_no') #/* OFPST_PORT message may request statistics for a single port (specified with port_no) # or for all ports (port_no == OFPP_NONE). */ (uint8[6] ) name='ofp_port_stats_request' base=ofp_stats_request criteria=<lambda>x:x.type<eq>OFPST_PORT classifyby=(OFPST_PORT ) init=packvalue(OFPST_PORT 'type'))<line_sep>''' /* Body of reply to OFPST_PORT request. If a counter is unsupported, set * the field to all ones. */ '''<line_sep>ofp_port_stats=nstruct((uint16 'port_no') (uint8[6] ) (uint64 'rx_packets') # /* Number of received packets. */ (uint64 'tx_packets') # /* Number of transmitted packets. */ (uint64 'rx_bytes') # /* Number of received bytes. */ (uint64 'tx_bytes') # /* Number of transmitted bytes. */ (uint64 'rx_dropped') # /* Number of packets dropped by RX. */ (uint64 'tx_dropped') # /* Number of packets dropped by TX. */ (uint64 'rx_errors') # /* Number of receive errors. This is a #super-set of receive errors and should be #great than or equal to the sum of all #rx_*_err values. */ (uint64 'tx_errors') # /* Number of transmit errors. This is a super-set of transmit errors. */ (uint64 'rx_frame_err') # /* Number of frame alignment errors. */ (uint64 'rx_over_err') # /* Number of packets with RX overrun. */ (uint64 'rx_crc_err') # /* Number of CRC errors. */ (uint64 'collisions') # /* Number of collisions. 
*/ name='ofp_port_stats')<line_sep>ofp_port_stats_reply=nstruct((ofp_port_stats[0] 'stats') name='ofp_port_stats_reply' base=ofp_stats_reply criteria=<lambda>x:x.type<eq>OFPST_PORT classifyby=(OFPST_PORT ) init=packvalue(OFPST_PORT 'type'))<line_sep>''' /* All ones is used to indicate all queues in a port (for stats retrieval). */ '''<line_sep>ofp_queue=enum('ofp_queue' globals() uint32 OFPQ_ALL=0xffffffff)<line_sep>''' /* Body for stats request of type OFPST_QUEUE. */ '''<line_sep>ofp_queue_stats_request=nstruct((ofp_port 'port_no') # /* All ports if OFPP_ALL. */ (uint8[2] ) # /* Align to 32-bits. */ (ofp_queue 'queue_id') # /* All queues if OFPQ_ALL. */ name='ofp_queue_stats_request' base=ofp_stats_request criteria=<lambda>x:x.type<eq>OFPST_QUEUE classifyby=(OFPST_QUEUE ) init=packvalue(OFPST_QUEUE 'type'))<line_sep>''' /* Body for stats reply of type OFPST_QUEUE consists of an array of this * structure type. */ '''<line_sep>ofp_queue_stats=nstruct((uint16 'port_no') (uint8[2] ) # /* Align to 32-bits. */ (uint32 'queue_id') # /* Queue id. */ (uint64 'tx_bytes') # /* Number of transmitted bytes. */ (uint64 'tx_packets') # /* Number of transmitted packets. */ (uint64 'tx_errors') # /* # of packets dropped due to overrun. */ name='ofp_queue_stats')<line_sep>ofp_queue_stats_reply=nstruct((ofp_queue_stats[0] 'stats') name='ofp_queue_stats_reply' base=ofp_stats_reply criteria=<lambda>x:x.type<eq>OFPST_QUEUE classifyby=(OFPST_QUEUE ) init=packvalue(OFPST_QUEUE 'type'))<line_sep>''' /* Vendor extension stats message. */ '''<line_sep>ofp_vendor_stats_request=nstruct((experimenter_ids 'vendor') name='ofp_vendor_stats_request' base=ofp_stats_request criteria=<lambda>x:x.type<eq>OFPST_VENDOR classifyby=(OFPST_VENDOR ) init=packvalue(OFPST_VENDOR 'type')# /* Followed by vendor-defined arbitrary additional data. */ )<line_sep>ofp_vendor_stats_reply=nstruct((experimenter_ids 'vendor') name='ofp_vendor_stats_reply' base=ofp_stats_reply criteria=<lambda>x:x.type<eq>OFPST_VENDOR classifyby=(OFPST_VENDOR ) init=packvalue(OFPST_VENDOR 'type')# /* Followed by vendor-defined arbitrary additional data. */ )<line_sep>ofp_vendor_vendorid='vendor'<line_sep>ofp_vendor_subtype='subtype'<line_sep>ofp_action_vendor_vendorid='vendor'<line_sep>ofp_action_vendor_subtype='subtype'<line_sep>ofp_stats_vendor_vendorid='vendor'<line_sep>ofp_stats_vendor_subtype='subtype'<import_from_stmt>.nicira_ext *<line_sep>''' /* Header for Nicira vendor requests and replies. */ '''<line_sep>nicira_header=nstruct((nxt_subtype 'subtype') name='nicira_header' base=ofp_vendor criteria=<lambda>x:x.vendor<eq>NX_VENDOR_ID init=packvalue(NX_VENDOR_ID 'vendor') classifier=<lambda>x:x.subtype)<line_sep>''' /* Header for Nicira-defined actions. */ '''<line_sep>nx_action=nstruct((nx_action_subtype 'subtype') # /* NXAST_*. 
*/ name='nx_action' base=ofp_action_vendor criteria=<lambda>x:x.vendor<eq>NX_VENDOR_ID init=packvalue(NX_VENDOR_ID 'vendor') classifier=<lambda>x:x.subtype)<line_sep>nx_stats_request=nstruct((nx_stats_subtype 'subtype') (uint8[4] ) base=ofp_vendor_stats_request criteria=<lambda>x:x.vendor<eq>NX_VENDOR_ID init=packvalue(NX_VENDOR_ID 'vendor') name='nx_stats_request' classifier=<lambda>x:getattr(x 'subtype'))<line_sep>nx_stats_reply=nstruct((nx_stats_subtype 'subtype') (uint8[4] ) base=ofp_vendor_stats_reply criteria=<lambda>x:x.vendor<eq>NX_VENDOR_ID init=packvalue(NX_VENDOR_ID 'vendor') name='nx_stats_reply' classifier=<lambda>x:getattr(x 'subtype'))<line_sep>create_extension(globals() nicira_header nx_action nx_stats_request nx_stats_reply ofp_vendor_subtype ofp_action_vendor_subtype ofp_stats_vendor_subtype)<block_end>
# -*- coding: utf-8 -*- # # Modified by <NAME> # Contact: <EMAIL> # # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved <import_from_stmt>detectron2.config CfgNode<as>CN<def_stmt>add_onenet_config cfg<block_start>""" Add config for OneNet. """<line_sep>cfg.MODEL.OneNet=CN()<line_sep>cfg.MODEL.OneNet.NUM_CLASSES=80<line_sep>cfg.MODEL.OneNet.HEAD="FCOS"<line_sep># Head. # cfg.MODEL.OneNet.IN_FEATURES = ["res2", "res3", "res4", "res5"] cfg.MODEL.OneNet.IN_FEATURES=["p3" "p4" "p5" "p6" "p7"]<line_sep>cfg.MODEL.OneNet.FEATURES_STRIDE=[8 16 32 64 128]<line_sep>cfg.MODEL.OneNet.NUM_CONV=4<line_sep>cfg.MODEL.OneNet.CONV_NORM="GN"<line_sep>cfg.MODEL.OneNet.CONV_CHANNELS=256<line_sep>cfg.MODEL.OneNet.ACTIVATION='relu'<line_sep>cfg.MODEL.OneNet.NMS=<false># for ablation # Deconv cfg.MODEL.OneNet.DECONV_CHANNEL=[2048 256 128 64]<line_sep>cfg.MODEL.OneNet.DECONV_KERNEL=[4 4 4]<line_sep>cfg.MODEL.OneNet.DCN=<true><line_sep>cfg.MODEL.OneNet.MODULATE_DEFORM=<true><line_sep># Loss. cfg.MODEL.OneNet.CLASS_WEIGHT=2.0<line_sep>cfg.MODEL.OneNet.GIOU_WEIGHT=2.0<line_sep>cfg.MODEL.OneNet.L1_WEIGHT=5.0<line_sep># Focal Loss. cfg.MODEL.OneNet.ALPHA=0.25<line_sep>cfg.MODEL.OneNet.GAMMA=2.0<line_sep>cfg.MODEL.OneNet.PRIOR_PROB=0.01<line_sep># Optimizer. cfg.SOLVER.OPTIMIZER="ADAMW"<line_sep>cfg.SOLVER.BACKBONE_MULTIPLIER=1.0<line_sep>cfg.MODEL.CONDINST=CN()<line_sep>cfg.MODEL.CONDINST.SIZES_OF_INTEREST=[64 128 256 512]<line_sep># the downsampling ratio of the final instance masks to the input image cfg.MODEL.CONDINST.MASK_OUT_STRIDE=4<line_sep>cfg.MODEL.CONDINST.BOTTOM_PIXELS_REMOVED=-1<line_sep># if not -1, we only compute the mask loss for MAX_PROPOSALS random proposals PER GPU cfg.MODEL.CONDINST.MAX_PROPOSALS=-1<line_sep># if not -1, we only compute the mask loss for top `TOPK_PROPOSALS_PER_IM` proposals # PER IMAGE in terms of their detection scores cfg.MODEL.CONDINST.TOPK_PROPOSALS_PER_IM=-1<line_sep>cfg.MODEL.CONDINST.MASK_HEAD=CN()<line_sep>cfg.MODEL.CONDINST.MASK_HEAD.CHANNELS=16<line_sep>cfg.MODEL.CONDINST.MASK_HEAD.NUM_LAYERS=3<line_sep>cfg.MODEL.CONDINST.MASK_HEAD.USE_FP16=<false><line_sep>cfg.MODEL.CONDINST.MASK_HEAD.DISABLE_REL_COORDS=<false><line_sep>cfg.MODEL.CONDINST.MASK_BRANCH=CN()<line_sep>cfg.MODEL.CONDINST.MASK_BRANCH.OUT_CHANNELS=8<line_sep>cfg.MODEL.CONDINST.MASK_BRANCH.IN_FEATURES=["p3" "p4" "p5"]<line_sep>cfg.MODEL.CONDINST.MASK_BRANCH.CHANNELS=128<line_sep>cfg.MODEL.CONDINST.MASK_BRANCH.NORM="BN"<line_sep>cfg.MODEL.CONDINST.MASK_BRANCH.NUM_CONVS=4<line_sep>cfg.MODEL.CONDINST.MASK_BRANCH.SEMANTIC_LOSS_ON=<false><block_end>
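A hedged sketch of applying the helper above to a stock detectron2 config; it assumes detectron2 is installed and that the default CfgNode is not frozen when the new keys are attached.
# Hedged sketch: register the OneNet / CondInst keys on a default config.
from detectron2.config import get_cfg

cfg = get_cfg()                  # stock detectron2 defaults
add_onenet_config(cfg)           # attaches the MODEL.OneNet and MODEL.CONDINST nodes defined above
print(cfg.MODEL.OneNet.HEAD)     # "FCOS"
print(cfg.SOLVER.OPTIMIZER)      # "ADAMW"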
<import_stmt>json<import_stmt>logging<import_stmt>os<import_from_stmt>pathlib Path<import_stmt>plyvel<import_stmt>pytest<import_from_stmt>pkg_resources parse_version<import_from_stmt>plyvel._plyvel Error<import_from_stmt>loopchain.blockchain TransactionVerifier<import_from_stmt>loopchain.blockchain.blocks BlockVersioner BlockVerifier BlockSerializer<import_from_stmt>loopchain.blockchain.transactions TransactionVersioner<line_sep>Logger=logging.getLogger(__name__)<line_sep>@pytest.fixture<def_stmt>base_dir <arrow>Path# FIXME : base_dir that you want to test <block_start>base=Path(os.getcwd()).parents[1]<line_sep><return>base<block_end>@pytest.fixture<def_stmt>plyvel_db base_dir<arrow>plyvel.DB<block_start>base_dir=base_dir/'.storage'<line_sep>db_path=Path()<line_sep>Logger.info(f"base_dir : {base_dir}")<if_stmt><not>os.path.exists(base_dir)<block_start>pytest.skip(f"'{base_dir}' does not exist")<block_end><for_stmt>path os.listdir(base_dir)<block_start><if_stmt>path.startswith('db')<and>path.endswith('icon_dex')<block_start>db_path=base_dir/path<line_sep><break><block_end><block_end>Logger.info(f"db_path : {db_path}")<line_sep>db=<none><try_stmt><block_start>db=plyvel.DB(db_path.as_posix())<block_end><except_stmt>(Error IOError)<block_start>pytest.skip("db data must be prepared for this verify test")<block_end><return>db<block_end>@pytest.fixture<def_stmt>block_versioner <block_start>block_versioner=BlockVersioner()<line_sep># FIXME : block versions mainnet_test=<false><if_stmt>mainnet_test<block_start>block_versions={"0.1a":0 "0.3":10324749 "0.4":12640761 "0.5":14473622}<block_end><else_stmt><block_start>block_versions={"0.1a":0 "0.4":1 "0.5":30}<block_end><for_stmt>version,height block_versions.items()<block_start>block_versioner.add_version(height version)<block_end><return>block_versioner<block_end>@pytest.fixture<def_stmt>tx_versioner <block_start>hash_versions={"genesis":0 "0x2":1 "0x3":1}<line_sep>tx_versioner=TransactionVersioner()<for_stmt>tx_version,tx_hash_version hash_versions.items()<block_start>tx_versioner.hash_generator_versions[tx_version]=tx_hash_version<block_end><return>tx_versioner<block_end><class_stmt>TestSignatureVerify<block_start><def_stmt>test_verify self plyvel_db block_versioner tx_versioner<block_start>""" 1. prepare plyvel db, block_versioner, tx_versioner 2. pick block, transaction, vote, etc from db 3. verify block, vote transaction, vote, etc... 
"""<line_sep># given db instance, block_versioner, tx_versioner block_key=plyvel_db.get(b'last_block_key')<while_stmt><true># when get block from db <block_start>block_dumped=plyvel_db.get(block_key)<line_sep>Logger.info(f"block_dump : {block_dumped}")<line_sep>block_serialized=json.loads(block_dumped)<line_sep>block_height=block_versioner.get_height(block_serialized)<line_sep>block_version=block_versioner.get_version(block_height)<line_sep>block_serializer=BlockSerializer.new(block_version tx_versioner)<line_sep>block=block_serializer.deserialize(block_serialized)<line_sep>Logger.info(f"block_height : {block_height}, block_version : {block_version}")<if_stmt>block_height<eq>0<block_start><break><block_end># then block verify block_verifier=BlockVerifier.new(block_version tx_versioner)<line_sep>block_verifier.verify_signature(block)<line_sep># then vote verify <if_stmt>parse_version(block_version)<ge>parse_version("0.3")<block_start>Logger.info(f"leader_votes : {block.body.leader_votes}")<for_stmt>leader_vote block.body.leader_votes<block_start><if_stmt><not>leader_vote<block_start><continue><block_end>leader_vote.verify()<block_end>Logger.info(f"prev_votes : {block.body.prev_votes}")<for_stmt>block_vote block.body.prev_votes<block_start><if_stmt><not>block_vote<block_start><continue><block_end>block_vote.verify()<block_end><block_end># then transaction verify <for_stmt>tx block.body.transactions.values()<block_start>tv=TransactionVerifier.new(tx.version tx.type() tx_versioner)<line_sep>tv.verify_signature(tx)<block_end>Logger.info(f"prev_hash : {block.header.prev_hash}, {bytes(block.header.prev_hash)}")<line_sep>block_key=block.header.prev_hash.hex().encode("utf-8")<block_end><block_end><block_end>
""" SynthTIGER Copyright (c) 2021-present NAVER Corp. MIT license """<import_from_stmt>synthtiger.components.wrapper.iterator Iterator<import_from_stmt>synthtiger.components.wrapper.selector Selector<import_from_stmt>synthtiger.components.wrapper.switch Switch<line_sep>__all__=["Iterator" "Selector" "Switch"]<line_sep>
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_from_stmt>django.conf settings<class_stmt>ConfFixture(object)<block_start>BACKEND_TYPE="bk_token"<line_sep>USER_BACKEND="bk_token.backends.TokenBackend"<line_sep>LOGIN_REQUIRED_MIDDLEWARE="bk_token.middlewares.LoginRequiredMiddleware"<line_sep>USER_MODEL="bk_token.models.UserProxy"<line_sep>CONSOLE_LOGIN_URL=settings.BK_PAAS_HOST<line_sep>LOGIN_URL=settings.BK_LOGIN_URL+"/"<line_sep>LOGIN_PLAIN_URL=settings.BK_LOGIN_URL+"/plain/"<line_sep>VERIFY_URL=settings.BK_LOGIN_INNER_URL+"/accounts/is_login/"<line_sep>USER_INFO_URL=settings.BK_LOGIN_INNER_URL+"/accounts/get_user/"<line_sep>HAS_PLAIN=<false><line_sep>ADD_CROSS_PREFIX=<false><line_sep>ADD_APP_CODE=<true><line_sep>IFRAME_HEIGHT=400<line_sep>IFRAME_WIDTH=400<line_sep>WEIXIN_BACKEND_TYPE="null"<line_sep>WEIXIN_MIDDLEWARE="null.NullMiddleware"<line_sep>WEIXIN_BACKEND="null.NullBackend"<line_sep>SMS_CLIENT_MODULE="cmsi"<line_sep>SMS_CLIENT_FUNC="send_sms"<line_sep>SMS_CLIENT_USER_ARGS_NAME="receiver__username"<line_sep>SMS_CLIENT_CONTENT_ARGS_NAME="content"<line_sep>RIO_BACKEND_TYPE="null"<line_sep>RIO_MIDDLEWARE="null.NullMiddleware"<line_sep>RIO_BACKEND="null.NullBackend"<line_sep>BK_JWT_MIDDLEWARE="bk_jwt.middlewares.BkJwtLoginRequiredMiddleware"<line_sep>BK_JWT_BACKEND="bk_jwt.backends.BkJwtBackend"<block_end>
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Integration test for Google Cloud DICOM IO connector. This e2e test will first create a temporary empty DICOM storage and send 18 DICOM files from gs://apache-beam-samples/healthcare/dicom/io_test_files to it. The test will compare the metadata of a persistent DICOM storage, which reprensets ground truths and has 18 files stored, to the temporary storage in order to check if the connectors are functioning correctly. """<line_sep># pytype: skip-file <import_stmt>datetime<import_stmt>random<import_stmt>string<import_stmt>unittest<import_stmt>pytest<import_stmt>apache_beam<as>beam<import_from_stmt>apache_beam.io fileio<import_from_stmt>apache_beam.testing.test_pipeline TestPipeline<import_from_stmt>apache_beam.testing.util assert_that<import_from_stmt>apache_beam.testing.util equal_to<line_sep># pylint: disable=wrong-import-order, wrong-import-position <try_stmt><block_start><import_from_stmt>apache_beam.io.gcp.dicomclient DicomApiHttpClient<import_from_stmt>apache_beam.io.gcp.dicomio DicomSearch<import_from_stmt>apache_beam.io.gcp.dicomio UploadToDicomStore<import_from_stmt>google.auth default<import_from_stmt>google.auth.transport requests<block_end><except_stmt>ImportError<block_start>DicomSearch=<none><block_end># pylint: enable=wrong-import-order, wrong-import-position REGION='us-central1'<line_sep>DATA_SET_ID='apache-beam-integration-testing'<line_sep>HEALTHCARE_BASE_URL='https://healthcare.googleapis.com/v1'<line_sep>GCS_BASE_URL='https://storage.googleapis.com/storage/v1'<line_sep>PERSISTENT_DICOM_STORE_NAME="dicom_it_persistent_store"<line_sep>BUCKET_NAME='apache-beam-samples'<line_sep>DICOM_DIR_PATH='healthcare/dicom'<line_sep>DICOM_FILES_PATH='gs://'+BUCKET_NAME+'/'+DICOM_DIR_PATH<line_sep>METADATA_DIR_PATH=DICOM_DIR_PATH+'/io_test_metadata/'<line_sep>META_DATA_ALL_NAME='Dicom_io_it_test_data.json'<line_sep>META_DATA_REFINED_NAME='Dicom_io_it_test_refined_data.json'<line_sep>NUM_INSTANCE=18<line_sep>RAND_LEN=15<def_stmt>random_string_generator length<block_start>letters_and_digits=string.ascii_letters+string.digits<line_sep>result=''.join((random.choice(letters_and_digits)<for>i range(length)))<line_sep><return>result<block_end><def_stmt>create_dicom_store project_id dataset_id region dicom_store_id# Create a an empty DICOM store <block_start>credential,_=default()<line_sep>session=requests.AuthorizedSession(credential)<line_sep>api_endpoint="{}/projects/{}/locations/{}".format(HEALTHCARE_BASE_URL project_id region)<line_sep># base of dicomweb path. 
dicomweb_path="{}/datasets/{}/dicomStores".format(api_endpoint dataset_id)<line_sep>response=session.post(dicomweb_path params={"dicomStoreId":dicom_store_id})<line_sep>response.raise_for_status()<line_sep><return>response.status_code<block_end><def_stmt>delete_dicom_store project_id dataset_id region dicom_store_id# Delete an existing DICOM store <block_start>credential,_=default()<line_sep>session=requests.AuthorizedSession(credential)<line_sep>api_endpoint="{}/projects/{}/locations/{}".format(HEALTHCARE_BASE_URL project_id region)<line_sep># base of dicomweb path. dicomweb_path="{}/datasets/{}/dicomStores/{}".format(api_endpoint dataset_id dicom_store_id)<line_sep>response=session.delete(dicomweb_path)<line_sep>response.raise_for_status()<line_sep><return>response.status_code<block_end><def_stmt>get_gcs_file_http file_name# Get gcs file from REST Api <block_start>file_name=file_name.replace('/' '%2F')<line_sep>api_endpoint="{}/b/{}/o/{}?alt=media".format(GCS_BASE_URL BUCKET_NAME file_name)<line_sep>credential,_=default()<line_sep>session=requests.AuthorizedSession(credential)<line_sep>response=session.get(api_endpoint)<line_sep>response.raise_for_status()<line_sep><return>response.json()<block_end>@unittest.skipIf(DicomSearch<is><none> 'GCP dependencies are not installed')<class_stmt>DICOMIoIntegrationTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.test_pipeline=TestPipeline(is_integration_test=<true>)<line_sep>self.project=self.test_pipeline.get_option('project')<line_sep>self.expected_output_all_metadata=get_gcs_file_http(METADATA_DIR_PATH+META_DATA_ALL_NAME)<line_sep>self.expected_output_refined_metadata=get_gcs_file_http(METADATA_DIR_PATH+META_DATA_REFINED_NAME)<line_sep># create a temp Dicom store based on the time stamp self.temp_dicom_store="DICOM_store_"+datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S.%f_')+random_string_generator(RAND_LEN)<line_sep>create_dicom_store(self.project DATA_SET_ID REGION self.temp_dicom_store)<block_end><def_stmt>tearDown self# clean up the temp Dicom store <block_start>delete_dicom_store(self.project DATA_SET_ID REGION self.temp_dicom_store)<block_end>@pytest.mark.it_postcommit<def_stmt>test_dicom_search_instances self# Search and compare the metadata of a persistent DICOM store. # Both refine and comprehensive search will be tested. 
<block_start>input_dict_all={}<line_sep>input_dict_all['project_id']=self.project<line_sep>input_dict_all['region']=REGION<line_sep>input_dict_all['dataset_id']=DATA_SET_ID<line_sep>input_dict_all['dicom_store_id']=PERSISTENT_DICOM_STORE_NAME<line_sep>input_dict_all['search_type']="instances"<line_sep>input_dict_refine={}<line_sep>input_dict_refine['project_id']=self.project<line_sep>input_dict_refine['region']=REGION<line_sep>input_dict_refine['dataset_id']=DATA_SET_ID<line_sep>input_dict_refine['dicom_store_id']=PERSISTENT_DICOM_STORE_NAME<line_sep>input_dict_refine['search_type']="instances"<line_sep>input_dict_refine['params']={'StudyInstanceUID':'study_000000001' 'limit':500 'offset':0}<line_sep>expected_dict_all={}<line_sep>expected_dict_all['result']=self.expected_output_all_metadata<line_sep>expected_dict_all['status']=200<line_sep>expected_dict_all['input']=input_dict_all<line_sep>expected_dict_all['success']=<true><line_sep>expected_dict_refine={}<line_sep>expected_dict_refine['result']=self.expected_output_refined_metadata<line_sep>expected_dict_refine['status']=200<line_sep>expected_dict_refine['input']=input_dict_refine<line_sep>expected_dict_refine['success']=<true><with_stmt>self.test_pipeline<as>p<block_start>results_all=(p|'create all dict'<rshift>beam.Create([input_dict_all])|'search all'<rshift>DicomSearch())<line_sep>results_refine=(p|'create refine dict'<rshift>beam.Create([input_dict_refine])|'search refine'<rshift>DicomSearch())<line_sep>assert_that(results_all equal_to([expected_dict_all]) label='all search assert')<line_sep>assert_that(results_refine equal_to([expected_dict_refine]) label='refine search assert')<block_end><block_end>@pytest.mark.it_postcommit<def_stmt>test_dicom_store_instance_from_gcs self# Store DICOM files to a empty DICOM store from a GCS bucket, # then check if the store metadata match. <block_start>input_dict_store={}<line_sep>input_dict_store['project_id']=self.project<line_sep>input_dict_store['region']=REGION<line_sep>input_dict_store['dataset_id']=DATA_SET_ID<line_sep>input_dict_store['dicom_store_id']=self.temp_dicom_store<line_sep>expected_output=[<true>]<times>NUM_INSTANCE<with_stmt>self.test_pipeline<as>p<block_start>gcs_path=DICOM_FILES_PATH+"/io_test_files/*"<line_sep>results=(p|fileio.MatchFiles(gcs_path)|fileio.ReadMatches()|UploadToDicomStore(input_dict_store 'fileio')|beam.Map(<lambda>x:x['success']))<line_sep>assert_that(results equal_to(expected_output) label='store first assert')<block_end># Check the metadata using client result,status_code=DicomApiHttpClient().qido_search(self.project REGION DATA_SET_ID self.temp_dicom_store 'instances')<line_sep>self.assertEqual(status_code 200)<line_sep># List comparison based on different version of python self.assertCountEqual(result self.expected_output_all_metadata)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# # Copyright (c) 2021 Facebook, Inc. and its affiliates. # # This file is part of NeuralDB. # See https://github.com/facebookresearch/NeuralDB for further info. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>bz2<import_stmt>json<import_from_stmt>collections defaultdict<import_from_stmt>json JSONDecodeError<import_stmt>pydash<import_from_stmt>argparse ArgumentParser<import_from_stmt>tqdm tqdm<import_from_stmt>ndb_data.wikidata_common.wikidata Wikidata<def_stmt>read_dump wikidata_file<block_start><with_stmt>bz2.open(wikidata_file mode="rt")<as>f<block_start>f.read(2)<for_stmt>line f<block_start><yield>line.rstrip(",\n")<block_end><block_end><block_end><def_stmt>get_indexable instance<block_start>wikidata_id=pydash.get(instance "id")<line_sep>english_name=pydash.get(instance "labels.en.value")<line_sep>claims=pydash.get(instance "claims")<line_sep>properties=set()<line_sep>property_entity=defaultdict(list)<for_stmt>property,claims claims.items()<block_start>properties.add(property)<for_stmt>claim claims<block_start>property_entity[property].append((pydash.get(claim "mainsnak.datavalue.value") list(pydash.get(claim "qualifiers").values())<if>pydash.get(claim "qualifiers")<is><not><none><else><none> ))<block_end><block_end>sitelinks=pydash.get(instance "sitelinks")<line_sep>enwiki=pydash.get(instance "sitelinks.enwiki.title")<line_sep><yield>wikidata_id english_name sitelinks enwiki list(properties) dict(property_entity)<block_end><def_stmt>index_dump dump<block_start><for_stmt>idx,line enumerate(dump)<block_start><try_stmt><block_start><yield><from>get_indexable(json.loads(line))<block_end><except_stmt>JSONDecodeError<as>e<block_start>print(e)<line_sep><pass><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>parser=ArgumentParser()<line_sep>parser.add_argument("wikidata_file")<line_sep>args=parser.parse_args()<line_sep>wiki=Wikidata()<line_sep>collection=wiki.collection<line_sep>insert_count=0<line_sep>dump=read_dump(args.wikidata_file)<line_sep>batch=[]<line_sep>_tqdm_iter=tqdm(index_dump(dump) total=90e6)<for_stmt>w_id,e_name,sitelinks,enwiki,props,prop_dict _tqdm_iter<block_start>batch.append({"wikidata_id":w_id "english_name":e_name "english_wiki":enwiki "property_types":props "properties":prop_dict "sitelinks":list(sitelinks.values()) })<if_stmt>len(batch)<ge>5000<block_start>collection.insert_many(batch)<line_sep>batch=[]<line_sep>insert_count<augadd>1<line_sep>_tqdm_iter.desc=f"Insert batch {insert_count}"<block_end><block_end>print("last")<line_sep>collection.insert_many(batch)<block_end>
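A hedged sketch of what get_indexable yields, using a tiny hand-made entity instead of a real dump record; all field values below are invented.
# Hedged sketch: run one toy Wikidata entity through get_indexable().
toy_entity = {
    "id": "Q42",
    "labels": {"en": {"value": "Douglas Adams"}},
    "claims": {"P31": [{"mainsnak": {"datavalue": {"value": {"id": "Q5"}}}}]},
    "sitelinks": {"enwiki": {"title": "Douglas Adams"}},
}
w_id, name, sitelinks, enwiki, props, prop_dict = next(get_indexable(toy_entity))
# w_id == "Q42", name == "Douglas Adams", enwiki == "Douglas Adams", props == ["P31"]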
# -*- coding: utf-8 -*- <def_stmt>parametrized dec<block_start><def_stmt>layer *args **kwargs<block_start><def_stmt>repl f<block_start><return>dec(f *args **kwargs)<block_end><return>repl<block_end><return>layer<block_end>@parametrized<def_stmt>dependency module *_deps<block_start>module.deps=_deps<line_sep><return>module<block_end>@parametrized<def_stmt>source module _source<block_start>module.source=_source<line_sep><return>module<block_end>@parametrized<def_stmt>version module _ver<block_start>module.version=_ver<line_sep><return>module<block_end>@dependency()@source('unknown')@version('latest')<class_stmt>Module(object)<block_start><def_stmt>__init__ self composer<block_start>self.composer=composer<block_end><def_stmt>__repr__ self<block_start><return>'%-13s %-6s (%s)'%(self.name() self.version self.source)<block_end><def_stmt>build self<block_start><pass><block_end><def_stmt>expose self<block_start><return>[]<block_end><def_stmt>name self<block_start><return>self.__class__.__name__.lower()<block_end><block_end>
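# Hedged usage sketch (not part of the original module): a hypothetical concrete module showing how the @dependency, @source and @version decorators above might be stacked on a Module subclass; the names 'Nginx', 'pcre', 'zlib', 'github' and '1.20' are assumptions made purely for illustration. @dependency('pcre' 'zlib')@source('github')@version('1.20')<class_stmt>Nginx(Module)<block_start><def_stmt>build self<block_start>print('building %s from %s'%(self.name() self.source))<block_end><def_stmt>expose self<block_start><return>['http' 'https']<block_end><block_end>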
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>HGVHistoProducerAlgoBlock=cms.PSet(minEta=cms.double(-4.5) maxEta=cms.double(4.5) nintEta=cms.int32(100) useFabsEta=cms.bool(<false>) #parameters for energy minEne=cms.double(0.) maxEne=cms.double(500.) nintEne=cms.int32(250) #parameters for pt minPt=cms.double(0.) maxPt=cms.double(100.) nintPt=cms.int32(100) #parameters for phi minPhi=cms.double(-3.2) maxPhi=cms.double(3.2) nintPhi=cms.int32(80) #parameters for counting mixed hits clusters minMixedHitsSimCluster=cms.double(0.) maxMixedHitsSimCluster=cms.double(800.) nintMixedHitsSimCluster=cms.int32(100) #parameters for counting mixed hits clusters minMixedHitsCluster=cms.double(0.) maxMixedHitsCluster=cms.double(800.) nintMixedHitsCluster=cms.int32(100) #parameters for the total amount of energy clustered by all layer clusters (fraction over caloparticles) minEneCl=cms.double(0.) maxEneCl=cms.double(110.) nintEneCl=cms.int32(110) #parameters for the longitudinal depth barycenter minLongDepBary=cms.double(0.) maxLongDepBary=cms.double(110.) nintLongDepBary=cms.int32(110) #z position of vertex minZpos=cms.double(-550.) maxZpos=cms.double(550.) nintZpos=cms.int32(1100) #Parameters for the total number of simclusters per layer minTotNsimClsperlay=cms.double(0.) maxTotNsimClsperlay=cms.double(50.) nintTotNsimClsperlay=cms.int32(50) #Parameters for the total number of layer clusters per layer minTotNClsperlay=cms.double(0.) maxTotNClsperlay=cms.double(50.) nintTotNClsperlay=cms.int32(50) #Parameters for the energy clustered by layer clusters per layer (fraction) minEneClperlay=cms.double(0.) maxEneClperlay=cms.double(110.) nintEneClperlay=cms.int32(110) #Parameters for the score both for: #1. calo particle to layer clusters association per layer #2. layer cluster to calo particles association per layer minScore=cms.double(0.) maxScore=cms.double(1.02) nintScore=cms.int32(51) #Parameters for shared energy fraction. That is: #1. Fraction of each of the layer clusters energy related to a # calo particle over that calo particle's energy. #2. Fraction of each of the calo particles energy # related to a layer cluster over that layer cluster's energy. minSharedEneFrac=cms.double(0.) maxSharedEneFrac=cms.double(1.) nintSharedEneFrac=cms.int32(100) minTSTSharedEneFracEfficiency=cms.double(0.5) #Same as above for tracksters minTSTSharedEneFrac=cms.double(0.) maxTSTSharedEneFrac=cms.double(1.0) nintTSTSharedEneFrac=cms.int32(100) #Parameters for the total number of simclusters per thickness minTotNsimClsperthick=cms.double(0.) maxTotNsimClsperthick=cms.double(800.) nintTotNsimClsperthick=cms.int32(100) #Parameters for the total number of layer clusters per thickness minTotNClsperthick=cms.double(0.) maxTotNClsperthick=cms.double(800.) nintTotNClsperthick=cms.int32(100) #Parameters for the total number of cells per per thickness per layer minTotNcellsperthickperlayer=cms.double(0.) maxTotNcellsperthickperlayer=cms.double(500.) nintTotNcellsperthickperlayer=cms.int32(100) #Parameters for the distance of cluster cells to seed cell per thickness per layer minDisToSeedperthickperlayer=cms.double(0.) maxDisToSeedperthickperlayer=cms.double(300.) nintDisToSeedperthickperlayer=cms.int32(100) #Parameters for the energy weighted distance of cluster cells to seed cell per thickness per layer minDisToSeedperthickperlayerenewei=cms.double(0.) maxDisToSeedperthickperlayerenewei=cms.double(10.) 
nintDisToSeedperthickperlayerenewei=cms.int32(50) #Parameters for the distance of cluster cells to max cell per thickness per layer minDisToMaxperthickperlayer=cms.double(0.) maxDisToMaxperthickperlayer=cms.double(300.) nintDisToMaxperthickperlayer=cms.int32(100) #Parameters for the energy weighted distance of cluster cells to max cell per thickness per layer minDisToMaxperthickperlayerenewei=cms.double(0.) maxDisToMaxperthickperlayerenewei=cms.double(50.) nintDisToMaxperthickperlayerenewei=cms.int32(50) #Parameters for the distance of cluster cells to max cell per thickness per layer minDisSeedToMaxperthickperlayer=cms.double(0.) maxDisSeedToMaxperthickperlayer=cms.double(300.) nintDisSeedToMaxperthickperlayer=cms.int32(100) #Parameters for the energy of a cluster per thickness per layer minClEneperthickperlayer=cms.double(0.) maxClEneperthickperlayer=cms.double(10.) nintClEneperthickperlayer=cms.int32(100) #Parameters for the energy density of cluster cells per thickness minCellsEneDensperthick=cms.double(0.) maxCellsEneDensperthick=cms.double(100.) nintCellsEneDensperthick=cms.int32(200) #Parameters for the total number of tracksters per event #We always treet one event as two events, one in +z one in -z minTotNTSTs=cms.double(0.) maxTotNTSTs=cms.double(50.) nintTotNTSTs=cms.int32(50) #Parameters for the total number of layer clusters in trackster minTotNClsinTSTs=cms.double(0.) maxTotNClsinTSTs=cms.double(400.) nintTotNClsinTSTs=cms.int32(100) #Parameters for the total number of layer clusters in trackster per layer minTotNClsinTSTsperlayer=cms.double(0.) maxTotNClsinTSTsperlayer=cms.double(50.) nintTotNClsinTSTsperlayer=cms.int32(50) #Parameters for the multiplicity of layer clusters in trackster minMplofLCs=cms.double(0.) maxMplofLCs=cms.double(20.) nintMplofLCs=cms.int32(20) #Parameters for cluster size minSizeCLsinTSTs=cms.double(0.) maxSizeCLsinTSTs=cms.double(50.) nintSizeCLsinTSTs=cms.int32(50) #Parameters for the energy of a cluster per multiplicity minClEnepermultiplicity=cms.double(0.) maxClEnepermultiplicity=cms.double(10.) nintClEnepermultiplicity=cms.int32(10) #parameters for X minX=cms.double(-300.) maxX=cms.double(300.) nintX=cms.int32(100) #parameters for Y minY=cms.double(-300.) maxY=cms.double(300.) nintY=cms.int32(100) #parameters for Z minZ=cms.double(-550.) maxZ=cms.double(550.) nintZ=cms.int32(1100))<line_sep>
<import_stmt>appdaemon.plugins.hass.hassapi<as>hass<line_sep># # Provide the list of HA entities for Alexa Apps # # # Args: # # switchable: dict of switchable entities # temperature: dict of temperature sensors # door: dict of reed sensors showing if the door is completely open # door_tilted: dict of reed sensors showing if a door is partially/leaning open # window: dict of reed sensors showing if a window is open # # # Release Notes # # Version 1.0: # Initial Version <class_stmt>ListService(hass.Hass)<block_start><def_stmt>initialize self<block_start><return><block_end><def_stmt>getSwitchable self<block_start><return>self.args["switchable"]<block_end><def_stmt>getTemperature self<block_start><return>self.args["temperature"]<block_end><def_stmt>getDoor self<block_start><return>self.args["door"]<block_end><def_stmt>getWindow self<block_start><return>self.args["window"]<block_end><def_stmt>getDoorTilted self<block_start><return>self.args["door_tilted"]<block_end><block_end>
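# Hedged usage sketch (not part of the original app): another AppDaemon app could obtain this service with get_app() and subscribe to the configured entities; the app name "list_service", the consumer class and the callback wiring are assumptions for illustration only. <class_stmt>WindowAlert(hass.Hass)<block_start><def_stmt>initialize self<block_start>self.lists=self.get_app("list_service")<for_stmt>entity self.lists.getWindow()<block_start>self.listen_state(self.on_window entity new="on")<block_end><block_end><def_stmt>on_window self entity attribute old new kwargs<block_start>self.log("Window open: {}".format(entity))<block_end><block_end>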
__author__="<NAME>, <NAME>"<import_stmt>os<import_stmt>unittest<import_from_stmt>ruamel.yaml YAML<import_from_stmt>fireworks.user_objects.firetasks.filepad_tasks AddFilesTask DeleteFilesTask GetFilesByQueryTask GetFilesTask <import_from_stmt>fireworks.utilities.filepad FilePad<line_sep>module_dir=os.path.abspath(os.path.dirname(__file__))<class_stmt>FilePadTasksTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.paths=[os.path.join(module_dir "write.yaml") os.path.join(module_dir "delete.yaml")]<line_sep>self.identifiers=["write" "delete"]<line_sep>self.fp=FilePad.auto_load()<block_end><def_stmt>test_addfilestask_run self<block_start>t=AddFilesTask(paths=self.paths identifiers=self.identifiers)<line_sep>t.run_task({})<line_sep>write_file_contents,_=self.fp.get_file("write")<with_stmt>open(self.paths[0])<as>f<block_start>self.assertEqual(write_file_contents f.read().encode())<block_end>del_file_contents,_=self.fp.get_file("delete")<with_stmt>open(self.paths[1])<as>f<block_start>self.assertEqual(del_file_contents f.read().encode())<block_end><block_end><def_stmt>test_deletefilestask_run self<block_start>t=DeleteFilesTask(identifiers=self.identifiers)<line_sep>t.run_task({})<line_sep>file_contents,doc=self.fp.get_file("write")<line_sep>self.assertIsNone(file_contents)<line_sep>self.assertIsNone(doc)<line_sep>file_contents,doc=self.fp.get_file("delete")<line_sep>self.assertIsNone(file_contents)<line_sep>self.assertIsNone(doc)<block_end><def_stmt>test_getfilestask_run self<block_start>t=AddFilesTask(paths=self.paths identifiers=self.identifiers)<line_sep>t.run_task({})<line_sep>dest_dir=os.path.abspath(".")<line_sep>identifiers=["write"]<line_sep>new_file_names=["write_2.yaml"]<line_sep>t=GetFilesTask(identifiers=identifiers dest_dir=dest_dir new_file_names=new_file_names)<line_sep>t.run_task({})<line_sep>write_file_contents,_=self.fp.get_file("write")<with_stmt>open(os.path.join(dest_dir new_file_names[0]))<as>f<block_start>self.assertEqual(write_file_contents f.read().encode())<block_end>os.remove(os.path.join(dest_dir new_file_names[0]))<block_end><def_stmt>test_getfilesbyquerytask_run self<block_start>"""Tests querying objects from FilePad by metadata"""<line_sep>t=AddFilesTask(paths=self.paths identifiers=self.identifiers metadata={"key":"value"})<line_sep>t.run_task({})<line_sep>dest_dir=os.path.abspath(".")<line_sep>new_file_names=["test_file.yaml"]<line_sep>t=GetFilesByQueryTask(query={"metadata->key":"value"} dest_dir=dest_dir new_file_names=new_file_names)<line_sep>t.run_task({})<line_sep>test_file_contents,_=self.fp.get_file("test_idenfifier")<line_sep>self.assertEqual(test_file_contents open(os.path.join(dest_dir new_file_names[0])).read().encode())<line_sep>os.remove(os.path.join(dest_dir new_file_names[0]))<block_end><def_stmt>test_getfilesbyquerytask_run self<block_start>"""Tests querying objects from FilePad by metadata"""<with_stmt>open("original_test_file.txt" "w")<as>f<block_start>f.write("Some file with some content")<block_end>t=AddFilesTask(paths=["original_test_file.txt"] identifiers=["some_identifier"] metadata={"key":"value"})<line_sep>t.run_task({})<line_sep>os.remove("original_test_file.txt")<line_sep>dest_dir=os.path.abspath(".")<line_sep>t=GetFilesByQueryTask(query={"metadata->key":"value"} dest_dir=dest_dir new_file_names=["queried_test_file.txt"])<line_sep>t.run_task({})<line_sep>test_file_contents,_=self.fp.get_file("some_identifier")<with_stmt>open(os.path.join(dest_dir 
"queried_test_file.txt"))<as>f<block_start>self.assertEqual(test_file_contents f.read().encode())<block_end>os.remove(os.path.join(dest_dir "queried_test_file.txt"))<block_end><def_stmt>test_getfilesbyquerytask_metafile_run self<block_start>"""Tests writing metadata to a yaml file"""<with_stmt>open("original_test_file.txt" "w")<as>f<block_start>f.write("Some file with some content")<block_end>t=AddFilesTask(paths=["original_test_file.txt"] identifiers=["test_identifier"] metadata={"key":"value"})<line_sep>t.run_task({})<line_sep>os.remove("original_test_file.txt")<line_sep>dest_dir=os.path.abspath(".")<line_sep>t=GetFilesByQueryTask(query={"metadata->key":"value"} meta_file=<true> meta_file_suffix=".meta.yaml" dest_dir=dest_dir new_file_names=["queried_test_file.txt"] )<line_sep>t.run_task({})<with_stmt>open("queried_test_file.txt.meta.yaml")<as>f<block_start>yaml=YAML(typ="safe")<line_sep>metadata=yaml.load(f)<block_end>self.assertEqual(metadata["key"] "value")<line_sep>os.remove(os.path.join(dest_dir "queried_test_file.txt"))<line_sep>os.remove(os.path.join(dest_dir "queried_test_file.txt.meta.yaml"))<block_end><def_stmt>test_getfilesbyquerytask_ignore_empty_result_run self<block_start>"""Tests on ignoring empty results from FilePad query"""<line_sep>dest_dir=os.path.abspath(".")<line_sep>t=GetFilesByQueryTask(query={"metadata->key":"value"} fizzle_empty_result=<false> dest_dir=dest_dir new_file_names=["queried_test_file.txt"] )<line_sep>t.run_task({})<line_sep># test successful if no exception raised <block_end><def_stmt>test_getfilesbyquerytask_raise_empty_result_run self<block_start>"""Tests on raising exception on empty results from FilePad query"""<line_sep>dest_dir=os.path.abspath(".")<line_sep>t=GetFilesByQueryTask(query={"metadata->key":"value"} fizzle_empty_result=<true> dest_dir=dest_dir new_file_names=["queried_test_file.txt"] )<with_stmt>self.assertRaises(ValueError)<block_start>t.run_task({})<block_end># test successful if exception raised <block_end><def_stmt>test_getfilesbyquerytask_ignore_degenerate_file_name self<block_start>"""Tests on ignoring degenerate file name in result from FilePad query"""<with_stmt>open("degenerate_file.txt" "w")<as>f<block_start>f.write("Some file with some content")<block_end>t=AddFilesTask(paths=["degenerate_file.txt"] identifiers=["some_identifier"] metadata={"key":"value"})<line_sep>t.run_task({})<with_stmt>open("degenerate_file.txt" "w")<as>f<block_start>f.write("Some other file with some other content BUT same file name")<block_end>t=AddFilesTask(paths=["degenerate_file.txt"] identifiers=["some_other_identifier"] metadata={"key":"value"})<line_sep>t.run_task({})<line_sep>os.remove("degenerate_file.txt")<line_sep>t=GetFilesByQueryTask(query={"metadata->key":"value"} fizzle_degenerate_file_name=<false>)<line_sep>t.run_task({})<line_sep># test successful if no exception raised <block_end><def_stmt>test_getfilesbyquerytask_raise_degenerate_file_name self<block_start>"""Tests on raising exception on degenerate file name from FilePad query"""<with_stmt>open("degenerate_file.txt" "w")<as>f<block_start>f.write("Some file with some content")<block_end>t=AddFilesTask(paths=["degenerate_file.txt"] identifiers=["some_identifier"] metadata={"key":"value"})<line_sep>t.run_task({})<with_stmt>open("degenerate_file.txt" "w")<as>f<block_start>f.write("Some other file with some other content BUT same file name")<block_end>t=AddFilesTask(paths=["degenerate_file.txt"] identifiers=["some_other_identifier"] 
metadata={"key":"value"})<line_sep>t.run_task({})<line_sep>os.remove("degenerate_file.txt")<line_sep>t=GetFilesByQueryTask(query={"metadata->key":"value"} fizzle_degenerate_file_name=<true>)<with_stmt>self.assertRaises(ValueError)<block_start>t.run_task({})<block_end># test successful if exception raised <block_end><def_stmt>test_getfilesbyquerytask_sort_ascending_name_run self<block_start>"""Tests on sorting queried files in ascending order"""<line_sep>file_contents=["Some file with some content" "Some other file with some other content"]<with_stmt>open("degenerate_file.txt" "w")<as>f<block_start>f.write(file_contents[0])<block_end>t=AddFilesTask(paths=["degenerate_file.txt"] identifiers=["some_identifier"] metadata={"key":"value" "sort_key":0})<line_sep>t.run_task({})<with_stmt>open("degenerate_file.txt" "w")<as>f<block_start>f.write(file_contents[-1])<block_end>t=AddFilesTask(paths=["degenerate_file.txt"] identifiers=["some_other_identifier"] metadata={"key":"value" "sort_key":1} )<line_sep>t.run_task({})<line_sep>os.remove("degenerate_file.txt")<line_sep>t=GetFilesByQueryTask(query={"metadata->key":"value"} fizzle_degenerate_file_name=<false> sort_key="sort_key" sort_direction=1)<line_sep>t.run_task({})<with_stmt>open("degenerate_file.txt")<as>f<block_start>self.assertEqual(file_contents[-1] f.read())<block_end><block_end><def_stmt>test_getfilesbyquerytask_sort_descending_name_run self<block_start>"""Tests on sorting queried files in descending order"""<line_sep>file_contents=["Some file with some content" "Some other file with some other content"]<with_stmt>open("degenerate_file.txt" "w")<as>f<block_start>f.write(file_contents[0])<block_end>t=AddFilesTask(paths=["degenerate_file.txt"] identifiers=["some_identifier"] metadata={"key":"value" "sort_key":10})<line_sep>t.run_task({})<with_stmt>open("degenerate_file.txt" "w")<as>f<block_start>f.write(file_contents[-1])<block_end>t=AddFilesTask(paths=["degenerate_file.txt"] identifiers=["some_other_identifier"] metadata={"key":"value" "sort_key":20} )<line_sep>t.run_task({})<line_sep>os.remove("degenerate_file.txt")<line_sep>t=GetFilesByQueryTask(query={"metadata->key":"value"} fizzle_degenerate_file_name=<false> sort_key="metadata.sort_key" sort_direction=-1 )<line_sep>t.run_task({})<with_stmt>open("degenerate_file.txt")<as>f<block_start>self.assertEqual(file_contents[0] f.read())<block_end>os.remove("degenerate_file.txt")<block_end><def_stmt>test_addfilesfrompatterntask_run self<block_start>t=AddFilesTask(paths="*.yaml" directory=module_dir)<line_sep>t.run_task({})<line_sep>write_file_contents,_=self.fp.get_file(self.paths[0])<with_stmt>open(self.paths[0])<as>f<block_start>self.assertEqual(write_file_contents f.read().encode())<block_end>del_file_contents,wdoc=self.fp.get_file(self.paths[1])<with_stmt>open(self.paths[1])<as>f<block_start>self.assertEqual(del_file_contents f.read().encode())<block_end><block_end><def_stmt>tearDown self<block_start>self.fp.reset()<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) <import_from_stmt>spack *<class_stmt>RInsight(RPackage)<block_start>"""Easy Access to Model Information for Various Model Objects A tool to provide an easy, intuitive and consistent access to information contained in various R models, like model formulas, model terms, information about random effects, data that was used to fit the model or data from response variables. 'insight' mainly revolves around two types of functions: Functions that find (the names of) information, starting with 'find_', and functions that get the underlying data, starting with 'get_'. The package has a consistent syntax and works with many different model objects, where otherwise functions to access these information are missing."""<line_sep>homepage="https://easystats.github.io/insight/"<line_sep>cran="insight"<line_sep>version('0.14.1' sha256='0e7761997a46ee33039cdeff1779dbc210de3644e4444c6e893e4ef2f12cc129')<line_sep>depends_on('[email protected]:' type=('build' 'run'))<block_end>
model=dict(type='TSN2D' backbone=dict(type='ResNet' pretrained='modelzoo://resnet50' nsegments=8 depth=50 out_indices=(3 ) tsm=<true> bn_eval=<false> partial_bn=<false>) spatial_temporal_module=dict(type='SimpleSpatialModule' spatial_type='avg' spatial_size=7) segmental_consensus=dict(type='SimpleConsensus' consensus_type='avg') cls_head=dict(type='ClsHead' with_avg_pool=<false> temporal_feature_size=1 spatial_feature_size=1 dropout_ratio=0.5 in_channels=2048 num_classes=174))<line_sep>train_cfg=<none><line_sep>test_cfg=<none><line_sep># dataset settings dataset_type='RawFramesDataset'<line_sep>data_root=''<line_sep>data_root_val=''<line_sep>img_norm_cfg=dict(mean=[123.675 116.28 103.53] std=[58.395 57.12 57.375] to_rgb=<true>)<line_sep>data=dict(videos_per_gpu=8 workers_per_gpu=8 train=dict(type=dataset_type ann_file='data/sthv1/train_videofolder.txt' img_prefix=data_root img_norm_cfg=img_norm_cfg num_segments=8 new_length=1 new_step=1 random_shift=<true> modality='RGB' image_tmpl='{:05d}.jpg' img_scale=256 input_size=224 flip_ratio=0.5 resize_keep_ratio=<true> resize_crop=<true> color_jitter=<true> color_space_aug=<true> oversample=<none> max_distort=1 test_mode=<false>) val=dict(type=dataset_type ann_file='data/sthv1/val_videofolder.txt' img_prefix=data_root_val img_norm_cfg=img_norm_cfg num_segments=8 new_length=1 new_step=1 random_shift=<false> modality='RGB' image_tmpl='{:05d}.jpg' img_scale=256 input_size=224 flip_ratio=0 resize_keep_ratio=<true> oversample=<none> test_mode=<false>) test=dict(type=dataset_type ann_file='data/sthv1/val_videofolder.txt' img_prefix=data_root_val img_norm_cfg=img_norm_cfg num_segments=16 new_length=1 new_step=1 random_shift=<false> modality='RGB' image_tmpl='{:05d}.jpg' img_scale=256 input_size=256 flip_ratio=0 resize_keep_ratio=<true> oversample="three_crop" test_mode=<true>))<line_sep># optimizer optimizer=dict(type='SGD' lr=0.01 momentum=0.9 weight_decay=0.0005 nesterov=<true>)<line_sep>optimizer_config=dict(grad_clip=dict(max_norm=20 norm_type=2))<line_sep># learning policy lr_config=dict(policy='step' step=[75 125])<line_sep>checkpoint_config=dict(interval=1)<line_sep>workflow=[('train' 1)]<line_sep># yapf:disable log_config=dict(interval=20 hooks=[dict(type='TextLoggerHook') # dict(type='TensorboardLoggerHook') ])<line_sep># yapf:enable # runtime settings total_epochs=150<line_sep>dist_params=dict(backend='nccl')<line_sep>log_level='INFO'<line_sep>load_from=<none><line_sep>resume_from=<none><line_sep>
<import_stmt>json<import_from_stmt>datetime datetime<import_from_stmt>flask current_app send_from_directory<import_from_stmt>webargs fields<import_from_stmt>webargs.flaskparser use_args FlaskParser<import_from_stmt>. main<import_from_stmt>.. socketio<import_from_stmt>.config ferm_active_sessions_path picoferm_firmware_path MachineType<import_from_stmt>.firmware firmware_filename minimum_firmware firmware_upgrade_required<import_from_stmt>.model PicoFermSession<import_from_stmt>.session_parser active_ferm_sessions<line_sep>arg_parser=FlaskParser()<line_sep># Register: /API/PicoFerm/isRegistered?uid={uid}&token={token} # Response: '#{0}#' where {0} : 1 = Registered, 0 = Not Registered ferm_registered_args={'uid':fields.Str(required=<true>) # 12 character alpha-numeric serial number 'token':fields.Str(required=<true>) # 8 character alpha-numberic number }<line_sep>@main.route('/API/PicoFerm/isRegistered')@use_args(ferm_registered_args location='querystring')<def_stmt>process_ferm_registered args<block_start>uid=args['uid']<if_stmt>uid<not><in>active_ferm_sessions<block_start>active_ferm_sessions[uid]=PicoFermSession()<block_end><return>'#1#'<block_end># Check Firmware: /API/PicoFerm/checkFirmware?uid={UID}&version={VERSION} # Response: '#{0}#' where {0} : 1 = Update Available, 0 = No Updates check_ferm_firmware_args={'uid':fields.Str(required=<true>) # 12 character alpha-numeric serial number 'version':fields.Str(required=<true>) # Current firmware version - i.e. 0.1.11 }<line_sep>@main.route('/API/PicoFerm/checkFirmware')@use_args(check_ferm_firmware_args location='querystring')<def_stmt>process_check_ferm_firmware args<block_start><if_stmt>firmware_upgrade_required(MachineType.PICOFERM args['version'])<block_start><return>'#1#'<block_end><return>'#0#'<block_end># Get Firmware: /API/pico/getFirmware?uid={UID} # Response: RAW Bin File Contents get_firmware_args={'uid':fields.Str(required=<true>) # 12 character alpha-numeric serial number }<line_sep>@main.route('/API/PicoFerm/getFirmwareAddress')@use_args(get_firmware_args location='querystring')<def_stmt>process_get_firmware_address args<block_start>filename=firmware_filename(MachineType.PICOFERM minimum_firmware(MachineType.PICOFERM))<line_sep><return>'#http://picobrew.com/firmware/picoferm/{}#'.format(filename)<block_end># Get Firmware: /firmware/picoferm/<version> # Response: RAW Bin File @main.route('/firmware/picoferm/<file>' methods=['GET'])<def_stmt>process_picoferm_firmware file<block_start>current_app.logger.debug('DEBUG: PicoFerm fetch firmware file={}'.format(file))<line_sep><return>send_from_directory(picoferm_firmware_path() file)<block_end># Get State: /API/PicoFerm/getState?uid={UID} # Response: '#{0}#' where {0} : 2,4 = nothing to do, 10,0 = in progress/send data, 10,16 = in progress/error, 2,16 = complete/stop sending data get_ferm_state_args={'uid':fields.Str(required=<true>) # 12 character alpha-numeric serial number }<line_sep>@main.route('/API/PicoFerm/getState')@use_args(get_ferm_state_args location='querystring')<def_stmt>process_get_ferm_state args<block_start>uid=args['uid']<if_stmt>uid<not><in>active_ferm_sessions<block_start>active_ferm_sessions[uid]=PicoFermSession()<block_end>session=active_ferm_sessions[uid]<if_stmt>session.active<eq><true><block_start><return>'#10,0#'<block_end><elif_stmt>session.uninit<or>session.file<eq><none><block_start><return>'#2,4'<block_end><block_end># LogDataSet: /API/PicoFerm/logDataSet?uid={UID}&rate={RATE}&voltage={VOLTAGE}&data={DATA} # Response: '#{0}#' where {0} : 10,0 = in 
progress/send data, ????? log_ferm_dataset_args={'uid':fields.Str(required=<true>) # 12 character alpha-numeric serial number 'rate':fields.Float(required=<true>) # Rate between samples (minutes) 'voltage':fields.Float(required=<true>) # %0.2f Voltage 'data':fields.Str(required=<true>) # List of dictionary (Temperature (S1), Pressure (S2)): [{"s1":%0.2f,"s2":%0.2f},] }<line_sep>@main.route('/API/PicoFerm/logDataSet')@use_args(log_ferm_dataset_args location='querystring')<def_stmt>process_log_ferm_dataset args<block_start>uid=args['uid']<if_stmt>uid<not><in>active_ferm_sessions<or>active_ferm_sessions[uid].uninit<block_start>create_new_session(uid)<block_end>data=json.loads(args['data'])<line_sep>time_delta=args['rate']<times>60<times>1000<line_sep>time=((datetime.utcnow()-datetime(1970 1 1)).total_seconds()<times>1000)-(time_delta<times>(len(data)-1))<line_sep>session_data=[]<line_sep>log_data=''<for_stmt>d data<block_start>point={'time':time 'temp':d['s1'] 'pres':d['s2'] }<line_sep>session_data.append(point)<line_sep>time=time+time_delta<line_sep>log_data<augadd>'\n\t{},'.format(json.dumps(point))<block_end>active_ferm_sessions[uid].data.extend(session_data)<line_sep>active_ferm_sessions[uid].voltage=str(args['voltage'])+'V'<line_sep>graph_update=json.dumps({'voltage':args['voltage'] 'data':session_data})<line_sep>socketio.emit('ferm_session_update|{}'.format(args['uid']) graph_update)<line_sep># end fermentation only when user specifies fermentation is complete <if_stmt>active_ferm_sessions[uid].uninit<eq><false><and>active_ferm_sessions[uid].active<eq><false><block_start>active_ferm_sessions[uid].file.write('{}\n\n]'.format(log_data[:-2]))<line_sep>active_ferm_sessions[uid].cleanup()<line_sep># The server makes a determination when fermenting is done based on the datalog after it sends '2,4' <return>'#2,4#'<block_end><else_stmt><block_start>active_ferm_sessions[uid].active=<true><line_sep>active_ferm_sessions[uid].file.write(log_data)<line_sep>active_ferm_sessions[uid].file.flush()<line_sep># Errors like '10,16' send data but mark data error. # '10,0' tells the PicoFerm to continue to send data. <return>'#10,0#'<block_end><block_end># -------- Utility -------- <def_stmt>create_new_session uid<block_start><if_stmt>uid<not><in>active_ferm_sessions<block_start>active_ferm_sessions[uid]=PicoFermSession()<block_end>active_ferm_sessions[uid].uninit=<false><line_sep>active_ferm_sessions[uid].start_time=datetime.now()# Not now, but X interval seconds ago active_ferm_sessions[uid].filepath=ferm_active_sessions_path().joinpath('{0}#{1}.json'.format(active_ferm_sessions[uid].start_time.strftime('%Y%m%d_%H%M%S') uid))<line_sep>active_ferm_sessions[uid].file=open(active_ferm_sessions[uid].filepath 'w')<line_sep>active_ferm_sessions[uid].file.write('[')<block_end>
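# Hedged client sketch (not part of the server): how a PicoFerm-style client might call the registration and state endpoints defined above; the base URL, uid and token values are assumptions, and the requests library is used only for illustration. <def_stmt>example_ferm_client base_url<block_start><import_stmt>requests<line_sep>r=requests.get(base_url+"/API/PicoFerm/isRegistered" params={"uid":"FERM00000001" "token":"abcd1234"})<line_sep>print(r.text)<line_sep>r=requests.get(base_url+"/API/PicoFerm/getState" params={"uid":"FERM00000001"})<line_sep>print(r.text)<block_end>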
""" Vtrace notitifers base classes and examples Vtrace supports the idea of callback notifiers which get called whenever particular events occur in the target process. Notifiers may be registered to recieve a callback on any of the vtrace.NOTIFY_FOO events from vtrace. One notifier *may* be registered with more than one trace, as the "notify" method is passed a reference to the trace for which an event has occured... """<line_sep># Copyright (C) 2007 Invisigoth - See LICENSE file for details <import_stmt>logging<import_stmt>traceback<import_stmt>vtrace<line_sep>logger=logging.getLogger(__name__)<class_stmt>Notifier(object)<block_start>""" The top level example notifier... Anything which registers itself for trace events or tracegroup events should implement the notify method as shown here. """<def_stmt>__init__ self<block_start>""" All extenders *must* call this. Mostly because all the goop necessary for the remote debugging stuff... (if notifier is instantiated on server, all is well, if it's on the client it needs a proxy...) """<line_sep><pass><block_end><def_stmt>handleEvent self event trace<block_start>""" An "internal" handler so if we need to do something from an API perspective before calling the notify method we can have a good "all at once" hook """<line_sep>self.notify(event trace)<block_end><def_stmt>notify self event trace<block_start>logger.info("Got event: %d from pid %d" event trace.getPid())<block_end><block_end><class_stmt>VerboseNotifier(Notifier)<block_start><def_stmt>notify self event trace<block_start>logger.info("PID %d - ThreadID (%d) got" trace.getPid() trace.getMeta("ThreadId"))<if_stmt>event<eq>vtrace.NOTIFY_ALL<block_start>("WTF, how did we get a vtrace.NOTIFY_ALL event?!?!")<block_end><elif_stmt>event<eq>vtrace.NOTIFY_SIGNAL<block_start>signo=trace.getCurrentSignal()<line_sep>("vtrace.NOTIFY_SIGNAL %d (0x%08x)"%(signo signo))<if_stmt>trace.getMeta("Platform")<eq>"windows"<block_start>logger.info(repr(trace.getMeta("Win32Event")))<block_end><block_end><elif_stmt>event<eq>vtrace.NOTIFY_BREAK<block_start>logger.info("vtrace.NOTIFY_BREAK")<line_sep>logger.info("\tIP: 0x%08x" trace.getProgramCounter())<block_end><elif_stmt>event<eq>vtrace.NOTIFY_SYSCALL<block_start>logger.info("vtrace.NOTIFY_SYSCALL")<block_end><elif_stmt>event<eq>vtrace.NOTIFY_CONTINUE<block_start>logger.info("vtrace.NOTIFY_CONTINUE")<block_end><elif_stmt>event<eq>vtrace.NOTIFY_EXIT<block_start>logger.info("vtrace.NOTIFY_EXIT")<line_sep>logger.info("\tExitCode: %d" trace.getMeta("ExitCode"))<block_end><elif_stmt>event<eq>vtrace.NOTIFY_ATTACH<block_start>logger.info("vtrace.NOTIFY_ATTACH")<block_end><elif_stmt>event<eq>vtrace.NOTIFY_DETACH<block_start>logger.info("vtrace.NOTIFY_DETACH")<block_end><elif_stmt>event<eq>vtrace.NOTIFY_LOAD_LIBRARY<block_start>logger.info("vtrace.NOTIFY_LOAD_LIBRARY")<line_sep>logger.info("\tLoaded library %s" trace.getMeta('LatestLibrary'))<block_end><elif_stmt>event<eq>vtrace.NOTIFY_UNLOAD_LIBRARY<block_start>logger.info("vtrace.NOTIFY_UNLOAD_LIBRARY")<block_end><elif_stmt>event<eq>vtrace.NOTIFY_CREATE_THREAD<block_start>logger.info("vtrace.NOTIFY_CREATE_THREAD")<line_sep>logger.info("\tNew thread - ThreadID: %d" trace.getMeta("ThreadId"))<block_end><elif_stmt>event<eq>vtrace.NOTIFY_EXIT_THREAD<block_start>logger.info("vtrace.NOTIFY_EXIT_THREAD")<line_sep>logger.info("Thread exited - ThreadID: %d" trace.getMeta("ExitThread" 
-1))<block_end><elif_stmt>event<eq>vtrace.NOTIFY_STEP<block_start>logger.info("vtrace.NOTIFY_STEP")<block_end><else_stmt><block_start>logger.warning("Unhandled vtrace event type of: %d" event)<block_end><block_end><block_end><class_stmt>DistributedNotifier(Notifier)<block_start>""" A notifier which will distributed notifications out to locally registered notifiers so that remote tracer's notifier callbacks only require once across the wire. """<line_sep># NOTE: once you turn on vtrace.NOTIFY_ALL it can't be turned back off yet. <def_stmt>__init__ self<block_start>Notifier.__init__(self)<line_sep>self.shared=<false><line_sep>self.events=[]<line_sep>self.notifiers={}<for_stmt>i range(vtrace.NOTIFY_MAX)<block_start>self.notifiers[i]=[]<block_end><block_end><def_stmt>notify self event trace<block_start>self.fireNotifiers(event trace)<block_end><def_stmt>fireNotifiers self event trace<block_start>""" Fire all our registerd local-notifiers """<line_sep>nlist=self.notifiers.get(vtrace.NOTIFY_ALL [])<for_stmt>notifier nlist<block_start><try_stmt><block_start>notifier.handleEvent(event trace)<block_end><except_stmt>Exception<block_start>logger.error("Exception in notifier:\n%s" traceback.format_exc())<block_end><block_end>nlist=self.notifiers.get(event [])<for_stmt>notifier nlist<block_start><try_stmt><block_start>notifier.handleEvent(event trace)<block_end><except_stmt>Exception<block_start>logger.error("Exception in notifier:\n%s" traceback.format_exc())<block_end><block_end><block_end><def_stmt>registerNotifier self event notif<block_start>""" Register a sub-notifier to get the remote callback's via our local delivery. """<line_sep>nlist=self.notifiers.get(event)<line_sep>nlist.append(notif)<block_end><def_stmt>deregisterNotifier self event notif<block_start>nlist=self.notifiers.get(event)<line_sep>nlist.remove(notif)<block_end><block_end>
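# Hedged usage sketch (not part of the original module): attaching a VerboseNotifier to a freshly created trace so every event is logged; this assumes the usual vtrace Trace entry points (getTrace, registerNotifier, attach, run) and a caller-supplied pid. <def_stmt>attach_verbose pid<block_start>trace=vtrace.getTrace()<line_sep>trace.registerNotifier(vtrace.NOTIFY_ALL VerboseNotifier())<line_sep>trace.attach(pid)<line_sep>trace.run()<line_sep><return>trace<block_end>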
# Copyright (c) 2016-2020 <NAME> <<EMAIL>> # Copyright (c) 2016-2019 <NAME> <<EMAIL>> # Copyright (c) 2016 <NAME> <<EMAIL>> # Copyright (c) 2016 <NAME> <<EMAIL>> # Copyright (c) 2016 <NAME> <<EMAIL>> # Copyright (c) 2017, 2020 hippo91 <<EMAIL>> # Copyright (c) 2017 Mitar <<EMAIL>> # Copyright (c) 2018, 2020 <NAME> <<EMAIL>> # Copyright (c) 2018 <NAME> <<EMAIL>> # Copyright (c) 2018 ssolanki <<EMAIL>> # Copyright (c) 2018 <NAME> <<EMAIL>> # Copyright (c) 2018 <NAME> <<EMAIL>> # Copyright (c) 2019 <NAME> <<EMAIL>> # Copyright (c) 2019 <NAME> <<EMAIL>> # Copyright (c) 2019 <NAME> <<EMAIL>> # Copyright (c) 2021 <NAME> <<EMAIL>> # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/master/LICENSE """Utility methods for docstring checking."""<import_stmt>re<import_from_stmt>typing List<import_stmt>astroid<import_from_stmt>pylint.checkers utils<def_stmt>space_indentation s<block_start>"""The number of leading spaces in a string :param str s: input string :rtype: int :return: number of leading spaces """<line_sep><return>len(s)-len(s.lstrip(" "))<block_end><def_stmt>get_setters_property_name node<block_start>"""Get the name of the property that the given node is a setter for. :param node: The node to get the property name for. :type node: str :rtype: str or None :returns: The name of the property that the node is a setter for, or None if one could not be found. """<line_sep>decorators=node.decorators.nodes<if>node.decorators<else>[]<for_stmt>decorator decorators<block_start><if_stmt>(isinstance(decorator astroid.Attribute)<and>decorator.attrname<eq>"setter"<and>isinstance(decorator.expr astroid.Name))<block_start><return>decorator.expr.name<block_end><block_end><return><none><block_end><def_stmt>get_setters_property node<block_start>"""Get the property node for the given setter node. :param node: The node to get the property for. :type node: astroid.FunctionDef :rtype: astroid.FunctionDef or None :returns: The node relating to the property of the given setter node, or None if one could not be found. """<line_sep>property_=<none><line_sep>property_name=get_setters_property_name(node)<line_sep>class_node=utils.node_frame_class(node)<if_stmt>property_name<and>class_node<block_start>class_attrs=class_node.getattr(node.name)<for_stmt>attr class_attrs<block_start><if_stmt>utils.decorated_with_property(attr)<block_start>property_=attr<line_sep><break><block_end><block_end><block_end><return>property_<block_end><def_stmt>returns_something return_node<block_start>"""Check if a return node returns a value other than None. :param return_node: The return node to check. :type return_node: astroid.Return :rtype: bool :return: True if the return node returns a value other than None, False otherwise. 
"""<line_sep>returns=return_node.value<if_stmt>returns<is><none><block_start><return><false><block_end><return><not>(isinstance(returns astroid.Const)<and>returns.value<is><none>)<block_end><def_stmt>_get_raise_target node<block_start><if_stmt>isinstance(node.exc astroid.Call)<block_start>func=node.exc.func<if_stmt>isinstance(func (astroid.Name astroid.Attribute))<block_start><return>utils.safe_infer(func)<block_end><block_end><return><none><block_end><def_stmt>_split_multiple_exc_types target:str<arrow>List[str]<block_start>delimiters=r"(\s*,(?:\s*or\s)?\s*|\s+or\s+)"<line_sep><return>re.split(delimiters target)<block_end><def_stmt>possible_exc_types node<block_start>""" Gets all of the possible raised exception types for the given raise node. .. note:: Caught exception types are ignored. :param node: The raise node to find exception types for. :type node: astroid.node_classes.NodeNG :returns: A list of exception types possibly raised by :param:`node`. :rtype: set(str) """<line_sep>excs=[]<if_stmt>isinstance(node.exc astroid.Name)<block_start>inferred=utils.safe_infer(node.exc)<if_stmt>inferred<block_start>excs=[inferred.name]<block_end><block_end><elif_stmt>node.exc<is><none><block_start>handler=node.parent<while_stmt>handler<and><not>isinstance(handler astroid.ExceptHandler)<block_start>handler=handler.parent<block_end><if_stmt>handler<and>handler.type<block_start>inferred_excs=astroid.unpack_infer(handler.type)<line_sep>excs=(exc.name<for>exc inferred_excs<if>exc<is><not>astroid.Uninferable)<block_end><block_end><else_stmt><block_start>target=_get_raise_target(node)<if_stmt>isinstance(target astroid.ClassDef)<block_start>excs=[target.name]<block_end><elif_stmt>isinstance(target astroid.FunctionDef)<block_start><for_stmt>ret target.nodes_of_class(astroid.Return)<block_start><if_stmt>ret.frame()<ne>target# return from inner function - ignore it <block_start><continue><block_end>val=utils.safe_infer(ret.value)<if_stmt>(val<and>isinstance(val (astroid.Instance astroid.ClassDef))<and>utils.inherit_from_std_ex(val))<block_start>excs.append(val.name)<block_end><block_end><block_end><block_end><try_stmt><block_start><return>{exc<for>exc excs<if><not>utils.node_ignores_exception(node exc)}<block_end><except_stmt>astroid.InferenceError<block_start><return>set()<block_end><block_end><def_stmt>docstringify docstring default_type="default"<block_start><for_stmt>docstring_type [SphinxDocstring EpytextDocstring GoogleDocstring NumpyDocstring ]<block_start>instance=docstring_type(docstring)<if_stmt>instance.is_valid()<block_start><return>instance<block_end><block_end>docstring_type=DOCSTRING_TYPES.get(default_type Docstring)<line_sep><return>docstring_type(docstring)<block_end><class_stmt>Docstring<block_start>re_for_parameters_see=re.compile(r""" For\s+the\s+(other)?\s*parameters\s*,\s+see """ re.X|re.S )<line_sep>supports_yields:bool=<false><line_sep>"""True if the docstring supports a "yield" section. False if the docstring uses the returns section to document generators. 
"""<line_sep># These methods are designed to be overridden # pylint: disable=no-self-use <def_stmt>__init__ self doc<block_start>doc=doc<or>""<line_sep>self.doc=doc.expandtabs()<block_end><def_stmt>is_valid self<block_start><return><false><block_end><def_stmt>exceptions self<block_start><return>set()<block_end><def_stmt>has_params self<block_start><return><false><block_end><def_stmt>has_returns self<block_start><return><false><block_end><def_stmt>has_rtype self<block_start><return><false><block_end><def_stmt>has_property_returns self<block_start><return><false><block_end><def_stmt>has_property_type self<block_start><return><false><block_end><def_stmt>has_yields self<block_start><return><false><block_end><def_stmt>has_yields_type self<block_start><return><false><block_end><def_stmt>match_param_docs self<block_start><return>set() set()<block_end><def_stmt>params_documented_elsewhere self<block_start><return>self.re_for_parameters_see.search(self.doc)<is><not><none><block_end><block_end><class_stmt>SphinxDocstring(Docstring)<block_start>re_type=r""" [~!.]? # Optional link style prefix \w(?:\w|\.[^\.])* # Valid python name """<line_sep>re_simple_container_type=r""" {type} # a container type [\(\[] [^\n\s]+ [\)\]] # with the contents of the container """.format(type=re_type)<line_sep>re_multiple_simple_type=r""" (?:{container_type}|{type}) (?:(?:\s+(?:of|or)\s+|\s*,\s*)(?:{container_type}|{type}))* """.format(type=re_type container_type=re_simple_container_type)<line_sep>re_xref=r""" (?::\w+:)? # optional tag `{}` # what to reference """.format(re_type)<line_sep>re_param_raw=r""" : # initial colon (?: # Sphinx keywords param|parameter| arg|argument| key|keyword ) \s+ # whitespace (?: # optional type declaration ({type}|{container_type}) \s+ )? (\w+) # Parameter name \s* # whitespace : # final colon """.format(type=re_type container_type=re_simple_container_type)<line_sep>re_param_in_docstring=re.compile(re_param_raw re.X|re.S)<line_sep>re_type_raw=r""" :type # Sphinx keyword \s+ # whitespace ({type}) # Parameter name \s* # whitespace : # final colon """.format(type=re_multiple_simple_type)<line_sep>re_type_in_docstring=re.compile(re_type_raw re.X|re.S)<line_sep>re_property_type_raw=r""" :type: # Sphinx keyword \s+ # whitespace {type} # type declaration """.format(type=re_multiple_simple_type)<line_sep>re_property_type_in_docstring=re.compile(re_property_type_raw re.X|re.S)<line_sep>re_raise_raw=r""" : # initial colon (?: # Sphinx keyword raises?| except|exception ) \s+ # whitespace ({type}) # exception type \s* # whitespace : # final colon """.format(type=re_multiple_simple_type)<line_sep>re_raise_in_docstring=re.compile(re_raise_raw re.X|re.S)<line_sep>re_rtype_in_docstring=re.compile(r":rtype:")<line_sep>re_returns_in_docstring=re.compile(r":returns?:")<line_sep>supports_yields=<false><def_stmt>is_valid self<block_start><return>bool(self.re_param_in_docstring.search(self.doc)<or>self.re_raise_in_docstring.search(self.doc)<or>self.re_rtype_in_docstring.search(self.doc)<or>self.re_returns_in_docstring.search(self.doc)<or>self.re_property_type_in_docstring.search(self.doc))<block_end><def_stmt>exceptions self<block_start>types=set()<for_stmt>match re.finditer(self.re_raise_in_docstring self.doc)<block_start>raise_type=match.group(1)<line_sep>types.update(_split_multiple_exc_types(raise_type))<block_end><return>types<block_end><def_stmt>has_params 
self<block_start><if_stmt><not>self.doc<block_start><return><false><block_end><return>self.re_param_in_docstring.search(self.doc)<is><not><none><block_end><def_stmt>has_returns self<block_start><if_stmt><not>self.doc<block_start><return><false><block_end><return>bool(self.re_returns_in_docstring.search(self.doc))<block_end><def_stmt>has_rtype self<block_start><if_stmt><not>self.doc<block_start><return><false><block_end><return>bool(self.re_rtype_in_docstring.search(self.doc))<block_end><def_stmt>has_property_returns self<block_start><if_stmt><not>self.doc<block_start><return><false><block_end># The summary line is the return doc, # so the first line must not be a known directive. <return><not>self.doc.lstrip().startswith(":")<block_end><def_stmt>has_property_type self<block_start><if_stmt><not>self.doc<block_start><return><false><block_end><return>bool(self.re_property_type_in_docstring.search(self.doc))<block_end><def_stmt>match_param_docs self<block_start>params_with_doc=set()<line_sep>params_with_type=set()<for_stmt>match re.finditer(self.re_param_in_docstring self.doc)<block_start>name=match.group(2)<line_sep>params_with_doc.add(name)<line_sep>param_type=match.group(1)<if_stmt>param_type<is><not><none><block_start>params_with_type.add(name)<block_end><block_end>params_with_type.update(re.findall(self.re_type_in_docstring self.doc))<line_sep><return>params_with_doc params_with_type<block_end><block_end><class_stmt>EpytextDocstring(SphinxDocstring)<block_start>""" Epytext is similar to Sphinx. See the docs: http://epydoc.sourceforge.net/epytext.html http://epydoc.sourceforge.net/fields.html#fields It's used in PyCharm: https://www.jetbrains.com/help/pycharm/2016.1/creating-documentation-comments.html#d848203e314 https://www.jetbrains.com/help/pycharm/2016.1/using-docstrings-to-specify-types.html """<line_sep>re_param_in_docstring=re.compile(SphinxDocstring.re_param_raw.replace(":" "@" 1) re.X|re.S)<line_sep>re_type_in_docstring=re.compile(SphinxDocstring.re_type_raw.replace(":" "@" 1) re.X|re.S)<line_sep>re_property_type_in_docstring=re.compile(SphinxDocstring.re_property_type_raw.replace(":" "@" 1) re.X|re.S)<line_sep>re_raise_in_docstring=re.compile(SphinxDocstring.re_raise_raw.replace(":" "@" 1) re.X|re.S)<line_sep>re_rtype_in_docstring=re.compile(r""" @ # initial "at" symbol (?: # Epytext keyword rtype|returntype ) : # final colon """ re.X|re.S )<line_sep>re_returns_in_docstring=re.compile(r"@returns?:")<def_stmt>has_property_returns self<block_start><if_stmt><not>self.doc<block_start><return><false><block_end># If this is a property docstring, the summary is the return doc. <if_stmt>self.has_property_type()# The summary line is the return doc, # so the first line must not be a known directive. 
<block_start><return><not>self.doc.lstrip().startswith("@")<block_end><return><false><block_end><block_end><class_stmt>GoogleDocstring(Docstring)<block_start>re_type=SphinxDocstring.re_type<line_sep>re_xref=SphinxDocstring.re_xref<line_sep>re_container_type=r""" (?:{type}|{xref}) # a container type [\(\[] [^\n]+ [\)\]] # with the contents of the container """.format(type=re_type xref=re_xref)<line_sep>re_multiple_type=r""" (?:{container_type}|{type}|{xref}) (?:(?:\s+(?:of|or)\s+|\s*,\s*)(?:{container_type}|{type}|{xref}))* """.format(type=re_type xref=re_xref container_type=re_container_type)<line_sep>_re_section_template=r""" ^([ ]*) {0} \s*: \s*$ # Google parameter header ( .* ) # section """<line_sep>re_param_section=re.compile(_re_section_template.format(r"(?:Args|Arguments|Parameters)") re.X|re.S|re.M )<line_sep>re_keyword_param_section=re.compile(_re_section_template.format(r"Keyword\s(?:Args|Arguments|Parameters)") re.X|re.S|re.M )<line_sep>re_param_line=re.compile(r""" \s* \*{{0,2}}(\w+) # identifier potentially with asterisks \s* ( [(] {type} (?:,\s+optional)? [)] )? \s* : # optional type declaration \s* (.*) # beginning of optional description """.format(type=re_multiple_type) re.X|re.S|re.M )<line_sep>re_raise_section=re.compile(_re_section_template.format(r"Raises") re.X|re.S|re.M)<line_sep>re_raise_line=re.compile(r""" \s* ({type}) \s* : # identifier \s* (.*) # beginning of optional description """.format(type=re_multiple_type) re.X|re.S|re.M )<line_sep>re_returns_section=re.compile(_re_section_template.format(r"Returns?") re.X|re.S|re.M)<line_sep>re_returns_line=re.compile(r""" \s* ({type}:)? # identifier \s* (.*) # beginning of description """.format(type=re_multiple_type) re.X|re.S|re.M )<line_sep>re_property_returns_line=re.compile(r""" ^{type}: # indentifier \s* (.*) # Summary line / description """.format(type=re_multiple_type) re.X|re.S|re.M )<line_sep>re_yields_section=re.compile(_re_section_template.format(r"Yields?") re.X|re.S|re.M)<line_sep>re_yields_line=re_returns_line<line_sep>supports_yields=<true><def_stmt>is_valid self<block_start><return>bool(self.re_param_section.search(self.doc)<or>self.re_raise_section.search(self.doc)<or>self.re_returns_section.search(self.doc)<or>self.re_yields_section.search(self.doc)<or>self.re_property_returns_line.search(self._first_line()))<block_end><def_stmt>has_params self<block_start><if_stmt><not>self.doc<block_start><return><false><block_end><return>self.re_param_section.search(self.doc)<is><not><none><block_end><def_stmt>has_returns self<block_start><if_stmt><not>self.doc<block_start><return><false><block_end>entries=self._parse_section(self.re_returns_section)<for_stmt>entry entries<block_start>match=self.re_returns_line.match(entry)<if_stmt><not>match<block_start><continue><block_end>return_desc=match.group(2)<if_stmt>return_desc<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>has_rtype self<block_start><if_stmt><not>self.doc<block_start><return><false><block_end>entries=self._parse_section(self.re_returns_section)<for_stmt>entry entries<block_start>match=self.re_returns_line.match(entry)<if_stmt><not>match<block_start><continue><block_end>return_type=match.group(1)<if_stmt>return_type<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>has_property_returns self# The summary line is the return doc, # so the first line must not be a known directive. 
<block_start>first_line=self._first_line()<line_sep><return><not>bool(self.re_param_section.search(first_line)<or>self.re_raise_section.search(first_line)<or>self.re_returns_section.search(first_line)<or>self.re_yields_section.search(first_line))<block_end><def_stmt>has_property_type self<block_start><if_stmt><not>self.doc<block_start><return><false><block_end><return>bool(self.re_property_returns_line.match(self._first_line()))<block_end><def_stmt>has_yields self<block_start><if_stmt><not>self.doc<block_start><return><false><block_end>entries=self._parse_section(self.re_yields_section)<for_stmt>entry entries<block_start>match=self.re_yields_line.match(entry)<if_stmt><not>match<block_start><continue><block_end>yield_desc=match.group(2)<if_stmt>yield_desc<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>has_yields_type self<block_start><if_stmt><not>self.doc<block_start><return><false><block_end>entries=self._parse_section(self.re_yields_section)<for_stmt>entry entries<block_start>match=self.re_yields_line.match(entry)<if_stmt><not>match<block_start><continue><block_end>yield_type=match.group(1)<if_stmt>yield_type<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>exceptions self<block_start>types=set()<line_sep>entries=self._parse_section(self.re_raise_section)<for_stmt>entry entries<block_start>match=self.re_raise_line.match(entry)<if_stmt><not>match<block_start><continue><block_end>exc_type=match.group(1)<line_sep>exc_desc=match.group(2)<if_stmt>exc_desc<block_start>types.update(_split_multiple_exc_types(exc_type))<block_end><block_end><return>types<block_end><def_stmt>match_param_docs self<block_start>params_with_doc=set()<line_sep>params_with_type=set()<line_sep>entries=self._parse_section(self.re_param_section)<line_sep>entries.extend(self._parse_section(self.re_keyword_param_section))<for_stmt>entry entries<block_start>match=self.re_param_line.match(entry)<if_stmt><not>match<block_start><continue><block_end>param_name=match.group(1)<line_sep>param_type=match.group(2)<line_sep>param_desc=match.group(3)<if_stmt>param_type<block_start>params_with_type.add(param_name)<block_end><if_stmt>param_desc<block_start>params_with_doc.add(param_name)<block_end><block_end><return>params_with_doc params_with_type<block_end><def_stmt>_first_line self<block_start><return>self.doc.lstrip().split("\n" 1)[0]<block_end>@staticmethod<def_stmt>min_section_indent section_match<block_start><return>len(section_match.group(1))+1<block_end>@staticmethod<def_stmt>_is_section_header _# Google parsing does not need to detect section headers, # because it works off of indentation level only <block_start><return><false><block_end><def_stmt>_parse_section self section_re<block_start>section_match=section_re.search(self.doc)<if_stmt>section_match<is><none><block_start><return>[]<block_end>min_indentation=self.min_section_indent(section_match)<line_sep>entries=[]<line_sep>entry=[]<line_sep>is_first=<true><for_stmt>line section_match.group(2).splitlines()<block_start><if_stmt><not>line.strip()<block_start><continue><block_end>indentation=space_indentation(line)<if_stmt>indentation<l>min_indentation<block_start><break><block_end># The first line after the header defines the minimum # indentation. 
<if_stmt>is_first<block_start>min_indentation=indentation<line_sep>is_first=<false><block_end><if_stmt>indentation<eq>min_indentation<block_start><if_stmt>self._is_section_header(line)<block_start><break><block_end># Lines with minimum indentation must contain the beginning # of a new parameter documentation. <if_stmt>entry<block_start>entries.append("\n".join(entry))<line_sep>entry=[]<block_end><block_end>entry.append(line)<block_end><if_stmt>entry<block_start>entries.append("\n".join(entry))<block_end><return>entries<block_end><block_end><class_stmt>NumpyDocstring(GoogleDocstring)<block_start>_re_section_template=r""" ^([ ]*) {0} \s*?$ # Numpy parameters header \s* [-=]+ \s*?$ # underline ( .* ) # section """<line_sep>re_param_section=re.compile(_re_section_template.format(r"(?:Args|Arguments|Parameters)") re.X|re.S|re.M )<line_sep>re_param_line=re.compile(r""" \s* (\w+) # identifier \s* : \s* (?:({type})(?:,\s+optional)?)? # optional type declaration \n # description starts on a new line \s* (.*) # description """.format(type=GoogleDocstring.re_multiple_type) re.X|re.S )<line_sep>re_raise_section=re.compile(_re_section_template.format(r"Raises") re.X|re.S|re.M)<line_sep>re_raise_line=re.compile(r""" \s* ({type})$ # type declaration \s* (.*) # optional description """.format(type=GoogleDocstring.re_type) re.X|re.S|re.M )<line_sep>re_returns_section=re.compile(_re_section_template.format(r"Returns?") re.X|re.S|re.M)<line_sep>re_returns_line=re.compile(r""" \s* (?:\w+\s+:\s+)? # optional name ({type})$ # type declaration \s* (.*) # optional description """.format(type=GoogleDocstring.re_multiple_type) re.X|re.S|re.M )<line_sep>re_yields_section=re.compile(_re_section_template.format(r"Yields?") re.X|re.S|re.M)<line_sep>re_yields_line=re_returns_line<line_sep>supports_yields=<true><line_sep>@staticmethod<def_stmt>min_section_indent section_match<block_start><return>len(section_match.group(1))<block_end>@staticmethod<def_stmt>_is_section_header line<block_start><return>bool(re.match(r"\s*-+$" line))<block_end><block_end>DOCSTRING_TYPES={"sphinx":SphinxDocstring "epytext":EpytextDocstring "google":GoogleDocstring "numpy":NumpyDocstring "default":Docstring }<line_sep>"""A map of the name of the docstring type to its class. :type: dict(str, type) """<line_sep>
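# Hedged usage sketch (not part of the original module): docstringify() above can be pointed at a raw docstring to detect its style and query it; the sample text and the __main__ guard are assumptions made purely for illustration. <if_stmt>__name__<eq>"__main__"<block_start>sample=":param int left: first operand\n:param int right: second operand\n:returns: the sum\n:rtype: int"<line_sep>doc=docstringify(sample)<line_sep>print(type(doc).__name__ doc.has_params() doc.has_returns() doc.has_rtype())<block_end>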
# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_stmt>functools<import_stmt>logging<import_stmt>os<import_stmt>socket<import_stmt>urllib.error<import_from_stmt>urllib.parse parse_qs quote urlencode urlparse urlunparse<import_stmt>certifi<import_stmt>urllib3<import_from_stmt>esrally exceptions<import_from_stmt>esrally.utils console convert<line_sep>__HTTP=<none><def_stmt>init <block_start>logger=logging.getLogger(__name__)<line_sep><global>__HTTP<line_sep>proxy_url=os.getenv("http_proxy")<if_stmt>proxy_url<and>len(proxy_url)<g>0<block_start>parsed_url=urllib3.util.parse_url(proxy_url)<line_sep>logger.info("Connecting via proxy URL [%s] to the Internet (picked up from the env variable [http_proxy])." proxy_url)<line_sep>__HTTP=urllib3.ProxyManager(proxy_url cert_reqs="CERT_REQUIRED" ca_certs=certifi.where() # appropriate headers will only be set if there is auth info proxy_headers=urllib3.make_headers(proxy_basic_auth=parsed_url.auth) )<block_end><else_stmt><block_start>logger.info("Connecting directly to the Internet (no proxy support).")<line_sep>__HTTP=urllib3.PoolManager(cert_reqs="CERT_REQUIRED" ca_certs=certifi.where())<block_end><block_end><class_stmt>Progress<block_start><def_stmt>__init__ self msg accuracy=0<block_start>self.p=console.progress()<line_sep># if we don't show a decimal sign, the maximum width is 3 (max value is 100 (%)). Else its 3 + 1 (for the decimal point) # the accuracy that the user requested. total_width=3<if>accuracy<eq>0<else>4+accuracy<line_sep># sample formatting string: [%5.1f%%] for an accuracy of 1 self.percent_format="[%%%d.%df%%%%]"%(total_width accuracy)<line_sep>self.msg=msg<block_end><def_stmt>__call__ self bytes_read bytes_total<block_start><if_stmt>bytes_total<block_start>completed=bytes_read/bytes_total<line_sep>total_as_mb=convert.bytes_to_human_string(bytes_total)<line_sep>self.p.print("%s (%s total size)"%(self.msg total_as_mb) self.percent_format%(completed<times>100))<block_end><else_stmt><block_start>self.p.print(self.msg ".")<block_end><block_end><def_stmt>finish self<block_start>self.p.finish()<block_end><block_end><def_stmt>_fake_import_boto3 # This function only exists to be mocked in tests to raise an ImportError, in # order to simulate the absence of boto3 <block_start><pass><block_end><def_stmt>_download_from_s3_bucket bucket_name bucket_path local_path expected_size_in_bytes=<none> progress_indicator=<none># pylint: disable=import-outside-toplevel # lazily initialize S3 support - it might not be available <block_start><try_stmt><block_start>_fake_import_boto3()<import_stmt>boto3.s3.transfer<block_end><except_stmt>ImportError<block_start>console.error("S3 support is optional. 
Install it with `python -m pip install esrally[s3]`")<line_sep><raise><block_end><class_stmt>S3ProgressAdapter<block_start><def_stmt>__init__ self size progress<block_start>self._expected_size_in_bytes=size<line_sep>self._progress=progress<line_sep>self._bytes_read=0<block_end><def_stmt>__call__ self bytes_amount<block_start>self._bytes_read<augadd>bytes_amount<line_sep>self._progress(self._bytes_read self._expected_size_in_bytes)<block_end><block_end>s3=boto3.resource("s3")<line_sep>bucket=s3.Bucket(bucket_name)<if_stmt>expected_size_in_bytes<is><none><block_start>expected_size_in_bytes=bucket.Object(bucket_path).content_length<block_end>progress_callback=S3ProgressAdapter(expected_size_in_bytes progress_indicator)<if>progress_indicator<else><none><line_sep>bucket.download_file(bucket_path local_path Callback=progress_callback Config=boto3.s3.transfer.TransferConfig(use_threads=<false>))<block_end><def_stmt>_build_gcs_object_url bucket_name bucket_path# / and other special characters must be urlencoded in bucket and object names # ref: https://cloud.google.com/storage/docs/request-endpoints#encoding <block_start><return>functools.reduce(urllib.parse.urljoin ["https://storage.googleapis.com/storage/v1/b/" f"{quote(bucket_name.strip('/') safe='')}/" "o/" f"{quote(bucket_path.strip('/') safe='')}" "?alt=media" ] )<block_end><def_stmt>_download_from_gcs_bucket bucket_name bucket_path local_path expected_size_in_bytes=<none> progress_indicator=<none># pylint: disable=import-outside-toplevel # lazily initialize Google Cloud Storage support - we might not need it <block_start><import_stmt>google.auth<import_stmt>google.auth.transport.requests<as>tr_requests<import_stmt>google.oauth2.credentials<line_sep># Using Google Resumable Media as the standard storage library doesn't support progress # (https://github.com/googleapis/python-storage/issues/27) <import_from_stmt>google.resumable_media.requests ChunkedDownload<line_sep>ro_scope="https://www.googleapis.com/auth/devstorage.read_only"<line_sep>access_token=os.environ.get("GOOGLE_AUTH_TOKEN")<if_stmt>access_token<block_start>credentials=google.oauth2.credentials.Credentials(token=access_token scopes=(ro_scope ))<block_end><else_stmt># https://google-auth.readthedocs.io/en/latest/user-guide.html <block_start>credentials,_=google.auth.default(scopes=(ro_scope ))<block_end>transport=tr_requests.AuthorizedSession(credentials)<line_sep>chunk_size=50<times>1024<times>1024# 50MB <with_stmt>open(local_path "wb")<as>local_fp<block_start>media_url=_build_gcs_object_url(bucket_name bucket_path)<line_sep>download=ChunkedDownload(media_url chunk_size local_fp)<line_sep># allow us to calculate the total bytes download.consume_next_chunk(transport)<if_stmt><not>expected_size_in_bytes<block_start>expected_size_in_bytes=download.total_bytes<block_end><while_stmt><not>download.finished<block_start><if_stmt>progress_indicator<and>download.bytes_downloaded<and>download.total_bytes<block_start>progress_indicator(download.bytes_downloaded expected_size_in_bytes)<block_end>download.consume_next_chunk(transport)<block_end><block_end><block_end><def_stmt>download_from_bucket blobstore url local_path expected_size_in_bytes=<none> progress_indicator=<none><block_start>blob_downloader={"s3":_download_from_s3_bucket "gs":_download_from_gcs_bucket}<line_sep>logger=logging.getLogger(__name__)<line_sep>bucket_and_path=url[5:]# s3:// or gs:// prefix for now bucket_end_index=bucket_and_path.find("/")<line_sep>bucket=bucket_and_path[:bucket_end_index]<line_sep># we need to remove 
the leading "/" bucket_path=bucket_and_path[bucket_end_index+1:]<line_sep>logger.info("Downloading from [%s] bucket [%s] and path [%s] to [%s]." blobstore bucket bucket_path local_path)<line_sep>blob_downloader[blobstore](bucket bucket_path local_path expected_size_in_bytes progress_indicator)<line_sep><return>expected_size_in_bytes<block_end><def_stmt>download_http url local_path expected_size_in_bytes=<none> progress_indicator=<none><block_start><with_stmt>__http().request("GET" url preload_content=<false> retries=10 timeout=urllib3.Timeout(connect=45 read=240))<as>r open(local_path "wb")<as>out_file<block_start><if_stmt>r.status<g>299<block_start><raise>urllib.error.HTTPError(url r.status "" <none> <none>)<block_end># noinspection PyBroadException <try_stmt><block_start>size_from_content_header=int(r.getheader("Content-Length"))<if_stmt>expected_size_in_bytes<is><none><block_start>expected_size_in_bytes=size_from_content_header<block_end><block_end><except_stmt>BaseException<block_start>size_from_content_header=<none><block_end>chunk_size=2<power>16<line_sep>bytes_read=0<for_stmt>chunk r.stream(chunk_size)<block_start>out_file.write(chunk)<line_sep>bytes_read<augadd>len(chunk)<if_stmt>progress_indicator<and>size_from_content_header<block_start>progress_indicator(bytes_read size_from_content_header)<block_end><block_end><return>expected_size_in_bytes<block_end><block_end><def_stmt>add_url_param_elastic_no_kpi url<block_start>scheme=urllib3.util.parse_url(url).scheme<if_stmt>scheme.startswith("http")<block_start><return>_add_url_param(url {"x-elastic-no-kpi":"true"})<block_end><else_stmt><block_start><return>url<block_end><block_end><def_stmt>_add_url_param url params<block_start>url_parsed=urlparse(url)<line_sep>query=parse_qs(url_parsed.query)<line_sep>query.update(params)<line_sep><return>urlunparse((url_parsed.scheme url_parsed.netloc url_parsed.path url_parsed.params urlencode(query doseq=<true>) url_parsed.fragment))<block_end><def_stmt>download url local_path expected_size_in_bytes=<none> progress_indicator=<none><block_start>""" Downloads a single file from a URL to the provided local path. :param url: The remote URL specifying one file that should be downloaded. May be either a HTTP, HTTPS, S3 or GS URL. :param local_path: The local file name of the file that should be downloaded. :param expected_size_in_bytes: The expected file size in bytes if known. It will be used to verify that all data have been downloaded. :param progress_indicator A callable that can be use to report progress to the user. It is expected to take two parameters ``bytes_read`` and ``total_bytes``. If not provided, no progress is shown. Note that ``total_bytes`` is derived from the ``Content-Length`` header and not from the parameter ``expected_size_in_bytes`` for downloads via HTTP(S). 
"""<line_sep>tmp_data_set_path=local_path+".tmp"<try_stmt><block_start>scheme=urllib3.util.parse_url(url).scheme<if_stmt>scheme<in>["s3" "gs"]<block_start>expected_size_in_bytes=download_from_bucket(scheme url tmp_data_set_path expected_size_in_bytes progress_indicator)<block_end><else_stmt><block_start>expected_size_in_bytes=download_http(url tmp_data_set_path expected_size_in_bytes progress_indicator)<block_end><block_end><except_stmt>BaseException<block_start><if_stmt>os.path.isfile(tmp_data_set_path)<block_start>os.remove(tmp_data_set_path)<block_end><raise><block_end><else_stmt><block_start>download_size=os.path.getsize(tmp_data_set_path)<if_stmt>expected_size_in_bytes<is><not><none><and>download_size<ne>expected_size_in_bytes<block_start><if_stmt>os.path.isfile(tmp_data_set_path)<block_start>os.remove(tmp_data_set_path)<block_end><raise>exceptions.DataError("Download of [%s] is corrupt. Downloaded [%d] bytes but [%d] bytes are expected. Please retry."%(local_path download_size expected_size_in_bytes))<block_end>os.rename(tmp_data_set_path local_path)<block_end><block_end><def_stmt>retrieve_content_as_string url<block_start><with_stmt>__http().request("GET" url timeout=urllib3.Timeout(connect=45 read=240))<as>response<block_start><return>response.read().decode("utf-8")<block_end><block_end><def_stmt>has_internet_connection probing_url<block_start>logger=logging.getLogger(__name__)<try_stmt># We try to connect to Github by default. We use that to avoid touching too much different remote endpoints. <block_start>logger.debug("Checking for internet connection against [%s]" probing_url)<line_sep># We do a HTTP request here to respect the HTTP proxy setting. If we'd open a plain socket connection we circumvent the # proxy and erroneously conclude we don't have an Internet connection. response=__http().request("GET" probing_url timeout=2.0)<line_sep>status=response.status<line_sep>logger.debug("Probing result is HTTP status [%s]" str(status))<line_sep><return>status<eq>200<block_end><except_stmt>KeyboardInterrupt<block_start><raise><block_end><except_stmt>BaseException<block_start>logger.debug("Could not detect a working Internet connection" exc_info=<true>)<line_sep><return><false><block_end><block_end><def_stmt>__http <block_start><if_stmt><not>__HTTP<block_start>init()<block_end><return>__HTTP<block_end><def_stmt>resolve hostname_or_ip<block_start><if_stmt>hostname_or_ip<and>hostname_or_ip.startswith("127")<block_start><return>hostname_or_ip<block_end>addrinfo=socket.getaddrinfo(hostname_or_ip 22 0 0 socket.IPPROTO_TCP)<for_stmt>family,_,_,_,sockaddr addrinfo# we're interested in the IPv4 address <block_start><if_stmt>family<eq>socket.AddressFamily.AF_INET<block_start>ip,_=sockaddr<if_stmt>ip[:3]<ne>"127"<block_start><return>ip<block_end><block_end><block_end><return><none><block_end>
"""Use the HTMLParser library to parse HTML files that aren't too bad."""<line_sep>__all__=['HTMLParserTreeBuilder' ]<import_from_stmt>html.parser HTMLParser<import_stmt>sys<line_sep># Starting in Python 3.2, the HTMLParser constructor takes a 'strict' # argument, which we'd like to set to False. Unfortunately, # http://bugs.python.org/issue13273 makes strict=True a better bet # before Python 3.2.3. # # At the end of this file, we monkeypatch HTMLParser so that # strict=True works well on Python 3.2.2. major,minor,release=sys.version_info[:3]<line_sep>CONSTRUCTOR_TAKES_STRICT=(major<g>3<or>(major<eq>3<and>minor<g>2)<or>(major<eq>3<and>minor<eq>2<and>release<ge>3))<import_from_stmt>bs4.element CData Comment Declaration Doctype ProcessingInstruction <import_from_stmt>bs4.dammit EntitySubstitution UnicodeDammit<import_from_stmt>bs4.builder HTML HTMLTreeBuilder STRICT <line_sep>HTMLPARSER='html.parser'<class_stmt>BeautifulSoupHTMLParser(HTMLParser)<block_start><def_stmt>handle_starttag self name attrs# XXX namespace <block_start>self.soup.handle_starttag(name <none> <none> dict(attrs))<block_end><def_stmt>handle_endtag self name<block_start>self.soup.handle_endtag(name)<block_end><def_stmt>handle_data self data<block_start>self.soup.handle_data(data)<block_end><def_stmt>handle_charref self name# XXX workaround for a bug in HTMLParser. Remove this once # it's fixed. <block_start><if_stmt>name.startswith('x')<block_start>real_name=int(name.lstrip('x') 16)<block_end><else_stmt><block_start>real_name=int(name)<block_end><try_stmt><block_start>data=chr(real_name)<block_end><except_stmt>(ValueError OverflowError)<as>e<block_start>data="\N{REPLACEMENT CHARACTER}"<block_end>self.handle_data(data)<block_end><def_stmt>handle_entityref self name<block_start>character=EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)<if_stmt>character<is><not><none><block_start>data=character<block_end><else_stmt><block_start>data="&%s;"%name<block_end>self.handle_data(data)<block_end><def_stmt>handle_comment self data<block_start>self.soup.endData()<line_sep>self.soup.handle_data(data)<line_sep>self.soup.endData(Comment)<block_end><def_stmt>handle_decl self data<block_start>self.soup.endData()<if_stmt>data.startswith("DOCTYPE ")<block_start>data=data[len("DOCTYPE "):]<block_end>self.soup.handle_data(data)<line_sep>self.soup.endData(Doctype)<block_end><def_stmt>unknown_decl self data<block_start><if_stmt>data.upper().startswith('CDATA[')<block_start>cls=CData<line_sep>data=data[len('CDATA['):]<block_end><else_stmt><block_start>cls=Declaration<block_end>self.soup.endData()<line_sep>self.soup.handle_data(data)<line_sep>self.soup.endData(cls)<block_end><def_stmt>handle_pi self data<block_start>self.soup.endData()<line_sep>self.soup.handle_data(data)<line_sep>self.soup.endData(ProcessingInstruction)<block_end><block_end><class_stmt>HTMLParserTreeBuilder(HTMLTreeBuilder)<block_start>is_xml=<false><line_sep>features=[HTML STRICT HTMLPARSER]<def_stmt>__init__ self *args **kwargs<block_start><if_stmt>CONSTRUCTOR_TAKES_STRICT<block_start>kwargs['strict']=<false><block_end>self.parser_args=(args kwargs)<block_end><def_stmt>prepare_markup self markup user_specified_encoding=<none> document_declared_encoding=<none><block_start>""" :return: A 4-tuple (markup, original encoding, encoding declared within markup, whether any characters had to be replaced with REPLACEMENT CHARACTER). 
"""<if_stmt>isinstance(markup str)<block_start><return>markup <none> <none> <false><block_end>try_encodings=[user_specified_encoding document_declared_encoding]<line_sep>dammit=UnicodeDammit(markup try_encodings is_html=<true>)<line_sep><return>(dammit.markup dammit.original_encoding dammit.declared_html_encoding dammit.contains_replacement_characters)<block_end><def_stmt>feed self markup<block_start>args,kwargs=self.parser_args<line_sep>parser=BeautifulSoupHTMLParser(*args **kwargs)<line_sep>parser.soup=self.soup<line_sep>parser.feed(markup)<block_end><block_end># Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some # 3.2.3 code. This ensures they don't treat markup like <p></p> as a # string. # # XXX This code can be removed once most Python 3 users are on 3.2.3. <if_stmt>major<eq>3<and>minor<eq>2<and><not>CONSTRUCTOR_TAKES_STRICT<block_start><import_stmt>re<line_sep>attrfind_tolerant=re.compile(r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'<concat>r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')<line_sep>HTMLParserTreeBuilder.attrfind_tolerant=attrfind_tolerant<line_sep>locatestarttagend=re.compile(r""" <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name (?:\s+ # whitespace before attribute name (?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name (?:\s*=\s* # value indicator (?:'[^']*' # LITA-enclosed value |\"[^\"]*\" # LIT-enclosed value |[^'\">\s]+ # bare value ) )? ) )* \s* # trailing whitespace """ re.VERBOSE)<line_sep>BeautifulSoupHTMLParser.locatestarttagend=locatestarttagend<import_from_stmt>html.parser tagfind attrfind<def_stmt>parse_starttag self i<block_start>self.__starttag_text=<none><line_sep>endpos=self.check_for_whole_start_tag(i)<if_stmt>endpos<l>0<block_start><return>endpos<block_end>rawdata=self.rawdata<line_sep>self.__starttag_text=rawdata[i:endpos]<line_sep># Now parse the data between i+1 and j into a tag and attrs attrs=[]<line_sep>match=tagfind.match(rawdata i+1)<assert_stmt>match 'unexpected call to parse_starttag()'<line_sep>k=match.end()<line_sep>self.lasttag=tag=rawdata[i+1:k].lower()<while_stmt>k<l>endpos<block_start><if_stmt>self.strict<block_start>m=attrfind.match(rawdata k)<block_end><else_stmt><block_start>m=attrfind_tolerant.match(rawdata k)<block_end><if_stmt><not>m<block_start><break><block_end>attrname,rest,attrvalue=m.group(1 2 3)<if_stmt><not>rest<block_start>attrvalue=<none><block_end><elif_stmt>attrvalue[:1]<eq>'\''<eq>attrvalue[-1:]<or>attrvalue[:1]<eq>'"'<eq>attrvalue[-1:]<block_start>attrvalue=attrvalue[1:-1]<block_end><if_stmt>attrvalue<block_start>attrvalue=self.unescape(attrvalue)<block_end>attrs.append((attrname.lower() attrvalue))<line_sep>k=m.end()<block_end>end=rawdata[k:endpos].strip()<if_stmt>end<not><in>(">" "/>")<block_start>lineno,offset=self.getpos()<if_stmt>"\n"<in>self.__starttag_text<block_start>lineno=lineno+self.__starttag_text.count("\n")<line_sep>offset=len(self.__starttag_text)-self.__starttag_text.rfind("\n")<block_end><else_stmt><block_start>offset=offset+len(self.__starttag_text)<block_end><if_stmt>self.strict<block_start>self.error("junk characters in start tag: %r"%(rawdata[k:endpos][:20] ))<block_end>self.handle_data(rawdata[i:endpos])<line_sep><return>endpos<block_end><if_stmt>end.endswith('/>')# XHTML-style empty tag: <span attr="value" /> <block_start>self.handle_startendtag(tag attrs)<block_end><else_stmt><block_start>self.handle_starttag(tag attrs)<if_stmt>tag<in>self.CDATA_CONTENT_ELEMENTS<block_start>self.set_cdata_mode(tag)<block_end><block_end><return>endpos<block_end><def_stmt>set_cdata_mode self 
elem<block_start>self.cdata_elem=elem.lower()<line_sep>self.interesting=re.compile(r'</\s*%s\s*>'%self.cdata_elem re.I)<block_end>BeautifulSoupHTMLParser.parse_starttag=parse_starttag<line_sep>BeautifulSoupHTMLParser.set_cdata_mode=set_cdata_mode<line_sep>CONSTRUCTOR_TAKES_STRICT=<true><block_end>
#! /usr/bin/env python # Copyright 2019 <NAME> # # This file is part of WarpX. # # License: BSD-3-Clause-LBNL # This script tests the particle pusher (HC) # using a force-free field, # in which position x should remain 0. # An initial velocity Vy corresponding to # Lorentz factor = 20 is used. # Bz is fixed at 1 T. # Ex = -Vy*Bz. # Possible errors: # Boris: 2321.3958529 # Vay: 0.00010467 # HC: 0.00011403 # tolerance: 0.001 # Possible running time: ~ 4.0 s <import_stmt>sys<import_stmt>yt<line_sep>sys.path.insert(1 '../../../../warpx/Regression/Checksum/')<import_stmt>checksumAPI<line_sep>tolerance=0.001<line_sep>filename=sys.argv[1]<line_sep>ds=yt.load(filename)<line_sep>ad=ds.all_data()<line_sep>x=ad['particle_position_x'].to_ndarray()<line_sep>print('error = ' abs(x))<line_sep>print('tolerance = ' tolerance)<assert_stmt>(abs(x)<l>tolerance)<line_sep>test_name=filename[:-9]# Could also be os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name filename)<line_sep>
<import_from_stmt>traceback format_tb<import_stmt>smtplib<import_from_stmt>email.header Header<import_from_stmt>email.mime.text MIMEText<import_from_stmt>email.utils parseaddr formataddr <import_from_stmt>config get_email_args<import_from_stmt>logger other<line_sep>email_args=get_email_args()<line_sep>smtp_server=email_args['server']<line_sep>smtp_port=email_args['port']<line_sep>from_addr=email_args['from']<line_sep>from_password=email_args['password']<line_sep>to_addr=email_args['to']<line_sep>email_subject=email_args['subject']<line_sep>warning_info=email_args['warning_info']<def_stmt>_format_addr nick_addr<block_start>name,addr=parseaddr(nick_addr)<line_sep><return>formataddr((Header(name 'utf-8').encode() addr))<block_end><def_stmt>gen_msg content subject email_from email_to from_nick=<none> to_nick=<none><block_start><if_stmt>from_nick<is><none><block_start>from_nick=email_from<block_end><if_stmt>to_nick<is><none><block_start>to_nick=email_to<block_end>msg=MIMEText(content 'plain' 'utf-8')<line_sep>msg['From']=_format_addr('{} <{}>'.format(from_nick email_from))<line_sep>msg['To']=_format_addr('{} <{}>'.format(to_nick email_to))<line_sep>msg['Subject']=Header(subject).encode()<line_sep><return>msg<block_end><def_stmt>send_email email_from=from_addr email_pass=from_password to_addrs=<none> server=smtp_server port=smtp_port from_nick='weibospider' to_nick='SpiderUser'<block_start><if_stmt>to_addrs<is><none><or>isinstance(to_addrs str)<block_start>to_addrs=[to_addrs]<block_end>msg=gen_msg(warning_info email_subject email_from to_addrs[0] from_nick to_nick)<line_sep>server=smtplib.SMTP(server port)<try_stmt><block_start>server.starttls()<line_sep>server.login(email_from email_pass)<line_sep>rs=server.sendmail(email_from to_addrs msg.as_string())<block_end><except_stmt>Exception<as>e<block_start>other.error('Failed to send emails, {} is raised, here are details:{}'.format(e format_tb(e.__traceback__)[0]))<block_end><else_stmt><block_start><return>rs<block_end><finally_stmt><block_start>server.quit()<block_end><block_end>
<import_stmt>subprocess os<def_stmt>http_client_post ip port url entries={}<block_start><import_stmt>urllib.request urllib.parse json<line_sep>url=url<if><not>url.startswith('/')<else>url[1:]<line_sep>response=urllib.request.urlopen('http://%s:%d/%s'%(ip port url) urllib.parse.urlencode(entries).encode())<line_sep>obj=json.loads(response.read().decode().strip())<line_sep>response.close()<line_sep><return>obj<block_end><class_stmt>case_handler# [Order-by] lexicographic order # curl -L -X POST http://0.0.0.0:1728/v1/minions/list <block_start><def_stmt>minions_list form args<block_start>minions=[]<for_stmt>item args.conn<block_start>minions.append(args.conn[item][1])<block_end><return>{"minions":minions}<block_end># curl -L -X POST -F mem=4096 -F cpu=2 http://0.0.0.0:1728/v1/resource/allocation <def_stmt>resource_allocation form args<block_start>mem=int(form['mem'])<line_sep>cpu=int(form['cpu'])<line_sep>candidates={}<import_from_stmt>daemon.http minion_http_handler<for_stmt>item args.conn<block_start>addr=args.conn[item][1]<line_sep>obj=http_client_post(addr minion_http_handler.http_port '/v1/system/memsw/available')<if_stmt>obj['success']<and>obj['data']['Mbytes']<ge>mem<block_start>candidates[addr]=obj['data']<block_end><block_end><if_stmt>len(candidates)<le>0<block_start><raise>Exception("no minions")<block_end><else_stmt><block_start><import_from_stmt>policy.allocate candidates_selector<line_sep>one=candidates_selector.select(candidates)<line_sep><return>{"recommend":one}<block_end><block_end># curl -L -X POST -F user=docklet http://0.0.0.0:1728/v1/user/live/add <def_stmt>user_live_add form args<block_start><if_stmt><not>os.path.exists('/var/lib/docklet/global/users/%s'%form['user'])<block_start><return><false><block_end>subprocess.getoutput('echo live > /var/lib/docklet/global/users/%s/status'%form['user'])<line_sep><return><true><block_end># curl -L -X POST -F user=docklet http://0.0.0.0:1728/v1/user/live/remove <def_stmt>user_live_remove form args<block_start>subprocess.getoutput('rm -f /var/lib/docklet/global/users/%s/status'%form['user'])<line_sep><return><true><block_end># curl -L -X POST http://0.0.0.0:1728/v1/user/live/list <def_stmt>user_live_list form args<block_start><return>subprocess.getoutput('ls -1 /var/lib/docklet/global/users/*/status 2>/dev/null | awk -F\/ \'{print $(NF-1)\'}').split()<block_end><block_end>
# -*- coding: utf-8 -*- __author__='LennonChin'<line_sep>__date__='2017/11/27 23:38'<line_sep>
# terrascript/provider/NetApp/netapp_elementsw.py # Automatically generated by tools/makecode.py (24-Sep-2021 15:22:13 UTC) <import_stmt>terrascript<class_stmt>netapp_elementsw(terrascript.Provider)<block_start>"""Support for Volume, Initiator, Account, and Volume Access Group resources."""<line_sep>__description__=("Support for Volume, Initiator, Account, and Volume Access Group resources.")<line_sep>__namespace__="NetApp"<line_sep>__name__="netapp-elementsw"<line_sep>__source__="https://github.com/NetApp/terraform-provider-netapp-elementsw"<line_sep>__version__="20.11.0"<line_sep>__published__="2020-11-24T20:02:40Z"<line_sep>__tier__="partner"<block_end>__all__=["netapp_elementsw"]<line_sep>
# -*- coding: utf-8 -*- <import_stmt>pandas<as>pd<import_from_stmt>collections Counter<import_from_stmt>itertools chain product<import_from_stmt>operator mul<import_from_stmt>functools reduce<import_from_stmt>math log10<import_from_stmt>functools lru_cache<import_stmt>logging<line_sep>log=logging.getLogger(__name__)<import_from_stmt>fuzzymatcher.utils add_dmetaphones_to_col is_mispelling convert_series_to_dmetaphones tokens_to_dmetaphones<class_stmt>Scorer<block_start>""" A DataPreprocessor is responsible for ingesting df_left (the dataframe containing the records we want to find matches for) and df_right (the dataframe we want to search for potential matches) and applying preprocessing stages like normalisation to make matching easier. """<def_stmt>add_data self matcher<block_start>self.matcher=matcher<line_sep>self._generate_probs()<block_end><def_stmt>get_prob self token field left_right misspelling=<false><block_start>""" Get probability given field and token """<try_stmt><block_start><if_stmt><not>misspelling<and>left_right<eq>"left"<block_start><return>self.left_field_token_probs_dict[field][token]<block_end><if_stmt><not>misspelling<and>left_right<eq>"right"<block_start><return>self.right_field_token_probs_dict[field][token]<block_end><if_stmt>misspelling<and>left_right<eq>"left"<block_start><return>self.left_field_misspelling_probs_dict[field][token]<block_end><if_stmt>misspelling<and>left_right<eq>"right"<block_start><return>self.right_field_misspelling_probs_dict[field][token]<block_end><block_end><except_stmt>KeyError<block_start><return><none><block_end><block_end>@lru_cache(maxsize=int(1e6))<def_stmt>score_match self record_left_id record_right_id<block_start>record_left=self.matcher.left_records[record_left_id]<line_sep>record_right=self.matcher.right_records[record_right_id]<line_sep># Need to find common tokens, and get their probabilities fields_left=record_left.fields<line_sep>prob=1<for_stmt>f_left fields_left<block_start>p=self._field_to_prob(f_left record_left record_right)<line_sep>prob=p<times>prob<block_end>match_score=self.prob_to_score(prob)<line_sep><return>{"match_prob":prob "match_score":match_score "record_right":record_right}<block_end><def_stmt>_field_to_prob self field_left record_left record_right<block_start>field_right=self.matcher.left_to_right_lookup[field_left]<line_sep>tokens_left=set(record_left.clean_token_dict[field_left])<line_sep>tokens_right=set(record_right.clean_token_dict[field_right])<line_sep>matching_tokens=tokens_left.intersection(tokens_right)<line_sep>unmatching_tokens_left=tokens_left.difference(tokens_right)<line_sep>unmatching_tokens_right=tokens_right.difference(tokens_left)<line_sep>prob_matching=self._get_prob_matching(matching_tokens field_right)<line_sep>prob_unmatching1=self._get_prob_unmatching(unmatching_tokens_left tokens_right field_right field_left)<line_sep>prob_unmatching2=self._get_prob_unmatching(unmatching_tokens_right tokens_left field_right field_left)<line_sep>tokens_alt_left=set(record_left.token_misspelling_dict[field_left])<line_sep>tokens_alt_right=set(record_right.token_misspelling_dict[field_right])<line_sep>matching_tokens_alt=tokens_alt_left.intersection(tokens_alt_right)<line_sep>prob_matching_alt=self._get_prob_matching(matching_tokens_alt field_right misspelling=<true>)<line_sep>prob=prob_matching<times>prob_unmatching1<times>prob_unmatching2<times>prob_matching_alt<line_sep><return>prob<block_end><def_stmt>_get_prob_matching self tokens f_right misspelling=<false><block_start>prob=1<for_stmt>t 
tokens<block_start>p=self.get_prob(t f_right "right" misspelling)<line_sep>prob=p<times>prob<block_end><return>prob<block_end><def_stmt>_get_prob_unmatching self unmatching_tokens record_tokens field_right field_left# If the unmatching token is not a misspelling, then undo its probability <block_start>prob=1<for_stmt>umt unmatching_tokens<block_start><if_stmt><not>self._is_misspelling_of_one(umt record_tokens)<block_start>p=self.get_prob(umt field_right "right")<if_stmt>p<is><none># If this token never appears on the right, how often does it appear on the left <block_start>p=self.get_prob(umt field_left "left")<block_end>prob=p<times>prob<block_end><block_end>prob=Scorer._adjust_prob_towards_one(prob)<line_sep><return>1/prob<block_end><def_stmt>_is_misspelling_of_one self token token_list<block_start><for_stmt>t token_list<block_start><if_stmt>self.matcher.token_comparison.is_mispelling(token t)<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>get_token_lists_by_field self recordsdict attribute<block_start>token_lists_by_field={}<line_sep>key=next(iter(recordsdict))<line_sep>fields=recordsdict[key].fields<for_stmt>f fields<block_start>token_lists_by_field[f]=[]<block_end><for_stmt>key,this_record recordsdict.items()<block_start><for_stmt>f fields<block_start>tokens=getattr(this_record attribute)[f]<line_sep>token_lists_by_field[f].extend(tokens)<block_end><block_end><return>token_lists_by_field<block_end><def_stmt>field_tokens_to_prob self field_tokens<block_start>ft=field_tokens<for_stmt>key,value ft.items()<block_start>counts=Counter(value)<line_sep>count_sum=sum(counts.values())<line_sep>counts={k:v/count_sum<for>k,v counts.items()}<line_sep>ft[key]=counts<block_end><return>ft<block_end><def_stmt>_generate_probs self<block_start>left_field_tokens=self.get_token_lists_by_field(self.matcher.left_records "clean_token_dict")<line_sep>self.left_field_token_probs_dict=self.field_tokens_to_prob(left_field_tokens)<line_sep>right_field_tokens=self.get_token_lists_by_field(self.matcher.right_records "clean_token_dict")<line_sep>self.right_field_token_probs_dict=self.field_tokens_to_prob(right_field_tokens)<line_sep>left_field_tokens=self.get_token_lists_by_field(self.matcher.left_records "token_misspelling_dict")<line_sep>self.left_field_misspelling_probs_dict=self.field_tokens_to_prob(left_field_tokens)<line_sep>right_field_tokens=self.get_token_lists_by_field(self.matcher.right_records "token_misspelling_dict")<line_sep>self.right_field_misspelling_probs_dict=self.field_tokens_to_prob(right_field_tokens)<block_end>@staticmethod<def_stmt>prob_to_score prob<block_start><return>-(log10(prob))/30<block_end>@staticmethod<def_stmt>_adjust_prob_towards_one initial_prob amount=2<block_start><return>initial_prob<block_end><block_end>
<import_from_stmt>.. Provider<as>InternetProvider<class_stmt>Provider(InternetProvider)<block_start>user_name_formats=('{{last_name_female}}.{{first_name_female}}' '{{last_name_female}}.{{first_name_female}}' '{{last_name_male}}.{{first_name_male}}' '{{last_name_male}}.{{first_name_male}}' '{{first_name_female}}.{{last_name_female}}' '{{first_name_male}}.{{last_name_male}}' '{{first_name}}##' '?{{last_name}}' '?{{last_name}}' '?{{last_name}}' )<line_sep>email_formats=('{{user_name}}@{{free_email_domain}}' )<line_sep>free_email_domains=('zoznam.sk' 'gmail.com' 'centrum.sk' 'post.sk' 'chello.sk' 'pobox.sk' 'szm.sk' 'atlas.sk' 'azet.sk' 'inmail.sk' )<line_sep>tlds=('sk' 'com')<block_end>
<import_stmt>json<import_from_stmt>io TextIOWrapper<import_from_stmt>json JSONDecodeError<import_stmt>requests<import_stmt>os<import_from_stmt>src.logs Logging<class_stmt>Config<block_start><def_stmt>__init__ self log<block_start>self.log=log<if_stmt><not>os.path.exists("config.json")<block_start>self.log("config.json not found, creating new one")<with_stmt>open("config.json" "w")<as>file<block_start>config=self.config_dialog(file)<block_end><block_end><try_stmt><block_start><with_stmt>open("config.json" "r")<as>file<block_start>self.log("config opened")<line_sep>config=json.load(file)<if_stmt>config.get("cooldown")<is><none><block_start>self.log("some config values are None, getting new config")<line_sep>config=self.config_dialog(file)<block_end><if_stmt>config.get("weapon")<eq>""<or>config.get("weapon")<eq><none><block_start>weapon=input("Enter the name of the weapon you use the most (This is for tracking the skins): ").capitalize().strip()<line_sep>self.log(f"User inputted {weapon} as the weapon")<with_stmt>open("config.json" "w")<as>f<block_start><if_stmt><not>self.weapon_check(weapon)<block_start>print(weapon+" is not known valorant weapon you can edit directly "+os.getcwd()+"\config.json\n")<line_sep>config["weapon"]="vandal"<line_sep>json.dump(config f indent=4)<line_sep>self.log("vandal weapon has been added to the config file by default")<block_end><else_stmt><block_start>config["weapon"]=weapon<line_sep>json.dump(config f indent=4)<line_sep>self.log(f"{weapon} weapon has been added to the config file by user")<block_end><block_end><block_end><block_end><block_end><except_stmt>(JSONDecodeError)<block_start>self.log("invalid file")<with_stmt>open("config.json" "w")<as>file<block_start>config=self.config_dialog(file)<block_end><block_end><finally_stmt><block_start>self.cooldown=config["cooldown"]<line_sep>self.log(f"got cooldown with value '{self.cooldown}'")<if_stmt><not>self.weapon_check(config["weapon"])<block_start>self.weapon="vandal"# if the user manually entered a wrong name into the config file, this will be the default until changed by the user. <block_end><else_stmt><block_start>self.weapon=config["weapon"]<block_end><block_end><block_end><def_stmt>config_dialog self fileToWrite:TextIOWrapper<block_start>self.log("color config prompt called")<line_sep>jsonToWrite={"cooldown":1 "weapon":""}<line_sep>json.dump(jsonToWrite fileToWrite)<line_sep><return>jsonToWrite<block_end><def_stmt>weapon_check self name<block_start><if_stmt>name<in>[weapon["displayName"]<for>weapon requests.get("https://valorant-api.com/v1/weapons").json()["data"]]<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><block_end>
<import_stmt>spacy<line_sep>nlp=spacy.load("fr_core_news_sm")<line_sep>text="Apple a été créée en 1976 par <NAME>, <NAME> et <NAME>."<line_sep># Process the text doc=nlp(text)<line_sep># Iterate over the predicted entities <for_stmt>ent doc.ents# Print the entity text and its label <block_start>print(ent.text ent.label_)<block_end>
<import_from_stmt>sofi.ui Element<def_stmt>test_attrs_to_string_shortcut <block_start>attributes=[('cl' 'class') ('ident' 'id') ]<line_sep>e=Element(cl='container' ident='foo')<line_sep>should_be='class="container" id="foo"'<assert_stmt>e._attrs_to_string(attributes)<eq>should_be<block_end><def_stmt>test_attrs_to_string_empty_shortcut <block_start>attributes=[]<line_sep>e=Element()<line_sep>should_be=''<assert_stmt>e._attrs_to_string(attributes)<eq>should_be<block_end><def_stmt>test_attrs_to_string_default_arguments <block_start>e=Element(cl='container' ident='foo')<line_sep>should_be='class="container" id="foo"'<assert_stmt>e._attrs_to_string()<eq>should_be<block_end><def_stmt>test_attrs_to_string_attribute_with_no_value <block_start>attributes=[('cl' 'class') ('ident' 'id') ('disabled' <none>) ]<line_sep>e=Element(cl='container' ident='foo')<line_sep>should_be='class="container" id="foo" disabled'<assert_stmt>e._attrs_to_string(attributes)<eq>should_be<block_end><def_stmt>test_attrs_to_string_with_nonexistent_attribute <block_start>attributes=[('cl' 'class') ('ident' 'id') ('this_doesnt_exist' 'foo') ]<line_sep>e=Element(cl='container' ident='foo')<line_sep>should_be='class="container" id="foo"'<assert_stmt>e._attrs_to_string(attributes)<eq>should_be<block_end>
<import_from_future_stmt> absolute_import print_function unicode_literals<import_from_stmt>builtins dict str<import_from_stmt>indra.statements *<import_from_stmt>indra.assemblers.cyjs CyJSAssembler<line_sep>mek=Agent('MAP2K1' db_refs={'HGNC':'6840' 'TEXT':'mek1'})<line_sep>erk=Agent('MAPK1' db_refs={'UP':'P28482'})<line_sep>dusp=Agent('DUSP4')<line_sep>st_phos=Phosphorylation(mek erk)<line_sep>st_phos_Y=Phosphorylation(mek erk residue='Y')<line_sep>st_phos_T=Phosphorylation(mek erk residue='T')<line_sep>st_dephos=Dephosphorylation(dusp erk)<line_sep>st_complex=Complex([mek erk dusp])<line_sep>st_act=Activation(mek erk)<line_sep>st_gef=Gef(Agent('SOS1') Agent('HRAS'))<line_sep>st_gap=Gap(Agent('RASA1') Agent('HRAS'))<line_sep>st_incamount=IncreaseAmount(Agent('TP53') Agent('MDM2'))<line_sep>st_decamount=DecreaseAmount(Agent('MDM2') Agent('TP53'))<line_sep>st_act2=Inhibition(dusp erk)<line_sep>st_cited=Phosphorylation(mek erk evidence=Evidence(pmid='12345' text='MEK phosphorylates ERK'))<line_sep>st_cited2=Phosphorylation(mek erk evidence=Evidence(pmid='api35' text='MEK phosphorylates ERK'))<line_sep>st_selfmod=Autophosphorylation(Agent('AKT1') 'S' '473')<def_stmt>test_act <block_start>cja=CyJSAssembler()<line_sep>cja.add_statements([st_act st_act2])<line_sep>cja.make_model()<assert_stmt>len(cja._nodes)<eq>3<assert_stmt>len(cja._edges)<eq>2<line_sep>polarities=[edge['data']['polarity']<for>edge cja._edges]<assert_stmt>len(set(polarities))<eq>2<assert_stmt>'positive'<in>polarities<assert_stmt>'negative'<in>polarities<line_sep>db_refs=[node['data']['db_refs']<for>node cja._nodes]<for_stmt>node,refs zip(cja._nodes db_refs)<block_start><if_stmt>node['data']['name']<eq>'MAP2K1'<block_start><assert_stmt>refs.get('HGNC')<eq>'https://identifiers.org/hgnc:6840' refs<assert_stmt>refs.get('TEXT')<eq>'mek1' refs<block_end><if_stmt>node['data']['name']<eq>'MAPK1'<block_start><assert_stmt>refs.get('UniProt')<block_end><if_stmt>node['data']['name']<eq>'DUSP4'<block_start><assert_stmt><not>refs<block_end><block_end><block_end><def_stmt>test_regamount <block_start>cja=CyJSAssembler()<line_sep>cja.add_statements([st_incamount st_decamount])<line_sep>cja.make_model()<assert_stmt>len(cja._nodes)<eq>2<assert_stmt>len(cja._edges)<eq>2<line_sep>polarities=[edge['data']['polarity']<for>edge cja._edges]<assert_stmt>len(set(polarities))<eq>2<assert_stmt>'positive'<in>polarities<assert_stmt>'negative'<in>polarities<block_end><def_stmt>test_ras <block_start>cja=CyJSAssembler()<line_sep>cja.add_statements([st_gef st_gap])<line_sep>cja.make_model()<assert_stmt>len(cja._nodes)<eq>3<assert_stmt>len(cja._edges)<eq>2<line_sep>polarities=[edge['data']['polarity']<for>edge cja._edges]<assert_stmt>len(set(polarities))<eq>2<assert_stmt>'positive'<in>polarities<assert_stmt>'negative'<in>polarities<block_end><def_stmt>test_selfmod <block_start>cja=CyJSAssembler()<line_sep>cja.add_statements([st_selfmod])<line_sep>cja.make_model()<assert_stmt>len(cja._nodes)<eq>1<assert_stmt>len(cja._edges)<eq>1<line_sep>polarities=[edge['data']['polarity']<for>edge cja._edges]<assert_stmt>len(polarities)<eq>1<assert_stmt>polarities[0]<eq>'positive'<block_end><def_stmt>test_complex <block_start>cja=CyJSAssembler()<line_sep>cja.add_statements([st_complex])<line_sep>cja.make_model()<assert_stmt>len(cja._nodes)<eq>3<assert_stmt>len(cja._edges)<eq>3<line_sep>polarities=[edge['data']['polarity']<for>edge cja._edges]<assert_stmt>len(set(polarities))<eq>1<assert_stmt>'none'<in>polarities<block_end><def_stmt>test_print_cyjs_graph 
<block_start>cja=CyJSAssembler()<line_sep>cja.add_statements([st_act st_act2])<line_sep>cja.make_model()<line_sep>cyjs_str=cja.print_cyjs_graph()<line_sep># assert output is not empty <assert_stmt>len(cyjs_str)<g>len('{\n "edges": [],\n "nodes": []\n}')<block_end><def_stmt>test_no_grouping <block_start>st1=Phosphorylation(Agent('A') Agent('B'))<line_sep>st2=Phosphorylation(Agent('A') Agent('C'))<line_sep>st3=Phosphorylation(Agent('C') Agent('B'))<line_sep>cja=CyJSAssembler()<line_sep>cja.add_statements([st1 st2 st3])<line_sep>cja.make_model(grouping=<true>)<line_sep>parents=[node['data']['parent']<for>node cja._nodes]<for_stmt>parent parents<block_start><assert_stmt>parent<eq>''<block_end><block_end><def_stmt>test_grouping_block_targeting_node <block_start>st1=Phosphorylation(Agent('A') Agent('B'))<line_sep>st2=Phosphorylation(Agent('C') Agent('B'))<line_sep>cja=CyJSAssembler()<line_sep>cja.add_statements([st1 st2])<line_sep>cja.make_model(grouping=<true>)<for_stmt>node cja._nodes<block_start><if_stmt>node['data']['name']<eq>'A'<block_start>parent_a=node['data']['parent']<block_end><if_stmt>node['data']['name']<eq>'B'<block_start>parent_b=node['data']['parent']<assert_stmt>parent_b<eq>''<block_end><if_stmt>node['data']['name']<eq>'C'<block_start>parent_c=node['data']['parent']<block_end><block_end>assert_element_properties(cja)<assert_stmt>parent_a<eq>parent_c<line_sep>parent_a_name=[x['data']['name']<for>x cja._nodes<if>x['data']['id']<eq>parent_a][0]<assert_stmt>parent_a_name.startswith('Group')<assert_stmt>len(cja._edges)<eq>3<line_sep>virtual_edges=[x<for>x cja._edges<if>x['data']['i']<eq>'Virtual']<assert_stmt>len(virtual_edges)<eq>2<line_sep>real_edges=[x<for>x cja._edges<if>x['data']['i']<ne>'Virtual']<assert_stmt>len(real_edges)<eq>1<block_end><def_stmt>test_grouping_node_targeting_block <block_start>st1=Phosphorylation(Agent('A') Agent('B'))<line_sep>st2=Phosphorylation(Agent('A') Agent('C'))<line_sep>cja=CyJSAssembler()<line_sep>cja.add_statements([st1 st2])<line_sep>cja.make_model(grouping=<true>)<for_stmt>node cja._nodes<block_start><if_stmt>node['data']['name']<eq>'A'<block_start>parent_a=node['data']['parent']<assert_stmt>parent_a<eq>''<block_end><if_stmt>node['data']['name']<eq>'B'<block_start>parent_b=node['data']['parent']<block_end><if_stmt>node['data']['name']<eq>'C'<block_start>parent_c=node['data']['parent']<block_end><block_end>assert_element_properties(cja)<assert_stmt>parent_b<eq>parent_c<line_sep>parent_b_name=[x['data']['name']<for>x cja._nodes<if>x['data']['id']<eq>parent_b][0]<assert_stmt>parent_b_name.startswith('Group')<assert_stmt>len(cja._edges)<eq>3<line_sep>virtual_edges=[x<for>x cja._edges<if>x['data']['i']<eq>'Virtual']<assert_stmt>len(virtual_edges)<eq>2<line_sep>real_edges=[x<for>x cja._edges<if>x['data']['i']<ne>'Virtual']<assert_stmt>len(real_edges)<eq>1<block_end><def_stmt>test_grouping_node_targeting_block_targeting_node <block_start>st1=Phosphorylation(Agent('A') Agent('B'))<line_sep>st2=Phosphorylation(Agent('A') Agent('C'))<line_sep>st3=Phosphorylation(Agent('B') Agent('D'))<line_sep>st4=Phosphorylation(Agent('C') Agent('D'))<line_sep>cja=CyJSAssembler()<line_sep>cja.add_statements([st1 st2 st3 st4])<line_sep>cja.make_model(grouping=<true>)<for_stmt>node 
cja._nodes<block_start><if_stmt>node['data']['name']<eq>'A'<block_start>parent_a=node['data']['parent']<assert_stmt>parent_a<eq>''<block_end><if_stmt>node['data']['name']<eq>'B'<block_start>parent_b=node['data']['parent']<block_end><if_stmt>node['data']['name']<eq>'C'<block_start>parent_c=node['data']['parent']<block_end><if_stmt>node['data']['name']<eq>'D'<block_start>parent_d=node['data']['parent']<assert_stmt>parent_d<eq>''<block_end><block_end>assert_element_properties(cja)<assert_stmt>parent_b<eq>parent_c<line_sep>parent_b_name=[x['data']['name']<for>x cja._nodes<if>x['data']['id']<eq>parent_b][0]<assert_stmt>parent_b_name.startswith('Group')<assert_stmt>len(cja._edges)<eq>6<line_sep>virtual_edges=[x<for>x cja._edges<if>x['data']['i']<eq>'Virtual']<assert_stmt>len(virtual_edges)<eq>4<line_sep>real_edges=[x<for>x cja._edges<if>x['data']['i']<ne>'Virtual']<assert_stmt>len(real_edges)<eq>2<block_end><def_stmt>test_grouping_block_targeting_block <block_start>st1=Phosphorylation(Agent('A') Agent('B'))<line_sep>st2=Phosphorylation(Agent('A') Agent('C'))<line_sep>st3=Phosphorylation(Agent('D') Agent('B'))<line_sep>st4=Phosphorylation(Agent('D') Agent('C'))<line_sep>cja=CyJSAssembler()<line_sep>cja.add_statements([st1 st2 st3 st4])<line_sep>cja.make_model(grouping=<true>)<for_stmt>node cja._nodes<block_start><if_stmt>node['data']['name']<eq>'A'<block_start>parent_a=node['data']['parent']<block_end><if_stmt>node['data']['name']<eq>'B'<block_start>parent_b=node['data']['parent']<block_end><if_stmt>node['data']['name']<eq>'C'<block_start>parent_c=node['data']['parent']<block_end><if_stmt>node['data']['name']<eq>'D'<block_start>parent_d=node['data']['parent']<block_end><block_end>assert_element_properties(cja)<assert_stmt>parent_b<eq>parent_c<assert_stmt>parent_a<eq>parent_d<line_sep>parent_b_name=[x['data']['name']<for>x cja._nodes<if>x['data']['id']<eq>parent_b][0]<line_sep>parent_a_name=[x['data']['name']<for>x cja._nodes<if>x['data']['id']<eq>parent_a][0]<assert_stmt>parent_b_name.startswith('Group')<assert_stmt>parent_a_name.startswith('Group')<assert_stmt>len(cja._edges)<eq>5<line_sep>virtual_edges=[x<for>x cja._edges<if>x['data']['i']<eq>'Virtual']<assert_stmt>len(virtual_edges)<eq>4<line_sep>real_edges=[x<for>x cja._edges<if>x['data']['i']<ne>'Virtual']<assert_stmt>len(real_edges)<eq>1<block_end><def_stmt>test_edge_aggregation_between_nongroup_nodes <block_start>cja=CyJSAssembler()<line_sep>cja.add_statements([st_phos_Y st_phos_T])<line_sep>cja.make_model(grouping=<false>)<assert_stmt>len(cja._nodes)<eq>2<assert_stmt>len(cja._edges)<eq>1<for_stmt>edge cja._edges<block_start><assert_stmt>len(edge['data']['uuid_list'])<eq>2<block_end><for_stmt>node cja._nodes<block_start><assert_stmt>len(node['data']['uuid_list'])<eq>2<block_end>cja=CyJSAssembler()<line_sep>cja.add_statements([st_phos_Y st_phos_T])<line_sep>cja.make_model(grouping=<true>)<assert_stmt>len(cja._nodes)<eq>2<assert_stmt>len(cja._edges)<eq>1<for_stmt>edge cja._edges<block_start><assert_stmt>len(edge['data']['uuid_list'])<eq>2<block_end><for_stmt>node cja._nodes<block_start><assert_stmt>len(node['data']['uuid_list'])<eq>2<block_end><block_end><def_stmt>assert_element_properties cja# each element needs an id <block_start>elements=([n<for>n cja._nodes]+[e<for>e cja._edges])<for_stmt>element elements<block_start><assert_stmt>element['data']['id']<is><not><none> "Element ID is none"<assert_stmt>element['data']['id']<ne>'' "Element ID is blank string!"<line_sep># each element should also have a list of uuids with at least one uuid 
<assert_stmt>element['data']['uuid_list']<is><not><none> "uuid_list is None"<assert_stmt>len(element['data']['uuid_list'])<ge>1 "uuid_list is empty!"<for_stmt>uuid element['data']['uuid_list']<block_start><assert_stmt>type(uuid)<eq>type('abc') str(uuid)+' is not a string'<block_end><block_end><block_end>
# # This file is part of pyasn1 software. # # Copyright (c) 2005-2017, <NAME> <<EMAIL>> # License: http://snmplabs.com/pyasn1/license.html # <try_stmt><block_start><import_stmt>unittest2<as>unittest<block_end><except_stmt>ImportError<block_start><import_stmt>unittest<block_end>suite=unittest.TestLoader().loadTestsFromNames(['tests.codec.cer.test_encoder.suite' 'tests.codec.cer.test_decoder.suite'])<if_stmt>__name__<eq>'__main__'<block_start>unittest.TextTestRunner(verbosity=2).run(suite)<block_end>
<import_stmt>tensorflow<as>tf<line_sep># basic tf.app.flags.DEFINE_string('action' 'train' 'Action to take')<line_sep>tf.app.flags.DEFINE_string('working_dir' '' 'Directory for saving checkpoints and log files')<line_sep>tf.app.flags.DEFINE_string('log_file_prefix' 'fctd_' 'Prefix of logging file name')<line_sep># FCTD model tf.app.flags.DEFINE_integer('pos_label' 1 'Label for the background class')<line_sep>tf.app.flags.DEFINE_integer('neg_label' 0 'Label for the background class')<line_sep>tf.app.flags.DEFINE_float('fctd_min_scale' 0.1 'Minimum region size')<line_sep>tf.app.flags.DEFINE_float('fctd_max_scale' 0.95 'Maximum region size')<line_sep>tf.app.flags.DEFINE_float('pos_scale_diff_threshold' 1.7 '')<line_sep>tf.app.flags.DEFINE_float('neg_scale_diff_threshold' 2.0 '')<line_sep>tf.app.flags.DEFINE_integer('fctd_n_scale' 6 'Number of region scales')<line_sep>tf.app.flags.DEFINE_integer('n_local_links' 8 'Number of links of a grid node')<line_sep>tf.app.flags.DEFINE_integer('n_cross_links' 4 'Number of cross-layer links on each node')<line_sep>tf.app.flags.DEFINE_string('link_clf_mode' 'softmax' 'Mode of classifying local links. Can be softmax or sigmoid')<line_sep># testing tf.app.flags.DEFINE_integer('test_period' 5000 'Period of on-the-fly testing')<line_sep>tf.app.flags.DEFINE_string('test_model_path' '' 'Test model path')<line_sep>tf.app.flags.DEFINE_string('test_record_path' '' 'Test tf-records path')<line_sep>tf.app.flags.DEFINE_integer('num_test' 500 'Number of test images')<line_sep>tf.app.flags.DEFINE_float('node_threshold' 0.5 'Confidence threshold for nodes')<line_sep>tf.app.flags.DEFINE_float('link_threshold' 0.5 'Confidence threshold for links')<line_sep>tf.app.flags.DEFINE_integer('nms_top_k' 400 'Apply NMS only to examples with top-k scores on each class')<line_sep>tf.app.flags.DEFINE_integer('keep_top_k' 200 'Keep examples with top-k scores after NMS')<line_sep>tf.app.flags.DEFINE_integer('save_visualization' 0 'Save visualization results')<line_sep>tf.app.flags.DEFINE_string('result_format' 'icdar_2015_inc' 'Result file format')<line_sep>tf.app.flags.DEFINE_float('bbox_scale_factor' 0 'Bounding box scale trick')<line_sep># summaries and checkpoints tf.app.flags.DEFINE_integer('brief_summary_period' 10 'Period for brief summaries')<line_sep>tf.app.flags.DEFINE_integer('detailed_summary_period' 500 'Period for detailed summaries')<line_sep>tf.app.flags.DEFINE_integer('checkpoint_period' 5000 'Period for saving checkpoints')<line_sep>
"""The Fil memory profiler."""<line_sep>__all__=["__version__"]<line_sep># If we're running with Fil preloaded, after forks make sure Fil is no longer # enabled, since we don't yet support child processes. This is also done in C # code; doing it only in Python or only C doesn't seem to work. <import_stmt>sys<import_stmt>os<if_stmt>sys.version_info[:2]<g>(3 6)# register_at_fork only works in Python 3.6 or later. <block_start><if_stmt>os.getenv("__FIL_STATUS")<in>("api" "program")<block_start><def_stmt>unset _os=os<block_start>_os.environ["__FIL_STATUS"]="subprocess"<block_end>os.register_at_fork(after_in_child=unset)<del_stmt>unset<block_end><block_end># Fallback mechanism for detecting forks, for Python 3.6 or if someone isn't # doing fork()-without-exec() right (i.e. not calling the C API postfork()): _original_pid=os.getpid()<del_stmt>sys os<try_stmt><block_start><import_from_stmt>._version version<as>__version__<block_end><except_stmt>ImportError# package is not installed <block_start><try_stmt><block_start><import_from_stmt>importlib.metadata version PackageNotFoundError<try_stmt><block_start>__version__=version(__name__)<block_end><except_stmt>PackageNotFoundError<block_start>__version__="unknown"<block_end><block_end><except_stmt>ImportError# Python 3.6 doesn't have importlib.metadata: <block_start>__version__="unknown"<block_end><block_end><def_stmt>load_ipython_extension ipython<block_start>"""Load our IPython magic."""<import_from_stmt>IPython.core.error UsageError<import_stmt>os<if_stmt>os.environ.get("__FIL_STATUS")<ne>"api"<block_start><raise>UsageError("In order to use Fil, you need to run your notebook with the Fil kernel.\n\n"<concat>"You can change the kernel via the 'Change Kernel' option at the bottom of "<concat>"the Kernel menu in Jupyter.")<block_end><import_from_stmt>._ipython FilMagics<line_sep>ipython.register_magics(FilMagics)<block_end>
<import_stmt>os<import_stmt>time<import_from_stmt>datetime datetime<import_stmt>boto3<import_from_stmt>botocore.exceptions ClientError<import_from_stmt>boto3.dynamodb.conditions Key<import_from_stmt>aws_lambda_powertools.logging Logger<line_sep>logger=Logger()<import_stmt>common.get_config<as>get_config<def_stmt>check_regions_to_deploy package requirements_hash regions<block_start>""" Args: package: Name of package to deploy requirements_hash: Hash of requirements.txt file regions: Total regions configured to deployed return: regions_to_deploy(list): Regions where latest package doesn't match requirements_hash provided """<line_sep>table_name=os.environ["DB_NAME"]<line_sep>dynamodb=boto3.resource("dynamodb")<line_sep>table=dynamodb.Table(table_name)<line_sep>response=table.query(IndexName="package_global" KeyConditionExpression=Key("pckg").eq(package)&Key("dplySts").eq("latest") )<line_sep># check if there are any region in regions that aren't deployed regions_deployed=[item["rgn"]<for>item response["Items"]]<line_sep>regions_to_deploy=[region<for>region regions<if>region<not><in>regions_deployed]<line_sep>logger.info({"message":f"Deploying to {len(regions_to_deploy)} new regions" "new_regions":regions_to_deploy "regions_deployed":regions_deployed })<line_sep># for all deployed regions, check if it has the latest version <for_stmt>item response["Items"]<block_start><if_stmt>item["rqrmntsHsh"]<ne>requirements_hash<block_start><if_stmt>item["rgn"]<in>regions<block_start>regions_to_deploy.append(item["rgn"])<block_end><block_end><block_end># deduplicate regions_to_deploy=list(set(regions_to_deploy))<line_sep>logger.info({"regions_to_deploy":regions_to_deploy})<line_sep><return>regions_to_deploy<block_end><def_stmt>download_artifact package_artifact<block_start>""" Downloads s3://bucket_name/package_artifact to /tmp directory """<line_sep>bucket_name=os.environ["BUCKET_NAME"]<line_sep>s3=boto3.resource("s3")<line_sep>s3.meta.client.download_file(bucket_name package_artifact f"/tmp/{package_artifact}")<with_stmt>open(f"/tmp/{package_artifact}" "rb")<as>zip_file<block_start>zip_binary=zip_file.read()<block_end>logger.info(f"Package {package_artifact} downloaded")<line_sep><return>zip_binary<block_end><def_stmt>get_requirements_txt package<block_start>""" Args: package: Name of package to query for return: requirements_txt (str): Requirements.txt of the package, or "null" ot not present """<line_sep>build_v0="bldVrsn0#"<line_sep>sk=f"pckg#{package}"<line_sep>client=boto3.client("dynamodb")<line_sep>table_name=os.environ["DB_NAME"]<line_sep>response=client.get_item(TableName=table_name Key={"pk":{"S":build_v0} "sk":{"S":sk}} )<line_sep>logger.info({"query_requirements":response})<line_sep>requirements_txt=response.get("Item" {}).get("rqrmntsTxt" {}).get("S" "null")<line_sep><return>requirements_txt<block_end>@logger.inject_lambda_context<def_stmt>main event context<block_start>regions=get_config.get_aws_regions()<line_sep>package=event["package"]<line_sep>version=event["version"]<line_sep>build_flag=event["build_flag"]<line_sep>package_artifact=event["zip_file"]<line_sep>requirements_hash=event["requirements_hash"]<line_sep>license_info=event["license_info"]<line_sep>table_name=os.environ["DB_NAME"]<line_sep>expiry_days=int(os.environ["EXPIRY_DAYS"])<line_sep>dynamo_client=boto3.client("dynamodb")<line_sep>deployed_flag=<false><line_sep># Check if need to deploy regions_to_deploy=check_regions_to_deploy(package requirements_hash 
regions)<if_stmt>len(regions_to_deploy)<eq>0<block_start>logger.info({"message":"No new regions to deploy to, terminating!"})<line_sep><return>{"deployed_flag":deployed_flag "build_flag":build_flag "package":package "version":version "requirements_hash":requirements_hash }<block_end>logger.info({"message":"Regions to deploy" "regions_to_deploy":regions_to_deploy})<line_sep># Download Lambda Artifact layer_name=f"{os.environ['LAMBDA_PREFIX']}{package}"<line_sep>zip_binary=download_artifact(package_artifact)<line_sep># Get requirements txt requirements_txt=get_requirements_txt(package)<for_stmt>region regions_to_deploy# Publish Layer Version <block_start>logger.info({"message":"Deploying" "region":region "package":package})<line_sep>lambda_client=boto3.client("lambda" region_name=region)<line_sep>response=lambda_client.publish_layer_version(LayerName=layer_name Description=f"{package}=={version} | {requirements_hash}" Content={"ZipFile":zip_binary} CompatibleRuntimes=["python3.6" "python3.7" "python3.8"] LicenseInfo=license_info )<line_sep>layer_version_arn=response["LayerVersionArn"]<line_sep>layer_version_created_date=datetime.utcnow().isoformat()<line_sep>layer_version=int(layer_version_arn.split(":")[-1])<line_sep># Make Layer Publicly accessible logger.info({"message":"Making Public" "region":region "package":package "arn":layer_version_arn "created_date":layer_version_created_date })<line_sep>lambda_client.add_layer_version_permission(LayerName=layer_name VersionNumber=layer_version StatementId="make_public" Action="lambda:GetLayerVersion" Principal="*" )<line_sep># Insert new entry into DynamoDB logger.info({"message":"Inserting to table" "region":region "package":package "arn":layer_version_arn })<line_sep>pk=f"lyr#{region}.{package}"<line_sep>sk_v0="lyrVrsn0#"<line_sep>sk=f"lyrVrsn#v{layer_version}"<line_sep>sk_previous=f"lyrVrsn#v{layer_version-1}"<line_sep>dynamo_client.transact_write_items(TransactItems=[{"Update":{"TableName":table_name "Key":{"pk":{"S":pk} "sk":{"S":sk_v0} } "UpdateExpression":"set "<concat>"rqrmntsTxt = :rqrmntsTxt, "<concat>"pckgVrsn = :pckgVrsn, "<concat>"rqrmntsHsh = :rqrmntsHsh,"<concat>"arn = :arn,"<concat>"crtdDt = :crtdDt,"<concat>"lyrVrsn = :lyrVrsn" "ExpressionAttributeValues":{":rqrmntsTxt":{"S":requirements_txt} ":crtdDt":{"S":layer_version_created_date} ":pckgVrsn":{"S":version} ":rqrmntsHsh":{"S":requirements_hash} ":arn":{"S":layer_version_arn} ":lyrVrsn":{"N":str(layer_version)} } # Allow update only if # Current lyrVrsn is less than updated value # or lyrVrsn doesn't exists "ConditionExpression":"lyrVrsn <= :lyrVrsn OR attribute_not_exists(lyrVrsn)" }} {"Put":{"TableName":table_name "Item":{"pk":{"S":pk} "sk":{"S":sk} "pckgVrsn":{"S":version} "crtdDt":{"S":layer_version_created_date} "rqrmntsTxt":{"S":requirements_txt} "rqrmntsHsh":{"S":requirements_hash} "arn":{"S":layer_version_arn} "pckg":{"S":package} "rgn":{"S":region} "dplySts":{"S":"latest"} "lyrVrsn":{"N":str(layer_version)} } }} ])<if_stmt>layer_version<g>1<block_start>logger.info({"message":"Updating Previous Version" "region":region "package":package "arn":layer_version_arn })<try_stmt><block_start>dynamo_client.update_item(TableName=table_name Key={"pk":{"S":pk} "sk":{"S":sk_previous}} UpdateExpression="set "<concat>"dplySts = :dplySts, "<concat>"exDt = :exDt" ExpressionAttributeValues={":dplySts":{"S":"deprecated"} ":exDt":{"N":str(int(time.time()+24<times>3600<times>expiry_days))} } ConditionExpression="attribute_exists(sk)" 
)<block_end><except_stmt>ClientError<as>e<block_start><if_stmt>e.response["Error"]["Code"]<eq>"ConditionalCheckFailedException"<block_start>logger.warning({"message":"Conditional Check failed" "layer_version":layer_version "sk":sk_previous })<block_end><block_end><block_end>deployed_flag=<true><block_end><return>{"deployed_to":regions_to_deploy "deployed_flag":deployed_flag "build_flag":build_flag "package":package "version":version "requirements_hash":requirements_hash }<block_end>
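A hedged sketch of the event shape the Lambda handler above expects; the keys mirror what main() reads from the event, but every concrete value (and the environment variables DB_NAME, BUCKET_NAME, LAMBDA_PREFIX and EXPIRY_DAYS the handler also needs) is an illustrative placeholder, not a value taken from the file:

# Illustrative invocation payload for main(event, context); running it for real
# additionally requires AWS credentials, the DynamoDB table and the S3 artifact bucket.
event = {
    "package": "requests",              # placeholder package name
    "version": "2.26.0",                # placeholder version string
    "build_flag": True,                 # passed through unchanged into the response
    "zip_file": "requests-2.26.0.zip",  # object key expected in BUCKET_NAME
    "requirements_hash": "abc123",      # compared against the 'latest' rows per region
    "license_info": "Apache-2.0",       # stored on the published layer version
}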
<import_from_stmt>torch nn<import_from_stmt>torch.nn Sequential<as>Seq Linear<as>Lin<line_sep>############################## # Basic layers ############################## <def_stmt>act_layer act_type inplace=<false> neg_slope=0.2 n_prelu=1<block_start>""" helper selecting activation :param act_type: one of 'relu', 'leakyrelu' or 'prelu' :param inplace: in-place flag passed to ReLU/LeakyReLU :param neg_slope: negative slope for LeakyReLU (also used as the PReLU init value) :param n_prelu: number of PReLU parameters :return: the corresponding nn activation module """<line_sep>act_type=act_type.lower()<if_stmt>act_type<eq>'relu'<block_start>layer=nn.ReLU(inplace)<block_end><elif_stmt>act_type<eq>'leakyrelu'<block_start>layer=nn.LeakyReLU(neg_slope inplace)<block_end><elif_stmt>act_type<eq>'prelu'<block_start>layer=nn.PReLU(num_parameters=n_prelu init=neg_slope)<block_end><else_stmt><block_start><raise>NotImplementedError('activation layer [%s] is not found'%act_type)<block_end><return>layer<block_end><def_stmt>norm_layer norm_type nc# helper selecting normalization layer <block_start>norm_type=norm_type.lower()<if_stmt>norm_type<eq>'batch'<block_start>layer=nn.BatchNorm1d(nc affine=<true>)<block_end><elif_stmt>norm_type<eq>'instance'<block_start>layer=nn.InstanceNorm1d(nc affine=<false>)<block_end><else_stmt><block_start><raise>NotImplementedError('normalization layer [%s] is not found'%norm_type)<block_end><return>layer<block_end><class_stmt>MultiSeq(Seq)<block_start><def_stmt>__init__ self *args<block_start>super(MultiSeq self).__init__(*args)<block_end><def_stmt>forward self *inputs<block_start><for_stmt>module self._modules.values()<block_start><if_stmt>type(inputs)<eq>tuple<block_start>inputs=module(*inputs)<block_end><else_stmt><block_start>inputs=module(inputs)<block_end><block_end><return>inputs<block_end><block_end><class_stmt>MLP(Seq)<block_start><def_stmt>__init__ self channels act_type='relu' norm_type=<none> bias=<true><block_start>m=[]<for_stmt>i range(1 len(channels))<block_start>m.append(Lin(channels[i-1] channels[i] bias))<if_stmt>act_type<block_start>m.append(act_layer(act_type))<block_end><if_stmt>norm_type<block_start>m.append(norm_layer(norm_type channels[-1]))<block_end><block_end>super(MLP self).__init__(*m)<block_end><block_end>
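A short, hedged usage sketch for the helpers above (the sizes are arbitrary). Note that, as written, MLP builds every norm_layer with channels[-1], so passing a norm_type only shape-checks when each hidden width equals the final width; the example therefore uses the activation only.

import torch

mlp = MLP([16, 64, 32], act_type='relu')   # Lin(16, 64) -> ReLU -> Lin(64, 32) -> ReLU
x = torch.randn(8, 16)                     # batch of 8 feature vectors
print(mlp(x).shape)                        # torch.Size([8, 32])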
# coding: utf-8 <import_stmt>lxml.html<import_from_stmt>.abstract get_strategy<class_stmt>TestTags<block_start><def_stmt>test_adding_tags_for_page self# Given <block_start>strategy=get_strategy({'start_urls':[{'url':'http://foo.bar/api' 'tags':["test"]}]})<line_sep>strategy.dom=lxml.html.fromstring(""" <html><body> <h1>Foo</h1> </body></html> """)<line_sep># When actual=strategy.get_records_from_dom("http://foo.bar/api")<line_sep># Then <assert_stmt>actual[0]['tags']<eq>["test"]<block_end><def_stmt>test_adding_tags_for_subpage self# Given <block_start>strategy=get_strategy({'start_urls':[{'url':'http://foo.bar/api' 'tags':["test"]}]})<line_sep>strategy.dom=lxml.html.fromstring(""" <html><body> <h1>Foo</h1> </body></html> """)<line_sep># When actual=strategy.get_records_from_dom("http://foo.bar/api/test")<line_sep># Then <assert_stmt>actual[0]['tags']<eq>["test"]<block_end><def_stmt>test_regex_start_urls self# Given # Stub ENV variables read by ConfigLoader <block_start>strategy=get_strategy({'start_urls':[{'url':'http://foo.bar/.*' 'tags':["test"]}]})<line_sep>strategy.dom=lxml.html.fromstring(""" <html><body> <h1>Foo</h1> </body></html> """)<line_sep># When actual=strategy.get_records_from_dom("http://foo.bar/api/test")<line_sep># Then <assert_stmt>actual[0]['tags']<eq>["test"]<block_end><block_end>
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_from_stmt>telemetry.page page<as>page_module<import_from_stmt>telemetry story<class_stmt>BasicPlayPage(page_module.Page)<block_start><def_stmt>__init__ self url page_set name=''<block_start>super(BasicPlayPage self).__init__(url=url page_set=page_set name=name)<line_sep>self.add_browser_metrics=<true><block_end><def_stmt>PlayAction self action_runner<block_start>action_runner.PlayMedia(playing_event_timeout_in_seconds=60 ended_event_timeout_in_seconds=60)<block_end><def_stmt>RunPageInteractions self action_runner<block_start>self.PlayAction(action_runner)<block_end><def_stmt>SeekBeforeAndAfterPlayhead self action_runner<block_start>action_runner.PlayMedia(playing_event_timeout_in_seconds=60)<line_sep># Wait for 1 second so that we know the play-head is at ~1s. action_runner.Wait(1)<line_sep># Seek to before the play-head location. action_runner.SeekMedia(seconds=0.5 timeout_in_seconds=60 label='seek_warm')<line_sep># Seek to after the play-head location. action_runner.SeekMedia(seconds=15 timeout_in_seconds=60 label='seek_cold')<block_end><block_end><class_stmt>SeekBeforeAndAfterPlayheadPage(BasicPlayPage)<block_start><def_stmt>__init__ self url page_set name<block_start>super(SeekBeforeAndAfterPlayheadPage self).__init__(url=url page_set=page_set name=name)<line_sep>self.add_browser_metrics=<false><block_end><def_stmt>RunPageInteractions self action_runner<block_start>self.SeekBeforeAndAfterPlayhead(action_runner)<block_end><block_end><class_stmt>MediaCnsCasesPageSet(story.StorySet)<block_start>""" Media benchmark on network constrained conditions. """<def_stmt>__init__ self<block_start>super(MediaCnsCasesPageSet self).__init__()<line_sep>urls_list=[# pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=no_constraints_webm&src=tulip2.webm&net=none' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=cable_webm&src=tulip2.webm&net=cable' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=wifi_webm&src=tulip2.webm&net=wifi' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=no_constraints_ogv&src=tulip2.ogv&net=none' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=cable_ogv&src=tulip2.ogv&net=cable' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=wifi_ogv&src=tulip2.ogv&net=wifi' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=no_constraints_mp4&src=tulip2.mp4&net=none' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=cable_mp4&src=tulip2.mp4&net=cable' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=wifi_mp4&src=tulip2.mp4&net=wifi' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=no_constraints_wav&src=tulip2.wav&type=audio&net=none' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=cable_wav&src=tulip2.wav&type=audio&net=cable' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=wifi_wav&src=tulip2.wav&type=audio&net=wifi' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=no_constraints_ogg&src=tulip2.ogg&type=audio&net=none' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=cable_ogg&src=tulip2.ogg&type=audio&net=cable' # pylint: disable=line-too-long 
'file://tough_video_cases/video.html?id=wifi_ogg&src=tulip2.ogg&type=audio&net=wifi' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=no_constraints_mp3&src=tulip2.mp3&type=audio&net=none' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=cable_mp3&src=tulip2.mp3&type=audio&net=cable' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=wifi_mp3&src=tulip2.mp3&type=audio&net=wifi' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=no_constraints_m4a&src=tulip2.m4a&type=audio&net=none' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=cable_m4a&src=tulip2.m4a&type=audio&net=cable' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=wifi_m4a&src=tulip2.m4a&type=audio&net=wifi']<for_stmt>url urls_list<block_start>self.AddStory(BasicPlayPage(url self))<block_end>urls_list2=[# pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=wifi_mp3&src=tulip2.mp3&type=audio&net=wifi' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=wifi_m4a&src=tulip2.m4a&type=audio&net=wifi' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=wifi_ogg&src=tulip2.ogg&type=audio&net=wifi' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=wifi_wav&src=tulip2.wav&type=audio&net=wifi' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=wifi_mp4&src=tulip2.mp4&type=audio&net=wifi' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=wifi_ogv&src=tulip2.ogv&type=audio&net=wifi' # pylint: disable=line-too-long 'file://tough_video_cases/video.html?id=wifi_webm&src=tulip2.webm&type=audio&net=wifi']<for_stmt>url urls_list2<block_start><if_stmt>url<in>urls_list<block_start>name='seek_'+url<block_end><else_stmt><block_start>name=''<block_end>self.AddStory(SeekBeforeAndAfterPlayheadPage(url self name=name))<block_end><block_end><block_end>
# Copyright 2021 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """EfficientNet V1 and V2 model configs."""<import_stmt>functools<import_stmt>re<import_stmt>backbones.efficientnet.effnetv2_hparams<as>hparams<line_sep>cfg_register=functools.partial(hparams.register prefix='cfg:')<class_stmt>BlockDecoder(object)<block_start>"""Block Decoder for readability."""<def_stmt>_decode_block_string self block_string<block_start>"""Gets a block through a string notation of arguments."""<assert_stmt>isinstance(block_string str)<line_sep>ops=block_string.split('_')<line_sep>options={}<for_stmt>op ops<block_start>splits=re.split(r'(\d.*)' op)<if_stmt>len(splits)<ge>2<block_start>key,value=splits[:2]<line_sep>options[key]=value<block_end><block_end><return>hparams.Config(kernel_size=int(options['k']) num_repeat=int(options['r']) input_filters=int(options['i']) output_filters=int(options['o']) expand_ratio=int(options['e']) se_ratio=float(options['se'])<if>'se'<in>options<else><none> bottomright_stride='br'<in>options strides=int(options['s']) conv_type=int(options['c'])<if>'c'<in>options<else>0 )<block_end><def_stmt>_encode_block_string self block<block_start>"""Encodes a block to a string."""<line_sep>args=['r%d'%block.num_repeat 'k%d'%block.kernel_size 's%d'%block.strides 'e%s'%block.expand_ratio 'i%d'%block.input_filters 'o%d'%block.output_filters 'c%d'%block.conv_type 'f%d'%block.fused_conv ]<if_stmt>block.se_ratio<g>0<and>block.se_ratio<le>1<block_start>args.append('se%s'%block.se_ratio)<block_end><if_stmt>block.bottomright_stride<g>0<and>block.se_ratio<le>1<block_start>args.append('br')<block_end><return>'_'.join(args)<block_end><def_stmt>decode self string_list<block_start>"""Decodes a list of string notations to specify blocks inside the network. Args: string_list: a list of strings, each string is a notation of block. Returns: A list of namedtuples to represent blocks arguments. """<assert_stmt>isinstance(string_list list)<line_sep>blocks_args=[]<for_stmt>block_string string_list<block_start>blocks_args.append(self._decode_block_string(block_string))<block_end><return>blocks_args<block_end><def_stmt>encode self blocks_args<block_start>"""Encodes a list of Blocks to a list of strings. Args: blocks_args: A list of namedtuples to represent blocks arguments. Returns: a list of strings, each string is a notation of block. 
"""<line_sep>block_strings=[]<for_stmt>block blocks_args<block_start>block_strings.append(self._encode_block_string(block))<block_end><return>block_strings<block_end><block_end>#################### EfficientNet V1 configs #################### v1_b0_block_str=['r1_k3_s1_e1_i32_o16_se0.25' 'r2_k3_s2_e6_i16_o24_se0.25' 'r2_k5_s2_e6_i24_o40_se0.25' 'r3_k3_s2_e6_i40_o80_se0.25' 'r3_k5_s1_e6_i80_o112_se0.25' 'r4_k5_s2_e6_i112_o192_se0.25_br' 'r1_k3_s1_e6_i192_o320_se0.25' ]<line_sep>efficientnetv1_params={# (width_coefficient, depth_coefficient, resolution, dropout_rate) 'efficientnet-b0':(1.0 1.0 224 0.2) 'efficientnet-b1':(1.0 1.1 240 0.2) 'efficientnet-b2':(1.1 1.2 260 0.3) 'efficientnet-b3':(1.2 1.4 300 0.3) 'efficientnet-b4':(1.4 1.8 380 0.4) 'efficientnet-b5':(1.6 2.2 456 0.4) 'efficientnet-b6':(1.8 2.6 528 0.5) 'efficientnet-b7':(2.0 3.1 600 0.5) 'efficientnet-b8':(2.2 3.6 672 0.5) 'efficientnet-l2':(4.3 5.3 800 0.5) }<def_stmt>efficientnetv1_config model_name='efficientnet-b0'<block_start>"""EfficientNetV1 model config."""<line_sep>width_coefficient,depth_coefficient,isize,dropout_rate=(efficientnetv1_params[model_name])<line_sep>cfg=hparams.Config(model=dict(model_name=model_name blocks_args=BlockDecoder().decode(v1_b0_block_str) width_coefficient=width_coefficient depth_coefficient=depth_coefficient dropout_rate=dropout_rate ) eval=dict(isize=isize) train=dict(isize=0.8) # 80% of eval size data=dict(augname='effnetv1_autoaug') )<line_sep><return>cfg<block_end>#################### EfficientNet V2 configs #################### v2_base_block=[# The baseline config for v2 models. 'r1_k3_s1_e1_i32_o16_c1' 'r2_k3_s2_e4_i16_o32_c1' 'r2_k3_s2_e4_i32_o48_c1' 'r3_k3_s2_e4_i48_o96_se0.25' 'r5_k3_s1_e6_i96_o112_se0.25' 'r8_k3_s2_e6_i112_o192_se0.25_br' ]<line_sep>v2_s_block=[# about base.yaml * (width1.4, depth1.8) 'r2_k3_s1_e1_i24_o24_c1' 'r4_k3_s2_e4_i24_o48_c1' 'r4_k3_s2_e4_i48_o64_c1' 'r6_k3_s2_e4_i64_o128_se0.25' 'r9_k3_s1_e6_i128_o160_se0.25' 'r15_k3_s2_e6_i160_o256_se0.25_br' ]<line_sep>v2_m_block=[# about base.yaml * (width1.6, depth2.2) 'r3_k3_s1_e1_i24_o24_c1' 'r5_k3_s2_e4_i24_o48_c1' 'r5_k3_s2_e4_i48_o80_c1' 'r7_k3_s2_e4_i80_o160_se0.25' 'r14_k3_s1_e6_i160_o176_se0.25' 'r18_k3_s2_e6_i176_o304_se0.25_br' 'r5_k3_s1_e6_i304_o512_se0.25' ]<line_sep>v2_l_block=[# about base.yaml * (width2.0, depth3.1) 'r4_k3_s1_e1_i32_o32_c1' 'r7_k3_s2_e4_i32_o64_c1' 'r7_k3_s2_e4_i64_o96_c1' 'r10_k3_s2_e4_i96_o192_se0.25' 'r19_k3_s1_e6_i192_o224_se0.25' 'r25_k3_s2_e6_i224_o384_se0.25_br' 'r7_k3_s1_e6_i384_o640_se0.25' ]<line_sep>v2_xl_block=[# only for 21k pretraining. 'r4_k3_s1_e1_i32_o32_c1' 'r8_k3_s2_e4_i32_o64_c1' 'r8_k3_s2_e4_i64_o96_c1' 'r16_k3_s2_e4_i96_o192_se0.25' 'r24_k3_s1_e6_i192_o256_se0.25' 'r32_k3_s2_e6_i256_o512_se0.25_br' 'r8_k3_s1_e6_i512_o640_se0.25' ]<line_sep>efficientnetv2_params={# (block, width, depth, train_size, eval_size, dropout, randaug, mixup, aug) 'efficientnetv2-s':# 83.9% @ 22M (v2_s_block 1.0 1.0 300 384 0.2 10 0 'randaug') 'efficientnetv2-m':# 85.2% @ 54M (v2_m_block 1.0 1.0 384 480 0.3 15 0.2 'randaug') 'efficientnetv2-l':# 85.7% @ 120M (v2_l_block 1.0 1.0 384 480 0.4 20 0.5 'randaug') 'efficientnetv2-xl':(v2_xl_block 1.0 1.0 384 512 0.4 20 0.5 'randaug') # For fair comparison to EfficientNetV1, using the same scaling and autoaug. 
'efficientnetv2-b0':# 78.7% @ 7M params (v2_base_block 1.0 1.0 192 224 0.2 0 0 'effnetv1_autoaug') 'efficientnetv2-b1':# 79.8% @ 8M params (v2_base_block 1.0 1.1 192 240 0.2 0 0 'effnetv1_autoaug') 'efficientnetv2-b2':# 80.5% @ 10M params (v2_base_block 1.1 1.2 208 260 0.3 0 0 'effnetv1_autoaug') 'efficientnetv2-b3':# 82.1% @ 14M params (v2_base_block 1.2 1.4 240 300 0.3 0 0 'effnetv1_autoaug') }<def_stmt>efficientnetv2_config model_name='efficientnetv2-s'<block_start>"""EfficientNetV2 model config."""<line_sep>block,width,depth,train_size,eval_size,dropout,randaug,mix,aug=(efficientnetv2_params[model_name])<line_sep>cfg=hparams.Config(model=dict(model_name=model_name blocks_args=BlockDecoder().decode(block) width_coefficient=width depth_coefficient=depth dropout_rate=dropout ) train=dict(isize=train_size stages=4 sched=<true>) eval=dict(isize=eval_size) data=dict(augname=aug ram=randaug mixup_alpha=mix cutmix_alpha=mix) )<line_sep><return>cfg<block_end>################################################################################ <def_stmt>get_model_config model_name:str<block_start>"""Main entry for model name to config."""<if_stmt>model_name.startswith('efficientnet-')<block_start><return>efficientnetv1_config(model_name)<block_end><if_stmt>model_name.startswith('efficientnetv2-')<block_start><return>efficientnetv2_config(model_name)<block_end><raise>ValueError(f'Unknown model_name {model_name}')<block_end>
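A hypothetical usage sketch of the block-string notation and the config entry point defined above:

decoder = BlockDecoder()
b = decoder.decode(['r2_k3_s1_e1_i24_o24_c1'])[0]   # first entry of v2_s_block
print(b.num_repeat, b.kernel_size, b.strides, b.input_filters, b.output_filters)  # 2 3 1 24 24

cfg = get_model_config('efficientnetv2-s')          # full hparams.Config for the S model
print(cfg.model.model_name)                         # efficientnetv2-s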
# -*- coding: utf-8 -*- <import_stmt>scrapy<import_from_stmt>locations.items GeojsonPointItem<class_stmt>TheBigBiscuitSpider(scrapy.Spider)<block_start>name='thebigbiscuit'<line_sep>item_attributes={'brand':'The Big Biscuit'}<line_sep>allowed_domains=['www.bigbiscuit.com']<line_sep>start_urls=['https://bigbiscuit.com/locations/']<def_stmt>parse self response<block_start><for_stmt>each response.xpath('//*[contains(@class, "section--location")]')<block_start>name=each.xpath('./*[@class="location-title"]/text()').extract_first()<if_stmt>name<block_start>street=each.xpath('.//*[@class="info-block"]//*[@class="text-small"][1]/text()').extract_first()<line_sep>postcode=each.xpath('./@data-zip').extract_first()<line_sep>city=each.xpath('./@data-city').extract_first()<line_sep>state=each.xpath('./@data-state').extract_first()<line_sep>phone=each.xpath('.//*[@class="phone"]/text()').extract_first()<line_sep>ref=each.xpath('.//*[@data-location]/@data-location').extract_first()<line_sep>hours=each.xpath('.//*[@class="info-block"]//*[@class="text-small"][3]/text()').extract_first()<line_sep>website=each.xpath(f'//*[@class="title h6" and text()="{name}"]/parent::div/following-sibling::a[1]/@href').extract_first()<line_sep>properties={'name':name 'ref':ref 'street':street 'city':city 'postcode':postcode 'state':state 'phone':phone 'website':website}<line_sep><yield>GeojsonPointItem(**properties)<block_end><block_end><block_end><block_end>
<import_stmt>subprocess<import_stmt>datetime<import_stmt>sys<def_stmt>install package<block_start>subprocess.check_call([sys.executable "-m" "pip" "install" package])<block_end>install('setuptools')<line_sep>install('gitpython')<import_stmt>setuptools<import_stmt>git<line_sep>repo=git.Repo(search_parent_directories=<true>)<line_sep>date=datetime.datetime.utcnow()<with_stmt>open("README.md" "r")<as>fh<block_start>long_description=fh.read()+f'\n git version: {repo.head.object.hexsha}'+f'\n date: {date}'<block_end><with_stmt>open('VERSION')<as>fs<block_start>version=fs.readline().strip()<block_end># version += f".dev{date.strftime('%y%m%d%H%M%S')}" <with_stmt>open('requirements.txt')<as>fs<block_start>requirements=[l.strip()<for>l fs<if><not>l.strip().startswith('#')]<block_end>extras={}<line_sep>extras["docs"]=["recommonmark" "sphinx" "sphinx-markdown-tables" "sphinx-rtd-theme"]<line_sep>setuptools.setup(name="DeBERTa" version=version author="penhe" author_email="<EMAIL>" description="Decoding enhanced BERT with Disentangled Attention" keywords="NLP deep learning transformer pytorch Attention BERT RoBERTa DeBERTa" license="MIT" long_description=long_description long_description_content_type="text/markdown" url="https://github.com/microsoft/DeBERTa" packages=setuptools.find_packages(exclude=['__pycache__']) package_dir={'DeBERTa':'DeBERTa'} classifiers=["Programming Language :: Python :: 3" "License :: OSI Approved :: MIT License" "Operating System :: OS Independent" ] python_requires='>=3.6' extras_require=extras install_requires=requirements)<line_sep>
<import_stmt>pytest<import_from_stmt>. mock_s3<import_from_stmt>library.aws.s3 S3BucketsAclChecker<import_from_stmt>library.aws.utility Account<line_sep>buckets={"Bucket1":{"Description":"Private bucket" "CheckShouldPass":<true> "ACL":"private" } "Bucket2":{"Description":"Public read bucket" "CheckShouldPass":<false> "ACL":"public-read" } "Bucket3":{"Description":"Public read-write bucket" "CheckShouldPass":<false> "ACL":"public-read-write" } "Bucket4":{"Description":"Authenticated read bucket" "CheckShouldPass":<false> "ACL":"authenticated-read" }}<def_stmt>find_rule_prop bucket prop default<block_start><try_stmt><block_start><return>buckets[bucket.name][prop]<block_end><except_stmt>KeyError<block_start><return>default<block_end><block_end><def_stmt>ident_test arg<block_start>""" Used to build identification string for each autogenerated test (for easy recognition of failed tests). :param bucket: dict with information about rules :return: . """<line_sep># print(jsonDumps(s3acl_details)) <if_stmt>isinstance(arg bool)<block_start><return>"remediated"<if>arg<else>"original"<block_end><else_stmt><block_start>descr=find_rule_prop(arg "Description" "default description")<line_sep><return>f"params: {arg.name} ({descr})"<block_end><block_end><def_stmt>pytest_generate_tests metafunc<block_start>""" Entrypoint for tests (built-in pytest function for dynamic generation of test cases). """<line_sep># Launch EC2 mocking and env preparation mock_s3.start()<line_sep>mock_s3.create_env(buckets)<line_sep>account=Account()<line_sep>checker=S3BucketsAclChecker(account)<line_sep>checker.check()<for_stmt>s3_bucket checker.buckets<block_start>s3_bucket.restrict_acl()<block_end>checker_remediated=S3BucketsAclChecker(account)<line_sep>checker_remediated.check()<line_sep>s3_buckets=[(bucket <false>)<for>bucket checker.buckets]<line_sep>s3_buckets<augadd>[(bucket <true>)<for>bucket checker_remediated.buckets]<line_sep>metafunc.parametrize("bucket,remediated" s3_buckets ids=ident_test)<block_end>@pytest.mark.s3acl<def_stmt>test_s3acl bucket remediated<block_start>""" Actual testing function. :param bucket: :param remediated :return: nothing, raises AssertionError if actual test result is not matched with expected """<line_sep># print(f"{jsonDumps(s3acl_details)}") expected=<true><if>remediated<else>find_rule_prop(bucket "CheckShouldPass" <true>)<assert_stmt>expected<eq>(<not>bucket.public_by_acl)<block_end>
<import_stmt>numpy<as>np<import_stmt>pytest<import_stmt>tensorflow_probability<as>tfp<import_from_stmt>gpflow.config Config as_context<import_from_stmt>gpflow.utilities positive triangular<line_sep>@pytest.mark.parametrize("env_lower, override_lower" [(0.1 <none>) # ensure default from config is applied (0.0 0.2) # ensure override is applied (0.3 0.4) # ensure local overrides config ] )<def_stmt>test_positive_lower env_lower override_lower<block_start>expected_lower=override_lower<or>env_lower<with_stmt>as_context(Config(positive_bijector="softplus" positive_minimum=env_lower))<block_start>bijector=positive(lower=override_lower)<assert_stmt>isinstance(bijector tfp.bijectors.Chain)<assert_stmt>np.isclose(bijector.bijectors[0].shift expected_lower)<block_end><block_end>@pytest.mark.parametrize("env_bijector, override_bijector, expected_class" [("softplus" <none> tfp.bijectors.Softplus) ("softplus" "Exp" tfp.bijectors.Exp) ("exp" <none> tfp.bijectors.Exp) ("exp" "Softplus" tfp.bijectors.Softplus) ] )<def_stmt>test_positive_bijector env_bijector override_bijector expected_class<block_start><with_stmt>as_context(Config(positive_bijector=env_bijector positive_minimum=0.0))<block_start>bijector=positive(base=override_bijector)<assert_stmt>isinstance(bijector expected_class)<block_end><block_end><def_stmt>test_positive_calculation_order <block_start>value,lower=-10.0 10.0<line_sep>expected=np.exp(value)+lower<with_stmt>as_context(Config(positive_bijector="exp" positive_minimum=lower))<block_start>result=positive()(value).numpy()<block_end><assert_stmt>np.isclose(result expected)<assert_stmt>result<ge>lower<block_end><def_stmt>test_triangular <block_start><assert_stmt>isinstance(triangular() tfp.bijectors.FillTriangular)<block_end>
# # This file is part of pyasn1-modules software. # # Created by <NAME>. # # Copyright (c) 2019, Vigil Security, LLC # License: http://snmplabs.com/pyasn1/license.html # # Algorithm Identifiers for HKDF # # ASN.1 source from: # https://www.rfc-editor.org/rfc/rfc8619.txt # <import_from_stmt>pyasn1.type univ<import_from_stmt>pyasn1_modules rfc5280<line_sep># Object Identifiers id_alg_hkdf_with_sha256=univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.28')<line_sep>id_alg_hkdf_with_sha384=univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.29')<line_sep>id_alg_hkdf_with_sha512=univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.30')<line_sep># Key Derivation Algorithm Identifiers kda_hkdf_with_sha256=rfc5280.AlgorithmIdentifier()<line_sep>kda_hkdf_with_sha256['algorithm']=id_alg_hkdf_with_sha256<line_sep># kda_hkdf_with_sha256['parameters'] are absent kda_hkdf_with_sha384=rfc5280.AlgorithmIdentifier()<line_sep>kda_hkdf_with_sha384['algorithm']=id_alg_hkdf_with_sha384<line_sep># kda_hkdf_with_sha384['parameters'] are absent kda_hkdf_with_sha512=rfc5280.AlgorithmIdentifier()<line_sep>kda_hkdf_with_sha512['algorithm']=id_alg_hkdf_with_sha512<line_sep># kda_hkdf_with_sha512['parameters'] are absent
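A small, hedged sanity check of the identifiers above using pyasn1's DER encoder:

from pyasn1.codec.der.encoder import encode

print(kda_hkdf_with_sha256['algorithm'])    # 1.2.840.113549.1.9.16.3.28
print(encode(kda_hkdf_with_sha256).hex())   # DER encoding of the AlgorithmIdentifier (parameters absent)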
# # Copyright (C) 2016 Intel Corporation. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice(s), # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice(s), # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO # EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE # OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # <import_stmt>pytest<import_stmt>os<import_stmt>tempfile<import_stmt>subprocess<class_stmt>CMD_helper(object)<block_start><def_stmt>execute_cmd self command sudo=<false><block_start><if_stmt>sudo<block_start>command="sudo {0}".format(command)<block_end>#Initialize temp file for stdout. Will be removed when closed. outfile=tempfile.SpooledTemporaryFile()<try_stmt>#Invoke process <block_start>p=subprocess.Popen(command stdout=outfile stderr=subprocess.STDOUT shell=<true>)<line_sep>p.communicate()<line_sep>#Read stdout from file outfile.seek(0)<line_sep>stdout=outfile.read()<block_end><except_stmt><block_start><raise><block_end><finally_stmt>#Make sure the file is closed <block_start>outfile.close()<block_end>retcode=p.returncode<line_sep><return>stdout retcode<block_end><def_stmt>get_command_path self binary<block_start>"""Get the path to the binary."""<line_sep>path=os.path.dirname(os.path.abspath(__file__))<line_sep><return>os.path.join(path binary)<block_end><block_end>
# -*- coding: utf-8 -*- """This module contains all the LUA code that needs to be on the device to perform whats needed. They will be uploaded if they doesn't exist"""<line_sep># Copyright (C) 2015-2019 <NAME> <<EMAIL>> # pylint: disable=C0301 # flake8: noqa LUA_FUNCTIONS=['recv_block' 'recv_name' 'recv' 'shafile' 'send_block' 'send_file' 'send']<line_sep>DOWNLOAD_FILE="file.open('{filename}') print(file.seek('end', 0)) file.seek('set', {bytes_read}) uart.write(0, file.read({chunk_size}))file.close()"<line_sep>PRINT_FILE="file.open('{filename}') print('---{filename}---') print(file.read()) file.close() print('---')"<line_sep>INFO_GROUP="for key,value in pairs(node.info('{group}')) do k=tostring(key) print(k .. string.rep(' ', 20 - #k), tostring(value)) end"<line_sep>LIST_FILES='for key,value in pairs(file.list()) do print(key,value) end'<line_sep># NUL = \000, ACK = \006 RECV_LUA=r""" function recv() local on,w,ack,nack=uart.on,uart.write,'\6','\21' local fd local function recv_block(d) local t,l = d:byte(1,2) if t ~= 1 then w(0, nack); fd:close(); return on('data') end if l >= 0 then fd:write(d:sub(3, l+2)); end if l == 0 then fd:close(); w(0, ack); return on('data') else w(0, ack) end end local function recv_name(d) d = d:gsub('%z.*', '') d:sub(1,-2) file.remove(d) fd=file.open(d, 'w') on('data', 130, recv_block, 0) w(0, ack) end on('data', '\0', recv_name, 0) w(0, 'C') end function shafile(f) print(crypto.toHex(crypto.fhash('sha1', f))) end """<line_sep># noqa: E122 SEND_LUA=r""" function send(f) uart.on('data', 1, function (data) local on,w=uart.on,uart.write local fd local function send_block(d) l = string.len(d) w(0, '\001' .. string.char(l) .. d .. string.rep('\0', 128 - l)) return l end local function send_file(f) local s, p fd=file.open(f) s=fd:seek('end', 0) p=0 on('data', 1, function(data) if data == '\006' and p<s then fd:seek('set',p) p=p+send_block(fd:read(128)) else send_block('') fd:close() on('data') print('interrupted') end end, 0) w(0, f .. '\000') end uart.on('data') if data == 'C' then send_file(f) else print('transfer interrupted') end end, 0) end """<line_sep>UART_SETUP='uart.setup(0,{baud},8,0,1,1)'<line_sep>REMOVE_ALL_FILES=r""" for key,value in pairs(file.list()) do file.remove(key) end """<line_sep>
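A hypothetical illustration of how these templates are expanded on the host side before being written to the device over UART (file name, offset, chunk size and baud rate are example values):

cmd = DOWNLOAD_FILE.format(filename='init.lua', bytes_read=0, chunk_size=256)
# -> "file.open('init.lua') print(file.seek('end', 0)) file.seek('set', 0) uart.write(0, file.read(256))file.close()"
setup = UART_SETUP.format(baud=115200)
# -> "uart.setup(0,115200,8,0,1,1)"
print(cmd)
print(setup)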
# Copyright 2014-2016 Presslabs SRL # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>os<import_from_stmt>threading Event<import_stmt>pytest<import_from_stmt>mock patch MagicMock call<import_from_stmt>fuse FuseOSError<import_from_stmt>gitfs.views.current CurrentView<import_from_stmt>gitfs.cache.gitignore CachedIgnore<class_stmt>TestCurrentView(object)<block_start><def_stmt>test_rename self<block_start>mocked_re=MagicMock()<line_sep>mocked_index=MagicMock()<line_sep>mocked_os=MagicMock()<line_sep>mocked_result=MagicMock()<line_sep>mocked_result.rename.return_value=<true><line_sep>mocked_re.sub.return_value="new"<line_sep>mocked_os.path.split.return_value=[1 1]<with_stmt>patch.multiple("gitfs.views.current" re=mocked_re os=mocked_os)<block_start><import_from_stmt>gitfs.views current<as>current_view<line_sep>old_rename=current_view.PassthroughView.rename<line_sep>current_view.PassthroughView.rename=<lambda>self old new:<true><line_sep>current=CurrentView(regex="regex" repo="repo" repo_path="repo_path" ignore=CachedIgnore())<line_sep>current._stage=mocked_index<line_sep>result=current.rename("old" "new")<assert_stmt>result<is><true><line_sep>mocked_index.assert_called_once_with(**{"remove":1 "add":"new" "message":"Rename old to new"})<line_sep>mocked_os.path.split.assert_called_once_with("old")<line_sep>current_view.PassthroughView.rename=old_rename<block_end><block_end><def_stmt>test_rename_in_git_dir self<block_start>current=CurrentView(repo="repo" repo_path="repo_path" ignore=CachedIgnore())<with_stmt>pytest.raises(FuseOSError)<block_start>current.rename(".git/" ".git/")<block_end><block_end><def_stmt>test_symlink self<block_start>mocked_index=MagicMock()<line_sep>mocked_repo=MagicMock()<line_sep>mocked_full_path=MagicMock()<line_sep>mocked_full_path.return_value="full_path"<line_sep>mocked_repo._full_path=mocked_full_path<with_stmt>patch("gitfs.views.current.os")<as>mocked_os<block_start>mocked_os.symlink.return_value="done"<line_sep>current=CurrentView(repo=mocked_repo repo_path="repo_path" ignore=CachedIgnore())<line_sep>current._stage=mocked_index<assert_stmt>current.symlink("name" "target")<eq>"done"<line_sep>mocked_os.symlink.assert_called_once_with("target" "full_path")<line_sep>mocked_full_path.assert_called_once_with("name")<line_sep>message="Create symlink to target for name"<line_sep>mocked_index.assert_called_once_with(add="name" message=message)<block_end><block_end><def_stmt>test_readlink self<block_start>mocked_repo=MagicMock()<line_sep>mocked_full_path=MagicMock()<line_sep>mocked_full_path.return_value="full path"<line_sep>mocked_repo._full_path=mocked_full_path<with_stmt>patch("gitfs.views.current.os")<as>mocked_os<block_start>mocked_os.readlink.return_value="done"<line_sep>current=CurrentView(repo=mocked_repo repo_path="repo_path" ignore=CachedIgnore())<line_sep>current._full_path=mocked_full_path<assert_stmt>current.readlink("path")<eq>"done"<block_end><block_end><def_stmt>test_getattr 
self<block_start>mocked_full=MagicMock()<line_sep>mocked_os=MagicMock()<line_sep>mocked_stat=MagicMock()<line_sep>mocked_repo=MagicMock()<line_sep>mocked_stat.simple="stat"<line_sep>mocked_os.lstat.return_value=mocked_stat<line_sep>mocked_full.return_value="full_path"<line_sep>mocked_repo._full_path=mocked_full<with_stmt>patch.multiple("gitfs.views.current" os=mocked_os STATS=["simple"])<block_start>current=CurrentView(repo=mocked_repo uid=1 gid=1 repo_path="repo_path" ignore=CachedIgnore() )<line_sep>current._full_path=mocked_full<line_sep>result=current.getattr("path")<line_sep>asserted_result={"st_uid":1 "st_gid":1 "simple":"stat"}<assert_stmt>result<eq>asserted_result<line_sep>mocked_os.lstat.assert_called_once_with("full_path")<line_sep>mocked_full.assert_called_once_with("path")<block_end><block_end><def_stmt>test_write_in_git_dir self<block_start><with_stmt>pytest.raises(FuseOSError)<block_start>current=CurrentView(repo="repo" uid=1 gid=1 repo_path="repo_path" read_only=Event() ignore=CachedIgnore() )<line_sep>current.write(".git/index" "buf" "offset" 1)<block_end><block_end><def_stmt>test_write_in_modules_dir self<block_start><with_stmt>pytest.raises(FuseOSError)<block_start>current=CurrentView(repo="repo" uid=1 gid=1 repo_path="repo_path" read_only=Event() ignore=CachedIgnore() )<line_sep>current.write(".gitmodules" "buf" "offset" 1)<block_end><block_end><def_stmt>test_write_to_large_file self<block_start>current=CurrentView(repo="repo" uid=1 gid=1 repo_path="repo_path" read_only=Event() ignore=CachedIgnore() )<line_sep>current.max_size=10<line_sep>current.dirty={"/path":{"size":5}}<with_stmt>pytest.raises(FuseOSError)<block_start>current.write("/path" "bufffffert" 11 1)<block_end><block_end><def_stmt>test_write self<block_start><import_from_stmt>gitfs.views current<as>current_view<line_sep>mocked_write=<lambda>self path buf offste fh:"done"<line_sep>old_write=current_view.PassthroughView.write<line_sep>current_view.PassthroughView.write=mocked_write<line_sep>current=CurrentView(repo="repo" uid=1 gid=1 repo_path="repo_path" read_only=Event() ignore=CachedIgnore() )<line_sep>current.max_offset=20<line_sep>current.max_size=20<line_sep>current.dirty={1:{}}<assert_stmt>current.write("/path" "buf" 3 1)<eq>"done"<assert_stmt>current.dirty<eq>{1:{"message":"Update /path" "stage":<true>}}<line_sep>current_view.PassthroughView.write=old_write<block_end><def_stmt>test_mkdir self<block_start><import_from_stmt>gitfs.views current<as>current_view<line_sep>old_mkdir=current_view.PassthroughView.mkdir<line_sep>old_chmod=current_view.PassthroughView.chmod<line_sep>mocked_mkdir=<lambda>self path mode:"done"<line_sep>mocked_chmod=MagicMock()<line_sep>mocked_chmod.return_value=<none><line_sep>current_view.PassthroughView.mkdir=mocked_mkdir<line_sep>current_view.PassthroughView.chmod=mocked_chmod<line_sep>mocked_release=MagicMock()<line_sep>mocked_full_path=MagicMock()<line_sep>mocked_repo=MagicMock()<line_sep>mocked_full_path.return_value="full_path"<line_sep>mocked_repo._full_path=mocked_full_path<line_sep>keep_path="/path/.keep"<line_sep>mode=os.O_WRONLY|os.O_CREAT<with_stmt>patch("gitfs.views.current.os")<as>mocked_os<block_start>mocked_os.path.exists.return_value=<false><line_sep>mocked_os.open.return_value=10<line_sep>mocked_os.O_WRONLY=os.O_WRONLY<line_sep>mocked_os.O_CREAT=os.O_CREAT<line_sep>current=CurrentView(repo=mocked_repo uid=1 gid=1 repo_path="repo_path" ignore=CachedIgnore() )<line_sep>current.release=mocked_release<assert_stmt>current.mkdir("/path" 
"mode")<eq>"done"<line_sep>mocked_full_path.assert_called_once_with(keep_path)<line_sep>mocked_os.path.exists.assert_called_once_with(keep_path)<line_sep>mocked_os.open.assert_called_once_with("full_path" mode)<line_sep>mocked_chmod.assert_called_once_with(keep_path 0o644)<assert_stmt>current.dirty<eq>{10:{"message":"Create the /path directory" "stage":<true>}}<line_sep>mocked_release.assert_called_once_with(keep_path 10)<block_end>current_view.PassthroughView.mkdir=old_mkdir<line_sep>current_view.PassthroughView.chmod=old_chmod<block_end><def_stmt>test_mkdir_in_git_dir self<block_start>current=CurrentView(repo="repo" uid=1 gid=1 repo_path="repo_path" ignore=CachedIgnore())<with_stmt>pytest.raises(FuseOSError)<block_start>current.mkdir(".git/" "mode")<block_end><block_end><def_stmt>test_create_in_git_dir self<block_start>current=CurrentView(repo="repo" uid=1 gid=1 repo_path="repo_path" ignore=CachedIgnore())<with_stmt>pytest.raises(FuseOSError)<block_start>current.create(".git/" "mode")<block_end><block_end><def_stmt>test_create self<block_start><import_from_stmt>gitfs.views current<as>current_view<line_sep>old_chmod=current_view.PassthroughView.chmod<line_sep>mock_chmod=<lambda>self path mode:"done"<line_sep>current_view.PassthroughView.chmod=mock_chmod<line_sep>mocked_open=MagicMock()<line_sep>mocked_open.return_value="done"<line_sep>current=CurrentView(repo="repo" uid=1 gid=1 repo_path="repo_path" ignore=CachedIgnore())<line_sep>current.dirty={"/path":{"content":"here"}}<line_sep>current.open_for_write=mocked_open<assert_stmt>current.create("/path" "mode")<eq>"done"<line_sep>current_view.PassthroughView.chmod=old_chmod<block_end><def_stmt>test_chmod_in_git_dir self<block_start>current=CurrentView(repo="repo" uid=1 gid=1 repo_path="repo_path" ignore=CachedIgnore())<with_stmt>pytest.raises(FuseOSError)<block_start>current.chmod(".git/" "mode")<block_end><block_end><def_stmt>test_chmod self<block_start><import_from_stmt>gitfs.views current<as>current_view<line_sep>old_chmod=current_view.PassthroughView.chmod<line_sep>current_view.PassthroughView.chmod=<lambda>self path mode:"done"<line_sep>mocked_index=MagicMock()<line_sep>mocked_full=MagicMock(return_value="/path")<line_sep>mocked_repo=MagicMock(_full_path=mocked_full)<line_sep>current=CurrentView(repo=mocked_repo uid=1 gid=1 repo_path="repo_path" ignore=CachedIgnore())<line_sep>current._stage=mocked_index<assert_stmt>current.chmod("/path" 0o100644)<eq>"done"<line_sep>message="Chmod to 0%o on %s"%(0o644 "/path")<line_sep>mocked_index.assert_called_once_with(add="/path" message=message)<line_sep>current_view.PassthroughView.chmod=old_chmod<block_end><def_stmt>test_chmod_on_dir self<block_start><import_from_stmt>gitfs.views current<as>current_view<line_sep>old_chmod=current_view.PassthroughView.chmod<line_sep>current_view.PassthroughView.chmod=<lambda>self path mode:"done"<line_sep>mocked_full=MagicMock(return_value="repo/path/to/dir")<line_sep>mocked_repo=MagicMock(_full_path=mocked_full)<with_stmt>patch("gitfs.views.current.os")<as>mocked_os<block_start>mocked_os.path.isdir.return_value=<true><line_sep>current=CurrentView(repo=mocked_repo uid=1 gid=1 repo_path="repo_path" ignore=CachedIgnore() )<assert_stmt>current.chmod("/path/to/dir" 0o040755)<eq>"done"<line_sep>mocked_os.path.isdir.assert_called_once_with("repo/path/to/dir")<block_end>current_view.PassthroughView.chmod=old_chmod<block_end><def_stmt>test_fsync_a_file_from_git_dir self<block_start>current=CurrentView(repo="repo" uid=1 gid=1 repo_path="repo_path" 
ignore=CachedIgnore())<with_stmt>pytest.raises(FuseOSError)<block_start>current.fsync(".git/" "data" 0)<block_end><block_end><def_stmt>test_fsync self<block_start><import_from_stmt>gitfs.views current<as>current_view<line_sep>old_fsync=current_view.PassthroughView.fsync<line_sep>current_view.PassthroughView.fsync=<lambda>me path data fh:"done"<line_sep>mocked_index=MagicMock()<line_sep>current=CurrentView(repo="repo" uid=1 gid=1 repo_path="repo_path" ignore=CachedIgnore())<line_sep>current._stage=mocked_index<assert_stmt>current.fsync("/path" "data" 1)<eq>"done"<line_sep>message="Fsync /path"<line_sep>mocked_index.assert_called_once_with(add="/path" message=message)<line_sep>current_view.PassthroughView.fsync=old_fsync<block_end><def_stmt>test_unlink_from_git_dir self<block_start>current=CurrentView(repo="repo" repo_path="repo_path" ignore=CachedIgnore())<with_stmt>pytest.raises(FuseOSError)<block_start>current.unlink(".git/")<block_end><block_end><def_stmt>test_unlink self<block_start><import_from_stmt>gitfs.views current<as>current_view<line_sep>old_unlink=current_view.PassthroughView.unlink<line_sep>current_view.PassthroughView.unlink=<lambda>me path:"done"<line_sep>mocked_index=MagicMock()<line_sep>current=CurrentView(repo="repo" uid=1 gid=1 repo_path="repo_path" ignore=CachedIgnore())<line_sep>current._stage=mocked_index<assert_stmt>current.unlink("/path")<eq>"done"<line_sep>message="Deleted /path"<line_sep>mocked_index.assert_called_once_with(remove="/path" message=message)<line_sep>current_view.PassthroughView.unlink=old_unlink<block_end><def_stmt>test_stage self<block_start>mocked_repo=MagicMock()<line_sep>mocked_sanitize=MagicMock()<line_sep>mocked_queue=MagicMock()<line_sep>mocked_files=MagicMock(return_value=<none>)<line_sep>mocked_sanitize.return_value=["to-stage"]<line_sep>current=CurrentView(repo=mocked_repo repo_path="repo_path" queue=mocked_queue ignore=CachedIgnore() )<line_sep>current._sanitize=mocked_sanitize<line_sep>current._get_files_from_path=mocked_files<line_sep>current._stage("message" ["add"] ["remove"])<line_sep>mocked_queue.commit.assert_called_once_with(add=["to-stage"] remove=["to-stage"] message="message")<line_sep>mocked_repo.index.add.assert_called_once_with(["to-stage"])<line_sep>mocked_repo.index.remove.assert_called_once_with(["to-stage"])<line_sep>mocked_files.has_calls([call(["add"])])<line_sep>mocked_sanitize.has_calls([call(["add"]) call(["remove"])])<block_end><def_stmt>test_sanitize self<block_start>current=CurrentView(repo="repo" repo_path="repo_path")<assert_stmt>current._sanitize("/path")<eq>"path"<block_end><def_stmt>test_open self<block_start>mocked_full=MagicMock(return_value="full_path")<line_sep>mocked_repo=MagicMock(_full_path=mocked_full)<line_sep>mocked_os=MagicMock()<line_sep>mocked_os.open.return_value=1<with_stmt>patch.multiple("gitfs.views.current" os=mocked_os)<block_start>current=CurrentView(repo=mocked_repo repo_path="repo_path" ignore=CachedIgnore())<line_sep>current._full_path=mocked_full<line_sep>current.writing=set([])<assert_stmt>current.open("path/" os.O_WRONLY)<eq>1<line_sep>mocked_os.open.assert_called_once_with("full_path" os.O_WRONLY)<block_end><block_end><def_stmt>test_release_with_stage self<block_start>message="I need to stage this"<line_sep>mocked_os=MagicMock()<line_sep>mocked_stage=MagicMock()<line_sep>mocked_os.close.return_value=0<with_stmt>patch.multiple("gitfs.views.current" os=mocked_os)<block_start>current=CurrentView(repo="repo" repo_path="repo_path" 
ignore=CachedIgnore())<line_sep>current._stage=mocked_stage<line_sep>current.dirty={4:{"message":message "stage":<true>}}<assert_stmt>current.release("/path" 4)<eq>0<line_sep>mocked_os.close.assert_called_once_with(4)<line_sep>mocked_stage.assert_called_once_with(add="/path" message=message)<block_end><block_end><def_stmt>test_release_without_stage self<block_start>message="No need to stage this"<line_sep>mocked_os=MagicMock()<line_sep>mocked_stage=MagicMock()<line_sep>mocked_os.close.return_value=0<with_stmt>patch.multiple("gitfs.views.current" os=mocked_os)<block_start>current=CurrentView(repo="repo" repo_path="repo_path" ignore=CachedIgnore())<line_sep>current._stage=mocked_stage<line_sep>current.dirty={4:{"message":message "stage":<false>}}<assert_stmt>current.release("/path" 4)<eq>0<line_sep>mocked_os.close.assert_called_once_with(4)<assert_stmt>mocked_stage.call_count<eq>0<block_end><block_end><block_end>
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) <import_stmt>subprocess<import_stmt>sys<import_from_stmt>spack *<class_stmt>NodeJs(Package)<block_start>"""Node.js is a JavaScript runtime built on Chrome's V8 JavaScript engine."""<line_sep>homepage="https://nodejs.org/"<line_sep>url="https://nodejs.org/dist/v13.5.0/node-v13.5.0.tar.gz"<line_sep>list_url="https://nodejs.org/dist/"<line_sep>list_depth=1<line_sep># Current (latest features) version('15.3.0' sha256='cadfa384a5f14591b84ce07a1afe529f28deb0d43366fb0ae4e78afba96bfaf2')<line_sep>version('14.16.1' sha256='5f5080427abddde7f22fd2ba77cd2b8a1f86253277a1eec54bc98a202728ce80')<line_sep>version('14.13.0' sha256='8538b2e76aa06ee0e6eb1c118426c3c5ca53b2e49d66591738eacf76e89edd61')<line_sep>version('14.10.0' sha256='7e0d7a1aa23697415e3588a1ca4f1c47496e6c88b9cf37c66be90353d3e4ac3e')<line_sep>version('13.8.0' sha256='815b5e1b18114f35da89e4d98febeaba97555d51ef593bd5175db2b05f2e8be6')<line_sep>version('13.5.0' sha256='4b8078d896a7550d7ed399c1b4ac9043e9f883be404d9b337185c8d8479f2db8')<line_sep># LTS (recommended for most users) version('14.15.1' sha256='a1120472bf55aea745287693a6651e16973e1008c9d6107df350126adf9716fe' preferred=<true>)<line_sep>version('12.18.4' sha256='a802d87e579e46fc52771ed6f2667048320caca867be3276f4c4f1bbb41389c3')<line_sep>version('12.18.3' sha256='6ea85f80e01b007cc9b566b8836513bc5102667d833bad4c1092be60fa60c2d4')<line_sep>version('12.16.0' sha256='ae2dfe74485d821d4fef7cf1802acd2322cd994c853a2327c4306952f4453441')<line_sep>version('12.14.0' sha256='5c1939867228f3845c808ef84a89c8ee93cc35f857bf7587ecee1b5a6d9da67b')<line_sep>version('11.1.0' sha256='3f53b5ac25b2d36ad538267083c0e603d9236867a936c22a9116d95fa10c60d5')<line_sep>version('10.13.0' sha256='aa06825fff375ece7c0d881ae0de5d402a857e8cabff9b4a50f2f0b7b44906be')<line_sep>version('8.11.4' sha256='459144e361d64ca7362c37cc9717c044ef909d348cb5aa3f2b62538560a6085a')<line_sep>version('8.9.1' sha256='32491b7fcc4696b2cdead45c47e52ad16bbed8f78885d32e873952fee0f971e1')<line_sep>version('7.1.0' sha256='595e7e2a37d1e0573044a90077bb12c0f750e5d8851899ffa74038238da9a983')<line_sep>version('6.3.0' sha256='4ed7a99985f8afee337cc22d5fef61b495ab4238dfff3750ac9019e87fc6aae6')<line_sep>version('6.2.2' sha256='b6baee57a0ede496c7c7765001f7495ad74c8dfe8c34f1a6fb2cd5d8d526ffce')<line_sep>variant('debug' default=<false> description='Include debugger support')<line_sep>variant('doc' default=<false> description='Compile with documentation')<line_sep>variant('icu4c' default=<false> description='Build with support for all locales instead of just English')<line_sep>variant('openssl' default=<true> description='Build with Spacks OpenSSL instead of the bundled version')<line_sep>variant('zlib' default=<true> description='Build with Spacks zlib instead of the bundled version')<line_sep># https://github.com/nodejs/node/blob/master/BUILDING.md#unix-and-macos depends_on('[email protected]:' type='build')<line_sep>depends_on('libtool' type='build' when=sys.platform<ne>'darwin')<line_sep>depends_on('pkgconfig' type='build')<line_sep>depends_on('[email protected]:2.8,3.5:' when='@12:' type='build')<line_sep>depends_on('[email protected]:2.8' when='@:11' type='build')<line_sep># depends_on('bash-completion', when="+bash-completion") depends_on('icu4c' when='+icu4c')<line_sep>depends_on('[email protected]:1.0' when='@:9+openssl')<line_sep>depends_on('[email protected]:' 
when='@10:+openssl')<line_sep>depends_on('zlib' when='+zlib')<line_sep>phases=['configure' 'build' 'install']<line_sep># https://github.com/spack/spack/issues/19310 conflicts('%gcc@:4.8' msg="fails to build with gcc 4.8 "<concat>"(see https://github.com/spack/spack/issues/19310")<def_stmt>setup_build_environment self env# Force use of experimental Python 3 support <block_start>env.set('PYTHON' self.spec['python'].command.path)<line_sep>env.set('NODE_GYP_FORCE_PYTHON' self.spec['python'].command.path)<block_end><def_stmt>configure_args self# On OSX, the system libtool must be used # So, we ensure that this is the case by... <block_start><if_stmt>sys.platform<eq>'darwin'<block_start>process_pipe=subprocess.Popen(["which" "libtool"] stdout=subprocess.PIPE)<line_sep>result_which=process_pipe.communicate()[0]<line_sep>process_pipe=subprocess.Popen(["whereis" "libtool"] stdout=subprocess.PIPE)<line_sep>result_whereis=process_pipe.communicate()[0]<assert_stmt>result_which<eq>result_whereis ('On OSX the system libtool must be used. Please'<concat>'(temporarily) remove \n %s or its link to libtool from'<concat>'path')<block_end>args=['--prefix={0}'.format(self.prefix) # Note: npm is updated more regularly than node.js, so we build # the package instead of using the bundled version '--without-npm']<if_stmt>'+debug'<in>self.spec<block_start>args.append('--debug')<block_end><if_stmt>'+openssl'<in>self.spec<block_start>args.extend(['--shared-openssl' '--shared-openssl-includes={0}'.format(self.spec['openssl'].prefix.include) '--shared-openssl-libpath={0}'.format(self.spec['openssl'].prefix.lib) ])<block_end><if_stmt>'+zlib'<in>self.spec<block_start>args.extend(['--shared-zlib' '--shared-zlib-includes={0}'.format(self.spec['zlib'].prefix.include) '--shared-zlib-libpath={0}'.format(self.spec['zlib'].prefix.lib) ])<block_end><if_stmt>'+icu4c'<in>self.spec<block_start>args.append('--with-intl=full-icu')<block_end><return>args<block_end><def_stmt>configure self spec prefix<block_start><if_stmt>self.version<ge>Version('10.11.0')<block_start>python('configure.py' *self.configure_args())<block_end><else_stmt><block_start>python('configure' *self.configure_args())<block_end><block_end><def_stmt>build self spec prefix<block_start>make()<if_stmt>'+doc'<in>spec<block_start>make('doc')<block_end><block_end>@run_after('build')@on_package_attributes(run_tests=<true>)<def_stmt>build_test self<block_start>make('test')<line_sep>make('test-addons')<block_end><def_stmt>install self spec prefix<block_start>make('install')<block_end><block_end>
# Copyright 2009-2011 <NAME>. # This program is distributed under the LGPL2.1 license. <import_stmt>webbrowser<import_stmt>wx.html<import_from_stmt>python_toolbox.wx_tools.widgets.cute_window CuteWindow<class_stmt>CuteHtmlWindow(wx.html.HtmlWindow CuteWindow)<block_start>event_modules=wx.html<def_stmt>__init__ self parent id=-1 pos=wx.DefaultPosition size=wx.DefaultSize style=wx.html.HW_DEFAULT_STYLE name=wx.html.HtmlWindowNameStr<block_start>wx.html.HtmlWindow.__init__(self parent=parent id=id pos=pos size=size style=style name=name)<line_sep>self.bind_event_handlers(CuteHtmlWindow)<block_end><def_stmt>_on_html_link_clicked self event<block_start>webbrowser.open_new_tab(event.GetLinkInfo().GetHref())<block_end><block_end>
# -*- coding: utf-8 -*- # Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics # and Energy System Technology (IEE), Kassel. All rights reserved. <import_stmt>pandas<as>pd<import_stmt>pytest<import_stmt>pandapower<as>pp<import_stmt>pandapower.networks<as>pn<def_stmt>test_cigre_hv <block_start>net=pn.create_cigre_network_hv()# length_km_6a_6b=0.1 pp.runpp(net)<line_sep>all_vn_kv=pd.Series([22 220 380])<assert_stmt>net.bus.vn_kv.isin(all_vn_kv).all()<line_sep>all_length_km=pd.Series([100 300 600 0.1])<assert_stmt>net.line.length_km.isin(all_length_km).all()<assert_stmt>len(net.bus)<eq>13<assert_stmt>len(net.line)<eq>9<assert_stmt>len(net.gen)<eq>3<assert_stmt>len(net.sgen)<eq>0<assert_stmt>len(net.shunt)<eq>3<assert_stmt>len(net.trafo)<eq>6<assert_stmt>len(net.load)<eq>5<assert_stmt>len(net.ext_grid)<eq>1<assert_stmt>net.converged<line_sep>net=pn.create_cigre_network_hv(length_km_6a_6b=80)<assert_stmt>net.line.length_km[8]<eq>80<block_end><def_stmt>test_cigre_mv <block_start>net=pn.create_cigre_network_mv()# with_der=False pp.runpp(net)<line_sep>all_vn_kv=pd.Series([110 20])<assert_stmt>net.bus.vn_kv.isin(all_vn_kv).all()<assert_stmt>len(net.bus)<eq>15<assert_stmt>len(net.line)<eq>15<assert_stmt>len(net.gen)<eq>0<assert_stmt>len(net.sgen)<eq>0<assert_stmt>len(net.shunt)<eq>0<assert_stmt>len(net.trafo)<eq>2<assert_stmt>len(net.load)<eq>18<assert_stmt>len(net.ext_grid)<eq>1<assert_stmt>len(net.switch)<eq>8<assert_stmt>net.converged<line_sep>net=pn.create_cigre_network_mv(with_der="pv_wind")<line_sep>pp.runpp(net)<line_sep>all_vn_kv=pd.Series([110 20])<assert_stmt>net.bus.vn_kv.isin(all_vn_kv).all()<assert_stmt>len(net.bus)<eq>15<assert_stmt>len(net.line)<eq>15<assert_stmt>len(net.gen)<eq>0<assert_stmt>len(net.sgen)<eq>9<assert_stmt>len(net.shunt)<eq>0<assert_stmt>len(net.trafo)<eq>2<assert_stmt>len(net.load)<eq>18<assert_stmt>len(net.ext_grid)<eq>1<assert_stmt>len(net.switch)<eq>8<assert_stmt>net.converged<line_sep>net=pn.create_cigre_network_mv(with_der="all")<line_sep>pp.runpp(net)<line_sep>all_vn_kv=pd.Series([110 20])<assert_stmt>net.bus.vn_kv.isin(all_vn_kv).all()<assert_stmt>len(net.bus)<eq>15<assert_stmt>len(net.line)<eq>15<assert_stmt>len(net.gen)<eq>0<assert_stmt>len(net.sgen)<eq>13<assert_stmt>len(net.storage)<eq>2<assert_stmt>len(net.shunt)<eq>0<assert_stmt>len(net.trafo)<eq>2<assert_stmt>len(net.load)<eq>18<assert_stmt>len(net.ext_grid)<eq>1<assert_stmt>len(net.switch)<eq>8<assert_stmt>net.converged<block_end><def_stmt>test_cigre_lv <block_start>net=pn.create_cigre_network_lv()<line_sep>pp.runpp(net)<line_sep>all_vn_kv=pd.Series([20 0.4])<assert_stmt>net.bus.vn_kv.isin(all_vn_kv).all()<assert_stmt>len(net.bus)<eq>44<assert_stmt>len(net.line)<eq>37<assert_stmt>len(net.gen)<eq>0<assert_stmt>len(net.sgen)<eq>0<assert_stmt>len(net.shunt)<eq>0<assert_stmt>len(net.trafo)<eq>3<assert_stmt>len(net.load)<eq>15<assert_stmt>len(net.ext_grid)<eq>1<assert_stmt>len(net.switch)<eq>3<assert_stmt>net.converged<block_end><if_stmt>__name__<eq>'__main__'<block_start>pytest.main(['-x' "test_cigre_networks.py"])<block_end>
<import_stmt>threading<import_stmt>re<import_stmt>time<import_stmt>functools<import_stmt>sublime<import_stmt>sublime_plugin<import_from_stmt>..show_error show_error<import_from_stmt>..package_manager PackageManager<import_from_stmt>..package_disabler PackageDisabler<import_from_stmt>..thread_progress ThreadProgress<try_stmt><block_start>str_cls=unicode<line_sep>bytes_cls=str<block_end><except_stmt>(NameError)<block_start>str_cls=str<line_sep>bytes_cls=bytes<block_end><class_stmt>AdvancedInstallPackageCommand(sublime_plugin.WindowCommand)<block_start>""" A command that accepts a comma-separated list of packages to install, or prompts the user to paste a comma-separated list """<def_stmt>run self packages=<none><block_start>is_str=isinstance(packages str_cls)<line_sep>is_bytes=isinstance(packages bytes_cls)<if_stmt>packages<and>(is_str<or>is_bytes)<block_start>packages=self.split(packages)<block_end><if_stmt>packages<and>isinstance(packages list)<block_start><return>self.start(packages)<block_end>self.window.show_input_panel('Packages to Install (Comma-separated)' '' self.on_done <none> <none>)<block_end><def_stmt>split self packages<block_start><if_stmt>isinstance(packages bytes_cls)<block_start>packages=packages.decode('utf-8')<block_end><return>re.split(r'\s*,\s*' packages)<block_end><def_stmt>on_done self input<block_start>""" Input panel handler - adds the provided URL as a repository :param input: A string of the URL to the new repository """<line_sep>input=input.strip()<if_stmt><not>input<block_start>show_error(u''' No package names were entered ''')<line_sep><return><block_end>self.start(self.split(input))<block_end><def_stmt>start self packages<block_start>thread=AdvancedInstallPackageThread(packages)<line_sep>thread.start()<line_sep>message='Installing package'<if_stmt>len(packages)<g>1<block_start>message<augadd>'s'<block_end>ThreadProgress(thread message '')<block_end><block_end><class_stmt>AdvancedInstallPackageThread(threading.Thread PackageDisabler)<block_start>""" A thread to run the installation of one or more packages in """<def_stmt>__init__ self packages<block_start>""" :param window: An instance of :class:`sublime.Window` that represents the Sublime Text window to show the available package list in. """<line_sep>self.manager=PackageManager()<line_sep>self.packages=packages<line_sep>self.installed=self.manager.list_packages()<line_sep>self.disabled=[]<for_stmt>package_name packages<block_start>operation_type='install'<if>package_name<not><in>self.installed<else>'upgrade'<line_sep>self.disabled.extend(self.disable_packages(package_name operation_type))<block_end>threading.Thread.__init__(self)<block_end><def_stmt>run self# Allow packages to properly disable <block_start>time.sleep(0.7)<def_stmt>do_reenable_package package_name<block_start>operation_type='install'<if>package_name<not><in>self.installed<else>'upgrade'<line_sep>self.reenable_package(package_name operation_type)<block_end><for_stmt>package self.packages<block_start>result=self.manager.install_package(package)<line_sep># Do not reenable if installation deferred until next restart <if_stmt>result<is><not><none><and>package<in>self.disabled# We use a functools.partial to generate the on-complete callback in # order to bind the current value of the parameters, unlike lambdas. <block_start>sublime.set_timeout(functools.partial(do_reenable_package package) 700)<block_end><block_end><block_end><block_end>
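A hedged sketch of how this window command could be invoked programmatically, for example from the Sublime Text console where the sublime module is already available; the package names are placeholders. Sublime Text derives the command name "advanced_install_package" from the class name above.

window = sublime.active_window()
window.run_command('advanced_install_package', {'packages': ['Package A', 'Package B']})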
#---------------------------------------------------------------------- # Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA # and <NAME>. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # o Redistributions of source code must retain the above copyright # notice, this list of conditions, and the disclaimer that follows. # # o Redistributions in binary form must reproduce the above copyright # notice, this list of conditions, and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # o Neither the name of Digital Creations nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS # IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL # CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH # DAMAGE. #---------------------------------------------------------------------- """Support for Berkeley DB 4.0 through 4.7 with a simple interface. For the full featured object oriented interface use the bsddb.db module instead. It mirrors the Oracle Berkeley DB C API. """<import_stmt>sys<line_sep>absolute_import=(sys.version_info[0]<ge>3)<if_stmt>sys.py3kwarning<block_start><import_stmt>warnings<line_sep>warnings.warnpy3k("in 3.x, bsddb has been removed; "<concat>"please use the pybsddb project instead" DeprecationWarning 2)<block_end><try_stmt><block_start><if_stmt>__name__<eq>'bsddb3'# import _pybsddb binary as it should be the more recent version from # a standalone pybsddb addon package than the version included with # python as bsddb._bsddb. <block_start><if_stmt>absolute_import# Because this syntaxis is not valid before Python 2.5 <block_start>exec("from . import _pybsddb")<block_end><else_stmt><block_start><import_stmt>_pybsddb<block_end>_bsddb=_pybsddb<import_from_stmt>bsddb3.dbutils DeadlockWrap<as>_DeadlockWrap<block_end><else_stmt><block_start><import_stmt>_bsddb<import_from_stmt>bsddb.dbutils DeadlockWrap<as>_DeadlockWrap<block_end><block_end><except_stmt>ImportError# Remove ourselves from sys.modules <block_start><import_stmt>sys<del_stmt>sys.modules[__name__]<line_sep><raise><block_end># bsddb3 calls it db, but provide _db for backwards compatibility db=_db=_bsddb<line_sep>__version__=db.__version__<line_sep>error=db.DBError# So bsddb.error will mean something... 
#---------------------------------------------------------------------- <import_stmt>sys os<import_from_stmt>weakref ref<if_stmt>sys.version_info[0:2]<le>(2 5)<block_start><import_stmt>UserDict<line_sep>MutableMapping=UserDict.DictMixin<block_end><else_stmt><block_start><import_stmt>collections<line_sep>MutableMapping=collections.MutableMapping<block_end><class_stmt>_iter_mixin(MutableMapping)<block_start><def_stmt>_make_iter_cursor self<block_start>cur=_DeadlockWrap(self.db.cursor)<line_sep>key=id(cur)<line_sep>self._cursor_refs[key]=ref(cur self._gen_cref_cleaner(key))<line_sep><return>cur<block_end><def_stmt>_gen_cref_cleaner self key# use generate the function for the weakref callback here # to ensure that we do not hold a strict reference to cur # in the callback. <block_start><return><lambda>ref:self._cursor_refs.pop(key <none>)<block_end><def_stmt>__iter__ self<block_start>self._kill_iteration=<false><line_sep>self._in_iter<augadd>1<try_stmt><block_start><try_stmt><block_start>cur=self._make_iter_cursor()<line_sep># FIXME-20031102-greg: race condition. cursor could # be closed by another thread before this call. # since we're only returning keys, we call the cursor # methods with flags=0, dlen=0, dofs=0 key=_DeadlockWrap(cur.first 0 0 0)[0]<line_sep><yield>key<line_sep>next=getattr(cur "next")<while_stmt>1<block_start><try_stmt><block_start>key=_DeadlockWrap(next 0 0 0)[0]<line_sep><yield>key<block_end><except_stmt>_bsddb.DBCursorClosedError<block_start><if_stmt>self._kill_iteration<block_start><raise>RuntimeError('Database changed size '<concat>'during iteration.')<block_end>cur=self._make_iter_cursor()<line_sep># FIXME-20031101-greg: race condition. cursor could # be closed by another thread before this call. _DeadlockWrap(cur.set key 0 0 0)<line_sep>next=getattr(cur "next")<block_end><block_end><block_end><except_stmt>_bsddb.DBNotFoundError<block_start><pass><block_end><except_stmt>_bsddb.DBCursorClosedError# the database was modified during iteration. abort. <block_start><pass><block_end><block_end># When Python 2.3 not supported in bsddb3, we can change this to "finally" <except_stmt><block_start>self._in_iter<augsub>1<line_sep><raise><block_end>self._in_iter<augsub>1<block_end><def_stmt>iteritems self<block_start><if_stmt><not>self.db<block_start><return><block_end>self._kill_iteration=<false><line_sep>self._in_iter<augadd>1<try_stmt><block_start><try_stmt><block_start>cur=self._make_iter_cursor()<line_sep># FIXME-20031102-greg: race condition. cursor could # be closed by another thread before this call. kv=_DeadlockWrap(cur.first)<line_sep>key=kv[0]<line_sep><yield>kv<line_sep>next=getattr(cur "next")<while_stmt>1<block_start><try_stmt><block_start>kv=_DeadlockWrap(next)<line_sep>key=kv[0]<line_sep><yield>kv<block_end><except_stmt>_bsddb.DBCursorClosedError<block_start><if_stmt>self._kill_iteration<block_start><raise>RuntimeError('Database changed size '<concat>'during iteration.')<block_end>cur=self._make_iter_cursor()<line_sep># FIXME-20031101-greg: race condition. cursor could # be closed by another thread before this call. _DeadlockWrap(cur.set key 0 0 0)<line_sep>next=getattr(cur "next")<block_end><block_end><block_end><except_stmt>_bsddb.DBNotFoundError<block_start><pass><block_end><except_stmt>_bsddb.DBCursorClosedError# the database was modified during iteration. abort. 
<block_start><pass><block_end><block_end># When Python 2.3 not supported in bsddb3, we can change this to "finally" <except_stmt><block_start>self._in_iter<augsub>1<line_sep><raise><block_end>self._in_iter<augsub>1<block_end><block_end><class_stmt>_DBWithCursor(_iter_mixin)<block_start>""" A simple wrapper around DB that makes it look like the bsddbobject in the old module. It uses a cursor as needed to provide DB traversal. """<def_stmt>__init__ self db<block_start>self.db=db<line_sep>self.db.set_get_returns_none(0)<line_sep># FIXME-20031101-greg: I believe there is still the potential # for deadlocks in a multithreaded environment if someone # attempts to use the any of the cursor interfaces in one # thread while doing a put or delete in another thread. The # reason is that _checkCursor and _closeCursors are not atomic # operations. Doing our own locking around self.dbc, # self.saved_dbc_key and self._cursor_refs could prevent this. # TODO: A test case demonstrating the problem needs to be written. # self.dbc is a DBCursor object used to implement the # first/next/previous/last/set_location methods. self.dbc=<none><line_sep>self.saved_dbc_key=<none><line_sep># a collection of all DBCursor objects currently allocated # by the _iter_mixin interface. self._cursor_refs={}<line_sep>self._in_iter=0<line_sep>self._kill_iteration=<false><block_end><def_stmt>__del__ self<block_start>self.close()<block_end><def_stmt>_checkCursor self<block_start><if_stmt>self.dbc<is><none><block_start>self.dbc=_DeadlockWrap(self.db.cursor)<if_stmt>self.saved_dbc_key<is><not><none><block_start>_DeadlockWrap(self.dbc.set self.saved_dbc_key)<line_sep>self.saved_dbc_key=<none><block_end><block_end><block_end># This method is needed for all non-cursor DB calls to avoid # Berkeley DB deadlocks (due to being opened with DB_INIT_LOCK # and DB_THREAD to be thread safe) when intermixing database # operations that use the cursor internally with those that don't. 
<def_stmt>_closeCursors self save=1<block_start><if_stmt>self.dbc<block_start>c=self.dbc<line_sep>self.dbc=<none><if_stmt>save<block_start><try_stmt><block_start>self.saved_dbc_key=_DeadlockWrap(c.current 0 0 0)[0]<block_end><except_stmt>db.DBError<block_start><pass><block_end><block_end>_DeadlockWrap(c.close)<del_stmt>c<block_end><for_stmt>cref self._cursor_refs.values()<block_start>c=cref()<if_stmt>c<is><not><none><block_start>_DeadlockWrap(c.close)<block_end><block_end><block_end><def_stmt>_checkOpen self<block_start><if_stmt>self.db<is><none><block_start><raise>error "BSDDB object has already been closed"<block_end><block_end><def_stmt>isOpen self<block_start><return>self.db<is><not><none><block_end><def_stmt>__len__ self<block_start>self._checkOpen()<line_sep><return>_DeadlockWrap(<lambda>:len(self.db))<block_end># len(self.db) <if_stmt>sys.version_info[0:2]<ge>(2 6)<block_start><def_stmt>__repr__ self<block_start><if_stmt>self.isOpen()<block_start><return>repr(dict(_DeadlockWrap(self.db.items)))<block_end><return>repr(dict())<block_end><block_end><def_stmt>__getitem__ self key<block_start>self._checkOpen()<line_sep><return>_DeadlockWrap(<lambda>:self.db[key])<block_end># self.db[key] <def_stmt>__setitem__ self key value<block_start>self._checkOpen()<line_sep>self._closeCursors()<if_stmt>self._in_iter<and>key<not><in>self<block_start>self._kill_iteration=<true><block_end><def_stmt>wrapF <block_start>self.db[key]=value<block_end>_DeadlockWrap(wrapF)<block_end># self.db[key] = value <def_stmt>__delitem__ self key<block_start>self._checkOpen()<line_sep>self._closeCursors()<if_stmt>self._in_iter<and>key<in>self<block_start>self._kill_iteration=<true><block_end><def_stmt>wrapF <block_start><del_stmt>self.db[key]<block_end>_DeadlockWrap(wrapF)<block_end># del self.db[key] <def_stmt>close self<block_start>self._closeCursors(save=0)<if_stmt>self.dbc<is><not><none><block_start>_DeadlockWrap(self.dbc.close)<block_end>v=0<if_stmt>self.db<is><not><none><block_start>v=_DeadlockWrap(self.db.close)<block_end>self.dbc=<none><line_sep>self.db=<none><line_sep><return>v<block_end><def_stmt>keys self<block_start>self._checkOpen()<line_sep><return>_DeadlockWrap(self.db.keys)<block_end><def_stmt>has_key self key<block_start>self._checkOpen()<line_sep><return>_DeadlockWrap(self.db.has_key key)<block_end><def_stmt>set_location self key<block_start>self._checkOpen()<line_sep>self._checkCursor()<line_sep><return>_DeadlockWrap(self.dbc.set_range key)<block_end><def_stmt>next self# Renamed by "2to3" <block_start>self._checkOpen()<line_sep>self._checkCursor()<line_sep>rv=_DeadlockWrap(getattr(self.dbc "next"))<line_sep><return>rv<block_end><if_stmt>sys.version_info[0]<ge>3# For "2to3" conversion <block_start>next=__next__<block_end><def_stmt>previous self<block_start>self._checkOpen()<line_sep>self._checkCursor()<line_sep>rv=_DeadlockWrap(self.dbc.prev)<line_sep><return>rv<block_end><def_stmt>first self<block_start>self._checkOpen()<line_sep># fix 1725856: don't needlessly try to restore our cursor position self.saved_dbc_key=<none><line_sep>self._checkCursor()<line_sep>rv=_DeadlockWrap(self.dbc.first)<line_sep><return>rv<block_end><def_stmt>last self<block_start>self._checkOpen()<line_sep># fix 1725856: don't needlessly try to restore our cursor position self.saved_dbc_key=<none><line_sep>self._checkCursor()<line_sep>rv=_DeadlockWrap(self.dbc.last)<line_sep><return>rv<block_end><def_stmt>sync 
self<block_start>self._checkOpen()<line_sep><return>_DeadlockWrap(self.db.sync)<block_end><block_end>#---------------------------------------------------------------------- # Compatibility object factory functions <def_stmt>hashopen file flag='c' mode=0666 pgsize=<none> ffactor=<none> nelem=<none> cachesize=<none> lorder=<none> hflags=0<block_start>flags=_checkflag(flag file)<line_sep>e=_openDBEnv(cachesize)<line_sep>d=db.DB(e)<line_sep>d.set_flags(hflags)<if_stmt>pgsize<is><not><none><block_start>d.set_pagesize(pgsize)<block_end><if_stmt>lorder<is><not><none><block_start>d.set_lorder(lorder)<block_end><if_stmt>ffactor<is><not><none><block_start>d.set_h_ffactor(ffactor)<block_end><if_stmt>nelem<is><not><none><block_start>d.set_h_nelem(nelem)<block_end>d.open(file db.DB_HASH flags mode)<line_sep><return>_DBWithCursor(d)<block_end>#---------------------------------------------------------------------- <def_stmt>btopen file flag='c' mode=0666 btflags=0 cachesize=<none> maxkeypage=<none> minkeypage=<none> pgsize=<none> lorder=<none><block_start>flags=_checkflag(flag file)<line_sep>e=_openDBEnv(cachesize)<line_sep>d=db.DB(e)<if_stmt>pgsize<is><not><none><block_start>d.set_pagesize(pgsize)<block_end><if_stmt>lorder<is><not><none><block_start>d.set_lorder(lorder)<block_end>d.set_flags(btflags)<if_stmt>minkeypage<is><not><none><block_start>d.set_bt_minkey(minkeypage)<block_end><if_stmt>maxkeypage<is><not><none><block_start>d.set_bt_maxkey(maxkeypage)<block_end>d.open(file db.DB_BTREE flags mode)<line_sep><return>_DBWithCursor(d)<block_end>#---------------------------------------------------------------------- <def_stmt>rnopen file flag='c' mode=0666 rnflags=0 cachesize=<none> pgsize=<none> lorder=<none> rlen=<none> delim=<none> source=<none> pad=<none><block_start>flags=_checkflag(flag file)<line_sep>e=_openDBEnv(cachesize)<line_sep>d=db.DB(e)<if_stmt>pgsize<is><not><none><block_start>d.set_pagesize(pgsize)<block_end><if_stmt>lorder<is><not><none><block_start>d.set_lorder(lorder)<block_end>d.set_flags(rnflags)<if_stmt>delim<is><not><none><block_start>d.set_re_delim(delim)<block_end><if_stmt>rlen<is><not><none><block_start>d.set_re_len(rlen)<block_end><if_stmt>source<is><not><none><block_start>d.set_re_source(source)<block_end><if_stmt>pad<is><not><none><block_start>d.set_re_pad(pad)<block_end>d.open(file db.DB_RECNO flags mode)<line_sep><return>_DBWithCursor(d)<block_end>#---------------------------------------------------------------------- <def_stmt>_openDBEnv cachesize<block_start>e=db.DBEnv()<if_stmt>cachesize<is><not><none><block_start><if_stmt>cachesize<ge>20480<block_start>e.set_cachesize(0 cachesize)<block_end><else_stmt><block_start><raise>error "cachesize must be >= 20480"<block_end><block_end>e.set_lk_detect(db.DB_LOCK_DEFAULT)<line_sep>e.open('.' db.DB_PRIVATE|db.DB_CREATE|db.DB_THREAD|db.DB_INIT_LOCK|db.DB_INIT_MPOOL)<line_sep><return>e<block_end><def_stmt>_checkflag flag file<block_start><if_stmt>flag<eq>'r'<block_start>flags=db.DB_RDONLY<block_end><elif_stmt>flag<eq>'rw'<block_start>flags=0<block_end><elif_stmt>flag<eq>'w'<block_start>flags=db.DB_CREATE<block_end><elif_stmt>flag<eq>'c'<block_start>flags=db.DB_CREATE<block_end><elif_stmt>flag<eq>'n'<block_start>flags=db.DB_CREATE<line_sep>#flags = db.DB_CREATE | db.DB_TRUNCATE # we used db.DB_TRUNCATE flag for this before but Berkeley DB # 4.2.52 changed to disallowed truncate with txn environments. 
<if_stmt>file<is><not><none><and>os.path.isfile(file)<block_start>os.unlink(file)<block_end><block_end><else_stmt><block_start><raise>error "flags should be one of 'r', 'w', 'c' or 'n'"<block_end><return>flags|db.DB_THREAD<block_end>#---------------------------------------------------------------------- # This is a silly little hack that allows apps to continue to use the # DB_THREAD flag even on systems without threads without freaking out # Berkeley DB. # # This assumes that if Python was built with thread support then # Berkeley DB was too. <try_stmt><block_start><import_stmt>thread<del_stmt>thread<block_end><except_stmt>ImportError<block_start>db.DB_THREAD=0<block_end>#----------------------------------------------------------------------
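As a rough illustration of the legacy interface defined above (Python 2 era, matching the module's own style), a hash-format database can be opened and used like a dictionary; the file path below is purely illustrative.

# Open (or create) a hash-format database and treat it as a mapping.
db_handle = hashopen('/tmp/example.db', 'c')
db_handle['spam'] = 'eggs'
print(db_handle.keys())
print(db_handle.has_key('spam'))
db_handle.close()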
"""Models for OAuth2 applications."""<import_from_future_stmt> unicode_literals<import_from_stmt>django.contrib.auth.models User<import_from_stmt>django.db models<import_from_stmt>django.utils.translation ugettext_lazy<as>_<import_from_stmt>djblets.db.fields JSONField<import_from_stmt>oauth2_provider.models AbstractApplication<import_from_stmt>reviewboard.site.models LocalSite<class_stmt>Application(AbstractApplication)<block_start>"""An OAuth2 application. This model is specialized so that it can be limited to a :py:class:`~reviewboard.site.models.LocalSite`. """<line_sep>enabled=models.BooleanField(verbose_name=_('Enabled') help_text=_('Whether or not this application can be used to '<concat>'authenticate with Review Board.') default=<true> )<line_sep>original_user=models.ForeignKey(verbose_name=_('Original User') to=User blank=<true> null=<true> help_text=_('The original owner of this application.'))<line_sep>local_site=models.ForeignKey(verbose_name=_('Local Site') to=LocalSite related_name='oauth_applications' blank=<true> null=<true> help_text=_('An optional Local Site to limit this application to.<br>'<concat>'If specified, only users with access to the Local Site '<concat>'will be able to use the application.') )<line_sep>extra_data=JSONField(_('Extra Data') null=<true> default=dict )<line_sep>@property<def_stmt>is_disabled_for_security self<block_start>"""Whether or not this application is disabled for security reasons. This will be ``True`` when the :py:attr:`original_owner` no longer has access to the :py:attr:`local_site` this application is associated with. """<line_sep><return><not>self.enabled<and>self.original_user_id<is><not><none><block_end><def_stmt>clean self<block_start>"""Validate the application. We do the validation for this in :py:meth:`ApplicationForm.clean() <reviewboard.oauth.forms.ApplicationForm.clean` so that we can have errors for ``authorization_grant_type`` and ``redirect_uris`` conflicts show up on the appropriate field. The parent class does the same validation, but as a result it will have form-wide errors instead of per-field errors for the above two fields when they are in conflict. Therefore we avoid that validation by making this a no-op. """<line_sep><pass><block_end><def_stmt>is_accessible_by self user local_site=<none><block_start>"""Return whether or not the user has access to this Application. A user has access if one of the following conditions is met: * The user owns the Application. * The user is an administrator. * The user is a Local Site administrator on the Local Site the Application is assigned to. Args: user (django.contrib.auth.models.User): The user in question. local_site (reviewboard.site.models.LocalSite): The Local Site the user would access this Application under. Returns: bool: Whether or not the given user has access to information about this Application. """<line_sep><return>(user.is_authenticated()<and>(self.user_id<eq>user.pk<or>user.is_superuser<or>(self.local_site_id<is><not><none><and>local_site<is><not><none><and>self.local_site_id<eq>local_site.pk<and>local_site.is_mutable_by(user))))<block_end><def_stmt>is_mutable_by self user local_site=<none><block_start>"""Return whether or not the user can modify this Application. A user has access if one of the following conditions is met: * The user owns the Application. * The user is an administrator. * The user is a Local Site administrator on the Local Site the Application is assigned to. Args: user (django.contrib.auth.models.User): The user in question. 
local_site (reviewboard.site.models.LocalSite): The Local Site the user would modify this Application under. Returns: bool: Whether or not the given user can modify this Application. """<line_sep><return>self.is_accessible_by(user local_site=local_site)<block_end><class_stmt>Meta<block_start>db_table='reviewboard_oauth_application'<line_sep>verbose_name=_('OAuth Application')<line_sep>verbose_name_plural=_('OAuth Applications')<block_end><block_end>
# -*- coding: utf-8 -*- <import_from_stmt>datetime datetime<import_from_stmt>StringIO StringIO<import_stmt>unittest<import_from_stmt>xml.dom minidom<import_from_stmt>django.conf settings<import_from_stmt>django.core serializers<import_from_stmt>django.db transaction<import_from_stmt>django.test TestCase TransactionTestCase Approximate<import_from_stmt>django.utils simplejson<import_from_stmt>models Category Author Article AuthorProfile Actor Movie Score Player Team<class_stmt>SerializerRegistrationTests(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.old_SERIALIZATION_MODULES=getattr(settings 'SERIALIZATION_MODULES' <none>)<line_sep>self.old_serializers=serializers._serializers<line_sep>serializers._serializers={}<line_sep>settings.SERIALIZATION_MODULES={"json2":"django.core.serializers.json" }<block_end><def_stmt>tearDown self<block_start>serializers._serializers=self.old_serializers<if_stmt>self.old_SERIALIZATION_MODULES<block_start>settings.SERIALIZATION_MODULES=self.old_SERIALIZATION_MODULES<block_end><else_stmt><block_start>delattr(settings 'SERIALIZATION_MODULES')<block_end><block_end><def_stmt>test_register self<block_start>"Registering a new serializer populates the full registry. Refs #14823"<line_sep>serializers.register_serializer('json3' 'django.core.serializers.json')<line_sep>public_formats=serializers.get_public_serializer_formats()<line_sep>self.assertTrue('json3'<in>public_formats)<line_sep>self.assertTrue('json2'<in>public_formats)<line_sep>self.assertTrue('xml'<in>public_formats)<block_end><def_stmt>test_unregister self<block_start>"Unregistering a serializer doesn't cause the registry to be repopulated. Refs #14823"<line_sep>serializers.unregister_serializer('xml')<line_sep>serializers.register_serializer('json3' 'django.core.serializers.json')<line_sep>public_formats=serializers.get_public_serializer_formats()<line_sep>self.assertFalse('xml'<in>public_formats)<line_sep>self.assertTrue('json3'<in>public_formats)<block_end><def_stmt>test_builtin_serializers self<block_start>"Requesting a list of serializer formats popuates the registry"<line_sep>all_formats=set(serializers.get_serializer_formats())<line_sep>public_formats=set(serializers.get_public_serializer_formats())<line_sep>self.assertTrue('xml'<in>all_formats) <line_sep>self.assertTrue('xml'<in>public_formats)<line_sep>self.assertTrue('json2'<in>all_formats)<line_sep>self.assertTrue('json2'<in>public_formats)<line_sep>self.assertTrue('python'<in>all_formats)<line_sep>self.assertFalse('python'<in>public_formats)<block_end><block_end><class_stmt>SerializersTestBase(object)<block_start>@staticmethod<def_stmt>_comparison_value value<block_start><return>value<block_end><def_stmt>setUp self<block_start>sports=Category.objects.create(name="Sports")<line_sep>music=Category.objects.create(name="Music")<line_sep>op_ed=Category.objects.create(name="Op-Ed")<line_sep>self.joe=Author.objects.create(name="Joe")<line_sep>self.jane=Author.objects.create(name="Jane")<line_sep>self.a1=Article(author=self.jane headline="Poker has no place on ESPN" pub_date=datetime(2006 6 16 11 00))<line_sep>self.a1.save()<line_sep>self.a1.categories=[sports op_ed]<line_sep>self.a2=Article(author=self.joe headline="Time to reform copyright" pub_date=datetime(2006 6 16 13 00 11 345))<line_sep>self.a2.save()<line_sep>self.a2.categories=[music op_ed]<block_end><def_stmt>test_serialize self<block_start>"""Tests that basic serialization works."""<line_sep>serial_str=serializers.serialize(self.serializer_name 
Article.objects.all())<line_sep>self.assertTrue(self._validate_output(serial_str))<block_end><def_stmt>test_serializer_roundtrip self<block_start>"""Tests that serialized content can be deserialized."""<line_sep>serial_str=serializers.serialize(self.serializer_name Article.objects.all())<line_sep>models=list(serializers.deserialize(self.serializer_name serial_str))<line_sep>self.assertEqual(len(models) 2)<block_end><def_stmt>test_altering_serialized_output self<block_start>""" Tests the ability to create new objects by modifying serialized content. """<line_sep>old_headline="Poker has no place on ESPN"<line_sep>new_headline="Poker has no place on television"<line_sep>serial_str=serializers.serialize(self.serializer_name Article.objects.all())<line_sep>serial_str=serial_str.replace(old_headline new_headline)<line_sep>models=list(serializers.deserialize(self.serializer_name serial_str))<line_sep># Prior to saving, old headline is in place self.assertTrue(Article.objects.filter(headline=old_headline))<line_sep>self.assertFalse(Article.objects.filter(headline=new_headline))<for_stmt>model models<block_start>model.save()<block_end># After saving, new headline is in place self.assertTrue(Article.objects.filter(headline=new_headline))<line_sep>self.assertFalse(Article.objects.filter(headline=old_headline))<block_end><def_stmt>test_one_to_one_as_pk self<block_start>""" Tests that if you use your own primary key field (such as a OneToOneField), it doesn't appear in the serialized field list - it replaces the pk identifier. """<line_sep>profile=AuthorProfile(author=self.joe date_of_birth=datetime(1970 1 1))<line_sep>profile.save()<line_sep>serial_str=serializers.serialize(self.serializer_name AuthorProfile.objects.all())<line_sep>self.assertFalse(self._get_field_values(serial_str 'author'))<for_stmt>obj serializers.deserialize(self.serializer_name serial_str)<block_start>self.assertEqual(obj.object.pk self._comparison_value(self.joe.pk))<block_end><block_end><def_stmt>test_serialize_field_subset self<block_start>"""Tests that output can be restricted to a subset of fields"""<line_sep>valid_fields=('headline' 'pub_date')<line_sep>invalid_fields=("author" "categories")<line_sep>serial_str=serializers.serialize(self.serializer_name Article.objects.all() fields=valid_fields)<for_stmt>field_name invalid_fields<block_start>self.assertFalse(self._get_field_values(serial_str field_name))<block_end><for_stmt>field_name valid_fields<block_start>self.assertTrue(self._get_field_values(serial_str field_name))<block_end><block_end><def_stmt>test_serialize_unicode self<block_start>"""Tests that unicode makes the roundtrip intact"""<line_sep>actor_name=u"Za\u017c\u00f3\u0142\u0107"<line_sep>movie_title=u'G\u0119\u015bl\u0105 ja\u017a\u0144'<line_sep>ac=Actor(name=actor_name)<line_sep>mv=Movie(title=movie_title actor=ac)<line_sep>ac.save()<line_sep>mv.save()<line_sep>serial_str=serializers.serialize(self.serializer_name [mv])<line_sep>self.assertEqual(self._get_field_values(serial_str "title")[0] movie_title)<line_sep>self.assertEqual(self._get_field_values(serial_str "actor")[0] actor_name)<line_sep>obj_list=list(serializers.deserialize(self.serializer_name serial_str))<line_sep>mv_obj=obj_list[0].object<line_sep>self.assertEqual(mv_obj.title movie_title)<block_end><def_stmt>test_serialize_with_null_pk self<block_start>""" Tests that serialized data with no primary key results in a model instance with no id 
"""<line_sep>category=Category(name="Reference")<line_sep>serial_str=serializers.serialize(self.serializer_name [category])<line_sep>pk_value=self._get_pk_values(serial_str)[0]<line_sep>self.assertFalse(pk_value)<line_sep>cat_obj=list(serializers.deserialize(self.serializer_name serial_str))[0].object<line_sep>self.assertEqual(cat_obj.id <none>)<block_end><def_stmt>test_float_serialization self<block_start>"""Tests that float values serialize and deserialize intact"""<line_sep>sc=Score(score=3.4)<line_sep>sc.save()<line_sep>serial_str=serializers.serialize(self.serializer_name [sc])<line_sep>deserial_objs=list(serializers.deserialize(self.serializer_name serial_str))<line_sep>self.assertEqual(deserial_objs[0].object.score Approximate(3.4 places=1))<block_end><def_stmt>test_custom_field_serialization self<block_start>"""Tests that custom fields serialize and deserialize intact"""<line_sep>team_str="<NAME>"<line_sep>player=Player()<line_sep>player.name="<NAME>"<line_sep>player.rank=1<line_sep>player.team=Team(team_str)<line_sep>player.save()<line_sep>serial_str=serializers.serialize(self.serializer_name Player.objects.all())<line_sep>team=self._get_field_values(serial_str "team")<line_sep>self.assertTrue(team)<line_sep>self.assertEqual(team[0] team_str)<line_sep>deserial_objs=list(serializers.deserialize(self.serializer_name serial_str))<line_sep>self.assertEqual(deserial_objs[0].object.team.to_string() player.team.to_string())<block_end><def_stmt>test_pre_1000ad_date self<block_start>"""Tests that year values before 1000AD are properly formatted"""<line_sep># Regression for #12524 -- dates before 1000AD get prefixed # 0's on the year a=Article.objects.create(author=self.jane headline="Nobody remembers the early years" pub_date=datetime(1 2 3 4 5 6))<line_sep>serial_str=serializers.serialize(self.serializer_name [a])<line_sep>date_values=self._get_field_values(serial_str "pub_date")<line_sep>self.assertEquals(date_values[0] "0001-02-03 04:05:06")<block_end><def_stmt>test_pkless_serialized_strings self<block_start>""" Tests that serialized strings without PKs can be turned into models """<line_sep>deserial_objs=list(serializers.deserialize(self.serializer_name self.pkless_str))<for_stmt>obj deserial_objs<block_start>self.assertFalse(obj.object.id)<line_sep>obj.save()<block_end>self.assertEqual(Category.objects.all().count() 4)<block_end><block_end><class_stmt>SerializersTransactionTestBase(object)<block_start><def_stmt>test_forward_refs self<block_start>""" Tests that objects ids can be referenced before they are defined in the serialization data. """<line_sep># The deserialization process needs to be contained # within a transaction in order to test forward reference # handling. 
transaction.enter_transaction_management()<line_sep>transaction.managed(<true>)<line_sep>objs=serializers.deserialize(self.serializer_name self.fwd_ref_str)<for_stmt>obj objs<block_start>obj.save()<block_end>transaction.commit()<line_sep>transaction.leave_transaction_management()<for_stmt>model_cls (Category Author Article)<block_start>self.assertEqual(model_cls.objects.all().count() 1)<block_end>art_obj=Article.objects.all()[0]<line_sep>self.assertEqual(art_obj.categories.all().count() 1)<line_sep>self.assertEqual(art_obj.author.name "Agnes")<block_end><block_end><class_stmt>XmlSerializerTestCase(SerializersTestBase TestCase)<block_start>serializer_name="xml"<line_sep>pkless_str="""<?xml version="1.0" encoding="utf-8"?> <django-objects version="1.0"> <object model="serializers.category"> <field type="CharField" name="name">Reference</field> </object> </django-objects>"""<line_sep>@staticmethod<def_stmt>_comparison_value value# The XML serializer handles everything as strings, so comparisons # need to be performed on the stringified value <block_start><return>unicode(value)<block_end>@staticmethod<def_stmt>_validate_output serial_str<block_start><try_stmt><block_start>minidom.parseString(serial_str)<block_end><except_stmt>Exception<block_start><return><false><block_end><else_stmt><block_start><return><true><block_end><block_end>@staticmethod<def_stmt>_get_pk_values serial_str<block_start>ret_list=[]<line_sep>dom=minidom.parseString(serial_str)<line_sep>fields=dom.getElementsByTagName("object")<for_stmt>field fields<block_start>ret_list.append(field.getAttribute("pk"))<block_end><return>ret_list<block_end>@staticmethod<def_stmt>_get_field_values serial_str field_name<block_start>ret_list=[]<line_sep>dom=minidom.parseString(serial_str)<line_sep>fields=dom.getElementsByTagName("field")<for_stmt>field fields<block_start><if_stmt>field.getAttribute("name")<eq>field_name<block_start>temp=[]<for_stmt>child field.childNodes<block_start>temp.append(child.nodeValue)<block_end>ret_list.append("".join(temp))<block_end><block_end><return>ret_list<block_end><block_end><class_stmt>XmlSerializerTransactionTestCase(SerializersTransactionTestBase TransactionTestCase)<block_start>serializer_name="xml"<line_sep>fwd_ref_str="""<?xml version="1.0" encoding="utf-8"?> <django-objects version="1.0"> <object pk="1" model="serializers.article"> <field to="serializers.author" name="author" rel="ManyToOneRel">1</field> <field type="CharField" name="headline">Forward references pose no problem</field> <field type="DateTimeField" name="pub_date">2006-06-16 15:00:00</field> <field to="serializers.category" name="categories" rel="ManyToManyRel"> <object pk="1"></object> </field> </object> <object pk="1" model="serializers.author"> <field type="CharField" name="name">Agnes</field> </object> <object pk="1" model="serializers.category"> <field type="CharField" name="name">Reference</field></object> </django-objects>"""<block_end><class_stmt>JsonSerializerTestCase(SerializersTestBase TestCase)<block_start>serializer_name="json"<line_sep>pkless_str="""[{"pk": null, "model": "serializers.category", "fields": {"name": "Reference"}}]"""<line_sep>@staticmethod<def_stmt>_validate_output serial_str<block_start><try_stmt><block_start>simplejson.loads(serial_str)<block_end><except_stmt>Exception<block_start><return><false><block_end><else_stmt><block_start><return><true><block_end><block_end>@staticmethod<def_stmt>_get_pk_values serial_str<block_start>ret_list=[]<line_sep>serial_list=simplejson.loads(serial_str)<for_stmt>obj_dict 
serial_list<block_start>ret_list.append(obj_dict["pk"])<block_end><return>ret_list<block_end>@staticmethod<def_stmt>_get_field_values serial_str field_name<block_start>ret_list=[]<line_sep>serial_list=simplejson.loads(serial_str)<for_stmt>obj_dict serial_list<block_start><if_stmt>field_name<in>obj_dict["fields"]<block_start>ret_list.append(obj_dict["fields"][field_name])<block_end><block_end><return>ret_list<block_end><block_end><class_stmt>JsonSerializerTransactionTestCase(SerializersTransactionTestBase TransactionTestCase)<block_start>serializer_name="json"<line_sep>fwd_ref_str="""[ { "pk": 1, "model": "serializers.article", "fields": { "headline": "Forward references pose no problem", "pub_date": "2006-06-16 15:00:00", "categories": [1], "author": 1 } }, { "pk": 1, "model": "serializers.category", "fields": { "name": "Reference" } }, { "pk": 1, "model": "serializers.author", "fields": { "name": "Agnes" } }]"""<block_end><try_stmt><block_start><import_stmt>yaml<block_end><except_stmt>ImportError<block_start><pass><block_end><else_stmt><block_start><class_stmt>YamlSerializerTestCase(SerializersTestBase TestCase)<block_start>serializer_name="yaml"<line_sep>fwd_ref_str="""- fields: headline: Forward references pose no problem pub_date: 2006-06-16 15:00:00 categories: [1] author: 1 pk: 1 model: serializers.article - fields: name: Reference pk: 1 model: serializers.category - fields: name: Agnes pk: 1 model: serializers.author"""<line_sep>pkless_str="""- fields: name: Reference pk: null model: serializers.category"""<line_sep>@staticmethod<def_stmt>_validate_output serial_str<block_start><try_stmt><block_start>yaml.load(StringIO(serial_str))<block_end><except_stmt>Exception<block_start><return><false><block_end><else_stmt><block_start><return><true><block_end><block_end>@staticmethod<def_stmt>_get_pk_values serial_str<block_start>ret_list=[]<line_sep>stream=StringIO(serial_str)<for_stmt>obj_dict yaml.load(stream)<block_start>ret_list.append(obj_dict["pk"])<block_end><return>ret_list<block_end>@staticmethod<def_stmt>_get_field_values serial_str field_name<block_start>ret_list=[]<line_sep>stream=StringIO(serial_str)<for_stmt>obj_dict yaml.load(stream)<block_start><if_stmt>"fields"<in>obj_dict<and>field_name<in>obj_dict["fields"]<block_start>field_value=obj_dict["fields"][field_name]<line_sep># yaml.load will return non-string objects for some # of the fields we are interested in, this ensures that # everything comes back as a string <if_stmt>isinstance(field_value basestring)<block_start>ret_list.append(field_value)<block_end><else_stmt><block_start>ret_list.append(str(field_value))<block_end><block_end><block_end><return>ret_list<block_end><block_end><class_stmt>YamlSerializerTransactionTestCase(SerializersTransactionTestBase TransactionTestCase)<block_start>serializer_name="yaml"<line_sep>fwd_ref_str="""- fields: headline: Forward references pose no problem pub_date: 2006-06-16 15:00:00 categories: [1] author: 1 pk: 1 model: serializers.article - fields: name: Reference pk: 1 model: serializers.category - fields: name: Agnes pk: 1 model: serializers.author"""<block_end><block_end>
<import_stmt>gym<import_from_stmt>gym spaces<import_stmt>numpy<as>np<try_stmt><block_start><import_from_stmt>dm_env specs<block_end><except_stmt>ImportError<block_start>specs=<none><block_end><def_stmt>_convert_spec_to_space spec<block_start><if_stmt>isinstance(spec dict)<block_start><return>spaces.Dict({k:_convert_spec_to_space(v)<for>k,v spec.items()})<block_end><if_stmt>isinstance(spec specs.DiscreteArray)<block_start><return>spaces.Discrete(spec.num_values)<block_end><elif_stmt>isinstance(spec specs.BoundedArray)<block_start><return>spaces.Box(low=np.asscalar(spec.minimum) high=np.asscalar(spec.maximum) shape=spec.shape dtype=spec.dtype)<block_end><elif_stmt>isinstance(spec specs.Array)<block_start><return>spaces.Box(low=-float("inf") high=float("inf") shape=spec.shape dtype=spec.dtype)<block_end><raise>NotImplementedError(("Could not convert `Array` spec of type {} to Gym space. "<concat>"Attempted to convert: {}").format(type(spec) spec))<block_end><class_stmt>DMEnv(gym.Env)<block_start>"""A `gym.Env` wrapper for the `dm_env` API. """<line_sep>metadata={"render.modes":["rgb_array"]}<def_stmt>__init__ self dm_env<block_start>super(DMEnv self).__init__()<line_sep>self._env=dm_env<line_sep>self._prev_obs=<none><if_stmt>specs<is><none><block_start><raise>RuntimeError(("The `specs` module from `dm_env` was not imported. Make sure "<concat>"`dm_env` is installed and visible in the current python "<concat>"environment."))<block_end><block_end><def_stmt>step self action<block_start>ts=self._env.step(action)<line_sep>self._prev_obs=ts.observation<line_sep>reward=ts.reward<if_stmt>reward<is><none><block_start>reward=0.<block_end><return>ts.observation reward ts.last() {"discount":ts.discount}<block_end><def_stmt>reset self<block_start>ts=self._env.reset()<line_sep>self._prev_obs=ts.observation<line_sep><return>ts.observation<block_end><def_stmt>render self mode="rgb_array"<block_start><if_stmt>self._prev_obs<is><none><block_start><raise>ValueError("Environment not started. Make sure to reset before rendering.")<block_end><if_stmt>mode<eq>"rgb_array"<block_start><return>self._prev_obs<block_end><else_stmt><block_start><raise>NotImplementedError("Render mode '{}' is not supported.".format(mode))<block_end><block_end>@property<def_stmt>action_space self<block_start>spec=self._env.action_spec()<line_sep><return>_convert_spec_to_space(spec)<block_end>@property<def_stmt>observation_space self<block_start>spec=self._env.observation_spec()<line_sep><return>_convert_spec_to_space(spec)<block_end>@property<def_stmt>reward_range self<block_start>spec=self._env.reward_spec()<if_stmt>isinstance(spec specs.BoundedArray)<block_start><return>spec.minimum spec.maximum<block_end><return>-float("inf") float("inf")<block_end><block_end>
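A minimal usage sketch for the wrapper above. It assumes the optional dm_control package (and a working MuJoCo install) is available; the cartpole task and the random policy are there purely to exercise the Gym-style loop and are not part of the module itself.

from dm_control import suite

# Any dm_env-compliant environment works; dm_control's cartpole is a convenient example.
dm_task = suite.load(domain_name="cartpole", task_name="swingup")
env = DMEnv(dm_task)

print(env.action_space)       # Box converted from the bounded action spec
print(env.observation_space)  # Dict converted from the observation spec mapping

obs = env.reset()
done = False
while not done:
    action = env.action_space.sample()          # random policy, for illustration only
    obs, reward, done, info = env.step(action)  # info carries the dm_env discount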
<import_stmt>os<line_sep>DATASET_DIRECTORY_PATH="./datasets/"<line_sep>GENERATIVE_WEIGHTS_DIRECTORY_PATH="./generative_weights/"<if_stmt><not>os.path.exists(DATASET_DIRECTORY_PATH)<block_start>os.makedirs(DATASET_DIRECTORY_PATH)<block_end><if_stmt><not>os.path.exists(GENERATIVE_WEIGHTS_DIRECTORY_PATH)<block_start>os.makedirs(GENERATIVE_WEIGHTS_DIRECTORY_PATH)<block_end>
""" categories: Types,bytearray description: Array slice assignment with unsupported RHS cause: Unknown workaround: Unknown """<line_sep>b=bytearray(4)<line_sep>b[0:1]=[1 2]<line_sep>print(b)<line_sep>
""" # -*- coding: utf-8 -*- ----------------------------------------------------------------------------------- # Author: <NAME> # DoC: 2020.08.17 # email: <EMAIL> ----------------------------------------------------------------------------------- # Description: The configurations of the project will be defined here """<import_stmt>os<import_stmt>argparse<import_stmt>torch<import_from_stmt>easydict EasyDict<as>edict<def_stmt>parse_train_configs <block_start>parser=argparse.ArgumentParser(description='The Implementation using PyTorch')<line_sep>parser.add_argument('--seed' type=int default=2020 help='re-produce the results with seed random')<line_sep>parser.add_argument('--saved_fn' type=str default='fpn_resnet_18' metavar='FN' help='The name using for saving logs, models,...')<line_sep>parser.add_argument('--root-dir' type=str default='../' metavar='PATH' help='The ROOT working directory')<line_sep>#################################################################### ############## Model configs ######################## #################################################################### parser.add_argument('--arch' type=str default='fpn_resnet_18' metavar='ARCH' help='The name of the model architecture')<line_sep>parser.add_argument('--pretrained_path' type=str default=<none> metavar='PATH' help='the path of the pretrained checkpoint')<line_sep>#################################################################### ############## Dataloader and Running configs ####### #################################################################### parser.add_argument('--hflip_prob' type=float default=0.5 help='The probability of horizontal flip')<line_sep>parser.add_argument('--no-val' action='store_true' help='If true, dont evaluate the model on the val set')<line_sep>parser.add_argument('--num_samples' type=int default=<none> help='Take a subset of the dataset to run and debug')<line_sep>parser.add_argument('--num_workers' type=int default=4 help='Number of threads for loading data')<line_sep>parser.add_argument('--batch_size' type=int default=16 help='mini-batch size (default: 16), this is the total'<concat>'batch size of all GPUs on the current node when using'<concat>'Data Parallel or Distributed Data Parallel')<line_sep>parser.add_argument('--print_freq' type=int default=50 metavar='N' help='print frequency (default: 50)')<line_sep>parser.add_argument('--tensorboard_freq' type=int default=50 metavar='N' help='frequency of saving tensorboard (default: 50)')<line_sep>parser.add_argument('--checkpoint_freq' type=int default=2 metavar='N' help='frequency of saving checkpoints (default: 5)')<line_sep>#################################################################### ############## Training strategy #################### #################################################################### parser.add_argument('--start_epoch' type=int default=1 metavar='N' help='the starting epoch')<line_sep>parser.add_argument('--num_epochs' type=int default=300 metavar='N' help='number of total epochs to run')<line_sep>parser.add_argument('--lr_type' type=str default='cosin' help='the type of learning rate scheduler (cosin or multi_step or one_cycle)')<line_sep>parser.add_argument('--lr' type=float default=0.001 metavar='LR' help='initial learning rate')<line_sep>parser.add_argument('--minimum_lr' type=float default=1e-7 metavar='MIN_LR' help='minimum learning rate during training')<line_sep>parser.add_argument('--momentum' type=float default=0.949 metavar='M' help='momentum')<line_sep>parser.add_argument('-wd' 
'--weight_decay' type=float default=0. metavar='WD' help='weight decay (default: 0.)')<line_sep>parser.add_argument('--optimizer_type' type=str default='adam' metavar='OPTIMIZER' help='the type of optimizer, it can be sgd or adam')<line_sep>parser.add_argument('--steps' nargs='*' default=[150 180] help='number of burn in step')<line_sep>#################################################################### ############## Loss weight ########################## #################################################################### #################################################################### ############## Distributed Data Parallel ############ #################################################################### parser.add_argument('--world-size' default=-1 type=int metavar='N' help='number of nodes for distributed training')<line_sep>parser.add_argument('--rank' default=-1 type=int metavar='N' help='node rank for distributed training')<line_sep>parser.add_argument('--dist-url' default='tcp://127.0.0.1:29500' type=str help='url used to set up distributed training')<line_sep>parser.add_argument('--dist-backend' default='nccl' type=str help='distributed backend')<line_sep>parser.add_argument('--gpu_idx' default=<none> type=int help='GPU index to use.')<line_sep>parser.add_argument('--no_cuda' action='store_true' help='If true, cuda is not used.')<line_sep>parser.add_argument('--multiprocessing-distributed' action='store_true' help='Use multi-processing distributed training to launch '<concat>'N processes per node, which has N GPUs. This is the '<concat>'fastest way to use PyTorch for either single node or '<concat>'multi node data parallel training')<line_sep>#################################################################### ############## Evaluation configurations ################### #################################################################### parser.add_argument('--evaluate' action='store_true' help='only evaluate the model, not training')<line_sep>parser.add_argument('--resume_path' type=str default=<none> metavar='PATH' help='the path of the resumed checkpoint')<line_sep>parser.add_argument('--K' type=int default=50 help='the number of top K')<line_sep>configs=edict(vars(parser.parse_args()))<line_sep>#################################################################### ############## Hardware configurations ############################# #################################################################### configs.device=torch.device('cpu'<if>configs.no_cuda<else>'cuda')<line_sep>configs.ngpus_per_node=torch.cuda.device_count()<line_sep>configs.pin_memory=<true><line_sep>configs.input_size=(608 608)<line_sep>configs.hm_size=(152 152)<line_sep>configs.down_ratio=4<line_sep>configs.max_objects=50<line_sep>configs.imagenet_pretrained=<true><line_sep>configs.head_conv=64<line_sep>configs.num_classes=3<line_sep>configs.num_center_offset=2<line_sep>configs.num_z=1<line_sep>configs.num_dim=3<line_sep>configs.num_direction=2# sin, cos configs.heads={'hm_cen':configs.num_classes 'cen_offset':configs.num_center_offset 'direction':configs.num_direction 'z_coor':configs.num_z 'dim':configs.num_dim}<line_sep>configs.num_input_features=4<line_sep>#################################################################### ############## Dataset, logs, Checkpoints dir ###################### #################################################################### configs.dataset_dir=os.path.join(configs.root_dir 'dataset' 'kitti')<line_sep>configs.checkpoints_dir=os.path.join(configs.root_dir 'checkpoints' 
configs.saved_fn)<line_sep>configs.logs_dir=os.path.join(configs.root_dir 'logs' configs.saved_fn)<if_stmt><not>os.path.isdir(configs.checkpoints_dir)<block_start>os.makedirs(configs.checkpoints_dir)<block_end><if_stmt><not>os.path.isdir(configs.logs_dir)<block_start>os.makedirs(configs.logs_dir)<block_end><return>configs<block_end>
# -*- coding: utf-8 -*- <import_from_stmt>.munsell *# noqa <import_from_stmt>. munsell<line_sep>__all__=[]<line_sep>__all__<augadd>munsell.__all__<line_sep>
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Learning rate schedule."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>functools<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>official.modeling.hyperparams params_dict<class_stmt>StepLearningRateWithLinearWarmup(tf.keras.optimizers.schedules.LearningRateSchedule)<block_start>"""Class to generate learning rate tensor."""<def_stmt>__init__ self total_steps params<block_start>"""Creates the step learning rate tensor with linear warmup."""<line_sep>super(StepLearningRateWithLinearWarmup self).__init__()<line_sep>self._total_steps=total_steps<assert_stmt>isinstance(params (dict params_dict.ParamsDict))<if_stmt>isinstance(params dict)<block_start>params=params_dict.ParamsDict(params)<block_end>self._params=params<block_end><def_stmt>__call__ self global_step<block_start>warmup_lr=self._params.warmup_learning_rate<line_sep>warmup_steps=self._params.warmup_steps<line_sep>init_lr=self._params.init_learning_rate<line_sep>lr_levels=self._params.learning_rate_levels<line_sep>lr_steps=self._params.learning_rate_steps<line_sep>linear_warmup=(warmup_lr+tf.cast(global_step dtype=tf.float32)/warmup_steps<times>(init_lr-warmup_lr))<line_sep>learning_rate=tf.where(global_step<l>warmup_steps linear_warmup init_lr)<for_stmt>next_learning_rate,start_step zip(lr_levels lr_steps)<block_start>learning_rate=tf.where(global_step<ge>start_step next_learning_rate learning_rate)<block_end><return>learning_rate<block_end><def_stmt>get_config self<block_start><return>{'_params':self._params.as_dict()}<block_end><block_end><class_stmt>CosineLearningRateWithLinearWarmup(tf.keras.optimizers.schedules.LearningRateSchedule)<block_start>"""Class to generate learning rate tensor."""<def_stmt>__init__ self total_steps params<block_start>"""Creates the consine learning rate tensor with linear warmup."""<line_sep>super(CosineLearningRateWithLinearWarmup self).__init__()<line_sep>self._total_steps=total_steps<assert_stmt>isinstance(params (dict params_dict.ParamsDict))<if_stmt>isinstance(params dict)<block_start>params=params_dict.ParamsDict(params)<block_end>self._params=params<block_end><def_stmt>__call__ self global_step<block_start>global_step=tf.cast(global_step dtype=tf.float32)<line_sep>warmup_lr=self._params.warmup_learning_rate<line_sep>warmup_steps=self._params.warmup_steps<line_sep>init_lr=self._params.init_learning_rate<line_sep>total_steps=self._total_steps<line_sep>linear_warmup=(warmup_lr+global_step/warmup_steps<times>(init_lr-warmup_lr))<line_sep>cosine_learning_rate=(init_lr<times>(tf.cos(np.pi<times>(global_step-warmup_steps)/(total_steps-warmup_steps))+1.0)/2.0)<line_sep>learning_rate=tf.where(global_step<l>warmup_steps linear_warmup cosine_learning_rate)<line_sep><return>learning_rate<block_end><def_stmt>get_config 
self<block_start><return>{'_params':self._params.as_dict()}<block_end><block_end><def_stmt>learning_rate_generator total_steps params<block_start>"""The learning rate function generator."""<if_stmt>params.type<eq>'step'<block_start><return>StepLearningRateWithLinearWarmup(total_steps params)<block_end><elif_stmt>params.type<eq>'cosine'<block_start><return>CosineLearningRateWithLinearWarmup(total_steps params)<block_end><else_stmt><block_start><raise>ValueError('Unsupported learning rate type: {}.'.format(params.type))<block_end><block_end>
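A short sketch of how these schedules are typically consumed. The concrete step counts and rates below are illustrative only; `params_dict` is the same helper imported at the top of this module, and the schedule object can be handed straight to a Keras optimizer.

from official.modeling.hyperparams import params_dict
import tensorflow as tf

lr_config = params_dict.ParamsDict({
    'type': 'cosine',
    'warmup_learning_rate': 0.0067,
    'warmup_steps': 500,
    'init_learning_rate': 0.08,
})
total_steps = 22500

learning_rate = learning_rate_generator(total_steps, lr_config)
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9)

# Sampling the schedule directly shows the linear warmup followed by the cosine decay.
for step in (0, 250, 500, 10000, total_steps):
    print(step, float(learning_rate(step)))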
# Copyright 2021 Catalyst Cloud # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The code related to integration between oslo.cache module and trove."""<import_from_stmt>oslo_cache core<import_from_stmt>oslo_config cfg<def_stmt>register_cache_configurations conf<block_start>"""Register all configurations required for oslo.cache. The procedure registers all configurations required for oslo.cache. It should be called before configuring of cache region """<line_sep>core.configure(conf)<line_sep>ports_cache_group=cfg.OptGroup('instance_ports_cache')<line_sep>ports_cache_opts=[cfg.IntOpt('expiration_time' default=86400 help='TTL, in seconds, for any cached item in the '<concat>'dogpile.cache region used for caching of the '<concat>'instance ports.') cfg.BoolOpt("caching" default=<true> help='Toggle to enable/disable caching when getting trove '<concat>'instance ports. Please note that the global toggle '<concat>'for oslo.cache(enabled=True in [cache] group) '<concat>'must be enabled to use this feature.')]<line_sep>conf.register_group(ports_cache_group)<line_sep>conf.register_opts(ports_cache_opts group=ports_cache_group)<line_sep><return>conf<block_end># variable that stores an initialized cache region for trove _REGION=<none><def_stmt>get_cache_region <block_start><global>_REGION<if_stmt><not>_REGION<block_start>_REGION=core.configure_cache_region(conf=register_cache_configurations(cfg.CONF) region=core.create_region())<block_end><return>_REGION<block_end>
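A rough sketch of how the region above is meant to be consumed. `get_memoization_decorator` is oslo.cache's standard way to memoise a callable against a configured region; `_load_instance_ports` is a hypothetical stand-in for the real, slow lookup and is not an actual Trove function.

from oslo_cache import core
from oslo_config import cfg

# Build a memoisation decorator bound to the instance_ports_cache option group,
# so its 'caching' toggle and 'expiration_time' TTL control the behaviour.
MEMOIZE_PORTS = core.get_memoization_decorator(
    cfg.CONF, get_cache_region(), 'instance_ports_cache')

@MEMOIZE_PORTS
def _load_instance_ports(instance_id):
    # Placeholder for the expensive call this cache is meant to avoid repeating.
    return ['port-for-%s' % instance_id]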
# coding=utf-8 # Copyright (c) 2019 Alibaba PAI team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>tensorflow<as>tf<def_stmt>softmax_cross_entropy labels depth logits<block_start>labels=tf.squeeze(labels)<line_sep>one_hot_labels=tf.one_hot(labels depth=depth dtype=tf.float32)<line_sep>loss=tf.losses.softmax_cross_entropy(onehot_labels=one_hot_labels logits=logits)<line_sep><return>loss<block_end><def_stmt>mean_square_error labels logits<block_start><return>tf.losses.mean_squared_error(labels logits)<block_end><def_stmt>multi_label_sigmoid_cross_entropy labels depth logits<block_start>one_hots=tf.one_hot(labels depth)<line_sep>multi_hots=tf.reduce_max(one_hots axis=1)<line_sep>multi_hots=tf.cast(multi_hots logits.dtype)<line_sep><return>tf.losses.sigmoid_cross_entropy(multi_class_labels=multi_hots logits=logits)<block_end>
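A small sanity-check sketch for the helpers above, written against the same TF 1.x `tf.losses` API the module uses; the logits and labels are made-up values.

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1],
                      [0.2, 1.5, 0.3]])
labels = tf.constant([[0], [1]], dtype=tf.int32)  # one class id per example

loss = softmax_cross_entropy(labels, depth=3, logits=logits)

with tf.Session() as sess:
    print(sess.run(loss))  # scalar cross-entropy averaged over the batch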
<import_stmt>responses<import_from_stmt>dagster build_op_context op<import_from_stmt>dagster_pagerduty pagerduty_resource<line_sep>@responses.activate<def_stmt>test_pagerduty_resource <block_start>@op(required_resource_keys={"pagerduty"})<def_stmt>pagerduty_op context<block_start><assert_stmt>context.resources.pagerduty<with_stmt>responses.RequestsMock()<as>rsps<block_start>rsps.add(rsps.POST "https://events.pagerduty.com/v2/enqueue/" status=202 json={"status":"success" "message":"Event processed" "dedup_key":"foobar"} )<line_sep>context.resources.pagerduty.EventV2_create(summary="PING OK - Packet loss = 0%, RTA = 1.41 ms Host 'acme-andromeda-sv1-c40"<concat>":: 172.16.31.10' is DOWN" source="prod05.theseus.acme-widgets.com" severity="error" event_action="trigger" dedup_key="foobar" timestamp="2015-07-17T08:42:58.315+0000" component="mysql" group="prod-datapipe" event_class="High CPU" custom_details={"ping time":"1500ms" "load avg":0.75} )<line_sep><return><true><block_end><block_end><with_stmt>build_op_context(resources={"pagerduty":pagerduty_resource.configured({"routing_key":"<KEY>"})})<as>context<block_start><assert_stmt>pagerduty_op(context)<block_end><block_end>
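Outside the mocked test above, the resource is wired into a job in the usual Dagster way. The sketch below is hypothetical: the op, job and routing key are illustrative, and the event fields are a subset of those exercised in the test.

from dagster import job, op
from dagster_pagerduty import pagerduty_resource

@op(required_resource_keys={"pagerduty"})
def trigger_alert(context):
    context.resources.pagerduty.EventV2_create(
        summary="Example alert from a Dagster job",
        source="example-host",
        severity="error",
        event_action="trigger",
    )

@job(resource_defs={
    "pagerduty": pagerduty_resource.configured(
        {"routing_key": "0123456789abcdef0123456789abcdef"})
})
def alerting_job():
    trigger_alert()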
# coding=utf-8 # Copyright 2021 The Uncertainty Baselines Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Util file for psl rules test."""<import_from_stmt>typing List<import_stmt>tensorflow<as>tf<line_sep>LOGITS=[[[0.0 0.0 0.4 0.4 0.0 0.2 0.0 0.0 0.0] [0.0 0.0 0.2 0.6 0.0 0.2 0.0 0.0 0.0] [0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.5 0.0] [0.0 0.8 0.1 0.1 0.2 0.0 0.0 0.0 0.2] [0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0] [0.1 0.0 0.0 0.0 0.0 0.0 0.3 0.0 1.0] [0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0] [0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0] [0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0] [0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0]] [[0.0 0.8 0.0 0.0 0.0 0.2 0.0 0.0 0.0] [0.0 0.0 0.5 0.0 0.0 0.5 0.0 0.0 0.0] [0.0 0.0 0.5 0.4 0.0 0.0 0.0 0.1 0.0] [0.0 0.0 0.8 0.2 0.0 0.0 0.0 0.0 0.0] [0.0 0.1 0.0 0.0 0.0 0.0 0.0 0.0 1.0] [0.0 0.0 0.9 0.1 0.0 0.0 0.0 0.0 0.0] [0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0] [0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0] [0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0] [0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0]]]<line_sep>FEATURES=[[[-1 -2 -2 -2 -1 -2 -2 -2] [-1 -2 -2 -2 -1 -2 -2 -2] [-1 -2 -2 -2 -2 -2 -2 -2] [-1 -2 -2 -2 -2 -1 -2 -1] [-1 -2 -2 -1 -1 -2 -2 -2] [-2 -1 -2 -1 -1 -2 -1 -2] [-3 -2 -2 -2 -2 -2 -2 -2] [-3 -2 -2 -2 -2 -2 -2 -2] [-3 -2 -2 -2 -2 -2 -2 -2] [-3 -2 -2 -2 -2 -2 -2 -2]] [[-1 -2 -2 -2 -2 -2 -2 -2] [-1 -2 -2 -2 -2 -2 -2 -2] [-1 -2 -2 -1 -1 -2 -2 -2] [-1 -2 -2 -2 -1 -2 -2 -1] [-1 -2 -1 -2 -1 -2 -2 -2] [-2 -2 -2 -1 -1 -2 -2 -2] [-3 -2 -2 -2 -2 -2 -2 -2] [-3 -2 -2 -2 -2 -2 -2 -2] [-3 -2 -2 -2 -2 -2 -2 -2] [-3 -2 -2 -2 -2 -2 -2 -2]]]<line_sep>DATA={'train_data':[[[[1109 1616 41 800 740 1743 557 981 886 1616 1658 909 1380 1256 1565 482 1304] [1109 1304]] [[1109 1023 38 893 1037 1664 886 1304] [1109 218 751 1616 812 1406 1152 981 65 778 688 886 427 641 611 742 321 557 354 1471 161 182 767 1304]] [[1109 1162 145 557 981 740 734 776 1037 755 886 1304] [1109 1616 812 1406 1152 981 79 886 766 1616 558 165 1471 161 182 4 1304]] [[1109 1738 145 893 532 1304] [1109 1616 1658 218 1616 812 1406 1152 981 79 886 1023 38 557 354 182 731 161 182 1304]] [[1109 1738 145 1215 1047 1274 1304] [1109 1616 812 1406 1152 981 740 65 778 688 886 427 641 611 742 321 557 354 1017 161 731 1304]] [[1109 1162 641 631 145 1738 1499 740 1743 557 981 1304] [1109 1616 1658 218 145 1162 1499 981 740 263 173 62 886 766 1616 558 165 1471 161 1017 4 1304]]] [[[1109 1616 1658 1450 1743 800 1430 79 886 1616 1658 1496 1565 1448 929 1489 742 1662 1565 1662 1304] [1109 1304]]] [[[1109 1616 1658 1276 1450 1743 800 1430 79 751 1616 1133 1431 1496 742 1062 1415 1565 818 1304] [1109 1304]]] [[[1109 1616 41 800 981 886 1616 1077 742 1145 1565 83 1037 923 1304] [1109 1304]] [[1109 1738 145 557 740 1743 557 981 909 256 680 187 1304] [1109 218 1616 812 1406 1152 981 740 886 1023 38 557 354 182 767 161 1017 4 1304]] [[1109 525 641 751 1498 1133 1431 1085 1743 610 1304] [1109 427 641 611 742 865 641 557 574 1304]] [[1109 525 641 751 1498 1133 1431 1085 886 1304] [1109 1185 641 1077 1762 512 4 1304]]] [[[1109 764 1178 1616 1658 1450 1743 557 981 79 886 1616 1133 
1431 1496 742 821 1565 83 1304] [1109 1304]]]] 'test_data':[[[[1109 1616 1658 1450 1743 891 38 800 1430 886 1616 1658 909 742 499 1565 1159 1472 886 1304] [1109 1304]]] [[[1109 1616 427 611 564 112 801 1412 742 446 248 800 1001 194 886 1616 1077 742 1514 1743 142 886 1304] [1109 1304]] [[1109 1738 1573 557 1510 1561 1301 1301 1412 4 1304] [1109 1616 323 800 1409 1177 886 1573 1738 557 1412 742 1621 248 800 1001 194 886 1304]] [[1109 1499 1718 37 1738 1337 1616 1077 886 1304] [1109 800 1176 72 1506 1738 1374 751 427 641 611 742 1514 1573 1304]]] [[[1109 1228 1616 1658 1450 1743 800 981 886 1616 1077 742 1145 283 1669 1565 482 1250 551 886 1304] [1109 1304]] [[1109 1228 766 641 1406 1762 742 849 1304] [1109 1616 812 1406 1152 981 740 886 427 641 611 742 321 557 354 182 731 4 1304]] [[1109 1718 37 1738 1337 1616 1077 1304] [1109 427 641 611 742 865 641 557 574 1304]] [[1109 525 641 37 1738 1337 1616 1077 886 1304] [1109 1738 145 1762 512 1616 766 814 641 4 1304]]] [[[1109 1228 1616 1658 1450 1743 662 226 557 981 79 886 1616 1658 1496 742 1187 1493 1136 1565 1690 886 1304] [1109 1304]]] ] 'vocab_mapping':{'address':53 'thank':525 'sure':631 'yes':758 'hello':764 'pricey':1012 'hi':1228 'great':1490 'no':1499 'phone':1596 'thanks':1718 } 'train_labels':[['init_request' 'second_request' 'second_request' 'second_request' 'second_request' 'insist'] ['init_request'] ['init_request'] ['init_request' 'second_request' 'cancel' 'end'] ['init_request']] 'test_labels':[['init_request'] ['init_request' 'slot_question' 'cancel'] ['init_request' 'second_request' 'cancel' 'end'] ['init_request']]}<line_sep>TEST_MULTIWOZ_CONFIG={'default_seed':4 'batch_size':128 'max_dialog_size':10 'max_utterance_size':40 'class_map':{'accept':0 'cancel':1 'end':2 'greet':3 'info_question':4 'init_request':5 'insist':6 'second_request':7 'slot_question':8 } 'accept_words':['yes' 'great'] 'cancel_words':['no'] 'end_words':['thank' 'thanks'] 'greet_words':['hello' 'hi'] 'info_question_words':['address' 'phone'] 'insist_words':['sure' 'no'] 'slot_question_words':['pricey'] 'includes_word':-1 'excludes_word':-2 'mask_index':0 'accept_index':1 'cancel_index':2 'end_index':3 'greet_index':4 'info_question_index':5 'insist_index':6 'slot_question_index':7 'utterance_mask':-1 'last_utterance_mask':-2 'pad_utterance_mask':-3 'shuffle_train':<true> 'shuffle_test':<false> 'train_epochs':5 }<def_stmt>build_constrained_model input_size:List[int]<arrow>tf.keras.Model<block_start>"""Build simple neural model for class prediction."""<line_sep>input_layer=tf.keras.layers.Input(input_size)<line_sep>hidden_layer_1=tf.keras.layers.Dense(1024)(input_layer)<line_sep>hidden_layer_2=tf.keras.layers.Dense(512 activation='sigmoid')(hidden_layer_1)<line_sep>output=tf.keras.layers.Dense(9 activation='softmax' kernel_regularizer=tf.keras.regularizers.l2(1.0))(hidden_layer_2)<line_sep>model=tf.keras.Model(input_layer output)<line_sep>model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001) loss='categorical_crossentropy' metrics=['accuracy'])<line_sep><return>model<block_end>
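A minimal usage sketch for build_constrained_model above, written as plain Python. The 8-feature input width and 9-class one-hot labels mirror the FEATURES/LOGITS shapes; the random training batch is purely illustrative and not taken from the test data.
import numpy as np
import tensorflow as tf

# Build the small constrained classifier defined above: 8 input features -> 9 classes.
model = build_constrained_model([8])

# Illustrative dummy batch (values and labels are made up for the sketch).
x = np.random.uniform(-3.0, 0.0, size=(32, 8)).astype('float32')
y = tf.keras.utils.to_categorical(np.random.randint(0, 9, size=32), num_classes=9)

model.fit(x, y, epochs=1, batch_size=16, verbose=0)
probs = model.predict(x[:2])  # shape (2, 9); each row is a softmax over the 9 classes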
<import_from_stmt>time sleep<import_from_stmt>twowheel TwoWheelController<def_stmt>test_controller controller<block_start><for_stmt>_ range(2)<block_start>controller.set_axis(x=-0.3)<line_sep>sleep(0.25)<line_sep>controller.set_axis(x=0.3)<line_sep>sleep(0.25)<line_sep>controller.set_axis()<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>controller=TwoWheelController()<line_sep>test_controller(controller)<block_end>
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. <import_from_stmt>oci.util formatted_flat_dict NONE_SENTINEL value_allowed_none_or_none_sentinel# noqa: F401 <import_from_stmt>oci.decorators init_model_state_from_kwargs<line_sep>@init_model_state_from_kwargs<class_stmt>CreateDeploymentBackupDetails(object)<block_start>""" The information about a new DeploymentBackup. """<def_stmt>__init__ self **kwargs<block_start>""" Initializes a new CreateDeploymentBackupDetails object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param display_name: The value to assign to the display_name property of this CreateDeploymentBackupDetails. :type display_name: str :param compartment_id: The value to assign to the compartment_id property of this CreateDeploymentBackupDetails. :type compartment_id: str :param deployment_id: The value to assign to the deployment_id property of this CreateDeploymentBackupDetails. :type deployment_id: str :param namespace_name: The value to assign to the namespace_name property of this CreateDeploymentBackupDetails. :type namespace_name: str :param bucket_name: The value to assign to the bucket_name property of this CreateDeploymentBackupDetails. :type bucket_name: str :param object_name: The value to assign to the object_name property of this CreateDeploymentBackupDetails. :type object_name: str :param freeform_tags: The value to assign to the freeform_tags property of this CreateDeploymentBackupDetails. :type freeform_tags: dict(str, str) :param defined_tags: The value to assign to the defined_tags property of this CreateDeploymentBackupDetails. :type defined_tags: dict(str, dict(str, object)) """<line_sep>self.swagger_types={'display_name':'str' 'compartment_id':'str' 'deployment_id':'str' 'namespace_name':'str' 'bucket_name':'str' 'object_name':'str' 'freeform_tags':'dict(str, str)' 'defined_tags':'dict(str, dict(str, object))'}<line_sep>self.attribute_map={'display_name':'displayName' 'compartment_id':'compartmentId' 'deployment_id':'deploymentId' 'namespace_name':'namespaceName' 'bucket_name':'bucketName' 'object_name':'objectName' 'freeform_tags':'freeformTags' 'defined_tags':'definedTags'}<line_sep>self._display_name=<none><line_sep>self._compartment_id=<none><line_sep>self._deployment_id=<none><line_sep>self._namespace_name=<none><line_sep>self._bucket_name=<none><line_sep>self._object_name=<none><line_sep>self._freeform_tags=<none><line_sep>self._defined_tags=<none><block_end>@property<def_stmt>display_name self<block_start>""" **[Required]** Gets the display_name of this CreateDeploymentBackupDetails. An object's Display Name. :return: The display_name of this CreateDeploymentBackupDetails. :rtype: str """<line_sep><return>self._display_name<block_end>@display_name.setter<def_stmt>display_name self display_name<block_start>""" Sets the display_name of this CreateDeploymentBackupDetails. An object's Display Name. :param display_name: The display_name of this CreateDeploymentBackupDetails. :type: str """<line_sep>self._display_name=display_name<block_end>@property<def_stmt>compartment_id self<block_start>""" **[Required]** Gets the compartment_id of this CreateDeploymentBackupDetails. 
The `OCID`__ of the compartment being referenced. __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm :return: The compartment_id of this CreateDeploymentBackupDetails. :rtype: str """<line_sep><return>self._compartment_id<block_end>@compartment_id.setter<def_stmt>compartment_id self compartment_id<block_start>""" Sets the compartment_id of this CreateDeploymentBackupDetails. The `OCID`__ of the compartment being referenced. __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm :param compartment_id: The compartment_id of this CreateDeploymentBackupDetails. :type: str """<line_sep>self._compartment_id=compartment_id<block_end>@property<def_stmt>deployment_id self<block_start>""" **[Required]** Gets the deployment_id of this CreateDeploymentBackupDetails. The `OCID`__ of the deployment being referenced. __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm :return: The deployment_id of this CreateDeploymentBackupDetails. :rtype: str """<line_sep><return>self._deployment_id<block_end>@deployment_id.setter<def_stmt>deployment_id self deployment_id<block_start>""" Sets the deployment_id of this CreateDeploymentBackupDetails. The `OCID`__ of the deployment being referenced. __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm :param deployment_id: The deployment_id of this CreateDeploymentBackupDetails. :type: str """<line_sep>self._deployment_id=deployment_id<block_end>@property<def_stmt>namespace_name self<block_start>""" **[Required]** Gets the namespace_name of this CreateDeploymentBackupDetails. Name of namespace that serves as a container for all of your buckets :return: The namespace_name of this CreateDeploymentBackupDetails. :rtype: str """<line_sep><return>self._namespace_name<block_end>@namespace_name.setter<def_stmt>namespace_name self namespace_name<block_start>""" Sets the namespace_name of this CreateDeploymentBackupDetails. Name of namespace that serves as a container for all of your buckets :param namespace_name: The namespace_name of this CreateDeploymentBackupDetails. :type: str """<line_sep>self._namespace_name=namespace_name<block_end>@property<def_stmt>bucket_name self<block_start>""" **[Required]** Gets the bucket_name of this CreateDeploymentBackupDetails. Name of the bucket where the object is to be uploaded in the object storage :return: The bucket_name of this CreateDeploymentBackupDetails. :rtype: str """<line_sep><return>self._bucket_name<block_end>@bucket_name.setter<def_stmt>bucket_name self bucket_name<block_start>""" Sets the bucket_name of this CreateDeploymentBackupDetails. Name of the bucket where the object is to be uploaded in the object storage :param bucket_name: The bucket_name of this CreateDeploymentBackupDetails. :type: str """<line_sep>self._bucket_name=bucket_name<block_end>@property<def_stmt>object_name self<block_start>""" **[Required]** Gets the object_name of this CreateDeploymentBackupDetails. Name of the object to be uploaded to object storage :return: The object_name of this CreateDeploymentBackupDetails. :rtype: str """<line_sep><return>self._object_name<block_end>@object_name.setter<def_stmt>object_name self object_name<block_start>""" Sets the object_name of this CreateDeploymentBackupDetails. Name of the object to be uploaded to object storage :param object_name: The object_name of this CreateDeploymentBackupDetails. 
:type: str """<line_sep>self._object_name=object_name<block_end>@property<def_stmt>freeform_tags self<block_start>""" Gets the freeform_tags of this CreateDeploymentBackupDetails. A simple key-value pair that is applied without any predefined name, type, or scope. Exists for cross-compatibility only. Example: `{\"bar-key\": \"value\"}` :return: The freeform_tags of this CreateDeploymentBackupDetails. :rtype: dict(str, str) """<line_sep><return>self._freeform_tags<block_end>@freeform_tags.setter<def_stmt>freeform_tags self freeform_tags<block_start>""" Sets the freeform_tags of this CreateDeploymentBackupDetails. A simple key-value pair that is applied without any predefined name, type, or scope. Exists for cross-compatibility only. Example: `{\"bar-key\": \"value\"}` :param freeform_tags: The freeform_tags of this CreateDeploymentBackupDetails. :type: dict(str, str) """<line_sep>self._freeform_tags=freeform_tags<block_end>@property<def_stmt>defined_tags self<block_start>""" Gets the defined_tags of this CreateDeploymentBackupDetails. Tags defined for this resource. Each key is predefined and scoped to a namespace. Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}` :return: The defined_tags of this CreateDeploymentBackupDetails. :rtype: dict(str, dict(str, object)) """<line_sep><return>self._defined_tags<block_end>@defined_tags.setter<def_stmt>defined_tags self defined_tags<block_start>""" Sets the defined_tags of this CreateDeploymentBackupDetails. Tags defined for this resource. Each key is predefined and scoped to a namespace. Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}` :param defined_tags: The defined_tags of this CreateDeploymentBackupDetails. :type: dict(str, dict(str, object)) """<line_sep>self._defined_tags=defined_tags<block_end><def_stmt>__repr__ self<block_start><return>formatted_flat_dict(self)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>other<is><none><block_start><return><false><block_end><return>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end><block_end>
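A short usage sketch in plain Python. The OCIDs, namespace, and bucket/object names are placeholders, and the oci.golden_gate.models module path is an assumption based on the service this model belongs to.
import oci

# Assemble backup details for a GoldenGate deployment (all identifiers are placeholders).
details = oci.golden_gate.models.CreateDeploymentBackupDetails(
    display_name="nightly-backup",
    compartment_id="ocid1.compartment.oc1..exampleuniqueID",
    deployment_id="ocid1.goldengatedeployment.oc1..exampleuniqueID",
    namespace_name="mytenancynamespace",
    bucket_name="gg-backups",
    object_name="deployment-backup-2021-06-01.bak",
    freeform_tags={"env": "dev"},
)
# The details object would then be passed to the GoldenGate client's
# create_deployment_backup call (not shown in the file above).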
"""FFI related code patterns and utility function to detect them."""<import_from_stmt>typing Optional<import_stmt>re<import_stmt>attr<import_stmt>numpy<as>np<import_from_stmt>bisect bisect<import_from_stmt>.lsp Range Position Location<class_stmt>Pattern<block_start>"""Base class of all interesting code patterns."""<line_sep><pass><block_end>@attr.s<class_stmt>Def(Pattern)<block_start>"""Definition of an FFI resource. Parameters ---------- key : str Global unique id to locate the FFI resource. path : str The file path. range: Range The location range of the defininition. """<line_sep>key:str=attr.ib()<line_sep>path:str=attr.ib()<line_sep>range:Range=attr.ib()<block_end>@attr.s<class_stmt>Ref(Pattern)<block_start>"""Reference of an FFI resource. Parameters ---------- key : str Global unique id to locate the FFI resource. path : str The file path. range: Range The location range of the defininition. """<line_sep>key:str=attr.ib()<line_sep>path:str=attr.ib()<line_sep>range:Range=attr.ib()<block_end>@attr.s<class_stmt>Export(Pattern)<block_start>"""Export a collection of keys with the same prefix. Parameters ---------- key_prefix : str The prefix of keys to be exported. path : str The file path to be exported. fkey2var : Function The function that takes a key and maps to a local var in the path which corresponds to the FFI resource. fvar2key : Function The function that takes local var name and maps to the corresponding key, can return None. """<line_sep>key_prefix:str=attr.ib()<line_sep>path:str=attr.ib()<line_sep>fkey2var=attr.ib()<line_sep>fvar2key=attr.ib()<block_end>@attr.s<class_stmt>Symbol(Pattern)<block_start>"""A symbol in python expression, can contain dot. """<line_sep>value:str=attr.ib()<block_end><def_stmt>re_matcher rexpr fcreate use_search=<false><block_start>""" Parameters ---------- rexpr : str A regexp pattern to match. fcreate : Function (match, path, range) -> result. use_search: bool Whether use search """<line_sep>rexpr=re.compile(rexpr)<def_stmt>_matcher path source begin_line=0 end_line=<none><block_start>source=source.split("\n")<if>isinstance(source str)<else>source<line_sep>results=[]<line_sep>end_line=min(end_line len(source))<if>end_line<else>len(source)<for_stmt>line range(begin_line end_line)<block_start>content=source[line]<line_sep>match=rexpr.search(content)<if>use_search<else>rexpr.match(content)<if_stmt>match<block_start>start,end=match.span()<line_sep>start_pos=Position(line start)<line_sep>end_pos=Position(line end)<line_sep>item=fcreate(match path Range(start_pos end_pos))<if_stmt>item<block_start>results.append(item)<block_end><block_end><block_end><return>results<block_end><return>_matcher<block_end><def_stmt>re_multi_line_matcher rexpr fcreate<block_start>""" Matches a pattern spanning multiple lines Parameters ---------- rexpr : str A regexp pattern to match. fcreate : Function (match, path, range) -> result. 
"""<line_sep>rexpr=re.compile(rexpr)<def_stmt>_matcher path lines<block_start>source="".join(lines)<line_sep>matches=list(rexpr.finditer(source))<if_stmt>matches<eq>[]<block_start><return>[]<block_end>line_counts=map(<lambda>line:len(line) lines)<line_sep>cumsum=np.cumsum(list(line_counts))<line_sep># find line num, start and end pos for each match next_begin=0<line_sep>result=[]<for_stmt>match matches<block_start>line_num_start=bisect(cumsum[next_begin:] match.start())+next_begin<line_sep>next_begin=line_num_start<line_sep>line_num_end=line_num_start+match.group().count("\n")<line_sep>pos_start=match.start()-int(cumsum[line_num_start-1])<line_sep>pos_end=match.end()-int(cumsum[line_num_end-1])<assert_stmt>(pos_start<ge>0<and>pos_end<ge>0)<line_sep>rg=Range(Position(line_num_start pos_start) Position(line_num_end pos_end))<line_sep>result.append(fcreate(match path rg))<block_end><return>result<block_end><return>_matcher<block_end><def_stmt>re_match_pybind_class <block_start><return>re_multi_line_matcher(r"py::class_\<[A-Za-z0-9|_|::|<|>]+(\,\s*[A-Za-z0-9|_|::|<|>]+)*\>"<concat>r"\s*\(\s*m,\s*\"(?P<key>[A-Za-z0-9|_]+)\""<concat>r"(,\s*[A-Za-z0-9|_|::|<|>|(|)]+)*\)" <lambda>match path rg:Def(key=match.group("key") path=path range=rg))<block_end><def_stmt>re_match_pybind_method <block_start><return>re_multi_line_matcher(r"\.def\(\s*\"(?P<key>[a-z0-9|_]+)\"" <lambda>match path rg:Def(key=match.group("key") path=path range=rg))<block_end><def_stmt>macro_matcher macro_names fcreate=<none><block_start>"""Match pattern <macro_name>("<skey>" Parameters ---------- macro_names : list List of macro names to match. fcreate : Function (skey, path, range, macro_name) -> result. """<line_sep>rexpr=r"(?P<macro_name>("<line_sep>rexpr<augadd>"|".join(re.escape(x)<for>x macro_names)<line_sep>rexpr<augadd>r"))\(\"(?P<skey>[^\"]+)\"\)?"<def_stmt>_fcreate match path rg<block_start><return>fcreate(match.group("skey") path rg match.group("macro_name"))<block_end><return>re_matcher(rexpr _fcreate)<block_end><def_stmt>func_get_searcher func_names fcreate=<none><block_start>"""Search pattern <func_name>("<skey>") Parameters ---------- func_names : list List of macro names to match. fcreate : Function (skey, path, range, func_name) -> result. """<line_sep>rexpr=r"(?P<func_name>("<line_sep>rexpr<augadd>"|".join(re.escape(x)<for>x func_names)<line_sep>rexpr<augadd>r"))\(\"(?P<skey>[^\"]+)\"\)"<line_sep>rexpr=re.compile(rexpr)<def_stmt>_matcher path source begin_line=0 end_line=<none><block_start>source=source.split("\n")<if>isinstance(source str)<else>source<line_sep>results=[]<line_sep>end_line=min(end_line len(source))<if>end_line<else>len(source)<for_stmt>line range(begin_line end_line)<block_start>content=source[line]<line_sep>str_pos=0<while_stmt><true><block_start>match=rexpr.search(content str_pos)<if_stmt><not>match<block_start><break><block_end>start,end=match.span("skey")<line_sep>start_pos=Position(line start)<line_sep>end_pos=Position(line end)<line_sep>item=fcreate(match.group("skey") path Range(start_pos end_pos) match.group("func_name"))<if_stmt>item<block_start>results.append(item)<block_end>str_pos=match.end()<block_end><block_end><return>results<block_end><return>_matcher<block_end><def_stmt>decorator_matcher func_names keyword fcreate=<none><block_start>"""Search pattern @[namespace]<func_name>("<skey>") Parameters ---------- func_names : list List of macro names to match. fcreate : Function (skey, path, range, func_name) -> result. 
"""<line_sep>decorator=r"@?(?P<decorator>([a-zA-Z_]?[a-zA-Z_0-9.]*.)?("<line_sep>decorator<augadd>"|".join(re.escape(x)<for>x func_names)<line_sep>decorator<augadd>"))((\(\"(?P<skey>[^\"]+)\")|(\s*\Z))"<line_sep>nextline=keyword+r"\s+(?P<skey>[a-zA-Z_0-9]+)\("<line_sep>decorator=re.compile(decorator)<line_sep>nextline=re.compile(nextline)<def_stmt>_matcher path source begin_line=0 end_line=<none><block_start>source=source.split("\n")<if>isinstance(source str)<else>source<line_sep>results=[]<line_sep>end_line=min(end_line len(source))<if>end_line<else>len(source)<for_stmt>line range(begin_line end_line)<block_start>content=source[line]<line_sep>match=decorator.match(content)<if_stmt>match<block_start>skey=match.group("skey")<if_stmt>skey<block_start>start,end=match.span("skey")<line_sep>lineno=line<block_end><if_stmt><not>skey<and>line+1<l>len(source)<block_start>match_name=nextline.match(source[line+1])<if_stmt>match_name<block_start>skey=match_name.group("skey")<line_sep>start,end=match_name.span("skey")<line_sep>lineno=line+1<block_end><block_end><if_stmt>skey<block_start>start_pos=Position(lineno start)<line_sep>end_pos=Position(lineno end)<line_sep>item=fcreate(skey path Range(start_pos end_pos) match.group("decorator"))<if_stmt>item<block_start>results.append(item)<block_end><block_end><block_end><block_end><return>results<block_end><return>_matcher<block_end>@attr.s<class_stmt>PyImport<block_start>"""Python import syntax."""<line_sep>from_mod:Optional[str]=attr.ib()<line_sep>import_name:str=attr.ib()<line_sep>alias:Optional[str]=attr.ib()<block_end>RE_PY_IMPORT_PREFIX=re.compile(r"\s*from\s+(?P<mod>[^\s]+)\s+import")<line_sep>RE_PY_IMPORT_ITEM=re.compile(r"\s+(?P<name>[^\s]+)(\s+as\s+(?P<alias>[^\s]+))?")<def_stmt>find_py_imports source<block_start>"""Discover python import information."""<line_sep>source=source.split("\n")<if>isinstance(source str)<else>source<line_sep>results=[]<for_stmt>line,content enumerate(source)<block_start>prefix=RE_PY_IMPORT_PREFIX.match(content)<if_stmt>prefix<block_start>from_mod=prefix.group("mod")<for_stmt>item content[prefix.end():].split(",")<block_start>match=RE_PY_IMPORT_ITEM.match(item)<if_stmt>match<block_start>results.append(PyImport(from_mod=from_mod import_name=match.group("name") alias=match.group("alias")))<block_end><block_end><block_end><block_end><return>results<block_end>RE_PY_DELIM=r"(\s|[.,\(\)\[\]{}:=\+\-\*]|\Z)+"<def_stmt>search_symbol source symbols<block_start>"""Search symbols within a source, return matched positions."""<line_sep>source=source.split("\n")<if>isinstance(source str)<else>source<line_sep>rexpr=RE_PY_DELIM+"(?P<name>("<line_sep>rexpr<augadd>"|".join([re.escape(sym)<for>sym symbols])+"))"<line_sep>rexpr<augadd>RE_PY_DELIM<line_sep>rexpr=re.compile(rexpr)<line_sep>results=[]<for_stmt>line,content enumerate(source)<block_start>match=rexpr.search(content)<if_stmt>match<block_start>start,end=match.span("name")<line_sep>start_pos=Position(line start)<line_sep>end_pos=Position(line end)<line_sep>results.append(Range(start_pos end_pos))<block_end><block_end><return>results<block_end>RE_PY_NAMESPACE_PREFIX=re.compile(r"[a-zA-Z_][a-zA-Z0-9_.]+\Z")<line_sep>RE_PY_VAR_NAME=re.compile(r"[a-zA-Z0-9_.]+")<def_stmt>extract_symbol source pos:Position<block_start>"""Find the complete expression, include namespace prefix"""<line_sep>source=source.split("\n")<if>isinstance(source str)<else>source<line_sep>content=source[pos.line]<line_sep>mprefix=RE_PY_NAMESPACE_PREFIX.search(content 0 
pos.character)<line_sep>start=mprefix.start()<if>mprefix<else>pos.character<line_sep>mvar=RE_PY_VAR_NAME.match(content pos.character)<line_sep>end=mvar.end()<if>mvar<else>pos.character<line_sep>value=content[start:end]<if_stmt>end<l>len(content)<and>content[end]<eq>"\""<block_start><return><none><block_end><return>Symbol(value)<block_end>
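A brief usage sketch for the matcher helpers above, in plain Python. The macro name, file path, and two-line snippet are invented for illustration.
# Two illustrative source lines: a registration macro and a Python import.
source = 'TVM_REGISTER_GLOBAL("relay.op.add")\nfrom tvm import relay as r\n'

# Build a matcher that turns every TVM_REGISTER_GLOBAL("key") into a Def pattern.
matcher = macro_matcher(
    ["TVM_REGISTER_GLOBAL"],
    fcreate=lambda key, path, rng, macro: Def(key=key, path=path, range=rng))
defs = matcher("src/op.cc", source)   # -> [Def(key="relay.op.add", ...)]

# Import discovery on the same snippet.
imports = find_py_imports(source)     # -> [PyImport(from_mod="tvm", import_name="relay", alias="r")]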
# # Copyright 2015 eNovance <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. <import_stmt>os<import_from_stmt>keystoneauth1 loading<as>ka_loading<import_from_stmt>keystoneclient.v3 client<as>ks_client_v3<import_from_stmt>oslo_config cfg<line_sep>DEFAULT_GROUP="service_credentials"<line_sep># List of group that can set auth_section to use a different # credentials section OVERRIDABLE_GROUPS=['gnocchi' 'zaqar' 'monasca']<def_stmt>get_session conf requests_session=<none> group=<none> timeout=<none><block_start>"""Get a ceilometer service credentials auth session."""<line_sep>group=group<or>DEFAULT_GROUP<line_sep>auth_plugin=ka_loading.load_auth_from_conf_options(conf group)<line_sep>kwargs={'auth':auth_plugin 'session':requests_session}<if_stmt>timeout<is><not><none><block_start>kwargs['timeout']=timeout<block_end>session=ka_loading.load_session_from_conf_options(conf group **kwargs)<line_sep><return>session<block_end><def_stmt>get_client conf trust_id=<none> requests_session=<none> group=DEFAULT_GROUP<block_start>"""Return a client for keystone v3 endpoint, optionally using a trust."""<line_sep>session=get_session(conf requests_session=requests_session group=group)<line_sep><return>ks_client_v3.Client(session=session trust_id=trust_id region_name=conf[group].region_name)<block_end><def_stmt>get_service_catalog client<block_start><return>client.session.auth.get_access(client.session).service_catalog<block_end><def_stmt>get_auth_token client<block_start><return>client.session.auth.get_access(client.session).auth_token<block_end>CLI_OPTS=[cfg.StrOpt('region-name' deprecated_group="DEFAULT" deprecated_name="os-region-name" default=os.environ.get('OS_REGION_NAME') help='Region name to use for OpenStack service endpoints.') cfg.StrOpt('interface' default=os.environ.get('OS_INTERFACE' os.environ.get('OS_ENDPOINT_TYPE' 'public')) deprecated_name="os-endpoint-type" choices=('public' 'internal' 'admin' 'auth' 'publicURL' 'internalURL' 'adminURL') help='Type of endpoint in Identity service catalog to use for '<concat>'communication with OpenStack services.') ]<def_stmt>register_keystoneauth_opts conf<block_start>_register_keystoneauth_group(conf DEFAULT_GROUP)<for_stmt>group OVERRIDABLE_GROUPS<block_start>_register_keystoneauth_group(conf group)<line_sep>conf.set_default('auth_section' DEFAULT_GROUP group=group)<block_end><block_end><def_stmt>_register_keystoneauth_group conf group<block_start>ka_loading.register_auth_conf_options(conf group)<line_sep>ka_loading.register_session_conf_options(conf group deprecated_opts={'cacert':[cfg.DeprecatedOpt('os-cacert' group=group) cfg.DeprecatedOpt('os-cacert' group="DEFAULT")]})<line_sep>conf.register_opts(CLI_OPTS group=group)<block_end><def_stmt>post_register_keystoneauth_opts conf<block_start><for_stmt>group OVERRIDABLE_GROUPS<block_start><if_stmt>conf[group].auth_section<ne>DEFAULT_GROUP# NOTE(sileht): We register this again after the auth_section have # been read from the configuration file <block_start>_register_keystoneauth_group(conf 
conf[group].auth_section)<block_end><block_end><block_end>
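A minimal usage sketch in plain Python. It assumes a configuration file with a [service_credentials] auth section; without real credentials the client call would fail, so this only illustrates the wiring order (register, parse, post-register, then build the client).
from oslo_config import cfg

conf = cfg.ConfigOpts()
register_keystoneauth_opts(conf)
conf([], project='ceilometer')        # parse CLI/config; auth settings come from the config file
post_register_keystoneauth_opts(conf)

ks = get_client(conf)                 # keystone v3 client for the service_credentials group
token = get_auth_token(ks)
catalog = get_service_catalog(ks)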
# -*- coding: utf-8 -*- <import_from_future_stmt> unicode_literals print_function with_statement<line_sep>__version__='6.0.4'<line_sep>__author__='<NAME> - Buzon, Inc.'<line_sep>__license__='MIT'<line_sep># v <import_from_stmt>.zipfly ZipFly<import_from_stmt>.zipfly LargePredictionSize<import_from_stmt>.response FileResponse<import_from_stmt>.api Buffer<line_sep>
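A small usage sketch in plain Python; the file paths are placeholders. It follows zipfly's paths-of-dicts convention ('fs' is the on-disk path, 'n' the name inside the archive).
from zipfly import ZipFly

paths = [{'fs': '/tmp/report.pdf', 'n': 'report.pdf'}]
zfly = ZipFly(paths=paths)

# Stream the archive to disk chunk by chunk instead of building it in memory.
with open('/tmp/archive.zip', 'wb') as out:
    for chunk in zfly.generator():
        out.write(chunk)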
__all__=['ImportancesComposite' 'ClassifierModelStatsComposite' 'RegressionModelStatsComposite' 'IndividualPredictionsComposite' 'ShapDependenceComposite' 'ShapInteractionsComposite' 'DecisionTreesComposite' 'WhatIfComposite' 'SimplifiedClassifierComposite' 'SimplifiedRegressionComposite' ]<import_stmt>dash_bootstrap_components<as>dbc<import_stmt>dash_html_components<as>html<import_from_stmt>..explainers RandomForestExplainer XGBExplainer<import_from_stmt>..dashboard_methods *<import_from_stmt>.classifier_components *<import_from_stmt>.regression_components *<import_from_stmt>.overview_components *<import_from_stmt>.connectors *<import_from_stmt>.shap_components *<import_from_stmt>.decisiontree_components *<import_from_stmt>.. to_html<class_stmt>ImportancesComposite(ExplainerComponent)<block_start><def_stmt>__init__ self explainer title="Feature Importances" name=<none> hide_title=<false> hide_importances=<false> hide_descriptions=<false> hide_selector=<true> **kwargs<block_start>"""Overview tab of feature importances Can show both permutation importances and mean absolute shap values. Args: explainer (Explainer): explainer object constructed with either ClassifierExplainer() or RegressionExplainer() title (str, optional): Title of tab or page. Defaults to "Feature Importances". name (str, optional): unique name to add to Component elements. If None then random uuid is generated to make sure it's unique. Defaults to None. hide_title (bool, optional): hide the title hide_importances (bool, optional): hide the ImportancesComponent hide_descriptions (bool, optional): hide the FeatureDescriptionsComponent hide_selector (bool, optional): hide the post label selector. Defaults to True. """<line_sep>super().__init__(explainer title name)<line_sep>self.importances=ImportancesComponent(explainer name=self.name+"0" hide_selector=hide_selector **kwargs)<line_sep>self.feature_descriptions=FeatureDescriptionsComponent(explainer **kwargs)<if_stmt><not>self.explainer.descriptions<block_start>self.hide_descriptions=<true><block_end><block_end><def_stmt>layout self<block_start><return>html.Div([dbc.Row([make_hideable(dbc.Col([html.H2(self.title)]) hide=self.hide_title) ]) dbc.Row([make_hideable(dbc.Col([self.importances.layout() ]) hide=self.hide_importances) ] style=dict(margin=25)) dbc.Row([make_hideable(dbc.Col([self.feature_descriptions.layout() ]) hide=self.hide_descriptions) ] style=dict(margin=25))])<block_end><def_stmt>to_html self state_dict=<none> add_header=<true><block_start>html=to_html.hide(to_html.title(self.title) hide=self.hide_title)<line_sep>html<augadd>to_html.card_rows([to_html.hide(self.importances.to_html(state_dict add_header=<false>) self.hide_importances)] [to_html.hide(self.feature_descriptions.to_html(state_dict add_header=<false>) self.hide_descriptions)] )<if_stmt>add_header<block_start><return>to_html.add_header(html)<block_end><return>html<block_end><block_end><class_stmt>ClassifierModelStatsComposite(ExplainerComponent)<block_start><def_stmt>__init__ self explainer title="Classification Stats" name=<none> hide_title=<true> hide_selector=<true> hide_globalcutoff=<false> hide_modelsummary=<false> hide_confusionmatrix=<false> hide_precision=<false> hide_classification=<false> hide_rocauc=<false> hide_prauc=<false> hide_liftcurve=<false> hide_cumprecision=<false> pos_label=<none> bin_size=0.1 quantiles=10 cutoff=0.5 **kwargs<block_start>"""Composite of multiple classifier related components: - precision graph - confusion matrix - lift curve - classification graph - roc auc 
graph - pr auc graph Args: explainer (Explainer): explainer object constructed with either ClassifierExplainer() or RegressionExplainer() title (str, optional): Title of tab or page. Defaults to "Decision Trees". name (str, optional): unique name to add to Component elements. If None then random uuid is generated to make sure it's unique. Defaults to None. hide_title (bool, optional): hide title. Defaults to True. hide_selector (bool, optional): hide all pos label selectors. Defaults to True. hide_globalcutoff (bool, optional): hide CutoffPercentileComponent hide_modelsummary (bool, optional): hide ClassifierModelSummaryComponent hide_confusionmatrix (bool, optional): hide ConfusionMatrixComponent hide_precision (bool, optional): hide PrecisionComponent hide_classification (bool, optional): hide ClassificationComponent hide_rocauc (bool, optional): hide RocAucComponent hide_prauc (bool, optional): hide PrAucComponent hide_liftcurve (bool, optional): hide LiftCurveComponent hide_cumprecision (bool, optional): hide CumulativePrecisionComponent pos_label ({int, str}, optional): initial pos label. Defaults to explainer.pos_label bin_size (float, optional): bin_size for precision plot. Defaults to 0.1. quantiles (int, optional): number of quantiles for precision plot. Defaults to 10. cutoff (float, optional): initial cutoff. Defaults to 0.5. """<line_sep>super().__init__(explainer title name)<line_sep>self.summary=ClassifierModelSummaryComponent(explainer name=self.name+"0" hide_selector=hide_selector pos_label=pos_label **kwargs)<line_sep>self.precision=PrecisionComponent(explainer name=self.name+"1" hide_selector=hide_selector pos_label=pos_label **kwargs)<line_sep>self.confusionmatrix=ConfusionMatrixComponent(explainer name=self.name+"2" hide_selector=hide_selector pos_label=pos_label **kwargs)<line_sep>self.cumulative_precision=CumulativePrecisionComponent(explainer name=self.name+"3" hide_selector=hide_selector pos_label=pos_label **kwargs)<line_sep>self.liftcurve=LiftCurveComponent(explainer name=self.name+"4" hide_selector=hide_selector pos_label=pos_label **kwargs)<line_sep>self.classification=ClassificationComponent(explainer name=self.name+"5" hide_selector=hide_selector pos_label=pos_label **kwargs)<line_sep>self.rocauc=RocAucComponent(explainer name=self.name+"6" hide_selector=hide_selector pos_label=pos_label **kwargs)<line_sep>self.prauc=PrAucComponent(explainer name=self.name+"7" hide_selector=hide_selector pos_label=pos_label **kwargs)<line_sep>self.cutoffpercentile=CutoffPercentileComponent(explainer name=self.name+"8" hide_selector=hide_selector pos_label=pos_label **kwargs)<line_sep>self.cutoffconnector=CutoffConnector(self.cutoffpercentile [self.summary self.precision self.confusionmatrix self.liftcurve self.cumulative_precision self.classification self.rocauc self.prauc])<block_end><def_stmt>layout self<block_start><return>html.Div([dbc.Row([make_hideable(dbc.Col([html.H2('Model Performance:')]) hide=self.hide_title) ]) dbc.Row([make_hideable(dbc.Col([self.cutoffpercentile.layout() ]) hide=self.hide_globalcutoff) ] style=dict(marginTop=25 marginBottom=25)) dbc.CardDeck([make_hideable(self.summary.layout() hide=self.hide_modelsummary) make_hideable(self.confusionmatrix.layout() hide=self.hide_confusionmatrix) ] style=dict(marginBottom=25)) dbc.CardDeck([make_hideable(self.precision.layout() hide=self.hide_precision) make_hideable(self.classification.layout() hide=self.hide_classification)] style=dict(marginBottom=25)) dbc.CardDeck([make_hideable(self.rocauc.layout() 
hide=self.hide_rocauc) make_hideable(self.prauc.layout() hide=self.hide_prauc) ] style=dict(marginBottom=25)) dbc.CardDeck([make_hideable(self.liftcurve.layout() self.hide_liftcurve) make_hideable(self.cumulative_precision.layout() self.hide_cumprecision) ] style=dict(marginBottom=25)) ])<block_end><def_stmt>to_html self state_dict=<none> add_header=<true><block_start>html=to_html.hide(to_html.title(self.title) hide=self.hide_title)<line_sep>html<augadd>to_html.card_rows([to_html.hide(self.summary.to_html(state_dict add_header=<false>) hide=self.hide_modelsummary) to_html.hide(self.confusionmatrix.to_html(state_dict add_header=<false>) hide=self.hide_confusionmatrix)] [to_html.hide(self.precision.to_html(state_dict add_header=<false>) hide=self.hide_precision) to_html.hide(self.classification.to_html(state_dict add_header=<false>) hide=self.hide_classification)] [to_html.hide(self.rocauc.to_html(state_dict add_header=<false>) hide=self.hide_rocauc) to_html.hide(self.prauc.to_html(state_dict add_header=<false>) hide=self.hide_prauc)] [to_html.hide(self.liftcurve.to_html(state_dict add_header=<false>) hide=self.hide_liftcurve) to_html.hide(self.cumulative_precision.to_html(state_dict add_header=<false>) hide=self.hide_cumprecision)])<if_stmt>add_header<block_start><return>to_html.add_header(html)<block_end><return>html<block_end><block_end><class_stmt>RegressionModelStatsComposite(ExplainerComponent)<block_start><def_stmt>__init__ self explainer title="Regression Stats" name=<none> hide_title=<true> hide_modelsummary=<false> hide_predsvsactual=<false> hide_residuals=<false> hide_regvscol=<false> logs=<false> pred_or_actual="vs_pred" residuals='difference' col=<none> **kwargs<block_start>"""Composite for displaying multiple regression related graphs: - predictions vs actual plot - residual plot - residuals vs feature Args: explainer (Explainer): explainer object constructed with either ClassifierExplainer() or RegressionExplainer() title (str, optional): Title of tab or page. Defaults to "Regression Stats". name (str, optional): unique name to add to Component elements. If None then random uuid is generated to make sure it's unique. Defaults to None. hide_title (bool, optional): hide title. Defaults to True. hide_modelsummary (bool, optional): hide RegressionModelSummaryComponent hide_predsvsactual (bool, optional): hide PredictedVsActualComponent hide_residuals (bool, optional): hide ResidualsComponent hide_regvscol (bool, optional): hide RegressionVsColComponent logs (bool, optional): Use log axis. Defaults to False. pred_or_actual (str, optional): plot residuals vs predictions or vs y (actual). Defaults to "vs_pred". residuals (str, {'difference', 'ratio', 'log-ratio'} optional): How to calcualte residuals. Defaults to 'difference'. col ({str, int}, optional): Feature to use for residuals plot. Defaults to None. 
"""<line_sep>super().__init__(explainer title name)<assert_stmt>pred_or_actual<in>['vs_actual' 'vs_pred'] "pred_or_actual should be 'vs_actual' or 'vs_pred'!"<line_sep>self.modelsummary=RegressionModelSummaryComponent(explainer name=self.name+"0" **kwargs)<line_sep>self.preds_vs_actual=PredictedVsActualComponent(explainer name=self.name+"0" logs=logs **kwargs)<line_sep>self.residuals=ResidualsComponent(explainer name=self.name+"1" pred_or_actual=pred_or_actual residuals=residuals **kwargs)<line_sep>self.reg_vs_col=RegressionVsColComponent(explainer name=self.name+"2" logs=logs **kwargs)<block_end><def_stmt>layout self<block_start><return>html.Div([dbc.Row([make_hideable(dbc.Col([html.H2('Model Performance:')]) hide=self.hide_title)]) dbc.CardDeck([make_hideable(self.modelsummary.layout() hide=self.hide_modelsummary) make_hideable(self.preds_vs_actual.layout() hide=self.hide_predsvsactual) ] style=dict(margin=25)) dbc.CardDeck([make_hideable(self.residuals.layout() hide=self.hide_residuals) make_hideable(self.reg_vs_col.layout() hide=self.hide_regvscol) ] style=dict(margin=25))])<block_end><def_stmt>to_html self state_dict=<none> add_header=<true><block_start>html=to_html.hide(to_html.title(self.title) hide=self.hide_title)<line_sep>html<augadd>to_html.card_rows([to_html.hide(self.modelsummary.to_html(state_dict add_header=<false>) hide=self.hide_modelsummary) to_html.hide(self.preds_vs_actual.to_html(state_dict add_header=<false>) hide=self.hide_predsvsactual)] [to_html.hide(self.residuals.to_html(state_dict add_header=<false>) hide=self.hide_residuals) to_html.hide(self.reg_vs_col.to_html(state_dict add_header=<false>) hide=self.hide_regvscol)] )<if_stmt>add_header<block_start><return>to_html.add_header(html)<block_end><return>html<block_end><block_end><class_stmt>IndividualPredictionsComposite(ExplainerComponent)<block_start><def_stmt>__init__ self explainer title="Individual Predictions" name=<none> hide_predindexselector=<false> hide_predictionsummary=<false> hide_contributiongraph=<false> hide_pdp=<false> hide_contributiontable=<false> hide_title=<false> hide_selector=<true> index_check=<true> **kwargs<block_start>"""Composite for a number of component that deal with individual predictions: - random index selector - prediction summary - shap contributions graph - shap contribution table - pdp graph Args: explainer (Explainer): explainer object constructed with either ClassifierExplainer() or RegressionExplainer() title (str, optional): Title of tab or page. Defaults to "Individual Predictions". name (str, optional): unique name to add to Component elements. If None then random uuid is generated to make sure it's unique. Defaults to None. hide_predindexselector (bool, optional): hide ClassifierRandomIndexComponent or RegressionRandomIndexComponent hide_predictionsummary (bool, optional): hide ClassifierPredictionSummaryComponent or RegressionPredictionSummaryComponent hide_contributiongraph (bool, optional): hide ShapContributionsGraphComponent hide_pdp (bool, optional): hide PdpComponent hide_contributiontable (bool, optional): hide ShapContributionsTableComponent hide_title (bool, optional): hide title. Defaults to False. index_check (bool, optional): only pass valid indexes from random index selector to feature input. Defaults to True. hide_selector(bool, optional): hide all pos label selectors. Defaults to True. 
"""<line_sep>super().__init__(explainer title name)<if_stmt>self.explainer.is_classifier<block_start>self.index=ClassifierRandomIndexComponent(explainer name=self.name+"0" hide_selector=hide_selector **kwargs)<line_sep>self.summary=ClassifierPredictionSummaryComponent(explainer name=self.name+"1" hide_selector=hide_selector **kwargs)<block_end><elif_stmt>self.explainer.is_regression<block_start>self.index=RegressionRandomIndexComponent(explainer name=self.name+"0" hide_selector=hide_selector **kwargs)<line_sep>self.summary=RegressionPredictionSummaryComponent(explainer name=self.name+"1" hide_selector=hide_selector **kwargs)<block_end>self.contributions=ShapContributionsGraphComponent(explainer name=self.name+"2" hide_selector=hide_selector **kwargs)<line_sep>self.pdp=PdpComponent(explainer name=self.name+"3" hide_selector=hide_selector **kwargs)<line_sep>self.contributions_list=ShapContributionsTableComponent(explainer name=self.name+"4" hide_selector=hide_selector **kwargs)<line_sep>self.index_connector=IndexConnector(self.index [self.summary self.contributions self.pdp self.contributions_list] explainer=explainer<if>index_check<else><none>)<block_end><def_stmt>layout self<block_start><return>dbc.Container([dbc.CardDeck([make_hideable(self.index.layout() hide=self.hide_predindexselector) make_hideable(self.summary.layout() hide=self.hide_predictionsummary) ] style=dict(marginBottom=25 marginTop=25)) dbc.CardDeck([make_hideable(self.contributions.layout() hide=self.hide_contributiongraph) make_hideable(self.pdp.layout() hide=self.hide_pdp) ] style=dict(marginBottom=25 marginTop=25)) dbc.Row([dbc.Col([make_hideable(self.contributions_list.layout() hide=self.hide_contributiontable) ] md=6) dbc.Col([html.Div([]) ] md=6) ])] fluid=<true>)<block_end><def_stmt>to_html self state_dict=<none> add_header=<true><block_start>html=to_html.title(self.title)<line_sep>html<augadd>to_html.card_rows([to_html.hide(self.index.to_html(state_dict add_header=<false>) self.hide_predindexselector) to_html.hide(self.summary.to_html(state_dict add_header=<false>) self.hide_predictionsummary)] [to_html.hide(self.contributions.to_html(state_dict add_header=<false>) self.hide_contributiongraph) to_html.hide(self.pdp.to_html(state_dict add_header=<false>) self.hide_pdp)] [to_html.hide(self.contributions_list.to_html(state_dict add_header=<false>) self.hide_contributiontable)])<if_stmt>add_header<block_start><return>to_html.add_header(html)<block_end><return>html<block_end><block_end><class_stmt>WhatIfComposite(ExplainerComponent)<block_start><def_stmt>__init__ self explainer title="What if..." name=<none> hide_whatifindexselector=<false> hide_inputeditor=<false> hide_whatifprediction=<false> hide_whatifcontributiongraph=<false> hide_whatifpdp=<false> hide_whatifcontributiontable=<false> hide_title=<true> hide_selector=<true> index_check=<true> n_input_cols=4 sort='importance' **kwargs<block_start>"""Composite for the whatif component: Args: explainer (Explainer): explainer object constructed with either ClassifierExplainer() or RegressionExplainer() title (str, optional): Title of tab or page. Defaults to "Individual Predictions". name (str, optional): unique name to add to Component elements. If None then random uuid is generated to make sure it's unique. Defaults to None. hide_title (bool, optional): hide title. Defaults to True. hide_selector(bool, optional): hide all pos label selectors. Defaults to True. 
hide_whatifindexselector (bool, optional): hide ClassifierRandomIndexComponent or RegressionRandomIndexComponent hide_inputeditor (bool, optional): hide FeatureInputComponent hide_whatifprediction (bool, optional): hide PredictionSummaryComponent hide_whatifcontributiongraph (bool, optional): hide ShapContributionsGraphComponent hide_whatifcontributiontable (bool, optional): hide ShapContributionsTableComponent hide_whatifpdp (bool, optional): hide PdpComponent index_check (bool, optional): only pass valid indexes from random index selector to feature input. Defaults to True. n_input_cols (int, optional): number of columns to divide the feature inputs into. Defaults to 4. sort ({'abs', 'high-to-low', 'low-to-high', 'importance'}, optional): sorting of shap values. Defaults to 'importance'. """<line_sep>super().__init__(explainer title name)<if_stmt>'hide_whatifcontribution'<in>kwargs<block_start>print("Warning: hide_whatifcontribution will be deprecated, use hide_whatifcontributiongraph instead!")<line_sep>self.hide_whatifcontributiongraph=kwargs['hide_whatifcontribution']<block_end>self.input=FeatureInputComponent(explainer name=self.name+"0" hide_selector=hide_selector n_input_cols=self.n_input_cols **update_params(kwargs hide_index=<true>))<if_stmt>self.explainer.is_classifier<block_start>self.index=ClassifierRandomIndexComponent(explainer name=self.name+"1" hide_selector=hide_selector **kwargs)<line_sep>self.prediction=ClassifierPredictionSummaryComponent(explainer name=self.name+"2" feature_input_component=self.input hide_star_explanation=<true> hide_selector=hide_selector **kwargs)<block_end><elif_stmt>self.explainer.is_regression<block_start>self.index=RegressionRandomIndexComponent(explainer name=self.name+"1" **kwargs)<line_sep>self.prediction=RegressionPredictionSummaryComponent(explainer name=self.name+"2" feature_input_component=self.input **kwargs)<block_end>self.contribgraph=ShapContributionsGraphComponent(explainer name=self.name+"3" feature_input_component=self.input hide_selector=hide_selector sort=sort **kwargs)<line_sep>self.contribtable=ShapContributionsTableComponent(explainer name=self.name+"4" feature_input_component=self.input hide_selector=hide_selector sort=sort **kwargs)<line_sep>self.pdp=PdpComponent(explainer name=self.name+"5" feature_input_component=self.input hide_selector=hide_selector **kwargs)<line_sep>self.index_connector=IndexConnector(self.index self.input explainer=explainer<if>index_check<else><none>)<block_end><def_stmt>layout self<block_start><return>dbc.Container([dbc.Row([make_hideable(dbc.Col([html.H1(self.title)]) hide=self.hide_title) ]) dbc.Row([make_hideable(dbc.Col([self.index.layout() ] md=7) hide=self.hide_whatifindexselector) make_hideable(dbc.Col([self.prediction.layout() ] md=5) hide=self.hide_whatifprediction) ] style=dict(marginBottom=15 marginTop=15)) dbc.CardDeck([make_hideable(self.input.layout() hide=self.hide_inputeditor) ] style=dict(marginBottom=15 marginTop=15)) dbc.CardDeck([make_hideable(self.contribgraph.layout() hide=self.hide_whatifcontributiongraph) make_hideable(self.pdp.layout() hide=self.hide_whatifpdp) ] style=dict(marginBottom=15 marginTop=15)) dbc.Row([make_hideable(dbc.Col([self.contribtable.layout()] md=6) hide=self.hide_whatifcontributiontable) dbc.Col([] md=6) ])] fluid=<true>)<block_end><def_stmt>to_html self state_dict=<none> add_header=<true><block_start>html=to_html.title(self.title)<line_sep>html<augadd>to_html.card_rows([to_html.hide(self.index.to_html(state_dict add_header=<false>) 
self.hide_whatifindexselector) to_html.hide(self.prediction.to_html(state_dict add_header=<false>) self.hide_whatifprediction)] [to_html.hide(self.input.to_html(state_dict add_header=<false>) self.hide_inputeditor)] [to_html.hide(self.contribgraph.to_html(state_dict add_header=<false>) self.hide_whatifcontributiongraph) to_html.hide(self.pdp.to_html(state_dict add_header=<false>) self.hide_whatifpdp)] [to_html.hide(self.contribtable.to_html(state_dict add_header=<false>) self.hide_whatifcontributiontable)])<if_stmt>add_header<block_start><return>to_html.add_header(html)<block_end><return>html<block_end><block_end><class_stmt>ShapDependenceComposite(ExplainerComponent)<block_start><def_stmt>__init__ self explainer title='Feature Dependence' name=<none> hide_selector=<true> hide_shapsummary=<false> hide_shapdependence=<false> depth=<none> **kwargs<block_start>"""Composite of ShapSummary and ShapDependence component Args: explainer (Explainer): explainer object constructed with either ClassifierExplainer() or RegressionExplainer() title (str, optional): Title of tab or page. Defaults to "Feature Dependence". name (str, optional): unique name to add to Component elements. If None then random uuid is generated to make sure it's unique. Defaults to None. hide_selector (bool, optional): hide all pos label selectors. Defaults to True. hide_shapsummary (bool, optional): hide ShapSummaryComponent hide_shapdependence (bool, optional): ShapDependenceComponent depth (int, optional): Number of features to display. Defaults to None. """<line_sep>super().__init__(explainer title name)<line_sep>self.shap_summary=ShapSummaryComponent(self.explainer name=self.name+"0" **update_params(kwargs hide_selector=hide_selector depth=depth))<line_sep>self.shap_dependence=ShapDependenceComponent(self.explainer name=self.name+"1" hide_selector=hide_selector **kwargs)<line_sep>self.connector=ShapSummaryDependenceConnector(self.shap_summary self.shap_dependence)<block_end><def_stmt>layout self<block_start><return>dbc.Container([dbc.CardDeck([make_hideable(self.shap_summary.layout() hide=self.hide_shapsummary) make_hideable(self.shap_dependence.layout() hide=self.hide_shapdependence) ] style=dict(marginTop=25)) ] fluid=<true>)<block_end><def_stmt>to_html self state_dict=<none> add_header=<true><block_start>html=to_html.title(self.title)<line_sep>html<augadd>to_html.card_rows([to_html.hide(self.shap_summary.to_html(state_dict add_header=<false>) self.hide_shapsummary) to_html.hide(self.shap_dependence.to_html(state_dict add_header=<false>) self.hide_shapdependence)] )<if_stmt>add_header<block_start><return>to_html.add_header(html)<block_end><return>html<block_end><block_end><class_stmt>ShapInteractionsComposite(ExplainerComponent)<block_start><def_stmt>__init__ self explainer title='Feature Interactions' name=<none> hide_selector=<true> hide_interactionsummary=<false> hide_interactiondependence=<false> depth=<none> **kwargs<block_start>"""Composite of InteractionSummaryComponent and InteractionDependenceComponent Args: explainer (Explainer): explainer object constructed with either ClassifierExplainer() or RegressionExplainer() title (str, optional): Title of tab or page. Defaults to "Feature Interactions". name (str, optional): unique name to add to Component elements. If None then random uuid is generated to make sure it's unique. Defaults to None. hide_selector (bool, optional): hide all pos label selectors. Defaults to True. 
hide_interactionsummary (bool, optional): hide InteractionSummaryComponent hide_interactiondependence (bool, optional): hide InteractionDependenceComponent depth (int, optional): Initial number of features to display. Defaults to None. """<line_sep>super().__init__(explainer title name)<line_sep>self.interaction_summary=InteractionSummaryComponent(explainer name=self.name+"0" hide_selector=hide_selector depth=depth **kwargs)<line_sep>self.interaction_dependence=InteractionDependenceComponent(explainer name=self.name+"1" hide_selector=hide_selector **kwargs)<line_sep>self.connector=InteractionSummaryDependenceConnector(self.interaction_summary self.interaction_dependence)<block_end><def_stmt>layout self<block_start><return>dbc.Container([dbc.CardDeck([make_hideable(self.interaction_summary.layout() hide=self.hide_interactionsummary) make_hideable(self.interaction_dependence.layout() hide=self.hide_interactiondependence) ] style=dict(marginTop=25))] fluid=<true>)<block_end><def_stmt>to_html self state_dict=<none> add_header=<true><block_start>html=to_html.title(self.title)<line_sep>html<augadd>to_html.card_rows([to_html.hide(self.interaction_summary.to_html(state_dict add_header=<false>) self.hide_interactionsummary) to_html.hide(self.interaction_dependence.to_html(state_dict add_header=<false>) self.hide_interactiondependence)] )<if_stmt>add_header<block_start><return>to_html.add_header(html)<block_end><return>html<block_end><block_end><class_stmt>DecisionTreesComposite(ExplainerComponent)<block_start><def_stmt>__init__ self explainer title="Decision Trees" name=<none> hide_treeindexselector=<false> hide_treesgraph=<false> hide_treepathtable=<false> hide_treepathgraph=<false> hide_selector=<true> index_check=<true> **kwargs<block_start>"""Composite of decision tree related components: - index selector - individual decision trees barchart - decision path table - deciion path graph Args: explainer (Explainer): explainer object constructed with either RandomForestClassifierExplainer() or RandomForestRegressionExplainer() title (str, optional): Title of tab or page. Defaults to "Decision Trees". name (str, optional): unique name to add to Component elements. If None then random uuid is generated to make sure it's unique. Defaults to None. hide_treeindexselector (bool, optional): hide ClassifierRandomIndexComponent or RegressionRandomIndexComponent hide_treesgraph (bool, optional): hide DecisionTreesComponent hide_treepathtable (bool, optional): hide DecisionPathTableComponent hide_treepathgraph (bool, optional): DecisionPathGraphComponent hide_selector (bool, optional): hide all pos label selectors. Defaults to True. index_check (bool, optional): only pass valid indexes from random index selector to feature input. Defaults to True. 
"""<line_sep>super().__init__(explainer title name)<line_sep>self.trees=DecisionTreesComponent(explainer name=self.name+"0" hide_selector=hide_selector **kwargs)<line_sep>self.decisionpath_table=DecisionPathTableComponent(explainer name=self.name+"1" hide_selector=hide_selector **kwargs)<if_stmt>explainer.is_classifier<block_start>self.index=ClassifierRandomIndexComponent(explainer name=self.name+"2" hide_selector=hide_selector **kwargs)<block_end><elif_stmt>explainer.is_regression<block_start>self.index=RegressionRandomIndexComponent(explainer name=self.name+"2" **kwargs)<block_end>self.decisionpath_graph=DecisionPathGraphComponent(explainer name=self.name+"3" hide_selector=hide_selector **kwargs)<line_sep>self.index_connector=IndexConnector(self.index [self.trees self.decisionpath_table self.decisionpath_graph] explainer=explainer<if>index_check<else><none>)<line_sep>self.highlight_connector=HighlightConnector(self.trees [self.decisionpath_table self.decisionpath_graph])<block_end><def_stmt>layout self<block_start><return>html.Div([dbc.Row([make_hideable(dbc.Col([self.index.layout() ]) hide=self.hide_treeindexselector) ] style=dict(margin=25)) dbc.Row([make_hideable(dbc.Col([self.trees.layout() ]) hide=self.hide_treesgraph) ] style=dict(margin=25)) dbc.Row([make_hideable(dbc.Col([self.decisionpath_table.layout() ]) hide=self.hide_treepathtable) ] style=dict(margin=25)) dbc.Row([make_hideable(dbc.Col([self.decisionpath_graph.layout()]) hide=self.hide_treepathgraph) ] style=dict(margin=25)) ])<block_end><def_stmt>to_html self state_dict=<none> add_header=<true><block_start>html=to_html.title(self.title)<line_sep>html<augadd>to_html.card_rows([to_html.hide(self.index.to_html(state_dict add_header=<false>) self.hide_treeindexselector)] [to_html.hide(self.trees.to_html(state_dict add_header=<false>) self.hide_treesgraph)] [to_html.hide(self.decisionpath_table.to_html(state_dict add_header=<false>) self.hide_treepathtable)] )<if_stmt>add_header<block_start><return>to_html.add_header(html)<block_end><return>html<block_end><block_end><class_stmt>SimplifiedClassifierComposite(ExplainerComponent)<block_start><def_stmt>__init__ self explainer title="Simple Classifier Explainer" name=<none> hide_title=<false> classifier_custom_component='roc_auc' hide_confusionmatrix=<false> hide_classifier_custom_component=<false> hide_shapsummary=<false> hide_shapdependence=<false> hide_predindexselector=<false> hide_predictionsummary=<false> hide_contributiongraph=<false> **kwargs<block_start>"""Composite of multiple classifier related components, on a single tab: - confusion matrix - one other model quality indicator: choose from pr auc graph, precision graph, lift curve, classification graph, or roc auc graph - shap importance - shap dependence - index selector - index prediction summary - index shap contribution graph Args: explainer (Explainer): explainer object constructed with either ClassifierExplainer() or RegressionExplainer() title (str, optional): Title of tab or page. Defaults to "Simple Classification Stats". name (str, optional): unique name to add to Component elements. If None then random uuid is generated to make sure it's unique. Defaults to None. hide_title (bool, optional): hide the title. Defaults to False. classifier_custom_component (str, optional): custom classifier quality indicator supported by the ClassifierExplainer object. Valid values are: 'roc_auc', 'metrics', pr_auc', 'precision_graph', 'lift_curve', 'classification'. Defaults to 'roc_auc'. 
hide_confusionmatrix (bool, optional): hide ConfusionMatrixComponent hide_classifier_custom_component (bool, optional): hide the chosen classifier_custom_component hide_shapsummary (bool, optional): hide ShapSummaryComponent hide_shapdependence (bool, optional): hide ShapDependenceComponent hide_predindexselector (bool, optional): hide ClassifierRandomIndexComponent or RegressionRandomIndexComponent hide_predictionsummary (bool, optional): hide ClassifierPredictionSummaryComponent or RegressionPredictionSummaryComponent hide_contributiongraph (bool, optional): hide ShapContributionsGraphComponent """<line_sep>super().__init__(explainer title=title name=name)<line_sep>self.confusionmatrix=ConfusionMatrixComponent(explainer **update_params(kwargs hide_percentage=<true> hide_selector=<true> hide_normalize=<true>))<line_sep># select custom classifier report metric <if_stmt>classifier_custom_component<eq>'metrics'<block_start>self.classifier_custom_component=ClassifierModelSummaryComponent(explainer **update_params(kwargs hide_selector=<true>))<block_end><elif_stmt>classifier_custom_component<eq>'pr_auc'<block_start>self.classifier_custom_component=PrAucComponent(explainer **update_params(kwargs hide_selector=<true>))<block_end><elif_stmt>classifier_custom_component<eq>'precision_graph'<block_start>self.classifier_custom_component=PrecisionComponent(explainer **update_params(kwargs hide_selector=<true>))<block_end><elif_stmt>classifier_custom_component<eq>'lift_curve'<block_start>self.classifier_custom_component=LiftCurveComponent(explainer **update_params(kwargs hide_selector=<true>))<block_end><elif_stmt>classifier_custom_component<eq>'classifiction'<block_start>self.classifier_custom_component=ClassificationComponent(explainer **update_params(kwargs hide_selector=<true>))<block_end><elif_stmt>classifier_custom_component<eq>'roc_auc'<block_start>self.classifier_custom_component=RocAucComponent(explainer **update_params(kwargs hide_selector=<true>))<block_end><else_stmt><block_start><raise>ValueError("ERROR: SimplifiedClassifierDashboard parameter classifier_custom_component "<concat>"should be in {'metrics', 'roc_auc', pr_auc', 'precision_graph', 'lift_curve', 'class_graph'} "<concat>f"but you passed {classifier_custom_component}!")<block_end># SHAP summary & dependence self.shap_summary=ShapSummaryComponent(explainer **update_params(kwargs title="Shap Feature Importances" hide_index=<true> hide_selector=<true> depth=<none> hide_depth=<true>))<line_sep>self.shap_dependence=ShapDependenceComponent(explainer **update_params(kwargs hide_selector=<true> hide_index=<true> color_col="no_color_col"))<line_sep># SHAP contribution, along with prediction summary self.index=ClassifierRandomIndexComponent(explainer hide_selector=<true> **kwargs)<line_sep>self.summary=ClassifierPredictionSummaryComponent(explainer hide_index=<true> hide_selector=<true> **kwargs)<line_sep>self.contributions=ShapContributionsGraphComponent(explainer hide_index=<true> hide_depth=<true> hide_selector=<true> **kwargs)<line_sep>self.cutoffconnector=CutoffConnector(self.confusionmatrix self.classifier_custom_component)<line_sep>self.connector=ShapSummaryDependenceConnector(self.shap_summary self.shap_dependence)<line_sep>self.index_connector=IndexConnector(self.index [self.summary self.contributions])<block_end><def_stmt>layout self<block_start><return>dbc.Container([dbc.Row([make_hideable(dbc.Col([html.H1(self.title id='simple-classifier-composite-title')]) hide=self.hide_title) ]) dbc.Row([dbc.Col([html.H2("Model performance") 
dbc.CardDeck([make_hideable(self.confusionmatrix.layout() hide=self.hide_confusionmatrix) make_hideable(self.classifier_custom_component.layout() hide=self.hide_classifier_custom_component) ] style=dict(marginTop=25 marginBottom=25)) ])]) dbc.Row([dbc.Col([html.H3("SHAP values") dbc.CardDeck([make_hideable(self.shap_summary.layout() hide=self.hide_shapsummary) make_hideable(self.shap_dependence.layout() hide=self.hide_shapdependence) ] style=dict(marginTop=25 marginBottom=25)) ])]) dbc.Row([dbc.Col([html.H2("Individual predictions") dbc.CardDeck([make_hideable(self.index.layout() hide=self.hide_predindexselector) make_hideable(self.summary.layout() hide=self.hide_predictionsummary) ] style=dict(marginBottom=25 marginTop=25)) dbc.CardDeck([make_hideable(self.contributions.layout() hide=self.hide_contributiongraph) ] style=dict(marginBottom=25 marginTop=25))])]) ] fluid=<false>)<block_end><def_stmt>to_html self state_dict=<none> add_header=<true><block_start>html=to_html.hide(to_html.title(self.title) hide=self.hide_title)<line_sep>html<augadd>to_html.card_rows([to_html.hide(self.confusionmatrix.to_html(state_dict add_header=<false>) hide=self.hide_confusionmatrix) to_html.hide(self.classifier_custom_component.to_html(state_dict add_header=<false>) hide=self.hide_classifier_custom_component)] [to_html.hide(self.shap_summary.to_html(state_dict add_header=<false>) hide=self.hide_shapsummary) to_html.hide(self.shap_dependence.to_html(state_dict add_header=<false>) hide=self.hide_shapdependence)] [to_html.hide(self.index.to_html(state_dict add_header=<false>) hide=self.hide_predindexselector) to_html.hide(self.summary.to_html(state_dict add_header=<false>) hide=self.hide_predictionsummary)] [to_html.hide(self.contributions.to_html(state_dict add_header=<false>) hide=self.hide_contributiongraph)])<if_stmt>add_header<block_start><return>to_html.add_header(html)<block_end><return>html<block_end><block_end><class_stmt>SimplifiedRegressionComposite(ExplainerComponent)<block_start><def_stmt>__init__ self explainer title="Simple Regression Explainer" name=<none> hide_title=<false> regression_custom_component='vs_col' hide_goodness_of_fit=<false> hide_regression_custom_component=<false> hide_shapsummary=<false> hide_shapdependence=<false> hide_predindexselector=<false> hide_predictionsummary=<false> hide_contributiongraph=<false> **kwargs<block_start>"""Composite of multiple regression related components, on a single tab: - goodness of fit component - one other model quality indicator: 'metrics', 'residuals' or 'vs_col' - shap importance - shap dependence - index selector - index prediction summary - index shap contribution graph Args: explainer (Explainer): explainer object constructed with RegressionExplainer() title (str, optional): Title of tab or page. Defaults to "Simple Regression Explainer". name (str, optional): unique name to add to Component elements. If None then random uuid is generated to make sure it's unique. Defaults to None. hide_title (bool, optional): hide the title. Defaults to False. regression_custom_component (str, optional): custom regression quality indicator supported by the RegressionExplainer object. 
Valid values are: 'metrics', 'residuals' or 'vs_col' hide_goodness_of_fit (bool, optional): hide goodness of fit component hide_regression_custom_component (bool, optional): hide the chosen regression_custom_component hide_shapsummary (bool, optional): hide ShapSummaryComponent hide_shapdependence (bool, optional): hide ShapDependenceComponent hide_predindexselector (bool, optional): hide RegressionRandomIndexComponent hide_predictionsummary (bool, optional): hide RegressionPredictionSummaryComponent hide_contributiongraph (bool, optional): hide ShapContributionsGraphComponent """<line_sep>super().__init__(explainer title name)<line_sep>self.goodness_of_fit=PredictedVsActualComponent(explainer **kwargs)<line_sep># select custom regression report metric <if_stmt>regression_custom_component<eq>'metrics'<block_start>self.regression_custom_component=RegressionModelSummaryComponent(explainer **kwargs)<block_end><elif_stmt>regression_custom_component<eq>'residuals'<block_start>self.regression_custom_component=ResidualsComponent(explainer **kwargs)<block_end><elif_stmt>regression_custom_component<eq>'vs_col'<block_start>self.regression_custom_component=RegressionVsColComponent(explainer **update_params(kwargs display='predicted'))<block_end><else_stmt><block_start><raise>ValueError("ERROR: SimplifiedRegressionDashboard parameter "<concat>"regression_custom_component should be in {'metrics', 'residuals', 'vs_col'}"<concat>f" but you passed {regression_custom_component}!")<block_end># SHAP summary & dependence self.shap_summary=ShapSummaryComponent(explainer **update_params(kwargs title="Shap Feature Importances" hide_index=<true> depth=<none> hide_depth=<true>))<line_sep>self.shap_dependence=ShapDependenceComponent(explainer **update_params(kwargs hide_index=<true>))<line_sep># SHAP contribution, along with prediction summary self.index=RegressionRandomIndexComponent(explainer **kwargs)<line_sep>self.summary=RegressionPredictionSummaryComponent(explainer hide_index=<true> **kwargs)<line_sep>self.contributions=ShapContributionsGraphComponent(explainer **update_params(kwargs hide_index=<true> hide_depth=<true>))<line_sep>self.connector=ShapSummaryDependenceConnector(self.shap_summary self.shap_dependence)<line_sep>self.index_connector=IndexConnector(self.index [self.summary self.contributions])<block_end><def_stmt>layout self<block_start><return>dbc.Container([dbc.Row([make_hideable(dbc.Col([html.H1(self.title id='simple-regression-composite-title') ]) hide=self.hide_title) ]) dbc.Row([dbc.Col([html.H2("Model performance") dbc.CardDeck([make_hideable(self.goodness_of_fit.layout() hide=self.hide_goodness_of_fit) make_hideable(self.regression_custom_component.layout() hide=self.hide_regression_custom_component) ] style=dict(marginTop=25 marginBottom=25)) ])]) dbc.Row([dbc.Col([html.H3("SHAP values") dbc.CardDeck([make_hideable(self.shap_summary.layout() hide=self.hide_shapsummary) make_hideable(self.shap_dependence.layout() hide=self.hide_shapdependence) ] style=dict(marginTop=25 marginBottom=25)) ])]) dbc.Row([dbc.Col([html.H2("Individual predictions") dbc.CardDeck([make_hideable(self.index.layout() hide=self.hide_predindexselector) make_hideable(self.summary.layout() hide=self.hide_predictionsummary) ] style=dict(marginBottom=25 marginTop=25)) dbc.CardDeck([make_hideable(self.contributions.layout() hide=self.hide_contributiongraph) ] style=dict(marginBottom=25 marginTop=25))])])] fluid=<false>)<block_end><def_stmt>to_html self 
state_dict=<none> add_header=<true><block_start>html=to_html.hide(to_html.title(self.title) hide=self.hide_title)<line_sep>html<augadd>to_html.card_rows([to_html.hide(self.goodness_of_fit.to_html(state_dict add_header=<false>) hide=self.hide_goodness_of_fit) to_html.hide(self.regression_custom_component.to_html(state_dict add_header=<false>) hide=self.hide_regression_custom_component)] [to_html.hide(self.shap_summary.to_html(state_dict add_header=<false>) hide=self.hide_shapsummary) to_html.hide(self.shap_dependence.to_html(state_dict add_header=<false>) hide=self.hide_shapdependence)] [to_html.hide(self.index.to_html(state_dict add_header=<false>) hide=self.hide_predindexselector) to_html.hide(self.summary.to_html(state_dict add_header=<false>) hide=self.hide_predictionsummary)] [to_html.hide(self.contributions.to_html(state_dict add_header=<false>) hide=self.hide_contributiongraph)])<if_stmt>add_header<block_start><return>to_html.add_header(html)<block_end><return>html<block_end><block_end>
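A minimal usage sketch for the simplified composites defined above. It is illustrative only: it assumes the public explainerdashboard API (RegressionExplainer, ExplainerDashboard) plus scikit-learn for a toy model; the dataset, port and variable names are placeholders, not taken from the original source.

# Illustrative sketch only: assumes the usual explainerdashboard + scikit-learn APIs;
# SimplifiedRegressionComposite is the class defined in this module.
from sklearn.datasets import fetch_california_housing
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from explainerdashboard import RegressionExplainer, ExplainerDashboard

X, y = fetch_california_housing(return_X_y=True, as_frame=True)  # downloads a small dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

model = RandomForestRegressor(n_estimators=50, random_state=0).fit(X_train, y_train)
explainer = RegressionExplainer(model, X_test, y_test)

# Build the composite directly and hand it to the dashboard; 'residuals' swaps the
# default predicted-vs-column plot for the residuals component.
tab = SimplifiedRegressionComposite(explainer, regression_custom_component='residuals')
ExplainerDashboard(explainer, tab).run(port=8050)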
# -*- coding: utf-8 -*- # Copyright (c) 2013 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Test the lint module."""<import_from_future_stmt> print_function<import_stmt>collections<import_stmt>StringIO<import_from_stmt>chromite.cli.cros lint<import_from_stmt>chromite.lib cros_test_lib<line_sep># pylint: disable=protected-access <class_stmt>TestNode(object)<block_start>"""Object good enough to stand in for lint funcs"""<line_sep>Args=collections.namedtuple('Args' ('args' 'vararg' 'kwarg'))<line_sep>Arg=collections.namedtuple('Arg' ('name' ))<def_stmt>__init__ self doc='' fromlineno=0 path='foo.py' args=() vararg='' kwarg='' names=<none> lineno=0 name='module' display_type='Module' col_offset=<none><block_start><if_stmt>names<is><none><block_start>names=[('name' <none>)]<block_end>self.doc=doc<line_sep>self.lines=doc.split('\n')<line_sep>self.fromlineno=fromlineno<line_sep>self.lineno=lineno<line_sep>self.file=path<line_sep>self.args=self.Args(args=[self.Arg(name=x)<for>x args] vararg=vararg kwarg=kwarg)<line_sep>self.names=names<line_sep>self.name=name<line_sep>self._display_type=display_type<line_sep>self.col_offset=col_offset<block_end><def_stmt>argnames self<block_start><return>[arg.name<for>arg self.args.args]<block_end><def_stmt>display_type self<block_start><return>self._display_type<block_end><block_end><class_stmt>StatStub(object)<block_start>"""Dummy object to stand in for stat checks."""<def_stmt>__init__ self size=0 mode=0o644<block_start>self.st_size=size<line_sep>self.st_mode=mode<block_end><block_end><class_stmt>CheckerTestCase(cros_test_lib.TestCase)<block_start>"""Helpers for Checker modules"""<def_stmt>add_message self msg_id node=<none> line=<none> args=<none><block_start>"""Capture lint checks"""<line_sep># We include node.doc here explicitly so the pretty assert message # inclues it in the output automatically. doc=node.doc<if>node<else>''<line_sep>self.results.append((msg_id doc line args))<block_end><def_stmt>setUp self<block_start><assert_stmt>hasattr(self 'CHECKER') 'TestCase must set CHECKER'<line_sep>self.results=[]<line_sep>self.checker=self.CHECKER()<line_sep>self.checker.add_message=self.add_message<block_end><block_end><class_stmt>DocStringCheckerTest(CheckerTestCase)<block_start>"""Tests for DocStringChecker module"""<line_sep>GOOD_FUNC_DOCSTRINGS=('Some string' """Short summary Body of text. """ """line o text Body and comments on more than one line. Args: moo: cow Returns: some value Raises: something else """ """Short summary. Args: fat: cat Yields: a spoon """ """Don't flag args variables as sections. Args: return: Foo! """ )<line_sep>BAD_FUNC_DOCSTRINGS=(""" bad first line """ """The first line is good but the second one isn't """ """ whitespace is wrong""" """whitespace is wrong """ """ whitespace is wrong Multiline tickles differently. """ """Should be no trailing blank lines Returns: a value """ """ok line cuddled end""" """we want Args, not Arguments Arguments: some: arg """ """section order is wrong here Raises: It raised. 
Returns: It returned """ """sections are duplicated Returns: True Returns: or was it false """ """sections lack whitespace between them Args: foo: bar Returns: yeah """ """yields is misspelled Yield: a car """ """Section name has bad spacing Args:\x20\x20\x20 key: here """ """too many blank lines Returns: None """ """wrongly uses javadoc @returns None """ """the indentation is incorrect Args: some: day """ """the final indentation is incorrect Blah. """ )<line_sep># The current linter isn't good enough yet to detect these. TODO_BAD_FUNC_DOCSTRINGS=("""The returns section isn't a proper section Args: bloop: de returns something """ )<line_sep>CHECKER=lint.DocStringChecker<def_stmt>testGood_visit_function self<block_start>"""Allow known good docstrings"""<for_stmt>dc self.GOOD_FUNC_DOCSTRINGS<block_start>self.results=[]<line_sep>node=TestNode(doc=dc display_type=<none> col_offset=4)<line_sep>self.checker.visit_function(node)<line_sep>self.assertEqual(self.results [] msg='docstring was not accepted:\n"""%s"""'%dc)<block_end><block_end><def_stmt>testBad_visit_function self<block_start>"""Reject known bad docstrings"""<for_stmt>dc self.BAD_FUNC_DOCSTRINGS<block_start>self.results=[]<line_sep>node=TestNode(doc=dc display_type=<none> col_offset=4)<line_sep>self.checker.visit_function(node)<line_sep>self.assertNotEqual(self.results [] msg='docstring was not rejected:\n"""%s"""'%dc)<block_end><block_end><def_stmt>testSmoke_visit_module self<block_start>"""Smoke test for modules"""<line_sep>self.checker.visit_module(TestNode(doc='foo'))<line_sep>self.assertEqual(self.results [])<line_sep>self.checker.visit_module(TestNode(doc='' path='/foo/__init__.py'))<line_sep>self.assertEqual(self.results [])<block_end><def_stmt>testSmoke_visit_class self<block_start>"""Smoke test for classes"""<line_sep>self.checker.visit_class(TestNode(doc='bar'))<block_end><def_stmt>testGood_check_first_line self<block_start>"""Verify _check_first_line accepts good inputs"""<line_sep>docstrings=('Some string' )<for_stmt>dc docstrings<block_start>self.results=[]<line_sep>node=TestNode(doc=dc)<line_sep>self.checker._check_first_line(node node.lines)<line_sep>self.assertEqual(self.results [] msg='docstring was not accepted:\n"""%s"""'%dc)<block_end><block_end><def_stmt>testBad_check_first_line self<block_start>"""Verify _check_first_line rejects bad inputs"""<line_sep>docstrings=('\nSome string\n' )<for_stmt>dc docstrings<block_start>self.results=[]<line_sep>node=TestNode(doc=dc)<line_sep>self.checker._check_first_line(node node.lines)<line_sep>self.assertEqual(len(self.results) 1)<block_end><block_end><def_stmt>testGood_check_second_line_blank self<block_start>"""Verify _check_second_line_blank accepts good inputs"""<line_sep>docstrings=('Some string\n\nThis is the third line' 'Some string' )<for_stmt>dc docstrings<block_start>self.results=[]<line_sep>node=TestNode(doc=dc)<line_sep>self.checker._check_second_line_blank(node node.lines)<line_sep>self.assertEqual(self.results [] msg='docstring was not accepted:\n"""%s"""'%dc)<block_end><block_end><def_stmt>testBad_check_second_line_blank self<block_start>"""Verify _check_second_line_blank rejects bad inputs"""<line_sep>docstrings=('Some string\nnonempty secondline' )<for_stmt>dc docstrings<block_start>self.results=[]<line_sep>node=TestNode(doc=dc)<line_sep>self.checker._check_second_line_blank(node node.lines)<line_sep>self.assertEqual(len(self.results) 1)<block_end><block_end><def_stmt>testGoodFuncVarKwArg self<block_start>"""Check valid inputs for *args and 
**kwargs"""<for_stmt>vararg (<none> 'args' '_args')<block_start><for_stmt>kwarg (<none> 'kwargs' '_kwargs')<block_start>self.results=[]<line_sep>node=TestNode(vararg=vararg kwarg=kwarg)<line_sep>self.checker._check_func_signature(node)<line_sep>self.assertEqual(len(self.results) 0)<block_end><block_end><block_end><def_stmt>testMisnamedFuncVarKwArg self<block_start>"""Reject anything but *args and **kwargs"""<for_stmt>vararg ('arg' 'params' 'kwargs' '_moo')<block_start>self.results=[]<line_sep>node=TestNode(vararg=vararg)<line_sep>self.checker._check_func_signature(node)<line_sep>self.assertEqual(len(self.results) 1)<block_end><for_stmt>kwarg ('kwds' '_kwds' 'args' '_moo')<block_start>self.results=[]<line_sep>node=TestNode(kwarg=kwarg)<line_sep>self.checker._check_func_signature(node)<line_sep>self.assertEqual(len(self.results) 1)<block_end><block_end><def_stmt>testGoodFuncArgs self<block_start>"""Verify normal args in Args are allowed"""<line_sep>datasets=(("""args are correct, and cls is ignored Args: moo: cow """ ('cls' 'moo' ) <none> <none> ) ("""args are correct, and self is ignored Args: moo: cow *args: here """ ('self' 'moo' ) 'args' 'kwargs' ) ("""args are allowed to wrap Args: moo: a big fat cow that takes many lines to describe its fatness """ ('moo' ) <none> 'kwargs' ) )<for_stmt>dc,args,vararg,kwarg datasets<block_start>self.results=[]<line_sep>node=TestNode(doc=dc args=args vararg=vararg kwarg=kwarg)<line_sep>self.checker._check_all_args_in_doc(node node.lines)<line_sep>self.assertEqual(len(self.results) 0)<block_end><block_end><def_stmt>testBadFuncArgs self<block_start>"""Verify bad/missing args in Args are caught"""<line_sep>datasets=(("""missing 'bar' Args: moo: cow """ ('moo' 'bar' ) ) ("""missing 'cow' but has 'bloop' Args: moo: cow """ ('bloop' ) ) ("""too much space after colon Args: moo: cow """ ('moo' ) ) ("""not enough space after colon Args: moo:cow """ ('moo' ) ) )<for_stmt>dc,args datasets<block_start>self.results=[]<line_sep>node=TestNode(doc=dc args=args)<line_sep>self.checker._check_all_args_in_doc(node node.lines)<line_sep>self.assertEqual(len(self.results) 1)<block_end><block_end><block_end><class_stmt>ChromiteLoggingCheckerTest(CheckerTestCase)<block_start>"""Tests for ChromiteLoggingChecker module"""<line_sep>CHECKER=lint.ChromiteLoggingChecker<def_stmt>testLoggingImported self<block_start>"""Test that import logging is flagged."""<line_sep>node=TestNode(names=[('logging' <none>)] lineno=15)<line_sep>self.checker.visit_import(node)<line_sep>self.assertEqual(self.results [('R9301' '' 15 <none>)])<block_end><def_stmt>testLoggingNotImported self<block_start>"""Test that importing something else (not logging) is not flagged."""<line_sep>node=TestNode(names=[('myModule' <none>)] lineno=15)<line_sep>self.checker.visit_import(node)<line_sep>self.assertEqual(self.results [])<block_end><block_end><class_stmt>SourceCheckerTest(CheckerTestCase)<block_start>"""Tests for SourceChecker module"""<line_sep>CHECKER=lint.SourceChecker<def_stmt>_testShebang self shebangs exp mode<block_start>"""Helper for shebang tests"""<for_stmt>shebang shebangs<block_start>self.results=[]<line_sep>node=TestNode()<line_sep>stream=StringIO.StringIO(shebang)<line_sep>st=StatStub(size=len(shebang) mode=mode)<line_sep>self.checker._check_shebang(node stream st)<line_sep>self.assertEqual(len(self.results) exp msg='processing shebang failed: %r'%shebang)<block_end><block_end><def_stmt>testBadShebang self<block_start>"""Verify _check_shebang rejects bad 
shebangs"""<line_sep>shebangs=('#!/usr/bin/python\n' '#! /usr/bin/python2 \n' '#!/usr/bin/env python\n' '#! /usr/bin/env python2 \n' '#!/usr/bin/python2\n' )<line_sep>self._testShebang(shebangs 1 0o755)<block_end><def_stmt>testGoodShebangNoExec self<block_start>"""Verify _check_shebang rejects shebangs on non-exec files"""<line_sep>shebangs=('#!/usr/bin/env python2\n' '#!/usr/bin/env python3\n' )<line_sep>self._testShebang(shebangs 1 0o644)<block_end><def_stmt>testGoodShebang self<block_start>"""Verify _check_shebang accepts good shebangs"""<line_sep>shebangs=('#!/usr/bin/env python2\n' '#!/usr/bin/env python3\n' '#!/usr/bin/env python2\t\n' )<line_sep>self._testShebang(shebangs 0 0o755)<block_end><def_stmt>testGoodUnittestName self<block_start>"""Verify _check_module_name accepts good unittest names"""<line_sep>module_names=('lint_unittest' )<for_stmt>name module_names<block_start>node=TestNode(name=name)<line_sep>self.results=[]<line_sep>self.checker._check_module_name(node)<line_sep>self.assertEqual(len(self.results) 0)<block_end><block_end><def_stmt>testBadUnittestName self<block_start>"""Verify _check_module_name accepts good unittest names"""<line_sep>module_names=('lint_unittests' )<for_stmt>name module_names<block_start>node=TestNode(name=name)<line_sep>self.results=[]<line_sep>self.checker._check_module_name(node)<line_sep>self.assertEqual(len(self.results) 1)<block_end><block_end><block_end>
<import_stmt>argparse<import_from_stmt>todo.constants COMMANDS<import_from_stmt>todo.parser.base BaseParser<class_stmt>TodoParser(BaseParser)<block_start>""" usage: td [id] {get,delete,uncomplete,complete,edit} ... manage todo positional arguments: id the id of the todo {...} commands get (g) show todo's details delete (d) delete todo uncomplete (u) uncomplete todo complete (c) complete todo edit (e) edit todo optional arguments: -h, --help show this help message and exit `td [id]` defaults to `td [id] get` You don't have to specify the whole `id`, a substring will do """<line_sep>command=COMMANDS.GET_TODO<def_stmt>_add_arguments self<block_start>self.parser.add_argument("id" action="store" help="the id of the todo")<line_sep>subparser=self.parser.add_subparsers(dest="command" help="commands")<line_sep>get_parser=self._add_parser(subparser "get" aliases=["g"] help="get todo")<line_sep>get_parser.set_defaults(command=COMMANDS.GET_TODO)<line_sep>get_parser.usage="td [id]\n td [id] get\n td [id] g"<line_sep>get_parser.epilog="`td [id]` is the shortcut to `td [id] get`"<line_sep>get_parser.description="show todo's details"<line_sep>delete_parser=self._add_parser(subparser "delete" aliases=["d"] help="delete todo")<line_sep>delete_parser.add_argument("--yes" "-y" dest="skip_prompt" action="store_true" help="skip yes/no prompt when deleting todo" )<line_sep>delete_parser.set_defaults(command=COMMANDS.DELETE_TODO)<line_sep>delete_parser.usage="td [id] delete [--yes]\n td [id] d [-y]"<line_sep>delete_parser.description="delete todo"<line_sep>uncomplete_parser=self._add_parser(subparser "uncomplete" aliases=["u"] help="uncomplete todo")<line_sep>uncomplete_parser.set_defaults(command=COMMANDS.UNCOMPLETE_TODO)<line_sep>uncomplete_parser.usage="td [id] uncomplete\n td [id] u"<line_sep>uncomplete_parser.description="uncomplete todo"<line_sep>complete_parser=self._add_parser(subparser "complete" aliases=["c"] help="complete todo")<line_sep>complete_parser.set_defaults(command=COMMANDS.COMPLETE_TODO)<line_sep>complete_parser.usage="td [id] complete\n td [id] c"<line_sep>complete_parser.description="complete todo"<line_sep>edit_parser=self._add_parser(subparser "edit" aliases=["e"] help="edit todo" formatter_class=argparse.RawTextHelpFormatter epilog="""If no optional arguments are provided, the todo will be opened in your editor where you can edit the todo's details. The editor defaults to `vi`, but you can choose your preferred one by setting: ``` [settings] editor: <your_editor> ``` in ~/.td.cfg """ )<line_sep>edit_parser.add_argument("--name" "-n" action="store" help="update todo's name")<line_sep>edit_parser.add_argument("--details" "-d" action="store" help="update todo's details")<line_sep>edit_parser.add_argument("--group" "-g" action="store" help="set todo's group")<line_sep>edit_parser.set_defaults(command=COMMANDS.EDIT_TODO)<line_sep>edit_parser.usage="td [id] edit [--name NAME] [--details DETAILS]\n td [id] e [-n NAME] [-d DETAILS]"<line_sep>edit_parser.description="edit todo"<block_end><block_end>
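The parser above leans on argparse's subparser-plus-set_defaults dispatch. A stand-alone sketch of that pattern follows, using plain argparse without the BaseParser/COMMANDS helpers (which are defined elsewhere); the literal "EDIT_TODO" string is a stand-in for the real constant.

import argparse

# Stand-alone illustration of the subparser + set_defaults dispatch used above;
# plain strings stand in for the COMMANDS constants imported from todo.constants.
parser = argparse.ArgumentParser(prog="td")
parser.add_argument("id", help="the id of the todo")
subparsers = parser.add_subparsers(dest="command", help="commands")

edit_parser = subparsers.add_parser("edit", aliases=["e"], help="edit todo")
edit_parser.add_argument("--name", "-n", action="store")
# On modern Python the subparser's default overrides the subcommand name stored in dest,
# so args.command comes back as the dispatch constant rather than "edit"/"e".
edit_parser.set_defaults(command="EDIT_TODO")

args = parser.parse_args(["123", "e", "--name", "buy milk"])
print(args.id, args.command, args.name)  # -> 123 EDIT_TODO buy milk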
<import_stmt>asyncio<import_from_stmt>aio_pika connect_robust Message<async_keyword><def_stmt>main loop<block_start>connection=<await>connect_robust("amqp://guest:[email protected]/" loop=loop)<line_sep>queue_name="test_queue"<line_sep>routing_key="test_queue"<line_sep># Creating channel channel=<await>connection.channel()<line_sep># Declaring exchange exchange=<await>channel.declare_exchange("direct" auto_delete=<true>)<line_sep># Declaring queue queue=<await>channel.declare_queue(queue_name auto_delete=<true>)<line_sep># Binding queue <await>queue.bind(exchange routing_key)<line_sep><await>exchange.publish(Message(bytes("Hello" "utf-8") content_type="text/plain" headers={"foo":"bar"} ) routing_key )<line_sep># Receiving message incoming_message=<await>queue.get(timeout=5)<line_sep># Confirm message <await>incoming_message.ack()<line_sep><await>queue.unbind(exchange routing_key)<line_sep><await>queue.delete()<line_sep><await>connection.close()<block_end><if_stmt>__name__<eq>"__main__"<block_start>loop=asyncio.get_event_loop()<line_sep>loop.run_until_complete(main(loop))<block_end>
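A possible consumer-side counterpart to the publisher example above, sketched with aio_pika's queue iterator. The broker URL is a local placeholder (the original example's credentials/host are not reproduced), and the exchange/queue names simply mirror the publisher sketch.

import asyncio
from aio_pika import connect_robust

async def consume() -> None:
    # Consumer counterpart to the publisher above (sketch only; placeholder broker URL).
    connection = await connect_robust("amqp://guest:guest@localhost/")
    channel = await connection.channel()

    # Mirror the publisher's topology so published messages are routed to this queue.
    exchange = await channel.declare_exchange("direct", auto_delete=True)
    queue = await channel.declare_queue("test_queue", auto_delete=True)
    await queue.bind(exchange, "test_queue")

    async with queue.iterator() as queue_iter:
        async for message in queue_iter:          # blocks until a message arrives
            async with message.process():          # acks on successful exit
                print(message.body.decode(), message.headers)
                break                              # stop after one message in this sketch

    await connection.close()

if __name__ == "__main__":
    asyncio.run(consume())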