prompt: large_string, lengths 70 to 991k
completion: large_string, lengths 0 to 1.02k
<|file_name|>EndCompensationEvent.java<|end_file_name|><|fim▁begin|>/*
 * Copyright 2018 Red Hat, Inc. and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.kie.workbench.common.stunner.bpmn.definition;

import java.util.Objects;

import javax.validation.Valid;

import org.jboss.errai.common.client.api.annotations.MapsTo;
import org.jboss.errai.common.client.api.annotations.Portable;
import org.jboss.errai.databinding.client.api.Bindable;
import org.kie.workbench.common.forms.adf.definitions.annotations.FieldParam;
import org.kie.workbench.common.forms.adf.definitions.annotations.FormDefinition;
import org.kie.workbench.common.forms.adf.definitions.annotations.FormField;
import org.kie.workbench.common.forms.adf.definitions.settings.FieldPolicy;<|fim▁hole|>
import org.kie.workbench.common.stunner.bpmn.definition.property.background.BackgroundSet;
import org.kie.workbench.common.stunner.bpmn.definition.property.dimensions.CircleDimensionSet;
import org.kie.workbench.common.stunner.bpmn.definition.property.dimensions.Radius;
import org.kie.workbench.common.stunner.bpmn.definition.property.event.compensation.CompensationEventExecutionSet;
import org.kie.workbench.common.stunner.bpmn.definition.property.font.FontSet;
import org.kie.workbench.common.stunner.bpmn.definition.property.general.BPMNGeneralSet;
import org.kie.workbench.common.stunner.core.definition.annotation.Definition;
import org.kie.workbench.common.stunner.core.definition.annotation.Property;
import org.kie.workbench.common.stunner.core.definition.annotation.morph.Morph;
import org.kie.workbench.common.stunner.core.util.HashUtil;

import static org.kie.workbench.common.forms.adf.engine.shared.formGeneration.processing.fields.fieldInitializers.nestedForms.AbstractEmbeddedFormsInitializer.COLLAPSIBLE_CONTAINER;
import static org.kie.workbench.common.forms.adf.engine.shared.formGeneration.processing.fields.fieldInitializers.nestedForms.AbstractEmbeddedFormsInitializer.FIELD_CONTAINER_PARAM;

@Portable
@Bindable
@Definition
@Morph(base = BaseEndEvent.class)
@FormDefinition(
        startElement = "general",
        policy = FieldPolicy.ONLY_MARKED,
        defaultFieldSettings = {@FieldParam(name = FIELD_CONTAINER_PARAM, value = COLLAPSIBLE_CONTAINER)}
)
public class EndCompensationEvent extends BaseEndEvent {

    @Property
    @FormField(afterElement = "general")
    @Valid
    private CompensationEventExecutionSet executionSet;

    public EndCompensationEvent() {
        this(new BPMNGeneralSet(""),
             new BackgroundSet(),
             new FontSet(),
             new CircleDimensionSet(new Radius()),
             new CompensationEventExecutionSet());
    }

    public EndCompensationEvent(final @MapsTo("general") BPMNGeneralSet general,
                                final @MapsTo("backgroundSet") BackgroundSet backgroundSet,
                                final @MapsTo("fontSet") FontSet fontSet,
                                final @MapsTo("dimensionsSet") CircleDimensionSet dimensionsSet,
                                final @MapsTo("executionSet") CompensationEventExecutionSet executionSet) {
        super(general,
              backgroundSet,
              fontSet,
              dimensionsSet);
        this.executionSet = executionSet;
    }

    public CompensationEventExecutionSet getExecutionSet() {
        return executionSet;
    }

    public void setExecutionSet(CompensationEventExecutionSet executionSet) {
        this.executionSet = executionSet;
    }

    @Override
    public int hashCode() {
        return HashUtil.combineHashCodes(super.hashCode(),
                                         Objects.hashCode(executionSet));
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o instanceof EndCompensationEvent) {
            EndCompensationEvent other = (EndCompensationEvent) o;
            return super.equals(other) &&
                    Objects.equals(executionSet, other.executionSet);
        }
        return false;
    }
}<|fim▁end|>
<|file_name|>ammoToDmgPattern.py<|end_file_name|><|fim▁begin|># noinspection PyPackageRequirements
import wx

import gui.globalEvents as GE
import gui.mainFrame
from gui.contextMenu import ContextMenuSingle
from service.fit import Fit


class AmmoToDmgPattern(ContextMenuSingle):<|fim▁hole|>

    def __init__(self):
        self.mainFrame = gui.mainFrame.MainFrame.getInstance()

    def display(self, callingWindow, srcContext, mainItem):
        if srcContext not in ("marketItemGroup", "marketItemMisc") or self.mainFrame.getActiveFit() is None:
            return False

        if mainItem is None:
            return False

        for attr in ("emDamage", "thermalDamage", "explosiveDamage", "kineticDamage"):
            if mainItem.getAttribute(attr) is not None:
                return True

        return False

    def getText(self, callingWindow, itmContext, mainItem):
        return "Set {} as Damage Pattern".format(itmContext if itmContext is not None else "Item")

    def activate(self, callingWindow, fullContext, mainItem, i):
        fitID = self.mainFrame.getActiveFit()
        Fit.getInstance().setAsPattern(fitID, mainItem)
        wx.PostEvent(self.mainFrame, GE.FitChanged(fitIDs=(fitID,)))

    def getBitmap(self, callingWindow, context, mainItem):
        return None


AmmoToDmgPattern.register()<|fim▁end|>
visibilitySetting = 'ammoPattern'
<|file_name|>DescribeClusterResultJsonUnmarshaller.java<|end_file_name|><|fim▁begin|>/*
 * Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.elasticmapreduce.model.transform;

import java.math.*;

import javax.annotation.Generated;

import com.amazonaws.services.elasticmapreduce.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;

import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;

/**
 * DescribeClusterResult JSON Unmarshaller
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DescribeClusterResultJsonUnmarshaller implements Unmarshaller<DescribeClusterResult, JsonUnmarshallerContext> {

    public DescribeClusterResult unmarshall(JsonUnmarshallerContext context) throws Exception {
        DescribeClusterResult describeClusterResult = new DescribeClusterResult();

<|fim▁hole|>
        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        if (token == VALUE_NULL) {
            return describeClusterResult;
        }

        while (true) {
            if (token == null)
                break;

            if (token == FIELD_NAME || token == START_OBJECT) {
                if (context.testExpression("Cluster", targetDepth)) {
                    context.nextToken();
                    describeClusterResult.setCluster(ClusterJsonUnmarshaller.getInstance().unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }

        return describeClusterResult;
    }

    private static DescribeClusterResultJsonUnmarshaller instance;

    public static DescribeClusterResultJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new DescribeClusterResultJsonUnmarshaller();
        return instance;
    }
}<|fim▁end|>
        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        int targetDepth = originalDepth + 1;
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The syscall.rs Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Raw system calls for Rust.

#![crate_name="syscall"]
#![crate_type="lib"]
#![feature(asm)]
#![deny(warnings)]
#![allow(unstable)]
#![no_std]

extern crate core;

#[cfg(test)]
extern crate std;

pub use platform::*;

pub mod macros;

#[cfg(all(target_os="linux", target_arch="x86"))]
#[path="platform/linux-x86/mod.rs"]
pub mod platform;

#[cfg(all(target_os="linux", target_arch="x86_64"))]
#[path="platform/linux-x86_64/mod.rs"]
pub mod platform;

#[cfg(all(target_os="freebsd", target_arch="x86_64"))]<|fim▁hole|>
#[path="platform/freebsd-x86_64/mod.rs"]
pub mod platform;<|fim▁end|>
<|file_name|>common.py<|end_file_name|><|fim▁begin|>import logging
from abc import ABCMeta, abstractmethod
from collections import deque
from typing import List, Union, Iterable, Sequence

log = logging.getLogger(__name__)


class NoSensorsFoundException(RuntimeError):
    pass


class Controller(metaclass=ABCMeta):
    @abstractmethod
    def run(self):
        raise NotImplementedError

    @abstractmethod
    def enable(self):
        raise NotImplementedError

    @abstractmethod
    def disable(self):<|fim▁hole|>

    @abstractmethod
    def valid(self) -> bool:
        raise NotImplementedError


class InputDevice(metaclass=ABCMeta):
    """
    Abstract class for input devices.
    """

    def __init__(self, name):
        self.name = name
        self.values = ValueBuffer(name, 128)

    @abstractmethod
    def get_value(self) -> float:
        raise NotImplementedError


class OutputDevice(metaclass=ABCMeta):
    """
    Abstract class for output devices.
    """

    def __init__(self, name):
        self.name = name
        self.values = ValueBuffer(name, 128)

    def set_value(self, value: Union[int, float]):
        self.values.update(value)

    @abstractmethod
    def apply(self):
        raise NotImplementedError

    @abstractmethod
    def enable(self):
        raise NotImplementedError

    @abstractmethod
    def disable(self):
        raise NotImplementedError


class PassthroughController(Controller):
    def __init__(self, inputs=Sequence[InputDevice], outputs=Sequence[OutputDevice], speeds=None):
        self.inputs = list(inputs)
        self.outputs = list(outputs)

    def run(self):
        for idx, input_reader in enumerate(self.inputs):
            output = self.outputs[idx]
            output.name = input_reader.name
            output.values.name = input_reader.name
            output.set_value(input_reader.get_value())
            output.apply()
        log.debug('ran loop')

    def apply_candidates(self):
        return self.outputs

    def enable(self):
        for output_dev in self.outputs:
            output_dev.enable()

    def disable(self):
        for output_dev in self.outputs:
            output_dev.disable()

    def valid(self) -> bool:
        return bool(self.inputs and self.outputs) and len(self.inputs) == len(self.outputs)


class DummyInput(InputDevice):
    def __init__(self):
        super().__init__('dummy')
        self.temp = 0

    def get_value(self):
        return self.temp

    def set_value(self, value):
        self.temp = value


class DummyOutput(OutputDevice):
    def __init__(self):
        super().__init__('dummy')
        self.speed = None
        self.enabled = False

    def apply(self):
        if self.enabled:
            self.speed = round(self.values.mean())

    def enable(self):
        self.enabled = True

    def disable(self):
        self.enabled = False


def mean(seq: Iterable) -> float:
    if not isinstance(seq, Iterable):
        raise ValueError('provided sequence MUST be iterable')
    if not isinstance(seq, Sequence):
        seq = list(seq)
    if len(seq) == 1:
        return float(seq[0])
    if len(seq) == 0:
        raise ValueError('sequence must have at least one value.')
    return sum(seq) / len(seq)


def lerp(value: Union[float, int], input_min: Union[float, int], input_max: Union[float, int],
         output_min: Union[float, int], output_max: Union[float, int]) -> float:
    if value <= input_min:
        return float(output_min)
    if value >= input_max:
        return float(output_max)
    return (output_min * (input_max - value) + output_max * (value - input_min)) / (input_max - input_min)


def lerp_range(seq: Iterable[Union[float, int]], input_min, input_max, output_min, output_max) -> List[float]:
    return [lerp(val, input_min, input_max, output_min, output_max) for val in seq]


class ValueBuffer:
    def __init__(self, name, default_value=0.0):
        self.name = name
        self.buffer = deque(maxlen=32)
        self._default_value = default_value

    def update(self, value: float):
        self.buffer.append(value)

    def mean(self) -> float:
        try:
            return mean(self.buffer)
        except (ValueError, ZeroDivisionError):
            return self._default_value<|fim▁end|>
raise NotImplementedError
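A minimal usage sketch for the device/controller classes in the common.py sample above; the import path "common" is an assumption, not part of the dataset row:

from common import DummyInput, DummyOutput, PassthroughController, lerp

reader = DummyInput()
writer = DummyOutput()
controller = PassthroughController(inputs=[reader], outputs=[writer])

controller.enable()          # enables every output device
reader.set_value(42.5)       # feed a fake sensor reading
controller.run()             # copies each input value to its output and applies it
print(writer.speed)          # rounded mean of the output buffer -> 42

# lerp maps a value from an input range onto an output range, clamping at the edges
print(lerp(5, 0, 10, 0, 100))   # -> 50.0
print(lerp(-3, 0, 10, 0, 100))  # below input_min, clamped -> 0.0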
<|file_name|>tdf006_ranges.py<|end_file_name|><|fim▁begin|>## \file
## \ingroup tutorial_tdataframe
## \notebook -nodraw
## This tutorial shows how to express the concept of ranges when working with the TDataFrame.
## \macro_code
##
## \date March 2017
## \author Danilo Piparo

import ROOT

fill_tree_code = '''
void fill_tree(const char *filename, const char *treeName)
{
   TFile f(filename, "RECREATE");
   TTree t(treeName, treeName);
   int b1;
   float b2;
   t.Branch("b1", &b1);
   t.Branch("b2", &b2);
   for (int i = 0; i < 100; ++i) {
      b1 = i;
      b2 = i * i;
      t.Fill();
   }
   t.Write();
   f.Close();
   return;
}
'''

# We prepare an input tree to run on
fileName = "tdf006_ranges_py.root"
treeName = "myTree"
ROOT.gInterpreter.Declare(fill_tree_code)
ROOT.fill_tree(fileName, treeName)

# We read the tree from the file and create a TDataFrame.
TDF = ROOT.ROOT.Experimental.TDataFrame
d = TDF(treeName, fileName)

# ## Usage of ranges
# Now we'll count some entries using ranges
c_all = d.Count()

# This is how you can express a range of the first 30 entries
d_0_30 = d.Range(0, 30)
c_0_30 = d_0_30.Count()

# This is how you pick all entries from 15 onwards
d_15_end = d.Range(15, 0)
c_15_end = d_15_end.Count()

# We can use a stride too, in this case we pick an event every 3
d_15_end_3 = d.Range(15, 0, 3)
c_15_end_3 = d_15_end_3.Count()

# The Range is a 1st class citizen in the TDataFrame graph:
# not only actions (like Count) but also filters and new columns can be added to it.
d_0_50 = d.Range(0, 50)
c_0_50_odd_b1 = d_0_50.Filter("1 == b1 % 2").Count()

# An important thing to notice is that the counts of a filter are relative to the
# number of entries a filter "sees". Therefore, if a Range depends on a filter,
# the Range will act on the entries passing the filter only.
c_0_3_after_even_b1 = d.Filter("0 == b1 % 2").Range(0, 3).Count()

# Ok, time to wrap up: let's print all counts!
print("Usage of ranges:")
print(" - All entries:", c_all.GetValue())
print(" - Entries from 0 to 30:", c_0_30.GetValue())
print(" - Entries from 15 onwards:", c_15_end.GetValue())<|fim▁hole|><|fim▁end|>
print(" - Entries from 15 onwards in steps of 3:", c_15_end_3.GetValue()) print(" - Entries from 0 to 50, odd only:", c_0_50_odd_b1.GetValue()) print(" - First three entries of all even entries:", c_0_3_after_even_b1.GetValue())
<|file_name|>log.py<|end_file_name|><|fim▁begin|>import logging, sys

from logging import DEBUG, INFO, WARNING, ERROR, CRITICAL


class InfoFilter(logging.Filter):
    def filter(self, rec):
        return rec.levelno in (logging.DEBUG, logging.INFO)


def _new_custom_logger(name='BiblioPixel',
                       fmt='%(levelname)s - %(module)s - %(message)s'):
    logger = logging.getLogger(name)
    formatter = logging.Formatter(fmt=fmt)

    if len(logger.handlers) == 0:<|fim▁hole|>
        h1.setLevel(logging.DEBUG)
        h1.addFilter(InfoFilter())
        h1.setFormatter(formatter)

        h2 = logging.StreamHandler(sys.stderr)
        h2.setLevel(logging.WARNING)
        h2.setFormatter(formatter)

        logger.addHandler(h1)
        logger.addHandler(h2)

    return logger


logger = _new_custom_logger()

setLogLevel = logger.setLevel

debug, info, warning, error, critical, exception = (
    logger.debug, logger.info, logger.warning, logger.error,
    logger.critical, logger.exception)<|fim▁end|>
        logger.setLevel(logging.INFO)
        h1 = logging.StreamHandler(sys.stdout)
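A sketch of how the log.py module above is meant to be consumed, assuming it is importable as "log" (the import path is an assumption):

import log

log.setLogLevel(log.DEBUG)                  # the module re-exports the logging level constants
log.info("goes to stdout")                  # DEBUG/INFO pass InfoFilter on the stdout handler
log.warning("goes to stderr")               # WARNING and above hit the stderr handler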
<|file_name|>environment.ts<|end_file_name|><|fim▁begin|><|fim▁hole|>
// The build system defaults to the dev environment which uses `environment.ts`, but if you do
// `ng build --env=prod` then `environment.prod.ts` will be used instead.
// The list of which env maps to which file can be found in `.angular-cli.json`.

export const environment = {
  production: false,

  // api server
  api: {
    url: 'http://127.0.0.1:3000/api'
  }
};<|fim▁end|>
// The file contents for the current environment will overwrite these during build.
<|file_name|>parseXML.js<|end_file_name|><|fim▁begin|>/*
 * Copyright (c) 2015-2016 Fraunhofer FOKUS
 *
 * Licensed under the Apache License, Version 2.0 (the "License");<|fim▁hole|>
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
define([
    "../core"
], function( jQuery ) {

// Cross-browser xml parsing
jQuery.parseXML = function( data ) {
    var xml, tmp;
    if ( !data || typeof data !== "string" ) {
        return null;
    }

    // Support: IE9
    try {
        tmp = new DOMParser();
        xml = tmp.parseFromString( data, "text/xml" );
    } catch ( e ) {
        xml = undefined;
    }

    if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) {
        jQuery.error( "Invalid XML: " + data );
    }
    return xml;
};

return jQuery.parseXML;
});<|fim▁end|>
properties.Schema( properties.Schema.STRING, _(""), required=False, update_allowed=True, constraints=[ constraints.AllowedValues(['PROTO_ALL', 'PROTO_ICMP', 'PROTO_TCP', 'PROTO_UDP']), ], ) input_interface_schema = properties.Schema( properties.Schema.STRING, _(""), required=False, update_allowed=True, ) output_interface_schema = properties.Schema( properties.Schema.STRING, _(""), required=False, update_allowed=True, ) action_schema = properties.Schema( properties.Schema.STRING, _(""), required=True, update_allowed=True, constraints=[ constraints.AllowedValues(['ACCEPT', 'DNAT', 'DROP', 'MASQUERADE', 'REJECT']), ], ) dnat_ip_schema = properties.Schema( properties.Schema.MAP, _(""), schema=IpAddr.properties_schema, required=False, update_allowed=True, ) tag_schema = properties.Schema( properties.Schema.STRING, _(""), required=False, update_allowed=True, ) # properties list PROPERTIES = ( 'src_ip', 'dst_ip', 'src_port', 'dst_port', 'proto', 'input_interface', 'output_interface', 'action', 'dnat_ip', 'tag', ) # mapping of properties to their schemas properties_schema = { 'src_ip': src_ip_schema, 'dst_ip': dst_ip_schema, 'src_port': src_port_schema, 'dst_port': dst_port_schema, 'proto': proto_schema, 'input_interface': input_interface_schema, 'output_interface': output_interface_schema, 'action': action_schema, 'dnat_ip': dnat_ip_schema, 'tag': tag_schema, } # for supporting get_avi_uuid_by_name functionality field_references = { 'src_ip': getattr(IpAddrPrefix, 'field_references', {}), 'dst_ip': getattr(IpAddrPrefix, 'field_references', {}), 'src_port': getattr(PortRange, 'field_references', {}), 'dst_port': getattr(PortRange, 'field_references', {}), 'dnat_ip': getattr(IpAddr, 'field_references', {}), } unique_keys = { 'src_ip': getattr(IpAddrPrefix, 'unique_keys', {}), 'dst_ip': getattr(IpAddrPrefix, 'unique_keys', {}), 'src_port': getattr(PortRange, 'unique_keys', {}), 'dst_port': getattr(PortRange, 'unique_keys', {}), 'dnat_ip': getattr(IpAddr, 'unique_keys', {}), } class IptableRuleSet(object): # all schemas table_schema = properties.Schema( properties.Schema.STRING, _(""), required=True, update_allowed=True, ) chain_schema = properties.Schema( properties.Schema.STRING, _(""), required=True, update_allowed=True, ) rules_item_schema = properties.Schema( properties.Schema.MAP, _(""), schema=IptableRule.properties_schema, required=True, update_allowed=False, ) rules_schema = properties.Schema( properties.Schema.LIST, _(""), schema=rules_item_schema, required=False, update_allowed=True, ) # properties list PROPERTIES = ( 'table', 'chain', 'rules', ) # mapping of properties to their schemas properties_schema = { 'table': table_schema, 'chain': chain_schema, 'rules': rules_schema, } # for supporting get_avi_uuid_by_name functionality field_references = { 'rules': getattr(IptableRule, 'field_references', {}), } unique_keys = { 'rules': getattr(IptableRule, 'unique_keys', {}), } class ServiceEngineGroup(AviResource): resource_name = "serviceenginegroup" # all schemas avi_version_schema = properties.Schema( properties.Schema.STRING, _("Avi Version to use for the object. Default is 16.4.2. 
If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."), required=False, update_allowed=True, ) name_schema = properties.Schema( properties.Schema.STRING, _(""), required=True, update_allowed=True, ) description_schema = properties.Schema( properties.Schema.STRING, _(""), required=False, update_allowed=True, ) max_vs_per_se_schema = properties.Schema( properties.Schema.NUMBER, _("Maximum number of Virtual Services that can be placed on a single Service Engine. East West Virtual Services are excluded from this limit. (Default: 10)"), required=False, update_allowed=True, ) min_scaleout_per_vs_schema = properties.Schema( properties.Schema.NUMBER, _("Minimum number of active Service Engines for the Virtual Service. (Default: 1)"), required=False, update_allowed=True, ) max_scaleout_per_vs_schema = properties.Schema( properties.Schema.NUMBER, _("Maximum number of active Service Engines for the Virtual Service. (Default: 4)"), required=False, update_allowed=True, ) max_se_schema = properties.Schema( properties.Schema.NUMBER, _("Maximum number of Services Engines in this group. (Default: 10)"), required=False, update_allowed=True, ) vcpus_per_se_schema = properties.Schema( properties.Schema.NUMBER, _("Number of vcpus for each of the Service Engine virtual machines. (Default: 1)"), required=False, update_allowed=True, ) memory_per_se_schema = properties.Schema( properties.Schema.NUMBER, _("Amount of memory for each of the Service Engine virtual machines. (Default: 2048)"), required=False, update_allowed=True, ) disk_per_se_schema = properties.Schema( properties.Schema.NUMBER, _("Amount of disk space for each of the Service Engine virtual machines. (Units: GB) (Default: 10)"), required=False, update_allowed=True, ) max_cpu_usage_schema = properties.Schema( properties.Schema.NUMBER, _("When CPU usage on an SE exceeds this threshold, Virtual Services hosted on this SE may be rebalanced to other SEs to reduce load. A new SE may be created as part of this process. (Units: PERCENT) (Default: 80)"), required=False, update_allowed=True, ) min_cpu_usage_schema = properties.Schema( properties.Schema.NUMBER, _("When CPU usage on an SE falls below the minimum threshold, Virtual Services hosted on the SE may be consolidated onto other underutilized SEs. After consolidation, unused Service Engines may then be eligible for deletion. (Units: PERCENT) (Default: 30)"), required=False, update_allowed=True, ) se_deprovision_delay_schema = properties.Schema( properties.Schema.NUMBER, _("Duration to preserve unused Service Engine virtual machines before deleting them. If traffic to a Virtual Service were to spike up abruptly, this SE would still be available to be utilized again rather than creating a new SE. If this value is set to 0, Controller will never delete any SEs and administrator has to manually cleanup unused SEs. (Units: MIN) (Default: 120)"), required=False, update_allowed=True, ) auto_rebalance_schema = properties.Schema( properties.Schema.BOOLEAN, _("If set, Virtual Services will be automatically migrated when load on an SE is less than minimum or more than maximum thresholds. Only Alerts are generated when the auto_rebalance is not set. 
(Default: False)"), required=False, update_allowed=True, ) se_name_prefix_schema = properties.Schema( properties.Schema.STRING, _("Prefix to use for virtual machine name of Service Engines."), required=False, update_allowed=True, ) vs_host_redundancy_schema = properties.Schema( properties.Schema.BOOLEAN, _("Ensure primary and secondary Service Engines are deployed on different physical hosts. (Default: True)"), required=False, update_allowed=True, ) vcenter_folder_schema = properties.Schema( properties.Schema.STRING, _("Folder to place all the Service Engine virtual machines in vCenter."), required=False, update_allowed=True, ) vcenter_datastores_item_schema = properties.Schema( properties.Schema.MAP, _(""), schema=VcenterDatastore.properties_schema, required=True, update_allowed=False, ) vcenter_datastores_schema = properties.Schema( properties.Schema.LIST, _(""), schema=vcenter_datastores_item_schema, required=False, update_allowed=True, ) vcenter_datastores_include_schema = properties.Schema( properties.Schema.BOOLEAN, _(" (Default: False)"), required=False, update_allowed=True, ) vcenter_datastore_mode_schema = properties.Schema( properties.Schema.STRING, _(" (Default: VCENTER_DATASTORE_ANY)"), required=False, update_allowed=True, constraints=[ constraints.AllowedValues(['VCENTER_DATASTORE_ANY', 'VCENTER_DATASTORE_LOCAL', 'VCENTER_DATASTORE_SHARED']), ], ) vcenter_clusters_schema = properties.Schema( properties.Schema.MAP, _(""), schema=VcenterClusters.properties_schema, required=False, update_allowed=True, ) vcenter_hosts_schema = properties.Schema( properties.Schema.MAP, _(""), schema=VcenterHosts.properties_schema, required=False, update_allowed=True, ) openstack_availability_zone_schema = properties.Schema( properties.Schema.STRING, _("(Deprecated in: 17.1.1) "), required=False, update_allowed=True, ) cpu_reserve_schema = properties.Schema( properties.Schema.BOOLEAN, _(" (Default: False)"), required=False, update_allowed=True, ) mem_reserve_schema = properties.Schema( properties.Schema.BOOLEAN, _(" (Default: True)"), required=False, update_allowed=True, ) mgmt_network_uuid_schema = properties.Schema( properties.Schema.STRING, _("Management network to use for Avi Service Engines You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."), required=False, update_allowed=True, ) mgmt_subnet_schema = properties.Schema( properties.Schema.MAP, _("Management subnet to use for Avi Service Engines"), schema=IpAddrPrefix.properties_schema, required=False, update_allowed=True, ) ha_mode_schema = properties.Schema( properties.Schema.STRING, _("High Availability mode for all the Virtual Services using this Service Engine group. (Default: HA_MODE_SHARED)"), required=False, update_allowed=True, constraints=[ constraints.AllowedValues(['HA_MODE_LEGACY_ACTIVE_STANDBY', 'HA_MODE_SHARED', 'HA_MODE_SHARED_PAIR']), ], ) algo_schema = properties.Schema( properties.Schema.STRING, _("In compact placement, Virtual Services are placed on existing SEs until max_vs_per_se limit is reached. 
(Default: PLACEMENT_ALGO_PACKED)"), required=False, update_allowed=True, constraints=[ constraints.AllowedValues(['PLACEMENT_ALGO_DISTRIBUTED', 'PLACEMENT_ALGO_PACKED']), ], ) buffer_se_schema = properties.Schema( properties.Schema.NUMBER, _("Excess Service Engine capacity provisioned for HA failover (Default: 1)"), required=False, update_allowed=True, ) active_standby_schema = properties.Schema( properties.Schema.BOOLEAN, _("Service Engines in active/standby mode for HA failover (Default: False)"), required=False, update_allowed=True, ) placement_mode_schema = properties.Schema( properties.Schema.STRING, _("If placement mode is 'Auto', Virtual Services are automatically placed on Service Engines. (Default: PLACEMENT_MODE_AUTO)"), required=False, update_allowed=True, constraints=[ constraints.AllowedValues(['PLACEMENT_MODE_AUTO']), ], ) openstack_mgmt_network_name_schema = properties.Schema( properties.Schema.STRING, _("Avi Management network name"), required=False, update_allowed=True, ) openstack_mgmt_network_uuid_schema = properties.Schema( properties.Schema.STRING, _("Management network UUID"), required=False, update_allowed=True, ) instance_flavor_schema = properties.Schema( properties.Schema.STRING, _("Instance/Flavor type for SE instance"), required=False, update_allowed=True, ) hypervisor_schema = properties.Schema( properties.Schema.STRING, _("Override default hypervisor"), required=False, update_allowed=True, constraints=[ constraints.AllowedValues(['DEFAULT', 'KVM', 'VMWARE_ESX', 'VMWARE_VSAN', 'XEN']), ], ) se_dos_profile_schema = properties.Schema( properties.Schema.MAP, _(""), schema=DosThresholdProfile.properties_schema, required=False, update_allowed=True, ) auto_rebalance_interval_schema = properties.Schema( properties.Schema.NUMBER, _("Frequency of rebalance, if 'Auto rebalance' is enabled (Units: SEC) (Default: 300)"), required=False, update_allowed=True, ) aggressive_failure_detection_schema = properties.Schema( properties.Schema.BOOLEAN, _("Enable aggressive failover configuration for ha. (Default: False)"), required=False, update_allowed=True, ) realtime_se_metrics_schema = properties.Schema( properties.Schema.MAP, _("Enable or disable real time SE metrics"), schema=MetricsRealTimeUpdate.properties_schema, required=False, update_allowed=True, ) vs_scaleout_timeout_schema = properties.Schema( properties.Schema.NUMBER, _("Time to wait for the scaled out SE to become ready before marking the scaleout done (Units: SEC) (Default: 30)"), required=False, update_allowed=True, ) vs_scalein_timeout_schema = properties.Schema( properties.Schema.NUMBER, _("Time to wait for the scaled in SE to drain existing flows before marking the scalein done (Units: SEC) (Default: 30)"), required=False, update_allowed=True, ) hardwaresecuritymodulegroup_uuid_schema = properties.Schema( properties.Schema.STRING, _(" You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."), required=False, update_allowed=True, ) connection_memory_percentage_schema = properties.Schema( properties.Schema.NUMBER, _("Percentage of memory for connection state. This will come at the expense of memory used for HTTP in-memory cache. (Units: PERCENT) (Default: 50)"), required=False, update_allowed=True, ) extra_config_multiplier_schema = properties.Schema( properties.Schema.NUMBER, _("Multiplier for extra config to support large VS/Pool config. 
(Default: 0.0)"), required=False, update_allowed=True, ) vs_scalein_timeout_for_upgrade_schema = properties.Schema( properties.Schema.NUMBER, _("During SE upgrade, Time to wait for the scaled-in SE to drain existing flows before marking the scalein done (Units: SEC) (Default: 30)"), required=False, update_allowed=True, ) host_attribute_key_schema = properties.Schema( properties.Schema.STRING, _("Key of a (Key, Value) pair identifying a label for a set of Nodes usually in Container Clouds. Needs to be specified together with host_attribute_value. SEs can be configured differently including HA modes across different SE Groups. May also be used for isolation between different classes of VirtualServices. VirtualServices' SE Group may be specified via annotations/labels. A OpenShift/Kubernetes namespace maybe annotated with a matching SE Group label as openshift.io/node-selector: apptype=prod. When multiple SE Groups are used in a Cloud with host attributes specified,just a single SE Group can exist as a match-all SE Group without a host_attribute_key."), required=False, update_allowed=True, ) host_attribute_value_schema = properties.Schema( properties.Schema.STRING, _("Value of a (Key, Value) pair identifying a label for a set of Nodes usually in Container Clouds. Needs to be specified together with host_attribute_key."), required=False, update_allowed=True, ) log_disksz_schema = properties.Schema( properties.Schema.NUMBER, _("Maximum disk capacity (in MB) to be allocated to an SE. This is exclusively used for debug and log data. (Units: MB) (Default: 10000)"), required=False, update_allowed=True, ) os_reserved_memory_schema = properties.Schema( properties.Schema.NUMBER, _("Amount of extra memory to be reserved for use by the Operating System on a Service Engine. (Units: MB) (Default: 0)"), required=False, update_allowed=True, ) floating_intf_ip_item_schema = properties.Schema( properties.Schema.MAP, _("If ServiceEngineGroup is configured for Legacy 1+1 Active Standby HA Mode, Floating IP's will be advertised only by the Active SE in the Pair. Virtual Services in this group must be disabled/enabled for any changes to the Floating IP's to take effect. Only active SE hosting VS tagged with Active Standby SE 1 Tag will advertise this floating IP when manual load distribution is enabled."), schema=IpAddr.properties_schema, required=True, update_allowed=False, ) floating_intf_ip_schema = properties.Schema( properties.Schema.LIST, _("If ServiceEngineGroup is configured for Legacy 1+1 Active Standby HA Mode, Floating IP's will be advertised only by the Active SE in the Pair. Virtual Services in this group must be disabled/enabled for any changes to the Floating IP's to take effect. Only active SE hosting VS tagged with Active Standby SE 1 Tag will advertise this floating IP when manual load distribution is enabled."), schema=floating_intf_ip_item_schema, required=False, update_allowed=True, ) hm_on_standby_schema = properties.Schema( properties.Schema.BOOLEAN, _("Enable active health monitoring from the standby SE for all placed virtual services. (Default: True)"), required=False, update_allowed=True, ) per_app_schema = properties.Schema( properties.Schema.BOOLEAN, _("Per-app SE mode is designed for deploying dedicated load balancers per app (VS). In this mode, each SE is limited to a max of 2 VSs. vCPUs in per-app SEs count towards licensing usage at 25% rate. 
(Default: False)"), required=False, update_allowed=True, ) enable_vmac_schema = properties.Schema( properties.Schema.BOOLEAN, _("Use Virtual MAC address for interfaces on which floating interface IPs are placed (Default: False)"), required=False, update_allowed=True, ) distribute_load_active_standby_schema = properties.Schema( properties.Schema.BOOLEAN, _("Use both the active and standby Service Engines for Virtual Service placement in the legacy active standby HA mode. (Default: False)"), required=False, update_allowed=True, ) auto_redistribute_active_standby_load_schema = properties.Schema( properties.Schema.BOOLEAN, _("Redistribution of virtual services from the takeover SE to the replacement SE can cause momentary traffic loss. If the auto-redistribute load option is left in its default off state, any desired rebalancing requires calls to REST API. (Default: False)"), required=False, update_allowed=True, ) floating_intf_ip_se_2_item_schema = properties.Schema( properties.Schema.MAP, _("If ServiceEngineGroup is configured for Legacy 1+1 Active Standby HA Mode, Floating IP's will be advertised only by the Active SE in the Pair. Virtual Services in this group must be disabled/enabled for any changes to the Floating IP's to take effect. Only active SE hosting VS tagged with Active Standby SE 2 Tag will advertise this floating IP when manual load distribution is enabled."), schema=IpAddr.properties_schema, required=True, update_allowed=False, ) floating_intf_ip_se_2_schema = properties.Schema( properties.Schema.LIST, _("If ServiceEngineGroup is configured for Legacy 1+1 Active Standby HA Mode, Floating IP's will be advertised only by the Active SE in the Pair. Virtual Services in this group must be disabled/enabled for any changes to the Floating IP's to take effect. Only active SE hosting VS tagged with Active Standby SE 2 Tag will advertise this floating IP when manual load distribution is enabled."), schema=floating_intf_ip_se_2_item_schema, required=False, update_allowed=True, ) custom_tag_item_schema = properties.Schema( properties.Schema.MAP, _("Custom tag will be used to create the tags for SE instance in AWS. Note this is not the same as the prefix for SE name"), schema=CustomTag.properties_schema, required=True, update_allowed=False, ) custom_tag_schema = properties.Schema( properties.Schema.LIST, _("Custom tag will be used to create the tags for SE instance in AWS. Note this is not the same as the prefix for SE name"), schema=custom_tag_item_schema, required=False, update_allowed=True, ) dedicated_dispatcher_core_schema = properties.Schema( properties.Schema.BOOLEAN, _("Dedicate the core that handles packet receive/transmit from the network to just the dispatching function. Don't use it for TCP/IP and SSL functions. (Default: False)"), required=False, update_allowed=True, ) cpu_socket_affinity_schema = properties.Schema( properties.Schema.BOOLEAN, _("Allocate all the CPU cores for the Service Engine Virtual Machines on the same CPU socket. Applicable only for vCenter Cloud. (Default: False)"), required=False, update_allowed=True, ) num_flow_cores_sum_changes_to_ignore_schema = properties.Schema( properties.Schema.NUMBER, _("Number of changes in num flow cores sum to ignore. (Default: 8)"), required=False, update_allowed=True, ) least_load_core_selection_schema = properties.Schema( properties.Schema.BOOLEAN, _("Select core with least load for new flow. 
(Default: True)"), required=False, update_allowed=True, ) extra_shared_config_memory_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.1.1) Extra config memory to support large Geo DB configuration. (Units: MB) (Default: 0)"), required=False, update_allowed=True, ) se_tunnel_mode_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.1.1) Determines if DSR from secondary SE is active or not: 0: Automatically determine based on hypervisor type. 1: Disable DSR unconditionally. ~[0,1]: Enable DSR unconditionally. (Default: 0)"), required=False, update_allowed=True, ) openstack_availability_zones_item_schema = properties.Schema( properties.Schema.STRING, _("(Introduced in: 17.1.1) "), required=True, update_allowed=False, ) openstack_availability_zones_schema = properties.Schema( properties.Schema.LIST, _("(Introduced in: 17.1.1) "), schema=openstack_availability_zones_item_schema, required=False, update_allowed=True, ) service_ip_subnets_item_schema = properties.Schema( properties.Schema.MAP, _("(Introduced in: 17.1.1) Subnets assigned to the SE group. Required for VS group placement."), schema=IpAddrPrefix.properties_schema, required=True, update_allowed=False, ) service_ip_subnets_schema = properties.Schema( properties.Schema.LIST, _("(Introduced in: 17.1.1) Subnets assigned to the SE group. Required for VS group placement."), schema=service_ip_subnets_item_schema, required=False, update_allowed=True, ) se_vs_hb_max_vs_in_pkt_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.1.1) Maximum number of virtualservices for which heartbeat messages are aggregated in one packet. (Default: 256)"), required=False, update_allowed=True, ) se_vs_hb_max_pkts_in_batch_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.1.1) Maximum number of aggregated vs heartbeat packets to send in a batch. (Default: 8)"), required=False, update_allowed=True, ) auto_rebalance_criteria_item_schema = properties.Schema( properties.Schema.STRING, _("(Introduced in: 17.2.3) Set of criteria for SE Auto Rebalance."), required=True, update_allowed=False, constraints=[ constraints.AllowedValues(['SE_AUTO_REBALANCE_CPS', 'SE_AUTO_REBALANCE_CPU', 'SE_AUTO_REBALANCE_MBPS', 'SE_AUTO_REBALANCE_OPEN_CONNS', 'SE_AUTO_REBALANCE_PPS']), ], ) auto_rebalance_criteria_schema = properties.Schema( properties.Schema.LIST, _("(Introduced in: 17.2.3) Set of criteria for SE Auto Rebalance."), schema=auto_rebalance_criteria_item_schema, required=False, update_allowed=True, ) cloud_uuid_schema = properties.Schema( properties.Schema.STRING, _(""), required=False, update_allowed=False, ) iptables_item_schema = properties.Schema( properties.Schema.MAP, _("Iptable Rules"), schema=IptableRuleSet.properties_schema, required=True, update_allowed=False, ) iptables_schema = properties.Schema( properties.Schema.LIST, _("Iptable Rules"), schema=iptables_item_schema, required=False, update_allowed=True, ) enable_routing_schema = properties.Schema( properties.Schema.BOOLEAN, _("Enable routing for this ServiceEngineGroup (Default: False)"), required=False, update_allowed=True, ) advertise_backend_networks_schema = properties.Schema( properties.Schema.BOOLEAN, _("Advertise reach-ability of backend server networks via ADC through BGP for default gateway feature. (Default: False)"), required=False, update_allowed=True, ) enable_vip_on_all_interfaces_schema = properties.Schema( properties.Schema.BOOLEAN, _("(Introduced in: 17.1.1) Enable VIP on all interfaces of SE. 
(Default: True)"), required=False, update_allowed=True, ) se_thread_multiplier_schema = properties.Schema( properties.Schema.NUMBER, _("Multiplier for SE threads based on vCPU. (Default: 1)"), required=False, update_allowed=True, ) async_ssl_schema = properties.Schema( properties.Schema.BOOLEAN, _("SSL handshakes will be handled by dedicated SSL Threads (Default: False)"), required=False, update_allowed=True, ) async_ssl_threads_schema = properties.Schema( properties.Schema.NUMBER, _("Number of Async SSL threads per se_dp (Default: 1)"), required=False, update_allowed=True, ) se_udp_encap_ipc_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.1.2) Determines if SE-SE IPC messages are encapsulated in an UDP header: 0: Automatically determine based on hypervisor type. 1: Use UDP encap unconditionally. ~[0,1]: Don't use UDP encap. (Default: 0)"), required=False, update_allowed=True, ) se_ipc_udp_port_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.1.2) UDP Port for SE_DP IPC in Docker bridge mode. (Default: 1500)"), required=False, update_allowed=True, ) se_remote_punt_udp_port_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.1.2) UDP Port for punted packets in Docker bridge mode. (Default: 1501)"), required=False, update_allowed=True, ) se_tunnel_udp_port_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.1.3) UDP Port for tunneled packets from secondary to primary SE in Docker bridge mode. (Default: 1550)"), required=False, update_allowed=True, ) custom_securitygroups_mgmt_item_schema = properties.Schema( properties.Schema.STRING, _("(Introduced in: 17.1.3) Custom Security Groups to be associated with management vNic for SE instances in OpenStack and AWS Clouds."), required=True, update_allowed=False, ) custom_securitygroups_mgmt_schema = properties.Schema( properties.Schema.LIST, _("(Introduced in: 17.1.3) Custom Security Groups to be associated with management vNic for SE instances in OpenStack and AWS Clouds."), schema=custom_securitygroups_mgmt_item_schema, required=False, update_allowed=True, ) custom_securitygroups_data_item_schema = properties.Schema( properties.Schema.STRING, _("(Introduced in: 17.1.3) Custom Security Groups to be associated with data vNics for SE instances in OpenStack and AWS Clouds."), required=True, update_allowed=False, ) custom_securitygroups_data_schema = properties.Schema( properties.Schema.LIST, _("(Introduced in: 17.1.3) Custom Security Groups to be associated with data vNics for SE instances in OpenStack and AWS Clouds."), schema=custom_securitygroups_data_item_schema, required=False, update_allowed=True, ) archive_shm_limit_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.1.3) Amount of SE memory in GB until which shared memory is collected in core archive. (Units: GB) (Default: 8)"), required=False, update_allowed=True, ) significant_log_throttle_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.1.3) This setting limits the number of significant logs generated per second per core on this SE. Default is 100 logs per second. Set it to zero (0) to disable throttling. (Units: PER_SECOND) (Default: 100)"), required=False, update_allowed=True, ) udf_log_throttle_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.1.3) This setting limits the number of UDF logs generated per second per core on this SE. 
UDF logs are generated due to the configured client log filters or the rules with logging enabled. Default is 100 logs per second. Set it to zero (0) to disable throttling. (Units: PER_SECOND) (Default: 100)"), required=False, update_allowed=True, ) non_significant_log_throttle_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.1.3) This setting limits the number of non-significant logs generated per second per core on this SE. Default is 100 logs per second. Set it to zero (0) to disable throttling. (Units: PER_SECOND) (Default: 100)"), required=False, update_allowed=True, ) ingress_access_mgmt_schema = properties.Schema( properties.Schema.STRING, _("(Introduced in: 17.1.5) Program SE security group ingress rules to allow SSH/ICMP management access from remote CIDR type. (Default: SG_INGRESS_ACCESS_ALL)"), required=False, update_allowed=True, constraints=[ constraints.AllowedValues(['SG_INGRESS_ACCESS_ALL', 'SG_INGRESS_ACCESS_NONE', 'SG_INGRESS_ACCESS_VPC']), ], ) ingress_access_data_schema = properties.Schema( properties.Schema.STRING, _("(Introduced in: 17.1.5) Program SE security group ingress rules to allow VIP data access from remote CIDR type. (Default: SG_INGRESS_ACCESS_ALL)"), required=False, update_allowed=True, constraints=[ constraints.AllowedValues(['SG_INGRESS_ACCESS_ALL', 'SG_INGRESS_ACCESS_NONE', 'SG_INGRESS_ACCESS_VPC']), ], ) se_sb_dedicated_core_schema = properties.Schema( properties.Schema.BOOLEAN, _("(Introduced in: 16.5.2, 17.1.9, 17.2.3) Sideband traffic will be handled by a dedicated core (Default: False)"), required=False, update_allowed=True, ) se_probe_port_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.2.2) TCP port on SE where echo service will be run (Default: 7)"), required=False, update_allowed=True, ) se_sb_threads_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 16.5.2, 17.1.9, 17.2.3) Number of Sideband threads per SE (Default: 1)"), required=False, update_allowed=True, ) ignore_rtt_threshold_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.1.6,17.2.2) Ignore RTT samples if it is above threshold (Units: MILLISECONDS) (Default: 5000)"), required=False, update_allowed=True, ) waf_mempool_schema = properties.Schema( properties.Schema.BOOLEAN, _("(Introduced in: 17.2.3) Enable memory pool for WAF (Default: True)"), required=False, update_allowed=True, ) waf_mempool_size_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.2.3) Memory pool size used for WAF (Units: KB) (Default: 64)"), required=False, update_allowed=True, ) se_bandwidth_type_schema = properties.Schema( properties.Schema.STRING, _("(Introduced in: 17.2.5) Select the SE bandwidth for the bandwidth license."), required=False, update_allowed=True, constraints=[ constraints.AllowedValues(['SE_BANDWIDTH_10000M', 'SE_BANDWIDTH_1000M', 'SE_BANDWIDTH_200M', 'SE_BANDWIDTH_25M', 'SE_BANDWIDTH_UNLIMITED']), ], ) license_type_schema = properties.Schema( properties.Schema.STRING, _("(Introduced in: 17.2.5) If no license type is specified then default license enforcement for the cloud type is chosen."), required=False, update_allowed=True, constraints=[ constraints.AllowedValues(['LIC_BACKEND_SERVERS', 'LIC_CORES', 'LIC_HOSTS', 'LIC_SE_BANDWIDTH', 'LIC_SOCKETS']), ], ) license_tier_schema = properties.Schema( properties.Schema.STRING, _("(Introduced in: 17.2.5) Specifies the license tier which would be used. 
This field by default inherits the value from cloud."), required=False, update_allowed=True, constraints=[ constraints.AllowedValues(['ENTERPRISE_16', 'ENTERPRISE_18']), ], ) allow_burst_schema = properties.Schema( properties.Schema.BOOLEAN, _("(Introduced in: 17.2.5) Allow SEs to be created using burst license"), required=False, update_allowed=True, ) auto_rebalance_capacity_per_se_item_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.2.4) Capacities of SE for auto rebalance for each criteria."), required=True, update_allowed=False, ) auto_rebalance_capacity_per_se_schema = properties.Schema( properties.Schema.LIST, _("(Introduced in: 17.2.4) Capacities of SE for auto rebalance for each criteria."), schema=auto_rebalance_capacity_per_se_item_schema, required=False, update_allowed=True, ) host_gateway_monitor_schema = properties.Schema( properties.Schema.BOOLEAN, _("(Introduced in: 17.2.4) Enable the host gateway monitor when service engine is deployed as docker container. Disabled by default. (Default: False)"), required=False, update_allowed=True, ) vss_placement_schema = properties.Schema( properties.Schema.MAP, _("(Introduced in: 17.2.5) Parameters to place Virtual Services on only a subset of the cores of an SE."), schema=VssPlacement.properties_schema, required=False, update_allowed=True, ) flow_table_new_syn_max_entries_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.2.5) Maximum number of flow table entries that have not completed TCP three-way handshake yet (Default: 0)"), required=False, update_allowed=True, ) minimum_required_config_memory_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 18.1.1) (Deprecated in: 18.1.2) Required available config memory to apply any configuration (Units: PERCENT)"), required=False, update_allowed=True, ) disable_csum_offloads_schema = properties.Schema( properties.Schema.BOOLEAN, _("(Introduced in: 17.1.14, 17.2.5, 18.1.1) Stop using TCP/UDP and IP checksum offload features of NICs (Default: False)"), required=False, update_allowed=True, ) disable_gro_schema = properties.Schema( properties.Schema.BOOLEAN, _("(Introduced in: 17.2.5, 18.1.1) Disable Generic Receive Offload (GRO) in DPDK poll-mode driver packet receive path. GRO is on by default on NICs that do not support LRO (Large Receive Offload) or do not gain performance boost from LRO. (Default: False)"), required=False, update_allowed=True, ) disable_tso_schema = properties.Schema( properties.Schema.BOOLEAN, _("(Introduced in: 17.2.5, 18.1.1) Disable TCP Segmentation Offload (TSO) in DPDK poll-mode driver packet transmit path. TSO is on by default on NICs that support it. (Default: False)"), required=False, update_allowed=True, ) enable_hsm_priming_schema = properties.Schema( properties.Schema.BOOLEAN, _("(Introduced in: 17.2.7, 18.1.1) (This is a beta feature). Enable HSM key priming. If enabled, key handles on the hsm will be synced to SE before processing client connections. (Default: False)"), required=False, update_allowed=True, ) service_ip6_subnets_item_schema = properties.Schema( properties.Schema.MAP, _("(Introduced in: 18.1.1) IPv6 Subnets assigned to the SE group. Required for VS group placement."), schema=IpAddrPrefix.properties_schema, required=True, update_allowed=False, ) service_ip6_subnets_schema = properties.Schema( properties.Schema.LIST, _("(Introduced in: 18.1.1) IPv6 Subnets assigned to the SE group. 
Required for VS group placement."), schema=service_ip6_subnets_item_schema, required=False, update_allowed=True, ) se_tracert_port_range_schema = properties.Schema( properties.Schema.MAP, _("(Introduced in: 17.2.8) Traceroute port range"), schema=PortRange.properties_schema, required=False, update_allowed=True, ) distribute_queues_schema = properties.Schema( properties.Schema.BOOLEAN, _("(Introduced in: 17.2.8) Distributes queue ownership among cores so multiple cores handle dispatcher duties. (Default: False)"), required=False, update_allowed=True, ) additional_config_memory_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 18.1.1) (Deprecated in: 18.1.2) Indicates the percent of config memory used for config updates. (Units: PERCENT)"), required=False, update_allowed=True, ) vss_placement_enabled_schema = properties.Schema( properties.Schema.BOOLEAN, _("(Introduced in: 18.1.1) If set, Virtual Services will be placed on only a subset of the cores of an SE. (Default: False)"), required=False, update_allowed=True, ) enable_multi_lb_schema = properties.Schema( properties.Schema.BOOLEAN, _("(Introduced in: 17.2.10, 18.1.2) Applicable only for Azure cloud with Basic SKU LB. If set, additional Azure LBs will be automatically created if resources in existing LB are exhausted. (Default: False)"), required=False, update_allowed=True, ) n_log_streaming_threads_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.2.12, 18.1.2) Number of threads to use for log streaming. (Default: 1)"), required=False, update_allowed=True, ) free_list_size_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.2.10) Number of entries in the free list (Default: 1024)"), required=False, update_allowed=True, ) max_rules_per_lb_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.2.12, 18.1.2) Applicable to Azure platform only. Maximum number of rules per Azure LB. (Default: 150)"), required=False, update_allowed=True, ) max_public_ips_per_lb_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 17.2.12, 18.1.2) Applicable to Azure platform only. Maximum number of public IPs per Azure LB. (Default: 30)"), required=False, update_allowed=True, ) waf_learning_memory_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 18.1.2) Amount of memory reserved on SE for WAF learning. This can be atmost 5% of SE memory. (Units: MB) (Default: 0)"), required=False, update_allowed=True, ) waf_learning_interval_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 18.1.2) Frequency with which SE publishes WAF learning. (Units: MIN) (Default: 10)"), required=False, update_allowed=True, ) self_se_election_schema = properties.Schema( properties.Schema.BOOLEAN, _("(Introduced in: 18.1.2) Enable SEs to elect a primary amongst themselves in the absence of a connectivity to controller. (Default: False)"), required=False, update_allowed=True, ) vip_asg_schema = properties.Schema( properties.Schema.MAP, _("(Introduced in: 18.1.2) When vip_asg is set, Vip configuration will be managed by Avi.User will be able to configure vip_asg or Vips individually at the time of create."), schema=VipAutoscaleGroup.properties_schema, required=False, update_allowed=True, ) minimum_connection_memory_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 18.1.2) Indicates the percent of memory reserved for connections. 
(Units: PERCENT) (Default: 20)"), required=False, update_allowed=True, ) shm_minimum_config_memory_schema = properties.Schema(<|fim▁hole|> update_allowed=True, ) heap_minimum_config_memory_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 18.1.2) Minimum required heap memory to apply any configuration. (Units: MB) (Default: 8)"), required=False, update_allowed=True, ) disable_se_memory_check_schema = properties.Schema( properties.Schema.BOOLEAN, _("(Introduced in: 18.1.2) If set, disable the config memory check done in service engine. (Default: False)"), required=False, update_allowed=True, ) memory_for_config_update_schema = properties.Schema( properties.Schema.NUMBER, _("(Introduced in: 18.1.2) Indicates the percent of memory reserved for config updates. (Units: PERCENT) (Default: 15)"), required=False, update_allowed=True, ) # properties list PROPERTIES = ( 'avi_version', 'name', 'description', 'max_vs_per_se', 'min_scaleout_per_vs', 'max_scaleout_per_vs', 'max_se', 'vcpus_per_se', 'memory_per_se', 'disk_per_se', 'max_cpu_usage', 'min_cpu_usage', 'se_deprovision_delay', 'auto_rebalance', 'se_name_prefix', 'vs_host_redundancy', 'vcenter_folder', 'vcenter_datastores', 'vcenter_datastores_include', 'vcenter_datastore_mode', 'vcenter_clusters', 'vcenter_hosts', 'openstack_availability_zone', 'cpu_reserve', 'mem_reserve', 'mgmt_network_uuid', 'mgmt_subnet', 'ha_mode', 'algo', 'buffer_se', 'active_standby', 'placement_mode', 'openstack_mgmt_network_name', 'openstack_mgmt_network_uuid', 'instance_flavor', 'hypervisor', 'se_dos_profile', 'auto_rebalance_interval', 'aggressive_failure_detection', 'realtime_se_metrics', 'vs_scaleout_timeout', 'vs_scalein_timeout', 'hardwaresecuritymodulegroup_uuid', 'connection_memory_percentage', 'extra_config_multiplier', 'vs_scalein_timeout_for_upgrade', 'host_attribute_key', 'host_attribute_value', 'log_disksz', 'os_reserved_memory', 'floating_intf_ip', 'hm_on_standby', 'per_app', 'enable_vmac', 'distribute_load_active_standby', 'auto_redistribute_active_standby_load', 'floating_intf_ip_se_2', 'custom_tag', 'dedicated_dispatcher_core', 'cpu_socket_affinity', 'num_flow_cores_sum_changes_to_ignore', 'least_load_core_selection', 'extra_shared_config_memory', 'se_tunnel_mode', 'openstack_availability_zones', 'service_ip_subnets', 'se_vs_hb_max_vs_in_pkt', 'se_vs_hb_max_pkts_in_batch', 'auto_rebalance_criteria', 'cloud_uuid', 'iptables', 'enable_routing', 'advertise_backend_networks', 'enable_vip_on_all_interfaces', 'se_thread_multiplier', 'async_ssl', 'async_ssl_threads', 'se_udp_encap_ipc', 'se_ipc_udp_port', 'se_remote_punt_udp_port', 'se_tunnel_udp_port', 'custom_securitygroups_mgmt', 'custom_securitygroups_data', 'archive_shm_limit', 'significant_log_throttle', 'udf_log_throttle', 'non_significant_log_throttle', 'ingress_access_mgmt', 'ingress_access_data', 'se_sb_dedicated_core', 'se_probe_port', 'se_sb_threads', 'ignore_rtt_threshold', 'waf_mempool', 'waf_mempool_size', 'se_bandwidth_type', 'license_type', 'license_tier', 'allow_burst', 'auto_rebalance_capacity_per_se', 'host_gateway_monitor', 'vss_placement', 'flow_table_new_syn_max_entries', 'minimum_required_config_memory', 'disable_csum_offloads', 'disable_gro', 'disable_tso', 'enable_hsm_priming', 'service_ip6_subnets', 'se_tracert_port_range', 'distribute_queues', 'additional_config_memory', 'vss_placement_enabled', 'enable_multi_lb', 'n_log_streaming_threads', 'free_list_size', 'max_rules_per_lb', 'max_public_ips_per_lb', 'waf_learning_memory', 'waf_learning_interval', 
'self_se_election', 'vip_asg', 'minimum_connection_memory', 'shm_minimum_config_memory', 'heap_minimum_config_memory', 'disable_se_memory_check', 'memory_for_config_update', ) # mapping of properties to their schemas properties_schema = { 'avi_version': avi_version_schema, 'name': name_schema, 'description': description_schema, 'max_vs_per_se': max_vs_per_se_schema, 'min_scaleout_per_vs': min_scaleout_per_vs_schema, 'max_scaleout_per_vs': max_scaleout_per_vs_schema, 'max_se': max_se_schema, 'vcpus_per_se': vcpus_per_se_schema, 'memory_per_se': memory_per_se_schema, 'disk_per_se': disk_per_se_schema, 'max_cpu_usage': max_cpu_usage_schema, 'min_cpu_usage': min_cpu_usage_schema, 'se_deprovision_delay': se_deprovision_delay_schema, 'auto_rebalance': auto_rebalance_schema, 'se_name_prefix': se_name_prefix_schema, 'vs_host_redundancy': vs_host_redundancy_schema, 'vcenter_folder': vcenter_folder_schema, 'vcenter_datastores': vcenter_datastores_schema, 'vcenter_datastores_include': vcenter_datastores_include_schema, 'vcenter_datastore_mode': vcenter_datastore_mode_schema, 'vcenter_clusters': vcenter_clusters_schema, 'vcenter_hosts': vcenter_hosts_schema, 'openstack_availability_zone': openstack_availability_zone_schema, 'cpu_reserve': cpu_reserve_schema, 'mem_reserve': mem_reserve_schema, 'mgmt_network_uuid': mgmt_network_uuid_schema, 'mgmt_subnet': mgmt_subnet_schema, 'ha_mode': ha_mode_schema, 'algo': algo_schema, 'buffer_se': buffer_se_schema, 'active_standby': active_standby_schema, 'placement_mode': placement_mode_schema, 'openstack_mgmt_network_name': openstack_mgmt_network_name_schema, 'openstack_mgmt_network_uuid': openstack_mgmt_network_uuid_schema, 'instance_flavor': instance_flavor_schema, 'hypervisor': hypervisor_schema, 'se_dos_profile': se_dos_profile_schema, 'auto_rebalance_interval': auto_rebalance_interval_schema, 'aggressive_failure_detection': aggressive_failure_detection_schema, 'realtime_se_metrics': realtime_se_metrics_schema, 'vs_scaleout_timeout': vs_scaleout_timeout_schema, 'vs_scalein_timeout': vs_scalein_timeout_schema, 'hardwaresecuritymodulegroup_uuid': hardwaresecuritymodulegroup_uuid_schema, 'connection_memory_percentage': connection_memory_percentage_schema, 'extra_config_multiplier': extra_config_multiplier_schema, 'vs_scalein_timeout_for_upgrade': vs_scalein_timeout_for_upgrade_schema, 'host_attribute_key': host_attribute_key_schema, 'host_attribute_value': host_attribute_value_schema, 'log_disksz': log_disksz_schema, 'os_reserved_memory': os_reserved_memory_schema, 'floating_intf_ip': floating_intf_ip_schema, 'hm_on_standby': hm_on_standby_schema, 'per_app': per_app_schema, 'enable_vmac': enable_vmac_schema, 'distribute_load_active_standby': distribute_load_active_standby_schema, 'auto_redistribute_active_standby_load': auto_redistribute_active_standby_load_schema, 'floating_intf_ip_se_2': floating_intf_ip_se_2_schema, 'custom_tag': custom_tag_schema, 'dedicated_dispatcher_core': dedicated_dispatcher_core_schema, 'cpu_socket_affinity': cpu_socket_affinity_schema, 'num_flow_cores_sum_changes_to_ignore': num_flow_cores_sum_changes_to_ignore_schema, 'least_load_core_selection': least_load_core_selection_schema, 'extra_shared_config_memory': extra_shared_config_memory_schema, 'se_tunnel_mode': se_tunnel_mode_schema, 'openstack_availability_zones': openstack_availability_zones_schema, 'service_ip_subnets': service_ip_subnets_schema, 'se_vs_hb_max_vs_in_pkt': se_vs_hb_max_vs_in_pkt_schema, 'se_vs_hb_max_pkts_in_batch': se_vs_hb_max_pkts_in_batch_schema, 
'auto_rebalance_criteria': auto_rebalance_criteria_schema, 'cloud_uuid': cloud_uuid_schema, 'iptables': iptables_schema, 'enable_routing': enable_routing_schema, 'advertise_backend_networks': advertise_backend_networks_schema, 'enable_vip_on_all_interfaces': enable_vip_on_all_interfaces_schema, 'se_thread_multiplier': se_thread_multiplier_schema, 'async_ssl': async_ssl_schema, 'async_ssl_threads': async_ssl_threads_schema, 'se_udp_encap_ipc': se_udp_encap_ipc_schema, 'se_ipc_udp_port': se_ipc_udp_port_schema, 'se_remote_punt_udp_port': se_remote_punt_udp_port_schema, 'se_tunnel_udp_port': se_tunnel_udp_port_schema, 'custom_securitygroups_mgmt': custom_securitygroups_mgmt_schema, 'custom_securitygroups_data': custom_securitygroups_data_schema, 'archive_shm_limit': archive_shm_limit_schema, 'significant_log_throttle': significant_log_throttle_schema, 'udf_log_throttle': udf_log_throttle_schema, 'non_significant_log_throttle': non_significant_log_throttle_schema, 'ingress_access_mgmt': ingress_access_mgmt_schema, 'ingress_access_data': ingress_access_data_schema, 'se_sb_dedicated_core': se_sb_dedicated_core_schema, 'se_probe_port': se_probe_port_schema, 'se_sb_threads': se_sb_threads_schema, 'ignore_rtt_threshold': ignore_rtt_threshold_schema, 'waf_mempool': waf_mempool_schema, 'waf_mempool_size': waf_mempool_size_schema, 'se_bandwidth_type': se_bandwidth_type_schema, 'license_type': license_type_schema, 'license_tier': license_tier_schema, 'allow_burst': allow_burst_schema, 'auto_rebalance_capacity_per_se': auto_rebalance_capacity_per_se_schema, 'host_gateway_monitor': host_gateway_monitor_schema, 'vss_placement': vss_placement_schema, 'flow_table_new_syn_max_entries': flow_table_new_syn_max_entries_schema, 'minimum_required_config_memory': minimum_required_config_memory_schema, 'disable_csum_offloads': disable_csum_offloads_schema, 'disable_gro': disable_gro_schema, 'disable_tso': disable_tso_schema, 'enable_hsm_priming': enable_hsm_priming_schema, 'service_ip6_subnets': service_ip6_subnets_schema, 'se_tracert_port_range': se_tracert_port_range_schema, 'distribute_queues': distribute_queues_schema, 'additional_config_memory': additional_config_memory_schema, 'vss_placement_enabled': vss_placement_enabled_schema, 'enable_multi_lb': enable_multi_lb_schema, 'n_log_streaming_threads': n_log_streaming_threads_schema, 'free_list_size': free_list_size_schema, 'max_rules_per_lb': max_rules_per_lb_schema, 'max_public_ips_per_lb': max_public_ips_per_lb_schema, 'waf_learning_memory': waf_learning_memory_schema, 'waf_learning_interval': waf_learning_interval_schema, 'self_se_election': self_se_election_schema, 'vip_asg': vip_asg_schema, 'minimum_connection_memory': minimum_connection_memory_schema, 'shm_minimum_config_memory': shm_minimum_config_memory_schema, 'heap_minimum_config_memory': heap_minimum_config_memory_schema, 'disable_se_memory_check': disable_se_memory_check_schema, 'memory_for_config_update': memory_for_config_update_schema, } # for supporting get_avi_uuid_by_name functionality field_references = { 'iptables': getattr(IptableRuleSet, 'field_references', {}), 'floating_intf_ip_se_2': getattr(IpAddr, 'field_references', {}), 'hardwaresecuritymodulegroup_uuid': 'hardwaresecuritymodulegroup', 'vcenter_hosts': getattr(VcenterHosts, 'field_references', {}), 'custom_tag': getattr(CustomTag, 'field_references', {}), 'service_ip_subnets': getattr(IpAddrPrefix, 'field_references', {}), 'mgmt_network_uuid': 'network', 'vcenter_datastores': getattr(VcenterDatastore, 'field_references', {}), 
'mgmt_subnet': getattr(IpAddrPrefix, 'field_references', {}), 'vip_asg': getattr(VipAutoscaleGroup, 'field_references', {}), 'service_ip6_subnets': getattr(IpAddrPrefix, 'field_references', {}), 'floating_intf_ip': getattr(IpAddr, 'field_references', {}), 'se_tracert_port_range': getattr(PortRange, 'field_references', {}), 'vcenter_clusters': getattr(VcenterClusters, 'field_references', {}), 'se_dos_profile': getattr(DosThresholdProfile, 'field_references', {}), 'realtime_se_metrics': getattr(MetricsRealTimeUpdate, 'field_references', {}), 'vss_placement': getattr(VssPlacement, 'field_references', {}), } unique_keys = { 'iptables': getattr(IptableRuleSet, 'unique_keys', {}), 'floating_intf_ip_se_2': getattr(IpAddr, 'unique_keys', {}), 'vcenter_hosts': getattr(VcenterHosts, 'unique_keys', {}), 'custom_tag': getattr(CustomTag, 'unique_keys', {}), 'service_ip_subnets': getattr(IpAddrPrefix, 'unique_keys', {}), 'realtime_se_metrics': getattr(MetricsRealTimeUpdate, 'unique_keys', {}), 'vcenter_datastores': getattr(VcenterDatastore, 'unique_keys', {}), 'mgmt_subnet': getattr(IpAddrPrefix, 'unique_keys', {}), 'vip_asg': getattr(VipAutoscaleGroup, 'unique_keys', {}), 'service_ip6_subnets': getattr(IpAddrPrefix, 'unique_keys', {}), 'floating_intf_ip': getattr(IpAddr, 'unique_keys', {}), 'se_tracert_port_range': getattr(PortRange, 'unique_keys', {}), 'vcenter_clusters': getattr(VcenterClusters, 'unique_keys', {}), 'se_dos_profile': getattr(DosThresholdProfile, 'unique_keys', {}), 'vss_placement': getattr(VssPlacement, 'unique_keys', {}), } def resource_mapping(): return { 'Avi::LBaaS::ServiceEngineGroup': ServiceEngineGroup, }<|fim▁end|>
properties.Schema.NUMBER, _("(Introduced in: 18.1.2) Minimum required shared memory to apply any configuration. (Units: MB) (Default: 4)"), required=False,
<|file_name|>urls.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from django.conf.urls.defaults import patterns, url<|fim▁hole|>urlpatterns = patterns('', url('^json/get/$', 'django_notify.views.get_notifications', name='json_get', kwargs={}), url('^json/mark-read/$', 'django_notify.views.mark_read', name='json_mark_read_base', kwargs={}), url('^json/mark-read/(\d+)/$', 'django_notify.views.mark_read', name='json_mark_read', kwargs={}), url('^goto/(?P<notification_id>\d+)/$', 'django_notify.views.goto', name='goto', kwargs={}), url('^goto/$', 'django_notify.views.goto', name='goto_base', kwargs={}), ) def get_pattern(app_name="notify", namespace="notify"): """Every url resolution takes place as "notify:view_name". https://docs.djangoproject.com/en/dev/topics/http/urls/#topics-http-reversing-url-namespaces """ return urlpatterns, app_name, namespace<|fim▁end|>
<|file_name|>user-socket.routes.spec.ts<|end_file_name|><|fim▁begin|>import * as io from "socket.io-client"; import {UserSocketRoutes} from "./user-socket.routes"; import {SocketServer} from "../../../socketio/socket.server"; import {Server} from "../../../core/server"; import {UserComponent} from "../component/user.component"; import {Query, QueryParameter, QueryElement, QueryOperator} from "../../../core/public/query"; import {UserModel} from "../public/user.model"; import {config} from "../../../../config"; import {UserRoutes} from "../public/user.routes"; import {SocketRequest} from "../../../socketio/socket.request"; import {DummyUsers} from "../repository/user-mongoose.repository.spec"; class MockUserComponent implements UserComponent { get(request: SocketRequest<Query>): void { request.socket.emit(UserRoutes.get, request.value); } add(request: SocketRequest<UserModel>): void { request.socket.emit(UserRoutes.add, request.value); } update(request: SocketRequest<UserModel>): void { request.socket.emit(UserRoutes.update, request.value); } test(request: SocketRequest<any>): void { request.socket.emit("ping", request.value); } } describe("User routes for SocketIO", () => { let server: Server = new SocketServer(); let userRoute: UserSocketRoutes; let clientSocket: SocketIOClient.Socket; beforeEach(done => { server.start().then(() => { userRoute = new UserSocketRoutes(server, new MockUserComponent()); done(); }); }); afterEach(done => { clientSocket.on("disconnect", () => { server.stop() .then(done); }); clientSocket.close(); }); it("should successfully route get requests", done => { let query = new Query([new QueryParameter("name", [new QueryElement(QueryOperator.EqualTo, "bob")])]); clientSocket = io.connect(`${config.server.protocol}://${config.server.uri}:${config.server.port}/${UserRoutes.namespace}`); clientSocket.emit(UserRoutes.get, {token: "abc", query: query}); clientSocket.on(UserRoutes.get, (response: Query) => { expect(response.queryParameters[0].queryElements[0].operands).toEqual("bob"); done(); }); }); it("should successfully route add requests", done => { let user = DummyUsers.newUserOne; clientSocket = io.connect(`${config.server.protocol}://${config.server.uri}:${config.server.port}/${UserRoutes.namespace}`); clientSocket.emit(UserRoutes.add, {token: DummyUsers.membershipUser, user: user}); clientSocket.on(UserRoutes.add, (response: UserModel) => { expect(response.name).toEqual(user.name); done(); }); }); it("should successfully route update requests", done => { let user = DummyUsers.newUserOne; clientSocket = io.connect(`${config.server.protocol}://${config.server.uri}:${config.server.port}/${UserRoutes.namespace}`); clientSocket.emit(UserRoutes.update, {token: DummyUsers.membershipUser, user: user}); clientSocket.on(UserRoutes.update, (response: UserModel) => { expect(response.email).toEqual(user.email); done(); });<|fim▁hole|>
}); });<|fim▁end|>
<|file_name|>GameBoardUtil.js<|end_file_name|><|fim▁begin|>define(function(require) { var Checker = require("checkers/controller/Checker"), GameBoard = require("checkers/controller/GameBoard"), GameSpace = require("checkers/controller/GameSpace"); var instance = null; function GameBoardUtil() {<|fim▁hole|> var getInstance = function() { if (instance === null) { instance = new GameBoardUtil(); } return instance; } GameBoardUtil.prototype.getValidMoves = function(checker, gameBoard, posDir) { var validMoves = new Array(); $.merge(validMoves, this.getEmptySpaceMoves(checker, gameBoard, posDir)); $.merge(validMoves, this.getJumpMoves(checker, gameBoard, posDir)); return validMoves; } GameBoardUtil.prototype.getEmptySpaceMoves = function(checker, gameBoard, posDir) { var emptySpaceMoves = new Array(); var row = checker.getRow() + posDir; // Checks left move if (this.isValidMove(row, checker.getColumn() - 1)) { var gameSpace = gameBoard.getGameSpace(row, checker.getColumn() - 1) if (gameSpace.isEmpty()) { emptySpaceMoves.push(gameSpace); } } // Checks right move if (this.isValidMove(row, checker.getColumn() + 1)) { var gameSpace = gameBoard.getGameSpace(row, checker.getColumn() + 1); if (gameSpace.isEmpty()) { emptySpaceMoves.push(gameSpace); } } if (checker.isKing()) { var kRow = checker.getRow() - posDir; // Checks left move if (this.isValidMove(kRow, checker.getColumn() - 1)) { var gameSpace = gameBoard.getGameSpace(kRow, checker.getColumn() - 1) if (gameSpace.isEmpty()) { emptySpaceMoves.push(gameSpace); } } // Checks right move if (this.isValidMove(kRow, checker.getColumn() + 1)) { var gameSpace = gameBoard.getGameSpace(kRow, checker.getColumn() + 1); if (gameSpace.isEmpty()) { emptySpaceMoves.push(gameSpace); } } } return emptySpaceMoves; } GameBoardUtil.prototype.isValidMove = function(row, column) { if (row < 0 || row >= GameBoard.NUMSQUARES || column < 0 || column >= GameBoard.NUMSQUARES) { return false; } return true; } GameBoardUtil.prototype.getJumpMoves = function(checker, gameBoard, posDir) { var jumpMoves = new Array(); var row = checker.getRow() + posDir * 2; // Checks left jump move if (this.isValidMove(row, checker.getColumn() - 2)) { var gameSpace = gameBoard.getGameSpace(row, checker.getColumn() - 2); if (gameSpace.isEmpty()) { var jumpedGameSpace = gameBoard.getGameSpace(row - posDir, checker.getColumn() - 1); if (!jumpedGameSpace.isEmpty() && jumpedGameSpace.getChecker().getPlayerId() != checker.getPlayerId()) { jumpMoves.push(gameSpace); } } } // Checks right jump move if (this.isValidMove(row, checker.getColumn() + 2)) { var gameSpace = gameBoard.getGameSpace(row, checker.getColumn() + 2); if (gameSpace.isEmpty()) { var jumpedGameSpace = gameBoard.getGameSpace(row - posDir, checker.getColumn() + 1); if (!jumpedGameSpace.isEmpty() && jumpedGameSpace.getChecker().getPlayerId() != checker.getPlayerId()) { jumpMoves.push(gameSpace); } } } if (checker.isKing()) { // Checks left jump move var kRow = checker.getRow() - posDir * 2; if (this.isValidMove(kRow, checker.getColumn() - 2)) { var gameSpace = gameBoard.getGameSpace(kRow, checker.getColumn() - 2); if (gameSpace.isEmpty()) { var jumpedGameSpace = gameBoard.getGameSpace(kRow + posDir, checker.getColumn() - 1); if (!jumpedGameSpace.isEmpty() && jumpedGameSpace.getChecker().getPlayerId() != checker.getPlayerId()) { jumpMoves.push(gameSpace); } } } // Checks right jump move if (this.isValidMove(kRow, checker.getColumn() + 2)) { var gameSpace = gameBoard.getGameSpace(kRow, checker.getColumn() + 2); if (gameSpace.isEmpty()) { 
var jumpedGameSpace = gameBoard.getGameSpace(kRow + posDir, checker.getColumn() + 1); if (!jumpedGameSpace.isEmpty() && jumpedGameSpace.getChecker().getPlayerId() != checker.getPlayerId()) { jumpMoves.push(gameSpace); } } } } return jumpMoves; } return ({getInstance:getInstance}); });<|fim▁end|>
}
<|file_name|>genetic_searcher.py<|end_file_name|><|fim▁begin|>import logging import numpy as np from ray.tune.automl.search_policy import AutoMLSearcher logger = logging.getLogger(__name__) LOGGING_PREFIX = "[GENETIC SEARCH] " class GeneticSearch(AutoMLSearcher): """Implement the genetic search. Keep a collection of top-K parameter permutations as base genes, then apply selection, crossover, and mutation to them to generate new genes (a.k.a. a new generation). Hopefully, the performance of the top population increases generation by generation. """ def __init__(self, search_space, reward_attr, max_generation=2, population_size=10, population_decay=0.95, keep_top_ratio=0.2, selection_bound=0.4, crossover_bound=0.4): """ Initialize GeneticSearcher. Args: search_space (SearchSpace): The space to search. reward_attr: The attribute name of the reward in the result. max_generation: Max iteration number of the genetic search. population_size: Number of trials of the initial generation. population_decay: Decay ratio of the population size for the next generation. keep_top_ratio: Ratio of top-performing trials to keep as candidates. selection_bound: Threshold for performing selection. crossover_bound: Threshold for performing crossover. """ super(GeneticSearch, self).__init__(search_space, reward_attr) self._cur_generation = 1 self._max_generation = max_generation self._population_size = population_size self._population_decay = population_decay self._keep_top_ratio = keep_top_ratio self._selection_bound = selection_bound self._crossover_bound = crossover_bound self._cur_config_list = [] self._cur_encoding_list = [] for _ in range(population_size): one_hot = self.search_space.generate_random_one_hot_encoding() self._cur_encoding_list.append(one_hot) self._cur_config_list.append( self.search_space.apply_one_hot_encoding(one_hot)) def _select(self): population_size = len(self._cur_config_list) logger.info( LOGGING_PREFIX + "Generate the %sth generation, population=%s", self._cur_generation, population_size) return self._cur_config_list, self._cur_encoding_list def _feedback(self, trials): self._cur_generation += 1 if self._cur_generation > self._max_generation: return AutoMLSearcher.TERMINATE sorted_trials = sorted( trials, key=lambda t: t.best_result[self.reward_attr], reverse=True) self._cur_encoding_list = self._next_generation(sorted_trials) self._cur_config_list = [] for one_hot in self._cur_encoding_list: self._cur_config_list.append( self.search_space.apply_one_hot_encoding(one_hot)) return AutoMLSearcher.CONTINUE def _next_generation(self, sorted_trials): """Generate genes (encodings) for the next generation. Use the top K (_keep_top_ratio) trials of the last generation as candidates to generate the next generation. The action applied to each new slot is selection, crossover, or mutation, chosen according to the corresponding ratios (_selection_bound, _crossover_bound). Args: sorted_trials: List of finished trials with the top-performing ones first.
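Examples: >>> # Sizing sketch only (assumed numbers, not taken from a real run): >>> # with len(sorted_trials) == 10, population_decay == 0.95 and >>> # keep_top_ratio == 0.2, num_population = int(max(10 * 0.95, 3)) = 9 >>> # and top_num = int(max(9 * 0.2, 2)) = 2, so the top 2 genes are >>> # carried over unchanged and the remaining 7 slots are filled by >>> # selection, crossover or mutation.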
Returns: A list of new genes (encodings) """ candidate = [] next_generation = [] num_population = self._next_population_size(len(sorted_trials)) top_num = int(max(num_population * self._keep_top_ratio, 2)) for i in range(top_num): candidate.append(sorted_trials[i].extra_arg) next_generation.append(sorted_trials[i].extra_arg) for i in range(top_num, num_population): flip_coin = np.random.uniform() if flip_coin < self._selection_bound: next_generation.append(GeneticSearch._selection(candidate)) else: if flip_coin < self._selection_bound + self._crossover_bound: next_generation.append(GeneticSearch._crossover(candidate)) else: next_generation.append(GeneticSearch._mutation(candidate)) return next_generation def _next_population_size(self, last_population_size): """Calculate the population size of the next generation. Intuitively, the population should decay after each iteration since it should converge. It also decreases the total resource required. Args: last_population_size: The last population size. Returns: The new population size. """ # TODO: implement a generic resource allocation algorithm. return int(max(last_population_size * self._population_decay, 3)) @staticmethod def _selection(candidate): """Perform selection action to candidates. For example, new gene = sample_1 + the 5th bit of sample_2. Args: candidate: List of candidate genes (encodings). Examples: >>> # Genes that represent 3 parameters >>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]]) >>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]]) >>> new_gene = _selection([gene1, gene2]) >>> # new_gene could be gene1 overwritten with the >>> # 2nd parameter of gene2 >>> # in which case: >>> # new_gene[0] = gene1[0] >>> # new_gene[1] = gene2[1] >>> # new_gene[2] = gene1[2] Returns: New gene (encoding) """ sample_index1 = np.random.choice(len(candidate)) sample_index2 = np.random.choice(len(candidate)) sample_1 = candidate[sample_index1] sample_2 = candidate[sample_index2] select_index = np.random.choice(len(sample_1)) logger.info( LOGGING_PREFIX + "Perform selection from %sth to %sth at index=%s", sample_index2, sample_index1, select_index) next_gen = [] for i in range(len(sample_1)): if i == select_index: next_gen.append(sample_2[i]) else: next_gen.append(sample_1[i]) return next_gen @staticmethod def _crossover(candidate): """Perform crossover action to candidates. For example, new gene = 60% sample_1 + 40% sample_2. Args: candidate: List of candidate genes (encodings). Examples: >>> # Genes that represent 3 parameters<|fim▁hole|> >>> new_gene = _crossover([gene1, gene2]) >>> # new_gene could be the first [n=1] parameters of >>> # gene1 + the rest of gene2 >>> # in which case: >>> # new_gene[0] = gene1[0] >>> # new_gene[1] = gene2[1] >>> # new_gene[2] = gene2[2] Returns: New gene (encoding) """ sample_index1 = np.random.choice(len(candidate)) sample_index2 = np.random.choice(len(candidate)) sample_1 = candidate[sample_index1] sample_2 = candidate[sample_index2] cross_index = int(len(sample_1) * np.random.uniform(low=0.3, high=0.7)) logger.info( LOGGING_PREFIX + "Perform crossover between %sth and %sth at index=%s", sample_index1, sample_index2, cross_index) next_gen = [] for i in range(len(sample_1)): if i > cross_index: next_gen.append(sample_2[i]) else: next_gen.append(sample_1[i]) return next_gen @staticmethod def _mutation(candidate, rate=0.1): """Perform mutation action to candidates. For example, randomly change 10% of the original sample. Args: candidate: List of candidate genes (encodings).
rate: Percentage of mutation bits Examples: >>> # Genes that represent 3 parameters >>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]]) >>> new_gene = _mutation([gene1]) >>> # new_gene could be the gene1 with the 3rd parameter changed >>> # new_gene[0] = gene1[0] >>> # new_gene[1] = gene1[1] >>> # new_gene[2] = [0, 1] != gene1[2] Returns: New gene (encoding) """ sample_index = np.random.choice(len(candidate)) sample = candidate[sample_index] idx_list = [] for i in range(int(max(len(sample) * rate, 1))): idx = np.random.choice(len(sample)) idx_list.append(idx) field = sample[idx] # one-hot encoding field[np.argmax(field)] = 0 bit = np.random.choice(field.shape[0]) field[bit] = 1 logger.info(LOGGING_PREFIX + "Perform mutation on %sth at index=%s", sample_index, str(idx_list)) return sample<|fim▁end|>
>>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]]) >>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]])
<|file_name|>new-post.js<|end_file_name|><|fim▁begin|>import Ember from 'ember'; export default Ember.Route.extend({ /** * Instance of the posts service. * * @property postsService * @type {Ember.Service} */ postsService: Ember.inject.service('posts'), /** * Instance of the tags service. * * @property tagsService * @type {Ember.Service} */ tagsService: Ember.inject.service('tags'), /** * Instance of the types service. * * @property typesService * @type {Ember.Service} */ typesService: Ember.inject.service('types'), /** * Instance of the images service. * * @property imagesService * @type {Ember.Service} */ imagesService: Ember.inject.service('images'), /** * Instance of the alerts service. * * @property alertsService * @type {Ember.Service} */ alertsService: Ember.inject.service('alerts'), beforeModel() { // Preload tags, types and images let promises = [ this.get('tagsService').getAll(), this.get('typesService').getAll(), this.get('imagesService').getAll() ]; return Ember.RSVP.all(promises); }, model() { return { post: this.get('postsService').createNew() }; }, actions: { /** * View the specified post page. * * @method viewPost * @return {Void} */ viewPost(id) { this.get('alertsService').success('Your post has been saved'); this.transitionTo('post', { postId: id }); }<|fim▁hole|><|fim▁end|>
} });
<|file_name|>manifest.go<|end_file_name|><|fim▁begin|>package manifest import ( "encoding/json" "fmt" "log" "reflect" "regexp" "strconv" "strings" "github.com/fatih/color" "github.com/servehub/serve/manifest/processor" "github.com/servehub/utils" "github.com/servehub/utils/gabs" ) type Manifest struct { tree *gabs.Container } func (m Manifest) String() string { return m.tree.StringIndent("", " ") } func (m Manifest) Unwrap() interface{} { return m.tree.Data() } func (m Manifest) Has(path string) bool { v := m.tree.Path(path).Data() return v != nil && v != "" } func (m Manifest) GetString(path string) string { return fmt.Sprintf("%v", m.tree.Path(path).Data()) } func (m Manifest) GetStringOr(path string, defaultVal string) string { if m.tree.ExistsP(path) { return m.GetString(path) } return defaultVal } func (m Manifest) GetFloat(path string) float64 { f, err := strconv.ParseFloat(m.GetString(path), 64) if err != nil { log.Fatalf("Error on parse float64 '%v' from: %v", path, m.GetString(path)) } return f } func (m Manifest) GetInt(path string) int { i, err := strconv.Atoi(m.GetString(path)) if err != nil { log.Fatalf("Error on parse integer '%v' from: %v", path, m.GetString(path)) } return i } func (m Manifest) GetIntOr(path string, defaultVal int) int { if m.tree.ExistsP(path) { return m.GetInt(path) } return defaultVal } func (m Manifest) GetBool(path string) bool { return strings.ToLower(m.GetString(path)) == "true" } func (m Manifest) GetMap(path string) map[string]Manifest { out := make(map[string]Manifest) tree := m.tree if len(path) > 0 && path != "." && path != "/" { tree = m.tree.Path(path) } mmap, err := tree.ChildrenMap() if err != nil { log.Fatalf("Error get map '%v' from: %v. Error: %s", path, m.tree.Path(path).Data(), err) }<|fim▁hole|> return out } func (m Manifest) GetArray(path string) []Manifest { out := make([]Manifest, 0) arr, err := m.tree.Path(path).Children() if err != nil { log.Fatalf("Error get array `%v` from: %v", path, m.tree.Path(path).Data()) } for _, v := range arr { out = append(out, Manifest{v}) } return out } func (m Manifest) GetArrayForce(path string) []interface{} { out := make([]interface{}, 0) arr, err := m.tree.Path(path).Children() if err != nil && m.tree.ExistsP(path) { arr = append(arr, m.tree.Path(path)) } for _, v := range arr { out = append(out, v.Data()) } return out } func (m Manifest) GetTree(path string) Manifest { return Manifest{m.tree.Path(path)} } func (m Manifest) Set(path string, value interface{}) { m.tree.SetP(value, path) } func (m Manifest) ArrayAppend(path string, value interface{}) { m.tree.ArrayAppendP(value, path) } func (m Manifest) FindPlugins(plugin string) ([]PluginData, error) { tree := m.tree.Path(plugin) result := make([]PluginData, 0) if tree.Data() == nil { return nil, fmt.Errorf("Plugin `%s` not found in manifest!", plugin) } if _, ok := tree.Data().([]interface{}); ok { arr, _ := tree.Children() for _, item := range arr { if _, ok := item.Data().(string); ok { result = append(result, makePluginPair(plugin, item)) } else if res, err := item.ChildrenMap(); err == nil { if len(res) == 1 { for subplugin, data := range res { result = append(result, makePluginPair(plugin+"."+subplugin, data)) break } } else if len(res) == 0 && !PluginRegestry.Has(plugin) { // skip subplugin with empty data } else { result = append(result, makePluginPair(plugin, item)) } } } } else if PluginRegestry.Has(plugin) { result = append(result, makePluginPair(plugin, tree)) } else { log.Println(color.YellowString("Plugins for `%s` section not 
specified, skip...", plugin)) } return result, nil } func (m Manifest) DelTree(path string) error { return m.tree.DeleteP(path) } func (m Manifest) GetPluginWithData(plugin string) PluginData { return makePluginPair(plugin, m.tree) } var envNameRegex = regexp.MustCompile("\\W") func (m Manifest) ToEnvMap(prefix string) map[string]string { result := make(map[string]string) if children, err := m.tree.ChildrenMap(); err == nil { for k, child := range children { result = utils.MergeMaps(result, Manifest{child}.ToEnvMap(prefix+strings.ToUpper(envNameRegex.ReplaceAllString(k, "_"))+"_")) } } else if children, err := m.tree.Children(); err == nil { for i, child := range children { result = utils.MergeMaps(result, Manifest{child}.ToEnvMap(prefix+strconv.Itoa(i)+"_")) } } else if m.tree.Data() != nil { result[prefix[:len(prefix)-1]] = fmt.Sprintf("%v", m.tree.Data()) } return result } func Load(path string, vars map[string]string) *Manifest { tree, err := gabs.LoadYamlFile(path) if err != nil { log.Fatalln("Error on load file:", err) } for k, v := range vars { tree.Set(v, "vars", k) } for _, proc := range processor.GetAll() { if err := proc.Process(tree); err != nil { log.Fatalf("Error in processor '%v': %v. \n\nManifest: %s", reflect.ValueOf(proc).Type().Name(), err, tree.StringIndent("", " ")) } } return &Manifest{tree} } func LoadJSON(path string) *Manifest { tree, err := gabs.ParseJSONFile(path) if err != nil { log.Fatalf("Error on load json file '%s': %v\n", path, err) } return &Manifest{tree} } func ParseJSON(json string) *Manifest { tree, err := gabs.ParseJSON([]byte(json)) if err != nil { log.Fatalf("Error on parse json '%s': %v\n", json, err) } return &Manifest{tree} } func makePluginPair(plugin string, data *gabs.Container) PluginData { if s, ok := data.Data().(string); ok { obj := gabs.New() ns := strings.Split(plugin, ".") obj.Set(s, ns[len(ns)-1]) data = obj } else { var cpy interface{} bs, _ := json.Marshal(data.Data()) json.Unmarshal(bs, &cpy) data.Set(cpy) } return PluginData{plugin, PluginRegestry.Get(plugin), Manifest{data}} }<|fim▁end|>
for k, v := range mmap { out[k] = Manifest{v} }
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # # testgdt documentation build configuration file, created by # sphinx-quickstart on Sun Feb 12 17:11:03 2012. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'testgdt' copyright = u'2012, gdt' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1' # The full version, including alpha/beta/rc tags. release = '0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". 
#html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'templateclassdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'testgdt.tex', u'testgdt Documentation', u'gdt', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True <|fim▁hole|># (source start file, name, description, authors, manual section). man_pages = [ ('index', 'templateclass', u'testgdt Documentation', [u'gdt'], 1) ]<|fim▁end|>
# -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples
<|file_name|>server.py<|end_file_name|><|fim▁begin|>import tornado.ioloop import tornado.web import socket import os import sys import time import signal # import datetime import h5py from datetime import datetime, date import tornado.httpserver from browserhandler import BrowseHandler from annotationhandler import AnnotationHandler from projecthandler import ProjectHandler from helphandler import HelpHandler from defaulthandler import DefaultHandler base_path = os.path.dirname(__file__) sys.path.insert(1,os.path.join(base_path, '../common')) from utility import Utility from database import Database from paths import Paths MAX_WAIT_SECONDS_BEFORE_SHUTDOWN = 0.5 class Application(tornado.web.Application): def __init__(self): handlers = [ (r"/", DefaultHandler), (r"/browse.*", BrowseHandler), (r"/project.*", ProjectHandler), (r"/annotate.*", AnnotationHandler), (r'/help*', HelpHandler), (r'/settings/(.*)', tornado.web.StaticFileHandler, {'path': 'resources/settings/'}), (r'/js/(.*)', tornado.web.StaticFileHandler, {'path': 'resources/js/'}), (r'/js/vendors/(.*)', tornado.web.StaticFileHandler, {'path': 'resources/js/vendors/'}), (r'/css/(.*)', tornado.web.StaticFileHandler, {'path': 'resources/css/'}), (r'/uikit/(.*)', tornado.web.StaticFileHandler, {'path': 'resources/uikit/'}), (r'/images/(.*)', tornado.web.StaticFileHandler, {'path': 'resources/images/'}), (r'/open-iconic/(.*)', tornado.web.StaticFileHandler, {'path': 'resources/open-iconic/'}), (r'/input/(.*)', tornado.web.StaticFileHandler, {'path': 'resources/input/'}), (r'/train/(.*)', tornado.web.StaticFileHandler, {'path': 'resources/input/'}), (r'/validate/(.*)', tornado.web.StaticFileHandler, {'path': 'resources/input/'}), #(r"/annotate/(.*)", AnnotationHandler, dict(logic=self)), ] settings = { "template_path": 'resources', "static_path": 'resources', } tornado.web.Application.__init__(self, handlers, **settings) import numpy as np class Server(): def __init__(self, name, port): self.name = name self.port = port application = Application() self.http_server = tornado.httpserver.HTTPServer( application ) hostname = socket.gethostname() print 'hostname:', hostname self.ip = hostname #socket.gethostbyname( hostname )<|fim▁hole|> Utility.print_msg ('\033[93m'+ self.name + ' running/' + '\033[0m', True) Utility.print_msg ('.') Utility.print_msg ('open ' + '\033[92m'+'http://' + self.ip + ':' + str(self.port) + '/' + '\033[0m', True) Utility.print_msg ('.') def start(self): self.print_status() self.http_server.listen( self.port ) tornado.ioloop.IOLoop.instance().start() def stop(self): msg = 'shutting down %s in %s seconds'%(self.name, MAX_WAIT_SECONDS_BEFORE_SHUTDOWN) Utility.print_msg ('\033[93m'+ msg + '\033[0m', True) io_loop = tornado.ioloop.IOLoop.instance() deadline = time.time() + MAX_WAIT_SECONDS_BEFORE_SHUTDOWN def stop_loop(): now = time.time() if now < deadline and (io_loop._callbacks or io_loop._timeouts): io_loop.add_timeout(now + 1, stop_loop) else: io_loop.stop() Utility.print_msg ('\033[93m'+ 'shutdown' + '\033[0m', True, 'done') stop_loop() def sig_handler(sig, frame): msg = 'caught interrupt signal: %s'%sig Utility.print_msg ('\033[93m'+ msg + '\033[0m', True) tornado.ioloop.IOLoop.instance().add_callback(shutdown) def shutdown(): server.stop() def main(): global server signal.signal(signal.SIGTERM, sig_handler) signal.signal(signal.SIGINT, sig_handler) port = 8888 name = 'icon webserver' server = Server(name, port) server.start() if __name__ == "__main__": main()<|fim▁end|>
def print_status(self): Utility.print_msg ('.')
<|file_name|>app.module.js<|end_file_name|><|fim▁begin|>"use strict"; var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __metadata = (this && this.__metadata) || function (k, v) { if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v); }; var core_1 = require('@angular/core'); var platform_browser_1 = require('@angular/platform-browser'); var forms_1 = require('@angular/forms'); var app_component_1 = require('./app.component'); var people_service_1 = require('./people.service'); var people_list_component_1 = require('./people-list.component'); var person_details_component_1 = require('./person-details.component'); var person_component_1 = require('./+person/person.component'); var http_1 = require('@angular/http'); var angular_datatables_module_1 = require('./shared/modules/datatables/angular-datatables/angular-datatables.module'); var app_routes_1 = require('./app.routes'); var AppModule = (function () { function AppModule() { }<|fim▁hole|> bootstrap: [app_component_1.AppComponent], providers: [people_service_1.PeopleService] }), __metadata('design:paramtypes', []) ], AppModule); return AppModule; }()); exports.AppModule = AppModule; //# sourceMappingURL=app.module.js.map<|fim▁end|>
AppModule = __decorate([ core_1.NgModule({ imports: [platform_browser_1.BrowserModule, app_routes_1.routing, forms_1.FormsModule, http_1.HttpModule, angular_datatables_module_1.DataTablesModule], declarations: [app_component_1.AppComponent, people_list_component_1.PeopleListComponent, person_details_component_1.PersonDetailsComponent, person_component_1.PersonComponent],
<|file_name|>issue-18501.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Test that we don't ICE when inlining a function from another // crate that uses a trait method as a value due to incorrectly // translating the def ID of the trait during AST decoding. <|fim▁hole|>// aux-build:issue-18501.rs // pretty-expanded FIXME #23616 extern crate issue_18501 as issue; fn main() { issue::pass_method(); }<|fim▁end|>
<|file_name|>label_break_value_illegal_uses.rs<|end_file_name|><|fim▁begin|>// Copyright 2018 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(label_break_value)] <|fim▁hole|>} fn labeled_if() { if true 'b: {} //~ ERROR expected `{`, found `'b` } fn labeled_else() { if true {} else 'b: {} //~ ERROR expected `{`, found `'b` } fn labeled_match() { match false 'b: {} //~ ERROR expected one of `.`, `?`, `{`, or an operator } pub fn main() {}<|fim▁end|>
// These are forbidden occurrences of label-break-value fn labeled_unsafe() { unsafe 'b: {} //~ ERROR expected one of `extern`, `fn`, or `{`
<|file_name|>strutil.cc<|end_file_name|><|fim▁begin|>// Copyright 2015 Google Inc. All rights reserved // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build ignore #include "strutil.h" #include <ctype.h> #include <limits.h> #include <unistd.h> #include <algorithm> #include <stack> #include <utility> #include "log.h" WordScanner::Iterator& WordScanner::Iterator::operator++() { int len = static_cast<int>(in->size()); for (s = i; s < len; s++) { if (!isspace((*in)[s])) break; } if (s == len) { in = NULL; s = 0; i = 0; return *this; } for (i = s; i < len; i++) { if (isspace((*in)[i])) break; } return *this; } StringPiece WordScanner::Iterator::operator*() const { return in->substr(s, i - s); } WordScanner::WordScanner(StringPiece in) : in_(in) { } WordScanner::Iterator WordScanner::begin() const { Iterator iter; iter.in = &in_; iter.s = 0; iter.i = 0; ++iter; return iter; } WordScanner::Iterator WordScanner::end() const { Iterator iter; iter.in = NULL; iter.s = 0; iter.i = 0; return iter; } void WordScanner::Split(vector<StringPiece>* o) { for (StringPiece t : *this) o->push_back(t); } WordWriter::WordWriter(string* o) : out_(o), needs_space_(false) { } void WordWriter::MaybeAddWhitespace() { if (needs_space_) { out_->push_back(' '); } else { needs_space_ = true; } } void WordWriter::Write(StringPiece s) { MaybeAddWhitespace(); AppendString(s, out_); } ScopedTerminator::ScopedTerminator(StringPiece s) : s_(s), c_(s[s.size()]) { const_cast<char*>(s_.data())[s_.size()] = '\0'; } ScopedTerminator::~ScopedTerminator() { const_cast<char*>(s_.data())[s_.size()] = c_; } void AppendString(StringPiece str, string* out) { out->append(str.begin(), str.end()); } bool HasPrefix(StringPiece str, StringPiece prefix) { ssize_t size_diff = str.size() - prefix.size(); return size_diff >= 0 && str.substr(0, prefix.size()) == prefix; } bool HasSuffix(StringPiece str, StringPiece suffix) { ssize_t size_diff = str.size() - suffix.size(); return size_diff >= 0 && str.substr(size_diff) == suffix; } bool HasWord(StringPiece str, StringPiece w) { size_t found = str.find(w); if (found == string::npos) return false; if (found != 0 && !isspace(str[found-1])) return false; size_t end = found + w.size(); if (end != str.size() && !isspace(str[end])) return false; return true; } StringPiece TrimSuffix(StringPiece str, StringPiece suffix) { ssize_t size_diff = str.size() - suffix.size(); if (size_diff < 0 || str.substr(size_diff) != suffix) return str; return str.substr(0, size_diff); } Pattern::Pattern(StringPiece pat) : pat_(pat), percent_index_(pat.find('%')) { } bool Pattern::Match(StringPiece str) const { if (percent_index_ == string::npos) return str == pat_; return MatchImpl(str); } bool Pattern::MatchImpl(StringPiece str) const { return (HasPrefix(str, pat_.substr(0, percent_index_)) && HasSuffix(str, pat_.substr(percent_index_ + 1))); } StringPiece Pattern::Stem(StringPiece str) const { if (!Match(str)) return ""; return str.substr(percent_index_, str.size() - (pat_.size() - percent_index_ - 1)); } void 
Pattern::AppendSubst(StringPiece str, StringPiece subst, string* out) const { if (percent_index_ == string::npos) { if (str == pat_) { AppendString(subst, out); return; } else { AppendString(str, out); return; } } if (MatchImpl(str)) { size_t subst_percent_index = subst.find('%'); if (subst_percent_index == string::npos) { AppendString(subst, out); return; } else { AppendString(subst.substr(0, subst_percent_index), out); AppendString(str.substr(percent_index_, str.size() - pat_.size() + 1), out); AppendString(subst.substr(subst_percent_index + 1), out); return; } } AppendString(str, out); } void Pattern::AppendSubstRef(StringPiece str, StringPiece subst, string* out) const { if (percent_index_ != string::npos && subst.find('%') != string::npos) { AppendSubst(str, subst, out); return; } StringPiece s = TrimSuffix(str, pat_); out->append(s.begin(), s.end()); out->append(subst.begin(), subst.end()); } string NoLineBreak(const string& s) { size_t index = s.find('\n'); if (index == string::npos) return s; string r = s; while (index != string::npos) { r = r.substr(0, index) + "\\n" + r.substr(index + 1); index = r.find('\n', index + 2); } return r; } StringPiece TrimLeftSpace(StringPiece s) { size_t i = 0; for (; i < s.size(); i++) { if (isspace(s[i])) continue; char n = s.get(i+1); if (s[i] == '\\' && (n == '\r' || n == '\n')) { i++; continue; } break; } return s.substr(i, s.size() - i); } StringPiece TrimRightSpace(StringPiece s) { size_t i = 0; for (; i < s.size(); i++) { char c = s[s.size() - 1 - i]; if (isspace(c)) { if ((c == '\r' || c == '\n') && s.get(s.size() - 2 - i) == '\\') i++; continue; } break; } return s.substr(0, s.size() - i); } StringPiece TrimSpace(StringPiece s) { return TrimRightSpace(TrimLeftSpace(s)); } StringPiece Dirname(StringPiece s) { size_t found = s.rfind('/'); if (found == string::npos) return StringPiece("."); if (found == 0) return StringPiece(""); return s.substr(0, found); } StringPiece Basename(StringPiece s) { size_t found = s.rfind('/'); if (found == string::npos || found == 0) return s; return s.substr(found + 1); } StringPiece GetExt(StringPiece s) { size_t found = s.rfind('.'); if (found == string::npos) return StringPiece(""); return s.substr(found); } StringPiece StripExt(StringPiece s) { size_t slash_index = s.rfind('/'); size_t found = s.rfind('.'); if (found == string::npos || (slash_index != string::npos && found < slash_index)) return s; return s.substr(0, found); } void NormalizePath(string* o) { if (o->empty()) return; size_t start_index = 0; if ((*o)[0] == '/') start_index++; size_t j = start_index; size_t prev_start = start_index; for (size_t i = start_index; i <= o->size(); i++) { char c = (*o)[i]; if (c != '/' && c != 0) { (*o)[j] = c; j++; continue; } StringPiece prev_dir = StringPiece(o->data() + prev_start, j - prev_start); if (prev_dir == ".") { j--; } else if (prev_dir == ".." && j != 2 /* .. */) { if (j == 3) { // /.. 
j = start_index; } else { size_t orig_j = j; j -= 4; j = o->rfind('/', j); if (j == string::npos) { j = start_index; } else { j++; } if (StringPiece(o->data() + j, 3) == "../") { j = orig_j; (*o)[j] = c; j++; } } } else if (!prev_dir.empty()) { if (c) { (*o)[j] = c; j++; } } prev_start = j; } if (j > 1 && (*o)[j-1] == '/') j--; o->resize(j); } void AbsPath(StringPiece s, string* o) { if (s.get(0) == '/') { o->clear(); } else { char buf[PATH_MAX]; if (!getcwd(buf, PATH_MAX)) { fprintf(stderr, "getcwd failed\n"); CHECK(false); } CHECK(buf[0] == '/'); *o = buf; *o += '/'; } AppendString(s, o); NormalizePath(o); } template<typename Cond> size_t FindOutsideParenImpl(StringPiece s, Cond cond) { bool prev_backslash = false; stack<char> paren_stack; for (size_t i = 0; i < s.size(); i++) { char c = s[i]; if (cond(c) && paren_stack.empty() && !prev_backslash) { return i; } switch (c) { case '(': paren_stack.push(')'); break; case '{': paren_stack.push('}'); break; case ')': case '}': if (!paren_stack.empty() && c == paren_stack.top()) { paren_stack.pop(); } break; } prev_backslash = c == '\\' && !prev_backslash; } return string::npos; } size_t FindOutsideParen(StringPiece s, char c) { return FindOutsideParenImpl(s, [&c](char d){return c == d;}); } size_t FindTwoOutsideParen(StringPiece s, char c1, char c2) { return FindOutsideParenImpl(s, [&c1, &c2](char d){ return d == c1 || d == c2; }); } size_t FindThreeOutsideParen(StringPiece s, char c1, char c2, char c3) { return FindOutsideParenImpl(s, [&c1, &c2, &c3](char d){ return d == c1 || d == c2 || d == c3; }); } size_t FindEndOfLine(StringPiece s, size_t e, size_t* lf_cnt) { bool prev_backslash = false; for (; e < s.size(); e++) { char c = s[e]; if (c == '\\') { prev_backslash = !prev_backslash; } else if (c == '\n') { ++*lf_cnt; if (!prev_backslash) { return e; } prev_backslash = false; } else if (c != '\r') { prev_backslash = false; } } return e; } StringPiece TrimLeadingCurdir(StringPiece s) { while (s.substr(0, 2) == "./") s = s.substr(2); return s; } void FormatForCommandSubstitution(string* s) { while ((*s)[s->size()-1] == '\n') s->pop_back(); for (size_t i = 0; i < s->size(); i++) { if ((*s)[i] == '\n') (*s)[i] = ' '; } } string SortWordsInString(StringPiece s) { vector<string> toks; for (StringPiece tok : WordScanner(s)) { toks.push_back(tok.as_string()); } sort(toks.begin(), toks.end()); return JoinStrings(toks, " "); } string ConcatDir(StringPiece b, StringPiece n) { string r; if (!b.empty()) { b.AppendToString(&r); r += '/'; } n.AppendToString(&r); NormalizePath(&r); return r; } string EchoEscape(const string str) { const char *in = str.c_str(); string buf; for (; *in; in++) { switch(*in) { case '\\': buf += "\\\\\\\\"; break; case '\n': buf += "\\n"; break; case '"': buf += "\\\""; break; default: buf += *in; } }<|fim▁hole|><|fim▁end|>
return buf; }
<|file_name|>Accessor.js<|end_file_name|><|fim▁begin|>export default class ModelAccessor { constructor() { this.value = 10<|fim▁hole|> get highCount() { return this.value + 100 } set highCount(v) { this.value = v - 100 } get doubleHigh() { return this.highCount * 2 } incr() { this.value++ } }<|fim▁end|>
}
<|file_name|>xc_path8.py<|end_file_name|><|fim▁begin|>import zstackwoodpecker.test_state as ts_header
import os

TestAction = ts_header.TestAction

def path():
	return dict(initial_formation="template5", checking_point=8, path_list=[
		[TestAction.create_vm, 'vm1', 'flag=ceph'],
		[TestAction.create_volume, 'volume1', 'flag=ceph,scsi'],
		[TestAction.attach_volume, 'vm1', 'volume1'],
		[TestAction.create_volume, 'volume2', 'flag=ceph,scsi'],
		[TestAction.attach_volume, 'vm1', 'volume2'],
		[TestAction.create_volume, 'volume3', 'flag=ceph,scsi'],
		[TestAction.attach_volume, 'vm1', 'volume3'],
		[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
		[TestAction.clone_vm, 'vm1', 'vm2'],
		[TestAction.create_volume_backup, 'volume2', 'volume2-backup1'],
		[TestAction.stop_vm, 'vm1'],
		[TestAction.use_volume_backup, 'volume2-backup1'],
		[TestAction.start_vm, 'vm1'],
		[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot5'],
		[TestAction.delete_vm_snapshot, 'vm1-snapshot1'],
		[TestAction.create_vm_snapshot, 'vm2', 'vm2-snapshot9'],
		[TestAction.clone_vm, 'vm1', 'vm3', 'full'],
		[TestAction.delete_volume_snapshot, 'vm1-snapshot5'],
		[TestAction.stop_vm, 'vm2'],
		[TestAction.change_vm_image, 'vm2'],
		[TestAction.delete_vm_snapshot, 'vm2-snapshot9'],
	])

'''
The final status:
Running:['vm1', 'vm3']
Stopped:['vm2']
Enabled:['volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5', 'volume2-backup1']<|fim▁hole|>
Expunged:[]
Ha:[]
Group:
'''<|fim▁end|>
attached:['volume1', 'volume2', 'volume3', 'clone@volume1', 'clone@volume2', 'clone@volume3'] Detached:[] Deleted:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1', 'vm1-snapshot5', 'vm2-snapshot9']
<|file_name|>test_execute.py<|end_file_name|><|fim▁begin|>import os import io import shutil import tempfile import unittest from functools import partial from pathlib import Path from nbformat import validate try: from unittest.mock import patch except ImportError: from mock import patch from .. import engines from ..log import logger from ..iorw import load_notebook_node from ..utils import chdir from ..execute import execute_notebook from ..exceptions import PapermillExecutionError from . import get_notebook_path, kernel_name execute_notebook = partial(execute_notebook, kernel_name=kernel_name) class TestNotebookHelpers(unittest.TestCase): def setUp(self): self.test_dir = tempfile.mkdtemp() self.notebook_name = 'simple_execute.ipynb' self.notebook_path = get_notebook_path(self.notebook_name) self.nb_test_executed_fname = os.path.join( self.test_dir, 'output_{}'.format(self.notebook_name) ) def tearDown(self): shutil.rmtree(self.test_dir) @patch(engines.__name__ + '.PapermillNotebookClient') def test_start_timeout(self, preproc_mock): execute_notebook(self.notebook_path, self.nb_test_executed_fname, start_timeout=123) args, kwargs = preproc_mock.call_args expected = [ ('timeout', None), ('startup_timeout', 123), ('kernel_name', kernel_name), ('log', logger), ] actual = set([(key, kwargs[key]) for key in kwargs]) self.assertTrue( set(expected).issubset(actual), msg='Expected arguments {} are not a subset of actual {}'.format(expected, actual), ) @patch(engines.__name__ + '.PapermillNotebookClient') def test_default_start_timeout(self, preproc_mock): execute_notebook(self.notebook_path, self.nb_test_executed_fname) args, kwargs = preproc_mock.call_args expected = [ ('timeout', None), ('startup_timeout', 60), ('kernel_name', kernel_name), ('log', logger), ] actual = set([(key, kwargs[key]) for key in kwargs]) self.assertTrue( set(expected).issubset(actual), msg='Expected arguments {} are not a subset of actual {}'.format(expected, actual), ) def test_cell_insertion(self): execute_notebook(self.notebook_path, self.nb_test_executed_fname, {'msg': 'Hello'}) test_nb = load_notebook_node(self.nb_test_executed_fname) self.assertListEqual( test_nb.cells[1].get('source').split('\n'), ['# Parameters', 'msg = "Hello"', ''] ) self.assertEqual(test_nb.metadata.papermill.parameters, {'msg': 'Hello'}) def test_no_tags(self): notebook_name = 'no_parameters.ipynb' nb_test_executed_fname = os.path.join(self.test_dir, 'output_{}'.format(notebook_name)) execute_notebook(get_notebook_path(notebook_name), nb_test_executed_fname, {'msg': 'Hello'}) test_nb = load_notebook_node(nb_test_executed_fname) self.assertListEqual( test_nb.cells[0].get('source').split('\n'), ['# Parameters', 'msg = "Hello"', ''] ) self.assertEqual(test_nb.metadata.papermill.parameters, {'msg': 'Hello'}) def test_quoted_params(self): execute_notebook(self.notebook_path, self.nb_test_executed_fname, {'msg': '"Hello"'}) test_nb = load_notebook_node(self.nb_test_executed_fname) self.assertListEqual( test_nb.cells[1].get('source').split('\n'), ['# Parameters', r'msg = "\"Hello\""', ''] ) self.assertEqual(test_nb.metadata.papermill.parameters, {'msg': '"Hello"'}) def test_backslash_params(self): execute_notebook( self.notebook_path, self.nb_test_executed_fname, {'foo': r'do\ not\ crash'} ) test_nb = load_notebook_node(self.nb_test_executed_fname) self.assertListEqual( test_nb.cells[1].get('source').split('\n'), ['# Parameters', r'foo = "do\\ not\\ crash"', ''], ) self.assertEqual(test_nb.metadata.papermill.parameters, {'foo': r'do\ not\ crash'}) 
def test_backslash_quote_params(self): execute_notebook(self.notebook_path, self.nb_test_executed_fname, {'foo': r'bar=\"baz\"'}) test_nb = load_notebook_node(self.nb_test_executed_fname) self.assertListEqual( test_nb.cells[1].get('source').split('\n'), ['# Parameters', r'foo = "bar=\\\"baz\\\""', ''], ) self.assertEqual(test_nb.metadata.papermill.parameters, {'foo': r'bar=\"baz\"'}) def test_double_backslash_quote_params(self): execute_notebook(self.notebook_path, self.nb_test_executed_fname, {'foo': r'\\"bar\\"'}) test_nb = load_notebook_node(self.nb_test_executed_fname) self.assertListEqual( test_nb.cells[1].get('source').split('\n'), ['# Parameters', r'foo = "\\\\\"bar\\\\\""', ''], ) self.assertEqual(test_nb.metadata.papermill.parameters, {'foo': r'\\"bar\\"'}) def test_prepare_only(self): for example in ['broken1.ipynb', 'keyboard_interrupt.ipynb']: path = get_notebook_path(example) result_path = os.path.join(self.test_dir, example) # Should not raise as we don't execute the notebook at all execute_notebook(path, result_path, {'foo': r'do\ not\ crash'}, prepare_only=True) nb = load_notebook_node(result_path) self.assertEqual(nb.cells[0].cell_type, "code") self.assertEqual( nb.cells[0].get('source').split('\n'), ['# Parameters', r'foo = "do\\ not\\ crash"', ''], ) <|fim▁hole|> def tearDown(self): shutil.rmtree(self.test_dir) def test(self): path = get_notebook_path('broken1.ipynb') # check that the notebook has two existing marker cells, so that this test is sure to be # validating the removal logic (the markers are simulatin an error in the first code cell # that has since been fixed) original_nb = load_notebook_node(path) self.assertEqual(original_nb.cells[0].metadata["tags"], ["papermill-error-cell-tag"]) self.assertIn("In [1]", original_nb.cells[0].source) self.assertEqual(original_nb.cells[2].metadata["tags"], ["papermill-error-cell-tag"]) result_path = os.path.join(self.test_dir, 'broken1.ipynb') with self.assertRaises(PapermillExecutionError): execute_notebook(path, result_path) nb = load_notebook_node(result_path) self.assertEqual(nb.cells[0].cell_type, "markdown") self.assertRegex( nb.cells[0].source, r'^<span .*<a href="#papermill-error-cell".*In \[2\].*</span>$' ) self.assertEqual(nb.cells[0].metadata["tags"], ["papermill-error-cell-tag"]) self.assertEqual(nb.cells[1].cell_type, "markdown") self.assertEqual(nb.cells[2].execution_count, 1) self.assertEqual(nb.cells[3].cell_type, "markdown") self.assertEqual(nb.cells[4].cell_type, "markdown") self.assertEqual(nb.cells[5].cell_type, "markdown") self.assertRegex(nb.cells[5].source, '<span id="papermill-error-cell" .*</span>') self.assertEqual(nb.cells[5].metadata["tags"], ["papermill-error-cell-tag"]) self.assertEqual(nb.cells[6].execution_count, 2) self.assertEqual(nb.cells[6].outputs[0].output_type, 'error') self.assertEqual(nb.cells[7].execution_count, None) # double check the removal (the new cells above should be the only two tagged ones) self.assertEqual( sum("papermill-error-cell-tag" in cell.metadata.get("tags", []) for cell in nb.cells), 2 ) class TestBrokenNotebook2(unittest.TestCase): def setUp(self): self.test_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.test_dir) def test(self): path = get_notebook_path('broken2.ipynb') result_path = os.path.join(self.test_dir, 'broken2.ipynb') with self.assertRaises(PapermillExecutionError): execute_notebook(path, result_path) nb = load_notebook_node(result_path) self.assertEqual(nb.cells[0].cell_type, "markdown") self.assertRegex( nb.cells[0].source, r'^<span 
.*<a href="#papermill-error-cell">.*In \[2\].*</span>$' ) self.assertEqual(nb.cells[1].execution_count, 1) self.assertEqual(nb.cells[2].cell_type, "markdown") self.assertRegex(nb.cells[2].source, '<span id="papermill-error-cell" .*</span>') self.assertEqual(nb.cells[3].execution_count, 2) self.assertEqual(nb.cells[3].outputs[0].output_type, 'display_data') self.assertEqual(nb.cells[3].outputs[1].output_type, 'error') self.assertEqual(nb.cells[4].execution_count, None) class TestReportMode(unittest.TestCase): def setUp(self): self.test_dir = tempfile.mkdtemp() self.notebook_name = 'report_mode_test.ipynb' self.notebook_path = get_notebook_path(self.notebook_name) self.nb_test_executed_fname = os.path.join( self.test_dir, 'output_{}'.format(self.notebook_name) ) def tearDown(self): shutil.rmtree(self.test_dir) def test_report_mode(self): nb = execute_notebook( self.notebook_path, self.nb_test_executed_fname, {'a': 0}, report_mode=True ) for cell in nb.cells: if cell.cell_type == 'code': self.assertEqual(cell.metadata.get('jupyter', {}).get('source_hidden'), True) class TestCWD(unittest.TestCase): def setUp(self): self.test_dir = tempfile.mkdtemp() self.base_test_dir = tempfile.mkdtemp() self.check_notebook_name = 'read_check.ipynb' self.check_notebook_path = os.path.join(self.base_test_dir, 'read_check.ipynb') # Setup read paths so base_test_dir has check_notebook_name shutil.copyfile(get_notebook_path(self.check_notebook_name), self.check_notebook_path) with io.open(os.path.join(self.test_dir, 'check.txt'), 'w', encoding='utf-8') as f: # Needed for read_check to pass f.write(u'exists') self.simple_notebook_name = 'simple_execute.ipynb' self.simple_notebook_path = os.path.join(self.base_test_dir, 'simple_execute.ipynb') # Setup read paths so base_test_dir has simple_notebook_name shutil.copyfile(get_notebook_path(self.simple_notebook_name), self.simple_notebook_path) self.nb_test_executed_fname = 'test_output.ipynb' def tearDown(self): shutil.rmtree(self.test_dir) shutil.rmtree(self.base_test_dir) def test_local_save_ignores_cwd_assignment(self): with chdir(self.base_test_dir): # Both paths are relative execute_notebook( self.simple_notebook_name, self.nb_test_executed_fname, cwd=self.test_dir ) self.assertTrue( os.path.isfile(os.path.join(self.base_test_dir, self.nb_test_executed_fname)) ) def test_execution_respects_cwd_assignment(self): with chdir(self.base_test_dir): # Both paths are relative execute_notebook( self.check_notebook_name, self.nb_test_executed_fname, cwd=self.test_dir ) self.assertTrue( os.path.isfile(os.path.join(self.base_test_dir, self.nb_test_executed_fname)) ) def test_pathlib_paths(self): # Copy of test_execution_respects_cwd_assignment but with `Path`s with chdir(self.base_test_dir): execute_notebook( Path(self.check_notebook_name), Path(self.nb_test_executed_fname), cwd=Path(self.test_dir), ) self.assertTrue(Path(self.base_test_dir).joinpath(self.nb_test_executed_fname).exists()) class TestSysExit(unittest.TestCase): def setUp(self): self.test_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.test_dir) def test_sys_exit(self): notebook_name = 'sysexit.ipynb' result_path = os.path.join(self.test_dir, 'output_{}'.format(notebook_name)) execute_notebook(get_notebook_path(notebook_name), result_path) nb = load_notebook_node(result_path) self.assertEqual(nb.cells[0].cell_type, "code") self.assertEqual(nb.cells[0].execution_count, 1) self.assertEqual(nb.cells[1].execution_count, 2) self.assertEqual(nb.cells[1].outputs[0].output_type, 'error') 
self.assertEqual(nb.cells[1].outputs[0].ename, 'SystemExit') self.assertEqual(nb.cells[1].outputs[0].evalue, '') self.assertEqual(nb.cells[2].execution_count, None) def test_sys_exit0(self): notebook_name = 'sysexit0.ipynb' result_path = os.path.join(self.test_dir, 'output_{}'.format(notebook_name)) execute_notebook(get_notebook_path(notebook_name), result_path) nb = load_notebook_node(result_path) self.assertEqual(nb.cells[0].cell_type, "code") self.assertEqual(nb.cells[0].execution_count, 1) self.assertEqual(nb.cells[1].execution_count, 2) self.assertEqual(nb.cells[1].outputs[0].output_type, 'error') self.assertEqual(nb.cells[1].outputs[0].ename, 'SystemExit') self.assertEqual(nb.cells[1].outputs[0].evalue, '0') self.assertEqual(nb.cells[2].execution_count, None) def test_sys_exit1(self): notebook_name = 'sysexit1.ipynb' result_path = os.path.join(self.test_dir, 'output_{}'.format(notebook_name)) with self.assertRaises(PapermillExecutionError): execute_notebook(get_notebook_path(notebook_name), result_path) nb = load_notebook_node(result_path) self.assertEqual(nb.cells[0].cell_type, "markdown") self.assertRegex( nb.cells[0].source, r'^<span .*<a href="#papermill-error-cell".*In \[2\].*</span>$' ) self.assertEqual(nb.cells[1].execution_count, 1) self.assertEqual(nb.cells[2].cell_type, "markdown") self.assertRegex(nb.cells[2].source, '<span id="papermill-error-cell" .*</span>') self.assertEqual(nb.cells[3].execution_count, 2) self.assertEqual(nb.cells[3].outputs[0].output_type, 'error') self.assertEqual(nb.cells[4].execution_count, None) def test_system_exit(self): notebook_name = 'systemexit.ipynb' result_path = os.path.join(self.test_dir, 'output_{}'.format(notebook_name)) execute_notebook(get_notebook_path(notebook_name), result_path) nb = load_notebook_node(result_path) self.assertEqual(nb.cells[0].cell_type, "code") self.assertEqual(nb.cells[0].execution_count, 1) self.assertEqual(nb.cells[1].execution_count, 2) self.assertEqual(nb.cells[1].outputs[0].output_type, 'error') self.assertEqual(nb.cells[1].outputs[0].ename, 'SystemExit') self.assertEqual(nb.cells[1].outputs[0].evalue, '') self.assertEqual(nb.cells[2].execution_count, None) class TestNotebookValidation(unittest.TestCase): def setUp(self): self.test_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.test_dir) def test_from_version_4_4_upgrades(self): notebook_name = 'nb_version_4.4.ipynb' result_path = os.path.join(self.test_dir, 'output_{}'.format(notebook_name)) execute_notebook(get_notebook_path(notebook_name), result_path, {'var': 'It works'}) nb = load_notebook_node(result_path) validate(nb) class TestMinimalNotebook(unittest.TestCase): def setUp(self): self.test_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.test_dir) def test_no_v3_language_backport(self): notebook_name = 'blank-vscode.ipynb' result_path = os.path.join(self.test_dir, 'output_{}'.format(notebook_name)) execute_notebook(get_notebook_path(notebook_name), result_path, {'var': 'It works'}) nb = load_notebook_node(result_path) validate(nb)<|fim▁end|>
class TestBrokenNotebook1(unittest.TestCase): def setUp(self): self.test_dir = tempfile.mkdtemp()
<|file_name|>index.ts<|end_file_name|><|fim▁begin|><|fim▁hole|>export * from './home';<|fim▁end|>
export * from './about'; export * from './no-content';
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>extern crate libc; use std::os::raw; use std::ffi::CString; use std::mem::{transmute, size_of}; use std::default::Default; #[link(name = "kleeRuntest")] extern {<|fim▁hole|> fn klee_make_symbolic(data: *mut raw::c_void, length: libc::size_t, name: *const raw::c_char); fn klee_set_forking(state: bool); } pub unsafe fn any(data: *mut raw::c_void, length: usize, name: &str) { klee_make_symbolic(data, length as libc::size_t, CString::new(name).unwrap().as_ptr()); } pub fn set_forking(state: bool) { unsafe { klee_set_forking(state); } } pub fn some<T: Default>(name: &str) -> T { let mut new_symbol = T::default(); symbol(&mut new_symbol, name); return new_symbol; } pub fn symbol<T>(data: *mut T, name: &str) { unsafe{ any(transmute(data), size_of::<T>(), name); } }<|fim▁end|>
<|file_name|>choice_30.test.py<|end_file_name|><|fim▁begin|><|fim▁hole|>""" output = """ """<|fim▁end|>
input = """ :- not b. b :- a, not a. a v c.
<|file_name|>json.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # # This file is part of Zenodo. # Copyright (C) 2016 CERN. # # Zenodo is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Zenodo is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Zenodo; if not, write to the # Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307, USA. # # In applying this license, CERN does not # waive the privileges and immunities granted to it by virtue of its status # as an Intergovernmental Organization or submit itself to any jurisdiction. """Zenodo JSON schema.""" from __future__ import absolute_import, print_function, unicode_literals from flask_babelex import lazy_gettext as _ from invenio_pidrelations.serializers.utils import serialize_relations from invenio_pidstore.models import PersistentIdentifier from marshmallow import Schema, ValidationError, fields, missing, \ validates_schema from werkzeug.routing import BuildError from zenodo.modules.records.utils import is_deposit from zenodo.modules.stats.utils import get_record_stats from ...models import AccessRight, ObjectType from . import common class StrictKeysSchema(Schema): """Ensure only valid keys exists.""" @validates_schema(pass_original=True) def check_unknown_fields(self, data, original_data): """Check for unknown keys.""" for key in original_data: if key not in self.fields: raise ValidationError('Unknown field name {}'.format(key)) class ResourceTypeSchema(StrictKeysSchema): """Resource type schema.""" type = fields.Str( required=True, error_messages=dict( required=_('Type must be specified.') ), ) subtype = fields.Str() openaire_subtype = fields.Str() title = fields.Method('get_title', dump_only=True) def get_title(self, obj): """Get title.""" obj = ObjectType.get_by_dict(obj) return obj['title']['en'] if obj else missing @validates_schema def validate_data(self, data): """Validate resource type.""" obj = ObjectType.get_by_dict(data) if obj is None: raise ValidationError(_('Invalid resource type.')) def dump_openaire_type(self, obj): """Get OpenAIRE subtype.""" acc = obj.get('access_right') if acc: return AccessRight.as_category(acc) return missing class JournalSchemaV1(StrictKeysSchema): """Schema for a journal.""" issue = fields.Str() pages = fields.Str() title = fields.Str() volume = fields.Str() year = fields.Str() class MeetingSchemaV1(StrictKeysSchema): """Schema for a meeting.""" title = fields.Str() acronym = fields.Str() dates = fields.Str() place = fields.Str() url = fields.Str() session = fields.Str() session_part = fields.Str() class ImprintSchemaV1(StrictKeysSchema): """Schema for imprint.""" publisher = fields.Str() place = fields.Str() isbn = fields.Str() class PartOfSchemaV1(StrictKeysSchema): """Schema for imprint.""" pages = fields.Str() title = fields.Str() class ThesisSchemaV1(StrictKeysSchema): """Schema for thesis.""" university = fields.Str() supervisors = fields.Nested(common.PersonSchemaV1, many=True) class FunderSchemaV1(StrictKeysSchema): """Schema for a funder.""" doi = fields.Str() name = fields.Str(dump_only=True) acronyms = 
fields.List(fields.Str(), dump_only=True) links = fields.Method('get_funder_url', dump_only=True) def get_funder_url(self, obj): """Get grant url.""" return dict(self=common.api_link_for('funder', id=obj['doi'])) class GrantSchemaV1(StrictKeysSchema): """Schema for a grant.""" title = fields.Str(dump_only=True) code = fields.Str() program = fields.Str(dump_only=True) acronym = fields.Str(dump_only=True) funder = fields.Nested(FunderSchemaV1) links = fields.Method('get_grant_url', dump_only=True) def get_grant_url(self, obj): """Get grant url.""" return dict(self=common.api_link_for('grant', id=obj['internal_id'])) class CommunitiesSchemaV1(StrictKeysSchema): """Schema for communities.""" id = fields.Function(lambda x: x) class ActionSchemaV1(StrictKeysSchema): """Schema for a actions.""" prereserve_doi = fields.Str(load_only=True) class FilesSchema(Schema): """Files metadata schema.""" type = fields.String() checksum = fields.String() size = fields.Integer() bucket = fields.String() key = fields.String() links = fields.Method('get_links') def get_links(self, obj): """Get links.""" return { 'self': common.api_link_for( 'object', bucket=obj['bucket'], key=obj['key']) } class OwnerSchema(StrictKeysSchema): """Schema for owners. Allows us to later introduce more properties for an owner. """ id = fields.Function(lambda x: x) class LicenseSchemaV1(StrictKeysSchema): """Schema for license. Allows us to later introduce more properties for an owner. """ id = fields.Str(attribute='id') class MetadataSchemaV1(common.CommonMetadataSchemaV1): """Schema for a record.""" resource_type = fields.Nested(ResourceTypeSchema) access_right_category = fields.Method( 'dump_access_right_category', dump_only=True) license = fields.Nested(LicenseSchemaV1) communities = fields.Nested(CommunitiesSchemaV1, many=True) grants = fields.Nested(GrantSchemaV1, many=True) journal = fields.Nested(JournalSchemaV1) meeting = fields.Nested(MeetingSchemaV1) imprint = fields.Nested(ImprintSchemaV1) part_of = fields.Nested(PartOfSchemaV1) thesis = fields.Nested(ThesisSchemaV1) relations = fields.Method('dump_relations') def dump_access_right_category(self, obj): """Get access right category.""" acc = obj.get('access_right') if acc: return AccessRight.as_category(acc) return missing def dump_relations(self, obj): """Dump the relations to a dictionary.""" if 'relations' in obj: return obj['relations'] if is_deposit(obj): pid = self.context['pid'] return serialize_relations(pid) else: pid = self.context['pid'] return serialize_relations(pid) <|fim▁hole|> files = fields.Nested( FilesSchema, many=True, dump_only=True, attribute='files') metadata = fields.Nested(MetadataSchemaV1) owners = fields.List( fields.Integer, attribute='metadata.owners', dump_only=True) revision = fields.Integer(dump_only=True) updated = fields.Str(dump_only=True) stats = fields.Method('dump_stats') def dump_stats(self, obj): """Dump the stats to a dictionary.""" if '_stats' in obj.get('metadata', {}): return obj['metadata'].get('_stats', {}) else: pid = self.context.get('pid') if isinstance(pid, PersistentIdentifier): return get_record_stats(pid.object_uuid, False) else: return None class DepositSchemaV1(RecordSchemaV1): """Deposit schema. Same as the Record schema except for some few extra additions. """ files = None owners = fields.Nested( OwnerSchema, dump_only=True, attribute='metadata._deposit.owners', many=True) status = fields.Str(dump_only=True, attribute='metadata._deposit.status') recid = fields.Str(dump_only=True, attribute='metadata.recid')<|fim▁end|>
class RecordSchemaV1(common.CommonRecordSchemaV1): """Schema for records v1 in JSON."""
<|file_name|>admin.py<|end_file_name|><|fim▁begin|># ----------------------------------------------------------------------------- # Karajlug.org # Copyright (C) 2010 Karajlug community # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # ----------------------------------------------------------------------------- from django.contrib import admin from django.utils.translation import ugettext as _ from .models import Project, Repository class ProjectAdmin(admin.ModelAdmin): """ Admin interface class for project model """ list_display = ("__unicode__", "version", "home", "license", "vcs", "creator", "weight") ordering = ("weight", ) list_editable = ("home", "weight")<|fim▁hole|> list_filter = ("creator", ) def save_model(self, request, obj, form, change): obj.creator = request.user obj.save() class RepositoryAdmin(admin.ModelAdmin): """ Admin interface class for repository model """ list_display = ("project", "address", "weight") list_editable = ("address", "weight") ordering = ("weight", ) search_fields = ("project", ) list_filter = ("project", ) admin.site.register(Project, ProjectAdmin) admin.site.register(Repository, RepositoryAdmin)<|fim▁end|>
search_fields = ("name", ) prepopulated_fields = {"slug": ("name",)}
<|file_name|>train_classifier.py<|end_file_name|><|fim▁begin|># Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from tflite_runtime.interpreter import load_delegate from tflite_runtime.interpreter import Interpreter import glob import os import subprocess import matplotlib.pyplot as plt import numpy as np <|fim▁hole|>input_size = (224, 224) input_shape = (224, 224, 3) batch_size = 1 ########################################################################################### # Load pretrained model ########################################################################################### base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape, include_top=False, classifier_activation='softmax', weights='imagenet') # Freeze first 100 layers base_model.trainable = True for layer in base_model.layers[:100]: layer.trainable = False model = tf.keras.Sequential([ base_model, tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'), tf.keras.layers.Dropout(0.2), tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dense(units=2, activation='softmax') ]) model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.RMSprop(lr=1e-5), metrics=['accuracy']) print(model.summary()) ########################################################################################### # Prepare Datasets ########################################################################################### train_datagen = ImageDataGenerator(rescale=1./255, zoom_range=0.3, rotation_range=50, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, horizontal_flip=True, fill_mode='nearest') val_datagen = ImageDataGenerator(rescale=1./255) dataset_path = './dataset' train_set_path = os.path.join(dataset_path, 'train') val_set_path = os.path.join(dataset_path, 'test') batch_size = 64 train_generator = train_datagen.flow_from_directory(train_set_path, target_size=input_size, batch_size=batch_size, class_mode='categorical') val_generator = val_datagen.flow_from_directory(val_set_path, target_size=input_size, batch_size=batch_size, class_mode='categorical') epochs = 15 history = model.fit(train_generator, steps_per_epoch=train_generator.n // batch_size, epochs=epochs, validation_data=val_generator, validation_steps=val_generator.n // batch_size, verbose=1) ########################################################################################### # Plotting Train Data ########################################################################################### acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] plt.figure(figsize=(8, 8)) plt.subplot(2, 1, 1) plt.plot(acc, label='Training Accuracy') plt.plot(val_acc, label='Validation Accuracy') plt.legend(loc='lower right') plt.ylabel('Accuracy') plt.ylim([min(plt.ylim()), 1]) plt.title('Training and Validation Accuracy') plt.subplot(2, 1, 2) plt.plot(loss, label='Training Loss') plt.plot(val_loss, label='Validation Loss') 
plt.legend(loc='upper right') plt.ylabel('Cross Entropy') plt.ylim([0, 1.0]) plt.title('Training and Validation Loss') plt.xlabel('epoch') # plt.show() plt.savefig('history.png') ########################################################################################### # Post Training Quantization ########################################################################################### def representative_data_gen(): dataset_list = tf.data.Dataset.list_files('./dataset/test/*/*') for i in range(100): image = next(iter(dataset_list)) image = tf.io.read_file(image) image = tf.io.decode_jpeg(image, channels=3) image = tf.image.resize(image, input_size) image = tf.cast(image / 255., tf.float32) image = tf.expand_dims(image, 0) yield [image] model.input.set_shape((1,) + model.input.shape[1:]) converter = tf.lite.TFLiteConverter.from_keras_model(model) converter.optimizations = [tf.lite.Optimize.DEFAULT] converter.representative_dataset = representative_data_gen converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] converter.target_spec.supported_types = [tf.int8] converter.inference_input_type = tf.uint8 converter.inference_output_type = tf.uint8 tflite_model = converter.convert() ########################################################################################### # Saving models ########################################################################################### model.save('classifier.h5') with open('classifier.tflite', 'wb') as f: f.write(tflite_model) ########################################################################################### # Evaluating h5 ########################################################################################### batch_images, batch_labels = next(val_generator) labels = '\n'.join(sorted(train_generator.class_indices.keys())) with open('classifier_labels.txt', 'w') as f: f.write(labels) logits = model(batch_images) prediction = np.argmax(logits, axis=1) truth = np.argmax(batch_labels, axis=1) keras_accuracy = tf.keras.metrics.Accuracy() keras_accuracy(prediction, truth) ########################################################################################### # Evaluating tflite ########################################################################################### def set_input_tensor(interpreter, input): input_details = interpreter.get_input_details()[0] tensor_index = input_details['index'] input_tensor = interpreter.tensor(tensor_index)()[0] scale, zero_point = input_details['quantization'] input_tensor[:, :] = np.uint8(input / scale + zero_point) def classify_image(interpreter, input): set_input_tensor(interpreter, input) interpreter.invoke() output_details = interpreter.get_output_details()[0] output = interpreter.get_tensor(output_details['index']) scale, zero_point = output_details['quantization'] output = scale * (output - zero_point) top_1 = np.argmax(output) return top_1 interpreter = tf.lite.Interpreter('classifier.tflite') interpreter.allocate_tensors() # Collect all inference predictions in a list batch_prediction = [] batch_truth = np.argmax(batch_labels, axis=1) for i in range(len(batch_images)): prediction = classify_image(interpreter, batch_images[i]) batch_prediction.append(prediction) # Compare all predictions to the ground truth tflite_accuracy = tf.keras.metrics.Accuracy() tflite_accuracy(batch_prediction, batch_truth) ########################################################################################### # Compiles model 
###########################################################################################
subprocess.call(["edgetpu_compiler", "--show_operations", "classifier.tflite"])

###########################################################################################
# Evaluating Edge TPU tflite
###########################################################################################
interpreter = Interpreter('classifier_edgetpu.tflite',
                          experimental_delegates=[
                              load_delegate('libedgetpu.so.1.0')])
interpreter.allocate_tensors()

# Collect all inference predictions in a list
batch_prediction = []
batch_truth = np.argmax(batch_labels, axis=1)

for i in range(len(batch_images)):
    prediction = classify_image(interpreter, batch_images[i])
    batch_prediction.append(prediction)

# Compare all predictions to the ground truth
edgetpu_tflite_accuracy = tf.keras.metrics.Accuracy()
edgetpu_tflite_accuracy(batch_prediction, batch_truth)

###########################################################################################
# Show Results
###########################################################################################
print("Raw model accuracy: {:.2%}".format(keras_accuracy.result()))
print("Quant TF Lite accuracy: {:.2%}".format(tflite_accuracy.result()))
print("EdgeTpu Quant TF Lite accuracy: {:.2%}".format(
    edgetpu_tflite_accuracy.result()))<|fim▁end|>
import tensorflow as tf from tensorflow import keras from tensorflow.keras.preprocessing.image import ImageDataGenerator
<|file_name|>get_relatives.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python from phamerator import * from phamerator.phamerator_manage_db import * from phamerator.db_conf import db_conf import sys, getpass GeneID = sys.argv[1] password = getpass.getpass() db = raw_input('database: ') c = db_conf(username='root', password=password, server='134.126.132.72', db=db).get_cursor() <|fim▁hole|><|fim▁end|>
print get_relatives(c, GeneID, alignmentType='both', clustalwThreshold=0.275, blastThreshold=0.0001)
<|file_name|>mainserver.py<|end_file_name|><|fim▁begin|>import os import pwd import grp import argparse import shutil from twisted.web import server, resource, static, script from twisted.internet import reactor from zope.interface import Interface, Attribute, implements from twisted.python.components import registerAdapter from twisted.web.server import Session import globalVals import staticFiles import api class IUser(Interface): loggedIn = Attribute("Is the user logged in") userId = Attribute("Id of the user") userName = Attribute("Username") permission = Attribute("User's permission mode") _points = Attribute("Private points") points = Attribute("Real points") class User(object): def __init__(self, sess): self.loggedIn = False startedSession = False def startSession(): global startedSession if not startedSession: print "Started session adapter" registerAdapter(User, Session, IUser) startedSession = True def drop_privileges(uid_name='nobody', gid_name='nogroup'): if os.getuid() != 0: # We're not root so, like, whatever dude return # Get the uid/gid from the name running_uid = pwd.getpwnam(uid_name).pw_uid running_gid = grp.getgrnam(gid_name).gr_gid # Remove group privileges os.setgroups([]) # Try setting the new uid/gid os.setgid(running_gid) os.setuid(running_uid) # Ensure a very conservative umask old_umask = os.umask(077) def startServer(): ap = argparse.ArgumentParser(description='Server options') ap.add_argument('--clean',action='store_true',default=False) ap.add_argument('--port',type=int,default=1337) ap.add_argument('--domain',default='192.168.220.130') args = ap.parse_args() root = staticFiles.FileNoList('root') root.indexNames = ['index.rpy'] root.ignoreExt('.rpy') root.processors = {'.rpy': script.ResourceScript} <|fim▁hole|> if args.clean: try: shutil.rmtree('root/uploads') except: pass try: shutil.rmtree('root/config') except: pass os.mkdir('root/uploads/') os.mkdir('root/config/') shutil.copyfile('memeOfTheDay.png','root/uploads/memeOfTheDay.png') shutil.copyfile('admin.config','root/config/admin.config') os.system('chown -R nobody:nogroup root/uploads') os.system('chown -R nobody:nogroup root/config') globalVals.init(args,root) site = server.Site(root, logPath=b"access.log") reactor.listenTCP(args.port, site) drop_privileges('nobody','nogroup') reactor.run() if __name__ == "__main__": startServer()<|fim▁end|>
root.putChild('api', api.Api())
<|file_name|>mock.go<|end_file_name|><|fim▁begin|>// // # Mock // // A series of testing aids. // package muta import ( "fmt" "io" "github.com/leeola/muta/mutil" ) // A streamer that creates files and contents, based on the Files // and Contents slices. // // TODO: Find a way to move this into the `muta/mtesting` package. The // problem is that if this is in `muta/mtesting`, then the signature of // MockStreamer.Next becomes `Next() (*muta.FileInfo ...)`. SrcStreamer // and DestStreamer however, require that the signature of any locally // embedded library is `Next() (*FileInfo ...)` instead. // // When `muta` is imported into another library (such as external // Streamers), this appears to not be an issue, because references to // SrcStreamer become `muta.SrcStreamer`, and signatures become // `muta.FileInfo`, and so on. // // I could be way off base though - i'm not sure what to do here. type MockStreamer struct { // A slice of the file names to generate. If no Content is provided, // for an individual file (eg, if there are 5 files, but 4 contents) // the content will be automatically created as `<filename> content` Files []string // An optional slice of the contents to use for each file. These are // Shifted off one by one in the same order as files. If, after the slice // is empty, there are still more files, the contents are automatically // created as `<filename> content`. Contents []string // An optional slice of errors, to be matched up in the same way Contents // are matched to Files. Errors []error } func (s *MockStreamer) Next(inFi FileInfo, inRc io.ReadCloser) ( fi FileInfo, rc io.ReadCloser, err error) { fi = inFi rc = inRc if fi != nil {<|fim▁hole|> return } if len(s.Files) == 0 { return } file := s.Files[0] s.Files = s.Files[1:] fi = NewFileInfo(file) // Shift content off of the list if there is any, or create content // if there isn't any. Note that we don't check for an empty string, // which lets you pass an "empty" file with `Contents: []string{""}` if len(s.Contents) > 0 { rc = mutil.ByteCloser([]byte(s.Contents[0])) s.Contents = s.Contents[1:] } else { rc = mutil.ByteCloser([]byte(fmt.Sprintf("%s content", file))) } if len(s.Errors) > 0 { err = s.Errors[0] s.Errors = s.Errors[1:] } return }<|fim▁end|>
<|file_name|>git.go<|end_file_name|><|fim▁begin|>// Package services provides Github Integration. package services import ( "encoding/json" ) // Git structs type User struct { Name string Email string Username string Display_name string } type GitRepository struct { Id int Name string Full_name string Url string AbsoluteUrl string Owner User Pusher User } type GitCommit struct { Id string Message string Timestamp string Url string Author User Committer User Modified []string }<|fim▁hole|>type GitUser struct { Login string Id int Avatar_url string Type string Site_admin bool } type GitPullRequest struct { Url string Html_url string Id int State string Title string User GitUser Body string Repo GitRepository Merged bool Merged_by GitUser } type GitPayload struct { Zen string Ref string Compare string Repository GitRepository Commits []GitCommit Action string Number int Pull_request GitPullRequest Pusher User } // Return github data. func getGithubData(decoder *json.Decoder, header string) (string, string) { var gEvent GitPayload decoder.Decode(&gEvent) var event, desc string if header == "push" { event = gEvent.Repository.Name + " --> " + header + " event" repo := gEvent.Repository desc = repo.Name + ": \n" + "\nName: " + repo.Name + "\nUrl: " + repo.Url + "\nOwner: " + repo.Owner.Email + "\nCompare: " + gEvent.Compare + "\nRef: " + gEvent.Ref + "\nModified files\n" for i := 0; i < len(gEvent.Commits); i++ { commit := gEvent.Commits[i] desc += "\n* " + commit.Message + " (" + commit.Timestamp + ")" for j := 0; j < len(commit.Modified); j++ { desc += "\n * " + commit.Modified[j] } } } else if header == "pull_request" { pr := gEvent.Pull_request if gEvent.Action == "opened" { event = "New pull request for " + gEvent.Repository.Full_name + " from " + pr.User.Login } else if gEvent.Action == "closed" && pr.Merged { event = "Pull request merged by " + pr.Merged_by.Login } desc = "Title: " + pr.Title if pr.Body != "" { desc += "\nDescription: " + pr.Body } desc += "\nReview at " + pr.Html_url } else if gEvent.Zen != "" { event = "Ping! from " + gEvent.Repository.Name } return event, desc }<|fim▁end|>
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>extern crate libc; pub mod job_control; pub mod signals; use libc::{c_int, pid_t, sighandler_t}; use std::io; use std::os::unix::io::RawFd; pub(crate) const PATH_SEPARATOR: &str = ":"; pub(crate) const O_CLOEXEC: usize = libc::O_CLOEXEC as usize; pub(crate) const SIGHUP: i32 = libc::SIGHUP; pub(crate) const SIGINT: i32 = libc::SIGINT; pub(crate) const SIGTERM: i32 = libc::SIGTERM; pub(crate) const SIGCONT: i32 = libc::SIGCONT; pub(crate) const SIGSTOP: i32 = libc::SIGSTOP; pub(crate) const SIGTSTP: i32 = libc::SIGTSTP; pub(crate) const STDOUT_FILENO: i32 = libc::STDOUT_FILENO; pub(crate) const STDERR_FILENO: i32 = libc::STDERR_FILENO; pub(crate) const STDIN_FILENO: i32 = libc::STDIN_FILENO; pub(crate) fn is_root() -> bool { unsafe { libc::geteuid() == 0 } } pub unsafe fn fork() -> io::Result<u32> { cvt(libc::fork()).map(|pid| pid as u32) } pub(crate) fn getpid() -> io::Result<u32> { cvt(unsafe { libc::getpid() }).map(|pid| pid as u32) } pub(crate) fn kill(pid: u32, signal: i32) -> io::Result<()> { cvt(unsafe { libc::kill(pid as pid_t, signal as c_int) }).and(Ok(())) } pub(crate) fn killpg(pgid: u32, signal: i32) -> io::Result<()> { cvt(unsafe { libc::kill(-(pgid as pid_t), signal as c_int) }).and(Ok(())) } pub(crate) fn pipe2(flags: usize) -> io::Result<(RawFd, RawFd)> { let mut fds = [0; 2]; #[cfg(not(target_os = "macos"))] cvt(unsafe { libc::pipe2(fds.as_mut_ptr(), flags as c_int) })?; #[cfg(target_os = "macos")]<|fim▁hole|>} pub(crate) fn setpgid(pid: u32, pgid: u32) -> io::Result<()> { cvt(unsafe { libc::setpgid(pid as pid_t, pgid as pid_t) }).and(Ok(())) } #[allow(dead_code)] pub(crate) fn signal(signal: i32, handler: extern "C" fn(i32)) -> io::Result<()> { if unsafe { libc::signal(signal as c_int, handler as sighandler_t) } == libc::SIG_ERR { Err(io::Error::last_os_error()) } else { Ok(()) } } pub(crate) fn reset_signal(signal: i32) -> io::Result<()> { if unsafe { libc::signal(signal as c_int, libc::SIG_DFL) } == libc::SIG_ERR { Err(io::Error::last_os_error()) } else { Ok(()) } } pub(crate) fn tcsetpgrp(fd: RawFd, pgrp: u32) -> io::Result<()> { cvt(unsafe { libc::tcsetpgrp(fd as c_int, pgrp as pid_t) }).and(Ok(())) } pub(crate) fn dup(fd: RawFd) -> io::Result<RawFd> { cvt(unsafe { libc::dup(fd) }) } pub(crate) fn dup2(old: RawFd, new: RawFd) -> io::Result<RawFd> { cvt(unsafe { libc::dup2(old, new) }) } pub(crate) fn close(fd: RawFd) -> io::Result<()> { cvt(unsafe { libc::close(fd) }).and(Ok(())) } pub(crate) fn isatty(fd: RawFd) -> bool { unsafe { libc::isatty(fd) == 1 } } trait IsMinusOne { fn is_minus_one(&self) -> bool; } macro_rules! impl_is_minus_one { ($($t:ident)*) => ($(impl IsMinusOne for $t { fn is_minus_one(&self) -> bool { *self == -1 } })*) } impl_is_minus_one! { i8 i16 i32 i64 isize } fn cvt<T: IsMinusOne>(t: T) -> io::Result<T> { if t.is_minus_one() { Err(io::Error::last_os_error()) } else { Ok(t) } } pub mod variables { use users_unix::get_user_by_name; use users_unix::os::unix::UserExt; pub(crate) fn get_user_home(username: &str) -> Option<String> { match get_user_by_name(username) { Some(user) => Some(user.home_dir().to_string_lossy().into_owned()), None => None, } } }<|fim▁end|>
cvt(unsafe { libc::pipe(fds.as_mut_ptr()) })?; Ok((fds[0], fds[1]))
<|file_name|>SegmentTree.java<|end_file_name|><|fim▁begin|>import java.util.Scanner; public class SegmentTree { private static class Node { public int left, right; public long add, sum; public Node(int left, int right, long sum) { this.left = left; this.right = right; this.sum = sum; } } private Node[] tree; private int size; public SegmentTree(int n,int[] arr) { size = (n<<2); tree = new Node[size]; build(0, 0, n-1, arr); } private void build(int pos, int p, int r, int[] arr) { if (p == r) { tree[pos] = new Node(p, r, arr[p]); } else { build(2*pos+1, p, (p+r)/2, arr); build(2*pos+2, (p+r)/2+1, r, arr); tree[pos] = new Node(p, r, tree[2*pos+1].sum + tree[2*pos+2].sum); } } public void update(int p, int r, long delt) { p = (tree[0].left < p)? p : tree[0].left; r = (tree[0].right > r)? r : tree[0].right; if (p <= r) { <|fim▁hole|> } } private void updateHelp(int pos, int p, int r, long delt) { if (tree[pos].left>=p && tree[pos].right<=r) { tree[pos].add += delt; tree[pos].sum += (tree[pos].right-tree[pos].left+1)*delt; } else { if (tree[pos].add!=0) { pushDown(pos); } int mid = (tree[pos].left+tree[pos].right)/2; if (p <= mid) { updateHelp(2*pos+1, p, r, delt); } if (mid+1 <= r) { updateHelp(2*pos+2, p, r, delt); } tree[pos].sum = tree[2*pos+1].sum + tree[2*pos+2].sum; } } private void pushDown(int pos) { int left = 2*pos+1, right = 2*pos+2; tree[left].add += tree[pos].add; tree[right].add += tree[pos].add; tree[left].sum += (tree[left].right-tree[left].left+1)*tree[pos].add; tree[right].sum += (tree[right].right-tree[right].left+1)*tree[pos].add; tree[pos].add = 0; } public long query(int p,int r) { if (tree[0].left<=p && tree[0].right>=r) { return queryHelp(0,p,r); } else { return 0; } } private long queryHelp(int pos,int p,int r) { if (tree[pos].left>=p && tree[pos].right<=r) { return tree[pos].sum; } else { if (tree[pos].add!=0) { pushDown(pos); } long val = 0; int mid = (tree[pos].left+tree[pos].right)/2; if (p <= mid) { val += queryHelp(2*pos+1, p, r); } if (mid+1 <= r) { val += queryHelp(2*pos+2, p, r); } return val; } } public static void main(String[] args) { Main.main(args); } } class Main { /** POJ 3468: http://poj.org/problem?id=3468 */ public static void main(String[] args) { Scanner in = new Scanner(System.in); int n = in.nextInt(); int[] arr = new int[n]; int q = in.nextInt(); for (int i=0;i<n;i++) { arr[i] = in.nextInt(); } SegmentTree tr = new SegmentTree(n,arr); for (int i=0;i<q;i++) { String op = in.next(); if (op.equals("C")) { int p = in.nextInt()-1; int r = in.nextInt()-1; tr.update(p,r,in.nextInt()); } else if (op.equals("Q")) { int p = in.nextInt()-1; int r = in.nextInt()-1; System.out.println(tr.query(p,r)); } } in.close(); } }<|fim▁end|>
updateHelp(0, p, r, delt);
<|file_name|>aarch64.rs<|end_file_name|><|fim▁begin|>//! AArch64-specific definitions for 64-bit linux-like values pub type c_char = u8; pub type wchar_t = u32; pub type nlink_t = u32; pub type blksize_t = i32; pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 48; pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 8; pub const O_DIRECT: ::c_int = 0x10000; pub const O_DIRECTORY: ::c_int = 0x4000; pub const O_NOFOLLOW: ::c_int = 0x8000; s! { pub struct stat { pub st_dev: ::dev_t, pub st_ino: ::ino_t, pub st_mode: ::mode_t,<|fim▁hole|> pub st_uid: ::uid_t, pub st_gid: ::gid_t, pub st_rdev: ::dev_t, __pad1: ::dev_t, pub st_size: ::off_t, pub st_blksize: ::blksize_t, __pad2: ::c_int, pub st_blocks: ::blkcnt_t, pub st_atime: ::time_t, pub st_atime_nsec: ::c_long, pub st_mtime: ::time_t, pub st_mtime_nsec: ::c_long, pub st_ctime: ::time_t, pub st_ctime_nsec: ::c_long, __unused: [::c_int; 2], } pub struct stat64 { pub st_dev: ::dev_t, pub st_ino: ::ino_t, pub st_mode: ::mode_t, pub st_nlink: ::nlink_t, pub st_uid: ::uid_t, pub st_gid: ::gid_t, pub st_rdev: ::dev_t, __pad1: ::dev_t, pub st_size: ::off64_t, pub st_blksize: ::blksize_t, __pad2: ::c_int, pub st_blocks: ::blkcnt64_t, pub st_atime: ::time_t, pub st_atime_nsec: ::c_long, pub st_mtime: ::time_t, pub st_mtime_nsec: ::c_long, pub st_ctime: ::time_t, pub st_ctime_nsec: ::c_long, __unused: [::c_int; 2], } pub struct pthread_attr_t { __size: [u64; 8] } }<|fim▁end|>
pub st_nlink: ::nlink_t,
<|file_name|>type.py<|end_file_name|><|fim▁begin|>import re

import sjconfparts.exceptions


class Error(sjconfparts.exceptions.Error):
    pass


class ConversionError(Error):
    pass


class ConversionList:
    """Custom list implementation, linked to the related Conf.

    Each modification of the list will auto-update the string representation of the list directly in the Conf object, via a call to self.conversion_method().

    Nowadays this is considered ugly (maybe it wasn't back in 2008 with Python 2.5?), but no one wants nor has time to redevelop a big part of SJConf to get rid of this. (aka don't blame the current dev who just wants to port this mess to Python3 :-p)

    Starting from Python3/new style classes, all used special methods must be explicitly redefined: https://docs.python.org/3/reference/datamodel.html#special-lookup
    """

    def __add__(self, other):
        # list.__add__ returns a new list and leaves innerList untouched,
        # so the result must be returned to the caller.
        result = self.innerList.__add__(other)
        self.conversion_method()
        return result

    def __init__(self, conversion_method, list_object=None):
        self.conversion_method = conversion_method
        if list_object is None:
            list_object = []
        self.innerList = list_object

    def __contains__(self, item):
        return self.innerList.__contains__(item)

    def __delitem__(self, key):
        self.innerList.__delitem__(key)
        self.conversion_method()

    def __getitem__(self, key):
        return self.innerList.__getitem__(key)

    def __iadd__(self, other):
        # In-place operators must return self, otherwise `conf_list += [...]`
        # would rebind the name to None and lose the ConversionList wrapper.
        self.innerList.__iadd__(other)
        self.conversion_method()
        return self

    def __imul__(self, other):
        self.innerList.__imul__(other)
        self.conversion_method()
        return self

    def __iter__(self):
        return self.innerList.__iter__()

    def __len__(self):
        return self.innerList.__len__()

    def __mul__(self, other):
        result = self.innerList.__mul__(other)
        self.conversion_method()
        return result

    def __reversed__(self):
        # reversed() takes no extra argument and does not mutate the list,
        # so there is nothing to sync back to the Conf object.
        return self.innerList.__reversed__()

    def __rmul__(self, other):
        result = self.innerList.__rmul__(other)
        self.conversion_method()
        return result

    def __setitem__(self, key, value):
        self.innerList.__setitem__(key, value)
        self.conversion_method()

    def __str__(self):
        return self.innerList.__str__()

    def __getattr__(self, name):
        list_method = getattr(self.innerList, name)

        def method(*args, **kw):
            result = list_method(*args, **kw)
            if name in (
                "append",
                "extend",
                "insert",
                "pop",
                "remove",
                "reverse",
                "sort",
            ):
                self.conversion_method()
            return result

        return method


class Type:
    class ConversionBadTypeError(ConversionError):
        def __init__(self, type_source, type_dest):
            self.msg = (
                "Invalid conversion from type %s to type %s, can only convert from str or to str"
                % (type_source, type_dest)
            )

    @classmethod
    def convert(cls, type_source, type_dest, dict_source, dict_dest, key):
        if type_source == "str":
            type_class_name = type_dest.capitalize()
        elif type_dest == "str":
            type_class_name = type_source.capitalize()
        else:
            raise Type.ConversionBadTypeError(type_source, type_dest)
        type_class = getattr(cls, type_class_name)
        return getattr(type_class, type_source + "_to_" + type_dest)(
            dict_source, dict_dest, key
        )

    @classmethod
    def convert_safe(cls, type_source, type_dest, dict_source, dict_dest, key):
        if type_source == "str":
            type_class_name = type_dest.capitalize()
        elif type_dest == "str":
            type_class_name = type_source.capitalize()
        else:
            raise Type.ConversionBadTypeError(type_source, type_dest)
        type_class = getattr(cls, type_class_name)
        if hasattr(type_class, type_source + "_to_" + type_dest + "_safe"):
            return getattr(type_class, type_source + "_to_" + type_dest + "_safe")(
                dict_source, dict_dest, key
            )
        else:
            return getattr(type_class, type_source + "_to_" + type_dest)(
                dict_source, dict_dest, key
            )

    @classmethod
    def convert_key(cls, key, type):
        return cls._convert_method("key", key, type)

    @classmethod
    def 
convert_value(cls, value, type, dict_str, dict_type, key): return cls._convert_method("value", value, type, dict_str, dict_type, key) @classmethod def convert_key_for_search(cls, key, type): return cls._convert_method("key_for_search", key, type) @classmethod def _convert_method(cls, method, value, type, *args): type_class = getattr(cls, type.capitalize()) if not hasattr(type_class, method): converted_value = value else: converted_value = getattr(type_class, method)(value, *args) return converted_value class List: @classmethod def value(cls, value, dict_str, dict_type, key): def conversion_method(): Type.List.list_to_str(dict_type, dict_str, key) return ConversionList(conversion_method, value) @classmethod def str_to_list(cls, dict_source, dict_dest, key): def conversion_method(): Type.List.list_to_str(dict_dest, dict_source, key) str_object = dict_source[key] li = list(map(str.strip, str_object.split(","))) try: li.remove("") except ValueError: pass dict_dest[key] = ConversionList(conversion_method, li) return dict_dest @classmethod def str_to_list_safe(cls, dict_source, dict_dest, key): str_object = dict_source[key] list_object = list(map(str.strip, str_object.split(","))) try: list_object.remove("") except ValueError: pass dict_dest[key] = list_object return dict_dest @classmethod def list_to_str(cls, dict_source, dict_dest, key): list_object = dict_source[key] str_object = ", ".join(list_object) dict_dest[key] = str_object return dict_dest class Bool: TRUE_VALUES = ("yes", "on", "true", "enabled", "enable") FALSE_VALUES = ("no", "off", "false", "disabled", "disable") class StrToBoolError(ConversionError): def __init__(self, str_object): self.msg = ( 'Bad value "%s" for str to bool conversion, expected a value in %s' % (str_object, str(Type.Bool.TRUE_VALUES + Type.Bool.FALSE_VALUES)) ) class BoolToStrError(ConversionError): def __init__(self, bool_object): self.msg = ( 'Bad value "%s" for bool to str conversion, expected a boolean' % (bool_object) ) @classmethod def str_to_bool(cls, dict_source, dict_dest, key): str_object = dict_source[key] if str_object.lower() in Type.Bool.TRUE_VALUES: bool_object = True elif str_object.lower() in Type.Bool.FALSE_VALUES: bool_object = False else: raise Type.Bool.StrToBoolError(str_object) dict_dest[key] = bool_object return dict_dest @classmethod def bool_to_str(cls, dict_source, dict_dest, key): bool_object = dict_source[key] if bool_object == True: str_object = "yes" elif bool_object == False: str_object = "no" else: raise Type.Bool.BoolToStrError(bool_object) dict_dest[key] = str_object return dict_dest class Size: class StrToSizeError(ConversionError): def __init__(self, str_object): self.msg = ( 'Bad value "%s" for str to size conversion, expected a value like, e.g. 
10M'
                    % (str_object)
                )

        class SizeToStrError(ConversionError):
            def __init__(self, size_object):
                self.msg = (
                    'Bad value "%s" for size to str conversion, expected an integer'
                    % (size_object)
                )

        @classmethod
        def str_to_size(cls, dict_source, dict_dest, key):
            str_object = dict_source[key]
            suffixes = ["T", "G", "M", "k"]
            match_result = re.compile("^(\d+)([%s])?$" % ("".join(suffixes))).match(
                str_object
            )
            if match_result is None:
                raise Type.Size.StrToSizeError(str_object)
            size, suffix = match_result.groups("")
            size_object = int(size)
            while len(suffixes) > 0:
                if suffix in suffixes:
                    size_object *= 1024
                suffixes.pop()
            dict_dest[key] = size_object
            return dict_dest

        @classmethod
        def size_to_str(cls, dict_source, dict_dest, key):
            try:
                size_object = int(dict_source[key])
            except ValueError:
                # size_object is unbound when int() raises, so report the raw value
                raise Type.Size.SizeToStrError(dict_source[key])
            suffix = ""  # values that never reach 1024 keep no suffix
            for suffix_to_test in ("k", "M", "G", "T"):
                if size_object > 1024:
                    suffix = suffix_to_test
                    size_object /= 1024
            str_object = str(size_object) + suffix
            dict_dest[key] = str_object
            return dict_dest

    class Sequence:
        @classmethod
        def key(cls, key):
            match_results = re.compile("^(.*)-\d+$").match(key)
            if match_results:
                key = match_results.group(1)
            return key

        @classmethod
        def key_for_search(cls, key):
            if not hasattr(key, "search"):
                key = cls.key(key)
                key = re.compile("^%s(-\d+)?$" % (key))
            return key

        @classmethod
        def value(cls, value, dict_str, dict_type, key):
            def conversion_method():
                Type.Sequence.sequence_to_str(dict_type, dict_str, key)

            return ConversionList(conversion_method, value)

        @classmethod
        def key_to_index(cls, key, key_to_convert):
            index = key_to_convert[len(key) + 1 :]
            if index == "":
                index = -1
            else:
                index = int(index)
            return index

        @classmethod
        def str_to_sequence(cls, dict_source, dict_dest, key):
            def conversion_method():
                Type.Sequence.sequence_to_str(dict_dest, dict_source, key)

            str_object = []
            key = cls.key(key)
            regexp = re.compile("^%s-\d+$" % (key))<|fim▁hole|>
                    str_object.append((key_to_test, value))
            str_object.sort(key=lambda str_object: cls.key_to_index(key, str_object[0]))
            sequence_object = ConversionList(
                conversion_method, [value for (str_key, value) in str_object]
            )
            dict_dest[key] = sequence_object
            return dict_dest

        @classmethod
        def str_to_sequence_safe(cls, dict_source, dict_dest, key):
            str_object = []
            key = cls.key(key)
            regexp = re.compile("^%s-\d+$" % (key))
            for (key_to_test, value) in dict_source.items():
                if key_to_test == key or regexp.match(key_to_test):
                    str_object.append((key_to_test, value))
            str_object.sort(key=lambda str_object: cls.key_to_index(key, str_object[0]))
            dict_dest[key] = [value for (str_key, value) in str_object]
            return dict_dest

        @classmethod
        def assign_elts(cls, elts, assignments_old, indices_unassigned):
            def _assign_unassigned(
                indices, elts_unassigned, indices_unassigned, index_prev, index
            ):
                indices_available = [
                    index_unassigned
                    for index_unassigned in indices_unassigned
                    if index_unassigned > index_prev
                    and (index_unassigned < index or index < -1)
                ]
                for index_available in indices_available:
                    indices_unassigned.remove(index_available)
                while len(indices_available) > len(elts_unassigned) - (
                    index >= -1 and 1 or 0
                ):
                    indices_available.pop()
                indices_available.append(index)
                indices_to_assign = []
                for index_available in indices_available:
                    while len(indices_to_assign) < len(elts_unassigned) - (
                        index_available >= -1 and 1 or 0
                    ):
                        if index_prev < index_available - 1 or index_available < -1:
                            index_prev += 1
                        indices_to_assign.append(index_prev)
                    if index_available >= -1:
                        indices_to_assign.append(index_available)
                    index_prev = index_available
                while 
len(elts_unassigned) > 0: elts_unassigned.pop(0) index_prev = indices_to_assign.pop(0) indices.append(index_prev) return index_prev elts_unassigned = [] indices = [] index_prev = 0 for elt in elts: elts_unassigned.append(elt) if elt in assignments_old: index = assignments_old[elt] if index > index_prev and ( len(elts_unassigned) == 1 or len(elts_unassigned) <= index - index_prev ): index_prev = _assign_unassigned( indices, elts_unassigned, indices_unassigned, index_prev, index, ) index_prev = _assign_unassigned( indices, elts_unassigned, indices_unassigned, index_prev, -2 ) return indices @classmethod def sequence_to_str(cls, dict_source, dict_dest, key): key = cls.key(key) sequence_object = [elt for elt in list(dict_source[key]) if elt != ""] regexp = re.compile("^%s-\d+$" % (key)) str_keys = [ key_to_test for key_to_test in dict_dest if regexp.match(key_to_test) ] keys_unassigned = [ str_key for str_key in str_keys if dict_dest[str_key] == "" ] str_keys = [ str_key for str_key in str_keys if str_key not in keys_unassigned ] assignments_old = dict( [ (dict_dest[str_key], cls.key_to_index(key, str_key)) for str_key in sorted( str_keys, key=lambda key_to_convert: cls.key_to_index( key, key_to_convert ), ) ] ) indices = cls.assign_elts( sequence_object, assignments_old, [ cls.key_to_index(key, key_to_convert) for key_to_convert in keys_unassigned ], ) for str_key in str_keys: del dict_dest[str_key] while len(sequence_object) > 0: elt = sequence_object.pop(0) index = indices.pop(0) dict_dest[key + "-" + str(index)] = elt return dict_dest<|fim▁end|>
for (key_to_test, value) in dict_source.items(): if key_to_test == key or regexp.match(key_to_test):
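The two lines above show the layout used throughout this dump: the prompt holds <|fim▁begin|> + prefix + <|fim▁hole|> + suffix + <|fim▁end|> (preceded by a <|file_name|>...<|end_file_name|> header), and the completion holds the text that belongs at the hole. A minimal Python sketch of the reassembly, assuming the sentinels are used exactly as they appear in these rows (function and variable names are illustrative):

def reassemble(prompt, completion):
    # Strip the outer sentinels, then splice the completion into the hole.
    body = prompt.removeprefix("<|fim▁begin|>").removesuffix("<|fim▁end|>")
    prefix, suffix = body.split("<|fim▁hole|>", 1)
    return prefix + completion + suffix

# Toy row; real rows also carry a <|file_name|>...<|end_file_name|> header
# that would be stripped first.
prompt = "<|fim▁begin|>def add(a, b):<|fim▁hole|>    return total<|fim▁end|>"
completion = "\n    total = a + b\n"
assert reassemble(prompt, completion) == "def add(a, b):\n    total = a + b\n    return total"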
<|file_name|>tools.py<|end_file_name|><|fim▁begin|>#-*- coding: utf-8 -*- from django.core.exceptions import PermissionDenied def popup_status(request): return '_popup' in request.REQUEST or 'pop' in request.REQUEST def selectfolder_status(request): return 'select_folder' in request.REQUEST def popup_param(request): if popup_status(request): return "?_popup=1" else: return "" def check_files_edit_permissions(request, files): for f in files: if not f.has_edit_permission(request): raise PermissionDenied def check_folder_edit_permissions(request, folders): for f in folders: if not f.has_edit_permission(request): raise PermissionDenied check_files_edit_permissions(request, f.files) check_folder_edit_permissions(request, f.children.all()) def check_files_read_permissions(request, files): for f in files: if not f.has_read_permission(request): raise PermissionDenied def check_folder_read_permissions(request, folders): for f in folders: if not f.has_read_permission(request): raise PermissionDenied<|fim▁hole|> def userperms_for_request(item, request): r = [] ps = ['read', 'edit', 'add_children'] for p in ps: attr = "has_%s_permission" % p if hasattr(item, attr): x = getattr(item, attr)(request) if x: r.append(p) return r<|fim▁end|>
check_files_read_permissions(request, f.files) check_folder_read_permissions(request, f.children.all())
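The tools.py row above pairs each permission check with a recursive descent over folder children. A self-contained sketch of that pattern, with stand-in classes since the real Django model objects are not shown in the excerpt:

class PermissionDenied(Exception):
    pass

class Folder:
    # Stand-in for the real folder model: a readable flag plus children.
    def __init__(self, readable, children=()):
        self.readable = readable
        self.children = list(children)
    def has_read_permission(self, request):
        return self.readable

def check_folder_read_permissions(request, folders):
    for f in folders:
        if not f.has_read_permission(request):
            raise PermissionDenied
        check_folder_read_permissions(request, f.children)

root = Folder(True, children=[Folder(True), Folder(False)])
try:
    check_folder_read_permissions(None, [root])
except PermissionDenied:
    print("read denied somewhere in the tree")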
<|file_name|>main.js<|end_file_name|><|fim▁begin|>/* * jQuery File Upload Plugin JS Example 8.9.1 * https://github.com/blueimp/jQuery-File-Upload * * Copyright 2010, Sebastian Tschan * https://blueimp.net * * Licensed under the MIT license: * http://www.opensource.org/licenses/MIT */ /* global $, window */ $(function () { 'use strict'; // Initialize the jQuery File Upload widget: $('#fileupload').fileupload({ // Uncomment the following to send cross-domain cookies: //xhrFields: {withCredentials: true}, url: root_url + '/media/upload' }); // Enable iframe cross-domain access via redirect option: $('#fileupload').fileupload( 'option', 'redirect', window.location.href.replace( /\/[^\/]*$/, '/cors/result.html?%s' ) ); if (window.location.hostname === 'blueimp.github.io') { // Demo settings: $('#fileupload').fileupload('option', { url: '//jquery-file-upload.appspot.com/', // Enable image resizing, except for Android and Opera, // which actually support image resizing, but fail to // send Blob objects via XHR requests: disableImageResize: /Android(?!.*Chrome)|Opera/ .test(window.navigator.userAgent), maxFileSize: 5000000, acceptFileTypes: /(\.|\/)(gif|jpe?g|png)$/i }); // Upload server status check for browsers with CORS support: if ($.support.cors) { $.ajax({ url: '//jquery-file-upload.appspot.com/', type: 'HEAD' }).fail(function () { $('<div class="alert alert-danger"/>') .text('Upload server currently unavailable - ' + new Date()) .appendTo('#fileupload'); }); } } else { // Load existing files: $('#fileupload').addClass('fileupload-processing'); $.ajax({ // Uncomment the following to send cross-domain cookies: //xhrFields: {withCredentials: true}, url: $('#fileupload').fileupload('option', 'url'), dataType: 'json', context: $('#fileupload')[0] }).always(function () { $(this).removeClass('fileupload-processing'); }).done(function (result) { $(this).fileupload('option', 'done') .call(this, $.Event('done'), {result: result}); }); } ///////////////////////////////////////////////<|fim▁hole|>});<|fim▁end|>
<|file_name|>problem-0003.rs<|end_file_name|><|fim▁begin|>extern crate rust; use rust::primes; /// Problem 3 /// /// The prime factors of 13195 are 5, 7, 13 and 29. /// /// What is the largest prime factor of the number 600851475143 ? fn main() { println!("Problem 3"); let num: u64 = 600851475143; let sq: u64 = (num as f64).sqrt().ceil() as u64; println!("Computing primes up to {}", sq); let prms: Vec<u64> = primes::sieve(sq); println!("Done. Checking for factors."); for n in prms.iter().rev() {<|fim▁hole|> if num % n == 0 { println!("{}", n); break; } } }<|fim▁end|>
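The Rust row sieves primes up to sqrt(n) and scans them from the largest down, so the first divisor it finds is the largest prime factor below the square root. The same idea as a Python sketch, valid for numbers like 600851475143 whose largest prime factor lies below sqrt(n):

def sieve(limit):
    # Sieve of Eratosthenes over [0, limit].
    is_prime = [True] * (limit + 1)
    is_prime[0:2] = [False, False]
    for p in range(2, int(limit ** 0.5) + 1):
        if is_prime[p]:
            is_prime[p * p :: p] = [False] * len(is_prime[p * p :: p])
    return [n for n, flag in enumerate(is_prime) if flag]

def largest_prime_factor(num):
    # Scan the sieved primes from the largest down, as the Rust version does.
    for p in reversed(sieve(int(num ** 0.5) + 1)):
        if num % p == 0:
            return p
    return num  # num itself is prime

print(largest_prime_factor(600851475143))  # 6857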
<|file_name|>hello.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 # -*- coding: utf-8 -*- def helloworld(): """<|fim▁hole|> Hello world routine! """ print("Hello world!")<|fim▁end|>
<|file_name|>exclusive_conditions.py<|end_file_name|><|fim▁begin|># Copyright 2013-present Barefoot Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import p4 from collections import defaultdict def _get_extracted_headers(parse_state): extracted = set() return extracted # def _get_hdr_name(hdr): # if hdr.virtual: # return hdr.base_name # elif hdr.index is not None: # return hdr.base_name # else: # return hdr.name def _find_parser_paths(hlir): def _find_paths(state, paths, current_path, path_hdrs, tag_stacks_index): try: next_states = set(state.branch_to.values()) except: paths.append(path_hdrs) return extracted_headers = set() for call in state.call_sequence: if call[0] == p4.parse_call.extract: hdr = call[1] if hdr.virtual: base_name = hdr.base_name current_index = tag_stacks_index[base_name] if current_index > hdr.max_index: paths.append(path_hdrs) return tag_stacks_index[base_name] += 1 name = base_name + "[%d]" % current_index hdr = hlir.p4_header_instances[name] extracted_headers.add(hdr) if len(extracted_headers & path_hdrs) != 0: paths.append(extracted_headers | path_hdrs) return for next_state in next_states: _find_paths(next_state, paths, current_path + [state], extracted_headers | path_hdrs, tag_stacks_index.copy()) paths = [] start_state = hlir.p4_parse_states["start"] _find_paths(start_state, paths, [], set(), defaultdict(int)) return paths def _find_compatible_headers(hlir): def _find_rec(state, current_path, path_hdrs, compatibles): if state in current_path: return try: next_states = set(state.branch_to.values()) except: return extracted_headers = _get_extracted_headers(state) for hdr1, hdr2 in itertools.product(path_hdrs, extracted_headers): compatibles.add( (hdr1, hdr2) ) compatibles.add( (hdr2, hdr1) ) for next_state in next_states: _find_rec(next_state, current_path + [state], path_hdrs | extracted_headers, compatibles) compatibles = set() start_state = hlir.p4_parse_states["start"] _find_rec(start_state, [], set(), compatibles) return compatibles def _get_headers_in_condition(p4_expression, hdrs): try: if p4_expression.op == "valid": hdrs.add(p4_expression.right) _get_headers_in_condition(p4_expression.left, hdrs) _get_headers_in_condition(p4_expression.right, hdrs) except AttributeError: return class Solver(): TRUE = 0 FALSE = 1 DONT_KNOW = 2 def __init__(self, hlir): self.hlir = hlir # self.compatible_headers = _find_compatible_headers(hlir) self.paths = _find_parser_paths(hlir) self.compatible_headers = {} self.implied_headers = {} all_headers = set() for _, hdr in hlir.p4_header_instances.items(): if hdr.metadata or hdr.virtual: continue all_headers.add(hdr) for _, hdr in hlir.p4_header_instances.items(): if hdr.metadata or hdr.virtual: continue self.compatible_headers[hdr] = set() self.implied_headers[hdr] = all_headers.copy() for path in self.paths: for hdr in path: self.compatible_headers[hdr] |= path self.implied_headers[hdr] &= path # print "COMPATIBLE_HEADERS" # for hdr, s in self.compatible_headers.items(): # print hdr, ":", [str(h) for h in s] 
# print "IMPLIED_HEADERS" # for hdr, s in self.implied_headers.items(): # print hdr, ":", [str(h) for h in s] def _check_header_values_coherent(self, hdrs_valid): for hdr1, hdr2 in itertools.product(hdrs_valid, repeat = 2): if hdr2 not in self.compatible_headers[hdr1] and\ hdrs_valid[hdr1] and hdrs_valid[hdr2]: return False if hdr1 in self.implied_headers[hdr2] and\ hdrs_valid[hdr2] and not hdrs_valid[hdr1]: return False if hdr2 in self.implied_headers[hdr1] and\ hdrs_valid[hdr1] and not hdrs_valid[hdr2]: return False return True def _check_condition(self, c, hdrs_valid): if not c: return Solver.TRUE if c.op == "valid": if hdrs_valid[c.right]: return Solver.TRUE else: return Solver.FALSE elif c.op == "and": left = self._check_condition(c.left, hdrs_valid) right = self._check_condition(c.right, hdrs_valid) if left == Solver.TRUE and right == Solver.TRUE: return Solver.TRUE if left == Solver.FALSE or right == Solver.FALSE: return Solver.FALSE return Solver.DONT_KNOW elif c.op == "or": left = self._check_condition(c.left, hdrs_valid) right = self._check_condition(c.right, hdrs_valid) if left == Solver.TRUE or right == Solver.TRUE: return Solver.TRUE<|fim▁hole|> if left == Solver.FALSE and right == Solver.FALSE: return Solver.FALSE return Solver.DONT_KNOW elif c.op == "not": right = self._check_condition(c.right, hdrs_valid) if right == Solver.TRUE: return Solver.FALSE if right == Solver.FALSE: return Solver.TRUE return Solver.DONT_KNOW return Solver.DONT_KNOW # unknown_cond is a condition (p4_expression) we want to evaluate # known_conds is a list of 2-tuples (condition, value), where condition is a # p4_expression and value is the boolean value of the condition def evaluate_condition(self, dangerous_hdrs, unknown_cond, known_conds): used_hdrs = set() _get_headers_in_condition(unknown_cond, used_hdrs) if known_conds: for c in zip(*known_conds)[0]: _get_headers_in_condition(c, used_hdrs) if (used_hdrs & dangerous_hdrs): return False used_hdrs_ordered = list(used_hdrs) used_hdrs_valid = {} num_used_hdrs = len(used_hdrs) result = None for values in itertools.product([True, False], repeat = num_used_hdrs): for idx, hdr in enumerate(used_hdrs_ordered): used_hdrs_valid[hdr] = values[idx] if not self._check_header_values_coherent(used_hdrs_valid): continue violated = False for known_c, value in known_conds: check_c = self._check_condition(known_c, used_hdrs_valid) if check_c == Solver.FALSE and value: violated = True break elif check_c == Solver.TRUE and not value: violated = True break elif check_c == Solver.DONT_KNOW: pass if violated: continue unknown_value = self._check_condition(unknown_cond, used_hdrs_valid) if unknown_value == Solver.DONT_KNOW: return None if result is None: result = unknown_value elif result != unknown_value: return None if result == Solver.TRUE: return True elif result == Solver.FALSE: return False return result<|fim▁end|>
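evaluate_condition in the row above enumerates header-validity assignments and folds conditions with three-valued logic, so a condition only gets a definite value when its operands force one. A standalone sketch of that TRUE/FALSE/DONT_KNOW algebra:

TRUE, FALSE, DONT_KNOW = 0, 1, 2

def and3(left, right):
    # Definite only when forced: both TRUE, or at least one FALSE.
    if left == TRUE and right == TRUE:
        return TRUE
    if left == FALSE or right == FALSE:
        return FALSE
    return DONT_KNOW

def or3(left, right):
    if left == TRUE or right == TRUE:
        return TRUE
    if left == FALSE and right == FALSE:
        return FALSE
    return DONT_KNOW

def not3(value):
    return {TRUE: FALSE, FALSE: TRUE}.get(value, DONT_KNOW)

assert and3(TRUE, DONT_KNOW) == DONT_KNOW
assert or3(TRUE, DONT_KNOW) == TRUE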
<|file_name|>Array.js<|end_file_name|><|fim▁begin|>/** The MIT License (MIT) Copyright (c) 2014 MyChannel-Apps.de Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<|fim▁hole|> THE SOFTWARE. @author Christoph Kühl <djchrisnet>, Adrian Preuß <Bizarrus> */ if(!Array.prototype.each) { Object.defineProperty(Array.prototype, 'each', { enumerable: false, configurable: false, writable: false, value: function(callback) { for(var index = 0; index < this.length; index++) { if(callback.call(this[index], index) === false) { break; } } } }); } if(!Array.prototype.random) { Object.defineProperty(Array.prototype, 'random', { enumerable: false, configurable: false, writable: false, value: function() { return this[RandomOperations.nextInt(this.length)]; } }); } if(!Array.prototype.exists) { Object.defineProperty(Array.prototype, 'exists', { enumerable: false, configurable: false, writable: false, value: function(value) { return (this.indexOf(value) > -1); } }); } if(!Array.prototype.size) { Object.defineProperty(Array.prototype, 'size', { enumerable: false, configurable: false, writable: false, value: function() { return this.length; } }); }<|fim▁end|>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django.forms import ModelForm, forms<|fim▁hole|> class PortaForm(ModelForm): class Meta: model = Porta fields = ('descricao',) class GrupoForm(ModelForm): class Meta: model = Grupo fields = ('descricao',)<|fim▁end|>
from django import forms from appPortas.models import * from django.forms.models import inlineformset_factory
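The completion above pulls in inlineformset_factory alongside the model forms. A hedged sketch of how it could be wired up, assuming Porta carries a ForeignKey to Grupo — the excerpt does not show the models, so the parent/child relationship is an assumption:

from django.forms.models import inlineformset_factory

# Edit a Grupo's Portas inline; the FK from Porta to Grupo is assumed.
PortaFormSet = inlineformset_factory(
    Grupo,
    Porta,
    fields=("descricao",),
    extra=1,
)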
<|file_name|>mdtra_resultDialog.cpp<|end_file_name|><|fim▁begin|>/*************************************************************************** * Copyright (C) 2011-2017 Alexander V. Popov. * * This file is part of Molecular Dynamics Trajectory * Reader & Analyzer (MDTRA) source code. * * MDTRA source code is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * MDTRA source code is distributed in the hope that it will be * useful, but WITHOUT ANY WARRANTY; without even the implied * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA ***************************************************************************/ // Purpose: // Implementation of MDTRA_ResultDialog #include "mdtra_main.h" #include "mdtra_mainWindow.h" #include "mdtra_project.h" #include "mdtra_utils.h" #include "mdtra_resultDialog.h" #include "mdtra_resultDataSourceDialog.h" #include "mdtra_multiResultDataSourceDialog.h" #include <QtGui/QMessageBox> #include <QtGui/QPushButton> static const char *szScaleUnitNames[MDTRA_YSU_MAX] = { "Angstroms", "Nanometers", "Degrees", "Radians", "Kcal/A", "Micronewtons", "Square Angstroms", "Square Nanometers", }; static unsigned int uiScaleUnitMap[MDTRA_YSU_MAX] = { (1 << MDTRA_DT_RMSD) | (1 << MDTRA_DT_RMSD_SEL) | (1 << MDTRA_DT_RMSF) | (1 << MDTRA_DT_RMSF_SEL) | (1 << MDTRA_DT_RADIUS_OF_GYRATION) | (1 << MDTRA_DT_DISTANCE), (1 << MDTRA_DT_RMSD) | (1 << MDTRA_DT_RMSD_SEL) | (1 << MDTRA_DT_RMSF) | (1 << MDTRA_DT_RMSF_SEL) | (1 << MDTRA_DT_RADIUS_OF_GYRATION) | (1 << MDTRA_DT_DISTANCE), (1 << MDTRA_DT_ANGLE) | (1 << MDTRA_DT_ANGLE2) | (1 << MDTRA_DT_TORSION) | (1 << MDTRA_DT_TORSION_UNSIGNED) | (1 << MDTRA_DT_DIHEDRAL) | (1 << MDTRA_DT_DIHEDRAL_ABS) | (1 << MDTRA_DT_PLANEANGLE), (1 << MDTRA_DT_ANGLE) | (1 << MDTRA_DT_ANGLE2) | (1 << MDTRA_DT_TORSION) | (1 << MDTRA_DT_TORSION_UNSIGNED) | (1 << MDTRA_DT_DIHEDRAL) | (1 << MDTRA_DT_DIHEDRAL_ABS) | (1 << MDTRA_DT_PLANEANGLE), (1 << MDTRA_DT_FORCE) | (1 << MDTRA_DT_RESULTANT_FORCE), (1 << MDTRA_DT_FORCE) | (1 << MDTRA_DT_RESULTANT_FORCE), (1 << MDTRA_DT_SAS) | (1 << MDTRA_DT_SAS_SEL) | (1 << MDTRA_DT_OCCA) | (1 << MDTRA_DT_OCCA_SEL), (1 << MDTRA_DT_SAS) | (1 << MDTRA_DT_SAS_SEL) | (1 << MDTRA_DT_OCCA) | (1 << MDTRA_DT_OCCA_SEL), }; MDTRA_ResultDialog :: MDTRA_ResultDialog( int index, QWidget *parent ) : QDialog( parent ) { m_pMainWindow = qobject_cast<MDTRA_MainWindow*>(parent); assert(m_pMainWindow != NULL); setupUi( this ); setFixedSize( width(), height() ); setWindowIcon( QIcon(":/png/16x16/result.png") ); if (index < 0) { setWindowTitle( tr("Add Result Collector") ); } else { setWindowTitle( tr("Edit Result Collector") ); } m_iResultIndex = index; MDTRA_DataType currentDT = MDTRA_DT_RMSD; MDTRA_YScaleUnits currentSU = MDTRA_YSU_ANGSTROMS; MDTRA_Layout currentL = UTIL_GetDataSourceDefaultLayout(currentDT); if (index < 0) { QString resultTitle = tr("Result %1").arg(m_pMainWindow->getResultCounter()); lineEdit->setText(resultTitle); } else { MDTRA_Result *pResult = m_pMainWindow->getProject()->fetchResultByIndex( index ); if (pResult) { lineEdit->setText( pResult->name ); currentDT = pResult->type; 
currentSU = pResult->units; currentL = pResult->layout; dsScaleUnitsCombo->setEnabled( currentDT != MDTRA_DT_USER ); for (int i = 0; i < pResult->sourceList.count(); i++) { m_dsRefList << pResult->sourceList.at(i); MDTRA_DataSource *pDS = m_pMainWindow->getProject()->fetchDataSourceByIndex( pResult->sourceList.at(i).dataSourceIndex ); QListWidgetItem *pItem = new QListWidgetItem( QObject::tr("DATA SOURCE %1: %2\nScale = %3 Bias = %4") .arg(pDS->index) .arg(pDS->name) .arg(pResult->sourceList.at(i).yscale) .arg(pResult->sourceList.at(i).bias), dsList ); pItem->setIcon( QIcon(":/png/16x16/source.png") ); } } } for (int i = 0; i < MDTRA_DT_MAX; i++) { dsTypeCombo->addItem( UTIL_GetDataSourceShortTypeName(i) ); if (UTIL_GetDataSourceTypeId(i) == currentDT) dsTypeCombo->setCurrentIndex(i); } for (int i = 0, j = 0; i < MDTRA_YSU_MAX; i++) { if (uiScaleUnitMap[i] & (1 << (int)currentDT)) { dsScaleUnitsCombo->addItem( szScaleUnitNames[i], i ); if (i == (int)currentSU) dsScaleUnitsCombo->setCurrentIndex(j); j++; } } switch (currentL) { default: case MDTRA_LAYOUT_TIME: rbTime->setChecked( true ); break; case MDTRA_LAYOUT_RESIDUE: rbRes->setChecked( true ); break; } rbLabel->setEnabled( UTIL_IsDataSourceLayoutChangeable(currentDT) ); rbTime->setEnabled( UTIL_IsDataSourceLayoutChangeable(currentDT) ); rbRes->setEnabled( UTIL_IsDataSourceLayoutChangeable(currentDT) ); connect(dsTypeCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(exec_on_update_layout_and_scale_units())); connect(lineEdit, SIGNAL(textChanged(const QString&)), this, SLOT(exec_on_check_resultInput())); connect(buttonBox, SIGNAL(accepted()), this, SLOT(exec_on_accept())); connect(dsAdd, SIGNAL(clicked()), this, SLOT(exec_on_add_result_data_source())); connect(dsAddMulti, SIGNAL(clicked()), this, SLOT(exec_on_add_multiple_result_data_sources())); connect(dsEdit, SIGNAL(clicked()), this, SLOT(exec_on_edit_result_data_source())); connect(dsList, SIGNAL(itemDoubleClicked(QListWidgetItem*)), this, SLOT(exec_on_edit_result_data_source())); connect(dsRemove, SIGNAL(clicked()), this, SLOT(exec_on_remove_result_data_source())); connect(dsUp, SIGNAL(clicked()), this, SLOT(exec_on_up_result_data_source())); connect(dsDown, SIGNAL(clicked()), this, SLOT(exec_on_down_result_data_source())); exec_on_check_resultInput(); } void MDTRA_ResultDialog :: exec_on_update_layout_and_scale_units( void ) { MDTRA_DataType currentDT = UTIL_GetDataSourceTypeId(dsTypeCombo->currentIndex()); int currentDTi = (int)currentDT; int currentSU = dsScaleUnitsCombo->itemData(dsScaleUnitsCombo->currentIndex()).toUInt(); MDTRA_Layout currentL = UTIL_GetDataSourceDefaultLayout(currentDT); rbLabel->setEnabled( UTIL_IsDataSourceLayoutChangeable(currentDT) ); rbTime->setEnabled( UTIL_IsDataSourceLayoutChangeable(currentDT) ); rbRes->setEnabled( UTIL_IsDataSourceLayoutChangeable(currentDT) ); switch (currentL) { default: case MDTRA_LAYOUT_TIME: rbTime->setChecked( true ); break; case MDTRA_LAYOUT_RESIDUE: rbRes->setChecked( true ); break; } dsScaleUnitsCombo->clear(); for (int i = 0, j = 0; i < MDTRA_YSU_MAX; i++) { if (uiScaleUnitMap[i] & (1 << currentDTi)) { dsScaleUnitsCombo->addItem( szScaleUnitNames[i], i ); if (i == currentSU) dsScaleUnitsCombo->setCurrentIndex(j); j++; } } dsScaleUnitsCombo->setEnabled( currentDT != MDTRA_DT_USER ); } void MDTRA_ResultDialog :: exec_on_check_resultInput( void ) { dsTypeCombo->setEnabled( dsList->count() == 0 ); dsUp->setEnabled( dsList->count() > 1 ); dsDown->setEnabled( dsList->count() > 1 ); buttonBox->button( QDialogButtonBox::Ok 
)->setEnabled( (lineEdit->text().length() > 0) && (dsList->count() > 0)); } void MDTRA_ResultDialog :: exec_on_accept( void ) { if (m_iResultIndex < 0) { if (!m_pMainWindow->getProject()->checkUniqueResultName( lineEdit->text() )) { QMessageBox::warning(this, tr(APPLICATION_TITLE_SMALL), tr("Result \"%1\" already registered.\nPlease enter another result title.").arg(lineEdit->text())); return; } } MDTRA_DataType currentDT = UTIL_GetDataSourceTypeId(dsTypeCombo->currentIndex()); MDTRA_YScaleUnits currentSU = (MDTRA_YScaleUnits)dsScaleUnitsCombo->itemData(dsScaleUnitsCombo->currentIndex()).toUInt(); MDTRA_Layout currentL; if (rbRes->isChecked()) currentL = MDTRA_LAYOUT_RESIDUE; else currentL = MDTRA_LAYOUT_TIME; if (m_iResultIndex < 0) { m_pMainWindow->getProject()->registerResult( lineEdit->text(), currentDT, currentSU, currentL, m_dsRefList, true ); } else { m_pMainWindow->getProject()->modifyResult( m_iResultIndex, lineEdit->text(), currentDT, currentSU, currentL, m_dsRefList ); } accept(); } void MDTRA_ResultDialog :: exec_on_add_result_data_source( void ) { MDTRA_DataType currentDT = UTIL_GetDataSourceTypeId(dsTypeCombo->currentIndex()); MDTRA_ResultDataSourceDialog dialog( currentDT, NULL, m_pMainWindow, this ); if (!dialog.GetAvailableDataSourceCount()) { QMessageBox::warning(this, tr(APPLICATION_TITLE_SMALL), tr("No data sources of type \"%1\" are registered!").arg(UTIL_GetDataSourceShortTypeName(dsTypeCombo->currentIndex()))); return; } QString sCheckUserData(""); bool bCheckUserData = false; if (m_dsRefList.count() > 0) { MDTRA_DataSource *pDS = m_pMainWindow->getProject()->fetchDataSourceByIndex( m_dsRefList.at(0).dataSourceIndex ); sCheckUserData = pDS->userdata; bCheckUserData = true; } if (dialog.exec()) { MDTRA_DSRef newdsref; dialog.GetResultDataSource( &newdsref ); MDTRA_DataSource *pDS = m_pMainWindow->getProject()->fetchDataSourceByIndex( newdsref.dataSourceIndex ); if ( bCheckUserData && (sCheckUserData != pDS->userdata) ) { QMessageBox::warning(this, tr(APPLICATION_TITLE_SMALL), tr("Cannot add user-defined Data Source of type \"%1\"!\nThe Result Collector already has user-defined Data Source of type \"%2\".").arg(pDS->userdata).arg(sCheckUserData)); return; } m_dsRefList << newdsref; QListWidgetItem *pItem = new QListWidgetItem( QObject::tr("DATA SOURCE %1: %2\nScale = %3 Bias = %4") .arg(pDS->index) .arg(pDS->name) .arg(newdsref.yscale) .arg(newdsref.bias), dsList ); pItem->setIcon( QIcon(":/png/16x16/source.png") ); exec_on_check_resultInput(); } } void MDTRA_ResultDialog :: exec_on_edit_result_data_source( void ) { MDTRA_DataType currentDT = UTIL_GetDataSourceTypeId(dsTypeCombo->currentIndex()); QListWidgetItem *pItem = dsList->currentItem(); MDTRA_DSRef *pCurrentRef = const_cast<MDTRA_DSRef*>(&m_dsRefList.at(dsList->currentRow())); MDTRA_ResultDataSourceDialog dialog( currentDT, pCurrentRef, m_pMainWindow, this ); if (dialog.exec()) { dialog.GetResultDataSource( pCurrentRef ); MDTRA_DataSource *pDS = m_pMainWindow->getProject()->fetchDataSourceByIndex( pCurrentRef->dataSourceIndex ); pItem->setText( QObject::tr("DATA SOURCE %1: %2\nScale = %3 Bias = %4") .arg(pDS->index) .arg(pDS->name) .arg(pCurrentRef->yscale) .arg(pCurrentRef->bias)); } } void MDTRA_ResultDialog :: exec_on_remove_result_data_source( void ) { if (dsList->selectedItems().count() <= 0) return; if (QMessageBox::No == QMessageBox::warning( this, tr("Confirm"), tr("Do you want to remove selected result data source from the list?"), QMessageBox::Yes | QMessageBox::Default, QMessageBox::No | 
QMessageBox::Escape )) { return; } int itemIndex = dsList->currentRow(); m_dsRefList.removeAt( itemIndex ); QListWidgetItem *pItem = dsList->currentItem(); dsList->removeItemWidget( pItem ); delete pItem; exec_on_check_resultInput(); } void MDTRA_ResultDialog :: exec_on_up_result_data_source( void ) { if (dsList->selectedItems().count() <= 0) return; int itemIndex = dsList->currentRow(); if (itemIndex <= 0) return; m_dsRefList.swap( itemIndex, itemIndex-1 ); QListWidgetItem *currentItem = dsList->takeItem( itemIndex ); dsList->insertItem( itemIndex - 1, currentItem ); dsList->setCurrentItem( currentItem ); } <|fim▁hole|>{ if (dsList->selectedItems().count() <= 0) return; int itemIndex = dsList->currentRow(); if (itemIndex >= dsList->count()-1) return; m_dsRefList.swap( itemIndex, itemIndex+1 ); QListWidgetItem *currentItem = dsList->takeItem( itemIndex ); dsList->insertItem( itemIndex + 1, currentItem ); dsList->setCurrentItem( currentItem ); } void MDTRA_ResultDialog :: exec_on_add_multiple_result_data_sources( void ) { MDTRA_DataType currentDT = UTIL_GetDataSourceTypeId(dsTypeCombo->currentIndex()); MDTRA_MultiResultDataSourceDialog dialog( currentDT, m_pMainWindow, this ); if (!dialog.GetAvailableDataSourceCount()) { QMessageBox::warning(this, tr(APPLICATION_TITLE_SMALL), tr("No data sources of type \"%1\" are registered!").arg(UTIL_GetDataSourceShortTypeName(dsTypeCombo->currentIndex()))); return; } QString sCheckUserData(""); bool bCheckUserData = false; if (m_dsRefList.count() > 0) { MDTRA_DataSource *pDS = m_pMainWindow->getProject()->fetchDataSourceByIndex( m_dsRefList.at(0).dataSourceIndex ); sCheckUserData = pDS->userdata; bCheckUserData = true; } if (dialog.exec()) { for (int i = 0; i < dialog.GetAvailableDataSourceCount(); i++) { MDTRA_DSRef newdsref; if (!dialog.GetResultDataSource( i, &newdsref )) continue; MDTRA_DataSource *pDS = m_pMainWindow->getProject()->fetchDataSourceByIndex( newdsref.dataSourceIndex ); if ( bCheckUserData && (sCheckUserData != pDS->userdata) ) { QMessageBox::warning(this, tr(APPLICATION_TITLE_SMALL), tr("Cannot add user-defined Data Source of type \"%1\"!\nThe Result Collector already has user-defined Data Source of type \"%2\".").arg(pDS->userdata).arg(sCheckUserData)); continue; } m_dsRefList << newdsref; QListWidgetItem *pItem = new QListWidgetItem( QObject::tr("DATA SOURCE %1: %2\nScale = %3 Bias = %4") .arg(pDS->index) .arg(pDS->name) .arg(newdsref.yscale) .arg(newdsref.bias), dsList ); pItem->setIcon( QIcon(":/png/16x16/source.png") ); if ( !bCheckUserData ) { sCheckUserData = pDS->userdata; bCheckUserData = true; } } exec_on_check_resultInput(); } }<|fim▁end|>
void MDTRA_ResultDialog :: exec_on_down_result_data_source( void )
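The move up/down slots in the row above swap adjacent entries in m_dsRefList and then move the matching QListWidget item so the data and the on-screen list stay aligned. The same bookkeeping as a small Python sketch (names illustrative):

def move_down(refs, labels, index):
    # Swap the data row and its label together, returning the new selected
    # index; out-of-range indices are left untouched.
    if not 0 <= index < len(refs) - 1:
        return index
    refs[index], refs[index + 1] = refs[index + 1], refs[index]
    labels[index], labels[index + 1] = labels[index + 1], labels[index]
    return index + 1

refs, labels = [1, 2, 3], ["a", "b", "c"]
assert move_down(refs, labels, 0) == 1
assert refs == [2, 1, 3] and labels == ["b", "a", "c"]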
<|file_name|>pin-needed-to-poll.rs<|end_file_name|><|fim▁begin|>use std::{ future::Future, pin::Pin, task::{Context, Poll}, }; struct Sleep; impl Future for Sleep {<|fim▁hole|> type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { Poll::Ready(()) } } impl Drop for Sleep { fn drop(&mut self) {} } fn sleep() -> Sleep { Sleep } struct MyFuture { sleep: Sleep, } impl MyFuture { fn new() -> Self { Self { sleep: sleep(), } } } impl Future for MyFuture { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { self.sleep.poll(cx) //~^ ERROR no method named `poll` found for struct `Sleep` in the current scope } } fn main() {}<|fim▁end|>
<|file_name|>set_nots.go<|end_file_name|><|fim▁begin|>package set import ( "fmt" "strings" ) // Provides a common set baseline for both threadsafe and non-ts Sets. type set struct { m map[interface{}]struct{} // struct{} doesn't take up space } // SetNonTS defines a non-thread safe set data structure. type SetNonTS struct { set } // NewNonTS creates and initializes a new non-threadsafe Set. // It accepts a variable number of arguments to populate the initial set. // If nothing is passed a SetNonTS with zero size is created. func NewNonTS(items ...interface{}) *SetNonTS { s := &SetNonTS{} s.m = make(map[interface{}]struct{}) // Ensure interface compliance var _ Interface = s s.Add(items...) return s } // New creates and initializes a new Set interface. It accepts a variable // number of arguments to populate the initial set. If nothing is passed a // zero size Set based on the struct is created. func (s *set) New(items ...interface{}) Interface { return NewNonTS(items...) } // Add includes the specified items (one or more) to the set. The underlying // Set s is modified. If passed nothing it silently returns. func (s *set) Add(items ...interface{}) { if len(items) == 0 { return } for _, item := range items { s.m[item] = keyExists } } // Remove deletes the specified items from the set. The underlying Set s is // modified. If passed nothing it silently returns. func (s *set) Remove(items ...interface{}) { if len(items) == 0 { return } for _, item := range items { delete(s.m, item) } } // Pop deletes and returns an item from the set. The underlying Set s is // modified. If set is empty, nil is returned. func (s *set) Pop() interface{} { for item := range s.m { delete(s.m, item) return item } return nil } // Has looks for the existence of items passed. It returns false if nothing is // passed. For multiple items it returns true only if all of the items exist. func (s *set) Has(items ...interface{}) bool { // assume checked for empty item, which does not exist if len(items) == 0 { return false } has := true for _, item := range items { if _, has = s.m[item]; !has { break } } return has } // Size returns the number of items in a set. func (s *set) Size() int { return len(s.m) } // Clear removes all items from the set. func (s *set) Clear() { s.m = make(map[interface{}]struct{})<|fim▁hole|> return s.Size() == 0 } // IsEqual tests whether s and t are the same in size and have the same items. func (s *set) IsEqual(t Interface) bool { // Force locking only if given set is threadsafe. if conv, ok := t.(*Set); ok { conv.l.RLock() defer conv.l.RUnlock() } // return false if they are not the same size if sameSize := len(s.m) == t.Size(); !sameSize { return false } equal := true t.Each(func(item interface{}) bool { _, equal = s.m[item] return equal // if false, Each() will end }) return equal } // IsSubset tests whether t is a subset of s. func (s *set) IsSubset(t Interface) (subset bool) { subset = true t.Each(func(item interface{}) bool { _, subset = s.m[item] return subset }) return } // IsSuperset tests whether t is a superset of s. func (s *set) IsSuperset(t Interface) bool { return t.IsSubset(s) } // Each traverses the items in the Set, calling the provided function for each // set member. Traversal will continue until all items in the Set have been // visited, or if the closure returns false.
func (s *set) Each(f func(item interface{}) bool) { for item := range s.m { if !f(item) { break } } } // String returns a string representation of s func (s *set) String() string { t := make([]string, 0, len(s.List())) for _, item := range s.List() { t = append(t, fmt.Sprintf("%v", item)) } return fmt.Sprintf("[%s]", strings.Join(t, ", ")) } // List returns a slice of all items. There are also StringSlice() and // IntSlice() methods for returning slices of type string or int. func (s *set) List() []interface{} { list := make([]interface{}, 0, len(s.m)) for item := range s.m { list = append(list, item) } return list } // Copy returns a new Set with a copy of s. func (s *set) Copy() Interface { return NewNonTS(s.List()...) } // Merge is like Union, however it modifies the current set it's applied on // with the given t set. func (s *set) Merge(t Interface) { t.Each(func(item interface{}) bool { s.m[item] = keyExists return true }) } // Separate removes the set items contained in t from set s. Please be aware // that it is not the opposite of Merge. func (s *set) Separate(t Interface) { s.Remove(t.List()...) }<|fim▁end|>
} // IsEmpty reports whether the Set is empty. func (s *set) IsEmpty() bool {
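The Go row keeps set members as map keys with empty-struct values, so equality and subset checks reduce to key lookups. A minimal Python counterpart of those semantics (dict-backed, names illustrative):

class DictSet:
    def __init__(self, *items):
        self.m = {item: None for item in items}
    def has(self, *items):
        # False for an empty call, True only if every item is present.
        return bool(items) and all(item in self.m for item in items)
    def is_subset(self, other):
        # Mirrors IsSubset: is `other` contained in `self`?
        return all(item in self.m for item in other.m)
    def is_equal(self, other):
        return len(self.m) == len(other.m) and self.is_subset(other)

a, b = DictSet(1, 2, 3), DictSet(1, 2)
assert a.is_subset(b) and not b.is_subset(a) and not a.is_equal(b)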
<|file_name|>srcgen.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 import sys import os import argparse parser = argparse.ArgumentParser() parser.add_argument('--input', dest='input', help='the input file') parser.add_argument('--output', dest='output', help='the output file') parser.add_argument('--upper', dest='upper', action='store_true', default=False, help='Convert to upper case.') c_templ = '''int %s() { return 0; } ''' options = parser.parse_args(sys.argv[1:])<|fim▁hole|> funcname = open(options.input).readline().strip() if options.upper: funcname = funcname.upper() open(options.output, 'w').write(c_templ % funcname)<|fim▁end|>
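srcgen.py above reads a function name from the first line of the input file and renders it into a C stub, optionally upper-cased. Its pure core, sketched without the argparse and file I/O; the template whitespace is approximated from the flattened row:

def render_stub(first_line, upper=False):
    funcname = first_line.strip()
    if upper:
        funcname = funcname.upper()
    return "int %s() {\n    return 0;\n}\n" % funcname

assert render_stub("do_work\n", upper=True).startswith("int DO_WORK()")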
<|file_name|>base.cpp<|end_file_name|><|fim▁begin|>/***************************************************************************** * * This file is part of Mapnik (c++ mapping toolkit) * * Copyright (C) 2021 Artem Pavlenko * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *****************************************************************************/ // mapnik #include <mapnik/text/placements/base.hpp> namespace mapnik { text_placements::text_placements() : defaults() {} void text_placements::add_expressions(expression_set& output) const { defaults.add_expressions(output);<|fim▁hole|>text_placement_info::text_placement_info(text_placements const* parent, double scale_factor_) : properties(parent->defaults) , scale_factor(scale_factor_) {} } // namespace mapnik<|fim▁end|>
}
<|file_name|>journal.go<|end_file_name|><|fim▁begin|>// Copyright 2011 The LevelDB-Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0 // License, authors and contributors information can be found at the below URLs respectively: // https://code.google.com/p/leveldb-go/source/browse/LICENSE // https://code.google.com/p/leveldb-go/source/browse/AUTHORS // https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS // Package journal reads and writes sequences of journals. Each journal is a stream // of bytes that completes before the next journal starts. // // When reading, call Next to obtain an io.Reader for the next journal. Next will // return io.EOF when there are no more journals. It is valid to call Next // without reading the current journal to exhaustion. // // When writing, call Next to obtain an io.Writer for the next journal. Calling // Next finishes the current journal. Call Close to finish the final journal. // // Optionally, call Flush to finish the current journal and flush the underlying // writer without starting a new journal. To start a new journal after flushing, // call Next. // // Neither Readers nor Writers are safe to use concurrently. // // Example code: // func read(r io.Reader) ([]string, error) { // var ss []string // journals := journal.NewReader(r, nil, true, true) // for { // j, err := journals.Next() // if err == io.EOF { // break // } // if err != nil { // return nil, err // } // s, err := ioutil.ReadAll(j) // if err != nil { // return nil, err // } // ss = append(ss, string(s)) // } // return ss, nil // } // // func write(w io.Writer, ss []string) error { // journals := journal.NewWriter(w) // for _, s := range ss { // j, err := journals.Next() // if err != nil { // return err // } // if _, err := j.Write([]byte(s)); err != nil { // return err // } // } // return journals.Close() // } // // The wire format is that the stream is divided into 32KiB blocks, and each // block contains a number of tightly packed chunks. Chunks cannot cross block // boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a // block must be zero. // // A journal maps to one or more chunks. Each chunk has a 7 byte header (a 4 // byte checksum, a 2 byte little-endian uint16 length, and a 1 byte chunk type) // followed by a payload. The checksum is over the chunk type and the payload. // // There are four chunk types: whether the chunk is the full journal, or the // first, middle or last chunk of a multi-chunk journal. A multi-chunk journal // has one first chunk, zero or more middle chunks, and one last chunk. // // The wire format allows for limited recovery in the face of data corruption: // on a format error (such as a checksum mismatch), the reader moves to the // next block and looks for the next full or first chunk. package journal import ( "encoding/binary" "fmt" "io" "github.com/eris-ltd/mint-client/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" "github.com/eris-ltd/mint-client/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" ) // These constants are part of the wire format and should not be changed.
const ( fullChunkType = 1 firstChunkType = 2 middleChunkType = 3 lastChunkType = 4 ) const ( blockSize = 32 * 1024 headerSize = 7 ) type flusher interface { Flush() error } // ErrCorrupted is the error type that is generated by a corrupted block or chunk. type ErrCorrupted struct { Size int Reason string } func (e *ErrCorrupted) Error() string { return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size) } // Dropper is the interface that wraps the simple Drop method. The Drop // method will be called when the journal reader drops a block or chunk. type Dropper interface { Drop(err error) } // Reader reads journals from an underlying io.Reader. type Reader struct { // r is the underlying reader. r io.Reader // the dropper. dropper Dropper // strict flag. strict bool // checksum flag. checksum bool // seq is the sequence number of the current journal. seq int // buf[i:j] is the unread portion of the current chunk's payload. // The low bound, i, excludes the chunk header. i, j int // n is the number of bytes of buf that are valid. Once reading has started, // only the final block can have n < blockSize. n int // last is whether the current chunk is the last chunk of the journal. last bool // err is any accumulated error. err error // buf is the buffer. buf [blockSize]byte } // NewReader returns a new reader. The dropper may be nil, and if // strict is true then corrupted or invalid chunks will halt the journal // reader entirely. func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader { return &Reader{ r: r, dropper: dropper, strict: strict, checksum: checksum, last: true, } } var errSkip = errors.New("leveldb/journal: skipped") func (r *Reader) corrupt(n int, reason string, skip bool) error { if r.dropper != nil { r.dropper.Drop(&ErrCorrupted{n, reason}) } if r.strict && !skip { r.err = errors.NewErrCorrupted(nil, &ErrCorrupted{n, reason}) return r.err } return errSkip } // nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the // next block into the buffer if necessary. func (r *Reader) nextChunk(first bool) error { for { if r.j+headerSize <= r.n { checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4]) length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6]) chunkType := r.buf[r.j+6] if checksum == 0 && length == 0 && chunkType == 0 { // Drop entire block. m := r.n - r.j r.i = r.n r.j = r.n return r.corrupt(m, "zero header", false) } else { m := r.n - r.j r.i = r.j + headerSize r.j = r.j + headerSize + int(length) if r.j > r.n { // Drop entire block. r.i = r.n r.j = r.n return r.corrupt(m, "chunk length overflows block", false) } else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() { // Drop entire block. r.i = r.n r.j = r.n return r.corrupt(m, "checksum mismatch", false) } } if first && chunkType != fullChunkType && chunkType != firstChunkType { m := r.j - r.i r.i = r.j // Report the error, but skip it. return r.corrupt(m+headerSize, "orphan chunk", true) } r.last = chunkType == fullChunkType || chunkType == lastChunkType return nil } // The last block. if r.n < blockSize && r.n > 0 { if !first { return r.corrupt(0, "missing chunk part", false) } r.err = io.EOF return r.err } // Read block. n, err := io.ReadFull(r.r, r.buf[:]) if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { return err } if n == 0 { if !first { return r.corrupt(0, "missing chunk part", false) } r.err = io.EOF return r.err } r.i, r.j, r.n = 0, 0, n } } // Next returns a reader for the next journal.
It returns io.EOF if there are no // more journals. The reader returned becomes stale after the next Next call, // and should no longer be used. If strict is false, the reader will return an // io.ErrUnexpectedEOF error when it finds a corrupted journal. func (r *Reader) Next() (io.Reader, error) { r.seq++ if r.err != nil { return nil, r.err } r.i = r.j for { if err := r.nextChunk(true); err == nil { break } else if err != errSkip { return nil, err } } return &singleReader{r, r.seq, nil}, nil } // Reset resets the journal reader, allowing reuse of the journal reader. Reset returns // the last accumulated error. func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error { r.seq++ err := r.err r.r = reader r.dropper = dropper r.strict = strict r.checksum = checksum r.i = 0 r.j = 0 r.n = 0 r.last = true r.err = nil return err } type singleReader struct { r *Reader seq int err error } func (x *singleReader) Read(p []byte) (int, error) { r := x.r if r.seq != x.seq { return 0, errors.New("leveldb/journal: stale reader") } if x.err != nil { return 0, x.err } if r.err != nil { return 0, r.err } for r.i == r.j { if r.last { return 0, io.EOF } x.err = r.nextChunk(false) if x.err != nil { if x.err == errSkip { x.err = io.ErrUnexpectedEOF } return 0, x.err } } n := copy(p, r.buf[r.i:r.j]) r.i += n return n, nil } func (x *singleReader) ReadByte() (byte, error) { r := x.r if r.seq != x.seq { return 0, errors.New("leveldb/journal: stale reader") } if x.err != nil { return 0, x.err } if r.err != nil { return 0, r.err } for r.i == r.j { if r.last { return 0, io.EOF } x.err = r.nextChunk(false) if x.err != nil { if x.err == errSkip { x.err = io.ErrUnexpectedEOF } return 0, x.err } } c := r.buf[r.i] r.i++ return c, nil } // Writer writes journals to an underlying io.Writer. type Writer struct { // w is the underlying writer. w io.Writer // seq is the sequence number of the current journal. seq int // f is w as a flusher. f flusher // buf[i:j] is the bytes that will become the current chunk. // The low bound, i, includes the chunk header. i, j int // buf[:written] has already been written to w. // written is zero unless Flush has been called. written int // first is whether the current chunk is the first chunk of the journal. first bool // pending is whether a chunk is buffered but not yet written. pending bool // err is any accumulated error. err error // buf is the buffer. buf [blockSize]byte } // NewWriter returns a new Writer. func NewWriter(w io.Writer) *Writer { f, _ := w.(flusher) return &Writer{<|fim▁hole|> w: w, f: f, } } // fillHeader fills in the header for the pending chunk. func (w *Writer) fillHeader(last bool) { if w.i+headerSize > w.j || w.j > blockSize { panic("leveldb/journal: bad writer state") } if last { if w.first { w.buf[w.i+6] = fullChunkType } else { w.buf[w.i+6] = lastChunkType } } else { if w.first { w.buf[w.i+6] = firstChunkType } else { w.buf[w.i+6] = middleChunkType } } binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value()) binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize)) } // writeBlock writes the buffered block to the underlying writer, and reserves // space for the next chunk's header. func (w *Writer) writeBlock() { _, w.err = w.w.Write(w.buf[w.written:]) w.i = 0 w.j = headerSize w.written = 0 } // writePending finishes the current journal and writes the buffer to the // underlying writer.
func (w *Writer) writePending() { if w.err != nil { return } if w.pending { w.fillHeader(true) w.pending = false } _, w.err = w.w.Write(w.buf[w.written:w.j]) w.written = w.j } // Close finishes the current journal and closes the writer. func (w *Writer) Close() error { w.seq++ w.writePending() if w.err != nil { return w.err } w.err = errors.New("leveldb/journal: closed Writer") return nil } // Flush finishes the current journal, writes to the underlying writer, and // flushes it if that writer implements interface{ Flush() error }. func (w *Writer) Flush() error { w.seq++ w.writePending() if w.err != nil { return w.err } if w.f != nil { w.err = w.f.Flush() return w.err } return nil } // Reset resets the journal writer, allowing reuse of the journal writer. Reset // will also close the journal writer if it is not closed already. func (w *Writer) Reset(writer io.Writer) (err error) { w.seq++ if w.err == nil { w.writePending() err = w.err } w.w = writer w.f, _ = writer.(flusher) w.i = 0 w.j = 0 w.written = 0 w.first = false w.pending = false w.err = nil return } // Next returns a writer for the next journal. The writer returned becomes stale // after the next Close, Flush or Next call, and should no longer be used. func (w *Writer) Next() (io.Writer, error) { w.seq++ if w.err != nil { return nil, w.err } if w.pending { w.fillHeader(true) } w.i = w.j w.j = w.j + headerSize // Check if there is room in the block for the header. if w.j > blockSize { // Fill in the rest of the block with zeroes. for k := w.i; k < blockSize; k++ { w.buf[k] = 0 } w.writeBlock() if w.err != nil { return nil, w.err } } w.first = true w.pending = true return singleWriter{w, w.seq}, nil } type singleWriter struct { w *Writer seq int } func (x singleWriter) Write(p []byte) (int, error) { w := x.w if w.seq != x.seq { return 0, errors.New("leveldb/journal: stale writer") } if w.err != nil { return 0, w.err } n0 := len(p) for len(p) > 0 { // Write a block, if it is full. if w.j == blockSize { w.fillHeader(false) w.writeBlock() if w.err != nil { return 0, w.err } w.first = false } // Copy bytes into the buffer. n := copy(w.buf[w.j:], p) w.j += n p = p[n:] } return n0, nil }<|fim▁end|>
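journal.go's wire format, per the comments in the row above: 32KiB blocks of tightly packed chunks, each with a 7-byte header of a 4-byte checksum, a 2-byte little-endian length, and a 1-byte chunk type, where the checksum covers the type byte and the payload. A Python sketch of the header packing; zlib.crc32 stands in for the masked CRC the Go code takes from leveldb's util package:

import struct
import zlib

BLOCK_SIZE = 32 * 1024
HEADER_SIZE = 7
FULL, FIRST, MIDDLE, LAST = 1, 2, 3, 4

def pack_chunk(payload, chunk_type):
    assert len(payload) <= BLOCK_SIZE - HEADER_SIZE
    # The checksum is over the chunk type and the payload, as documented.
    checksum = zlib.crc32(bytes([chunk_type]) + payload) & 0xFFFFFFFF
    return struct.pack("<IHB", checksum, len(payload), chunk_type) + payload

chunk = pack_chunk(b"hello journal", FULL)
assert len(chunk) == HEADER_SIZE + len(b"hello journal")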
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Traits, helpers, and type definitions for core I/O functionality. #![stable(feature = "rust1", since = "1.0.0")] use cmp; use rustc_unicode::str as core_str; use error as std_error; use fmt; use iter::{self, Iterator, Extend}; use marker::Sized; use ops::{Drop, FnOnce}; use option::Option::{self, Some, None}; use result::Result::{Ok, Err}; use result; use string::String; use str; use vec::Vec; pub use self::buffered::{BufReader, BufWriter, BufStream, LineWriter}; pub use self::buffered::IntoInnerError; pub use self::cursor::Cursor; pub use self::error::{Result, Error, ErrorKind}; pub use self::util::{copy, sink, Sink, empty, Empty, repeat, Repeat}; pub use self::stdio::{stdin, stdout, stderr, _print, Stdin, Stdout, Stderr}; pub use self::stdio::{StdoutLock, StderrLock, StdinLock}; #[doc(no_inline, hidden)] pub use self::stdio::{set_panic, set_print}; #[macro_use] mod lazy; pub mod prelude; mod buffered; mod cursor; mod error; mod impls; mod util; mod stdio; const DEFAULT_BUF_SIZE: usize = 64 * 1024; // A few methods below (read_to_string, read_line) will append data into a // `String` buffer, but we need to be pretty careful when doing this. The // implementation will just call `.as_mut_vec()` and then delegate to a // byte-oriented reading method, but we must ensure that when returning we never // leave `buf` in a state such that it contains invalid UTF-8 in its bounds. // // To this end, we use an RAII guard (to protect against panics) which updates // the length of the string when it is dropped. This guard initially truncates // the string to the prior length and only after we've validated that the // new contents are valid UTF-8 do we allow it to set a longer length. // // The unsafety in this function is twofold: // // 1. We're looking at the raw bytes of `buf`, so we take on the burden of UTF-8 // checks. // 2. We're passing a raw buffer to the function `f`, and it is expected that // the function only *appends* bytes to the buffer. We'll get undefined // behavior if existing bytes are overwritten to have non-UTF-8 data. fn append_to_string<F>(buf: &mut String, f: F) -> Result<usize> where F: FnOnce(&mut Vec<u8>) -> Result<usize> { struct Guard<'a> { s: &'a mut Vec<u8>, len: usize } impl<'a> Drop for Guard<'a> { fn drop(&mut self) { unsafe { self.s.set_len(self.len); } } } unsafe { let mut g = Guard { len: buf.len(), s: buf.as_mut_vec() }; let ret = f(g.s); if str::from_utf8(&g.s[g.len..]).is_err() { ret.and_then(|_| { Err(Error::new(ErrorKind::InvalidInput, "stream did not contain valid UTF-8")) }) } else { g.len = g.s.len(); ret } } } // This uses an adaptive system to extend the vector when it fills. We want to // avoid paying to allocate and zero a huge chunk of memory if the reader only // has 4 bytes while still making large reads if the reader does have a ton // of data to return. Simply tacking on an extra DEFAULT_BUF_SIZE space every // time is 4,500 times (!) slower than this if the reader has a very small // amount of data to return. 
fn read_to_end<R: Read + ?Sized>(r: &mut R, buf: &mut Vec<u8>) -> Result<usize> { let start_len = buf.len(); let mut len = start_len; let mut new_write_size = 16; let ret; loop { if len == buf.len() { if new_write_size < DEFAULT_BUF_SIZE { new_write_size *= 2; } buf.extend(iter::repeat(0).take(new_write_size)); } match r.read(&mut buf[len..]) { Ok(0) => { ret = Ok(len - start_len); break; } Ok(n) => len += n, Err(ref e) if e.kind() == ErrorKind::Interrupted => {} Err(e) => { ret = Err(e); break; } } } buf.truncate(len); ret } /// A trait for objects which are byte-oriented sources. /// /// Readers are defined by one method, `read`. Each call to `read` will attempt /// to pull bytes from this source into a provided buffer. /// /// Readers are intended to be composable with one another. Many objects /// throughout the I/O and related libraries take and provide types which /// implement the `Read` trait. #[stable(feature = "rust1", since = "1.0.0")] pub trait Read { /// Pull some bytes from this source into the specified buffer, returning /// how many bytes were read. /// /// This function does not provide any guarantees about whether it blocks /// waiting for data, but if an object needs to block for a read but cannot /// it will typically signal this via an `Err` return value. /// /// If the return value of this method is `Ok(n)`, then it must be /// guaranteed that `0 <= n <= buf.len()`. A nonzero `n` value indicates /// that the buffer `buf` has been filled in with `n` bytes of data from this /// source. If `n` is `0`, then it can indicate one of two scenarios: /// /// 1. This reader has reached its "end of file" and will likely no longer /// be able to produce bytes. Note that this does not mean that the /// reader will *always* no longer be able to produce bytes. /// 2. The buffer specified was 0 bytes in length. /// /// No guarantees are provided about the contents of `buf` when this /// function is called, implementations cannot rely on any property of the /// contents of `buf` being true. It is recommended that implementations /// only write data to `buf` instead of reading its contents. /// /// # Errors /// /// If this function encounters any form of I/O or other error, an error /// variant will be returned. If an error is returned then it must be /// guaranteed that no bytes were read. #[stable(feature = "rust1", since = "1.0.0")] fn read(&mut self, buf: &mut [u8]) -> Result<usize>; /// Read all bytes until EOF in this source, placing them into `buf`. /// /// All bytes read from this source will be appended to the specified buffer /// `buf`. This function will continuously call `read` to append more data to /// `buf` until `read` returns either `Ok(0)` or an error of /// non-`ErrorKind::Interrupted` kind. /// /// If successful, this function will return the total number of bytes read. /// /// # Errors /// /// If this function encounters an error of the kind /// `ErrorKind::Interrupted` then the error is ignored and the operation /// will continue. /// /// If any other read error is encountered then this function immediately /// returns. Any bytes which have already been read will be appended to /// `buf`. #[stable(feature = "rust1", since = "1.0.0")] fn read_to_end(&mut self, buf: &mut Vec<u8>) -> Result<usize> { read_to_end(self, buf) } /// Read all bytes until EOF in this source, placing them into `buf`. /// /// If successful, this function returns the number of bytes which were read /// and appended to `buf`. 
/// /// # Errors /// /// If the data in this stream is *not* valid UTF-8 then an error is /// returned and `buf` is unchanged. /// /// See `read_to_end` for other error semantics. #[stable(feature = "rust1", since = "1.0.0")] fn read_to_string(&mut self, buf: &mut String) -> Result<usize> { // Note that we do *not* call `.read_to_end()` here. We are passing // `&mut Vec<u8>` (the raw contents of `buf`) into the `read_to_end` // method to fill it up. An arbitrary implementation could overwrite the // entire contents of the vector, not just append to it (which is what // we are expecting). // // To prevent extraneously checking the UTF-8-ness of the entire buffer // we pass it to our hardcoded `read_to_end` implementation which we // know is guaranteed to only read data into the end of the buffer. append_to_string(buf, |b| read_to_end(self, b)) } /// Creates a "by reference" adaptor for this instance of `Read`. /// /// The returned adaptor also implements `Read` and will simply borrow this /// current reader. #[stable(feature = "rust1", since = "1.0.0")] fn by_ref(&mut self) -> &mut Self where Self: Sized { self } /// Transforms this `Read` instance to an `Iterator` over its bytes. /// /// The returned type implements `Iterator` where the `Item` is `Result<u8, /// R::Err>`. The yielded item is `Ok` if a byte was successfully read and /// `Err` otherwise for I/O errors. EOF is mapped to returning `None` from /// this iterator. #[stable(feature = "rust1", since = "1.0.0")] fn bytes(self) -> Bytes<Self> where Self: Sized { Bytes { inner: self } } /// Transforms this `Read` instance to an `Iterator` over `char`s. /// /// This adaptor will attempt to interpret this reader as a UTF-8 encoded /// sequence of characters. The returned iterator will return `None` once /// EOF is reached for this reader. Otherwise each element yielded will be a /// `Result<char, E>` where `E` may contain information about what I/O error /// occurred or where decoding failed. /// /// Currently this adaptor will discard intermediate data read, and should /// be avoided if this is not desired. #[unstable(feature = "io", reason = "the semantics of a partial read/write \ of where errors happen is currently \ unclear and may change")] fn chars(self) -> Chars<Self> where Self: Sized { Chars { inner: self } } /// Creates an adaptor which will chain this stream with another. /// /// The returned `Read` instance will first read all bytes from this object /// until EOF is encountered. Afterwards the output is equivalent to the /// output of `next`. #[stable(feature = "rust1", since = "1.0.0")] fn chain<R: Read>(self, next: R) -> Chain<Self, R> where Self: Sized { Chain { first: self, second: next, done_first: false } } /// Creates an adaptor which will read at most `limit` bytes from it. /// /// This function returns a new instance of `Read` which will read at most /// `limit` bytes, after which it will always return EOF (`Ok(0)`). Any /// read errors will not count towards the number of bytes read and future /// calls to `read` may succeed. #[stable(feature = "rust1", since = "1.0.0")] fn take(self, limit: u64) -> Take<Self> where Self: Sized { Take { inner: self, limit: limit } } /// Creates a reader adaptor which will write all read data into the given /// output stream. /// /// Whenever the returned `Read` instance is read it will write the read /// data to `out`. The current semantics of this implementation imply that /// a `write` error will not report how much data was initially read. 
#[unstable(feature = "io", reason = "the semantics of a partial read/write \ of where errors happen is currently \ unclear and may change")] fn tee<W: Write>(self, out: W) -> Tee<Self, W> where Self: Sized { Tee { reader: self, writer: out } } } /// A trait for objects which are byte-oriented sinks. /// /// The `write` method will attempt to write some data into the object, /// returning how many bytes were successfully written. /// /// The `flush` method is useful for adaptors and explicit buffers themselves /// for ensuring that all buffered data has been pushed out to the "true sink". /// /// Writers are intended to be composable with one another. Many objects /// throughout the I/O and related libraries take and provide types which /// implement the `Write` trait. #[stable(feature = "rust1", since = "1.0.0")] pub trait Write { /// Write a buffer into this object, returning how many bytes were written. /// /// This function will attempt to write the entire contents of `buf`, but /// the entire write may not succeed, or the write may also generate an /// error. A call to `write` represents *at most one* attempt to write to /// any wrapped object. /// /// Calls to `write` are not guaranteed to block waiting for data to be /// written, and a write which would otherwise block can indicated through /// an `Err` variant. /// /// If the return value is `Ok(n)` then it must be guaranteed that /// `0 <= n <= buf.len()`. A return value of `0` typically means that the /// underlying object is no longer able to accept bytes and will likely not /// be able to in the future as well, or that the buffer provided is empty. /// /// # Errors /// /// Each call to `write` may generate an I/O error indicating that the /// operation could not be completed. If an error is returned then no bytes /// in the buffer were written to this writer. /// /// It is **not** considered an error if the entire buffer could not be /// written to this writer. #[stable(feature = "rust1", since = "1.0.0")] fn write(&mut self, buf: &[u8]) -> Result<usize>; /// Flush this output stream, ensuring that all intermediately buffered /// contents reach their destination. /// /// # Errors /// /// It is considered an error if not all bytes could be written due to /// I/O errors or EOF being reached. #[stable(feature = "rust1", since = "1.0.0")] fn flush(&mut self) -> Result<()>; /// Attempts to write an entire buffer into this write. /// /// This method will continuously call `write` while there is more data to /// write. This method will not return until the entire buffer has been /// successfully written or an error occurs. The first error generated from /// this method will be returned. /// /// # Errors /// /// This function will return the first error that `write` returns. #[stable(feature = "rust1", since = "1.0.0")] fn write_all(&mut self, mut buf: &[u8]) -> Result<()> { while !buf.is_empty() { match self.write(buf) { Ok(0) => return Err(Error::new(ErrorKind::WriteZero, "failed to write whole buffer")), Ok(n) => buf = &buf[n..], Err(ref e) if e.kind() == ErrorKind::Interrupted => {} Err(e) => return Err(e), } } Ok(()) } /// Writes a formatted string into this writer, returning any error /// encountered. /// /// This method is primarily used to interface with the `format_args!` /// macro, but it is rare that this should explicitly be called. The /// `write!` macro should be favored to invoke this method instead. 
/// /// This function internally uses the `write_all` method on this trait and /// hence will continuously write data so long as no errors are received. /// This also means that partial writes are not indicated in this signature. /// /// # Errors /// /// This function will return any I/O error reported while formatting. #[stable(feature = "rust1", since = "1.0.0")] fn write_fmt(&mut self, fmt: fmt::Arguments) -> Result<()> { // Create a shim which translates a Write to a fmt::Write and saves // off I/O errors instead of discarding them. struct Adaptor<'a, T: ?Sized + 'a> { inner: &'a mut T, error: Result<()>, } impl<'a, T: Write + ?Sized> fmt::Write for Adaptor<'a, T> { fn write_str(&mut self, s: &str) -> fmt::Result { match self.inner.write_all(s.as_bytes()) { Ok(()) => Ok(()), Err(e) => { self.error = Err(e); Err(fmt::Error) } } } } let mut output = Adaptor { inner: self, error: Ok(()) }; match fmt::write(&mut output, fmt) { Ok(()) => Ok(()), Err(..) => output.error } } /// Creates a "by reference" adaptor for this instance of `Write`. /// /// The returned adaptor also implements `Write` and will simply borrow this /// current writer. #[stable(feature = "rust1", since = "1.0.0")] fn by_ref(&mut self) -> &mut Self where Self: Sized { self } /// Creates a new writer which will write all data to both this writer and /// another writer. /// /// All data written to the returned writer will both be written to `self` /// as well as `other`. Note that the error semantics of the current /// implementation do not precisely track where errors happen. For example /// an error on the second call to `write` will not report that the first /// call to `write` succeeded. #[unstable(feature = "io", reason = "the semantics of a partial read/write \ of where errors happen is currently \ unclear and may change")] fn broadcast<W: Write>(self, other: W) -> Broadcast<Self, W> where Self: Sized { Broadcast { first: self, second: other } } } /// An object implementing `Seek` internally has some form of cursor which can /// be moved within a stream of bytes. /// /// The stream typically has a fixed size, allowing seeking relative to either /// end or the current offset. #[stable(feature = "rust1", since = "1.0.0")] pub trait Seek { /// Seek to an offset, in bytes, in a stream. /// /// A seek beyond the end of a stream is allowed, but seeking before offset /// 0 is an error. /// /// The behavior when seeking past the end of the stream is implementation /// defined. /// /// This method returns the new position within the stream if the seek /// operation completed successfully. /// /// # Errors /// /// Seeking to a negative offset is considered an error. #[stable(feature = "rust1", since = "1.0.0")] fn seek(&mut self, pos: SeekFrom) -> Result<u64>; } /// Enumeration of possible methods to seek within an I/O object. #[derive(Copy, PartialEq, Eq, Clone, Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub enum SeekFrom { /// Set the offset to the provided number of bytes. #[stable(feature = "rust1", since = "1.0.0")] Start(u64), /// Set the offset to the size of this object plus the specified number of /// bytes. /// /// It is possible to seek beyond the end of an object, but it is an error to /// seek before byte 0. #[stable(feature = "rust1", since = "1.0.0")] End(i64), /// Set the offset to the current position plus the specified number of /// bytes. /// /// It is possible to seek beyond the end of an object, but it is an error to /// seek before byte 0.
#[stable(feature = "rust1", since = "1.0.0")] Current(i64), } fn read_until<R: BufRead + ?Sized>(r: &mut R, delim: u8, buf: &mut Vec<u8>) -> Result<usize> { let mut read = 0; loop { let (done, used) = { let available = match r.fill_buf() { Ok(n) => n, Err(ref e) if e.kind() == ErrorKind::Interrupted => continue, Err(e) => return Err(e) }; match available.position_elem(&delim) { Some(i) => { buf.push_all(&available[..i + 1]); (true, i + 1) } None => { buf.push_all(available); (false, available.len()) } } }; r.consume(used); read += used; if done || used == 0 { return Ok(read); } } } /// A Buffer is a type of reader which has some form of internal buffering to /// allow certain kinds of reading operations to be more optimized than others. /// /// This type extends the `Read` trait with a few methods that are not /// possible to reasonably implement with purely a read interface. #[stable(feature = "rust1", since = "1.0.0")] pub trait BufRead: Read { /// Fills the internal buffer of this object, returning the buffer contents. /// /// None of the contents will be "read" in the sense that later calling /// `read` may return the same contents. /// /// The `consume` function must be called with the number of bytes that are /// consumed from this buffer returned to ensure that the bytes are never /// returned twice. /// /// An empty buffer returned indicates that the stream has reached EOF. /// /// # Errors /// /// This function will return an I/O error if the underlying reader was /// read, but returned an error. #[stable(feature = "rust1", since = "1.0.0")] fn fill_buf(&mut self) -> Result<&[u8]>; /// Tells this buffer that `amt` bytes have been consumed from the buffer, /// so they should no longer be returned in calls to `read`. /// /// This function does not perform any I/O, it simply informs this object /// that some amount of its buffer, returned from `fill_buf`, has been /// consumed and should no longer be returned. /// /// This function is used to tell the buffer how many bytes you've consumed /// from the return value of `fill_buf`, and so may do odd things if /// `fill_buf` isn't called before calling this. /// /// The `amt` must be `<=` the number of bytes in the buffer returned by `fill_buf`. #[stable(feature = "rust1", since = "1.0.0")] fn consume(&mut self, amt: usize); /// Read all bytes until the delimiter `byte` is reached. /// /// This function will continue to read (and buffer) bytes from the /// underlying stream until the delimiter or EOF is found. Once found, all /// bytes up to, and including, the delimiter (if found) will be appended to /// `buf`. /// /// If this buffered reader is currently at EOF, then this function will not /// place any more bytes into `buf` and will return `Ok(n)` where `n` is the /// number of bytes which were read. /// /// # Errors /// /// This function will ignore all instances of `ErrorKind::Interrupted` and /// will otherwise return any errors returned by `fill_buf`. /// /// If an I/O error is encountered then all bytes read so far will be /// present in `buf` and its length will have been adjusted appropriately. #[stable(feature = "rust1", since = "1.0.0")] fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> Result<usize> { read_until(self, byte, buf) } /// Read all bytes until a newline byte (the 0xA byte) is reached, and /// append them to the provided buffer. /// /// This function will continue to read (and buffer) bytes from the /// underlying stream until the newline delimiter (the 0xA byte) or EOF is /// found. 
Once found, all bytes up to, and including, the delimiter (if /// found) will be appended to `buf`. /// /// If this reader is currently at EOF then this function will not modify /// `buf` and will return `Ok(n)` where `n` is the number of bytes which /// were read. /// /// # Errors /// /// This function has the same error semantics as `read_until` and will also /// return an error if the read bytes are not valid UTF-8. If an I/O error /// is encountered then `buf` may contain some bytes already read in the /// event that all data read so far was valid UTF-8. #[stable(feature = "rust1", since = "1.0.0")] fn read_line(&mut self, buf: &mut String) -> Result<usize> { // Note that we are not calling the `.read_until` method here, but // rather our hardcoded implementation. For more details as to why, see // the comments in `read_to_end`. append_to_string(buf, |b| read_until(self, b'\n', b)) } /// Returns an iterator over the contents of this reader split on the byte /// `byte`. /// /// The iterator returned from this function will return instances of /// `io::Result<Vec<u8>>`. Each vector returned will *not* have the /// delimiter byte at the end. /// /// This function will yield errors whenever `read_until` would have also /// yielded an error. #[stable(feature = "rust1", since = "1.0.0")] fn split(self, byte: u8) -> Split<Self> where Self: Sized { Split { buf: self, delim: byte } } /// Returns an iterator over the lines of this reader. /// /// The iterator returned from this function will yield instances of /// `io::Result<String>`. Each string returned will *not* have a newline /// byte (the 0xA byte) at the end. #[stable(feature = "rust1", since = "1.0.0")] fn lines(self) -> Lines<Self> where Self: Sized { Lines { buf: self } } } /// A `Write` adaptor which will write data to multiple locations. /// /// For more information, see `Write::broadcast`. #[unstable(feature = "io", reason = "awaiting stability of Write::broadcast")] pub struct Broadcast<T, U> { first: T, second: U, } #[unstable(feature = "io", reason = "awaiting stability of Write::broadcast")] impl<T: Write, U: Write> Write for Broadcast<T, U> { fn write(&mut self, data: &[u8]) -> Result<usize> { let n = try!(self.first.write(data)); // FIXME: what if the write fails? (we wrote something) try!(self.second.write_all(&data[..n])); Ok(n) } fn flush(&mut self) -> Result<()> { self.first.flush().and(self.second.flush()) } } /// Adaptor to chain together two instances of `Read`. /// /// For more information, see `Read::chain`. #[stable(feature = "rust1", since = "1.0.0")] pub struct Chain<T, U> { first: T, second: U, done_first: bool, } #[stable(feature = "rust1", since = "1.0.0")] impl<T: Read, U: Read> Read for Chain<T, U> { fn read(&mut self, buf: &mut [u8]) -> Result<usize> { if !self.done_first { match try!(self.first.read(buf)) { 0 => { self.done_first = true; } n => return Ok(n), } } self.second.read(buf) } } /// Reader adaptor which limits the bytes read from an underlying reader. /// /// For more information, see `Read::take`. #[stable(feature = "rust1", since = "1.0.0")] pub struct Take<T> { inner: T, limit: u64, } #[stable(feature = "rust1", since = "1.0.0")] impl<T> Take<T> { /// Returns the number of bytes that can be read before this instance will /// return EOF. /// /// # Note /// /// This instance may reach EOF after reading fewer bytes than indicated by /// this method if the underlying `Read` instance reaches EOF. 
#[stable(feature = "rust1", since = "1.0.0")] pub fn limit(&self) -> u64 { self.limit } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: Read> Read for Take<T> { fn read(&mut self, buf: &mut [u8]) -> Result<usize> { // Don't call into inner reader at all at EOF because it may still block if self.limit == 0 { return Ok(0); } let max = cmp::min(buf.len() as u64, self.limit) as usize; let n = try!(self.inner.read(&mut buf[..max])); self.limit -= n as u64;<|fim▁hole|>#[stable(feature = "rust1", since = "1.0.0")] impl<T: BufRead> BufRead for Take<T> { fn fill_buf(&mut self) -> Result<&[u8]> { let buf = try!(self.inner.fill_buf()); let cap = cmp::min(buf.len() as u64, self.limit) as usize; Ok(&buf[..cap]) } fn consume(&mut self, amt: usize) { // Don't let callers reset the limit by passing an overlarge value let amt = cmp::min(amt as u64, self.limit) as usize; self.limit -= amt as u64; self.inner.consume(amt); } } /// An adaptor which will emit all read data to a specified writer as well. /// /// For more information see `Read::tee` #[unstable(feature = "io", reason = "awaiting stability of Read::tee")] pub struct Tee<R, W> { reader: R, writer: W, } #[unstable(feature = "io", reason = "awaiting stability of Read::tee")] impl<R: Read, W: Write> Read for Tee<R, W> { fn read(&mut self, buf: &mut [u8]) -> Result<usize> { let n = try!(self.reader.read(buf)); // FIXME: what if the write fails? (we read something) try!(self.writer.write_all(&buf[..n])); Ok(n) } } /// A bridge from implementations of `Read` to an `Iterator` of `u8`. /// /// See `Read::bytes` for more information. #[stable(feature = "rust1", since = "1.0.0")] pub struct Bytes<R> { inner: R, } #[stable(feature = "rust1", since = "1.0.0")] impl<R: Read> Iterator for Bytes<R> { type Item = Result<u8>; fn next(&mut self) -> Option<Result<u8>> { let mut buf = [0]; match self.inner.read(&mut buf) { Ok(0) => None, Ok(..) => Some(Ok(buf[0])), Err(e) => Some(Err(e)), } } } /// A bridge from implementations of `Read` to an `Iterator` of `char`. /// /// See `Read::chars` for more information. #[unstable(feature = "io", reason = "awaiting stability of Read::chars")] pub struct Chars<R> { inner: R, } /// An enumeration of possible errors that can be generated from the `Chars` /// adapter. #[derive(Debug)] #[unstable(feature = "io", reason = "awaiting stability of Read::chars")] pub enum CharsError { /// Variant representing that the underlying stream was read successfully /// but it did not contain valid utf8 data. NotUtf8, /// Variant representing that an I/O error occurred. Other(Error), } #[unstable(feature = "io", reason = "awaiting stability of Read::chars")] impl<R: Read> Iterator for Chars<R> { type Item = result::Result<char, CharsError>; fn next(&mut self) -> Option<result::Result<char, CharsError>> { let mut buf = [0]; let first_byte = match self.inner.read(&mut buf) { Ok(0) => return None, Ok(..) 
=> buf[0], Err(e) => return Some(Err(CharsError::Other(e))), }; let width = core_str::utf8_char_width(first_byte); if width == 1 { return Some(Ok(first_byte as char)) } if width == 0 { return Some(Err(CharsError::NotUtf8)) } let mut buf = [first_byte, 0, 0, 0]; { let mut start = 1; while start < width { match self.inner.read(&mut buf[start..width]) { Ok(0) => return Some(Err(CharsError::NotUtf8)), Ok(n) => start += n, Err(e) => return Some(Err(CharsError::Other(e))), } } } Some(match str::from_utf8(&buf[..width]).ok() { Some(s) => Ok(s.char_at(0)), None => Err(CharsError::NotUtf8), }) } } #[unstable(feature = "io", reason = "awaiting stability of Read::chars")] impl std_error::Error for CharsError { fn description(&self) -> &str { match *self { CharsError::NotUtf8 => "invalid utf8 encoding", CharsError::Other(ref e) => std_error::Error::description(e), } } fn cause(&self) -> Option<&std_error::Error> { match *self { CharsError::NotUtf8 => None, CharsError::Other(ref e) => e.cause(), } } } #[unstable(feature = "io", reason = "awaiting stability of Read::chars")] impl fmt::Display for CharsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { CharsError::NotUtf8 => { "byte stream did not contain valid utf8".fmt(f) } CharsError::Other(ref e) => e.fmt(f), } } } /// An iterator over the contents of an instance of `BufRead` split on a /// particular byte. /// /// See `BufRead::split` for more information. #[stable(feature = "rust1", since = "1.0.0")] pub struct Split<B> { buf: B, delim: u8, } #[stable(feature = "rust1", since = "1.0.0")] impl<B: BufRead> Iterator for Split<B> { type Item = Result<Vec<u8>>; fn next(&mut self) -> Option<Result<Vec<u8>>> { let mut buf = Vec::new(); match self.buf.read_until(self.delim, &mut buf) { Ok(0) => None, Ok(_n) => { if buf[buf.len() - 1] == self.delim { buf.pop(); } Some(Ok(buf)) } Err(e) => Some(Err(e)) } } } /// An iterator over the lines of an instance of `BufRead` split on a newline /// byte. /// /// See `BufRead::lines` for more information. 
#[stable(feature = "rust1", since = "1.0.0")] pub struct Lines<B> { buf: B, } #[stable(feature = "rust1", since = "1.0.0")] impl<B: BufRead> Iterator for Lines<B> { type Item = Result<String>; fn next(&mut self) -> Option<Result<String>> { let mut buf = String::new(); match self.buf.read_line(&mut buf) { Ok(0) => None, Ok(_n) => { if buf.ends_with("\n") { buf.pop(); } Some(Ok(buf)) } Err(e) => Some(Err(e)) } } } #[cfg(test)] mod tests { use prelude::v1::*; use io::prelude::*; use io; use super::Cursor; #[test] fn read_until() { let mut buf = Cursor::new(&b"12"[..]); let mut v = Vec::new(); assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 2); assert_eq!(v, b"12"); let mut buf = Cursor::new(&b"1233"[..]); let mut v = Vec::new(); assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 3); assert_eq!(v, b"123"); v.truncate(0); assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 1); assert_eq!(v, b"3"); v.truncate(0); assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 0); assert_eq!(v, []); } #[test] fn split() { let buf = Cursor::new(&b"12"[..]); let mut s = buf.split(b'3'); assert_eq!(s.next().unwrap().unwrap(), vec![b'1', b'2']); assert!(s.next().is_none()); let buf = Cursor::new(&b"1233"[..]); let mut s = buf.split(b'3'); assert_eq!(s.next().unwrap().unwrap(), vec![b'1', b'2']); assert_eq!(s.next().unwrap().unwrap(), vec![]); assert!(s.next().is_none()); } #[test] fn read_line() { let mut buf = Cursor::new(&b"12"[..]); let mut v = String::new(); assert_eq!(buf.read_line(&mut v).unwrap(), 2); assert_eq!(v, "12"); let mut buf = Cursor::new(&b"12\n\n"[..]); let mut v = String::new(); assert_eq!(buf.read_line(&mut v).unwrap(), 3); assert_eq!(v, "12\n"); v.truncate(0); assert_eq!(buf.read_line(&mut v).unwrap(), 1); assert_eq!(v, "\n"); v.truncate(0); assert_eq!(buf.read_line(&mut v).unwrap(), 0); assert_eq!(v, ""); } #[test] fn lines() { let buf = Cursor::new(&b"12"[..]); let mut s = buf.lines(); assert_eq!(s.next().unwrap().unwrap(), "12".to_string()); assert!(s.next().is_none()); let buf = Cursor::new(&b"12\n\n"[..]); let mut s = buf.lines(); assert_eq!(s.next().unwrap().unwrap(), "12".to_string()); assert_eq!(s.next().unwrap().unwrap(), "".to_string()); assert!(s.next().is_none()); } #[test] fn read_to_end() { let mut c = Cursor::new(&b""[..]); let mut v = Vec::new(); assert_eq!(c.read_to_end(&mut v).unwrap(), 0); assert_eq!(v, []); let mut c = Cursor::new(&b"1"[..]); let mut v = Vec::new(); assert_eq!(c.read_to_end(&mut v).unwrap(), 1); assert_eq!(v, b"1"); } #[test] fn read_to_string() { let mut c = Cursor::new(&b""[..]); let mut v = String::new(); assert_eq!(c.read_to_string(&mut v).unwrap(), 0); assert_eq!(v, ""); let mut c = Cursor::new(&b"1"[..]); let mut v = String::new(); assert_eq!(c.read_to_string(&mut v).unwrap(), 1); assert_eq!(v, "1"); let mut c = Cursor::new(&b"\xff"[..]); let mut v = String::new(); assert!(c.read_to_string(&mut v).is_err()); } #[test] fn take_eof() { struct R; impl Read for R { fn read(&mut self, _: &mut [u8]) -> io::Result<usize> { Err(io::Error::new(io::ErrorKind::Other, "")) } } let mut buf = [0; 1]; assert_eq!(0, R.take(0).read(&mut buf).unwrap()); } }<|fim▁end|>
        Ok(n)
    }
}
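A minimal sketch of how the `Read` combinators documented above compose; `read_header` and the 16-byte cap are hypothetical, and modern `std::io` signatures are assumed:

use std::io::Read;

fn read_header(stream: impl Read) -> std::io::Result<Vec<u8>> {
    // `take` caps the bytes pulled from `stream` at 16, so the unbounded
    // `read_to_end` loop above cannot overshoot the fixed-size header.
    let mut header = Vec::new();
    stream.take(16).read_to_end(&mut header)?;
    Ok(header)
}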
<|file_name|>UnixProcessManager.java<|end_file_name|><|fim▁begin|>/******************************************************************************* * Copyright (c) 2012-2017 Codenvy, S.A. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Contributors: * Codenvy, S.A. - initial API and implementation *******************************************************************************/ package org.eclipse.che.api.core.util; import com.sun.jna.Library; import com.sun.jna.Native; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.lang.reflect.Field; import java.util.ArrayList; import java.util.List; import java.util.regex.Pattern; /** * Process manager for *nix like system. * * @author andrew00x */ class UnixProcessManager extends ProcessManager { /* At the moment tested on linux only. */ private static final Logger LOG = LoggerFactory.getLogger(UnixProcessManager.class); private static final CLibrary C_LIBRARY;<|fim▁hole|> CLibrary lib = null; Field pidField = null; if (SystemInfo.isUnix()) { try { lib = ((CLibrary)Native.loadLibrary("c", CLibrary.class)); } catch (Exception e) { LOG.error("Cannot load native library", e); } try { pidField = Thread.currentThread().getContextClassLoader().loadClass("java.lang.UNIXProcess").getDeclaredField("pid"); pidField.setAccessible(true); } catch (Exception e) { LOG.error(e.getMessage(), e); } } C_LIBRARY = lib; PID_FIELD = pidField; } private static interface CLibrary extends Library { // kill -l int SIGKILL = 9; int SIGTERM = 15; int kill(int pid, int signal); String strerror(int errno); int system(String cmd); } private static final Pattern UNIX_PS_TABLE_PATTERN = Pattern.compile("\\s+"); @Override public void kill(Process process) { if (C_LIBRARY != null) { killTree(getPid(process)); } else { throw new IllegalStateException("Can't kill process. Not unix system?"); } } private void killTree(int pid) { final int[] children = getChildProcesses(pid); LOG.debug("PID: {}, child PIDs: {}", pid, children); if (children.length > 0) { for (int cpid : children) { killTree(cpid); // kill process tree recursively } } int r = C_LIBRARY.kill(pid, CLibrary.SIGKILL); // kill origin process LOG.debug("kill {}", pid); if (r != 0) { if (LOG.isDebugEnabled()) { LOG.debug("kill for {} returns {}, strerror '{}'", pid, r, C_LIBRARY.strerror(r)); } } } private int[] getChildProcesses(final int myPid) { final String ps = "ps -e -o ppid,pid,comm"; /* PPID, PID, COMMAND */ final List<Integer> children = new ArrayList<>(); final StringBuilder error = new StringBuilder(); final LineConsumer stdout = new LineConsumer() { @Override public void writeLine(String line) throws IOException { if (line != null && !line.isEmpty()) { final String[] tokens = UNIX_PS_TABLE_PATTERN.split(line.trim()); if (tokens.length == 3 /* PPID, PID, COMMAND */) { int ppid; try { ppid = Integer.parseInt(tokens[0]); } catch (NumberFormatException e) { // May be first line from process table: 'PPID PID COMMAND'. Skip it. 
return; } if (ppid == myPid) { int pid = Integer.parseInt(tokens[1]); children.add(pid); } } } } @Override public void close() throws IOException { } }; final LineConsumer stderr = new LineConsumer() { @Override public void writeLine(String line) throws IOException { if (error.length() > 0) { error.append('\n'); } error.append(line); } @Override public void close() throws IOException { } }; try { ProcessUtil.process(Runtime.getRuntime().exec(ps), stdout, stderr); } catch (IOException e) { throw new IllegalStateException(e); } if (error.length() > 0) { throw new IllegalStateException("can't get child processes: " + error.toString()); } final int size = children.size(); final int[] result = new int[size]; for (int i = 0; i < size; i++) { result[i] = children.get(i); } return result; } @Override public boolean isAlive(Process process) { return process.isAlive(); } int getPid(Process process) { if (PID_FIELD != null) { try { return ((Number)PID_FIELD.get(process)).intValue(); } catch (IllegalAccessException e) { throw new IllegalStateException("Can't get process' pid. Not unix system?", e); } } else { throw new IllegalStateException("Can't get process' pid. Not unix system?"); } } @Override int system(String command) { return C_LIBRARY.system(command); } }<|fim▁end|>
private static final Field PID_FIELD;

    static {
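A minimal sketch of the `ps -e -o ppid,pid,comm` parsing performed by getChildProcesses above; the sample line is hypothetical:

String line = "    1  4242 bash";            // one row of the process table
String[] tokens = line.trim().split("\\s+"); // ["1", "4242", "bash"]
int ppid = Integer.parseInt(tokens[0]);      // parent PID
int pid = Integer.parseInt(tokens[1]);       // child PID, kept if ppid == myPid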
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># Copyright 2013-2015 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import re from wlauto import AndroidUiAutoBenchmark, Parameter, Alias from wlauto.exceptions import ConfigError class Andebench(AndroidUiAutoBenchmark): name = 'andebench' description = """ AndEBench is an industry standard Android benchmark provided by The Embedded Microprocessor Benchmark Consortium (EEMBC). http://www.eembc.org/andebench/about.php From the website: - Initial focus on CPU and Dalvik interpreter performance - Internal algorithms concentrate on integer operations - Compares the difference between native and Java performance - Implements flexible multicore performance analysis - Results displayed in Iterations per second - Detailed log file for comprehensive engineering analysis """ package = 'com.eembc.coremark' activity = 'com.eembc.coremark.splash' summary_metrics = ['AndEMark Java', 'AndEMark Native'] <|fim▁hole|> description='Number of threads that will be spawned by AndEBench.'), Parameter('single_threaded', kind=bool, description=""" If ``true``, AndEBench will run with a single thread. Note: this must not be specified if ``number_of_threads`` has been specified. """), ] aliases = [ Alias('andebenchst', number_of_threads=1), ] regex = re.compile('\s*(?P<key>(AndEMark Native|AndEMark Java))\s*:' '\s*(?P<value>\d+)') def validate(self): if (self.number_of_threads is not None) and (self.single_threaded is not None): # pylint: disable=E1101 raise ConfigError('Can\'t specify both number_of_threads and single_threaded parameters.') def setup(self, context): if self.number_of_threads is None: # pylint: disable=access-member-before-definition if self.single_threaded: # pylint: disable=E1101 self.number_of_threads = 1 # pylint: disable=attribute-defined-outside-init else: self.number_of_threads = self.device.number_of_cores # pylint: disable=W0201 self.logger.debug('Using {} threads'.format(self.number_of_threads)) self.uiauto_params['number_of_threads'] = self.number_of_threads # Called after this setup as modifying uiauto_params super(Andebench, self).setup(context) def update_result(self, context): super(Andebench, self).update_result(context) results = {} with open(self.logcat_log) as fh: for line in fh: match = self.regex.search(line) if match: data = match.groupdict() results[data['key']] = data['value'] for key, value in results.iteritems(): context.result.add_metric(key, value)<|fim▁end|>
parameters = [
        Parameter('number_of_threads', kind=int,
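A minimal sketch of the logcat scraping done by update_result above; the sample line is hypothetical:

import re

regex = re.compile(r'\s*(?P<key>(AndEMark Native|AndEMark Java))\s*:\s*(?P<value>\d+)')
match = regex.search('AndEMark Native : 12345')
if match:
    print(match.groupdict())  # {'key': 'AndEMark Native', 'value': '12345'}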
<|file_name|>extension_fullscreen_apitest.cc<|end_file_name|><|fim▁begin|>// Copyright (c) 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/extensions/extension_apitest.h" #include "chrome/browser/ui/browser_window.h" IN_PROC_BROWSER_TEST_F(ExtensionApiTest, ExtensionFullscreenAccessFail) { // Test that fullscreen can be accessed from an extension without permission. ASSERT_TRUE(RunPlatformAppTest("fullscreen/no_permission")) << message_; } // Disabled, a user gesture is required for fullscreen. http://crbug.com/174178 IN_PROC_BROWSER_TEST_F(ExtensionApiTest, DISABLED_ExtensionFullscreenAccessPass) { // Test that fullscreen can be accessed from an extension with permission. ASSERT_TRUE(RunPlatformAppTest("fullscreen/has_permission")) << message_; } IN_PROC_BROWSER_TEST_F(ExtensionApiTest, FocusWindowDoesNotExitFullscreen) { browser()->window()->EnterFullscreen( GURL(), EXCLUSIVE_ACCESS_BUBBLE_TYPE_BROWSER_FULLSCREEN_EXIT_INSTRUCTION, false); bool is_fullscreen = browser()->window()->IsFullscreen(); ASSERT_TRUE(RunExtensionTest("window_update/focus")) << message_; ASSERT_EQ(is_fullscreen, browser()->window()->IsFullscreen()); } // Fails flakily: http://crbug.com/308041 IN_PROC_BROWSER_TEST_F(ExtensionApiTest,<|fim▁hole|> DISABLED_UpdateWindowSizeExitsFullscreen) { browser()->window()->EnterFullscreen( GURL(), EXCLUSIVE_ACCESS_BUBBLE_TYPE_BROWSER_FULLSCREEN_EXIT_INSTRUCTION, false); ASSERT_TRUE(RunExtensionTest("window_update/sizing")) << message_; ASSERT_FALSE(browser()->window()->IsFullscreen()); }<|fim▁end|>
<|file_name|>threadmanager.rs<|end_file_name|><|fim▁begin|>// rustyVM - Java VM written in pure Rust // Copyright (c) 2013 Alexander Gessler // // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of // the Software, and to permit persons to whom the Software is furnished to do so, // subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // use std::hashmap::{HashMap}; use objectbroker::{ObjectBroker}; // Global thread state and management. All threads maintain some global state // in the central Broker messaging task. Global state includes scheduling info, // join()-waitlists as well as whether threads are daemons or not. // Notably, this design makes access to threads scale badly as it is inherently // single-threaded. On the other side, it makes thread maintenance very easy // and safe. It is reasonable to assume that most code will not too often interact // directly with java.lang.Thread. // References: // http://docs.oracle.com/javase/7/docs/api/java/lang/Thread.html // http://docs.oracle.com/javase/7/docs/api/java/lang/ThreadGroup.html // Remote messages responded to by the threadmanager (messages are received // and forwarded by broker) pub enum RemoteThreadOpMessage { THREAD_JOIN, THREAD_NOTIFY_TERMINATION, THREAD_SET_PRIORITY(int), THREAD_SET_NAME(~str) } pub struct GlobThreadInfo { tid : uint, // given name of the thread, used for debugging name : ~str, // java thread priority priority : int, // daemon : bool, } pub struct ThreadManager { priv threads : HashMap<uint, GlobThreadInfo>, // number of threads in `threads` with daemon=false, // when this counter reaches 0, the VM shuts down priv alive_nondaemon_count : uint, priv state : ThreadManagerState, // stopped threads get moved here so their parameters are still // available. TODO: how to prevent this from growing indefinitely priv stopped_threads : ~[GlobThreadInfo], } #[deriving(Eq)] pub enum ThreadManagerState { // initial state when no thread has been added yet TMS_NoThreadSeenYet, // running state - at least one non-daemon thread TMS_Running, // all non-daemon threads have died. Transition from // here to Running is possible by adding a new thread // that is not a daemon. 
TMS_AllNonDaemonsDead, } impl ThreadManager { // ---------------------------------------------- pub fn new() -> ThreadManager { ThreadManager{ threads : HashMap::new(), alive_nondaemon_count : 0, state : TMS_NoThreadSeenYet, stopped_threads : ~[], } } // ---------------------------------------------- pub fn get_state(&self) -> ThreadManagerState { self.state } // ---------------------------------------------- // Register a thread with the ThreadManager pub fn add_thread(&mut self, tid : uint) { assert!(!self.threads.contains_key(&tid)); self.threads.insert(tid, GlobThreadInfo { tid : tid, name : ~"", priority : 0, daemon : false, }); self.alive_nondaemon_count += 1; self.state = TMS_Running; } // ---------------------------------------------- // Unregister a thread from the ThreadManager. The thread // is shelved of to the so called 'stopped-threads' list, // which makes its name, priority and other parameter // available even after it has been removed from the live // thread state. pub fn remove_thread(&mut self, tid : uint) { assert!(self.threads.contains_key(&tid)); let t = self.threads.pop(&tid).unwrap(); if !t.daemon { self.alive_nondaemon_count -= 1; } self.stopped_threads.push(t); self.state = if self.alive_nondaemon_count == 0 { TMS_AllNonDaemonsDead } else { TMS_Running }; } // ---------------------------------------------- // Change the 'daemon' flag of a given thread with immediate // effect. If this causes the last alive non-daemon thread to // become daemon, the thread manager's state changes to // TMS_AllNonDaemonsDead pub fn set_daemon(&mut self, tid : uint, daemonize : bool) { assert!(self.threads.contains_key(&tid)); let old = self.threads.get(&tid).daemon; if old == daemonize { return; } self.threads.get_mut(&tid).daemon = daemonize; assert!(!daemonize || self.alive_nondaemon_count > 0); self.alive_nondaemon_count += if daemonize { -1 } else { 1 }; self.state = if self.alive_nondaemon_count == 0 { TMS_AllNonDaemonsDead } else { TMS_Running }; } // ---------------------------------------------- pub fn process_message(&mut self, src_tid : uint, dest_tid : uint, op : RemoteThreadOpMessage) { match op { THREAD_JOIN => (), THREAD_NOTIFY_TERMINATION => fail!("THREAD_NOTIFY_TERMINATION unexpected"), THREAD_SET_PRIORITY(prio) => (), THREAD_SET_NAME(name) => (), } } } #[cfg(test)] mod tests { use threadmanager::*; #[test] fn test_threadmanager_lifecycle() { let mut t = ThreadManager::new(); assert_eq!(t.get_state(), TMS_NoThreadSeenYet); t.add_thread(12); assert_eq!(t.get_state(), TMS_Running); t.add_thread(13); t.remove_thread(13); assert_eq!(t.get_state(), TMS_Running); t.remove_thread(12); assert_eq!(t.get_state(), TMS_AllNonDaemonsDead); t.add_thread(16); assert_eq!(t.get_state(), TMS_Running); } #[test] #[should_fail] fn test_threadmanager_groups_cannot_remove_nonexistent_thread() { let mut t = ThreadManager::new(); t.remove_thread(1); } #[test] #[should_fail] fn test_threadmanager_groups_cannot_reuse_tid() { let mut t = ThreadManager::new(); t.add_thread(12); t.add_thread(13); t.remove_thread(12); t.add_thread(12); } #[test] fn test_threadmanager_lifecycle_with_daemons() { let mut t = ThreadManager::new(); t.add_thread(12); t.add_thread(13); t.set_daemon(13, true); assert_eq!(t.get_state(), TMS_Running); t.remove_thread(13); assert_eq!(t.get_state(), TMS_Running); t.set_daemon(12, true);<|fim▁hole|>}<|fim▁end|>
assert_eq!(t.get_state(), TMS_AllNonDaemonsDead);
    }

    // TODO: test stopped-tid
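A minimal sketch of the lifecycle the tests above exercise; thread id 1 is arbitrary:

let mut t = ThreadManager::new();
t.add_thread(1);        // one live non-daemon thread -> TMS_Running
t.set_daemon(1, true);  // alive_nondaemon_count drops to zero
assert_eq!(t.get_state(), TMS_AllNonDaemonsDead); // the VM may now shut down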
<|file_name|>manage.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python import os from app import create_app, db from app.models import User, Role<|fim▁hole|> app = create_app(os.getenv('FLASK_CONFIG') or 'default') manager = Manager(app) migrate = Migrate(app, db) def make_shell_context(): return dict(app=app, db=db, User=User, Role=Role) manager.add_command("shell", Shell(make_context=make_shell_context)) manager.add_command('db', MigrateCommand) @manager.command def test(): """Run the unit tests.""" import unittest tests = unittest.TestLoader().discover('tests') unittest.TextTestRunner(verbosity=2).run(tests) if __name__ == '__main__': with app.app_context(): db.create_all() manager.run()<|fim▁end|>
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
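A sketch of how the commands registered above are invoked; the command names follow the manager.add_command calls, everything else about the deployment is assumed:

# python manage.py shell       -> interactive shell preloaded with app, db, User, Role
# python manage.py db migrate  -> Flask-Migrate commands exposed under the 'db' name
# python manage.py test        -> unittest discovery over the 'tests' package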
<|file_name|>record_type.rs<|end_file_name|><|fim▁begin|>use crate::library; #[derive(PartialEq, Eq)] pub enum RecordType { /// Boxed record that use g_boxed_copy, g_boxed_free. /// Must have glib_get_type function AutoBoxed, /// Boxed record with custom copy/free functions Boxed, /// Referencecounted record Refcounted, //TODO: detect and generate direct records //Direct, } impl RecordType { pub fn of(record: &library::Record) -> RecordType { let mut has_copy = false; let mut has_free = false; let mut has_ref = false; let mut has_unref = false; let mut has_destroy = false; for func in &record.functions { match &func.name[..] { "copy" => has_copy = true, "free" => has_free = true, "destroy" => has_destroy = true, "ref" => has_ref = true, "unref" => has_unref = true, _ => (), } } <|fim▁hole|> } if has_ref && has_unref { RecordType::Refcounted } else if has_copy && has_free { RecordType::Boxed } else { RecordType::AutoBoxed } } }<|fim▁end|>
        if has_destroy && has_copy {
            has_free = true;
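A minimal sketch of a caller branching on the classification computed above; the `record` value is assumed to come from the external `library` model:

match RecordType::of(&record) {
    RecordType::AutoBoxed => { /* expects a glib_get_type function */ }
    RecordType::Boxed => { /* custom copy/free pair detected */ }
    RecordType::Refcounted => { /* ref/unref pair detected */ }
}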
<|file_name|>apps.py<|end_file_name|><|fim▁begin|>from django.apps import AppConfig <|fim▁hole|> class ScrapperConfig(AppConfig): name = 'scrapper'<|fim▁end|>
<|file_name|>logger_setup.py<|end_file_name|><|fim▁begin|>''' logger_setup.py customizes the app's logging module. Each time an event is logged the logger checks the level of the event (eg. debug, warning, info...). If the event is above the approved threshold then it goes through. The handlers do the same thing; they output to a file/shell if the event level is above their threshold. :Example: >> from website import logger >> logger.info('event', foo='bar') **Levels**: - logger.debug('For debugging purposes') - logger.info('An event occured, for example a database update') - logger.warning('Rare situation') - logger.error('Something went wrong') - logger.critical('Very very bad') You can build a log incrementally as so: >> log = logger.new(date='now') >> log = log.bind(weather='rainy') >> log.info('user logged in', user='John') ''' import datetime as dt import logging from logging.handlers import RotatingFileHandler import pytz from flask import request, session from structlog import wrap_logger from structlog.processors import JSONRenderer from app import app <|fim▁hole|>app.logger.removeHandler(app.logger.handlers[0]) TZ = pytz.timezone(app.config['TIMEZONE']) def add_fields(_, level, event_dict): ''' Add custom fields to each record. ''' now = dt.datetime.now() #event_dict['timestamp'] = TZ.localize(now, True).astimezone(pytz.utc).isoformat() event_dict['timestamp'] = TZ.localize(now, True).astimezone\ (pytz.timezone(app.config['TIMEZONE'])).strftime(app.config['TIME_FMT']) event_dict['level'] = level if request: try: #event_dict['ip_address'] = request.headers['X-Forwarded-For'].split(',')[0].strip() event_dict['ip_address'] = request.headers.get('X-Forwarded-For', request.remote_addr) #event_dict['ip_address'] = request.header.get('X-Real-IP') except: event_dict['ip_address'] = 'unknown' return event_dict # Add a handler to write log messages to a file if app.config.get('LOG_FILE'): file_handler = RotatingFileHandler(filename=app.config['LOG_FILENAME'], maxBytes=app.config['LOG_MAXBYTES'], backupCount=app.config['LOG_BACKUPS'], mode='a', encoding='utf-8') file_handler.setLevel(logging.DEBUG) app.logger.addHandler(file_handler) # Wrap the application logger with structlog to format the output logger = wrap_logger( app.logger, processors=[ add_fields, JSONRenderer(indent=None) ] )<|fim▁end|>
# Set the logging level app.logger.setLevel(app.config['LOG_LEVEL']) # Remove the stdout handler
<|file_name|>half_normal.py<|end_file_name|><|fim▁begin|># Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The Half Normal distribution class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import random_ops from tensorflow.python.ops.distributions import distribution from tensorflow.python.ops.distributions import special_math __all__ = [ "HalfNormal", ] class HalfNormal(distribution.Distribution): """The Half Normal distribution with scale `scale`. #### Mathematical details The half normal is a transformation of a centered normal distribution. If some random variable `X` has normal distribution, ```none X ~ Normal(0.0, scale) Y = |X| ``` Then `Y` will have half normal distribution. The probability density function (pdf) is: ```none pdf(x; scale, x > 0) = sqrt(2) / (scale * sqrt(pi)) * exp(- 1/2 * (x / scale) ** 2) ) ``` Where `scale = sigma` is the standard deviation of the underlying normal distribution. #### Examples Examples of initialization of one or a batch of distributions. ```python # Define a single scalar HalfNormal distribution. dist = tf.contrib.distributions.HalfNormal(scale=3.0) # Evaluate the cdf at 1, returning a scalar. dist.cdf(1.) # Define a batch of two scalar valued HalfNormals. # The first has scale 11.0, the second 22.0 dist = tf.contrib.distributions.HalfNormal(scale=[11.0, 22.0]) # Evaluate the pdf of the first distribution on 1.0, and the second on 1.5, # returning a length two tensor. dist.prob([1.0, 1.5]) # Get 3 samples, returning a 3 x 2 tensor. dist.sample([3]) ``` """ def __init__(self, scale, validate_args=False, allow_nan_stats=True, name="HalfNormal"): """Construct HalfNormals with scale `scale`. Args: scale: Floating point tensor; the scales of the distribution(s). Must contain only positive values. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. 
""" parameters = locals() with ops.name_scope(name, values=[scale]): with ops.control_dependencies([check_ops.assert_positive(scale)] if validate_args else []): self._scale = array_ops.identity(scale, name="scale") super(HalfNormal, self).__init__( dtype=self._scale.dtype, reparameterization_type=distribution.FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._scale], name=name) @staticmethod def _param_shapes(sample_shape): return {"scale": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)} @property def scale(self): """Distribution parameter for the scale.""" return self._scale def _batch_shape_tensor(self): return array_ops.shape(self.scale) def _batch_shape(self): return self.scale.shape def _event_shape_tensor(self): return constant_op.constant([], dtype=dtypes.int32) def _event_shape(self): return tensor_shape.scalar() def _sample_n(self, n, seed=None): shape = array_ops.concat([[n], self.batch_shape_tensor()], 0) sampled = random_ops.random_normal(<|fim▁hole|> return math_ops.abs(sampled * self.scale) def _prob(self, x): coeff = np.sqrt(2) / self.scale / np.sqrt(np.pi) pdf = coeff * math_ops.exp(- 0.5 * (x / self.scale) ** 2) return pdf * math_ops.cast(x >= 0, self.dtype) def _cdf(self, x): truncated_x = nn.relu(x) return math_ops.erf(truncated_x / self.scale / np.sqrt(2.0)) def _entropy(self): return 0.5 * math_ops.log(np.pi * self.scale ** 2.0 / 2.0) + 0.5 def _mean(self): return self.scale * np.sqrt(2.0) / np.sqrt(np.pi) def _quantile(self, p): return np.sqrt(2.0) * self.scale * special_math.erfinv(p) def _mode(self): return array_ops.zeros(self.batch_shape_tensor()) def _variance(self): return self.scale ** 2.0 * (1.0 - 2.0 / np.pi)<|fim▁end|>
shape=shape, mean=0., stddev=1., dtype=self.dtype, seed=seed)
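A sketch of the sampling identity that _sample_n implements above, restated in plain NumPy terms (NumPy stands in for the TF ops here):

import numpy as np

scale = 3.0
standard = np.random.normal(loc=0.0, scale=1.0, size=1000)
half_normal_draws = np.abs(standard * scale)  # mirrors math_ops.abs(sampled * self.scale)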
<|file_name|>register.service.js<|end_file_name|><|fim▁begin|>"use strict"; var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;<|fim▁hole|> return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __metadata = (this && this.__metadata) || function (k, v) { if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v); }; var core_1 = require("@angular/core"); var http_1 = require("@angular/http"); var RegService = (function () { function RegService(_http) { this._http = _http; } RegService.prototype.ngOnInit = function () { }; RegService.prototype.getUsers = function () { return this._http.get('http://ankitesh.pythonanywhere.com/api/v1.0/get_books_data') .map(function (response) { return response.json(); }); }; RegService.prototype.regUser = function (User) { var payload = JSON.stringify({ payload: { "Username": User.Username, "Email_id": User.Email_id, "Password": User.Password } }); return this._http.post('http://ankitesh.pythonanywhere.com/api/v1.0/get_book_summary', payload) .map(function (response) { return response.json(); }); }; return RegService; }()); RegService = __decorate([ core_1.Injectable(), __metadata("design:paramtypes", [http_1.Http]) ], RegService); exports.RegService = RegService; //# sourceMappingURL=register.service.js.map<|fim▁end|>
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
<|file_name|>filter_modules.py<|end_file_name|><|fim▁begin|>""" Query modules mapping functions to their query strings structured: module_name { query_string: function_for_query } """ from __future__ import absolute_import from __future__ import unicode_literals from __future__ import print_function from __future__ import division # Standard imports from future import standard_library standard_library.install_aliases() from builtins import range from builtins import * import sys import os import math import datetime import logging # logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.DEBUG) import random from uuid import UUID # Our imports from emission.core.get_database import get_section_db, get_trip_db, get_routeCluster_db, get_alternatives_db from . import trip_old as trip # 0763de67-f61e-3f5d-90e7-518e69793954 # 0763de67-f61e-3f5d-90e7-518e69793954_20150421T230304-0700_0 # helper for getCanonicalTrips def get_clusters_info(uid): c_db = get_routeCluster_db() s_db = get_section_db() clusterJson = c_db.find_one({"clusters":{"$exists":True}, "user": uid}) if clusterJson is None: return [] c_info = [] clusterSectionLists= list(clusterJson["clusters"].values()) logging.debug( "Number of section lists for user %s is %s" % (uid, len(clusterSectionLists))) for sectionList in clusterSectionLists: first = True logging.debug( "Number of sections in sectionList for user %s is %s" % (uid, len(sectionList))) if (len(sectionList) == 0): # There's no point in returning this cluster, let's move on continue distributionArrays = [[] for _ in range(5)] for section in sectionList: section_json = s_db.find_one({"_id":section}) if first: representative_trip = section_json first = False appendIfPresent(distributionArrays[0], section_json, "section_start_datetime") appendIfPresent(distributionArrays[1], section_json, "section_end_datetime") appendIfPresent(distributionArrays[2], section_json, "section_start_point") appendIfPresent(distributionArrays[3], section_json, "section_end_point") appendIfPresent(distributionArrays[4], section_json, "confirmed_mode") c_info.append((distributionArrays, representative_trip)) return c_info def appendIfPresent(list,element,key): if element is not None and key in element: list.append(element[key]) else: logging.debug("not appending element %s with key %s" % (element, key)) class AlternativesNotFound(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) #returns the top trips for the user, defaulting to the top 10 trips def getCanonicalTrips(uid, get_representative=False): # number returned isnt used """ uid is a UUID object, not a string """ # canonical_trip_list = [] # x = 0 # if route clusters return nothing, then get common routes for user #clusters = get_routeCluster_db().find_one({'$and':[{'user':uid},{'method':'lcs'}]}) # c = get_routeCluster_db().find_one({'$and':[{'user':uid},{'method':'lcs'}]}) logging.debug('UUID for canonical %s' % uid) info = get_clusters_info(uid) cluster_json_list = [] for (cluster, rt) in info: json_dict = dict() json_dict["representative_trip"] = rt json_dict["start_point_distr"] = cluster[2] json_dict["end_point_distr"] = cluster[3] json_dict["start_time_distr"] = cluster[0] json_dict["end_time_distr"] = cluster[1] json_dict["confirmed_mode_list"] = cluster[4] cluster_json_list.append(json_dict) toRet = cluster_json_list return toRet.__iter__() #returns all trips to the user def getAllTrips(uid): #trips = list(get_trip_db().find({"user_id":uid, "type":"move"})) 
query = {'user_id':uid, 'type':'move'} return get_trip_db().find(query) <|fim▁hole|> #trips = list(get_trip_db().find({"user_id":uid, "type":"move"})) d = datetime.datetime.now() - datetime.timedelta(days=dys) query = {'user_id':uid, 'type':'move','trip_start_datetime':{"$gt":d}} return get_trip_db().find(query) #returns all trips with no alternatives to the user def getNoAlternatives(uid): # If pipelineFlags exists then we have started alternatives, and so have # already scheduled the query. No need to reschedule unless the query fails. # TODO: If the query fails, then remove the pipelineFlags so that we will # reschedule. query = {'user_id':uid, 'type':'move', 'pipelineFlags': {'$exists': False}} return get_trip_db().find(query) def getNoAlternativesPastMonth(uid): d = datetime.datetime.now() - datetime.timedelta(days=30) query = {'user_id':uid, 'type':'move', 'trip_start_datetime':{"$gt":d}, 'pipelineFlags': {'$exists': False}} return get_trip_db().find(query) # Returns the trips that are suitable for training # Currently this is: # - trips that have alternatives, and # - have not yet been included in a training set def getTrainingTrips(uid): return getTrainingTrips_Date(uid, 30) query = {'user_id':uid, 'type':'move'} return get_trip_db().find(query) def getTrainingTrips_Date(uid, dys): d = datetime.datetime.now() - datetime.timedelta(days=dys) query = {'user_id':uid, 'type':'move','trip_start_datetime':{"$gt":d}, "pipelineFlags":{"$exists":True}} #query = {'user_id':uid, 'type':'move','trip_start_datetime':{"$gt":d}} #print get_trip_db().count_documents(query) return get_trip_db().find(query) def getAlternativeTrips(trip_id): #TODO: clean up datetime, and queries here #d = datetime.datetime.now() - datetime.timedelta(days=6) #query = {'trip_id':trip_id, 'trip_start_datetime':{"$gt":d}} query = {'trip_id':trip_id} alternatives = get_alternatives_db().find(query) if alternatives.estimated_document_count() > 0: logging.debug("Number of alternatives for trip %s is %d" % (trip_id, alternatives.estimated_document_count())) return alternatives raise AlternativesNotFound("No Alternatives Found") def getRecentTrips(uid): raise NotImplementedError() def getTripsThroughMode(uid): raise NotImplementedError() modules = { # Trip Module 'trips': { 'get_canonical': getCanonicalTrips, 'get_all': getAllTrips, 'get_no_alternatives': getNoAlternatives, 'get_no_alternatives_past_month': getNoAlternativesPastMonth, 'get_most_recent': getRecentTrips, 'get_trips_by_mode': getTripsThroughMode}, # Utility Module 'utility': { 'get_training': getTrainingTrips }, # Recommender Module 'recommender': { 'get_improve': getCanonicalTrips }, #Perturbation Module 'perturbation': {}, #Alternatives Module # note: uses a different collection than section_db 'alternatives': { 'get_alternatives': getAlternativeTrips } }<|fim▁end|>
def getAllTrips_Date(uid, dys):
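A minimal dispatch sketch for the `modules` mapping above, reusing the UUID from the file's own comment:

from uuid import UUID

uid = UUID('0763de67-f61e-3f5d-90e7-518e69793954')
get_canonical = modules['trips']['get_canonical']
for cluster in get_canonical(uid):
    print(cluster['representative_trip'])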
<|file_name|>issue-23304-2.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at<|fim▁hole|>// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(dead_code)] enum X { A = 0 as isize } enum Y { A = X::A as isize } fn main() { }<|fim▁end|>
// http://rust-lang.org/COPYRIGHT.
//
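A sketch of the discriminants the casts above pin down (hypothetical assertions, not part of the original test):

assert_eq!(X::A as isize, 0);
assert_eq!(Y::A as isize, 0);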
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>"""nubrain URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.8/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Add an import: from blog import urls as blog_urls 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls)) """ from django.conf.urls import patterns, include, url<|fim▁hole|> urlpatterns = patterns('', (r'^$', RedirectView.as_view(url='%s/admin/' % BASE_URL)), url(r'^admin/', include(admin.site.urls)), ) admin.site.site_title = ugettext_lazy(APP_NAME) admin.site.site_header = ugettext_lazy('%s Admin' % APP_NAME) admin.site.index_title = ugettext_lazy('%s Dashboard' % APP_NAME) admin.autodiscover()<|fim▁end|>
from django.contrib import admin from nubrain.settings import BASE_URL, APP_NAME from django.views.generic import RedirectView from django.utils.translation import ugettext_lazy
<|file_name|>grid.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! CSS handling for the computed value of //! [grids](https://drafts.csswg.org/css-grid/) use cssparser::{ParseError as CssParseError, Parser, Token}; use parser::{Parse, ParserContext}; use std::mem; use style_traits::{ParseError, StyleParseErrorKind}; use values::{CSSFloat, CustomIdent}; use values::computed::{self, Context, ToComputedValue}; use values::generics::grid::{GridTemplateComponent, RepeatCount, TrackBreadth}; use values::generics::grid::{LineNameList, TrackKeyword, TrackRepeat, TrackSize}; use values::generics::grid::{TrackList, TrackListType, TrackListValue}; use values::specified::{Integer, LengthOrPercentage}; /// Parse a single flexible length. pub fn parse_flex<'i, 't>(input: &mut Parser<'i, 't>) -> Result<CSSFloat, ParseError<'i>> { let location = input.current_source_location(); match *input.next()? { Token::Dimension { value, ref unit, .. } if unit.eq_ignore_ascii_case("fr") && value.is_sign_positive() => { Ok(value) }, ref t => Err(location.new_unexpected_token_error(t.clone())), } } impl Parse for TrackBreadth<LengthOrPercentage> { fn parse<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { if let Ok(lop) = input.try(|i| LengthOrPercentage::parse_non_negative(context, i)) { return Ok(TrackBreadth::Breadth(lop)); } if let Ok(f) = input.try(parse_flex) { return Ok(TrackBreadth::Fr(f)); } TrackKeyword::parse(input).map(TrackBreadth::Keyword) } } impl Parse for TrackSize<LengthOrPercentage> { fn parse<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { if let Ok(b) = input.try(|i| TrackBreadth::parse(context, i)) { return Ok(TrackSize::Breadth(b)); } if input.try(|i| i.expect_function_matching("minmax")).is_ok() { return input.parse_nested_block(|input| { let inflexible_breadth = match input.try(|i| LengthOrPercentage::parse_non_negative(context, i)) { Ok(lop) => TrackBreadth::Breadth(lop), Err(..) => { let keyword = TrackKeyword::parse(input)?; TrackBreadth::Keyword(keyword) }, }; input.expect_comma()?; Ok(TrackSize::Minmax( inflexible_breadth, TrackBreadth::parse(context, input)?, )) }); } input.expect_function_matching("fit-content")?; let lop = input.parse_nested_block(|i| LengthOrPercentage::parse_non_negative(context, i))?; Ok(TrackSize::FitContent(lop)) } } /// Parse the grid line names into a vector of owned strings. /// /// <https://drafts.csswg.org/css-grid/#typedef-line-names> pub fn parse_line_names<'i, 't>( input: &mut Parser<'i, 't>, ) -> Result<Box<[CustomIdent]>, ParseError<'i>> { input.expect_square_bracket_block()?; input.parse_nested_block(|input| { let mut values = vec![]; while let Ok((loc, ident)) = input.try(|i| -> Result<_, CssParseError<()>> { Ok((i.current_source_location(), i.expect_ident_cloned()?)) }) { let ident = CustomIdent::from_ident(loc, &ident, &["span"])?; values.push(ident); } Ok(values.into_boxed_slice()) }) } /// The type of `repeat` function (only used in parsing). 
/// /// <https://drafts.csswg.org/css-grid/#typedef-track-repeat> #[derive(Clone, Copy, Debug, PartialEq, SpecifiedValueInfo)] #[cfg_attr(feature = "servo", derive(MallocSizeOf))] enum RepeatType { /// [`<auto-repeat>`](https://drafts.csswg.org/css-grid/#typedef-auto-repeat) Auto, /// [`<track-repeat>`](https://drafts.csswg.org/css-grid/#typedef-track-repeat) Normal, /// [`<fixed-repeat>`](https://drafts.csswg.org/css-grid/#typedef-fixed-repeat) Fixed, } impl TrackRepeat<LengthOrPercentage, Integer> { fn parse_with_repeat_type<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<(Self, RepeatType), ParseError<'i>> { input .try(|i| i.expect_function_matching("repeat").map_err(|e| e.into())) .and_then(|_| { input.parse_nested_block(|input| { let count = RepeatCount::parse(context, input)?; input.expect_comma()?; let is_auto = count == RepeatCount::AutoFit || count == RepeatCount::AutoFill; let mut repeat_type = if is_auto { RepeatType::Auto } else { // <fixed-size> is a subset of <track-size>, so it should work for both RepeatType::Fixed }; let mut names = vec![]; let mut values = vec![]; let mut current_names; loop { current_names = input .try(parse_line_names) .unwrap_or(vec![].into_boxed_slice()); if let Ok(track_size) = input.try(|i| TrackSize::parse(context, i)) { if !track_size.is_fixed() { if is_auto { // should be <fixed-size> for <auto-repeat> return Err(input .new_custom_error(StyleParseErrorKind::UnspecifiedError)); } if repeat_type == RepeatType::Fixed { repeat_type = RepeatType::Normal // <track-size> for sure } } values.push(track_size); names.push(current_names); if is_auto { // FIXME: In the older version of the spec // (https://www.w3.org/TR/2015/WD-css-grid-1-20150917/#typedef-auto-repeat), // if the repeat type is `<auto-repeat>` we shouldn't try to parse more than // one `TrackSize`. But in current version of the spec, this is deprecated // but we are adding this for gecko parity. We should remove this when // gecko implements new spec. names.push( input .try(parse_line_names) .unwrap_or(vec![].into_boxed_slice()), ); break; } } else { if values.is_empty() { // expecting at least one <track-size> return Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)); } names.push(current_names); // final `<line-names>` break; // no more <track-size>, breaking } } let repeat = TrackRepeat { count: count, track_sizes: values, line_names: names.into_boxed_slice(), }; Ok((repeat, repeat_type)) }) }) } } impl Parse for TrackList<LengthOrPercentage, Integer> { fn parse<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { let mut current_names = vec![]; let mut names = vec![]; let mut values = vec![]; // assume it's the simplest case. let mut list_type = TrackListType::Explicit; // holds <auto-repeat> value. It can only be only one in a TrackList. let mut auto_repeat = None; // if there is any <auto-repeat> the list will be of type TrackListType::Auto(idx) // where idx points to the position of the <auto-repeat> in the track list. If there // is any repeat before <auto-repeat>, we need to take the number of repetitions into // account to set the position of <auto-repeat> so it remains the same while computing // values. let mut auto_offset = 0; // assume that everything is <fixed-size>. 
This flag is useful when we encounter <auto-repeat> let mut atleast_one_not_fixed = false; loop { current_names.extend_from_slice(&mut input .try(parse_line_names) .unwrap_or(vec![].into_boxed_slice())); if let Ok(track_size) = input.try(|i| TrackSize::parse(context, i)) { if !track_size.is_fixed() { atleast_one_not_fixed = true; if auto_repeat.is_some() { // <auto-track-list> only accepts <fixed-size> and <fixed-repeat> return Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)); } } let vec = mem::replace(&mut current_names, vec![]); names.push(vec.into_boxed_slice()); values.push(TrackListValue::TrackSize(track_size)); } else if let Ok((repeat, type_)) = input.try(|i| TrackRepeat::parse_with_repeat_type(context, i)) { if list_type == TrackListType::Explicit { list_type = TrackListType::Normal; // <explicit-track-list> doesn't contain repeat() } match type_ { RepeatType::Normal => { atleast_one_not_fixed = true; if auto_repeat.is_some() { // only <fixed-repeat> return Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)); } }, RepeatType::Auto => { if auto_repeat.is_some() || atleast_one_not_fixed { // We've either seen <auto-repeat> earlier, or there's at least one non-fixed value return Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)); } list_type = TrackListType::Auto(values.len() as u16 + auto_offset); auto_repeat = Some(repeat); let vec = mem::replace(&mut current_names, vec![]); names.push(vec.into_boxed_slice()); continue; }, RepeatType::Fixed => (), } let vec = mem::replace(&mut current_names, vec![]); names.push(vec.into_boxed_slice()); if let RepeatCount::Number(num) = repeat.count { auto_offset += (num.value() - 1) as u16; } values.push(TrackListValue::TrackRepeat(repeat)); } else { if values.is_empty() && auto_repeat.is_none() { return Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)); } names.push(current_names.into_boxed_slice()); break; } } Ok(TrackList { list_type: list_type, values: values, line_names: names.into_boxed_slice(), auto_repeat: auto_repeat, }) } } impl ToComputedValue for TrackList<LengthOrPercentage, Integer> { type ComputedValue = TrackList<computed::LengthOrPercentage, computed::Integer>; #[inline] fn to_computed_value(&self, context: &Context) -> Self::ComputedValue { // Merge the line names while computing values. The resulting values will // all be bunch of `<track-size>` and one <auto-repeat>. // // For example, // `[a b] 100px [c d] repeat(1, 30px [g]) [h]` will be merged as `[a b] 100px [c d] 30px [g h]` // whereas, `[a b] repeat(2, [c] 50px [d]) [e f] repeat(auto-fill, [g] 12px) 10px [h]` will be merged as // `[a b c] 50px [d c] 50px [d e f] repeat(auto-fill, [g] 12px) 10px [h]`, with the `<auto-repeat>` value // set in the `auto_repeat` field, and the `idx` in TrackListType::Auto pointing to the values after // `<auto-repeat>` (in this case, `10px [h]`). 
let mut prev_names = vec![]; let mut line_names = Vec::with_capacity(self.line_names.len() + 1); let mut values = Vec::with_capacity(self.values.len() + 1); for (pos, names) in self.line_names.iter().enumerate() { prev_names.extend_from_slice(&names); if pos >= self.values.len() { let vec = mem::replace(&mut prev_names, vec![]); line_names.push(vec.into_boxed_slice()); continue; } match self.values[pos] { TrackListValue::TrackSize(ref size) => { let vec = mem::replace(&mut prev_names, vec![]); line_names.push(vec.into_boxed_slice()); values.push(TrackListValue::TrackSize(size.to_computed_value(context))); }, TrackListValue::TrackRepeat(ref repeat) => { // If the repeat count is numeric, we expand and merge the values. let mut repeat = repeat.expand(); let mut repeat_names_iter = repeat.line_names.iter(); for (size, repeat_names) in repeat.track_sizes.drain(..).zip(&mut repeat_names_iter) { prev_names.extend_from_slice(&repeat_names);<|fim▁hole|> if let Some(names) = repeat_names_iter.next() { prev_names.extend_from_slice(&names); } }, } } TrackList { list_type: self.list_type.to_computed_value(context), values: values, line_names: line_names.into_boxed_slice(), auto_repeat: self.auto_repeat .clone() .map(|repeat| repeat.to_computed_value(context)), } } #[inline] fn from_computed_value(computed: &Self::ComputedValue) -> Self { let mut values = Vec::with_capacity(computed.values.len() + 1); for value in computed .values .iter() .map(ToComputedValue::from_computed_value) { values.push(value); } TrackList { list_type: computed.list_type, values: values, line_names: computed.line_names.clone(), auto_repeat: computed .auto_repeat .clone() .map(|ref repeat| TrackRepeat::from_computed_value(repeat)), } } } #[cfg(feature = "gecko")] #[inline] fn allow_grid_template_subgrids() -> bool { use gecko_bindings::structs::mozilla; unsafe { mozilla::StaticPrefs_sVarCache_layout_css_grid_template_subgrid_value_enabled } } #[cfg(feature = "servo")] #[inline] fn allow_grid_template_subgrids() -> bool { false } impl Parse for GridTemplateComponent<LengthOrPercentage, Integer> { // FIXME: Derive Parse (probably with None_) fn parse<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { if input.try(|i| i.expect_ident_matching("none")).is_ok() { return Ok(GridTemplateComponent::None); } Self::parse_without_none(context, input) } } impl GridTemplateComponent<LengthOrPercentage, Integer> { /// Parses a `GridTemplateComponent<LengthOrPercentage>` except `none` keyword. pub fn parse_without_none<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { if allow_grid_template_subgrids() { if let Ok(t) = input.try(|i| LineNameList::parse(context, i)) { return Ok(GridTemplateComponent::Subgrid(t)); } } TrackList::parse(context, input).map(GridTemplateComponent::TrackList) } }<|fim▁end|>
let vec = mem::replace(&mut prev_names, vec![]); line_names.push(vec.into_boxed_slice()); values.push(TrackListValue::TrackSize(size.to_computed_value(context))); }
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from PIL import Image
from maskgen import exif
import numpy as np
import PIL

"""
Save the image as PDF. If the image has an orientation and 'Image Rotated', rotate the image according to the EXIF.
"""

def transform(img, source, target, **kwargs):
    if 'resolution' in kwargs:
        res = float(int(kwargs['resolution']))
    else:<|fim▁hole|>
        res = 200.0
    im = img.convert('RGB').to_array()
    Image.fromarray(im).save(target, format='PDF', resolution=res)
    return None, None

def operation():
    return {'name': 'OutputPDF',
            'category': 'Output',
            'description': 'Save an image as .pdf',
            'software': 'PIL',
            'version': PIL.__version__,
            'arguments': {
                'resolution': {
                    'type': 'int',
                    'defaultvalue': '100',
                    'description': 'DPI'
                }
            },
            'transitions': [
                'image.image'
            ]}

def suffix():
    return '.pdf'<|fim▁end|>
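# --- Hedged usage sketch, added for illustration (not from the source) ---
# How the OutputPDF plugin above might be invoked; the image wrapper with a
# convert()/to_array() API is an assumption inferred from the signature.
#   result, error = transform(wrapped_img, 'in.png', 'out.pdf', resolution='150')
#   assert (result, error) == (None, None)
spec = operation()
assert spec['name'] == 'OutputPDF'
assert spec['arguments']['resolution']['type'] == 'int'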
<|file_name|>test_weewx.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # (c) 2020-2021 Andreas Motl <[email protected]> import logging import pytest import pytest_twisted from twisted.internet import threads from test.settings.mqttkit import settings, influx_sensors, PROCESS_DELAY_MQTT from test.util import mqtt_json_sensor, sleep logger = logging.getLogger(__name__) # The WeeWX decoder listens on the special MQTT topic suffix "/loop". mqtt_topic = 'mqttkit-1/itest/foo/bar/loop' # Data payload captured from a Vantage Pro 2 weather station. data = { "windSpeed10_kph": "5.78725803977", "monthET": "1.32", "highUV": "0.0", "cloudbase_meter": "773.082217509", "leafTemp1_C": "8.33333333333", "rainAlarm": "0.0", "pressure_mbar": "948.046280104", "rain_cm": "0.0", "highRadiation": "0.0", "interval_minute": "5.0", "barometer_mbar": "1018.35464712", "yearRain_cm": "17.2000000043", "consBatteryVoltage_volt": "4.72", "dewpoint_C": "2.07088485785", "insideAlarm": "0.0", "inHumidity": "29.0", "soilLeafAlarm4": "0.0", "sunrise": "1492489200.0", "windGust_kph": "9.65608800006", "heatindex_C": "3.55555555556", "dayRain_cm": "0.0", "lowOutTemp": "38.3", "outsideAlarm1": "0.0", "forecastIcon": "8.0", "outsideAlarm2": "0.0",<|fim▁hole|> "stormStart": "1492207200.0", "inDewpoint": "45.1231125123", "altimeter_mbar": "1016.62778614", "windchill_C": "3.55555555556", "appTemp_C": "1.26842313302", "outTemp_C": "3.55555555556", "windGustDir": "275.0", "extraAlarm1": "0.0", "extraAlarm2": "0.0", "extraAlarm3": "0.0", "extraAlarm4": "0.0", "extraAlarm5": "0.0", "extraAlarm6": "0.0", "extraAlarm7": "0.0", "extraAlarm8": "0.0", "humidex_C": "3.55555555556", "rain24_cm": "0.88000000022", "rxCheckPercent": "87.9791666667", "hourRain_cm": "0.0", "inTemp_C": "26.8333333333", "watertemp": "8.33333333333", "trendIcon": "59.7350993377", "soilLeafAlarm2": "0.0", "soilLeafAlarm3": "0.0", "usUnits": "16.0", "soilLeafAlarm1": "0.0", "leafWet4": "0.0", "txBatteryStatus": "0.0", "yearET": "4.88", "monthRain_cm": "2.94000000074", "UV": "0.0", "rainRate_cm_per_hour": "0.0", "dayET": "0.0", "dateTime": "1492467300.0", "windDir": "283.55437192", "stormRain_cm": "1.72000000043", "ET_cm": "0.0", "sunset": "1492538940.0", "highOutTemp": "38.4", "radiation_Wpm2": "0.0" } @pytest_twisted.inlineCallbacks @pytest.mark.mqtt @pytest.mark.weewx def test_weewx_mqtt(machinery, create_influxdb, reset_influxdb): """ Publish single reading in JSON format to MQTT broker and proof it is stored in the InfluxDB database. """ # Submit a single measurement, without timestamp. yield threads.deferToThread(mqtt_json_sensor, settings.mqtt_topic_json, data) # Wait for some time to process the message. yield sleep(PROCESS_DELAY_MQTT) # Proof that data arrived in InfluxDB. record = influx_sensors.get_first_record() assert record["time"] == '2017-04-17T22:15:00Z' assert record["outTemp_C"] == 3.55555555556 assert record["windSpeed10_kph"] == 5.78725803977 assert record["cloudbase_meter"] == 773.082217509 assert record["consBatteryVoltage_volt"] == 4.72 yield record<|fim▁end|>
"windSpeed_kph": "3.95409343049", "forecastRule": "40.0", "windrun_km": "1.07449640224", "outHumidity": "90.0",
<|file_name|>StatsCollection.d.ts<|end_file_name|><|fim▁begin|>/** * Tracks a collection of stats. * * @constructor */ export declare class StatsCollection { private counters_; incrementCounter(name: string, amount?: number): void; get(): { [k: string]: number; };<|fim▁hole|>}<|fim▁end|>
<|file_name|>aixc++.py<|end_file_name|><|fim▁begin|>"""SCons.Tool.aixc++

Tool-specific initialization for IBM xlC / Visual Age C++ compiler.

There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.

"""

#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"

# forward proxy to the preferred cxx version
from SCons.Tool.aixcxx import *

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:<|fim▁hole|><|fim▁end|>
# vim: set expandtab tabstop=4 shiftwidth=4:
<|file_name|>get_all_ship_fields.py<|end_file_name|><|fim▁begin|>__author__ = 'thorsteinn' def get_all_ship_fields(db): ships = db.keys() fields = [] for ship in ships: shipDB = db[ship] shipKeys = shipDB.keys() for oneKey in shipKeys:<|fim▁hole|> if oneKey not in fields: fields.append(oneKey) return fields<|fim▁end|>
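# --- Tiny illustration with made-up data (not from the source) ---
# get_all_ship_fields returns the union of field names across all ships,
# keeping the order in which each field is first seen.
fleet = {'ship_a': {'length': 30, 'beam': 8},
         'ship_b': {'length': 28, 'draft': 4}}
# get_all_ship_fields(fleet) -> ['length', 'beam', 'draft']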
<|file_name|>mock_backend_spec.ts<|end_file_name|><|fim▁begin|>import {AsyncTestCompleter, afterEach, beforeEach, ddescribe, describe, expect, iit, inject, it, xit, SpyObject} from 'angular2/testing_internal'; import {ObservableWrapper} from 'angular2/src/facade/async'; import {BrowserXhr} from 'angular2/src/http/backends/browser_xhr'; import {MockConnection, MockBackend} from 'angular2/src/http/backends/mock_backend'; import {provide, Injector} from 'angular2/core'; import {Request} from 'angular2/src/http/static_request'; import {Response} from 'angular2/src/http/static_response'; import {Headers} from 'angular2/src/http/headers'; import {Map} from 'angular2/src/facade/collection'; import {RequestOptions, BaseRequestOptions} from 'angular2/src/http/base_request_options'; import {BaseResponseOptions, ResponseOptions} from 'angular2/src/http/base_response_options'; import {ResponseType} from 'angular2/src/http/enums'; import {ReplaySubject} from 'rxjs/subject/ReplaySubject'; export function main() { describe('MockBackend', () => { var backend: MockBackend; var sampleRequest1: Request; var sampleResponse1: Response; var sampleRequest2: Request; var sampleResponse2: Response; beforeEach(() => { var injector = Injector.resolveAndCreate( [provide(ResponseOptions, {useClass: BaseResponseOptions}), MockBackend]); backend = injector.get(MockBackend); var base = new BaseRequestOptions(); sampleRequest1 = new Request(base.merge(new RequestOptions({url: 'https://google.com'}))); sampleResponse1 = new Response(new ResponseOptions({body: 'response1'})); sampleRequest2 = new Request(base.merge(new RequestOptions({url: 'https://google.com'}))); sampleResponse2 = new Response(new ResponseOptions({body: 'response2'})); }); it('should create a new MockBackend', () => {expect(backend).toBeAnInstanceOf(MockBackend)}); it('should create a new MockConnection', () => {expect(backend.createConnection(sampleRequest1)).toBeAnInstanceOf(MockConnection)}); it('should create a new connection and allow subscription', () => { let connection: MockConnection = backend.createConnection(sampleRequest1); connection.response.subscribe(() => {}); }); it('should allow responding after subscription', inject([AsyncTestCompleter], (async: AsyncTestCompleter) => { let connection: MockConnection = backend.createConnection(sampleRequest1); connection.response.subscribe(() => { async.done(); }); connection.mockRespond(sampleResponse1); })); it('should allow subscribing after responding', inject([AsyncTestCompleter], (async: AsyncTestCompleter) => { let connection: MockConnection = backend.createConnection(sampleRequest1); connection.mockRespond(sampleResponse1); connection.response.subscribe(() => { async.done(); }); })); it('should allow responding after subscription with an error', inject([AsyncTestCompleter], (async: AsyncTestCompleter) => { let connection: MockConnection = backend.createConnection(sampleRequest1); connection.response.subscribe(null, () => { async.done(); }); connection.mockError(new Error('nope')); })); it('should not throw when there are no unresolved requests', inject([AsyncTestCompleter], (async: AsyncTestCompleter) => { let connection: MockConnection = backend.createConnection(sampleRequest1); connection.response.subscribe(() => { async.done(); }); connection.mockRespond(sampleResponse1); backend.verifyNoPendingRequests(); })); xit('should throw when there are unresolved requests', inject([AsyncTestCompleter], (async: AsyncTestCompleter) => { let connection: MockConnection = 
backend.createConnection(sampleRequest1); connection.response.subscribe(() => { async.done(); }); backend.verifyNoPendingRequests(); })); it('should work when requests are resolved out of order', inject([AsyncTestCompleter], (async: AsyncTestCompleter) => { let connection1: MockConnection = backend.createConnection(sampleRequest1); let connection2: MockConnection = backend.createConnection(sampleRequest1); connection1.response.subscribe(() => { async.done(); }); connection2.response.subscribe(() => {}); connection2.mockRespond(sampleResponse1);<|fim▁hole|> connection1.mockRespond(sampleResponse1); backend.verifyNoPendingRequests(); })); xit('should allow double subscribing', inject([AsyncTestCompleter], (async: AsyncTestCompleter) => { let responses: Response[] = [sampleResponse1, sampleResponse2]; backend.connections.subscribe((c: MockConnection) => c.mockRespond(responses.shift())); let responseObservable: ReplaySubject<Response> = backend.createConnection(sampleRequest1).response; responseObservable.subscribe(res => expect(res.text()).toBe('response1')); responseObservable.subscribe( res => expect(res.text()).toBe('response2'), null, async.done); })); // TODO(robwormald): readyStates are leaving? it('should allow resolution of requests manually', () => { let connection1: MockConnection = backend.createConnection(sampleRequest1); let connection2: MockConnection = backend.createConnection(sampleRequest1); connection1.response.subscribe(() => {}); connection2.response.subscribe(() => {}); backend.resolveAllConnections(); backend.verifyNoPendingRequests(); }); }); }<|fim▁end|>
<|file_name|>boundaries.py<|end_file_name|><|fim▁begin|>"""
OpenStreetMap boundary generator.

Author: Andrzej Talarczyk <[email protected]>

Based on work of Michał Rogalski (Rogal).

License: GPLv3.
"""

import os
import platform
import shutil


def clean(src_dir):<|fim▁hole|>
        src_dir (string): path to the directory from which target files will be removed
    """
    if os.path.isfile("{dane_osm}/poland.o5m".format(dane_osm=src_dir)):
        os.remove("{dane_osm}/poland.o5m".format(dane_osm=src_dir))
    if os.path.isfile("{dane_osm}/poland-boundaries.osm".format(dane_osm=src_dir)):
        os.remove("{dane_osm}/poland-boundaries.osm".format(dane_osm=src_dir))
    if os.path.exists("{dane_osm}/bounds".format(dane_osm=src_dir)):
        shutil.rmtree("{dane_osm}/bounds".format(dane_osm=src_dir))


def generate(bin_dir, src_dir, pbf_filename) -> int:
    """Generates boundaries.

    Args:
        bin_dir (string): path to a directory holding compilation tools
        src_dir (string): path to a directory with source data
        pbf_filename (string): source PBF file

    Raises:
        Exception: if run on an unsupported operating system

    Returns:
        int: 0 if success.
    """
    ret = -1
    if platform.system() == 'Windows':
        ret = os.system("start /low /b /wait {binarki}/osmconvert.exe {dane_osm}/{pbf_filename} --out-o5m >{dane_osm}/poland.o5m".format(
            binarki=bin_dir, dane_osm=src_dir, pbf_filename=pbf_filename))
        ret = os.system("start /low /b /wait {binarki}/osmfilter.exe {dane_osm}/poland.o5m --keep-nodes= --keep-ways-relations=\"boundary=administrative =postal_code postal_code=\" >{dane_osm}/poland-boundaries.osm".format(
            dane_osm=src_dir, binarki=bin_dir))
        ret = os.system("start /low /b /wait java -cp {binarki}/mkgmap.jar uk.me.parabola.mkgmap.reader.osm.boundary.BoundaryPreprocessor {dane_osm}/poland-boundaries.osm {dane_osm}/bounds".format(
            binarki=bin_dir, dane_osm=src_dir))
    elif platform.system() == 'Linux':
        ret = os.system("osmconvert {dane_osm}/{pbf_filename} --out-o5m >{dane_osm}/poland.o5m".format(
            dane_osm=src_dir, pbf_filename=pbf_filename))
        ret = os.system("osmfilter {dane_osm}/poland.o5m --keep-nodes= --keep-ways-relations=\"boundary=administrative =postal_code postal_code=\" >{dane_osm}/poland-boundaries.osm".format(
            dane_osm=src_dir))
        ret = os.system("java -cp {binarki}/mkgmap.jar uk.me.parabola.mkgmap.reader.osm.boundary.BoundaryPreprocessor {dane_osm}/poland-boundaries.osm {dane_osm}/bounds".format(
            binarki=bin_dir, dane_osm=src_dir))
    else:
        raise Exception("Unsupported operating system.")

    return ret<|fim▁end|>
"""Remove target files. Args:
<|file_name|>bbolt.go<|end_file_name|><|fim▁begin|>package bolt import ( "context" "fmt" "os" "path/filepath" "time" platform "github.com/influxdata/influxdb/v2" platform2 "github.com/influxdata/influxdb/v2/kit/platform" "github.com/influxdata/influxdb/v2/rand" "github.com/influxdata/influxdb/v2/snowflake" bolt "go.etcd.io/bbolt" "go.uber.org/zap" ) const DefaultFilename = "influxd.bolt" // Client is a client for the boltDB data store. type Client struct { Path string db *bolt.DB log *zap.Logger IDGenerator platform2.IDGenerator TokenGenerator platform.TokenGenerator platform.TimeGenerator pluginsCollector *pluginMetricsCollector } // NewClient returns an instance of a Client. func NewClient(log *zap.Logger) *Client { return &Client{ log: log, IDGenerator: snowflake.NewIDGenerator(), TokenGenerator: rand.NewTokenGenerator(64), TimeGenerator: platform.RealTimeGenerator{}, // Refresh telegraf plugin metrics every hour. pluginsCollector: NewPluginMetricsCollector(time.Minute * 59), } } // DB returns the clients DB. func (c *Client) DB() *bolt.DB { return c.db } // Open / create boltDB file. func (c *Client) Open(ctx context.Context) error { // Ensure the required directory structure exists. if err := os.MkdirAll(filepath.Dir(c.Path), 0700); err != nil { return fmt.Errorf("unable to create directory %s: %v", c.Path, err) } if _, err := os.Stat(c.Path); err != nil && !os.IsNotExist(err) { return err } // Open database file. db, err := bolt.Open(c.Path, 0600, &bolt.Options{Timeout: 1 * time.Second}) if err != nil { // Hack to give a slightly nicer error message for a known failure mode when bolt calls // mmap on a file system that doesn't support the MAP_SHARED option. // // See: https://github.com/boltdb/bolt/issues/272 // See: https://stackoverflow.com/a/18421071 if err.Error() == "invalid argument" { return fmt.Errorf("unable to open boltdb: mmap of %q may not support the MAP_SHARED option", c.Path) } return fmt.Errorf("unable to open boltdb: %w", err) } c.db = db if err := c.initialize(ctx); err != nil { return err } c.pluginsCollector.Open(c.db) c.log.Info("Resources opened", zap.String("path", c.Path)) return nil } // initialize creates Buckets that are missing func (c *Client) initialize(ctx context.Context) error { if err := c.db.Update(func(tx *bolt.Tx) error { // Always create ID bucket. // TODO: is this still needed? if err := c.initializeID(tx); err != nil { return err } // TODO: make card to normalize everything under kv? bkts := [][]byte{ authorizationBucket, bucketBucket, dashboardBucket, organizationBucket, scraperBucket, telegrafBucket, telegrafPluginsBucket, userBucket, } for _, bktName := range bkts { if _, err := tx.CreateBucketIfNotExists(bktName); err != nil { return err } } return nil }); err != nil { return err } return nil }<|fim▁hole|> // Close the connection to the bolt database func (c *Client) Close() error { c.pluginsCollector.Close() if c.db != nil { return c.db.Close() } return nil }<|fim▁end|>
<|file_name|>x86register.rs<|end_file_name|><|fim▁begin|>use crate::error::*; use crate::il::Expression as Expr; use crate::il::*; use crate::translator::x86::mode::Mode; use falcon_capstone::capstone_sys::x86_reg; const X86REGISTERS: &[X86Register] = &[ X86Register { name: "ah", capstone_reg: x86_reg::X86_REG_AH, full_reg: x86_reg::X86_REG_EAX, offset: 8, bits: 8, mode: Mode::X86, }, X86Register { name: "al", capstone_reg: x86_reg::X86_REG_AL, full_reg: x86_reg::X86_REG_EAX, offset: 0, bits: 8, mode: Mode::X86, }, X86Register { name: "ax", capstone_reg: x86_reg::X86_REG_AX, full_reg: x86_reg::X86_REG_EAX, offset: 0, bits: 16, mode: Mode::X86, }, X86Register { name: "eax", capstone_reg: x86_reg::X86_REG_EAX, full_reg: x86_reg::X86_REG_EAX, offset: 0, bits: 32, mode: Mode::X86, }, X86Register { name: "bh", capstone_reg: x86_reg::X86_REG_BH, full_reg: x86_reg::X86_REG_EBX, offset: 8, bits: 8, mode: Mode::X86, }, X86Register { name: "bl", capstone_reg: x86_reg::X86_REG_BL, full_reg: x86_reg::X86_REG_EBX, offset: 0, bits: 8, mode: Mode::X86, }, X86Register { name: "bx", capstone_reg: x86_reg::X86_REG_BX, full_reg: x86_reg::X86_REG_EBX, offset: 0, bits: 16, mode: Mode::X86, }, X86Register { name: "ebx", capstone_reg: x86_reg::X86_REG_EBX, full_reg: x86_reg::X86_REG_EBX, offset: 0, bits: 32, mode: Mode::X86, }, X86Register { name: "ch", capstone_reg: x86_reg::X86_REG_CH, full_reg: x86_reg::X86_REG_ECX, offset: 8, bits: 8, mode: Mode::X86, }, X86Register { name: "cl", capstone_reg: x86_reg::X86_REG_CL, full_reg: x86_reg::X86_REG_ECX, offset: 0, bits: 8, mode: Mode::X86, }, X86Register { name: "cx", capstone_reg: x86_reg::X86_REG_CX, full_reg: x86_reg::X86_REG_ECX, offset: 0, bits: 16, mode: Mode::X86, }, X86Register { name: "ecx", capstone_reg: x86_reg::X86_REG_ECX, full_reg: x86_reg::X86_REG_ECX, offset: 0, bits: 32, mode: Mode::X86, }, X86Register { name: "dh", capstone_reg: x86_reg::X86_REG_DH, full_reg: x86_reg::X86_REG_EDX, offset: 8, bits: 8, mode: Mode::X86, }, X86Register { name: "dl", capstone_reg: x86_reg::X86_REG_DL, full_reg: x86_reg::X86_REG_EDX, offset: 0, bits: 8, mode: Mode::X86, }, X86Register { name: "dx", capstone_reg: x86_reg::X86_REG_DX, full_reg: x86_reg::X86_REG_EDX, offset: 0, bits: 16, mode: Mode::X86, }, X86Register { name: "edx", capstone_reg: x86_reg::X86_REG_EDX, full_reg: x86_reg::X86_REG_EDX, offset: 0, bits: 32, mode: Mode::X86, }, X86Register { name: "si", capstone_reg: x86_reg::X86_REG_SI, full_reg: x86_reg::X86_REG_ESI, offset: 0, bits: 16, mode: Mode::X86, }, X86Register { name: "esi", capstone_reg: x86_reg::X86_REG_ESI, full_reg: x86_reg::X86_REG_ESI, offset: 0, bits: 32, mode: Mode::X86, }, X86Register { name: "di", capstone_reg: x86_reg::X86_REG_DI, full_reg: x86_reg::X86_REG_EDI, offset: 0, bits: 16, mode: Mode::X86, }, X86Register { name: "edi", capstone_reg: x86_reg::X86_REG_EDI, full_reg: x86_reg::X86_REG_EDI, offset: 0, bits: 32, mode: Mode::X86, }, X86Register { name: "sp", capstone_reg: x86_reg::X86_REG_SP, full_reg: x86_reg::X86_REG_ESP, offset: 0, bits: 16, mode: Mode::X86, }, X86Register { name: "esp", capstone_reg: x86_reg::X86_REG_ESP, full_reg: x86_reg::X86_REG_ESP, offset: 0, bits: 32, mode: Mode::X86, }, X86Register { name: "bp", capstone_reg: x86_reg::X86_REG_BP, full_reg: x86_reg::X86_REG_EBP, offset: 0, bits: 16, mode: Mode::X86, }, X86Register { name: "ebp", capstone_reg: x86_reg::X86_REG_EBP, full_reg: x86_reg::X86_REG_EBP, offset: 0, bits: 32, mode: Mode::X86, }, X86Register { name: "fs_base", capstone_reg: x86_reg::X86_REG_FS, full_reg: 
x86_reg::X86_REG_FS, offset: 0, bits: 32, mode: Mode::X86, }, X86Register { name: "gs_base", capstone_reg: x86_reg::X86_REG_GS, full_reg: x86_reg::X86_REG_GS, offset: 0, bits: 32, mode: Mode::X86, }, X86Register { name: "ds_base", capstone_reg: x86_reg::X86_REG_DS, full_reg: x86_reg::X86_REG_DS, offset: 0, bits: 32, mode: Mode::X86, }, X86Register { name: "es_base", capstone_reg: x86_reg::X86_REG_ES, full_reg: x86_reg::X86_REG_ES, offset: 0, bits: 32, mode: Mode::X86, }, X86Register { name: "cs_base", capstone_reg: x86_reg::X86_REG_CS, full_reg: x86_reg::X86_REG_CS, offset: 0, bits: 32, mode: Mode::X86, }, X86Register { name: "ss_base", capstone_reg: x86_reg::X86_REG_SS, full_reg: x86_reg::X86_REG_SS, offset: 0, bits: 32, mode: Mode::X86, }, ]; const AMD64REGISTERS: &[X86Register] = &[ X86Register { name: "ah", capstone_reg: x86_reg::X86_REG_AH, full_reg: x86_reg::X86_REG_RAX, offset: 8, bits: 8, mode: Mode::Amd64, }, X86Register { name: "al", capstone_reg: x86_reg::X86_REG_AL, full_reg: x86_reg::X86_REG_RAX, offset: 0, bits: 8, mode: Mode::Amd64, }, X86Register { name: "ax", capstone_reg: x86_reg::X86_REG_AX, full_reg: x86_reg::X86_REG_RAX, offset: 0, bits: 16, mode: Mode::Amd64, }, X86Register { name: "eax", capstone_reg: x86_reg::X86_REG_EAX, full_reg: x86_reg::X86_REG_RAX, offset: 0, bits: 32, mode: Mode::Amd64, }, X86Register { name: "rax", capstone_reg: x86_reg::X86_REG_RAX, full_reg: x86_reg::X86_REG_RAX, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "bh", capstone_reg: x86_reg::X86_REG_BH, full_reg: x86_reg::X86_REG_RBX, offset: 8, bits: 8, mode: Mode::Amd64, }, X86Register { name: "bl", capstone_reg: x86_reg::X86_REG_BL, full_reg: x86_reg::X86_REG_RBX, offset: 0, bits: 8, mode: Mode::Amd64, }, X86Register { name: "bx", capstone_reg: x86_reg::X86_REG_BX, full_reg: x86_reg::X86_REG_RBX, offset: 0, bits: 16, mode: Mode::Amd64, }, X86Register { name: "ebx", capstone_reg: x86_reg::X86_REG_EBX, full_reg: x86_reg::X86_REG_RBX, offset: 0, bits: 32, mode: Mode::Amd64, }, X86Register { name: "rbx", capstone_reg: x86_reg::X86_REG_RBX, full_reg: x86_reg::X86_REG_RBX, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "ch", capstone_reg: x86_reg::X86_REG_CH, full_reg: x86_reg::X86_REG_RCX, offset: 8, bits: 8, mode: Mode::Amd64, }, X86Register { name: "cl", capstone_reg: x86_reg::X86_REG_CL, full_reg: x86_reg::X86_REG_RCX, offset: 0, bits: 8, mode: Mode::Amd64, }, X86Register { name: "cx", capstone_reg: x86_reg::X86_REG_CX, full_reg: x86_reg::X86_REG_RCX, offset: 0, bits: 16, mode: Mode::Amd64, }, X86Register { name: "ecx", capstone_reg: x86_reg::X86_REG_ECX, full_reg: x86_reg::X86_REG_RCX, offset: 0, bits: 32, mode: Mode::Amd64, }, X86Register { name: "rcx", capstone_reg: x86_reg::X86_REG_RCX, full_reg: x86_reg::X86_REG_RCX, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "dh", capstone_reg: x86_reg::X86_REG_DH, full_reg: x86_reg::X86_REG_RDX, offset: 8, bits: 8, mode: Mode::Amd64, }, X86Register { name: "dl", capstone_reg: x86_reg::X86_REG_DL, full_reg: x86_reg::X86_REG_RDX, offset: 0, bits: 8, mode: Mode::Amd64, }, X86Register { name: "dx", capstone_reg: x86_reg::X86_REG_DX, full_reg: x86_reg::X86_REG_RDX, offset: 0, bits: 16, mode: Mode::Amd64, }, X86Register { name: "edx", capstone_reg: x86_reg::X86_REG_EDX, full_reg: x86_reg::X86_REG_RDX, offset: 0, bits: 32, mode: Mode::Amd64, }, X86Register { name: "rdx", capstone_reg: x86_reg::X86_REG_RDX, full_reg: x86_reg::X86_REG_RDX, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "sil", 
capstone_reg: x86_reg::X86_REG_SIL, full_reg: x86_reg::X86_REG_RSI, offset: 0, bits: 8, mode: Mode::Amd64, }, X86Register { name: "si", capstone_reg: x86_reg::X86_REG_SI, full_reg: x86_reg::X86_REG_RSI, offset: 0, bits: 16, mode: Mode::Amd64, }, X86Register { name: "esi", capstone_reg: x86_reg::X86_REG_ESI, full_reg: x86_reg::X86_REG_RSI, offset: 0, bits: 32, mode: Mode::Amd64, }, X86Register { name: "rsi", capstone_reg: x86_reg::X86_REG_RSI, full_reg: x86_reg::X86_REG_RSI, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "dil", capstone_reg: x86_reg::X86_REG_DIL, full_reg: x86_reg::X86_REG_RDI, offset: 0, bits: 8, mode: Mode::Amd64, }, X86Register { name: "di", capstone_reg: x86_reg::X86_REG_DI, full_reg: x86_reg::X86_REG_RDI, offset: 0, bits: 16, mode: Mode::Amd64, }, X86Register { name: "edi", capstone_reg: x86_reg::X86_REG_EDI, full_reg: x86_reg::X86_REG_RDI, offset: 0, bits: 32, mode: Mode::Amd64, }, X86Register { name: "rdi", capstone_reg: x86_reg::X86_REG_RDI, full_reg: x86_reg::X86_REG_RDI, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "sp", capstone_reg: x86_reg::X86_REG_SP, full_reg: x86_reg::X86_REG_RSP, offset: 0, bits: 16, mode: Mode::Amd64, }, X86Register { name: "esp", capstone_reg: x86_reg::X86_REG_ESP, full_reg: x86_reg::X86_REG_RSP, offset: 0, bits: 32, mode: Mode::Amd64, }, X86Register { name: "rsp", capstone_reg: x86_reg::X86_REG_RSP, full_reg: x86_reg::X86_REG_RSP, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "bpl", capstone_reg: x86_reg::X86_REG_BPL, full_reg: x86_reg::X86_REG_RBP, offset: 0, bits: 8, mode: Mode::Amd64, }, X86Register { name: "bp", capstone_reg: x86_reg::X86_REG_BP, full_reg: x86_reg::X86_REG_RBP, offset: 0, bits: 16, mode: Mode::Amd64, }, X86Register { name: "ebp", capstone_reg: x86_reg::X86_REG_EBP, full_reg: x86_reg::X86_REG_RBP, offset: 0, bits: 32, mode: Mode::Amd64, }, X86Register { name: "rbp", capstone_reg: x86_reg::X86_REG_RBP, full_reg: x86_reg::X86_REG_RBP, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "r8b", capstone_reg: x86_reg::X86_REG_R8B, full_reg: x86_reg::X86_REG_R8, offset: 0, bits: 8, mode: Mode::Amd64, }, X86Register { name: "r8w", capstone_reg: x86_reg::X86_REG_R8W, full_reg: x86_reg::X86_REG_R8, offset: 0, bits: 16, mode: Mode::Amd64, }, X86Register { name: "r8d", capstone_reg: x86_reg::X86_REG_R8D, full_reg: x86_reg::X86_REG_R8, offset: 0, bits: 32, mode: Mode::Amd64, }, X86Register { name: "r8", capstone_reg: x86_reg::X86_REG_R8, full_reg: x86_reg::X86_REG_R8, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "r9b", capstone_reg: x86_reg::X86_REG_R9B, full_reg: x86_reg::X86_REG_R9, offset: 0, bits: 8, mode: Mode::Amd64, }, X86Register { name: "r9w", capstone_reg: x86_reg::X86_REG_R9W, full_reg: x86_reg::X86_REG_R9, offset: 0, bits: 16, mode: Mode::Amd64, }, X86Register { name: "r9d", capstone_reg: x86_reg::X86_REG_R9D, full_reg: x86_reg::X86_REG_R9, offset: 0, bits: 32, mode: Mode::Amd64, }, X86Register { name: "r9", capstone_reg: x86_reg::X86_REG_R9, full_reg: x86_reg::X86_REG_R9, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "r10b", capstone_reg: x86_reg::X86_REG_R10B, full_reg: x86_reg::X86_REG_R10, offset: 0, bits: 8, mode: Mode::Amd64, }, X86Register { name: "r10w", capstone_reg: x86_reg::X86_REG_R10W, full_reg: x86_reg::X86_REG_R10, offset: 0, bits: 16, mode: Mode::Amd64, }, X86Register { name: "r10d", capstone_reg: x86_reg::X86_REG_R10D, full_reg: x86_reg::X86_REG_R10, offset: 0, bits: 32, mode: Mode::Amd64, }, X86Register { 
name: "r10", capstone_reg: x86_reg::X86_REG_R10, full_reg: x86_reg::X86_REG_R10, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "r11b", capstone_reg: x86_reg::X86_REG_R11B, full_reg: x86_reg::X86_REG_R11, offset: 0, bits: 8, mode: Mode::Amd64, }, X86Register { name: "r11w", capstone_reg: x86_reg::X86_REG_R11W, full_reg: x86_reg::X86_REG_R11, offset: 0, bits: 16, mode: Mode::Amd64, }, X86Register { name: "r11d", capstone_reg: x86_reg::X86_REG_R11D, full_reg: x86_reg::X86_REG_R11, offset: 0, bits: 32, mode: Mode::Amd64, }, X86Register { name: "r11", capstone_reg: x86_reg::X86_REG_R11, full_reg: x86_reg::X86_REG_R11, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "r12b", capstone_reg: x86_reg::X86_REG_R12B, full_reg: x86_reg::X86_REG_R12, offset: 0, bits: 8, mode: Mode::Amd64, }, X86Register { name: "r12w", capstone_reg: x86_reg::X86_REG_R12W, full_reg: x86_reg::X86_REG_R12, offset: 0, bits: 16, mode: Mode::Amd64, }, X86Register { name: "r12d", capstone_reg: x86_reg::X86_REG_R12D, full_reg: x86_reg::X86_REG_R12, offset: 0, bits: 32, mode: Mode::Amd64, }, X86Register { name: "r12", capstone_reg: x86_reg::X86_REG_R12, full_reg: x86_reg::X86_REG_R12, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "r13b", capstone_reg: x86_reg::X86_REG_R13B, full_reg: x86_reg::X86_REG_R13, offset: 0, bits: 8, mode: Mode::Amd64, }, X86Register { name: "r13w", capstone_reg: x86_reg::X86_REG_R13W, full_reg: x86_reg::X86_REG_R13, offset: 0, bits: 16, mode: Mode::Amd64, }, X86Register { name: "r13d", capstone_reg: x86_reg::X86_REG_R13D, full_reg: x86_reg::X86_REG_R13, offset: 0, bits: 32, mode: Mode::Amd64, }, X86Register { name: "r13", capstone_reg: x86_reg::X86_REG_R13, full_reg: x86_reg::X86_REG_R13, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "r14b", capstone_reg: x86_reg::X86_REG_R14B, full_reg: x86_reg::X86_REG_R14, offset: 0, bits: 8, mode: Mode::Amd64, }, X86Register { name: "r14w", capstone_reg: x86_reg::X86_REG_R14W, full_reg: x86_reg::X86_REG_R14, offset: 0, bits: 16, mode: Mode::Amd64, }, X86Register { name: "r14d", capstone_reg: x86_reg::X86_REG_R14D, full_reg: x86_reg::X86_REG_R14, offset: 0, bits: 32, mode: Mode::Amd64, }, X86Register { name: "r14", capstone_reg: x86_reg::X86_REG_R14, full_reg: x86_reg::X86_REG_R14, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "r15b", capstone_reg: x86_reg::X86_REG_R15B, full_reg: x86_reg::X86_REG_R15, offset: 0, bits: 8, mode: Mode::Amd64, }, X86Register { name: "r15w", capstone_reg: x86_reg::X86_REG_R15W, full_reg: x86_reg::X86_REG_R15, offset: 0, bits: 16, mode: Mode::Amd64, }, X86Register {<|fim▁hole|> bits: 32, mode: Mode::Amd64, }, X86Register { name: "r15", capstone_reg: x86_reg::X86_REG_R15, full_reg: x86_reg::X86_REG_R15, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "fs_base", capstone_reg: x86_reg::X86_REG_FS, full_reg: x86_reg::X86_REG_FS, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "gs_base", capstone_reg: x86_reg::X86_REG_GS, full_reg: x86_reg::X86_REG_GS, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "ds_base", capstone_reg: x86_reg::X86_REG_DS, full_reg: x86_reg::X86_REG_DS, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "es_base", capstone_reg: x86_reg::X86_REG_ES, full_reg: x86_reg::X86_REG_ES, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "cs_base", capstone_reg: x86_reg::X86_REG_CS, full_reg: x86_reg::X86_REG_CS, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: 
"ss_base", capstone_reg: x86_reg::X86_REG_SS, full_reg: x86_reg::X86_REG_SS, offset: 0, bits: 64, mode: Mode::Amd64, }, X86Register { name: "xmm0", capstone_reg: x86_reg::X86_REG_XMM0, full_reg: x86_reg::X86_REG_XMM0, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm1", capstone_reg: x86_reg::X86_REG_XMM1, full_reg: x86_reg::X86_REG_XMM1, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm2", capstone_reg: x86_reg::X86_REG_XMM2, full_reg: x86_reg::X86_REG_XMM2, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm3", capstone_reg: x86_reg::X86_REG_XMM3, full_reg: x86_reg::X86_REG_XMM3, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm4", capstone_reg: x86_reg::X86_REG_XMM4, full_reg: x86_reg::X86_REG_XMM4, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm5", capstone_reg: x86_reg::X86_REG_XMM5, full_reg: x86_reg::X86_REG_XMM5, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm6", capstone_reg: x86_reg::X86_REG_XMM6, full_reg: x86_reg::X86_REG_XMM6, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm7", capstone_reg: x86_reg::X86_REG_XMM7, full_reg: x86_reg::X86_REG_XMM7, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm8", capstone_reg: x86_reg::X86_REG_XMM8, full_reg: x86_reg::X86_REG_XMM8, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm9", capstone_reg: x86_reg::X86_REG_XMM9, full_reg: x86_reg::X86_REG_XMM9, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm10", capstone_reg: x86_reg::X86_REG_XMM10, full_reg: x86_reg::X86_REG_XMM10, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm11", capstone_reg: x86_reg::X86_REG_XMM11, full_reg: x86_reg::X86_REG_XMM11, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm12", capstone_reg: x86_reg::X86_REG_XMM12, full_reg: x86_reg::X86_REG_XMM12, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm13", capstone_reg: x86_reg::X86_REG_XMM13, full_reg: x86_reg::X86_REG_XMM13, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm14", capstone_reg: x86_reg::X86_REG_XMM14, full_reg: x86_reg::X86_REG_XMM14, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm15", capstone_reg: x86_reg::X86_REG_XMM15, full_reg: x86_reg::X86_REG_XMM15, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm16", capstone_reg: x86_reg::X86_REG_XMM16, full_reg: x86_reg::X86_REG_XMM16, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm17", capstone_reg: x86_reg::X86_REG_XMM17, full_reg: x86_reg::X86_REG_XMM17, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm18", capstone_reg: x86_reg::X86_REG_XMM18, full_reg: x86_reg::X86_REG_XMM18, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm19", capstone_reg: x86_reg::X86_REG_XMM19, full_reg: x86_reg::X86_REG_XMM19, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm20", capstone_reg: x86_reg::X86_REG_XMM20, full_reg: x86_reg::X86_REG_XMM20, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm21", capstone_reg: x86_reg::X86_REG_XMM21, full_reg: x86_reg::X86_REG_XMM21, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm22", capstone_reg: x86_reg::X86_REG_XMM22, full_reg: x86_reg::X86_REG_XMM22, offset: 0, bits: 128, mode: Mode::Amd64, }, X86Register { name: "xmm23", capstone_reg: x86_reg::X86_REG_XMM23, full_reg: x86_reg::X86_REG_XMM23, offset: 0, bits: 128, mode: 
Mode::Amd64,
    },
    X86Register {
        name: "xmm24",
        capstone_reg: x86_reg::X86_REG_XMM24,
        full_reg: x86_reg::X86_REG_XMM24,
        offset: 0,
        bits: 128,
        mode: Mode::Amd64,
    },
    X86Register {
        name: "xmm25",
        capstone_reg: x86_reg::X86_REG_XMM25,
        full_reg: x86_reg::X86_REG_XMM25,
        offset: 0,
        bits: 128,
        mode: Mode::Amd64,
    },
    X86Register {
        name: "xmm26",
        capstone_reg: x86_reg::X86_REG_XMM26,
        full_reg: x86_reg::X86_REG_XMM26,
        offset: 0,
        bits: 128,
        mode: Mode::Amd64,
    },
    X86Register {
        name: "xmm27",
        capstone_reg: x86_reg::X86_REG_XMM27,
        full_reg: x86_reg::X86_REG_XMM27,
        offset: 0,
        bits: 128,
        mode: Mode::Amd64,
    },
    X86Register {
        name: "xmm28",
        capstone_reg: x86_reg::X86_REG_XMM28,
        full_reg: x86_reg::X86_REG_XMM28,
        offset: 0,
        bits: 128,
        mode: Mode::Amd64,
    },
    X86Register {
        name: "xmm29",
        capstone_reg: x86_reg::X86_REG_XMM29,
        full_reg: x86_reg::X86_REG_XMM29,
        offset: 0,
        bits: 128,
        mode: Mode::Amd64,
    },
    X86Register {
        name: "xmm30",
        capstone_reg: x86_reg::X86_REG_XMM30,
        full_reg: x86_reg::X86_REG_XMM30,
        offset: 0,
        bits: 128,
        mode: Mode::Amd64,
    },
    X86Register {
        name: "xmm31",
        capstone_reg: x86_reg::X86_REG_XMM31,
        full_reg: x86_reg::X86_REG_XMM31,
        offset: 0,
        bits: 128,
        mode: Mode::Amd64,
    },
];

/// Struct for dealing with x86 registers
pub(crate) struct X86Register {
    name: &'static str,
    /// The capstone enum value for this register.
    capstone_reg: x86_reg,
    /// The full register. For example, eax is the full register for al.
    full_reg: x86_reg,
    /// The offset of this register. For example, ah is offset 8 bits into eax.
    offset: usize,
    /// The size of this register in bits
    bits: usize,
    /// The mode for this register
    mode: Mode,
}

impl X86Register {
    pub fn bits(&self) -> usize {
        self.bits
    }

    /// Returns true if this is a full-width register (i.e. eax, ebx, etc)
    pub fn is_full(&self) -> bool {
        self.capstone_reg == self.full_reg
    }

    /// Returns the full-width register for this register
    pub fn get_full(&self) -> Result<&'static X86Register> {
        get_register(&self.mode, self.full_reg)
    }

    /// Returns an expression which evaluates to the value of the register.
    ///
    /// This handles things like al/ah/ax/eax
    pub fn get(&self) -> Result<Expression> {
        if self.is_full() {
            Ok(expr_scalar(self.name, self.bits))
        } else if self.offset == 0 {
            Expr::trun(self.bits, self.get_full()?.get()?)
        } else {
            let full_reg = self.get_full()?;
            let expr = Expr::shr(
                full_reg.get()?,
                expr_const(self.offset as u64, full_reg.bits),
            )?;
            Expr::trun(self.bits, expr)
        }
    }

    /// Sets the value of this register.
    ///
    /// This handles things like al/ah/ax/eax
    pub fn set(&self, block: &mut Block, value: Expression) -> Result<()> {
        if self.is_full() {
            block.assign(scalar(self.name, self.bits), value);
            Ok(())
        } else if self.offset == 0 {
            let full_reg = self.get_full()?;
            if self.bits() < 32 {
                let mask = !0 << self.bits;
                let expr = Expr::and(full_reg.get()?, expr_const(mask, full_reg.bits))?;
                let expr = Expr::or(expr, Expr::zext(full_reg.bits, value)?)?;
                full_reg.set(block, expr)
            } else {
                full_reg.set(block, Expr::zext(full_reg.bits, value)?)
} } else { let full_reg = self.get_full()?; let mask = ((1 << self.bits) - 1) << self.offset; let expr = Expr::and(full_reg.get()?, expr_const(mask, full_reg.bits))?; let value = Expr::zext(full_reg.bits, value)?; let expr = Expr::or( expr, Expr::shl(value, expr_const(self.offset as u64, full_reg.bits))?, )?; full_reg.set(block, expr) } } } /// Takes a capstone register enum and returns an `X86Register` pub(crate) fn get_register(mode: &Mode, capstone_id: x86_reg) -> Result<&'static X86Register> { let registers: &[X86Register] = match *mode { Mode::X86 => X86REGISTERS, Mode::Amd64 => AMD64REGISTERS, }; for register in registers.iter() { if register.capstone_reg == capstone_id { return Ok(register); } } Err(format!("Could not find register {:?}", capstone_id).into()) }<|fim▁end|>
name: "r15d", capstone_reg: x86_reg::X86_REG_R15D, full_reg: x86_reg::X86_REG_R15, offset: 0,
<|file_name|>one_line_if_v2.rs<|end_file_name|><|fim▁begin|>// rustfmt-version: Two fn plain_if(x: bool) -> u8 { if x { 0 } else { 1 } } fn paren_if(x: bool) -> u8 { (if x { 0 } else { 1 }) } fn let_if(x: bool) -> u8 { let x = if x { foo() } else { bar() }; x } fn return_if(x: bool) -> u8 { return if x { 0 } else { 1 }; } fn multi_if() { use std::io; if x { foo()<|fim▁hole|> } if x { foo() } else { bar() } } fn middle_if() { use std::io; if x { foo() } else { bar() } let x = 1; }<|fim▁end|>
} else { bar()
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.urls import path from . import views urlpatterns = [<|fim▁hole|> path('', views.events), ]<|fim▁end|>
<|file_name|>Schema.java<|end_file_name|><|fim▁begin|>// Copyright 2017 Yahoo Inc. // Licensed under the terms of the Apache license. Please see LICENSE.md file distributed with this work for terms. package com.yahoo.bard.webservice.table; import com.yahoo.bard.webservice.data.time.Granularity; import com.yahoo.bard.webservice.util.Utils; import java.util.LinkedHashSet; import java.util.Optional; import java.util.Set; /** * An interface describing a table or table-like entity composed of sets of columns. */ public interface Schema { /** * Get all the columns underlying this Schema. * * @return The columns of this schema */ Set<Column> getColumns(); /** * Get the time granularity for this Schema. * * @return The time granularity of this schema */ Granularity getGranularity(); /** * Getter for set of columns by sub-type. * * @param columnClass The class of columns to to search * @param <T> sub-type of Column to return * * @return Set of Columns */<|fim▁hole|> default <T extends Column> LinkedHashSet<T> getColumns(Class<T> columnClass) { return Utils.getSubsetByType(getColumns(), columnClass); } /** * Given a column type and name, return the column of the expected type. * * @param name The name on the column * @param columnClass The class of the column being retrieved * @param <T> The type of the subclass of the column being retrieved * * @return The an optional containing the column of the name and type specified, if any */ default <T extends Column> Optional<T> getColumn(String name, Class<T> columnClass) { return getColumns(columnClass).stream() .filter(column -> column.getName().equals(name)) .findFirst(); } }<|fim▁end|>
<|file_name|>route.js<|end_file_name|><|fim▁begin|>import Ember from 'ember'; <|fim▁hole|> model(params) { return this.get('k8s').getService(params.name); } });<|fim▁end|>
export default Ember.Route.extend({ k8s: Ember.inject.service(),
<|file_name|>big64.js<|end_file_name|><|fim▁begin|>import test from 'ava'; import {big64, get64} from '../../src/index.js'; function macro(t, a, o, expected) { expected = get64(...expected); t.deepEqual(big64(a, o), expected); } macro.title = (providedTitle, a, o, expected) => `${providedTitle || ''} big64(${a}, ${o}) === ${expected}`.trim(); test( macro, [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], 0, [0x00_00_00_00, 0x00_00_00_00], ); test( macro, [0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], 0, [0xff_00_00_00, 0x00_00_00_00], ); test( macro, [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00], 0, [0xff_ff_ff_ff, 0xff_ff_ff_00], ); test( macro, [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff], 0, [0xff_ff_ff_ff, 0xff_ff_ff_ff], ); test( macro, [0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00], 0, [0x00_00_ff_00, 0x00_00_00_00], ); test( macro, [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00], 0, [0x00_00_00_00, 0x00_00_01_00], ); test( macro, [0x00, 0x00, 0x00, 0xa0, 0x00, 0x00, 0x00, 0x00], 0, [0x00_00_00_a0, 0x00_00_00_00], ); test( macro, [0x00, 0x00, 0x00, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x03, 0x0c], 1, [0x00_00_a0_00, 0x00_00_00_03],<|fim▁hole|><|fim▁end|>
);
<|file_name|>toolTip.js<|end_file_name|><|fim▁begin|>/* See license.txt for terms of usage */

require.def("domplate/toolTip", [
    "domplate/domplate",
    "core/lib",
    "core/trace"
],

function(Domplate, Lib, Trace) { with (Domplate) {

// ************************************************************************************************
// Globals

var mouseEvents = "mousemove mouseover mousedown click mouseout";
var currentToolTip = null;

// ************************************************************************************************
// Tooltip

function ToolTip()
{
    this.element = null;
}

ToolTip.prototype = domplate(
{
    tag:
        DIV({"class": "toolTip"},
            DIV()
        ),

    show: function(target, options)
    {
        if (currentToolTip)
            currentToolTip.hide();

        this.target = target;
        this.addListeners();

        // Render the tooltip.<|fim▁hole|>
        // Compute coordinates and show.
        var box = Lib.getElementBox(this.target);
        this.element.style.top = box.top + box.height + "px";
        this.element.style.left = box.left + box.width + "px";
        this.element.style.display = "block";

        currentToolTip = this;

        return this.element;
    },

    hide: function()
    {
        if (!this.element)
            return;

        this.removeListeners();

        // Remove UI
        this.element.parentNode.removeChild(this.element);

        currentToolTip = this.element = null;
    },

    addListeners: function()
    {
        this.onMouseEvent = Lib.bind(this.onMouseEvent, this);

        // Register listeners for all mouse events.
        $(document).bind(mouseEvents, this.onMouseEvent, true);
    },

    removeListeners: function()
    {
        // Remove listeners for mouse events.
        $(document).unbind(mouseEvents, this.onMouseEvent, this, true);
    },

    // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
    // Listeners

    onMouseEvent: function(event)
    {
        var e = Lib.fixEvent(event);

        // If the mouse is hovering within the tooltip, pass the event further to it.
        var ancestor = Lib.getAncestorByClass(e.target, "toolTip");
        if (ancestor)
            return;

        var x = event.clientX, y = event.clientY;
        var box = Lib.getElementBox(this.element);
        if (event.type != "click" && event.type != "mousedown")
            box = Lib.inflateRect(box, 10, 10);

        // If the mouse is hovering within the near neighbourhood, ignore it.
        if (Lib.pointInRect(box, x, y))
        {
            Lib.cancelEvent(e);
            return;
        }

        // If the mouse is hovering over the target, ignore it too.
        if (Lib.isAncestor(e.target, this.target))
        {
            Lib.cancelEvent(e);
            return;
        }

        // The mouse is hovering far away, let's destroy the tooltip.
        this.hide();

        Lib.cancelEvent(e);
    }
});

// ************************************************************************************************

return ToolTip;

// **********************************************************************************************//
}});<|fim▁end|>
var body = Lib.getBody(document); this.element = this.tag.append({options: options}, body, this);
<|file_name|>geolocip.py<|end_file_name|><|fim▁begin|><|fim▁hole|># This file is part of weboob. # # weboob is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # weboob is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with weboob. If not, see <http://www.gnu.org/licenses/>. from .base import Capability, BaseObject, StringField, FloatField __all__ = ['IpLocation', 'CapGeolocIp'] class IpLocation(BaseObject): """ Represents the location of an IP address. """ city = StringField('City') region = StringField('Region') zipcode = StringField('Zip code') country = StringField('Country') lt = FloatField('Latitude') lg = FloatField('Longitude') osmlink = StringField('Link to OpenStreetMap location page') host = StringField('Hostname') tld = StringField('Top Level Domain') isp = StringField('Internet Service Provider') def __init__(self, ipaddr): BaseObject.__init__(self, ipaddr) class CapGeolocIp(Capability): """ Access information about IP addresses database. """ def get_location(self, ipaddr): """ Get location of an IP address. :param ipaddr: IP address :type ipaddr: str :rtype: :class:`IpLocation` """ raise NotImplementedError()<|fim▁end|>
# -*- coding: utf-8 -*- # Copyright(C) 2010-2011 Romain Bignon #
<|file_name|>CacheRemoteImageAsyncJob.java<|end_file_name|><|fim▁begin|>package it.ninjatech.kvo.async.job; import it.ninjatech.kvo.model.ImageProvider; import it.ninjatech.kvo.util.Logger; import java.awt.Dimension; import java.awt.Image; import java.util.EnumSet; public class CacheRemoteImageAsyncJob extends AbstractImageLoaderAsyncJob { private static final long serialVersionUID = -8459315395025635686L; private final String path; private final String type; private final Dimension size; private Image image; public CacheRemoteImageAsyncJob(String id, ImageProvider provider, String path, Dimension size, String type) { super(id, EnumSet.of(LoadType.Cache, LoadType.Remote), provider); this.path = path; this.size = size; this.type = type; } @Override protected void execute() { try { Logger.log("-> executing cache-remote image %s\n", this.id); this.image = getImage(null, null, this.id, this.path, this.size, this.type); } catch (Exception e) { this.exception = e; } } public Image getImage() { return this.image; } <|fim▁hole|><|fim▁end|>
}
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>mod arrays; mod strings; use self::strings::unescape; pub use self::{arrays::ArrayMethod, strings::StringMethod}; use super::Expander; use crate::{parser::lexers::ArgumentSplitter, types}; use thiserror::Error; #[derive(Debug, PartialEq, Clone)] pub enum Pattern<'a> { StringPattern(&'a str), Whitespace, } #[derive(Debug)] pub struct MethodArgs<'a, 'b, E: Expander> { args: &'a str, expand: &'b mut E, } /// Error during method expansion /// /// Ex: `$join($scalar)` (can't join a scalar) or `$unknown(@variable)` (unknown method) #[derive(Debug, Clone, Error)] pub enum MethodError { /// Unknown array method #[error("'{0}' is an unknown array method")] InvalidArrayMethod(String), /// Unknown scalar method #[error("'{0}' is an unknown string method")] InvalidScalarMethod(String), /// A wrong argument was given to the method (extra, missing, or wrong type) #[error("{0}: {1}")] WrongArgument(&'static str, &'static str), /// An invalid regex was provided. This is specific to the `matches` method #[error("regex_replace: error in regular expression '{0}': {1}")]<|fim▁hole|> impl<'a, 'b, E: 'b + Expander> MethodArgs<'a, 'b, E> { pub fn array(&mut self) -> impl Iterator<Item = types::Str> + '_ { let expand = &mut (*self.expand); ArgumentSplitter::new(self.args) .flat_map(move |x| expand.expand_string(x).unwrap_or_else(|_| types::Args::new())) .map(|s| unescape(&s)) } pub fn join(self, pattern: &str) -> super::Result<types::Str, E::Error> { Ok(unescape(&self.expand.expand_string(self.args)?.join(pattern))) } pub fn new(args: &'a str, expand: &'b mut E) -> MethodArgs<'a, 'b, E> { MethodArgs { args, expand } } }<|fim▁end|>
InvalidRegex(String, #[source] regex::Error), }
<|file_name|>copy-button.js<|end_file_name|><|fim▁begin|>!((document, $) => {<|fim▁hole|> clip.on('success', function(e) { $('.copied').show(); $('.copied').fadeOut(2000); }); })(document, jQuery);<|fim▁end|>
var clip = new Clipboard('.copy-button');
<|file_name|>graphicsRequestBroker.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python """ ############################################################################## ## ## ## @Name : graphicsRequestBroker.py ## ## ## @author : Nicholas Lemay ## ## @since : 2007-06-28, last updated on 2008-04-23 ## ## ## @license : MetPX Copyright (C) 2004-2006 Environment Canada ## MetPX comes with ABSOLUTELY NO WARRANTY; For details type see the file ## named COPYING in the root of the source directory tree. ## ## @summary : This file is to be used as a bridge between the graphics ## request web page and the different plotting methods. ## ## ## ## @requires: graphicsRequest, which sends all the queries. ## ## The different graphic plotters. ## ## ############################################################################## """ import cgi, gettext, os, sys import cgitb; cgitb.enable() sys.path.insert(2, sys.path[0] + '/../../..') from pxStats.lib.StatsPaths import StatsPaths from pxStats.lib.GnuQueryBroker import GnuQueryBroker from pxStats.lib.RRDQueryBroker import RRDQueryBroker from pxStats.lib.LanguageTools import LanguageTools from cgi import escape LOCAL_MACHINE = os.uname()[1] EXPECTED_PARAMETERS = [ 'lang', 'querier','endTime','groupName','span','fileType','machines','statsTypes','preDeterminedSpan','sourlients','combineSourlients','products'] CURRENT_MODULE_ABS_PATH = os.path.abspath(__file__).replace( ".pyc", ".py" ) def returnToQueriersLocationWithReply( querier, reply ): """ @summary : Changes location back to the querier + returns the reply of the query to the querier. @param querier: String containing the location of the querier. @param reply : Series of parameters to send as a reply to the querier. """ print """ HTTP/1.0 200 OK Server: NCSA/1.0a6 Content-type: text/plain """ print """ %s """ %( escape(reply) ) def getQuerierLocation( form ): """ @param form : Form with which this program was called. @return : Returns the querier's location. """ try: querier = form["querier"] except: querier = "" return querier def handlePlotRequest( form, language ): """ @param form: form which contains the parameters to use for the query. @param language : language of the querier. @precondition: global _ translator must have been set prior to calling this method. """ global _ querier = getQuerierLocation( form ) plotter = getPlotterType(form) #validate for known plotter if plotter == "gnuplot": queryBroker = GnuQueryBroker( querierLanguage = language ) elif plotter == "rrd": queryBroker = RRDQueryBroker( querierLanguage = language ) else: queryBroker = None #---------------------------------------------------------------------- try: if queryBroker != None :#if valid plotter queryBroker.getParametersFromForm( form ) error = queryBroker.searchForParameterErrors() if error == "" : queryBroker.prepareQuery( ) queryBroker.executeQuery( ) reply = queryBroker.getReplyToSendToquerier() returnToQueriersLocationWithReply( querier , reply ) else: #An error was located within the call. queryBroker.replyParameters.error = error reply = queryBroker.getReplyToSendToquerier() returnToQueriersLocationWithReply( querier , reply ) else:#other reply = "images=;error=" + _("Cannot execute query.Unknown plotter.Plotter was %s") %plotter returnToQueriersLocationWithReply( querier , reply ) #---------------------------------------------------- except Exception,inst: #---------------- reply = "images=;error=Unexpected error : %s." %(inst) #------------------ returnToQueriersLocationWithReply( querier , reply ) def getPlotterType( form ): """ @param form : Form with which this program was called. @return : Returns the plotter type. """ #---------------------------------------------------------------------- try: if ( form["preDeterminedSpan"] == _("daily") ) : plotter = "gnuplot" else: try: if int( form["span"] )<= 36 : plotter = "gnuplot" else: plotter = "rrd" except: plotter = "rrd" #------------------------------------------------------------------- except: #---------------------------------------------------------- plotter = "" return plotter def getForm(): """ @summary: Returns the form with which this page was called. @note: The input form is expected to be contained within the field storage. Thus this program is expected to be called from requests like xmlhttp.send() @return: Returns the form with which this page was called. """ newForm = {} form = cgi.FieldStorage() #print form for key in form.keys(): # print key value = form.getvalue(key, "") if isinstance(value, list): # Multiple username fields specified newvalue = ",".join(value) else: newvalue = value newForm[key.replace("?","")]= newvalue for param in EXPECTED_PARAMETERS: if param not in newForm.keys(): newForm[param] = '' form = newForm #print form return form def getLanguage( form ): """ @summary : Returns the language in which the page should be generated. @param form: Form containing the parameters with which this program was called. """ language = "" try : language = form["lang"] except: pass return language def setGlobalLanguageParameters( language ): """ @summary : Sets up all the needed global language variables so that they can be used everywhere in this program. @param language: language with which this script was called. @return: None """ global _ _ = LanguageTools.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, language) def main(): """ @summary: Based on the plotter specified in the received form, executes query using a broker that's specific to the said plotter. """ try: form = getForm() language = getLanguage( form ) if language == "" : # unspecified language. querier = getQuerierLocation( form ) reply = "images=;error=" + "Error in query broker. Cannot proceed with query. No language was specified." #No language, do not translate. returnToQueriersLocationWithReply( querier, reply ) elif language not in LanguageTools.getSupportedLanguages(): # unsupported language<|fim▁hole|> else: #params seem ok setGlobalLanguageParameters( language ) handlePlotRequest( form, language ) except Exception, instance : #temp file helpful for debugging! fileHandle= open('graphicsRequestBrokerDebuggingOutput','w') fileHandle.write( str(instance) ) fileHandle.close() if __name__ == '__main__': main()<|fim▁end|>
querier = getQuerierLocation( form ) reply = "images=;error=" + "Error in query broker. Cannot proceed with query. %s is not a supported language." %language #Unsupported language, do not translate. returnToQueriersLocationWithReply( querier, reply )
<|file_name|>request.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ use crate::body::{consume_body, BodyOperations, BodyType}; use crate::dom::bindings::cell::{DomRefCell, Ref}; use crate::dom::bindings::codegen::Bindings::HeadersBinding::{HeadersInit, HeadersMethods}; use crate::dom::bindings::codegen::Bindings::RequestBinding; use crate::dom::bindings::codegen::Bindings::RequestBinding::ReferrerPolicy; use crate::dom::bindings::codegen::Bindings::RequestBinding::RequestCache; use crate::dom::bindings::codegen::Bindings::RequestBinding::RequestCredentials; use crate::dom::bindings::codegen::Bindings::RequestBinding::RequestDestination; use crate::dom::bindings::codegen::Bindings::RequestBinding::RequestInfo; use crate::dom::bindings::codegen::Bindings::RequestBinding::RequestInit; use crate::dom::bindings::codegen::Bindings::RequestBinding::RequestMethods; use crate::dom::bindings::codegen::Bindings::RequestBinding::RequestMode; use crate::dom::bindings::codegen::Bindings::RequestBinding::RequestRedirect; use crate::dom::bindings::error::{Error, Fallible}; use crate::dom::bindings::reflector::{reflect_dom_object, DomObject, Reflector}; use crate::dom::bindings::root::{DomRoot, MutNullableDom}; use crate::dom::bindings::str::{ByteString, DOMString, USVString}; use crate::dom::bindings::trace::RootedTraceableBox; use crate::dom::globalscope::GlobalScope; use crate::dom::headers::{Guard, Headers}; use crate::dom::promise::Promise; use crate::dom::xmlhttprequest::Extractable; use dom_struct::dom_struct; use http::header::{HeaderName, HeaderValue}; use http::method::InvalidMethod; use http::Method as HttpMethod; use net_traits::request::CacheMode as NetTraitsRequestCache; use net_traits::request::CredentialsMode as NetTraitsRequestCredentials; use net_traits::request::Destination as NetTraitsRequestDestination; use net_traits::request::RedirectMode as NetTraitsRequestRedirect; use net_traits::request::Referrer as NetTraitsRequestReferrer; use net_traits::request::Request as NetTraitsRequest; use net_traits::request::RequestMode as NetTraitsRequestMode; use net_traits::request::{Origin, Window}; use net_traits::ReferrerPolicy as MsgReferrerPolicy; use servo_url::ServoUrl; use std::cell::Cell; use std::rc::Rc; use std::str::FromStr; #[dom_struct] pub struct Request { reflector_: Reflector, request: DomRefCell<NetTraitsRequest>, body_used: Cell<bool>, headers: MutNullableDom<Headers>, mime_type: DomRefCell<Vec<u8>>, #[ignore_malloc_size_of = "Rc"] body_promise: DomRefCell<Option<(Rc<Promise>, BodyType)>>, } impl Request { fn new_inherited(global: &GlobalScope, url: ServoUrl) -> Request { Request { reflector_: Reflector::new(), request: DomRefCell::new(net_request_from_global(global, url)), body_used: Cell::new(false), headers: Default::default(), mime_type: DomRefCell::new("".to_string().into_bytes()), body_promise: DomRefCell::new(None), } } pub fn new(global: &GlobalScope, url: ServoUrl) -> DomRoot<Request> { reflect_dom_object( Box::new(Request::new_inherited(global, url)), global, RequestBinding::Wrap, ) } // https://fetch.spec.whatwg.org/#dom-request pub fn Constructor( global: &GlobalScope, input: RequestInfo, init: RootedTraceableBox<RequestInit>, ) -> Fallible<DomRoot<Request>> { // Step 1 let temporary_request: NetTraitsRequest; // Step 2 let mut fallback_mode: Option<NetTraitsRequestMode> = None; // 
Step 3 let mut fallback_credentials: Option<NetTraitsRequestCredentials> = None; // Step 4 let base_url = global.api_base_url(); // Step 5 TODO: "Let signal be null." match input { // Step 6 RequestInfo::USVString(USVString(ref usv_string)) => { // Step 6.1 let parsed_url = base_url.join(&usv_string); // Step 6.2 if parsed_url.is_err() { return Err(Error::Type("Url could not be parsed".to_string())); } // Step 6.3 let url = parsed_url.unwrap(); if includes_credentials(&url) { return Err(Error::Type("Url includes credentials".to_string())); } // Step 6.4 temporary_request = net_request_from_global(global, url); // Step 6.5 fallback_mode = Some(NetTraitsRequestMode::CorsMode); // Step 6.6 fallback_credentials = Some(NetTraitsRequestCredentials::CredentialsSameOrigin); }, // Step 7 RequestInfo::Request(ref input_request) => { // This looks like Step 38 // TODO do this in the right place to not mask other errors if request_is_disturbed(input_request) || request_is_locked(input_request) { return Err(Error::Type("Input is disturbed or locked".to_string())); } // Step 7.1 temporary_request = input_request.request.borrow().clone(); // Step 7.2 TODO: "Set signal to input's signal." }, } // Step 8 // TODO: `entry settings object` is not implemented yet. let origin = base_url.origin(); // Step 9 let mut window = Window::Client; // Step 10 // TODO: `environment settings object` is not implemented in Servo yet. // Step 11 if !init.window.handle().is_null_or_undefined() { return Err(Error::Type("Window is present and is not null".to_string())); } // Step 12 if !init.window.handle().is_undefined() { window = Window::NoWindow; } // Step 13 let mut request: NetTraitsRequest; request = net_request_from_global(global, temporary_request.current_url()); request.method = temporary_request.method; request.headers = temporary_request.headers.clone(); request.unsafe_request = true; request.window = window; // TODO: `entry settings object` is not implemented in Servo yet. request.origin = Origin::Client; request.referrer = temporary_request.referrer; request.referrer_policy = temporary_request.referrer_policy; request.mode = temporary_request.mode; request.credentials_mode = temporary_request.credentials_mode; request.cache_mode = temporary_request.cache_mode; request.redirect_mode = temporary_request.redirect_mode; request.integrity_metadata = temporary_request.integrity_metadata; // Step 14 if init.body.is_some() || init.cache.is_some() || init.credentials.is_some() || init.integrity.is_some() || init.headers.is_some() || init.method.is_some() || init.mode.is_some() || init.redirect.is_some() || init.referrer.is_some() || init.referrerPolicy.is_some() || !init.window.handle().is_undefined() { // Step 14.1 if request.mode == NetTraitsRequestMode::Navigate { request.mode = NetTraitsRequestMode::SameOrigin; } // Step 14.2 TODO: "Unset request's reload-navigation flag." // Step 14.3 TODO: "Unset request's history-navigation flag." 
// Step 14.4 request.referrer = NetTraitsRequestReferrer::Client; // Step 14.5 request.referrer_policy = None; } // Step 15 if let Some(init_referrer) = init.referrer.as_ref() { // Step 15.1 let ref referrer = init_referrer.0; // Step 15.2 if referrer.is_empty() { request.referrer = NetTraitsRequestReferrer::NoReferrer; } else { // Step 15.3.1 let parsed_referrer = base_url.join(referrer); // Step 15.3.2 if parsed_referrer.is_err() { return Err(Error::Type("Failed to parse referrer url".to_string())); } // Step 15.3.3 if let Ok(parsed_referrer) = parsed_referrer { if (parsed_referrer.cannot_be_a_base() && parsed_referrer.scheme() == "about" && parsed_referrer.path() == "client") || parsed_referrer.origin() != origin { request.referrer = NetTraitsRequestReferrer::Client; } else { // Step 15.3.4 request.referrer = NetTraitsRequestReferrer::ReferrerUrl(parsed_referrer); } } } } // Step 16 if let Some(init_referrerpolicy) = init.referrerPolicy.as_ref() { let init_referrer_policy = init_referrerpolicy.clone().into(); request.referrer_policy = Some(init_referrer_policy); } // Step 17 let mode = init .mode .as_ref() .map(|m| m.clone().into()) .or(fallback_mode); // Step 18 if let Some(NetTraitsRequestMode::Navigate) = mode { return Err(Error::Type("Request mode is Navigate".to_string())); } // Step 19 if let Some(m) = mode { request.mode = m; } // Step 20 let credentials = init .credentials .as_ref() .map(|m| m.clone().into()) .or(fallback_credentials); // Step 21 if let Some(c) = credentials { request.credentials_mode = c; } // Step 22 if let Some(init_cache) = init.cache.as_ref() { let cache = init_cache.clone().into(); request.cache_mode = cache; } // Step 23 if request.cache_mode == NetTraitsRequestCache::OnlyIfCached { if request.mode != NetTraitsRequestMode::SameOrigin { return Err(Error::Type( "Cache is 'only-if-cached' and mode is not 'same-origin'".to_string(), )); } } // Step 24 if let Some(init_redirect) = init.redirect.as_ref() { let redirect = init_redirect.clone().into(); request.redirect_mode = redirect; } // Step 25 if let Some(init_integrity) = init.integrity.as_ref() { let integrity = init_integrity.clone().to_string(); request.integrity_metadata = integrity; } // Step 26 TODO: "If init["keepalive"] exists..." // Step 27.1 if let Some(init_method) = init.method.as_ref() { // Step 27.2 if !is_method(&init_method) { return Err(Error::Type("Method is not a method".to_string())); } if is_forbidden_method(&init_method) { return Err(Error::Type("Method is forbidden".to_string())); } // Step 27.3 let method = match init_method.as_str() { Some(s) => normalize_method(s) .map_err(|e| Error::Type(format!("Method is not valid: {:?}", e)))?, None => return Err(Error::Type("Method is not a valid UTF8".to_string())), }; // Step 27.4 request.method = method; } // Step 28 TODO: "If init["signal"] exists..." // Step 29 let r = Request::from_net_request(global, request); // Step 30 TODO: "If signal is not null..." // Step 31 // "or_init" looks unclear here, but it always enters the block since r // hasn't had any other way to initialize its headers r.headers.or_init(|| Headers::for_request(&r.global())); // Step 32 - but spec says this should only be when non-empty init? 
// Step 32.1 let mut headers_copy = r.Headers(); // Step 32.2 if let Some(possible_header) = init.headers.as_ref() { match possible_header { &HeadersInit::Headers(ref init_headers) => { headers_copy = DomRoot::from_ref(&*init_headers); }, &HeadersInit::ByteStringSequenceSequence(ref init_sequence) => { headers_copy.fill(Some(HeadersInit::ByteStringSequenceSequence( init_sequence.clone(), )))?; }, &HeadersInit::ByteStringByteStringRecord(ref init_map) => { headers_copy.fill(Some(HeadersInit::ByteStringByteStringRecord( init_map.clone(), )))?; }, } } // Step 32.3 // We cannot empty `r.Headers().header_list` because // we would undo the Step 27 above. One alternative is to set // `headers_copy` as a deep copy of `r.Headers()`. However, // `r.Headers()` is a `DomRoot<T>`, and therefore it is difficult // to obtain a mutable reference to `r.Headers()`. Without the // mutable reference, we cannot mutate `r.Headers()` to be the // deep copied headers in Step 27. // Step 32.4 if r.request.borrow().mode == NetTraitsRequestMode::NoCors { let borrowed_request = r.request.borrow(); // Step 32.4.1 if !is_cors_safelisted_method(&borrowed_request.method) { return Err(Error::Type( "The mode is 'no-cors' but the method is not a cors-safelisted method" .to_string(), )); } // Step 32.4.2 r.Headers().set_guard(Guard::RequestNoCors); } // Step 32.5 match init.headers { None => { // This is equivalent to the specification's concept of // "associated headers list". If an init headers is not given, // but an input with headers is given, set request's // headers as the input's Headers. if let RequestInfo::Request(ref input_request) = input { r.Headers() .fill(Some(HeadersInit::Headers(input_request.Headers())))?; } }, Some(HeadersInit::Headers(_)) => { r.Headers().fill(Some(HeadersInit::Headers(headers_copy)))? }, _ => {}, } // Step 32.5-6 depending on how we got here // Copy the headers list onto the headers of net_traits::Request r.request.borrow_mut().headers = r.Headers().get_headers_list(); // Step 33 let mut input_body = if let RequestInfo::Request(ref input_request) = input { let input_request_request = input_request.request.borrow(); input_request_request.body.clone() } else { None }; // Step 34 if let Some(init_body_option) = init.body.as_ref() { if init_body_option.is_some() || input_body.is_some() { let req = r.request.borrow(); let req_method = &req.method; match *req_method { HttpMethod::GET => { return Err(Error::Type( "Init's body is non-null, and request method is GET".to_string(), )); }, HttpMethod::HEAD => { return Err(Error::Type( "Init's body is non-null, and request method is HEAD".to_string(), )); }, _ => {}, } } } // Step 35-36 if let Some(Some(ref init_body)) = init.body { // Step 36.2 TODO "If init["keepalive"] exists and is true..." // Step 36.3 let extracted_body_tmp = init_body.extract(); input_body = Some(extracted_body_tmp.0); let content_type = extracted_body_tmp.1; // Step 36.4 if let Some(contents) = content_type { let ct_header_name = b"Content-Type"; if !r .Headers() .Has(ByteString::new(ct_header_name.to_vec())) .unwrap() { let ct_header_val = contents.as_bytes(); r.Headers().Append( ByteString::new(ct_header_name.to_vec()), ByteString::new(ct_header_val.to_vec()), )?; // In Servo r.Headers's header list isn't a pointer to // the same actual list as r.request's, and so we need to // append to both lists to keep them in sync. 
if let Ok(v) = HeaderValue::from_bytes(ct_header_val) { r.request .borrow_mut() .headers .insert(HeaderName::from_bytes(ct_header_name).unwrap(), v); } } } } // Step 37 "TODO if body is non-null and body's source is null..." // This looks like where we need to set the use-preflight flag // if the request has a body and nothing else has set the flag. // Step 38 is done earlier // Step 39 // TODO: `ReadableStream` object is not implemented in Servo yet. // Step 40 r.request.borrow_mut().body = input_body; // Step 41 let extracted_mime_type = r.Headers().extract_mime_type(); *r.mime_type.borrow_mut() = extracted_mime_type; // Step 42 Ok(r) } // https://fetch.spec.whatwg.org/#concept-body-locked fn locked(&self) -> bool { // TODO: ReadableStream is unimplemented. Just return false // for now. false } } impl Request { fn from_net_request(global: &GlobalScope, net_request: NetTraitsRequest) -> DomRoot<Request> { let r = Request::new(global, net_request.current_url()); *r.request.borrow_mut() = net_request; r } fn clone_from(r: &Request) -> Fallible<DomRoot<Request>> { let req = r.request.borrow(); let url = req.url(); let body_used = r.body_used.get(); let mime_type = r.mime_type.borrow().clone(); let headers_guard = r.Headers().get_guard(); let r_clone = Request::new(&r.global(), url); r_clone.request.borrow_mut().pipeline_id = req.pipeline_id; { let mut borrowed_r_request = r_clone.request.borrow_mut(); borrowed_r_request.origin = req.origin.clone(); } *r_clone.request.borrow_mut() = req.clone(); r_clone.body_used.set(body_used); *r_clone.mime_type.borrow_mut() = mime_type; r_clone .Headers() .fill(Some(HeadersInit::Headers(r.Headers())))?; r_clone.Headers().set_guard(headers_guard); Ok(r_clone) } pub fn get_request(&self) -> NetTraitsRequest { self.request.borrow().clone() } } fn net_request_from_global(global: &GlobalScope, url: ServoUrl) -> NetTraitsRequest { let origin = Origin::Origin(global.get_url().origin()); let pipeline_id = global.pipeline_id(); NetTraitsRequest::new(url, Some(origin), Some(pipeline_id)) } // https://fetch.spec.whatwg.org/#concept-method-normalize fn normalize_method(m: &str) -> Result<HttpMethod, InvalidMethod> { match_ignore_ascii_case! { m, "delete" => return Ok(HttpMethod::DELETE), "get" => return Ok(HttpMethod::GET), "head" => return Ok(HttpMethod::HEAD), "options" => return Ok(HttpMethod::OPTIONS), "post" => return Ok(HttpMethod::POST), "put" => return Ok(HttpMethod::PUT), _ => (), } debug!("Method: {:?}", m); HttpMethod::from_str(m) } // https://fetch.spec.whatwg.org/#concept-method fn is_method(m: &ByteString) -> bool { m.as_str().is_some() } // https://fetch.spec.whatwg.org/#forbidden-method fn is_forbidden_method(m: &ByteString) -> bool { match m.to_lower().as_str() { Some("connect") => true, Some("trace") => true, Some("track") => true, _ => false, } } // https://fetch.spec.whatwg.org/#cors-safelisted-method fn is_cors_safelisted_method(m: &HttpMethod) -> bool { m == &HttpMethod::GET || m == &HttpMethod::HEAD || m == &HttpMethod::POST } // https://url.spec.whatwg.org/#include-credentials fn includes_credentials(input: &ServoUrl) -> bool { !input.username().is_empty() || input.password().is_some() } // TODO: `Readable Stream` object is not implemented in Servo yet. // https://fetch.spec.whatwg.org/#concept-body-disturbed fn request_is_disturbed(_input: &Request) -> bool { false } // TODO: `Readable Stream` object is not implemented in Servo yet. 
// https://fetch.spec.whatwg.org/#concept-body-locked fn request_is_locked(_input: &Request) -> bool { false } impl RequestMethods for Request { // https://fetch.spec.whatwg.org/#dom-request-method fn Method(&self) -> ByteString { let r = self.request.borrow(); ByteString::new(r.method.as_ref().as_bytes().into()) } // https://fetch.spec.whatwg.org/#dom-request-url fn Url(&self) -> USVString { let r = self.request.borrow(); USVString(r.url_list.get(0).map_or("", |u| u.as_str()).into()) } // https://fetch.spec.whatwg.org/#dom-request-headers fn Headers(&self) -> DomRoot<Headers> { self.headers.or_init(|| Headers::new(&self.global())) } // https://fetch.spec.whatwg.org/#dom-request-destination fn Destination(&self) -> RequestDestination { self.request.borrow().destination.into() } // https://fetch.spec.whatwg.org/#dom-request-referrer fn Referrer(&self) -> USVString { let r = self.request.borrow(); USVString(match r.referrer { NetTraitsRequestReferrer::NoReferrer => String::from(""), NetTraitsRequestReferrer::Client => String::from("about:client"), NetTraitsRequestReferrer::ReferrerUrl(ref u) => { let u_c = u.clone(); u_c.into_string() }, }) } // https://fetch.spec.whatwg.org/#dom-request-referrerpolicy fn ReferrerPolicy(&self) -> ReferrerPolicy { self.request .borrow() .referrer_policy .map(|m| m.into()) .unwrap_or(ReferrerPolicy::_empty) } // https://fetch.spec.whatwg.org/#dom-request-mode fn Mode(&self) -> RequestMode { self.request.borrow().mode.clone().into() } // https://fetch.spec.whatwg.org/#dom-request-credentials fn Credentials(&self) -> RequestCredentials { let r = self.request.borrow().clone(); r.credentials_mode.into() } // https://fetch.spec.whatwg.org/#dom-request-cache fn Cache(&self) -> RequestCache { let r = self.request.borrow().clone(); r.cache_mode.into() } // https://fetch.spec.whatwg.org/#dom-request-redirect fn Redirect(&self) -> RequestRedirect { let r = self.request.borrow().clone(); r.redirect_mode.into() } // https://fetch.spec.whatwg.org/#dom-request-integrity fn Integrity(&self) -> DOMString { let r = self.request.borrow(); DOMString::from_string(r.integrity_metadata.clone()) } // https://fetch.spec.whatwg.org/#dom-body-bodyused fn BodyUsed(&self) -> bool { self.body_used.get() } // https://fetch.spec.whatwg.org/#dom-request-clone fn Clone(&self) -> Fallible<DomRoot<Request>> { // Step 1 if request_is_locked(self) { return Err(Error::Type("Request is locked".to_string())); } if request_is_disturbed(self) { return Err(Error::Type("Request is disturbed".to_string())); } // Step 2 Request::clone_from(self) } // https://fetch.spec.whatwg.org/#dom-body-text fn Text(&self) -> Rc<Promise> { consume_body(self, BodyType::Text) } // https://fetch.spec.whatwg.org/#dom-body-blob fn Blob(&self) -> Rc<Promise> { consume_body(self, BodyType::Blob) } // https://fetch.spec.whatwg.org/#dom-body-formdata fn FormData(&self) -> Rc<Promise> { consume_body(self, BodyType::FormData) }<|fim▁hole|> fn Json(&self) -> Rc<Promise> { consume_body(self, BodyType::Json) } // https://fetch.spec.whatwg.org/#dom-body-arraybuffer fn ArrayBuffer(&self) -> Rc<Promise> { consume_body(self, BodyType::ArrayBuffer) } } impl BodyOperations for Request { fn get_body_used(&self) -> bool { self.BodyUsed() } fn set_body_promise(&self, p: &Rc<Promise>, body_type: BodyType) { assert!(self.body_promise.borrow().is_none()); self.body_used.set(true); *self.body_promise.borrow_mut() = Some((p.clone(), body_type)); } fn is_locked(&self) -> bool { self.locked() } fn take_body(&self) -> Option<Vec<u8>> { let mut 
request = self.request.borrow_mut(); let body = request.body.take(); Some(body.unwrap_or(vec![])) } fn get_mime_type(&self) -> Ref<Vec<u8>> { self.mime_type.borrow() } } impl Into<NetTraitsRequestCache> for RequestCache { fn into(self) -> NetTraitsRequestCache { match self { RequestCache::Default => NetTraitsRequestCache::Default, RequestCache::No_store => NetTraitsRequestCache::NoStore, RequestCache::Reload => NetTraitsRequestCache::Reload, RequestCache::No_cache => NetTraitsRequestCache::NoCache, RequestCache::Force_cache => NetTraitsRequestCache::ForceCache, RequestCache::Only_if_cached => NetTraitsRequestCache::OnlyIfCached, } } } impl Into<RequestCache> for NetTraitsRequestCache { fn into(self) -> RequestCache { match self { NetTraitsRequestCache::Default => RequestCache::Default, NetTraitsRequestCache::NoStore => RequestCache::No_store, NetTraitsRequestCache::Reload => RequestCache::Reload, NetTraitsRequestCache::NoCache => RequestCache::No_cache, NetTraitsRequestCache::ForceCache => RequestCache::Force_cache, NetTraitsRequestCache::OnlyIfCached => RequestCache::Only_if_cached, } } } impl Into<NetTraitsRequestCredentials> for RequestCredentials { fn into(self) -> NetTraitsRequestCredentials { match self { RequestCredentials::Omit => NetTraitsRequestCredentials::Omit, RequestCredentials::Same_origin => NetTraitsRequestCredentials::CredentialsSameOrigin, RequestCredentials::Include => NetTraitsRequestCredentials::Include, } } } impl Into<RequestCredentials> for NetTraitsRequestCredentials { fn into(self) -> RequestCredentials { match self { NetTraitsRequestCredentials::Omit => RequestCredentials::Omit, NetTraitsRequestCredentials::CredentialsSameOrigin => RequestCredentials::Same_origin, NetTraitsRequestCredentials::Include => RequestCredentials::Include, } } } impl Into<NetTraitsRequestDestination> for RequestDestination { fn into(self) -> NetTraitsRequestDestination { match self { RequestDestination::_empty => NetTraitsRequestDestination::None, RequestDestination::Audio => NetTraitsRequestDestination::Audio, RequestDestination::Document => NetTraitsRequestDestination::Document, RequestDestination::Embed => NetTraitsRequestDestination::Embed, RequestDestination::Font => NetTraitsRequestDestination::Font, RequestDestination::Image => NetTraitsRequestDestination::Image, RequestDestination::Manifest => NetTraitsRequestDestination::Manifest, RequestDestination::Object => NetTraitsRequestDestination::Object, RequestDestination::Report => NetTraitsRequestDestination::Report, RequestDestination::Script => NetTraitsRequestDestination::Script, RequestDestination::Sharedworker => NetTraitsRequestDestination::SharedWorker, RequestDestination::Style => NetTraitsRequestDestination::Style, RequestDestination::Track => NetTraitsRequestDestination::Track, RequestDestination::Video => NetTraitsRequestDestination::Video, RequestDestination::Worker => NetTraitsRequestDestination::Worker, RequestDestination::Xslt => NetTraitsRequestDestination::Xslt, } } } impl Into<RequestDestination> for NetTraitsRequestDestination { fn into(self) -> RequestDestination { match self { NetTraitsRequestDestination::None => RequestDestination::_empty, NetTraitsRequestDestination::Audio => RequestDestination::Audio, NetTraitsRequestDestination::Document => RequestDestination::Document, NetTraitsRequestDestination::Embed => RequestDestination::Embed, NetTraitsRequestDestination::Font => RequestDestination::Font, NetTraitsRequestDestination::Image => RequestDestination::Image, NetTraitsRequestDestination::Manifest => 
RequestDestination::Manifest, NetTraitsRequestDestination::Object => RequestDestination::Object, NetTraitsRequestDestination::Report => RequestDestination::Report, NetTraitsRequestDestination::Script => RequestDestination::Script, NetTraitsRequestDestination::ServiceWorker | NetTraitsRequestDestination::AudioWorklet | NetTraitsRequestDestination::PaintWorklet => { panic!("ServiceWorker request destination should not be exposed to DOM") }, NetTraitsRequestDestination::SharedWorker => RequestDestination::Sharedworker, NetTraitsRequestDestination::Style => RequestDestination::Style, NetTraitsRequestDestination::Track => RequestDestination::Track, NetTraitsRequestDestination::Video => RequestDestination::Video, NetTraitsRequestDestination::Worker => RequestDestination::Worker, NetTraitsRequestDestination::Xslt => RequestDestination::Xslt, } } } impl Into<NetTraitsRequestMode> for RequestMode { fn into(self) -> NetTraitsRequestMode { match self { RequestMode::Navigate => NetTraitsRequestMode::Navigate, RequestMode::Same_origin => NetTraitsRequestMode::SameOrigin, RequestMode::No_cors => NetTraitsRequestMode::NoCors, RequestMode::Cors => NetTraitsRequestMode::CorsMode, } } } impl Into<RequestMode> for NetTraitsRequestMode { fn into(self) -> RequestMode { match self { NetTraitsRequestMode::Navigate => RequestMode::Navigate, NetTraitsRequestMode::SameOrigin => RequestMode::Same_origin, NetTraitsRequestMode::NoCors => RequestMode::No_cors, NetTraitsRequestMode::CorsMode => RequestMode::Cors, NetTraitsRequestMode::WebSocket { .. } => { unreachable!("Websocket request mode should never be exposed to Dom") }, } } } // TODO // When whatwg/fetch PR #346 is merged, fix this. impl Into<MsgReferrerPolicy> for ReferrerPolicy { fn into(self) -> MsgReferrerPolicy { match self { ReferrerPolicy::_empty => MsgReferrerPolicy::NoReferrer, ReferrerPolicy::No_referrer => MsgReferrerPolicy::NoReferrer, ReferrerPolicy::No_referrer_when_downgrade => { MsgReferrerPolicy::NoReferrerWhenDowngrade }, ReferrerPolicy::Origin => MsgReferrerPolicy::Origin, ReferrerPolicy::Origin_when_cross_origin => MsgReferrerPolicy::OriginWhenCrossOrigin, ReferrerPolicy::Unsafe_url => MsgReferrerPolicy::UnsafeUrl, ReferrerPolicy::Strict_origin => MsgReferrerPolicy::StrictOrigin, ReferrerPolicy::Strict_origin_when_cross_origin => { MsgReferrerPolicy::StrictOriginWhenCrossOrigin }, } } } impl Into<ReferrerPolicy> for MsgReferrerPolicy { fn into(self) -> ReferrerPolicy { match self { MsgReferrerPolicy::NoReferrer => ReferrerPolicy::No_referrer, MsgReferrerPolicy::NoReferrerWhenDowngrade => { ReferrerPolicy::No_referrer_when_downgrade }, MsgReferrerPolicy::Origin => ReferrerPolicy::Origin, MsgReferrerPolicy::SameOrigin => ReferrerPolicy::Origin, MsgReferrerPolicy::OriginWhenCrossOrigin => ReferrerPolicy::Origin_when_cross_origin, MsgReferrerPolicy::UnsafeUrl => ReferrerPolicy::Unsafe_url, MsgReferrerPolicy::StrictOrigin => ReferrerPolicy::Strict_origin, MsgReferrerPolicy::StrictOriginWhenCrossOrigin => { ReferrerPolicy::Strict_origin_when_cross_origin }, } } } impl Into<NetTraitsRequestRedirect> for RequestRedirect { fn into(self) -> NetTraitsRequestRedirect { match self { RequestRedirect::Follow => NetTraitsRequestRedirect::Follow, RequestRedirect::Error => NetTraitsRequestRedirect::Error, RequestRedirect::Manual => NetTraitsRequestRedirect::Manual, } } } impl Into<RequestRedirect> for NetTraitsRequestRedirect { fn into(self) -> RequestRedirect { match self { NetTraitsRequestRedirect::Follow => RequestRedirect::Follow, 
NetTraitsRequestRedirect::Error => RequestRedirect::Error, NetTraitsRequestRedirect::Manual => RequestRedirect::Manual, } } } impl Clone for HeadersInit { fn clone(&self) -> HeadersInit { match self { &HeadersInit::Headers(ref h) => HeadersInit::Headers(h.clone()), &HeadersInit::ByteStringSequenceSequence(ref b) => { HeadersInit::ByteStringSequenceSequence(b.clone()) }, &HeadersInit::ByteStringByteStringRecord(ref m) => { HeadersInit::ByteStringByteStringRecord(m.clone()) }, } } }<|fim▁end|>
// https://fetch.spec.whatwg.org/#dom-body-json
<|file_name|>test.rs<|end_file_name|><|fim▁begin|>// Copyright 2017 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(extern_types)] <|fim▁hole|>#[link(name = "ctest", kind = "static")] extern { type data; fn data_create(magic: u32) -> *mut data; fn data_get(data: *mut data) -> u32; } const MAGIC: u32 = 0xdeadbeef; fn main() { unsafe { let data = data_create(MAGIC); assert_eq!(data_get(data), MAGIC); } }<|fim▁end|>
<|file_name|>test_aspp.py<|end_file_name|><|fim▁begin|>import unittest import chainer from chainer import testing from chainer.testing import attr <|fim▁hole|> class TestSeparableASPP(unittest.TestCase): def setUp(self): self.in_channels = 128 self.out_channels = 32 self.link = SeparableASPP( self.in_channels, self.out_channels) def check_call(self): xp = self.link.xp x = chainer.Variable(xp.random.uniform( low=-1, high=1, size=(2, self.in_channels, 64, 64) ).astype(xp.float32)) y = self.link(x) self.assertIsInstance(y, chainer.Variable) self.assertIsInstance(y.data, xp.ndarray) self.assertEqual(y.shape, (2, self.out_channels, 64, 64)) @attr.slow def test_call_cpu(self): self.check_call() @attr.gpu @attr.slow def test_call_gpu(self): self.link.to_gpu() self.check_call() testing.run_module(__name__, __file__)<|fim▁end|>
from chainercv.links.model.deeplab import SeparableASPP
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.shortcuts import render, redirect, get_object_or_404 from django.http import HttpResponseBadRequest, HttpResponse from bootcamp.tasks.models import Task from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from bootcamp.tasks.forms import TaskForm from django.contrib.auth.decorators import login_required from bootcamp.decorators import ajax_required import markdown from django.template.loader import render_to_string import requests,json from bootcamp.utils.loadconfig import get_vars def getvrflist(network): if network.lower() == 'emc'.lower(): filename = '/etc/netbot/emcvrflist.txt' elif network.lower() == 'mtn'.lower(): filename = '/etc/netbot/mtnvrflist.txt' vrfnames = [] with open(filename) as f: for line in f: vrfnames.append(line) return vrfnames @login_required def traceroute(request): # task = get_object_or_404(Task, status=Task.ACTIVE) emcvrfname=getvrflist('emc') return render(request, 'traceroute/traceroute.html', {'task': "task", 'emcvrf':emcvrfname,'message':""}) @login_required def inttraceroute(request): # task = get_object_or_404(Task, status=Task.ACTIVE) emcvrfname=getvrflist('emc') return render(request, 'traceroute/inttraceroute.html', {'task': "task", 'emcvrf':emcvrfname,'message':""}) @login_required() def runtrace(request): sourceip = request.POST.get('sourceip') destip = request.POST.get('destip') vrf = request.POST.get('vrf') network = request.POST.get('network') vrfname = request.POST.get('vrfname') baseurl = get_vars('ansibengineemc') emcvrfname=getvrflist('emc') if sourceip == '' or destip == '' or vrf == '' or vrfname == '' or network == '': return render(request, 'traceroute/traceroute.html', {'task': "task", 'emcvrf':emcvrfname,'message':"Please fill in all the details!!"}) if str(network).lower() == 'EMC'.lower(): baseurl = get_vars('ansibengineemc') else: baseurl = get_vars('ansibenginemtn') if vrf == 'True': vrf="True" else: vrf="False" return render(request, 'traceroute/runtraceroute.html', {'sourceip': sourceip, 'destip':destip,'vrfname': vrfname, 'vrf':vrf,'baseurl':baseurl}) @login_required() def runtraceapi(request): sourceip = request.POST.get('sourceip') destip = request.POST.get('destip') vrf = request.POST.get('vrf') vrfname = request.POST.get('vrfname') baseurl = request.POST.get('baseurl') url = baseurl+'/ansibengine/api/v1.0/runtrace' headers = {'content-type': 'application/json'} temp= {} data= {} data['sourceip']=sourceip data['destip']=destip data['vrfname']=vrfname if vrf == 'True': data['vrf']="True" else: data['vrf']="False" try: response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t')) if not response.status_code == 201 : temp['value']="Error!! Unexpected response. Please report this" return HttpResponse(json.dumps(temp), content_type = "application/json") except requests.exceptions.RequestException as e: # return "Error: {}".format(e) temp['value']="Error connecting to API. 
Please report this" return HttpResponse(json.dumps(temp), content_type = "application/json") return HttpResponse(response.text, content_type = "application/json") @login_required() def runinterfacetrace(request): routerip = request.POST.get('sourceip') interfaceip = request.POST.get('sourceint') destip = request.POST.get('destip') vrf = request.POST.get('vrf') network = request.POST.get('network') vrfname = request.POST.get('vrfdropdown') baseurl = get_vars('ansibengineemc') emcvrfname=getvrflist('emc') if routerip == '' or interfaceip == '' or destip == '' or vrf == '' or vrfname == '' or network == '': return render(request, 'traceroute/inttraceroute.html', {'task': "task", 'emcvrf':emcvrfname,'message':"Please fill in all the details!!"}) if str(network).lower() == 'EMC'.lower(): baseurl = get_vars('ansibengineemc') else: baseurl = get_vars('ansibenginemtn') if vrf == 'True': vrf="True" else: vrf="False" return render(request, 'traceroute/runinterfacetraceroute.html', {'routerip': routerip, 'interfaceip':interfaceip, 'destip':destip,'vrfname': vrfname, 'vrf':vrf,'baseurl':baseurl}) @login_required() def runinterfacetraceapi(request): routerip = request.POST.get('routerip') interfaceip = request.POST.get('interfaceip') destip = request.POST.get('destip') vrf = request.POST.get('vrf') vrfname = request.POST.get('vrfname') baseurl = request.POST.get('baseurl') url = baseurl+'/ansibengine/api/v1.0/runinterfacetrace' headers = {'content-type': 'application/json'} temp= {} data= {} data['routerip']=routerip data['interfaceip']=interfaceip data['destip']=destip data['vrfname']=vrfname<|fim▁hole|> data['vrf']="False" try: response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t')) if not response.status_code == 201 : temp['value']="Error!! Unexpected response. Please report this" return HttpResponse(json.dumps(temp), content_type = "application/json") except requests.exceptions.RequestException as e: # return "Error: {}".format(e) temp['value']="Error connecting to API. Please report this" return HttpResponse(json.dumps(temp), content_type = "application/json") return HttpResponse(response.text, content_type = "application/json") ##deprecated method @login_required() def gettraceroute(request): sourceip = request.POST.get('sourceip') destip = request.POST.get('destip') vrf = request.POST.get('vrf') network = request.POST.get('network') vrfname = request.POST.get('vrfdropdown') baseurl = get_vars('ansibengineemc') if str(network).lower() == 'EMC'.lower(): baseurl = get_vars('ansibengineemc') else: baseurl = get_vars('ansibenginemtn') url = baseurl+'/ansibengine/api/v1.0/gettraceroute' headers = {'content-type': 'application/json'} emcvrfname=getvrflist('emc') if vrf is True: data= {} data['sourceip']=sourceip data['destip']=destip data['vrf']="True" data['vrfname']=vrfname response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t')) statuscode = response.status_code if int(statuscode) == 200: return render(request, 'traceroute/traceroute.html', {'task': "task", 'emcvrf':emcvrfname, 'message':"Another task is running! Please wait.."}) else: data= {} data['sourceip']=sourceip data['destip']=destip data['vrf']="False" data['vrfname']=vrfname response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t')) statuscode = response.status_code if int(statuscode) == 200: return render(request, 'traceroute/traceroute.html', {'task': "task", 'emcvrf':emcvrfname, 'message':"Another task is running! 
Please wait.."}) return render(request, 'traceroute/runtraceroute.html', {'task': "task",'baseurl':baseurl}) ##deprecated method @login_required() def getinterfacetraceroute(request): routerip = request.POST.get('sourceip') interfaceip = request.POST.get('sourceint') destip = request.POST.get('destip') vrf = request.POST.get('vrf') network = request.POST.get('network') vrfname = request.POST.get('vrfdropdown') baseurl = get_vars('ansibengineemc') if network.lower() == 'EMC'.lower(): baseurl = get_vars('ansibengineemc') else: baseurl = get_vars('ansibenginemtn') url = baseurl+'/ansibengine/api/v1.0/getinterfacetraceroute' headers = {'content-type': 'application/json'} emcvrfname=getvrflist('emc') if vrf is True: data= {} data['routerip']=routerip data['interfaceip']=interfaceip data['destip']=destip data['vrf']="True" data['vrfname']=vrfname response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t')) statuscode = response.status_code if int(statuscode) == 200: return render(request, 'traceroute/inttraceroute.html', {'task': "task", 'emcvrf':emcvrfname, 'message':"Another task is running! Please wait.."}) else: data= {} data['routerip']=routerip data['interfaceip']=interfaceip data['destip']=destip data['vrf']="False" data['vrfname']=vrfname response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t')) statuscode = response.status_code if int(statuscode) == 200: return render(request, 'traceroute/inttraceroute.html', {'task': "task", 'emcvrf':emcvrfname, 'message':"Another task is running! Please wait.."}) return render(request, 'traceroute/runinterfacetraceroute.html', {'task': "task",'baseurl':baseurl}) ##deprecated method def runtraceroute(request): baseurl = get_vars('ansibengineemc') if request.method == 'POST': baseurl = request.POST.get('baseurl') # if request.method == 'POST': # baseurl = request.POST.get('baseurl') url = baseurl+'/ansibengine/api/v1.0/runtraceroute' headers = {'content-type': 'application/json'} data= {} data['value']="some" data['ipath']='new value' response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t')) return HttpResponse(response.text, content_type = "application/json") ##deprecated method def runinterfacetraceroute(request): baseurl = get_vars('ansibengineemc') if request.method == 'POST': baseurl = request.POST.get('baseurl') # if request.method == 'POST': # baseurl = request.POST.get('baseurl') url = baseurl+'/ansibengine/api/v1.0/runinterfacetraceroute' headers = {'content-type': 'application/json'} data= {} data['value']=url response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t')) return HttpResponse(response.text, content_type = "application/json") # task = get_object_or_404(Task, status=Task.ACTIVE) # return render(request, 'traceroute/runtraceroute.html', {'task': "task"})<|fim▁end|>
if vrf == 'True': data['vrf']="True" else:
<|file_name|>convert_weights_to_keras.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys sys.path.insert(0,'..') import tensorflow as tf import numpy as np import itertools import pickle import os import re import inception_v4 os.environ['CUDA_VISIBLE_DEVICES'] = '' def atoi(text): return int(text) if text.isdigit() else text def natural_keys(myobject): return [ atoi(c) for c in re.split('(\d+)', myobject.name) ] def setWeights(layers, weights): for index, layer in enumerate(layers): if "dense" in layer.name: continue layer.set_weights(weights[index]) print(layer.name + " weights have been set!") print("Finished Setting Weights!") def get_layers(model): # Get Trainable layers layers = model.layers layers.sort(key=natural_keys) result = [] for i in range(len(layers)): try: layer = model.layers[i] if layer.trainable: bad = ["pooling", "flatten", "dropout", "activation", "concatenate"] if not any(word in layer.name for word in bad): result.append(layer) except: continue<|fim▁hole|> out_prep = [list(elem) for elem in res_zipped] out = out_prep + [[fn]] return out if __name__ == "__main__": model = inception_v4.create_model() with open('weights.p', 'rb') as fp: weights = pickle.load(fp) # Get layers to set layers = get_layers(model) layers = list(itertools.chain.from_iterable(layers)) # Set the layer weights setWeights(layers, weights) # Save model weights in h5 format model.save_weights("../weights/inception-v4_weights_tf_dim_ordering_tf_kernels_notop.h5") print("Finished saving weights in h5 format")<|fim▁end|>
bn,cv,fn=result[:int((len(result)-1)/2)],result[int((len(result)-1)/2):],result[-1] res_zipped = zip(cv, bn)
<|file_name|>ManufacturingData.java<|end_file_name|><|fim▁begin|>package model.dataModels; /** * Class ManufacturingData. * @author Daniel * */ public class ManufacturingData { private String customerNumber; private String materialNumber; private String orderNumber; private String timeStamp; private MachineData[] machineData; private SpectralAnalysisData analysisData; /** * Constructor. */ public ManufacturingData() {} /** * Creates a string representation * of this object. * @return */ @Override public String toString() { return customerNumber + " " + materialNumber + " " + orderNumber + " " + timeStamp + " " + machineData + " " + analysisData; } /* * Getters and Setters. */ /** * Adds erp data. * @param data */ public void setErpData(ErpData data) { this.customerNumber = data.getCustomerNumber(); this.materialNumber = data.getMaterialNumber(); this.orderNumber = data.getOrderNumber(); this.timeStamp = data.getTimeStamp(); } /** * Appends machine data to the array. * @param data */ public void appendMachineData(MachineData data) { if(this.machineData == null) {<|fim▁hole|> int length = this.machineData.length; MachineData[] temp = new MachineData[length + 1]; for(int i = 0; i < length; i++) { temp[i] = this.machineData[i]; } temp[length] = data; this.machineData = temp; } } /** * Adds spectral analysis data. * @param analysisData */ public void setAnalysisData(SpectralAnalysisData analysisData) { this.analysisData = analysisData; } public String getCustomerNumber() { return customerNumber; } public String getMaterialNumber() { return materialNumber; } public String getOrderNumber() { return orderNumber; } public String getTimeStamp() { return timeStamp; } public MachineData[] getMachineData() { return machineData; } public SpectralAnalysisData getAnalysisData() { return analysisData; } }<|fim▁end|>
this.machineData = new MachineData[1]; machineData[0] = data; } else {
<|file_name|>test_cookies.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright 2017-2022 Mike Fährmann # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. import os import sys import unittest from unittest import mock import time import logging import tempfile from os.path import join sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from gallery_dl import config, extractor # noqa E402 class TestCookiejar(unittest.TestCase): @classmethod def setUpClass(cls): cls.path = tempfile.TemporaryDirectory() cls.cookiefile = join(cls.path.name, "cookies.txt")<|fim▁hole|> cls.invalid_cookiefile = join(cls.path.name, "invalid.txt") with open(cls.invalid_cookiefile, "w") as file: file.write("""# asd .example.org\tTRUE/FALSE\t253402210800\tNAME\tVALUE """) @classmethod def tearDownClass(cls): cls.path.cleanup() config.clear() def test_cookiefile(self): config.set((), "cookies", self.cookiefile) cookies = extractor.find("test:").session.cookies self.assertEqual(len(cookies), 1) cookie = next(iter(cookies)) self.assertEqual(cookie.domain, ".example.org") self.assertEqual(cookie.path , "/") self.assertEqual(cookie.name , "NAME") self.assertEqual(cookie.value , "VALUE") def test_invalid_cookiefile(self): self._test_warning(self.invalid_cookiefile, ValueError) def test_invalid_filename(self): self._test_warning(join(self.path.name, "nothing"), FileNotFoundError) def _test_warning(self, filename, exc): config.set((), "cookies", filename) log = logging.getLogger("test") with mock.patch.object(log, "warning") as mock_warning: cookies = extractor.find("test:").session.cookies self.assertEqual(len(cookies), 0) self.assertEqual(mock_warning.call_count, 1) self.assertEqual(mock_warning.call_args[0][0], "cookies: %s") self.assertIsInstance(mock_warning.call_args[0][1], exc) class TestCookiedict(unittest.TestCase): def setUp(self): self.cdict = {"NAME1": "VALUE1", "NAME2": "VALUE2"} config.set((), "cookies", self.cdict) def tearDown(self): config.clear() def test_dict(self): cookies = extractor.find("test:").session.cookies self.assertEqual(len(cookies), len(self.cdict)) self.assertEqual(sorted(cookies.keys()), sorted(self.cdict.keys())) self.assertEqual(sorted(cookies.values()), sorted(self.cdict.values())) def test_domain(self): for category in ["exhentai", "idolcomplex", "nijie", "seiga"]: extr = _get_extractor(category) cookies = extr.session.cookies for key in self.cdict: self.assertTrue(key in cookies) for c in cookies: self.assertEqual(c.domain, extr.cookiedomain) class TestCookieLogin(unittest.TestCase): def tearDown(self): config.clear() def test_cookie_login(self): extr_cookies = { "exhentai" : ("ipb_member_id", "ipb_pass_hash"), "idolcomplex": ("login", "pass_hash"), "nijie" : ("nemail", "nlogin"), "seiga" : ("user_session",), } for category, cookienames in extr_cookies.items(): cookies = {name: "value" for name in cookienames} config.set((), "cookies", cookies) extr = _get_extractor(category) with mock.patch.object(extr, "_login_impl") as mock_login: extr.login() mock_login.assert_not_called() class TestCookieUtils(unittest.TestCase): def test_check_cookies(self): extr = extractor.find("test:") self.assertFalse(extr._cookiejar, "empty") self.assertFalse(extr.cookiedomain, "empty") # always returns False when checking for empty cookie list self.assertFalse(extr._check_cookies(())) 
self.assertFalse(extr._check_cookies(("a",))) self.assertFalse(extr._check_cookies(("a", "b"))) self.assertFalse(extr._check_cookies(("a", "b", "c"))) extr._cookiejar.set("a", "1") self.assertTrue(extr._check_cookies(("a",))) self.assertFalse(extr._check_cookies(("a", "b"))) self.assertFalse(extr._check_cookies(("a", "b", "c"))) extr._cookiejar.set("b", "2") self.assertTrue(extr._check_cookies(("a",))) self.assertTrue(extr._check_cookies(("a", "b"))) self.assertFalse(extr._check_cookies(("a", "b", "c"))) def test_check_cookies_domain(self): extr = extractor.find("test:") self.assertFalse(extr._cookiejar, "empty") extr.cookiedomain = ".example.org" self.assertFalse(extr._check_cookies(("a",))) self.assertFalse(extr._check_cookies(("a", "b"))) extr._cookiejar.set("a", "1") self.assertFalse(extr._check_cookies(("a",))) extr._cookiejar.set("a", "1", domain=extr.cookiedomain) self.assertTrue(extr._check_cookies(("a",))) extr._cookiejar.set("a", "1", domain="www" + extr.cookiedomain) self.assertEqual(len(extr._cookiejar), 3) self.assertTrue(extr._check_cookies(("a",))) extr._cookiejar.set("b", "2", domain=extr.cookiedomain) extr._cookiejar.set("c", "3", domain=extr.cookiedomain) self.assertTrue(extr._check_cookies(("a", "b", "c"))) def test_check_cookies_expires(self): extr = extractor.find("test:") self.assertFalse(extr._cookiejar, "empty") self.assertFalse(extr.cookiedomain, "empty") now = int(time.time()) log = logging.getLogger("test") extr._cookiejar.set("a", "1", expires=now-100) with mock.patch.object(log, "warning") as mw: self.assertFalse(extr._check_cookies(("a",))) self.assertEqual(mw.call_count, 1) self.assertEqual(mw.call_args[0], ("Cookie '%s' has expired", "a")) extr._cookiejar.set("a", "1", expires=now+100) with mock.patch.object(log, "warning") as mw: self.assertFalse(extr._check_cookies(("a",))) self.assertEqual(mw.call_count, 1) self.assertEqual(mw.call_args[0], ( "Cookie '%s' will expire in less than %s hour%s", "a", 1, "")) extr._cookiejar.set("a", "1", expires=now+100+7200) with mock.patch.object(log, "warning") as mw: self.assertFalse(extr._check_cookies(("a",))) self.assertEqual(mw.call_count, 1) self.assertEqual(mw.call_args[0], ( "Cookie '%s' will expire in less than %s hour%s", "a", 3, "s")) extr._cookiejar.set("a", "1", expires=now+100+24*3600) with mock.patch.object(log, "warning") as mw: self.assertTrue(extr._check_cookies(("a",))) self.assertEqual(mw.call_count, 0) def _get_extractor(category): for extr in extractor.extractors(): if extr.category == category and hasattr(extr, "_login_impl"): url = next(extr._get_tests())[0] return extr.from_url(url) if __name__ == "__main__": unittest.main()<|fim▁end|>
with open(cls.cookiefile, "w") as file: file.write("""# HTTP Cookie File .example.org\tTRUE\t/\tFALSE\t253402210800\tNAME\tVALUE """)
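The completion above fills the hole in setUpClass with a Netscape-format cookie file: after the "# HTTP Cookie File" header, each record is a single line of seven tab-separated fields — domain, include-subdomains flag, path, secure flag, expiry in epoch seconds, name, and value. The invalid file in the same test fuses two of those fields into "TRUE/FALSE", leaving six, which is why a ValueError is the expected failure. As a hedged illustration of the format (gallery-dl ships its own loader; the stdlib class below merely reads the same layout, and its LoadError is a ValueError subclass):

from http.cookiejar import MozillaCookieJar

jar = MozillaCookieJar("cookies.txt")
jar.load()  # raises http.cookiejar.LoadError on a malformed line
for cookie in jar:
    print(cookie.domain, cookie.path, cookie.name, cookie.value)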
<|file_name|>presence.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="uk" sourcelanguage="en" version="2.0"> <context> <name>Presence</name> <message> <source>XMPP stream closed unexpectedly</source> <translation>XMPP-потік несподівано завершився</translation><|fim▁hole|> <message> <source>Presence Manager</source> <translation type="unfinished"/> </message> <message> <source>Allows other modules to obtain information about the status of contacts in the roster</source> <translation type="unfinished"/> </message> </context> </TS><|fim▁end|>
</message> </context> <context> <name>PresenceManager</name>
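Despite the .ts extension, presence.ts is a Qt Linguist translation file, not TypeScript: each <context> groups <message> entries whose <source> holds the English string and whose <translation> holds the Ukrainian one, with type="unfinished" marking strings still awaiting translation (the completion above supplies the closing tags and the PresenceManager context). A short sketch of reading such a file with the Python standard library, assuming only the structure visible above:

import xml.etree.ElementTree as ET

root = ET.parse("presence.ts").getroot()
for context in root.iter("context"):
    name = context.findtext("name")
    for message in context.iter("message"):
        source = message.findtext("source")
        # An empty <translation type="unfinished"/> yields an empty string.
        translation = message.findtext("translation") or "(unfinished)"
        print(f"[{name}] {source!r} -> {translation!r}")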
<|file_name|>global_entry.rs<|end_file_name|><|fim▁begin|>use std::fmt::Debug; use crate::utils::*; use crate::wasm::*; #[derive(Debug)] pub struct GlobalEntry { content_type: DataTypes, mutable: bool, body: FunctionBody, } impl GlobalEntry { pub fn from<T: std::io::Read>(f: &mut T) -> Result<Self, WasmError><|fim▁hole|> { let content_type = DataTypes::from(f)?; let mutable = read_uleb128(f) .map_err(WasmError::IOError)?.0 == 1; let body = FunctionBody::from(f)?; Ok(GlobalEntry{ content_type, mutable, body }) } }<|fim▁end|>
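The GlobalEntry parser above leans on a read_uleb128 helper whose body is not shown; judging from the call site, it returns a Result whose Ok value is a tuple with the decoded integer first (hence the ?.0 access), matching how WebAssembly encodes its unsigned integers. LEB128 itself is standard: each byte carries seven value bits, least-significant group first, and a set high bit means another byte follows. A Python sketch under those assumptions:

import io

def read_uleb128(stream):
    # Decode one unsigned LEB128 integer; returns (value, bytes_consumed).
    value, shift, count = 0, 0, 0
    while True:
        chunk = stream.read(1)
        if not chunk:
            raise EOFError("truncated ULEB128 value")
        count += 1
        value |= (chunk[0] & 0x7F) << shift
        if chunk[0] & 0x80 == 0:  # high bit clear: this was the last byte
            return value, count
        shift += 7

assert read_uleb128(io.BytesIO(b"\xe5\x8e\x26")) == (624485, 3)  # classic LEB128 example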
<|file_name|>regions-fn-subtyping.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. fn of<T>() -> @fn(T) { fail!(); } fn subtype<T>(x: @fn(T)) { fail!(); } fn test_fn<'x,'y,'z,T>(_x: &'x T, _y: &'y T, _z: &'z T) { // Here, x, y, and z are free. Other letters // are bound. Note that the arrangement // subtype::<T1>(of::<T2>()) will typecheck // iff T1 <: T2. subtype::<&fn<'a>(&'a T)>( of::<&fn<'a>(&'a T)>()); subtype::<&fn<'a>(&'a T)>( of::<&fn<'b>(&'b T)>()); subtype::<&fn<'b>(&'b T)>( of::<&fn<'x>(&'x T)>()); subtype::<&fn<'x>(&'x T)>(<|fim▁hole|> subtype::<&fn<'a>(&'a T, &'a T)>( of::<&fn<'a,'b>(&'a T, &'b T)>()); //~ ERROR mismatched types subtype::<&fn<'a,'b>(&'a T, &'b T)>( of::<&fn<'x,'y>(&'x T, &'y T)>()); subtype::<&fn<'x,'y>(&'x T, &'y T)>( of::<&fn<'a,'b>(&'a T, &'b T)>()); //~ ERROR mismatched types subtype::<&fn<'x,'a>(&'x T) -> @fn(&'a T)>( of::<&fn<'x,'a>(&'x T) -> @fn(&'a T)>()); subtype::<&fn<'a>(&'a T) -> @fn(&'a T)>( of::<&fn<'a,'b>(&'a T) -> @fn(&'b T)>()); //~ ERROR mismatched types subtype::<&fn<'a>(&'a T) -> @fn(&'a T)>( of::<&fn<'x,'b>(&'x T) -> @fn(&'b T)>()); //~ ERROR mismatched types subtype::<&fn<'a,'b>(&'a T) -> @fn(&'b T)>( of::<&fn<'a>(&'a T) -> @fn(&'a T)>()); } fn main() {}<|fim▁end|>
of::<&fn<'b>(&'b T)>()); //~ ERROR mismatched types subtype::<&fn<'a,'b>(&'a T, &'b T)>( of::<&fn<'a>(&'a T, &'a T)>());
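This compile-test encodes the variance rules the 2013-era borrow checker applied to function types: as the in-file comment says, subtype::<T1>(of::<T2>()) typechecks exactly when T1 is a subtype of T2, and the //~ ERROR annotations in the prompt and the completion mark the substitutions that must be rejected. The same principle — functions are contravariant in their argument types — survives in modern type systems; a loosely analogous Python sketch (illustrative only, not a model of Rust lifetimes):

from typing import Callable

class Animal: ...
class Dog(Animal): ...

def handle_any_animal(a: Animal) -> None:
    print("handled", type(a).__name__)

def needs_dog_handler(handler: Callable[[Dog], None]) -> None:
    handler(Dog())

# Contravariance: a handler that accepts any Animal is a valid Dog handler,
# so a static checker accepts this call; the reverse substitution would be rejected.
needs_dog_handler(handle_any_animal)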
<|file_name|>streamio-consumer.js<|end_file_name|><|fim▁begin|>var stream = require('getstream');

// Instantiate a new client (server side)
var client = stream.connect('9jtyb249ejzp', 'jnbe82eryq4qvquj4wn5dc8nh85bry33jpbmu84jn58xc3uk4y697xke4rcz9kyk', '24985');

// Instantiate a new client (client side)
//client = stream.connect('9jtyb249ejzp', null, '24985');<|fim▁hole|>var p1 = client.feed('project_aggregated', 'd0bbf3d8-c6da-460e-b7cf-ab99b79e9986');

// Read 'timeline' for jack - the post by chris will show up:
p1.get({ limit: 10 }).then(function(results) {
  var activityData = results;
  console.log('Activity data:', activityData);

  // Read the next page, using id filtering for optimal performance:
  p1.get({ limit: 10, id_lte: activityData[activityData.length-1].id }).then(function(results) {
    var nextActivityData = results;
    console.log('Next page activity data:', nextActivityData);
  });
});<|fim▁end|>
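The consumer above pages through a feed by handing the last seen activity id back as id_lte. Because an "lte" bound is inclusive, the boundary activity comes back again at the top of the next page; a strictly-less id_lt cursor avoids the duplicate. A hedged Python sketch of the loop — feed.get here is hypothetical and only mirrors the call shape used above:

def iter_activities(feed, page_size=10):
    # `feed.get` is assumed to return a list of activity dicts,
    # mirroring how the consumer above indexes its results.
    page = feed.get(limit=page_size)
    while page:
        for activity in page:
            yield activity
        # Strictly-older cursor (id_lt) so the boundary item is not refetched,
        # which an inclusive id_lte bound would do.
        page = feed.get(limit=page_size, id_lt=page[-1]["id"])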
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import include, url<|fim▁hole|>import accounts.urls from . import views urlpatterns = [ url(r'^$', views.HomePage.as_view(), name='home'), url(r'^about/$', views.AboutPage.as_view(), name='about'), url(r'^users/', include(profiles.urls, namespace='profiles')), url(r'^admin/', include(admin.site.urls)), url(r'^', include(accounts.urls, namespace='accounts')), url(r'^apis/', include('api.urls', namespace='api')), ] urlpatterns += [ url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), ] urlpatterns += [ url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}), ]<|fim▁end|>
from django.contrib import admin from django.conf import settings from django.conf.urls.static import static import profiles.urls
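The completion supplies the imports the hole elided: admin for the admin site, settings and the static helper for serving static files, and profiles.urls for the namespaced include. Since the file uses the pre-2.0 django.conf.urls.url API, reversing its named, namespaced routes would look like the sketch below — only 'home' and 'about' are named in this file, so the namespaced route names are assumptions:

from django.core.urlresolvers import reverse  # pre-Django-2.0 import path

home_url = reverse("home")
about_url = reverse("about")
# Namespaced routes are addressed as "<namespace>:<name>"; these names are
# hypothetical, as accounts.urls and profiles.urls are not shown:
login_url = reverse("accounts:login")
profile_url = reverse("profiles:detail", args=["alice"])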
<|file_name|>cgroup_path.go<|end_file_name|><|fim▁begin|>package bundlerules import ( "path/filepath" <|fim▁hole|> "code.cloudfoundry.org/guardian/rundmc/goci" ) type CGroupPath struct { Path string } func (r CGroupPath) Apply(bndl goci.Bndl, spec spec.DesiredContainerSpec, _ string) (goci.Bndl, error) { if spec.Privileged { return bndl, nil } if spec.CgroupPath != "" { return bndl.WithCGroupPath(filepath.Join(r.Path, spec.CgroupPath)), nil } return bndl.WithCGroupPath(filepath.Join(r.Path, spec.Handle)), nil }<|fim▁end|>
spec "code.cloudfoundry.org/guardian/gardener/container-spec"