prompt: large_string, lengths 70 to 991k
completion: large_string, lengths 0 to 1.02k
<|file_name|>index.js<|end_file_name|><|fim▁begin|>'use strict' const Buffer = require('safe-buffer').Buffer const crypto = require('crypto') const Transform = require('stream').Transform const SPEC_ALGORITHMS = ['sha256', 'sha384', 'sha512'] const BASE64_REGEX = /^[a-z0-9+/]+(?:=?=?)$/i const SRI_REGEX = /^([^-]+)-([^?]+)([?\S*]*)$/ const STRICT_SRI_REGEX = /^([^-]+)-([A-Za-z0-9+/=]{44,88})(\?[\x21-\x7E]*)*$/ const VCHAR_REGEX = /^[\x21-\x7E]+$/ class Hash { get isHash () { return true } constructor (hash, opts) { const strict = !!(opts && opts.strict) this.source = hash.trim() // 3.1. Integrity metadata (called "Hash" by ssri) // https://w3c.github.io/webappsec-subresource-integrity/#integrity-metadata-description const match = this.source.match( strict ? STRICT_SRI_REGEX : SRI_REGEX ) if (!match) { return } if (strict && !SPEC_ALGORITHMS.some(a => a === match[1])) { return } this.algorithm = match[1] this.digest = match[2] const rawOpts = match[3] this.options = rawOpts ? rawOpts.slice(1).split('?') : [] } hexDigest () { return this.digest && Buffer.from(this.digest, 'base64').toString('hex') } toJSON () { return this.toString() } toString (opts) { if (opts && opts.strict) { // Strict mode enforces the standard as close to the foot of the // letter as it can. if (!( // The spec has very restricted productions for algorithms. // https://www.w3.org/TR/CSP2/#source-list-syntax SPEC_ALGORITHMS.some(x => x === this.algorithm) && // Usually, if someone insists on using a "different" base64, we // leave it as-is, since there's multiple standards, and the // specified is not a URL-safe variant. // https://www.w3.org/TR/CSP2/#base64_value this.digest.match(BASE64_REGEX) && // Option syntax is strictly visual chars. // https://w3c.github.io/webappsec-subresource-integrity/#grammardef-option-expression // https://tools.ietf.org/html/rfc5234#appendix-B.1 (this.options || []).every(opt => opt.match(VCHAR_REGEX)) )) { return '' } } const options = this.options && this.options.length ? `?${this.options.join('?')}` : '' return `${this.algorithm}-${this.digest}${options}` } } class Integrity { get isIntegrity () { return true } toJSON () { return this.toString() } toString (opts) { opts = opts || {} let sep = opts.sep || ' ' if (opts.strict) { // Entries must be separated by whitespace, according to spec. sep = sep.replace(/\S+/g, ' ') } return Object.keys(this).map(k => { return this[k].map(hash => { return Hash.prototype.toString.call(hash, opts) }).filter(x => x.length).join(sep) }).filter(x => x.length).join(sep) } concat (integrity, opts) { const other = typeof integrity === 'string' ? 
integrity : stringify(integrity, opts) return parse(`${this.toString(opts)} ${other}`, opts) } hexDigest () { return parse(this, {single: true}).hexDigest() } match (integrity, opts) { const other = parse(integrity, opts) const algo = other.pickAlgorithm(opts) return ( this[algo] && other[algo] && this[algo].find(hash => other[algo].find(otherhash => hash.digest === otherhash.digest ) ) ) || false } pickAlgorithm (opts) { const pickAlgorithm = (opts && opts.pickAlgorithm) || getPrioritizedHash const keys = Object.keys(this) if (!keys.length) { throw new Error(`No algorithms available for ${ JSON.stringify(this.toString()) }`) } return keys.reduce((acc, algo) => { return pickAlgorithm(acc, algo) || acc }) } } module.exports.parse = parse function parse (sri, opts) { opts = opts || {} if (typeof sri === 'string') { return _parse(sri, opts) } else if (sri.algorithm && sri.digest) { const fullSri = new Integrity() fullSri[sri.algorithm] = [sri] return _parse(stringify(fullSri, opts), opts) } else { return _parse(stringify(sri, opts), opts) } } function _parse (integrity, opts) { // 3.4.3. Parse metadata // https://w3c.github.io/webappsec-subresource-integrity/#parse-metadata if (opts.single) { return new Hash(integrity, opts) } return integrity.trim().split(/\s+/).reduce((acc, string) => { const hash = new Hash(string, opts) if (hash.algorithm && hash.digest) { const algo = hash.algorithm if (!acc[algo]) { acc[algo] = [] } acc[algo].push(hash) } return acc }, new Integrity()) } module.exports.stringify = stringify function stringify (obj, opts) { if (obj.algorithm && obj.digest) { return Hash.prototype.toString.call(obj, opts) } else if (typeof obj === 'string') { return stringify(parse(obj, opts), opts)<|fim▁hole|> } } module.exports.fromHex = fromHex function fromHex (hexDigest, algorithm, opts) { const optString = (opts && opts.options && opts.options.length) ? `?${opts.options.join('?')}` : '' return parse( `${algorithm}-${ Buffer.from(hexDigest, 'hex').toString('base64') }${optString}`, opts ) } module.exports.fromData = fromData function fromData (data, opts) { opts = opts || {} const algorithms = opts.algorithms || ['sha512'] const optString = opts.options && opts.options.length ? 
`?${opts.options.join('?')}` : '' return algorithms.reduce((acc, algo) => { const digest = crypto.createHash(algo).update(data).digest('base64') const hash = new Hash( `${algo}-${digest}${optString}`, opts ) if (hash.algorithm && hash.digest) { const algo = hash.algorithm if (!acc[algo]) { acc[algo] = [] } acc[algo].push(hash) } return acc }, new Integrity()) } module.exports.fromStream = fromStream function fromStream (stream, opts) { opts = opts || {} const P = opts.Promise || Promise const istream = integrityStream(opts) return new P((resolve, reject) => { stream.pipe(istream) stream.on('error', reject) istream.on('error', reject) let sri istream.on('integrity', s => { sri = s }) istream.on('end', () => resolve(sri)) istream.on('data', () => {}) }) } module.exports.checkData = checkData function checkData (data, sri, opts) { opts = opts || {} sri = parse(sri, opts) if (!Object.keys(sri).length) { if (opts.error) { throw Object.assign( new Error('No valid integrity hashes to check against'), { code: 'EINTEGRITY' } ) } else { return false } } const algorithm = sri.pickAlgorithm(opts) const digest = crypto.createHash(algorithm).update(data).digest('base64') const newSri = parse({algorithm, digest}) const match = newSri.match(sri, opts) if (match || !opts.error) { return match } else if (typeof opts.size === 'number' && (data.length !== opts.size)) { const err = new Error(`data size mismatch when checking ${sri}.\n Wanted: ${opts.size}\n Found: ${data.length}`) err.code = 'EBADSIZE' err.found = data.length err.expected = opts.size err.sri = sri throw err } else { const err = new Error(`Integrity checksum failed when using ${algorithm}: Wanted ${sri}, but got ${newSri}. (${data.length} bytes)`) err.code = 'EINTEGRITY' err.found = newSri err.expected = sri err.algorithm = algorithm err.sri = sri throw err } } module.exports.checkStream = checkStream function checkStream (stream, sri, opts) { opts = opts || {} const P = opts.Promise || Promise const checker = integrityStream(Object.assign({}, opts, { integrity: sri })) return new P((resolve, reject) => { stream.pipe(checker) stream.on('error', reject) checker.on('error', reject) let sri checker.on('verified', s => { sri = s }) checker.on('end', () => resolve(sri)) checker.on('data', () => {}) }) } module.exports.integrityStream = integrityStream function integrityStream (opts) { opts = opts || {} // For verification const sri = opts.integrity && parse(opts.integrity, opts) const goodSri = sri && Object.keys(sri).length const algorithm = goodSri && sri.pickAlgorithm(opts) const digests = goodSri && sri[algorithm] // Calculating stream const algorithms = Array.from( new Set( (opts.algorithms || ['sha512']) .concat(algorithm ? [algorithm] : []) ) ) const hashes = algorithms.map(crypto.createHash) let streamSize = 0 const stream = new Transform({ transform (chunk, enc, cb) { streamSize += chunk.length hashes.forEach(h => h.update(chunk, enc)) cb(null, chunk, enc) } }).on('end', () => { const optString = (opts.options && opts.options.length) ? 
`?${opts.options.join('?')}` : '' const newSri = parse(hashes.map((h, i) => { return `${algorithms[i]}-${h.digest('base64')}${optString}` }).join(' '), opts) // Integrity verification mode const match = goodSri && newSri.match(sri, opts) if (typeof opts.size === 'number' && streamSize !== opts.size) { const err = new Error(`stream size mismatch when checking ${sri}.\n Wanted: ${opts.size}\n Found: ${streamSize}`) err.code = 'EBADSIZE' err.found = streamSize err.expected = opts.size err.sri = sri stream.emit('error', err) } else if (opts.integrity && !match) { const err = new Error(`${sri} integrity checksum failed when using ${algorithm}: wanted ${digests} but got ${newSri}. (${streamSize} bytes)`) err.code = 'EINTEGRITY' err.found = newSri err.expected = digests err.algorithm = algorithm err.sri = sri stream.emit('error', err) } else { stream.emit('size', streamSize) stream.emit('integrity', newSri) match && stream.emit('verified', match) } }) return stream } module.exports.create = createIntegrity function createIntegrity (opts) { opts = opts || {} const algorithms = opts.algorithms || ['sha512'] const optString = opts.options && opts.options.length ? `?${opts.options.join('?')}` : '' const hashes = algorithms.map(crypto.createHash) return { update: function (chunk, enc) { hashes.forEach(h => h.update(chunk, enc)) return this }, digest: function (enc) { const integrity = algorithms.reduce((acc, algo) => { const digest = hashes.shift().digest('base64') const hash = new Hash( `${algo}-${digest}${optString}`, opts ) if (hash.algorithm && hash.digest) { const algo = hash.algorithm if (!acc[algo]) { acc[algo] = [] } acc[algo].push(hash) } return acc }, new Integrity()) return integrity } } } const NODE_HASHES = new Set(crypto.getHashes()) // This is a Best Effort™ at a reasonable priority for hash algos const DEFAULT_PRIORITY = [ 'md5', 'whirlpool', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', // TODO - it's unclear _which_ of these Node will actually use as its name // for the algorithm, so we guesswork it based on the OpenSSL names. 'sha3', 'sha3-256', 'sha3-384', 'sha3-512', 'sha3_256', 'sha3_384', 'sha3_512' ].filter(algo => NODE_HASHES.has(algo)) function getPrioritizedHash (algo1, algo2) { return DEFAULT_PRIORITY.indexOf(algo1.toLowerCase()) >= DEFAULT_PRIORITY.indexOf(algo2.toLowerCase()) ? algo1 : algo2 }<|fim▁end|>
} else { return Integrity.prototype.toString.call(obj, opts)
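
A minimal usage sketch of the integrity API implemented in the index.js sample above, assuming that file is packaged as the npm-style module `ssri`. The module name and the sample digest in the comments are illustrative assumptions; only `parse`, `stringify`, `fromData`, and `checkData` are taken from the exports visible in the sample.

'use strict'
const ssri = require('ssri') // assumed package name for the index.js above

// Compute Subresource Integrity metadata for a buffer; fromData returns
// an Integrity object keyed by algorithm.
const integrity = ssri.fromData(Buffer.from('hello world'), {
  algorithms: ['sha256', 'sha512']
})
// toString() emits one space-separated entry per algorithm, e.g.
// "sha256-uU0nuZNNPgilLlLX2n2r+sSE7+N6U4DukIj3rOLvzek= sha512-..."
console.log(integrity.toString())

// parse() and stringify() round-trip the metadata string.
const parsed = ssri.parse(integrity.toString())
console.log(ssri.stringify(parsed)) // same string as above

// checkData() returns the matching Hash on success, and false otherwise
// (or throws EINTEGRITY/EBADSIZE when called with { error: true }).
const ok = ssri.checkData(Buffer.from('hello world'), integrity)
console.log(ok ? `verified via ${ok.algorithm}` : 'integrity mismatch')
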
<|file_name|>plugins.go<|end_file_name|><|fim▁begin|>/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package volume import ( "fmt" "net" "strings" "sync" "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/types" utilerrors "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/io" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/validation" ) // VolumeOptions contains option information about a volume. type VolumeOptions struct { // The attributes below are required by volume.Provisioner // TODO: refactor all of this out of volumes when an admin can configure // many kinds of provisioners. // Capacity is the size of a volume. Capacity resource.Quantity // AccessModes of a volume AccessModes []api.PersistentVolumeAccessMode // Reclamation policy for a persistent volume PersistentVolumeReclaimPolicy api.PersistentVolumeReclaimPolicy // PV.Name of the appropriate PersistentVolume. Used to generate cloud // volume name. PVName string // PVC.Name of the PersistentVolumeClaim; only set during dynamic provisioning. PVCName string // Unique name of Kubernetes cluster. ClusterName string // Tags to attach to the real volume in the cloud provider - e.g. AWS EBS CloudTags *map[string]string // Volume provisioning parameters from StorageClass Parameters map[string]string // Volume selector from PersistentVolumeClaim Selector *unversioned.LabelSelector } // VolumePlugin is an interface to volume plugins that can be used on a // kubernetes node (e.g. by kubelet) to instantiate and manage volumes. type VolumePlugin interface { // Init initializes the plugin. This will be called exactly once // before any New* calls are made - implementations of plugins may // depend on this. Init(host VolumeHost) error // Name returns the plugin's name. Plugins should use namespaced names // such as "example.com/volume". The "kubernetes.io" namespace is // reserved for plugins which are bundled with kubernetes. GetPluginName() string // GetVolumeName returns the name/ID to uniquely identifying the actual // backing device, directory, path, etc. referenced by the specified volume // spec. // For Attachable volumes, this value must be able to be passed back to // volume Detach methods to identify the device to act on. // If the plugin does not support the given spec, this returns an error. GetVolumeName(spec *Spec) (string, error) // CanSupport tests whether the plugin supports a given volume // specification from the API. The spec pointer should be considered // const. CanSupport(spec *Spec) bool // RequiresRemount returns true if this plugin requires mount calls to be // reexecuted. Atomically updating volumes, like Downward API, depend on // this to update the contents of the volume. RequiresRemount() bool // NewMounter creates a new volume.Mounter from an API specification. 
// Ownership of the spec pointer in *not* transferred. // - spec: The api.Volume spec // - pod: The enclosing pod NewMounter(spec *Spec, podRef *api.Pod, opts VolumeOptions) (Mounter, error) // NewUnmounter creates a new volume.Unmounter from recoverable state. // - name: The volume name, as per the api.Volume spec. // - podUID: The UID of the enclosing pod NewUnmounter(name string, podUID types.UID) (Unmounter, error) // ConstructVolumeSpec constructs a volume spec based on the given volume name // and mountPath. The spec may have incomplete information due to limited // information from input. This function is used by volume manager to reconstruct // volume spec by reading the volume directories from disk ConstructVolumeSpec(volumeName, mountPath string) (*Spec, error) } // PersistentVolumePlugin is an extended interface of VolumePlugin and is used // by volumes that want to provide long term persistence of data type PersistentVolumePlugin interface { VolumePlugin // GetAccessModes describes the ways a given volume can be accessed/mounted. GetAccessModes() []api.PersistentVolumeAccessMode } // RecyclableVolumePlugin is an extended interface of VolumePlugin and is used // by persistent volumes that want to be recycled before being made available // again to new claims type RecyclableVolumePlugin interface { VolumePlugin // NewRecycler creates a new volume.Recycler which knows how to reclaim // this resource after the volume's release from a PersistentVolumeClaim NewRecycler(pvName string, spec *Spec) (Recycler, error) } // DeletableVolumePlugin is an extended interface of VolumePlugin and is used // by persistent volumes that want to be deleted from the cluster after their // release from a PersistentVolumeClaim. type DeletableVolumePlugin interface { VolumePlugin // NewDeleter creates a new volume.Deleter which knows how to delete this // resource in accordance with the underlying storage provider after the // volume's release from a claim NewDeleter(spec *Spec) (Deleter, error) } const ( // Name of a volume in external cloud that is being provisioned and thus // should be ignored by rest of Kubernetes. ProvisionedVolumeName = "placeholder-for-provisioning" ) // ProvisionableVolumePlugin is an extended interface of VolumePlugin and is // used to create volumes for the cluster. type ProvisionableVolumePlugin interface { VolumePlugin // NewProvisioner creates a new volume.Provisioner which knows how to // create PersistentVolumes in accordance with the plugin's underlying // storage provider NewProvisioner(options VolumeOptions) (Provisioner, error) } // AttachableVolumePlugin is an extended interface of VolumePlugin and is used for volumes that require attachment // to a node before mounting. type AttachableVolumePlugin interface { VolumePlugin NewAttacher() (Attacher, error) NewDetacher() (Detacher, error) GetDeviceMountRefs(deviceMountPath string) ([]string, error) } // VolumeHost is an interface that plugins can use to access the kubelet. type VolumeHost interface { // GetPluginDir returns the absolute path to a directory under which // a given plugin may store data. This directory might not actually // exist on disk yet. For plugin data that is per-pod, see // GetPodPluginDir(). GetPluginDir(pluginName string) string // GetPodVolumeDir returns the absolute path a directory which // represents the named volume under the named plugin for the given // pod. If the specified pod does not exist, the result of this call // might not exist. 
GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string // GetPodPluginDir returns the absolute path to a directory under which // a given plugin may store data for a given pod. If the specified pod // does not exist, the result of this call might not exist. This // directory might not actually exist on disk yet. GetPodPluginDir(podUID types.UID, pluginName string) string // GetKubeClient returns a client interface GetKubeClient() clientset.Interface // NewWrapperMounter finds an appropriate plugin with which to handle // the provided spec. This is used to implement volume plugins which // "wrap" other plugins. For example, the "secret" volume is // implemented in terms of the "emptyDir" volume. NewWrapperMounter(volName string, spec Spec, pod *api.Pod, opts VolumeOptions) (Mounter, error) // NewWrapperUnmounter finds an appropriate plugin with which to handle // the provided spec. See comments on NewWrapperMounter for more // context. NewWrapperUnmounter(volName string, spec Spec, podUID types.UID) (Unmounter, error) // Get cloud provider from kubelet. GetCloudProvider() cloudprovider.Interface // Get mounter interface. GetMounter() mount.Interface // Get writer interface for writing data to disk. GetWriter() io.Writer // Returns the hostname of the host kubelet is running on GetHostName() string // Returns host IP or nil in the case of error. GetHostIP() (net.IP, error) // Returns the rootcontext to use when performing mounts for a volume. // This is a temporary measure in order to set the rootContext of tmpfs<|fim▁hole|> // Returns node allocatable GetNodeAllocatable() (api.ResourceList, error) } // VolumePluginMgr tracks registered plugins. type VolumePluginMgr struct { mutex sync.Mutex plugins map[string]VolumePlugin } // Spec is an internal representation of a volume. All API volume types translate to Spec. type Spec struct { Volume *api.Volume PersistentVolume *api.PersistentVolume ReadOnly bool } // Name returns the name of either Volume or PersistentVolume, one of which must not be nil. func (spec *Spec) Name() string { switch { case spec.Volume != nil: return spec.Volume.Name case spec.PersistentVolume != nil: return spec.PersistentVolume.Name default: return "" } } // VolumeConfig is how volume plugins receive configuration. An instance // specific to the plugin will be passed to the plugin's // ProbeVolumePlugins(config) func. Reasonable defaults will be provided by // the binary hosting the plugins while allowing override of those default // values. Those config values are then set to an instance of VolumeConfig // and passed to the plugin. // // Values in VolumeConfig are intended to be relevant to several plugins, but // not necessarily all plugins. The preference is to leverage strong typing // in this struct. All config items must have a descriptive but non-specific // name (i.e, RecyclerMinimumTimeout is OK but RecyclerMinimumTimeoutForNFS is // !OK). An instance of config will be given directly to the plugin, so // config names specific to plugins are unneeded and wrongly expose plugins in // this VolumeConfig struct. // // OtherAttributes is a map of string values intended for one-off // configuration of a plugin or config that is only relevant to a single // plugin. All values are passed by string and require interpretation by the // plugin. Passing config as strings is the least desirable option but can be // used for truly one-off configuration. 
The binary should still use strong // typing for this value when binding CLI values before they are passed as // strings in OtherAttributes. type VolumeConfig struct { // RecyclerPodTemplate is pod template that understands how to scrub clean // a persistent volume after its release. The template is used by plugins // which override specific properties of the pod in accordance with that // plugin. See NewPersistentVolumeRecyclerPodTemplate for the properties // that are expected to be overridden. RecyclerPodTemplate *api.Pod // RecyclerMinimumTimeout is the minimum amount of time in seconds for the // recycler pod's ActiveDeadlineSeconds attribute. Added to the minimum // timeout is the increment per Gi of capacity. RecyclerMinimumTimeout int // RecyclerTimeoutIncrement is the number of seconds added to the recycler // pod's ActiveDeadlineSeconds for each Gi of capacity in the persistent // volume. Example: 5Gi volume x 30s increment = 150s + 30s minimum = 180s // ActiveDeadlineSeconds for recycler pod RecyclerTimeoutIncrement int // PVName is name of the PersistentVolume instance that is being recycled. // It is used to generate unique recycler pod name. PVName string // OtherAttributes stores config as strings. These strings are opaque to // the system and only understood by the binary hosting the plugin and the // plugin itself. OtherAttributes map[string]string // ProvisioningEnabled configures whether provisioning of this plugin is // enabled or not. Currently used only in host_path plugin. ProvisioningEnabled bool } // NewSpecFromVolume creates an Spec from an api.Volume func NewSpecFromVolume(vs *api.Volume) *Spec { return &Spec{ Volume: vs, } } // NewSpecFromPersistentVolume creates an Spec from an api.PersistentVolume func NewSpecFromPersistentVolume(pv *api.PersistentVolume, readOnly bool) *Spec { return &Spec{ PersistentVolume: pv, ReadOnly: readOnly, } } // InitPlugins initializes each plugin. All plugins must have unique names. // This must be called exactly once before any New* methods are called on any // plugins. func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, host VolumeHost) error { pm.mutex.Lock() defer pm.mutex.Unlock() if pm.plugins == nil { pm.plugins = map[string]VolumePlugin{} } allErrs := []error{} for _, plugin := range plugins { name := plugin.GetPluginName() if errs := validation.IsQualifiedName(name); len(errs) != 0 { allErrs = append(allErrs, fmt.Errorf("volume plugin has invalid name: %q: %s", name, strings.Join(errs, ";"))) continue } if _, found := pm.plugins[name]; found { allErrs = append(allErrs, fmt.Errorf("volume plugin %q was registered more than once", name)) continue } err := plugin.Init(host) if err != nil { glog.Errorf("Failed to load volume plugin %s, error: %s", plugin, err.Error()) allErrs = append(allErrs, err) continue } pm.plugins[name] = plugin glog.V(1).Infof("Loaded volume plugin %q", name) } return utilerrors.NewAggregate(allErrs) } // FindPluginBySpec looks for a plugin that can support a given volume // specification. If no plugins can support or more than one plugin can // support it, return error. 
func (pm *VolumePluginMgr) FindPluginBySpec(spec *Spec) (VolumePlugin, error) { pm.mutex.Lock() defer pm.mutex.Unlock() matches := []string{} for k, v := range pm.plugins { if v.CanSupport(spec) { matches = append(matches, k) } } if len(matches) == 0 { return nil, fmt.Errorf("no volume plugin matched") } if len(matches) > 1 { return nil, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matches, ",")) } return pm.plugins[matches[0]], nil } // FindPluginByName fetches a plugin by name or by legacy name. If no plugin // is found, returns error. func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) { pm.mutex.Lock() defer pm.mutex.Unlock() // Once we can get rid of legacy names we can reduce this to a map lookup. matches := []string{} for k, v := range pm.plugins { if v.GetPluginName() == name { matches = append(matches, k) } } if len(matches) == 0 { return nil, fmt.Errorf("no volume plugin matched") } if len(matches) > 1 { return nil, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matches, ",")) } return pm.plugins[matches[0]], nil } // FindPersistentPluginBySpec looks for a persistent volume plugin that can // support a given volume specification. If no plugin is found, return an // error func (pm *VolumePluginMgr) FindPersistentPluginBySpec(spec *Spec) (PersistentVolumePlugin, error) { volumePlugin, err := pm.FindPluginBySpec(spec) if err != nil { return nil, fmt.Errorf("Could not find volume plugin for spec: %#v", spec) } if persistentVolumePlugin, ok := volumePlugin.(PersistentVolumePlugin); ok { return persistentVolumePlugin, nil } return nil, fmt.Errorf("no persistent volume plugin matched") } // FindPersistentPluginByName fetches a persistent volume plugin by name. If // no plugin is found, returns error. func (pm *VolumePluginMgr) FindPersistentPluginByName(name string) (PersistentVolumePlugin, error) { volumePlugin, err := pm.FindPluginByName(name) if err != nil { return nil, err } if persistentVolumePlugin, ok := volumePlugin.(PersistentVolumePlugin); ok { return persistentVolumePlugin, nil } return nil, fmt.Errorf("no persistent volume plugin matched") } // FindRecyclablePluginByName fetches a persistent volume plugin by name. If // no plugin is found, returns error. func (pm *VolumePluginMgr) FindRecyclablePluginBySpec(spec *Spec) (RecyclableVolumePlugin, error) { volumePlugin, err := pm.FindPluginBySpec(spec) if err != nil { return nil, err } if recyclableVolumePlugin, ok := volumePlugin.(RecyclableVolumePlugin); ok { return recyclableVolumePlugin, nil } return nil, fmt.Errorf("no recyclable volume plugin matched") } // FindProvisionablePluginByName fetches a persistent volume plugin by name. If // no plugin is found, returns error. func (pm *VolumePluginMgr) FindProvisionablePluginByName(name string) (ProvisionableVolumePlugin, error) { volumePlugin, err := pm.FindPluginByName(name) if err != nil { return nil, err } if provisionableVolumePlugin, ok := volumePlugin.(ProvisionableVolumePlugin); ok { return provisionableVolumePlugin, nil } return nil, fmt.Errorf("no provisionable volume plugin matched") } // FindDeletablePluginBySppec fetches a persistent volume plugin by spec. If // no plugin is found, returns error. 
func (pm *VolumePluginMgr) FindDeletablePluginBySpec(spec *Spec) (DeletableVolumePlugin, error) { volumePlugin, err := pm.FindPluginBySpec(spec) if err != nil { return nil, err } if deletableVolumePlugin, ok := volumePlugin.(DeletableVolumePlugin); ok { return deletableVolumePlugin, nil } return nil, fmt.Errorf("no deletable volume plugin matched") } // FindDeletablePluginByName fetches a persistent volume plugin by name. If // no plugin is found, returns error. func (pm *VolumePluginMgr) FindDeletablePluginByName(name string) (DeletableVolumePlugin, error) { volumePlugin, err := pm.FindPluginByName(name) if err != nil { return nil, err } if deletableVolumePlugin, ok := volumePlugin.(DeletableVolumePlugin); ok { return deletableVolumePlugin, nil } return nil, fmt.Errorf("no deletable volume plugin matched") } // FindCreatablePluginBySpec fetches a persistent volume plugin by name. If // no plugin is found, returns error. func (pm *VolumePluginMgr) FindCreatablePluginBySpec(spec *Spec) (ProvisionableVolumePlugin, error) { volumePlugin, err := pm.FindPluginBySpec(spec) if err != nil { return nil, err } if provisionableVolumePlugin, ok := volumePlugin.(ProvisionableVolumePlugin); ok { return provisionableVolumePlugin, nil } return nil, fmt.Errorf("no creatable volume plugin matched") } // FindAttachablePluginBySpec fetches a persistent volume plugin by name. // Unlike the other "FindPlugin" methods, this does not return error if no // plugin is found. All volumes require a mounter and unmounter, but not // every volume will have an attacher/detacher. func (pm *VolumePluginMgr) FindAttachablePluginBySpec(spec *Spec) (AttachableVolumePlugin, error) { volumePlugin, err := pm.FindPluginBySpec(spec) if err != nil { return nil, err } if attachableVolumePlugin, ok := volumePlugin.(AttachableVolumePlugin); ok { return attachableVolumePlugin, nil } return nil, nil } // FindAttachablePluginByName fetches an attachable volume plugin by name. // Unlike the other "FindPlugin" methods, this does not return error if no // plugin is found. All volumes require a mounter and unmounter, but not // every volume will have an attacher/detacher. func (pm *VolumePluginMgr) FindAttachablePluginByName(name string) (AttachableVolumePlugin, error) { volumePlugin, err := pm.FindPluginByName(name) if err != nil { return nil, err } if attachablePlugin, ok := volumePlugin.(AttachableVolumePlugin); ok { return attachablePlugin, nil } return nil, nil } // NewPersistentVolumeRecyclerPodTemplate creates a template for a recycler // pod. By default, a recycler pod simply runs "rm -rf" on a volume and tests // for emptiness. Most attributes of the template will be correct for most // plugin implementations. The following attributes can be overridden per // plugin via configuration: // // 1. pod.Spec.Volumes[0].VolumeSource must be overridden. Recycler // implementations without a valid VolumeSource will fail. // 2. pod.GenerateName helps distinguish recycler pods by name. Recommended. // Default is "pv-recycler-". // 3. pod.Spec.ActiveDeadlineSeconds gives the recycler pod a maximum timeout // before failing. Recommended. Default is 60 seconds. // // See HostPath and NFS for working recycler examples func NewPersistentVolumeRecyclerPodTemplate() *api.Pod { timeout := int64(60) pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ GenerateName: "pv-recycler-", Namespace: api.NamespaceDefault, }, Spec: api.PodSpec{ ActiveDeadlineSeconds: &timeout, RestartPolicy: api.RestartPolicyNever, Volumes: []api.Volume{ { Name: "vol", // IMPORTANT! 
All plugins using this template MUST // override pod.Spec.Volumes[0].VolumeSource Recycler // implementations without a valid VolumeSource will fail. VolumeSource: api.VolumeSource{}, }, }, Containers: []api.Container{ { Name: "pv-recycler", Image: "gcr.io/google_containers/busybox", Command: []string{"/bin/sh"}, Args: []string{"-c", "test -e /scrub && rm -rf /scrub/..?* /scrub/.[!.]* /scrub/* && test -z \"$(ls -A /scrub)\" || exit 1"}, VolumeMounts: []api.VolumeMount{ { Name: "vol", MountPath: "/scrub", }, }, }, }, }, } return pod }<|fim▁end|>
// mounts correctly. It will be replaced and expanded on by future // SecurityContext work. GetRootContext() string
<|file_name|>net.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use libc::{size_t, ssize_t, c_int, c_void, c_uint}; use libc; use std::io; use std::io::IoError; use std::io::net::ip; use std::mem; use std::ptr; use std::rt::rtio; use std::rt::task::BlockedTask; use homing::{HomingIO, HomeHandle}; use rc::Refcount; use stream::StreamWatcher; use super::{Loop, Request, UvError, Buf, status_to_io_result, uv_error_to_io_error, UvHandle, slice_to_uv_buf, wait_until_woken_after, wakeup}; use timeout::{AccessTimeout, AcceptTimeout, ConnectCtx}; use uvio::UvIoFactory; use uvll; //////////////////////////////////////////////////////////////////////////////// /// Generic functions related to dealing with sockaddr things //////////////////////////////////////////////////////////////////////////////// pub fn htons(u: u16) -> u16 { mem::to_be16(u) } pub fn ntohs(u: u16) -> u16 { mem::from_be16(u) } pub fn sockaddr_to_addr(storage: &libc::sockaddr_storage, len: uint) -> ip::SocketAddr { match storage.ss_family as c_int { libc::AF_INET => { assert!(len as uint >= mem::size_of::<libc::sockaddr_in>()); let storage: &libc::sockaddr_in = unsafe { mem::transmute(storage) }; let addr = storage.sin_addr.s_addr as u32; let a = (addr >> 0) as u8; let b = (addr >> 8) as u8; let c = (addr >> 16) as u8; let d = (addr >> 24) as u8; ip::SocketAddr { ip: ip::Ipv4Addr(a, b, c, d), port: ntohs(storage.sin_port), } } libc::AF_INET6 => { assert!(len as uint >= mem::size_of::<libc::sockaddr_in6>()); let storage: &libc::sockaddr_in6 = unsafe { mem::transmute(storage) }; let a = ntohs(storage.sin6_addr.s6_addr[0]); let b = ntohs(storage.sin6_addr.s6_addr[1]); let c = ntohs(storage.sin6_addr.s6_addr[2]); let d = ntohs(storage.sin6_addr.s6_addr[3]); let e = ntohs(storage.sin6_addr.s6_addr[4]); let f = ntohs(storage.sin6_addr.s6_addr[5]); let g = ntohs(storage.sin6_addr.s6_addr[6]); let h = ntohs(storage.sin6_addr.s6_addr[7]); ip::SocketAddr { ip: ip::Ipv6Addr(a, b, c, d, e, f, g, h), port: ntohs(storage.sin6_port), } } n => { fail!("unknown family {}", n); } } } fn addr_to_sockaddr(addr: ip::SocketAddr) -> (libc::sockaddr_storage, uint) { unsafe { let mut storage: libc::sockaddr_storage = mem::init(); let len = match addr.ip { ip::Ipv4Addr(a, b, c, d) => { let storage: &mut libc::sockaddr_in = mem::transmute(&mut storage); (*storage).sin_family = libc::AF_INET as libc::sa_family_t; (*storage).sin_port = htons(addr.port); (*storage).sin_addr = libc::in_addr { s_addr: (d as u32 << 24) | (c as u32 << 16) | (b as u32 << 8) | (a as u32 << 0) }; mem::size_of::<libc::sockaddr_in>() } ip::Ipv6Addr(a, b, c, d, e, f, g, h) => { let storage: &mut libc::sockaddr_in6 = mem::transmute(&mut storage); storage.sin6_family = libc::AF_INET6 as libc::sa_family_t; storage.sin6_port = htons(addr.port); storage.sin6_addr = libc::in6_addr { s6_addr: [ htons(a), htons(b), htons(c), htons(d), htons(e), htons(f), htons(g), htons(h), ] }; mem::size_of::<libc::sockaddr_in6>() } }; return (storage, len); } } enum SocketNameKind { TcpPeer, Tcp, Udp } fn socket_name(sk: SocketNameKind, handle: *c_void) -> 
Result<ip::SocketAddr, IoError> { let getsockname = match sk { TcpPeer => uvll::uv_tcp_getpeername, Tcp => uvll::uv_tcp_getsockname, Udp => uvll::uv_udp_getsockname, }; // Allocate a sockaddr_storage since we don't know if it's ipv4 or ipv6 let mut sockaddr: libc::sockaddr_storage = unsafe { mem::init() }; let mut namelen = mem::size_of::<libc::sockaddr_storage>() as c_int; let sockaddr_p = &mut sockaddr as *mut libc::sockaddr_storage; match unsafe { getsockname(handle, sockaddr_p as *mut libc::sockaddr, &mut namelen) } { 0 => Ok(sockaddr_to_addr(&sockaddr, namelen as uint)), n => Err(uv_error_to_io_error(UvError(n))) } } //////////////////////////////////////////////////////////////////////////////// /// TCP implementation //////////////////////////////////////////////////////////////////////////////// pub struct TcpWatcher { handle: *uvll::uv_tcp_t, stream: StreamWatcher, home: HomeHandle, refcount: Refcount, // libuv can't support concurrent reads and concurrent writes of the same // stream object, so we use these access guards in order to arbitrate among // multiple concurrent reads and writes. Note that libuv *can* read and // write simultaneously, it just can't read and read simultaneously. read_access: AccessTimeout, write_access: AccessTimeout, } pub struct TcpListener { home: HomeHandle, handle: *uvll::uv_pipe_t, closing_task: Option<BlockedTask>, outgoing: Sender<Result<Box<rtio::RtioTcpStream:Send>, IoError>>, incoming: Receiver<Result<Box<rtio::RtioTcpStream:Send>, IoError>>, } pub struct TcpAcceptor { listener: Box<TcpListener>, timeout: AcceptTimeout, } // TCP watchers (clients/streams) impl TcpWatcher { pub fn new(io: &mut UvIoFactory) -> TcpWatcher { let handle = io.make_handle(); TcpWatcher::new_home(&io.loop_, handle) } fn new_home(loop_: &Loop, home: HomeHandle) -> TcpWatcher { let handle = unsafe { uvll::malloc_handle(uvll::UV_TCP) }; assert_eq!(unsafe { uvll::uv_tcp_init(loop_.handle, handle) }, 0); TcpWatcher { home: home, handle: handle, stream: StreamWatcher::new(handle), refcount: Refcount::new(), read_access: AccessTimeout::new(), write_access: AccessTimeout::new(), } } pub fn connect(io: &mut UvIoFactory, address: ip::SocketAddr, timeout: Option<u64>) -> Result<TcpWatcher, UvError> { let tcp = TcpWatcher::new(io); let cx = ConnectCtx { status: -1, task: None, timer: None }; let (addr, _len) = addr_to_sockaddr(address); let addr_p = &addr as *_ as *libc::sockaddr; cx.connect(tcp, timeout, io, |req, tcp, cb| { unsafe { uvll::uv_tcp_connect(req.handle, tcp.handle, addr_p, cb) } }) } } impl HomingIO for TcpWatcher { fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home } } impl rtio::RtioSocket for TcpWatcher { fn socket_name(&mut self) -> Result<ip::SocketAddr, IoError> { let _m = self.fire_homing_missile(); socket_name(Tcp, self.handle) } } impl rtio::RtioTcpStream for TcpWatcher { fn read(&mut self, buf: &mut [u8]) -> Result<uint, IoError> { let m = self.fire_homing_missile(); let guard = try!(self.read_access.grant(m)); // see comments in close_read about this check if guard.access.is_closed() { return Err(io::standard_error(io::EndOfFile)) } self.stream.read(buf).map_err(uv_error_to_io_error) } fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { let m = self.fire_homing_missile(); let guard = try!(self.write_access.grant(m)); self.stream.write(buf, guard.can_timeout).map_err(uv_error_to_io_error) } fn peer_name(&mut self) -> Result<ip::SocketAddr, IoError> { let _m = self.fire_homing_missile(); socket_name(TcpPeer, self.handle) } fn 
control_congestion(&mut self) -> Result<(), IoError> { let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_tcp_nodelay(self.handle, 0 as c_int) }) } fn nodelay(&mut self) -> Result<(), IoError> { let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_tcp_nodelay(self.handle, 1 as c_int) }) } fn keepalive(&mut self, delay_in_seconds: uint) -> Result<(), IoError> { let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_tcp_keepalive(self.handle, 1 as c_int, delay_in_seconds as c_uint) }) } fn letdie(&mut self) -> Result<(), IoError> { let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_tcp_keepalive(self.handle, 0 as c_int, 0 as c_uint) }) } fn clone(&self) -> Box<rtio::RtioTcpStream:Send> { box TcpWatcher { handle: self.handle, stream: StreamWatcher::new(self.handle), home: self.home.clone(), refcount: self.refcount.clone(), read_access: self.read_access.clone(), write_access: self.write_access.clone(), } as Box<rtio::RtioTcpStream:Send> } fn close_read(&mut self) -> Result<(), IoError> { // see comments in PipeWatcher::close_read let task = { let m = self.fire_homing_missile(); self.read_access.access.close(&m); self.stream.cancel_read(uvll::EOF as libc::ssize_t) }; let _ = task.map(|t| t.reawaken()); Ok(()) } fn close_write(&mut self) -> Result<(), IoError> { let _m = self.fire_homing_missile(); shutdown(self.handle, &self.uv_loop()) } fn set_timeout(&mut self, timeout: Option<u64>) { self.set_read_timeout(timeout); self.set_write_timeout(timeout); } fn set_read_timeout(&mut self, ms: Option<u64>) { let _m = self.fire_homing_missile(); let loop_ = self.uv_loop(); self.read_access.set_timeout(ms, &self.home, &loop_, cancel_read, &self.stream as *_ as uint); fn cancel_read(stream: uint) -> Option<BlockedTask> { let stream: &mut StreamWatcher = unsafe { mem::transmute(stream) }; stream.cancel_read(uvll::ECANCELED as ssize_t) } } fn set_write_timeout(&mut self, ms: Option<u64>) { let _m = self.fire_homing_missile(); let loop_ = self.uv_loop(); self.write_access.set_timeout(ms, &self.home, &loop_, cancel_write, &self.stream as *_ as uint); fn cancel_write(stream: uint) -> Option<BlockedTask> { let stream: &mut StreamWatcher = unsafe { mem::transmute(stream) }; stream.cancel_write() } } } impl UvHandle<uvll::uv_tcp_t> for TcpWatcher { fn uv_handle(&self) -> *uvll::uv_tcp_t { self.stream.handle } } impl Drop for TcpWatcher { fn drop(&mut self) { let _m = self.fire_homing_missile(); if self.refcount.decrement() { self.close(); } } } // TCP listeners (unbound servers) impl TcpListener { pub fn bind(io: &mut UvIoFactory, address: ip::SocketAddr) -> Result<Box<TcpListener>, UvError> { let handle = unsafe { uvll::malloc_handle(uvll::UV_TCP) }; assert_eq!(unsafe { uvll::uv_tcp_init(io.uv_loop(), handle) }, 0); let (tx, rx) = channel(); let l = box TcpListener { home: io.make_handle(), handle: handle, closing_task: None, outgoing: tx, incoming: rx, }; let (addr, _len) = addr_to_sockaddr(address); let res = unsafe { let addr_p = &addr as *libc::sockaddr_storage; uvll::uv_tcp_bind(l.handle, addr_p as *libc::sockaddr) }; return match res { 0 => Ok(l.install()), n => Err(UvError(n)) }; } } impl HomingIO for TcpListener { fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home } } impl UvHandle<uvll::uv_tcp_t> for TcpListener { fn uv_handle(&self) -> *uvll::uv_tcp_t { self.handle } } impl rtio::RtioSocket for TcpListener { fn socket_name(&mut self) -> Result<ip::SocketAddr, IoError> { let _m = 
self.fire_homing_missile(); socket_name(Tcp, self.handle) } } impl rtio::RtioTcpListener for TcpListener { fn listen(~self) -> Result<Box<rtio::RtioTcpAcceptor:Send>, IoError> { // create the acceptor object from ourselves let mut acceptor = box TcpAcceptor { listener: self, timeout: AcceptTimeout::new(), }; let _m = acceptor.fire_homing_missile(); // FIXME: the 128 backlog should be configurable match unsafe { uvll::uv_listen(acceptor.listener.handle, 128, listen_cb) } { 0 => Ok(acceptor as Box<rtio::RtioTcpAcceptor:Send>), n => Err(uv_error_to_io_error(UvError(n))), } } } extern fn listen_cb(server: *uvll::uv_stream_t, status: c_int) { assert!(status != uvll::ECANCELED); let tcp: &mut TcpListener = unsafe { UvHandle::from_uv_handle(&server) }; let msg = match status { 0 => { let loop_ = Loop::wrap(unsafe { uvll::get_loop_for_uv_handle(server) }); let client = TcpWatcher::new_home(&loop_, tcp.home().clone()); assert_eq!(unsafe { uvll::uv_accept(server, client.handle) }, 0); Ok(box client as Box<rtio::RtioTcpStream:Send>) } n => Err(uv_error_to_io_error(UvError(n))) }; tcp.outgoing.send(msg); } impl Drop for TcpListener { fn drop(&mut self) { let _m = self.fire_homing_missile(); self.close(); } } // TCP acceptors (bound servers) impl HomingIO for TcpAcceptor { fn home<'r>(&'r mut self) -> &'r mut HomeHandle { self.listener.home() } } impl rtio::RtioSocket for TcpAcceptor { fn socket_name(&mut self) -> Result<ip::SocketAddr, IoError> { let _m = self.fire_homing_missile(); socket_name(Tcp, self.listener.handle) } } impl rtio::RtioTcpAcceptor for TcpAcceptor { fn accept(&mut self) -> Result<Box<rtio::RtioTcpStream:Send>, IoError> { self.timeout.accept(&self.listener.incoming) } fn accept_simultaneously(&mut self) -> Result<(), IoError> { let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_tcp_simultaneous_accepts(self.listener.handle, 1) }) } fn dont_accept_simultaneously(&mut self) -> Result<(), IoError> { let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_tcp_simultaneous_accepts(self.listener.handle, 0) }) } fn set_timeout(&mut self, ms: Option<u64>) { let _m = self.fire_homing_missile(); match ms { None => self.timeout.clear(), Some(ms) => self.timeout.set_timeout(ms, &mut *self.listener), } } } //////////////////////////////////////////////////////////////////////////////// /// UDP implementation //////////////////////////////////////////////////////////////////////////////// pub struct UdpWatcher { handle: *uvll::uv_udp_t, home: HomeHandle, // See above for what these fields are refcount: Refcount, read_access: AccessTimeout, write_access: AccessTimeout, blocked_sender: Option<BlockedTask>, } struct UdpRecvCtx { task: Option<BlockedTask>, buf: Option<Buf>, result: Option<(ssize_t, Option<ip::SocketAddr>)>, } struct UdpSendCtx { result: c_int, data: Option<Vec<u8>>, udp: *mut UdpWatcher, } impl UdpWatcher { pub fn bind(io: &mut UvIoFactory, address: ip::SocketAddr) -> Result<UdpWatcher, UvError> { let udp = UdpWatcher { handle: unsafe { uvll::malloc_handle(uvll::UV_UDP) }, home: io.make_handle(), refcount: Refcount::new(), read_access: AccessTimeout::new(), write_access: AccessTimeout::new(), blocked_sender: None, }; assert_eq!(unsafe { uvll::uv_udp_init(io.uv_loop(), udp.handle) }, 0); let (addr, _len) = addr_to_sockaddr(address); let result = unsafe { let addr_p = &addr as *libc::sockaddr_storage; uvll::uv_udp_bind(udp.handle, addr_p as *libc::sockaddr, 0u32) }; return match result { 0 => Ok(udp), n => Err(UvError(n)), }; } } impl 
UvHandle<uvll::uv_udp_t> for UdpWatcher { fn uv_handle(&self) -> *uvll::uv_udp_t { self.handle } } impl HomingIO for UdpWatcher { fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home } } impl rtio::RtioSocket for UdpWatcher { fn socket_name(&mut self) -> Result<ip::SocketAddr, IoError> { let _m = self.fire_homing_missile(); socket_name(Udp, self.handle) } } impl rtio::RtioUdpSocket for UdpWatcher { fn recvfrom(&mut self, buf: &mut [u8]) -> Result<(uint, ip::SocketAddr), IoError> { let loop_ = self.uv_loop(); let m = self.fire_homing_missile(); let _guard = try!(self.read_access.grant(m)); return match unsafe { uvll::uv_udp_recv_start(self.handle, alloc_cb, recv_cb) } { 0 => { let mut cx = UdpRecvCtx { task: None, buf: Some(slice_to_uv_buf(buf)), result: None, }; let handle = self.handle; wait_until_woken_after(&mut cx.task, &loop_, || { unsafe { uvll::set_data_for_uv_handle(handle, &cx) } }); match cx.result.take_unwrap() { (n, _) if n < 0 => Err(uv_error_to_io_error(UvError(n as c_int))), (n, addr) => Ok((n as uint, addr.unwrap())) } } n => Err(uv_error_to_io_error(UvError(n))) }; extern fn alloc_cb(handle: *uvll::uv_udp_t, _suggested_size: size_t, buf: *mut Buf) { unsafe {<|fim▁hole|> *buf = cx.buf.take().expect("recv alloc_cb called more than once") } } extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, buf: *Buf, addr: *libc::sockaddr, _flags: c_uint) { assert!(nread != uvll::ECANCELED as ssize_t); let cx = unsafe { &mut *(uvll::get_data_for_uv_handle(handle) as *mut UdpRecvCtx) }; // When there's no data to read the recv callback can be a no-op. // This can happen if read returns EAGAIN/EWOULDBLOCK. By ignoring // this we just drop back to kqueue and wait for the next callback. if nread == 0 { cx.buf = Some(unsafe { *buf }); return } unsafe { assert_eq!(uvll::uv_udp_recv_stop(handle), 0) } let addr = if addr == ptr::null() { None } else { let len = mem::size_of::<libc::sockaddr_storage>(); Some(sockaddr_to_addr(unsafe { mem::transmute(addr) }, len)) }; cx.result = Some((nread, addr)); wakeup(&mut cx.task); } } fn sendto(&mut self, buf: &[u8], dst: ip::SocketAddr) -> Result<(), IoError> { let m = self.fire_homing_missile(); let loop_ = self.uv_loop(); let guard = try!(self.write_access.grant(m)); let mut req = Request::new(uvll::UV_UDP_SEND); let (addr, _len) = addr_to_sockaddr(dst); let addr_p = &addr as *_ as *libc::sockaddr; // see comments in StreamWatcher::write for why we may allocate a buffer // here. let data = if guard.can_timeout {Some(Vec::from_slice(buf))} else {None}; let uv_buf = if guard.can_timeout { slice_to_uv_buf(data.get_ref().as_slice()) } else { slice_to_uv_buf(buf) }; return match unsafe { uvll::uv_udp_send(req.handle, self.handle, [uv_buf], addr_p, send_cb) } { 0 => { req.defuse(); // uv callback now owns this request let mut cx = UdpSendCtx { result: uvll::ECANCELED, data: data, udp: self as *mut _ }; wait_until_woken_after(&mut self.blocked_sender, &loop_, || { req.set_data(&cx); }); if cx.result != uvll::ECANCELED { return match cx.result { 0 => Ok(()), n => Err(uv_error_to_io_error(UvError(n))) } } let new_cx = box UdpSendCtx { result: 0, udp: 0 as *mut UdpWatcher, data: cx.data.take(), }; unsafe { req.set_data(&*new_cx); mem::forget(new_cx); } Err(uv_error_to_io_error(UvError(cx.result))) } n => Err(uv_error_to_io_error(UvError(n))) }; // This function is the same as stream::write_cb, but adapted for udp // instead of streams. 
extern fn send_cb(req: *uvll::uv_udp_send_t, status: c_int) { let req = Request::wrap(req); let cx: &mut UdpSendCtx = unsafe { req.get_data() }; cx.result = status; if cx.udp as uint != 0 { let udp: &mut UdpWatcher = unsafe { &mut *cx.udp }; wakeup(&mut udp.blocked_sender); } else { let _cx: Box<UdpSendCtx> = unsafe { mem::transmute(cx) }; } } } fn join_multicast(&mut self, multi: ip::IpAddr) -> Result<(), IoError> { let _m = self.fire_homing_missile(); status_to_io_result(unsafe { multi.to_str().with_c_str(|m_addr| { uvll::uv_udp_set_membership(self.handle, m_addr, ptr::null(), uvll::UV_JOIN_GROUP) }) }) } fn leave_multicast(&mut self, multi: ip::IpAddr) -> Result<(), IoError> { let _m = self.fire_homing_missile(); status_to_io_result(unsafe { multi.to_str().with_c_str(|m_addr| { uvll::uv_udp_set_membership(self.handle, m_addr, ptr::null(), uvll::UV_LEAVE_GROUP) }) }) } fn loop_multicast_locally(&mut self) -> Result<(), IoError> { let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_udp_set_multicast_loop(self.handle, 1 as c_int) }) } fn dont_loop_multicast_locally(&mut self) -> Result<(), IoError> { let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_udp_set_multicast_loop(self.handle, 0 as c_int) }) } fn multicast_time_to_live(&mut self, ttl: int) -> Result<(), IoError> { let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_udp_set_multicast_ttl(self.handle, ttl as c_int) }) } fn time_to_live(&mut self, ttl: int) -> Result<(), IoError> { let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_udp_set_ttl(self.handle, ttl as c_int) }) } fn hear_broadcasts(&mut self) -> Result<(), IoError> { let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_udp_set_broadcast(self.handle, 1 as c_int) }) } fn ignore_broadcasts(&mut self) -> Result<(), IoError> { let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_udp_set_broadcast(self.handle, 0 as c_int) }) } fn clone(&self) -> Box<rtio::RtioUdpSocket:Send> { box UdpWatcher { handle: self.handle, home: self.home.clone(), refcount: self.refcount.clone(), write_access: self.write_access.clone(), read_access: self.read_access.clone(), blocked_sender: None, } as Box<rtio::RtioUdpSocket:Send> } fn set_timeout(&mut self, timeout: Option<u64>) { self.set_read_timeout(timeout); self.set_write_timeout(timeout); } fn set_read_timeout(&mut self, ms: Option<u64>) { let _m = self.fire_homing_missile(); let loop_ = self.uv_loop(); self.read_access.set_timeout(ms, &self.home, &loop_, cancel_read, self.handle as uint); fn cancel_read(stream: uint) -> Option<BlockedTask> { // This method is quite similar to StreamWatcher::cancel_read, see // there for more information let handle = stream as *uvll::uv_udp_t; assert_eq!(unsafe { uvll::uv_udp_recv_stop(handle) }, 0); let data = unsafe { let data = uvll::get_data_for_uv_handle(handle); if data.is_null() { return None } uvll::set_data_for_uv_handle(handle, 0 as *int); &mut *(data as *mut UdpRecvCtx) }; data.result = Some((uvll::ECANCELED as ssize_t, None)); data.task.take() } } fn set_write_timeout(&mut self, ms: Option<u64>) { let _m = self.fire_homing_missile(); let loop_ = self.uv_loop(); self.write_access.set_timeout(ms, &self.home, &loop_, cancel_write, self as *mut _ as uint); fn cancel_write(stream: uint) -> Option<BlockedTask> { let stream: &mut UdpWatcher = unsafe { mem::transmute(stream) }; stream.blocked_sender.take() } } } impl Drop for UdpWatcher { fn drop(&mut self) { // Send 
ourselves home to close this handle (blocking while doing so). let _m = self.fire_homing_missile(); if self.refcount.decrement() { self.close(); } } } //////////////////////////////////////////////////////////////////////////////// // Shutdown helper //////////////////////////////////////////////////////////////////////////////// pub fn shutdown(handle: *uvll::uv_stream_t, loop_: &Loop) -> Result<(), IoError> { struct Ctx { slot: Option<BlockedTask>, status: c_int, } let mut req = Request::new(uvll::UV_SHUTDOWN); return match unsafe { uvll::uv_shutdown(req.handle, handle, shutdown_cb) } { 0 => { req.defuse(); // uv callback now owns this request let mut cx = Ctx { slot: None, status: 0 }; wait_until_woken_after(&mut cx.slot, loop_, || { req.set_data(&cx); }); status_to_io_result(cx.status) } n => Err(uv_error_to_io_error(UvError(n))) }; extern fn shutdown_cb(req: *uvll::uv_shutdown_t, status: libc::c_int) { let req = Request::wrap(req); assert!(status != uvll::ECANCELED); let cx: &mut Ctx = unsafe { req.get_data() }; cx.status = status; wakeup(&mut cx.slot); } } #[cfg(test)] mod test { use std::rt::rtio::{RtioTcpStream, RtioTcpListener, RtioTcpAcceptor, RtioUdpSocket}; use std::io::test::{next_test_ip4, next_test_ip6}; use super::{UdpWatcher, TcpWatcher, TcpListener}; use super::super::local_loop; #[test] fn connect_close_ip4() { match TcpWatcher::connect(local_loop(), next_test_ip4(), None) { Ok(..) => fail!(), Err(e) => assert_eq!(e.name(), "ECONNREFUSED".to_strbuf()), } } #[test] fn connect_close_ip6() { match TcpWatcher::connect(local_loop(), next_test_ip6(), None) { Ok(..) => fail!(), Err(e) => assert_eq!(e.name(), "ECONNREFUSED".to_strbuf()), } } #[test] fn udp_bind_close_ip4() { match UdpWatcher::bind(local_loop(), next_test_ip4()) { Ok(..) => {} Err(..) => fail!() } } #[test] fn udp_bind_close_ip6() { match UdpWatcher::bind(local_loop(), next_test_ip6()) { Ok(..) => {} Err(..) 
=> fail!() } } #[test] fn listen_ip4() { let (tx, rx) = channel(); let addr = next_test_ip4(); spawn(proc() { let w = match TcpListener::bind(local_loop(), addr) { Ok(w) => w, Err(e) => fail!("{:?}", e) }; let mut w = match w.listen() { Ok(w) => w, Err(e) => fail!("{:?}", e), }; tx.send(()); match w.accept() { Ok(mut stream) => { let mut buf = [0u8, ..10]; match stream.read(buf) { Ok(10) => {} e => fail!("{:?}", e), } for i in range(0, 10u8) { assert_eq!(buf[i as uint], i + 1); } } Err(e) => fail!("{:?}", e) } }); rx.recv(); let mut w = match TcpWatcher::connect(local_loop(), addr, None) { Ok(w) => w, Err(e) => fail!("{:?}", e) }; match w.write([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { Ok(()) => {}, Err(e) => fail!("{:?}", e) } } #[test] fn listen_ip6() { let (tx, rx) = channel(); let addr = next_test_ip6(); spawn(proc() { let w = match TcpListener::bind(local_loop(), addr) { Ok(w) => w, Err(e) => fail!("{:?}", e) }; let mut w = match w.listen() { Ok(w) => w, Err(e) => fail!("{:?}", e), }; tx.send(()); match w.accept() { Ok(mut stream) => { let mut buf = [0u8, ..10]; match stream.read(buf) { Ok(10) => {} e => fail!("{:?}", e), } for i in range(0, 10u8) { assert_eq!(buf[i as uint], i + 1); } } Err(e) => fail!("{:?}", e) } }); rx.recv(); let mut w = match TcpWatcher::connect(local_loop(), addr, None) { Ok(w) => w, Err(e) => fail!("{:?}", e) }; match w.write([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { Ok(()) => {}, Err(e) => fail!("{:?}", e) } } #[test] fn udp_recv_ip4() { let (tx, rx) = channel(); let client = next_test_ip4(); let server = next_test_ip4(); spawn(proc() { match UdpWatcher::bind(local_loop(), server) { Ok(mut w) => { tx.send(()); let mut buf = [0u8, ..10]; match w.recvfrom(buf) { Ok((10, addr)) => assert_eq!(addr, client), e => fail!("{:?}", e), } for i in range(0, 10u8) { assert_eq!(buf[i as uint], i + 1); } } Err(e) => fail!("{:?}", e) } }); rx.recv(); let mut w = match UdpWatcher::bind(local_loop(), client) { Ok(w) => w, Err(e) => fail!("{:?}", e) }; match w.sendto([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], server) { Ok(()) => {}, Err(e) => fail!("{:?}", e) } } #[test] fn udp_recv_ip6() { let (tx, rx) = channel(); let client = next_test_ip6(); let server = next_test_ip6(); spawn(proc() { match UdpWatcher::bind(local_loop(), server) { Ok(mut w) => { tx.send(()); let mut buf = [0u8, ..10]; match w.recvfrom(buf) { Ok((10, addr)) => assert_eq!(addr, client), e => fail!("{:?}", e), } for i in range(0, 10u8) { assert_eq!(buf[i as uint], i + 1); } } Err(e) => fail!("{:?}", e) } }); rx.recv(); let mut w = match UdpWatcher::bind(local_loop(), client) { Ok(w) => w, Err(e) => fail!("{:?}", e) }; match w.sendto([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], server) { Ok(()) => {}, Err(e) => fail!("{:?}", e) } } #[test] fn test_read_read_read() { let addr = next_test_ip4(); static MAX: uint = 5000; let (tx, rx) = channel(); spawn(proc() { let listener = TcpListener::bind(local_loop(), addr).unwrap(); let mut acceptor = listener.listen().unwrap(); tx.send(()); let mut stream = acceptor.accept().unwrap(); let buf = [1, .. 2048]; let mut total_bytes_written = 0; while total_bytes_written < MAX { assert!(stream.write(buf).is_ok()); uvdebug!("wrote bytes"); total_bytes_written += buf.len(); } }); rx.recv(); let mut stream = TcpWatcher::connect(local_loop(), addr, None).unwrap(); let mut buf = [0, .. 
2048]; let mut total_bytes_read = 0; while total_bytes_read < MAX { let nread = stream.read(buf).unwrap(); total_bytes_read += nread; for i in range(0u, nread) { assert_eq!(buf[i], 1); } } uvdebug!("read {} bytes total", total_bytes_read); } #[test] #[ignore(cfg(windows))] // FIXME(#10102) server never sees second packet fn test_udp_twice() { let server_addr = next_test_ip4(); let client_addr = next_test_ip4(); let (tx, rx) = channel(); spawn(proc() { let mut client = UdpWatcher::bind(local_loop(), client_addr).unwrap(); rx.recv(); assert!(client.sendto([1], server_addr).is_ok()); assert!(client.sendto([2], server_addr).is_ok()); }); let mut server = UdpWatcher::bind(local_loop(), server_addr).unwrap(); tx.send(()); let mut buf1 = [0]; let mut buf2 = [0]; let (nread1, src1) = server.recvfrom(buf1).unwrap(); let (nread2, src2) = server.recvfrom(buf2).unwrap(); assert_eq!(nread1, 1); assert_eq!(nread2, 1); assert_eq!(src1, client_addr); assert_eq!(src2, client_addr); assert_eq!(buf1[0], 1); assert_eq!(buf2[0], 2); } #[test] fn test_udp_many_read() { let server_out_addr = next_test_ip4(); let server_in_addr = next_test_ip4(); let client_out_addr = next_test_ip4(); let client_in_addr = next_test_ip4(); static MAX: uint = 500_000; let (tx1, rx1) = channel::<()>(); let (tx2, rx2) = channel::<()>(); spawn(proc() { let l = local_loop(); let mut server_out = UdpWatcher::bind(l, server_out_addr).unwrap(); let mut server_in = UdpWatcher::bind(l, server_in_addr).unwrap(); let (tx, rx) = (tx2, rx1); tx.send(()); rx.recv(); let msg = [1, .. 2048]; let mut total_bytes_sent = 0; let mut buf = [1]; while buf[0] == 1 { // send more data assert!(server_out.sendto(msg, client_in_addr).is_ok()); total_bytes_sent += msg.len(); // check if the client has received enough let res = server_in.recvfrom(buf); assert!(res.is_ok()); let (nread, src) = res.unwrap(); assert_eq!(nread, 1); assert_eq!(src, client_out_addr); } assert!(total_bytes_sent >= MAX); }); let l = local_loop(); let mut client_out = UdpWatcher::bind(l, client_out_addr).unwrap(); let mut client_in = UdpWatcher::bind(l, client_in_addr).unwrap(); let (tx, rx) = (tx1, rx2); rx.recv(); tx.send(()); let mut total_bytes_recv = 0; let mut buf = [0, .. 2048]; while total_bytes_recv < MAX { // ask for more assert!(client_out.sendto([1], server_in_addr).is_ok()); // wait for data let res = client_in.recvfrom(buf); assert!(res.is_ok()); let (nread, src) = res.unwrap(); assert_eq!(src, server_out_addr); total_bytes_recv += nread; for i in range(0u, nread) { assert_eq!(buf[i], 1); } } // tell the server we're done assert!(client_out.sendto([0], server_in_addr).is_ok()); } #[test] fn test_read_and_block() { let addr = next_test_ip4(); let (tx, rx) = channel::<Receiver<()>>(); spawn(proc() { let rx = rx.recv(); let mut stream = TcpWatcher::connect(local_loop(), addr, None).unwrap(); stream.write([0, 1, 2, 3, 4, 5, 6, 7]).unwrap(); stream.write([0, 1, 2, 3, 4, 5, 6, 7]).unwrap(); rx.recv(); stream.write([0, 1, 2, 3, 4, 5, 6, 7]).unwrap(); stream.write([0, 1, 2, 3, 4, 5, 6, 7]).unwrap(); rx.recv(); }); let listener = TcpListener::bind(local_loop(), addr).unwrap(); let mut acceptor = listener.listen().unwrap(); let (tx2, rx2) = channel(); tx.send(rx2); let mut stream = acceptor.accept().unwrap(); let mut buf = [0, .. 
2048]; let expected = 32; let mut current = 0; let mut reads = 0; while current < expected { let nread = stream.read(buf).unwrap(); for i in range(0u, nread) { let val = buf[i] as uint; assert_eq!(val, current % 8); current += 1; } reads += 1; let _ = tx2.send_opt(()); } // Make sure we had multiple reads assert!(reads > 1); } #[test] fn test_simple_tcp_server_and_client_on_diff_threads() { let addr = next_test_ip4(); spawn(proc() { let listener = TcpListener::bind(local_loop(), addr).unwrap(); let mut acceptor = listener.listen().unwrap(); let mut stream = acceptor.accept().unwrap(); let mut buf = [0, .. 2048]; let nread = stream.read(buf).unwrap(); assert_eq!(nread, 8); for i in range(0u, nread) { assert_eq!(buf[i], i as u8); } }); let mut stream = TcpWatcher::connect(local_loop(), addr, None); while stream.is_err() { stream = TcpWatcher::connect(local_loop(), addr, None); } stream.unwrap().write([0, 1, 2, 3, 4, 5, 6, 7]).unwrap(); } #[should_fail] #[test] fn tcp_listener_fail_cleanup() { let addr = next_test_ip4(); let w = TcpListener::bind(local_loop(), addr).unwrap(); let _w = w.listen().unwrap(); fail!(); } #[should_fail] #[test] fn tcp_stream_fail_cleanup() { let (tx, rx) = channel(); let addr = next_test_ip4(); spawn(proc() { let w = TcpListener::bind(local_loop(), addr).unwrap(); let mut w = w.listen().unwrap(); tx.send(()); drop(w.accept().unwrap()); }); rx.recv(); let _w = TcpWatcher::connect(local_loop(), addr, None).unwrap(); fail!(); } #[should_fail] #[test] fn udp_listener_fail_cleanup() { let addr = next_test_ip4(); let _w = UdpWatcher::bind(local_loop(), addr).unwrap(); fail!(); } #[should_fail] #[test] fn udp_fail_other_task() { let addr = next_test_ip4(); let (tx, rx) = channel(); // force the handle to be created on a different scheduler, failure in // the original task will force a homing operation back to this // scheduler. spawn(proc() { let w = UdpWatcher::bind(local_loop(), addr).unwrap(); tx.send(w); }); let _w = rx.recv(); fail!(); } }<|fim▁end|>
let cx = uvll::get_data_for_uv_handle(handle); let cx = &mut *(cx as *mut UdpRecvCtx);
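The shutdown() helper in the row above parks the calling task until the uv_shutdown callback records a status and wakes it. A rough Python analogue of that wait/wake handshake, with threading standing in for the task scheduler (all names below are invented for illustration, not part of the Rust source):

    import threading

    class ShutdownCtx:
        # Mirrors the Ctx struct: a parked waiter plus a status slot.
        def __init__(self):
            self.status = None
            self._woken = threading.Event()

        def wait_until_woken(self):
            # Counterpart of wait_until_woken_after(): block until the callback fires.
            self._woken.wait()
            return self.status

        def shutdown_cb(self, status):
            # Counterpart of the extern shutdown_cb: stash the status, wake the waiter.
            self.status = status
            self._woken.set()

    ctx = ShutdownCtx()
    threading.Timer(0.01, ctx.shutdown_cb, args=(0,)).start()  # fake async completion
    assert ctx.wait_until_woken() == 0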
<|file_name|>gen.py<|end_file_name|><|fim▁begin|>from datetime import timedelta, date def daterange(start_date, end_date): for n in range(int ((end_date - start_date).days)): yield start_date + timedelta(n) start_date = date(2017, 9, 30)<|fim▁hole|> print './pos_runner.sh', single_date.strftime("%Y-%m-%d")<|fim▁end|>
end_date = date(2017, 10, 23) for single_date in daterange(start_date, end_date): print './neg_runner.sh', single_date.strftime("%Y-%m-%d")
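The daterange generator in this pair yields a half-open interval: start_date is included, end_date is not. A quick check of that behavior, restated as plain Python 3 (the dates here are illustrative, not from the row):

    from datetime import date, timedelta

    def daterange(start_date, end_date):
        for n in range(int((end_date - start_date).days)):
            yield start_date + timedelta(n)

    days = [d.isoformat() for d in daterange(date(2017, 9, 30), date(2017, 10, 3))]
    assert days == ['2017-09-30', '2017-10-01', '2017-10-02']  # end date excluded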
<|file_name|>kernel_stats.hh<|end_file_name|><|fim▁begin|>/* * Copyright (c) 2004-2005 The Regents of The University of Michigan * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer; * redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution; * neither the name of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Authors: Gabe Black */ #ifndef __ARCH_SPARC_KERNEL_STATS_HH__ #define __ARCH_SPARC_KERNEL_STATS_HH__ #include <map> #include <stack> #include <string> #include <vector> <|fim▁hole|>namespace Kernel { class Statistics : public ::Kernel::Statistics { public: Statistics() : ::Kernel::Statistics() {} }; } // namespace AlphaISA::Kernel } // namespace AlphaISA #endif // __ARCH_SPARC_KERNEL_STATS_HH__<|fim▁end|>
#include "kern/kernel_stats.hh" namespace SparcISA {
<|file_name|>runtest.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2013 The Rust Project Developers. See the // COPYRIGHT file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use core::prelude::*; use common::mode_run_pass; use common::mode_run_fail; use common::mode_compile_fail; use common::mode_pretty; use common::config; use errors; use header::load_props; use header::TestProps; use procsrv; use util; use util::logv; use core::io::WriterUtil; use core::io; use core::os; use core::str; use core::uint; use core::vec; pub fn run(config: config, testfile: ~str) { if config.verbose { // We're going to be dumping a lot of info. Start on a new line. io::stdout().write_str(~"\n\n"); } let testfile = Path(testfile); debug!("running %s", testfile.to_str()); let props = load_props(&testfile); debug!("loaded props"); match config.mode { mode_compile_fail => run_cfail_test(config, props, &testfile), mode_run_fail => run_rfail_test(config, props, &testfile), mode_run_pass => run_rpass_test(config, props, &testfile), mode_pretty => run_pretty_test(config, props, &testfile), mode_debug_info => run_debuginfo_test(config, props, &testfile) } } fn run_cfail_test(config: config, props: TestProps, testfile: &Path) { let ProcRes = compile_test(config, props, testfile); if ProcRes.status == 0 { fatal_ProcRes(~"compile-fail test compiled successfully!", ProcRes); } check_correct_failure_status(ProcRes); let expected_errors = errors::load_errors(testfile); if !expected_errors.is_empty() { if !props.error_patterns.is_empty() { fatal(~"both error pattern and expected errors specified"); } check_expected_errors(expected_errors, testfile, ProcRes); } else { check_error_patterns(props, testfile, ProcRes); } } fn run_rfail_test(config: config, props: TestProps, testfile: &Path) { let ProcRes = if !config.jit { let ProcRes = compile_test(config, props, testfile); if ProcRes.status != 0 { fatal_ProcRes(~"compilation failed!", ProcRes); } exec_compiled_test(config, props, testfile) } else { jit_test(config, props, testfile) }; // The value our Makefile configures valgrind to return on failure static valgrind_err: int = 100; if ProcRes.status == valgrind_err { fatal_ProcRes(~"run-fail test isn't valgrind-clean!", ProcRes); } check_correct_failure_status(ProcRes); check_error_patterns(props, testfile, ProcRes); } fn check_correct_failure_status(ProcRes: ProcRes) { // The value the rust runtime returns on failure static rust_err: int = 101; if ProcRes.status != rust_err { fatal_ProcRes( fmt!("failure produced the wrong error code: %d", ProcRes.status), ProcRes); } } fn run_rpass_test(config: config, props: TestProps, testfile: &Path) { if !config.jit { let mut ProcRes = compile_test(config, props, testfile); if ProcRes.status != 0 { fatal_ProcRes(~"compilation failed!", ProcRes); } ProcRes = exec_compiled_test(config, props, testfile); if ProcRes.status != 0 { fatal_ProcRes(~"test run failed!", ProcRes); } } else { let mut ProcRes = jit_test(config, props, testfile); if ProcRes.status != 0 { fatal_ProcRes(~"jit failed!", ProcRes); } } } fn run_pretty_test(config: config, props: TestProps, testfile: &Path) { if props.pp_exact.is_some() { logv(config, ~"testing for exact 
pretty-printing"); } else { logv(config, ~"testing for converging pretty-printing"); } let rounds = match props.pp_exact { Some(_) => 1, None => 2 }; let mut srcs = ~[io::read_whole_file_str(testfile).get()]; let mut round = 0; while round < rounds { logv(config, fmt!("pretty-printing round %d", round)); let ProcRes = print_source(config, testfile, srcs[round]); if ProcRes.status != 0 { fatal_ProcRes(fmt!("pretty-printing failed in round %d", round), ProcRes); } srcs.push(ProcRes.stdout); round += 1; } let mut expected = match props.pp_exact { Some(file) => { let filepath = testfile.dir_path().push_rel(&file); io::read_whole_file_str(&filepath).get() } None => { srcs[vec::len(srcs) - 2u] } }; let mut actual = srcs[vec::len(srcs) - 1u]; if props.pp_exact.is_some() { // Now we have to care about line endings let cr = ~"\r"; actual = str::replace(actual, cr, ~""); expected = str::replace(expected, cr, ~""); } compare_source(expected, actual); // Finally, let's make sure it actually appears to remain valid code let ProcRes = typecheck_source(config, props, testfile, actual); if ProcRes.status != 0 { fatal_ProcRes(~"pretty-printed source does not typecheck", ProcRes); } return; fn print_source(config: config, testfile: &Path, src: ~str) -> ProcRes { compose_and_run(config, testfile, make_pp_args(config, testfile), ~[], config.compile_lib_path, Some(src)) } fn make_pp_args(config: config, _testfile: &Path) -> ProcArgs { let prog = config.rustc_path; let args = ~[~"-", ~"--pretty", ~"normal"]; return ProcArgs {prog: prog.to_str(), args: args}; } fn compare_source(expected: ~str, actual: ~str) { if expected != actual { error(~"pretty-printed source does not match expected source"); let msg = fmt!("\n\ expected:\n\ ------------------------------------------\n\ %s\n\ ------------------------------------------\n\ actual:\n\ ------------------------------------------\n\ %s\n\ ------------------------------------------\n\ \n", expected, actual); io::stdout().write_str(msg); fail!(); } } fn typecheck_source(config: config, props: TestProps, testfile: &Path, src: ~str) -> ProcRes { compose_and_run_compiler( config, props, testfile, make_typecheck_args(config, props, testfile), Some(src)) } fn make_typecheck_args(config: config, props: TestProps, testfile: &Path) -> ProcArgs { let prog = config.rustc_path; let mut args = ~[~"-", ~"--no-trans", ~"--lib", ~"-L", config.build_base.to_str(), ~"-L", aux_output_dir_name(config, testfile).to_str()]; args += split_maybe_args(config.rustcflags); args += split_maybe_args(props.compile_flags); return ProcArgs {prog: prog.to_str(), args: args}; } } fn run_debuginfo_test(config: config, props: TestProps, testfile: &Path) { // do not optimize debuginfo tests let config = match config.rustcflags { Some(flags) => config { rustcflags: Some(str::replace(flags, ~"-O", ~"")), .. 
config }, None => config }; // compile test file (it should have 'compile-flags:-g' in the header) let mut ProcRes = compile_test(config, props, testfile); if ProcRes.status != 0 { fatal_ProcRes(~"compilation failed!", ProcRes); } // write debugger script let script_str = str::append(str::connect(props.debugger_cmds, "\n"), ~"\nquit\n"); debug!("script_str = %s", script_str); dump_output_file(config, testfile, script_str, ~"debugger.script"); // run debugger script with gdb #[cfg(windows)] fn debugger() -> ~str { ~"gdb.exe" } #[cfg(unix)] fn debugger() -> ~str { ~"gdb" } let debugger_script = make_out_name(config, testfile, ~"debugger.script"); let debugger_opts = ~[~"-quiet", ~"-batch", ~"-nx", ~"-command=" + debugger_script.to_str(), make_exe_name(config, testfile).to_str()]; let ProcArgs = ProcArgs {prog: debugger(), args: debugger_opts}; ProcRes = compose_and_run(config, testfile, ProcArgs, ~[], ~"", None); if ProcRes.status != 0 { fatal(~"gdb failed to execute"); } let num_check_lines = vec::len(props.check_lines); if num_check_lines > 0 { // check if each line in props.check_lines appears in the // output (in order) let mut i = 0u; for str::each_line(ProcRes.stdout) |line| { if props.check_lines[i].trim() == line.trim() { i += 1u; } if i == num_check_lines { // all lines checked break; } } if i != num_check_lines { fatal_ProcRes(fmt!("line not found in debugger output: %s", props.check_lines[i]), ProcRes); } } } fn check_error_patterns(props: TestProps, testfile: &Path, ProcRes: ProcRes) { if vec::is_empty(props.error_patterns) { fatal(~"no error pattern specified in " + testfile.to_str()); } if ProcRes.status == 0 { fatal(~"process did not return an error status"); } let mut next_err_idx = 0u; let mut next_err_pat = props.error_patterns[next_err_idx]; let mut done = false; for str::each_line(ProcRes.stderr) |line| { if str::contains(line, next_err_pat) { debug!("found error pattern %s", next_err_pat); next_err_idx += 1u; if next_err_idx == vec::len(props.error_patterns) { debug!("found all error patterns"); done = true; break; } next_err_pat = props.error_patterns[next_err_idx]; } } if done { return; } let missing_patterns = vec::slice(props.error_patterns, next_err_idx, vec::len(props.error_patterns)); if vec::len(missing_patterns) == 1u { fatal_ProcRes(fmt!("error pattern '%s' not found!", missing_patterns[0]), ProcRes); } else { for missing_patterns.each |pattern| { error(fmt!("error pattern '%s' not found!", *pattern)); } fatal_ProcRes(~"multiple error patterns not found", ProcRes); } } fn check_expected_errors(expected_errors: ~[errors::ExpectedError], testfile: &Path, ProcRes: ProcRes) { // true if we found the error in question let mut found_flags = vec::from_elem( vec::len(expected_errors), false); if ProcRes.status == 0 { fatal(~"process did not return an error status"); } let prefixes = vec::map(expected_errors, |ee| { fmt!("%s:%u:", testfile.to_str(), ee.line) }); // Scan and extract our error/warning messages, // which look like: // filename:line1:col1: line2:col2: *error:* msg // filename:line1:col1: line2:col2: *warning:* msg // where line1:col1: is the starting point, line2:col2: // is the ending point, and * represents ANSI color codes. 
for str::each_line(ProcRes.stderr) |line| { let mut was_expected = false; for vec::eachi(expected_errors) |i, ee| { if !found_flags[i] { debug!("prefix=%s ee.kind=%s ee.msg=%s line=%s", prefixes[i], ee.kind, ee.msg, line); if (str::starts_with(line, prefixes[i]) && str::contains(line, ee.kind) && str::contains(line, ee.msg)) { found_flags[i] = true; was_expected = true; break; } } } // ignore this msg which gets printed at the end if str::contains(line, ~"aborting due to") { was_expected = true; } if !was_expected && is_compiler_error_or_warning(str::from_slice(line)) { fatal_ProcRes(fmt!("unexpected compiler error or warning: '%s'", line), ProcRes); } } for uint::range(0u, vec::len(found_flags)) |i| { if !found_flags[i] { let ee = expected_errors[i]; fatal_ProcRes(fmt!("expected %s on line %u not found: %s", ee.kind, ee.line, ee.msg), ProcRes); } } } fn is_compiler_error_or_warning(line: ~str) -> bool { let mut i = 0u; return scan_until_char(line, ':', &mut i) && scan_char(line, ':', &mut i) && scan_integer(line, &mut i) && scan_char(line, ':', &mut i) && scan_integer(line, &mut i) && scan_char(line, ':', &mut i) && scan_char(line, ' ', &mut i) && scan_integer(line, &mut i) && scan_char(line, ':', &mut i) && scan_integer(line, &mut i) && scan_char(line, ' ', &mut i) && (scan_string(line, ~"error", &mut i) || scan_string(line, ~"warning", &mut i)); } fn scan_until_char(haystack: ~str, needle: char, idx: &mut uint) -> bool { if *idx >= haystack.len() { return false; } let opt = str::find_char_from(haystack, needle, *idx); if opt.is_none() { return false; } *idx = opt.get(); return true; } fn scan_char(haystack: ~str, needle: char, idx: &mut uint) -> bool { if *idx >= haystack.len() { return false; } let range = str::char_range_at(haystack, *idx); if range.ch != needle { return false; } *idx = range.next; return true; } fn scan_integer(haystack: ~str, idx: &mut uint) -> bool { let mut i = *idx; while i < haystack.len() { let range = str::char_range_at(haystack, i); if range.ch < '0' || '9' < range.ch { break; } i = range.next; } if i == *idx { return false; } *idx = i; return true; } fn scan_string(haystack: ~str, needle: ~str, idx: &mut uint) -> bool { let mut haystack_i = *idx; let mut needle_i = 0u; while needle_i < needle.len() { if haystack_i >= haystack.len() { return false; } let range = str::char_range_at(haystack, haystack_i); haystack_i = range.next; if !scan_char(needle, range.ch, &mut needle_i) { return false; } } *idx = haystack_i; return true; } struct ProcArgs {prog: ~str, args: ~[~str]} struct ProcRes {status: int, stdout: ~str, stderr: ~str, cmdline: ~str} fn compile_test(config: config, props: TestProps, testfile: &Path) -> ProcRes { compile_test_(config, props, testfile, []) } fn jit_test(config: config, props: TestProps, testfile: &Path) -> ProcRes { compile_test_(config, props, testfile, [~"--jit"]) } fn compile_test_(config: config, props: TestProps, testfile: &Path, extra_args: &[~str]) -> ProcRes { let link_args = ~[~"-L", aux_output_dir_name(config, testfile).to_str()]; compose_and_run_compiler( config, props, testfile, make_compile_args(config, props, link_args + extra_args, make_exe_name, testfile), None) } fn exec_compiled_test(config: config, props: TestProps, testfile: &Path) -> ProcRes { // If testing the new runtime then set the RUST_NEWRT env var let env = if config.newrt { props.exec_env + ~[(~"RUST_NEWRT", ~"1")] } else { props.exec_env }; compose_and_run(config, testfile, make_run_args(config, props, testfile), env, config.run_lib_path, None) } fn 
compose_and_run_compiler( config: config, props: TestProps, testfile: &Path, args: ProcArgs, input: Option<~str>) -> ProcRes { if !props.aux_builds.is_empty() { ensure_dir(&aux_output_dir_name(config, testfile)); } let extra_link_args = ~[~"-L", aux_output_dir_name(config, testfile).to_str()]; for vec::each(props.aux_builds) |rel_ab| { let abs_ab = config.aux_base.push_rel(&Path(*rel_ab)); let aux_args = make_compile_args(config, props, ~[~"--lib"] + extra_link_args, |a,b| make_lib_name(a, b, testfile), &abs_ab); let auxres = compose_and_run(config, &abs_ab, aux_args, ~[], config.compile_lib_path, None); if auxres.status != 0 { fatal_ProcRes( fmt!("auxiliary build of %s failed to compile: ", abs_ab.to_str()), auxres); } } compose_and_run(config, testfile, args, ~[], config.compile_lib_path, input) } fn ensure_dir(path: &Path) { if os::path_is_dir(path) { return; } if !os::make_dir(path, 0x1c0i32) { fail!(fmt!("can't make dir %s", path.to_str())); } } fn compose_and_run(config: config, testfile: &Path, ProcArgs: ProcArgs, procenv: ~[(~str, ~str)], lib_path: ~str, input: Option<~str>) -> ProcRes { return program_output(config, testfile, lib_path, ProcArgs.prog, ProcArgs.args, procenv, input); } fn make_compile_args(config: config, props: TestProps, extras: ~[~str], xform: &fn(config, (&Path)) -> Path, testfile: &Path) -> ProcArgs { let prog = config.rustc_path; let mut args = ~[testfile.to_str(), ~"-o", xform(config, testfile).to_str(), ~"-L", config.build_base.to_str()] + extras;<|fim▁hole|>} fn make_lib_name(config: config, auxfile: &Path, testfile: &Path) -> Path { // what we return here is not particularly important, as it // happens; rustc ignores everything except for the directory. let auxname = output_testname(auxfile); aux_output_dir_name(config, testfile).push_rel(&auxname) } fn make_exe_name(config: config, testfile: &Path) -> Path { Path(output_base_name(config, testfile).to_str() + str::from_slice(os::EXE_SUFFIX)) } fn make_run_args(config: config, _props: TestProps, testfile: &Path) -> ProcArgs { let toolargs = { // If we've got another tool to run under (valgrind), // then split apart its command let runtool = match config.runtool { Some(s) => Some(s), None => None }; split_maybe_args(runtool) }; let args = toolargs + ~[make_exe_name(config, testfile).to_str()]; return ProcArgs {prog: args[0], args: vec::slice(args, 1, args.len()).to_vec()}; } fn split_maybe_args(argstr: Option<~str>) -> ~[~str] { fn rm_whitespace(v: ~[~str]) -> ~[~str] { v.filtered(|s| !str::is_whitespace(*s)) } match argstr { Some(s) => { let mut ss = ~[]; for str::each_split_char(s, ' ') |s| { ss.push(s.to_owned()) } rm_whitespace(ss) } None => ~[] } } fn program_output(config: config, testfile: &Path, lib_path: ~str, prog: ~str, args: ~[~str], env: ~[(~str, ~str)], input: Option<~str>) -> ProcRes { let cmdline = { let cmdline = make_cmdline(lib_path, prog, args); logv(config, fmt!("executing %s", cmdline)); cmdline }; let res = procsrv::run(lib_path, prog, args, env, input); dump_output(config, testfile, res.out, res.err); return ProcRes {status: res.status, stdout: res.out, stderr: res.err, cmdline: cmdline}; } // Linux and mac don't require adjusting the library search path #[cfg(target_os = "linux")] #[cfg(target_os = "macos")] #[cfg(target_os = "freebsd")] fn make_cmdline(_libpath: ~str, prog: ~str, args: ~[~str]) -> ~str { fmt!("%s %s", prog, str::connect(args, ~" ")) } #[cfg(target_os = "win32")] fn make_cmdline(libpath: ~str, prog: ~str, args: ~[~str]) -> ~str { fmt!("%s %s %s", 
lib_path_cmd_prefix(libpath), prog, str::connect(args, ~" ")) } // Build the LD_LIBRARY_PATH variable as it would be seen on the command line // for diagnostic purposes fn lib_path_cmd_prefix(path: ~str) -> ~str { fmt!("%s=\"%s\"", util::lib_path_env_var(), util::make_new_path(path)) } fn dump_output(config: config, testfile: &Path, out: ~str, err: ~str) { dump_output_file(config, testfile, out, ~"out"); dump_output_file(config, testfile, err, ~"err"); maybe_dump_to_stdout(config, out, err); } fn dump_output_file(config: config, testfile: &Path, out: ~str, extension: ~str) { let outfile = make_out_name(config, testfile, extension); let writer = io::file_writer(&outfile, ~[io::Create, io::Truncate]).get(); writer.write_str(out); } fn make_out_name(config: config, testfile: &Path, extension: ~str) -> Path { output_base_name(config, testfile).with_filetype(extension) } fn aux_output_dir_name(config: config, testfile: &Path) -> Path { output_base_name(config, testfile).with_filetype("libaux") } fn output_testname(testfile: &Path) -> Path { Path(testfile.filestem().get()) } fn output_base_name(config: config, testfile: &Path) -> Path { config.build_base .push_rel(&output_testname(testfile)) .with_filetype(config.stage_id) } fn maybe_dump_to_stdout(config: config, out: ~str, err: ~str) { if config.verbose { let sep1 = fmt!("------%s------------------------------", ~"stdout"); let sep2 = fmt!("------%s------------------------------", ~"stderr"); let sep3 = ~"------------------------------------------"; io::stdout().write_line(sep1); io::stdout().write_line(out); io::stdout().write_line(sep2); io::stdout().write_line(err); io::stdout().write_line(sep3); } } fn error(err: ~str) { io::stdout().write_line(fmt!("\nerror: %s", err)); } fn fatal(err: ~str) -> ! { error(err); fail!(); } fn fatal_ProcRes(err: ~str, ProcRes: ProcRes) -> ! { let msg = fmt!("\n\ error: %s\n\ command: %s\n\ stdout:\n\ ------------------------------------------\n\ %s\n\ ------------------------------------------\n\ stderr:\n\ ------------------------------------------\n\ %s\n\ ------------------------------------------\n\ \n", err, ProcRes.cmdline, ProcRes.stdout, ProcRes.stderr); io::stdout().write_str(msg); fail!(); }<|fim▁end|>
args += split_maybe_args(config.rustcflags); args += split_maybe_args(props.compile_flags); return ProcArgs {prog: prog.to_str(), args: args};
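runtest.rs drives the compiler and test binaries through program_output(), which returns (status, stdout, stderr) for one spawned process. A minimal Python sketch of the same capture-and-report shape, assuming a Unix-like echo on PATH for the demo call:

    import subprocess

    def program_output(prog, args, env=None, input_text=None):
        # Run one process and capture everything, like program_output() above.
        res = subprocess.run([prog] + list(args), env=env, input=input_text,
                             capture_output=True, text=True)
        return res.returncode, res.stdout, res.stderr

    status, out, err = program_output('echo', ['hello'])
    assert status == 0 and out.strip() == 'hello'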
<|file_name|>googlecode_upload.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # # !!!!!!!!! WARNING !!!!!!!!!!!!!!! # This Script was bastardized To Read Password From /home/bspaans/.googlecode # # # # Copyright 2006, 2007 Google Inc. All Rights Reserved. # Author: [email protected] (David Anderson) # # Script for uploading files to a Google Code project. # # This is intended to be both a useful script for people who want to # streamline project uploads and a reference implementation for # uploading files to Google Code projects. # # To upload a file to Google Code, you need to provide a path to the # file on your local machine, a small summary of what the file is, a # project name, and a valid account that is a member or owner of that # project. You can optionally provide a list of labels that apply to # the file. The file will be uploaded under the same name that it has # in your local filesystem (that is, the "basename" or last path # component). Run the script with '--help' to get the exact syntax # and available options. # # Note that the upload script requests that you enter your # googlecode.com password. This is NOT your Gmail account password! # This is the password you use on googlecode.com for committing to # Subversion and uploading files. You can find your password by going # to http://code.google.com/hosting/settings when logged in with your # Gmail account. If you have already committed to your project's # Subversion repository, the script will automatically retrieve your # credentials from there (unless disabled, see the output of '--help' # for details). # # If you are looking at this script as a reference for implementing # your own Google Code file uploader, then you should take a look at # the upload() function, which is the meat of the uploader. You # basically need to build a multipart/form-data POST request with the # right fields and send it to https://PROJECT.googlecode.com/files . # Authenticate the request using HTTP Basic authentication, as is # shown below. # # Licensed under the terms of the Apache Software License 2.0: # http://www.apache.org/licenses/LICENSE-2.0 # # Questions, comments, feature requests and patches are most welcome. # Please direct all of these to the Google Code users group: # http://groups.google.com/group/google-code-hosting """Google Code file uploader script. """ __author__ = '[email protected] (David Anderson)' import http.client import os.path import optparse import getpass import base64 import sys def get_svn_config_dir(): pass def get_svn_auth(project_name, config_dir): """Return (username, password) for project_name in config_dir. !!!!! CHANGED !!!!!!!!""" f = open("/home/bspaans/.googlecode", 'r') usr_data = f.read().split(":") f.close() return (usr_data[0], usr_data[1][:-1]) def upload(file, project_name, user_name, password, summary, labels=None): """Upload a file to a Google Code project's file server. Args: file: The local path to the file. project_name: The name of your project on Google Code. user_name: Your Google account name. password: The googlecode.com password for your account. Note that this is NOT your global Google Account password! summary: A small description for the file. labels: an optional list of label strings with which to tag the file. Returns: a tuple: http_status: 201 if the upload succeeded, something else if an error occured. http_reason: The human-readable string associated with http_status file_url: If the upload succeeded, the URL of the file on Google Code, None otherwise. 
""" # The login is the user part of [email protected]. If the login provided # is in the full user@domain form, strip it down. if user_name.endswith('@gmail.com'): user_name = user_name[:user_name.index('@gmail.com')] form_fields = [('summary', summary)] if labels is not None: form_fields.extend([('label', l.strip()) for l in labels]) content_type, body = encode_upload_request(form_fields, file) upload_host = '%s.googlecode.com' % project_name upload_uri = '/files' auth_token = base64.b64encode('%s:%s'% (user_name, password)) headers = { 'Authorization': 'Basic %s' % auth_token, 'User-Agent': 'Googlecode.com uploader v0.9.4', 'Content-Type': content_type, } server = http.client.HTTPSConnection(upload_host) server.request('POST', upload_uri, body, headers) resp = server.getresponse() server.close() if resp.status == 201: location = resp.getheader('Location', None) else: location = None return resp.status, resp.reason, location def encode_upload_request(fields, file_path): """Encode the given fields and file into a multipart form body. fields is a sequence of (name, value) pairs. file is the path of the file to upload. The file will be uploaded to Google Code with the same file name. Returns: (content_type, body) ready for httplib.HTTP instance """ BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla' CRLF = '\r\n' body = [] # Add the metadata about the upload first for key, value in fields: body.extend( ['--' + BOUNDARY, 'Content-Disposition: form-data; name="%s"' % key, '', value, ]) # Now add the file itself file_name = os.path.basename(file_path) f = open(file_path, 'rb') file_content = f.read() f.close() body.extend( ['--' + BOUNDARY, 'Content-Disposition: form-data; name="filename"; filename="%s"' % file_name, # The upload server determines the mime-type, no need to set it. 'Content-Type: application/octet-stream', '', file_content, ]) # Finalize the form body body.extend(['--' + BOUNDARY + '--', '']) return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body) def upload_find_auth(file_path, project_name, summary, labels=None, config_dir=None, user_name=None, tries=1): """Find credentials and upload a file to a Google Code project's file server. file_path, project_name, summary, and labels are passed as-is to upload. If config_dir is None, try get_svn_config_dir(); if it is 'none', skip trying the Subversion configuration entirely. If user_name is not None, use it for the first attempt; prompt for subsequent attempts. Args: file_path: The local path to the file. project_name: The name of your project on Google Code. summary: A small description for the file. labels: an optional list of label strings with which to tag the file. config_dir: Path to Subversion configuration directory, 'none', or None. user_name: Your Google account name. tries: How many attempts to make. """ if config_dir != 'none':<|fim▁hole|> if config_dir is None: config_dir = get_svn_config_dir() (svn_username, password) = get_svn_auth(project_name, config_dir) if user_name is None: # If username was not supplied by caller, use svn config. user_name = svn_username else: # Just initialize password for the first try. password = None while tries > 0: if user_name is None: # Read username if not specified or loaded from svn config, or on # subsequent tries. sys.stdout.write('Please enter your googlecode.com username: ') sys.stdout.flush() user_name = sys.stdin.readline().rstrip() if password is None: # Read password if not loaded from svn config, or on subsequent tries. 
print('Please enter your googlecode.com password.') print('** Note that this is NOT your Gmail account password! **') print('It is the password you use to access Subversion repositories,') print('and can be found here: http://code.google.com/hosting/settings') password = getpass.getpass() status, reason, url = upload(file_path, project_name, user_name, password, summary, labels) # Returns 403 Forbidden instead of 401 Unauthorized for bad # credentials as of 2007-07-17. if status in [http.client.FORBIDDEN]: # Rest for another try. tries = tries - 1 else: # We're done. break return status, reason, url def main(): parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY ' '-p PROJECT [options] FILE') parser.add_option('--config-dir', dest='config_dir', metavar='DIR', help='read svn auth data from DIR' ' ("none" means not to use svn auth data)') parser.add_option('-s', '--summary', dest='summary', help='Short description of the file') parser.add_option('-p', '--project', dest='project', help='Google Code project name') parser.add_option('-u', '--user', dest='user', help='Your Google Code username') parser.add_option('-l', '--labels', dest='labels', help='An optional list of labels to attach to the file') options, args = parser.parse_args() if not options.summary: parser.error('File summary is missing.') elif not options.project: parser.error('Project name is missing.') elif len(args) < 1: parser.error('File to upload not provided.') elif len(args) > 1: parser.error('Only one file may be specified.') file_path = args[0] if options.labels: labels = options.labels.split(',') else: labels = None status, reason, url = upload_find_auth(file_path, options.project, options.summary, labels, options.config_dir, options.user) if url: print('The file was uploaded successfully.') print('URL: %s' % url) return 0 else: print('An error occurred. Your file was not uploaded.') print('Google Code upload server said: %s (%s)' % (reason, status)) return 1 if __name__ == '__main__': sys.exit(main())<|fim▁end|>
# Try to load username/password from svn config for first try.
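The script above assembles its multipart/form-data body and Basic-auth header by hand in encode_upload_request() and upload(). For comparison, the same request shape using the third-party requests library, which does the multipart encoding itself; the endpoint is the long-dead Google Code file server, kept only to mirror the script:

    import requests

    def upload(path, project, user, password, summary, labels=None):
        fields = [('summary', summary)] + [('label', l.strip()) for l in (labels or [])]
        with open(path, 'rb') as f:
            resp = requests.post(
                'https://%s.googlecode.com/files' % project,
                auth=(user, password),   # Basic auth, as in the headers dict above
                data=fields,             # the ordinary form fields
                files={'filename': f},   # the file part, boundary handled for us
            )
        return resp.status_code, resp.reason, resp.headers.get('Location')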
<|file_name|>index.ts<|end_file_name|><|fim▁begin|>export { default as EthereumTx } from './EthereumTx'; export { default as Ethereum } from './Ethereum';<|fim▁hole|><|fim▁end|>
export { default as EthereumParams } from './EthereumParams';
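This index.ts pair is a "barrel" module: a file whose only job is to re-export its siblings under one import path. The Python equivalent lives in a package __init__.py (the module names below are hypothetical, chosen to match the TypeScript exports):

    # mypkg/__init__.py
    from .ethereum_tx import EthereumTx
    from .ethereum import Ethereum
    from .ethereum_params import EthereumParams

    __all__ = ['EthereumTx', 'Ethereum', 'EthereumParams']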
<|file_name|>test_alive_users.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python #-*-*- encoding: utf-8 -*-*- # # Copyright (C) 2005 onwards University of Deusto # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. # # This software consists of contributions made by many individuals, # listed below: # # Author: Pablo Orduña <[email protected]> # from __future__ import print_function, unicode_literals import unittest import time import Queue import test.unit.configuration as configuration_module import test.unit.weblab.core.test_user_processor as UserProcessorTest import voodoo.configuration as ConfigurationManager import voodoo.sessions.manager as SessionManager import voodoo.sessions.session_type as SessionType import voodoo.sessions.session_id as SessionId import weblab.core.user_processor as UserProcessor import weblab.core.coordinator.store as TemporalInformationStore import weblab.core.alive_users as AliveUsersCollection class TimeModule(object): def __init__(self): self._next_value = time.time() def set(self, next_value): self._next_value = next_value def time(self): return self._next_value class DummyCoordinator(object): def is_post_reservation(self, reservation_id): return False class AliveUsersCollectionTestCase(unittest.TestCase): def setUp(self): cfg_manager = ConfigurationManager.ConfigurationManager() cfg_manager.append_module(configuration_module) commands_store = TemporalInformationStore.CommandsTemporalInformationStore() locator = UserProcessorTest.FakeLocator(None) self.session_mgr = SessionManager.SessionManager( cfg_manager, SessionType.Memory, "foo" ) coordinator = DummyCoordinator() self.finished_reservations_store = Queue.Queue() self.auc = AliveUsersCollection.AliveUsersCollection( locator, cfg_manager, SessionType.Memory, self.session_mgr, coordinator, commands_store, self.finished_reservations_store ) self.tm = TimeModule() self.auc._time_module = self.tm def test_add(self): session_id = SessionId.SessionId("my session") self.auc.add_user(session_id) self.auc.add_user(session_id) # no exception def test_remove(self): session_id = SessionId.SessionId("my session") self.auc.add_user(session_id) self.auc.remove_user(session_id) self.auc.remove_user(session_id) # No exception def create_session(self, timestamp): session_id = self.session_mgr.create_session() self.session_mgr.modify_session( session_id, { 'db_session_id' : 'whatever', 'session_polling' : ( timestamp, UserProcessor.UserProcessor.EXPIRATION_TIME_NOT_SET ) } ) return session_id def test_finished_sessions(self): session_id1 = self.create_session(self.tm.time()) session_id2 = self.create_session(self.tm.time()) self.auc.add_user(session_id1) self.auc.add_user(session_id2) expired_users = self.auc.check_expired_users() self.assertEquals(0, len(expired_users)) self.finished_reservations_store.put(session_id1) expired_users = self.auc.check_expired_users()<|fim▁hole|> def test_finished_sessions2(self): session_id1 = self.create_session(self.tm.time() - 3600) # expired session_id2 = self.create_session(self.tm.time()) # expired self.auc.add_user(session_id1) self.auc.add_user(session_id2) self.finished_reservations_store.put(session_id2) expired_users = self.auc.check_expired_users() self.assertEquals(2, len(expired_users)) self.assertEquals(session_id2, expired_users[0]) self.assertEquals(session_id1, expired_users[1]) expired_users = self.auc.check_expired_users() self.assertEquals(0, len(expired_users)) def 
test_three_sessions_one_expired(self): session_id1 = self.create_session(self.tm.time()) session_id2 = self.create_session(self.tm.time() - 3600) # expired session_id3 = self.create_session(self.tm.time()) self.auc.add_user(session_id1) self.auc.add_user(session_id2) self.auc.add_user(session_id3) expired_users = self.auc.check_expired_users() self.assertEquals(1, len(expired_users)) self.assertEquals(session_id2, expired_users[0]) expired_users = self.auc.check_expired_users() self.assertEquals(0, len(expired_users)) # Some time passes with same results self.tm.set(self.tm.time() + self.auc._min_time_between_checks + 1) expired_users = self.auc.check_expired_users() self.assertEquals(0, len(expired_users)) def test_three_sessions_one_expired_and_then_another_before_time_passes(self): session_id1 = self.create_session(self.tm.time()) session_id2 = self.create_session(self.tm.time() - 3600) # expired session_id3 = self.create_session(self.tm.time()) self.auc.add_user(session_id1) self.auc.add_user(session_id2) self.auc.add_user(session_id3) expired_users = self.auc.check_expired_users() self.assertEquals(1, len(expired_users)) self.assertEquals(session_id2, expired_users[0]) expired_users = self.auc.check_expired_users() self.assertEquals(0, len(expired_users)) session = self.session_mgr.get_session(session_id3) session['session_polling'] = ( self.tm.time() - 3600, # Expired UserProcessor.UserProcessor.EXPIRATION_TIME_NOT_SET ) self.session_mgr.modify_session( session_id3, session ) # Still it doesn't find it! expired_users = self.auc.check_expired_users() self.assertEquals(0, len(expired_users)) # Some time passes self.tm.set(self.tm.time() + self.auc._min_time_between_checks + 1) # And now it finds the new expired session expired_users = self.auc.check_expired_users() self.assertEquals(1, len(expired_users)) self.assertEquals(session_id3, expired_users[0]) def suite(): return unittest.makeSuite(AliveUsersCollectionTestCase) if __name__ == '__main__': unittest.main()<|fim▁end|>
self.assertEquals(1, len(expired_users)) self.assertEquals(session_id1, expired_users[0])
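The expiry tests above work because TimeModule is an injectable clock: the collection reads self._time_module.time(), so a test can rewind or advance time without sleeping. The pattern in isolation, as a self-contained sketch:

    import time

    class FakeClock:
        # Stand-in for time.time(), like TimeModule in the test file.
        def __init__(self):
            self._now = time.time()
        def set(self, value):
            self._now = value
        def time(self):
            return self._now

    clock = FakeClock()
    poll_timestamp = clock.time()
    clock.set(clock.time() + 3600)                 # jump one hour ahead, no sleeping
    assert clock.time() - poll_timestamp >= 3600   # an expiry check would now fire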
<|file_name|>index.d.ts<|end_file_name|><|fim▁begin|>// Type definitions for tcp-ping 0.1 // Project: https://github.com/wesolyromek/tcp-ping // Definitions by: JUNG YONG WOO <https://github.com/stegano> // rymate1234 <https://github.com/rymate1234> // Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped export interface Options { address?: string; port?: number; attempts?: number; timeout?: number;<|fim▁hole|> seq: number; time: number | undefined; err?: Error; } export interface Result { address: string; port: number; attempts: number; avg: number; max: number; min: number; results: Results[]; } export function ping(options: Options, callback: (error: Error, result: Result) => void): void; export function probe(address: string, port: number, callback: (error: Error, result: boolean) => void): void;<|fim▁end|>
} export interface Results {
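The probe() declared in these typings answers one question: does a TCP connect to (address, port) succeed before the timeout? A compact Python counterpart using only the standard library (the default timeout is an arbitrary choice, not from the typings):

    import socket

    def probe(address, port, timeout=5.0):
        # True if a TCP connection can be opened before the timeout, else False.
        try:
            with socket.create_connection((address, port), timeout=timeout):
                return True
        except OSError:
            return False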
<|file_name|>episode.ts<|end_file_name|><|fim▁begin|>'use strict'; import cheerio = require('cheerio'); import fs = require('fs'); import mkdirp = require('mkdirp'); import my_request = require('./my_request'); import path = require('path'); import subtitle from './subtitle/index'; import vlos from './vlos'; import video from './video/index'; import xml2js = require('xml2js'); import log = require('./log'); /** * Streams the episode to disk. */ export default function(config: IConfig, address: string, done: (err: Error, ign: boolean) => void) { scrapePage(config, address, (err, page) => { if (err) { return done(err, false); } if (page.media != null) { /* No player to scrape */ download(config, page, null, done); } else { /* The old way */ scrapePlayer(config, address, page.id, (errS, player) => { if (errS) { return done(errS, false); } download(config, page, player, done); }); } }); } /** * Completes a download and writes the message with an elapsed time. */ function complete(epName: string, message: string, begin: number, done: (err: Error, ign: boolean) => void) { const timeInMs = Date.now() - begin; const seconds = prefix(Math.floor(timeInMs / 1000) % 60, 2); const minutes = prefix(Math.floor(timeInMs / 1000 / 60) % 60, 2); const hours = prefix(Math.floor(timeInMs / 1000 / 60 / 60), 2); log.dispEpisode(epName, message + ' (' + hours + ':' + minutes + ':' + seconds + ')', true); done(null, false); } /** * Check if a file exist.. */ function fileExist(path: string) { try { fs.statSync(path); return true; } catch (e) { return false; } } function sanitiseFileName(str: string) { const sanitized = str.replace(/[\/':\?\*"<>\\\.\|]/g, '_'); return sanitized.replace(/{DIR_SEPARATOR}/g, '/'); } /** * Downloads the subtitle and video. */ function download(config: IConfig, page: IEpisodePage, player: IEpisodePlayer, done: (err: Error | string, ign: boolean) => void) { const serieFolder = sanitiseFileName(config.series || page.series); let fileName = sanitiseFileName(generateName(config, page)); let filePath = path.join(config.output || process.cwd(), serieFolder, fileName); if (fileExist(filePath + '.mkv')) { let count = 0; if (config.rebuildcrp) { log.warn('Adding \'' + fileName + '\' to the DB...'); return done(null, false); } log.warn('File \'' + fileName + '\' already exist...'); do { count = count + 1; fileName = sanitiseFileName(generateName(config, page, '-' + count)); filePath = path.join(config.output || process.cwd(), serieFolder, fileName); } while (fileExist(filePath + '.mkv')); log.warn('Renaming to \'' + fileName + '\'...'); page.filename = fileName; } if (config.rebuildcrp) { log.warn('Ignoring \'' + fileName + '\' as it does not exist...'); return done(null, true); } const ret = mkdirp(path.dirname(filePath)); if (ret) {<|fim▁hole|> { log.dispEpisode(fileName, 'Error...', true); return done(errDS, false); } const now = Date.now(); if ( ((page.media === null) && (player.video.file !== undefined)) || ((page.media !== null) /* Do they still create page in advance for unreleased episodes? 
*/) ) { log.dispEpisode(fileName, 'Fetching video...', false); downloadVideo(config, page, player, filePath, (errDV) => { if (errDV) { log.dispEpisode(fileName, 'Error...', true); return done(errDV, false); } if (config.merge) { return complete(fileName, 'Finished!', now, done); } let isSubtitled = true; if (page.media === null) { isSubtitled = Boolean(player.subtitle); } else { if (page.media.subtitles.length === 0) { isSubtitled = false; } } let videoExt = '.mp4'; if ( (page.media === null) && (player.video.mode === 'RTMP')) { videoExt = path.extname(player.video.file); } log.dispEpisode(fileName, 'Merging...', false); video.merge(config, isSubtitled, videoExt, filePath, config.verbose, (errVM) => { if (errVM) { log.dispEpisode(fileName, 'Error...', true); return done(errVM, false); } complete(fileName, 'Finished!', now, done); }); }); } else { log.dispEpisode(fileName, 'Ignoring: not released yet', true); done(null, true); } }); } else { log.dispEpisode(fileName, 'Error creating folder \'' + filePath + '\'...', true); return done('Cannot create folder', false); } } /** * Saves the subtitles to disk. */ function downloadSubtitle(config: IConfig, page: IEpisodePage, player: IEpisodePlayer, filePath: string, done: (err?: Error | string) => void) { if (page.media !== null) { const subs = page.media.subtitles; if (subs.length === 0) { /* No downloadable subtitles */ console.warn('Can\'t find subtitle ?!'); return done(); } let i; let j; /* Find a proper subtitles */ for (j = 0; j < config.sublang.length; j++) { const reqSubLang = config.sublang[j]; for (i = 0; i < subs.length; i++) { const curSub = subs[i]; if (curSub.format === 'ass' && curSub.language === reqSubLang) { my_request.get(config, curSub.url, (err, result) => { if (err) { log.error('An error occured while fetching subtitles...'); return done(err); } fs.writeFile(filePath + '.ass', '\ufeff' + result, done); }); /* Break from the first loop */ j = config.sublang.length; break; } } } if (i >= subs.length) { done('Cannot find subtitles with requested language(s)'); } } else { const enc = player.subtitle; if (!enc) { return done(); } subtitle.decode(enc.id, enc.iv, enc.data, (errSD, data) => { if (errSD) { log.error('An error occured while getting subtitles...'); return done(errSD); } if (config.debug) { log.dumpToDebug('SubtitlesXML', data); } const formats = subtitle.formats; const format = formats[config.format] ? config.format : 'ass'; formats[format](config, data, (errF: Error, decodedSubtitle: string) => { if (errF) { return done(errF); } fs.writeFile(filePath + '.' + format, '\ufeff' + decodedSubtitle, done); }); }); } } /** * Streams the video to disk. */ function downloadVideo(config: IConfig, page: IEpisodePage, player: IEpisodePlayer, filePath: string, done: (err: any) => void) { if (player == null) { /* new way */ const streams = page.media.streams; let i; /* Find a proper subtitles */ for (i = 0; i < streams.length; i++) { if (streams[i].format === 'vo_adaptive_hls' && streams[i].audio_lang === 'jaJP' && streams[i].hardsub_lang === null) { video.stream('', streams[i].url, '', filePath, 'mp4', 'HLS', config.verbose, done); break; } } if (i >= streams.length) { done('Cannot find a valid stream'); } } else { /* Old way */ video.stream(player.video.host, player.video.file, page.swf, filePath, path.extname(player.video.file), player.video.mode, config.verbose, done); } } /** * Names the file based on the config, page, series and tag. 
*/ function generateName(config: IConfig, page: IEpisodePage, extra = '') { const episodeNum = parseInt(page.episode, 10); const volumeNum = parseInt(page.volume, 10); const episode = (episodeNum < 10 ? '0' : '') + page.episode; const volume = (volumeNum < 10 ? '0' : '') + page.volume; const tag = config.tag || 'CrunchyRoll'; const series = config.series || page.series; return config.nametmpl .replace(/{EPISODE_ID}/g, page.id.toString()) .replace(/{EPISODE_NUMBER}/g, episode) .replace(/{SEASON_NUMBER}/g, volume) .replace(/{VOLUME_NUMBER}/g, volume) .replace(/{SEASON_TITLE}/g, page.season) .replace(/{VOLUME_TITLE}/g, page.season) .replace(/{SERIES_TITLE}/g, series) .replace(/{EPISODE_TITLE}/g, page.title) .replace(/{TAG}/g, tag) + extra; } /** * Prefixes a value. */ function prefix(value: number|string, length: number) { let valueString = (typeof value !== 'string') ? String(value) : value; while (valueString.length < length) { valueString = '0' + valueString; } return valueString; } /** * Requests the page data and scrapes the id, episode, series and swf. */ function scrapePage(config: IConfig, address: string, done: (err: Error, page?: IEpisodePage) => void) { const epId = parseInt((address.match(/[0-9]+$/) || ['0'])[0], 10); if (!epId) { return done(new Error('Invalid address.')); } my_request.get(config, address, (err, result) => { if (err) { return done(err); } const $ = cheerio.load(result); /* First check if we have the new player */ const vlosScript = $('#vilos-iframe-container'); if (vlosScript) { const pageMetadata = JSON.parse($('script[type="application/ld+json"]')[0].children[0].data); const divScript = $('div[id="showmedia_video_box_wide"]'); const scripts = divScript.find('script').toArray(); const script = scripts[2].children[0].data; let seasonNumber = '1'; let seasonTitle = ''; if (pageMetadata.partOfSeason) { seasonNumber = pageMetadata.partOfSeason.seasonNumber; if (seasonNumber === '0') { seasonNumber = '1'; } seasonTitle = pageMetadata.partOfSeason.name; } done(null, vlos.getMedia(script, seasonTitle, seasonNumber)); } else { /* Use the old way */ const swf = /^([^?]+)/.exec($('link[rel=video_src]').attr('href')); const regexp = /\s*([^\n\r\t\f]+)\n?\s*[^0-9]*([0-9][\-0-9.]*)?,?\n?\s\s*[^0-9]*((PV )?[S0-9][P0-9.]*[a-fA-F]?)/; const seasonTitle = $('span[itemprop="title"]').text(); const look = $('#showmedia_about_media').text(); const episodeTitle = $('#showmedia_about_name').text().replace(/[“”]/g, ''); const data = regexp.exec(look); if (config.debug) { log.dumpToDebug('episode page', $.html()); } if (!swf || !data) { log.warn('Somethig unexpected in the page at ' + address + ' (data are: ' + look + ')'); log.warn('Setting Season to ’0’ and episode to ’0’...'); if (config.debug) { log.dumpToDebug('episode unexpected', look); } done(null, { episode: '0', id: epId, series: seasonTitle, season: seasonTitle, title: episodeTitle, swf: swf[1], volume: '0', filename: '', media: null, }); } else { done(null, { episode: data[3], id: epId, series: data[1], season: seasonTitle, title: episodeTitle, swf: swf[1], volume: data[2] || '1', filename: '', media: null, }); } } }); } /** * Requests the player data and scrapes the subtitle and video data. 
*/ function scrapePlayer(config: IConfig, address: string, id: number, done: (err: Error, player?: IEpisodePlayer) => void) { const url = address.match(/^(https?:\/\/[^\/]+)/); if (!url) { return done(new Error('Invalid address.')); } const postForm = { current_page: address, video_format: config.video_format, video_quality: config.video_quality, media_id: id }; my_request.post(config, url[1] + '/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=' + id, postForm, (err, result) => { if (err) { return done(err); } xml2js.parseString(result, { explicitArray: false, explicitRoot: false, }, (errPS: Error, player: IEpisodePlayerConfig) => { if (errPS) { return done(errPS); } try { const isSubtitled = Boolean(player['default:preload'].subtitle); let streamMode = 'RTMP'; if (player['default:preload'].stream_info.host === '') { streamMode = 'HLS'; } done(null, { subtitle: isSubtitled ? { data: player['default:preload'].subtitle.data, id: parseInt(player['default:preload'].subtitle.$.id, 10), iv: player['default:preload'].subtitle.iv, } : null, video: { file: player['default:preload'].stream_info.file, host: player['default:preload'].stream_info.host, mode: streamMode, }, }); } catch (parseError) { if (config.debug) { log.dumpToDebug('player scrape', parseError); } done(parseError); } }); }); }<|fim▁end|>
log.dispEpisode(fileName, 'Fetching...', false); downloadSubtitle(config, page, player, filePath, (errDS) => { if (errDS)
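generateName() in the row above fills {PLACEHOLDER} tokens from the episode page and zero-pads numbers via prefix(). The same two ideas in Python — the template tokens and field names are copied from the source, while the sample data is made up:

    def generate_name(template, page, tag='CrunchyRoll'):
        episode = str(page['episode']).zfill(2)    # zfill plays the role of prefix()
        return (template
                .replace('{EPISODE_NUMBER}', episode)
                .replace('{SERIES_TITLE}', page['series'])
                .replace('{EPISODE_TITLE}', page['title'])
                .replace('{TAG}', tag))

    name = generate_name('[{TAG}] {SERIES_TITLE} - {EPISODE_NUMBER} - {EPISODE_TITLE}',
                         {'episode': '7', 'series': 'Example', 'title': 'Pilot'})
    assert name == '[CrunchyRoll] Example - 07 - Pilot'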
<|file_name|>TestTruncate.cpp<|end_file_name|><|fim▁begin|>/*- * See the file LICENSE for redistribution information. * * Copyright (c) 2000,2007 Oracle. All rights reserved. * * $Id: TestTruncate.cpp,v 12.5 2007/05/17 15:15:57 bostic Exp $ */ /* * Do some regression tests for constructors. * Run normally (without arguments) it is a simple regression test. * Run with a numeric argument, it repeats the regression a number * of times, to try to determine if there are memory leaks. */ #include <db_cxx.h><|fim▁hole|>{ try { Db *db = new Db(NULL, 0); db->open(NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0644); // populate our massive database. // all our strings include null for convenience. // Note we have to cast for idiomatic // usage, since newer gcc requires it. Dbt *keydbt = new Dbt((char*)"key", 4); Dbt *datadbt = new Dbt((char*)"data", 5); db->put(NULL, keydbt, datadbt, 0); // Now, retrieve. We could use keydbt over again, // but that wouldn't be typical in an application. Dbt *goodkeydbt = new Dbt((char*)"key", 4); Dbt *badkeydbt = new Dbt((char*)"badkey", 7); Dbt *resultdbt = new Dbt(); resultdbt->set_flags(DB_DBT_MALLOC); int ret; if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) { cout << "get: " << DbEnv::strerror(ret) << "\n"; } else { char *result = (char *)resultdbt->get_data(); cout << "got data: " << result << "\n"; } if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) { // We expect this... cout << "get using bad key: " << DbEnv::strerror(ret) << "\n"; } else { char *result = (char *)resultdbt->get_data(); cout << "*** got data using bad key!!: " << result << "\n"; } // Now, truncate and make sure that it's really gone. cout << "truncating data...\n"; u_int32_t nrecords; db->truncate(NULL, &nrecords, 0); cout << "truncate returns " << nrecords << "\n"; if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) { // We expect this... cout << "after truncate get: " << DbEnv::strerror(ret) << "\n"; } else { char *result = (char *)resultdbt->get_data(); cout << "got data: " << result << "\n"; } db->close(0); cout << "finished test\n"; } catch (DbException &dbe) { cerr << "Db Exception: " << dbe.what(); } return 0; }<|fim▁end|>
#include <iostream.h> int main(int argc, char *argv[])
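The C++ test above checks that Db::truncate drops every record and reports how many were dropped. Python's dbm module can show the same before/after shape: reopening with flag 'n' empties the database. The record count that truncate returns has no stdlib counterpart, so it is counted by hand here, and the path is illustrative:

    import dbm

    with dbm.open('/tmp/truncate_demo', 'n') as db:    # 'n': create empty
        db[b'key'] = b'data'
        nrecords = len(db.keys())                      # what truncate would report
        assert nrecords == 1

    with dbm.open('/tmp/truncate_demo', 'n') as db:    # reopen with 'n': records gone
        assert len(db.keys()) == 0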
<|file_name|>legacy.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import import logging import struct import six from six.moves import xrange import kafka.common import kafka.protocol.commit import kafka.protocol.fetch import kafka.protocol.message import kafka.protocol.metadata import kafka.protocol.offset import kafka.protocol.produce from kafka.codec import ( gzip_encode, gzip_decode, snappy_encode, snappy_decode ) from kafka.common import ( ProtocolError, ChecksumError, UnsupportedCodecError, ConsumerMetadataResponse ) from kafka.util import ( crc32, read_short_string, read_int_string, relative_unpack, write_short_string, write_int_string, group_by_topic_and_partition ) log = logging.getLogger(__name__) ATTRIBUTE_CODEC_MASK = 0x03 CODEC_NONE = 0x00 CODEC_GZIP = 0x01 CODEC_SNAPPY = 0x02 ALL_CODECS = (CODEC_NONE, CODEC_GZIP, CODEC_SNAPPY) class KafkaProtocol(object): """ Class to encapsulate all of the protocol encoding/decoding. This class does not have any state associated with it, it is purely for organization. """ PRODUCE_KEY = 0 FETCH_KEY = 1 OFFSET_KEY = 2 METADATA_KEY = 3 OFFSET_COMMIT_KEY = 8 OFFSET_FETCH_KEY = 9 CONSUMER_METADATA_KEY = 10 ################### # Private API # ################### @classmethod def _encode_message_header(cls, client_id, correlation_id, request_key, version=0): """ Encode the common request envelope """ return struct.pack('>hhih%ds' % len(client_id), request_key, # ApiKey version, # ApiVersion correlation_id, # CorrelationId len(client_id), # ClientId size client_id) # ClientId @classmethod def _encode_message_set(cls, messages): """ Encode a MessageSet. Unlike other arrays in the protocol, MessageSets are not length-prefixed Format ====== MessageSet => [Offset MessageSize Message] Offset => int64<|fim▁hole|> """ message_set = [] for message in messages: encoded_message = KafkaProtocol._encode_message(message) message_set.append(struct.pack('>qi%ds' % len(encoded_message), 0, len(encoded_message), encoded_message)) return b''.join(message_set) @classmethod def _encode_message(cls, message): """ Encode a single message. The magic number of a message is a format version number. The only supported magic number right now is zero Format ====== Message => Crc MagicByte Attributes Key Value Crc => int32 MagicByte => int8 Attributes => int8 Key => bytes Value => bytes """ if message.magic == 0: msg = b''.join([ struct.pack('>BB', message.magic, message.attributes), write_int_string(message.key), write_int_string(message.value) ]) crc = crc32(msg) msg = struct.pack('>i%ds' % len(msg), crc, msg) else: raise ProtocolError("Unexpected magic number: %d" % message.magic) return msg ################## # Public API # ################## @classmethod def encode_produce_request(cls, payloads=(), acks=1, timeout=1000): """ Encode a ProduceRequest struct Arguments: payloads: list of ProduceRequestPayload acks: How "acky" you want the request to be 1: written to disk by the leader 0: immediate response -1: waits for all replicas to be in sync timeout: Maximum time (in ms) the server will wait for replica acks. 
This is _not_ a socket timeout Returns: ProduceRequest """ if acks not in (1, 0, -1): raise ValueError('ProduceRequest acks (%s) must be 1, 0, -1' % acks) return kafka.protocol.produce.ProduceRequest( required_acks=acks, timeout=timeout, topics=[( topic, [( partition, [(0, 0, kafka.protocol.message.Message(msg.value, key=msg.key, magic=msg.magic, attributes=msg.attributes)) for msg in payload.messages]) for partition, payload in topic_payloads.items()]) for topic, topic_payloads in group_by_topic_and_partition(payloads).items()]) @classmethod def decode_produce_response(cls, response): """ Decode ProduceResponse to ProduceResponsePayload Arguments: response: ProduceResponse Return: list of ProduceResponsePayload """ return [ kafka.common.ProduceResponsePayload(topic, partition, error, offset) for topic, partitions in response.topics for partition, error, offset in partitions ] @classmethod def encode_fetch_request(cls, payloads=(), max_wait_time=100, min_bytes=4096): """ Encodes a FetchRequest struct Arguments: payloads: list of FetchRequestPayload max_wait_time (int, optional): ms to block waiting for min_bytes data. Defaults to 100. min_bytes (int, optional): minimum bytes required to return before max_wait_time. Defaults to 4096. Return: FetchRequest """ return kafka.protocol.fetch.FetchRequest( replica_id=-1, max_wait_time=max_wait_time, min_bytes=min_bytes, topics=[( topic, [( partition, payload.offset, payload.max_bytes) for partition, payload in topic_payloads.items()]) for topic, topic_payloads in group_by_topic_and_partition(payloads).items()]) @classmethod def decode_fetch_response(cls, response): """ Decode FetchResponse struct to FetchResponsePayloads Arguments: response: FetchResponse """ return [ kafka.common.FetchResponsePayload( topic, partition, error, highwater_offset, [ kafka.common.OffsetAndMessage(offset, message) for offset, _, message in messages]) for topic, partitions in response.topics for partition, error, highwater_offset, messages in partitions ] @classmethod def encode_offset_request(cls, payloads=()): return kafka.protocol.offset.OffsetRequest( replica_id=-1, topics=[( topic, [( partition, payload.time, payload.max_offsets) for partition, payload in six.iteritems(topic_payloads)]) for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))]) @classmethod def decode_offset_response(cls, response): """ Decode OffsetResponse into OffsetResponsePayloads Arguments: response: OffsetResponse Returns: list of OffsetResponsePayloads """ return [ kafka.common.OffsetResponsePayload(topic, partition, error, tuple(offsets)) for topic, partitions in response.topics for partition, error, offsets in partitions ] @classmethod def encode_metadata_request(cls, topics=(), payloads=None): """ Encode a MetadataRequest Arguments: topics: list of strings """ if payloads is not None: topics = payloads return kafka.protocol.metadata.MetadataRequest(topics) @classmethod def decode_metadata_response(cls, response): return response @classmethod def encode_consumer_metadata_request(cls, client_id, correlation_id, payloads): """ Encode a ConsumerMetadataRequest Arguments: client_id: string correlation_id: int payloads: string (consumer group) """ message = [] message.append(cls._encode_message_header(client_id, correlation_id, KafkaProtocol.CONSUMER_METADATA_KEY)) message.append(struct.pack('>h%ds' % len(payloads), len(payloads), payloads)) msg = b''.join(message) return write_int_string(msg) @classmethod def decode_consumer_metadata_response(cls, data): """ Decode 
bytes to a ConsumerMetadataResponse Arguments: data: bytes to decode """ ((correlation_id, error, nodeId), cur) = relative_unpack('>ihi', data, 0) (host, cur) = read_short_string(data, cur) ((port,), cur) = relative_unpack('>i', data, cur) return ConsumerMetadataResponse(error, nodeId, host, port) @classmethod def encode_offset_commit_request(cls, group, payloads): """ Encode an OffsetCommitRequest struct Arguments: group: string, the consumer group you are committing offsets for payloads: list of OffsetCommitRequestPayload """ return kafka.protocol.commit.OffsetCommitRequest_v0( consumer_group=group, topics=[( topic, [( partition, payload.offset, payload.metadata) for partition, payload in six.iteritems(topic_payloads)]) for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))]) @classmethod def decode_offset_commit_response(cls, response): """ Decode OffsetCommitResponse to an OffsetCommitResponsePayload Arguments: response: OffsetCommitResponse """ return [ kafka.common.OffsetCommitResponsePayload(topic, partition, error) for topic, partitions in response.topics for partition, error in partitions ] @classmethod def encode_offset_fetch_request(cls, group, payloads, from_kafka=False): """ Encode an OffsetFetchRequest struct. The request is encoded using version 0 if from_kafka is false, indicating a request for Zookeeper offsets. It is encoded using version 1 otherwise, indicating a request for Kafka offsets. Arguments: group: string, the consumer group you are fetching offsets for payloads: list of OffsetFetchRequestPayload from_kafka: bool, default False, set True for Kafka-committed offsets """ if from_kafka: request_class = kafka.protocol.commit.OffsetFetchRequest_v1 else: request_class = kafka.protocol.commit.OffsetFetchRequest_v0 return request_class( consumer_group=group, topics=[( topic, list(topic_payloads.keys())) for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))]) @classmethod def decode_offset_fetch_response(cls, response): """ Decode OffsetFetchResponse to OffsetFetchResponsePayloads Arguments: response: OffsetFetchResponse """ return [ kafka.common.OffsetFetchResponsePayload( topic, partition, offset, metadata, error ) for topic, partitions in response.topics for partition, offset, metadata, error in partitions ] def create_message(payload, key=None): """ Construct a Message Arguments: payload: bytes, the payload to send to Kafka key: bytes, a key used for partition routing (optional) """ return kafka.common.Message(0, 0, key, payload) def create_gzip_message(payloads, key=None, compresslevel=None): """ Construct a Gzipped Message containing multiple Messages The given payloads will be encoded, compressed, and sent as a single atomic message to Kafka. Arguments: payloads: list(bytes), a list of payload to send be sent to Kafka key: bytes, a key used for partition routing (optional) """ message_set = KafkaProtocol._encode_message_set( [create_message(payload, pl_key) for payload, pl_key in payloads]) gzipped = gzip_encode(message_set, compresslevel=compresslevel) codec = ATTRIBUTE_CODEC_MASK & CODEC_GZIP return kafka.common.Message(0, 0x00 | codec, key, gzipped) def create_snappy_message(payloads, key=None): """ Construct a Snappy Message containing multiple Messages The given payloads will be encoded, compressed, and sent as a single atomic message to Kafka. 
Arguments: payloads: list(bytes), a list of payloads to be sent to Kafka key: bytes, a key used for partition routing (optional) """ message_set = KafkaProtocol._encode_message_set( [create_message(payload, pl_key) for payload, pl_key in payloads]) snapped = snappy_encode(message_set) codec = ATTRIBUTE_CODEC_MASK & CODEC_SNAPPY return kafka.common.Message(0, 0x00 | codec, key, snapped) def create_message_set(messages, codec=CODEC_NONE, key=None, compresslevel=None): """Create a message set using the given codec. If codec is CODEC_NONE, return a list of raw Kafka messages. Otherwise, return a list containing a single codec-encoded message. """ if codec == CODEC_NONE: return [create_message(m, k) for m, k in messages] elif codec == CODEC_GZIP: return [create_gzip_message(messages, key, compresslevel)] elif codec == CODEC_SNAPPY: return [create_snappy_message(messages, key)] else: raise UnsupportedCodecError("Codec 0x%02x unsupported" % codec)<|fim▁end|>
MessageSize => int32
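The MessageSet and Message grammars quoted in the docstrings above map directly onto struct.pack calls. As a sanity check, here is a minimal standalone Python 3 re-sketch of _encode_message/_encode_message_set for magic number 0, with write_int_string inlined; the names mirror the snippet, and the unsigned CRC packing is an assumption of this sketch (the original packs a signed value).

import struct
from binascii import crc32

def write_int_string(b):
    # 32-bit length prefix; a length of -1 encodes a null value
    if b is None:
        return struct.pack('>i', -1)
    return struct.pack('>i', len(b)) + b

def encode_message_v0(key, value, attributes=0):
    # Message => Crc MagicByte Attributes Key Value (magic number 0)
    body = struct.pack('>BB', 0, attributes) + write_int_string(key) + write_int_string(value)
    return struct.pack('>I', crc32(body) & 0xffffffff) + body

def encode_message_set(messages):
    # MessageSet => [Offset MessageSize Message]; offset is 0 when producing
    return b''.join(
        struct.pack('>qi', 0, len(m)) + m
        for m in (encode_message_v0(k, v) for k, v in messages))

assert len(encode_message_set([(b'key', b'data')])) == 33  # 12-byte set header + 21-byte message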
<|file_name|>multicast.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # See LICENSE for more details. # # Copyright: 2016 IBM # Author: Prudhvi Miryala<[email protected]> # # test multicasting # to test we need to enable multicast option on host # then ping from peer to multicast group import netifaces from avocado import Test from avocado.utils.software_manager import SoftwareManager from avocado.utils.ssh import Session from avocado.utils import process from avocado.utils import distro from avocado.utils.network.interfaces import NetworkInterface from avocado.utils.network.hosts import LocalHost <|fim▁hole|> ''' check multicast receive using ping tool ''' def setUp(self): ''' To check and install dependencies for the test ''' self.peer = self.params.get("peer_ip", default="") self.user = self.params.get("user_name", default="root") self.peer_password = self.params.get("peer_password", '*', default="None") interfaces = netifaces.interfaces() self.iface = self.params.get("interface", default="") if self.iface not in interfaces: self.cancel("%s interface is not available" % self.iface) self.ipaddr = self.params.get("host_ip", default="") self.netmask = self.params.get("netmask", default="") local = LocalHost() self.networkinterface = NetworkInterface(self.iface, local) try: self.networkinterface.add_ipaddr(self.ipaddr, self.netmask) self.networkinterface.save(self.ipaddr, self.netmask) except Exception: self.networkinterface.save(self.ipaddr, self.netmask) self.networkinterface.bring_up() self.session = Session(self.peer, user=self.user, password=self.peer_password) if not self.session.connect(): self.cancel("failed connecting to peer") self.count = self.params.get("count", default="500000") smm = SoftwareManager() pkgs = ["net-tools"] detected_distro = distro.detect() if detected_distro.name == "Ubuntu": pkgs.extend(["openssh-client", "iputils-ping"]) elif detected_distro.name == "SuSE": pkgs.extend(["openssh", "iputils"]) else: pkgs.extend(["openssh-clients", "iputils"]) for pkg in pkgs: if not smm.check_installed(pkg) and not smm.install(pkg): self.cancel("%s package is need to test" % pkg) if self.peer == "": self.cancel("peer ip should specify in input") cmd = "ip addr show | grep %s" % self.peer output = self.session.cmd(cmd) result = "" result = result.join(output.stdout.decode("utf-8")) self.peerif = result.split()[-1] if self.peerif == "": self.cancel("unable to get peer interface") cmd = "ip -f inet -o addr show %s | awk '{print $4}' | cut -d / -f1"\ % self.iface self.local_ip = process.system_output(cmd, shell=True).strip() if self.local_ip == "": self.cancel("unable to get local ip") def test_multicast(self): ''' ping to peer machine ''' cmd = "echo 0 > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts" if process.system(cmd, shell=True, verbose=True, ignore_status=True) != 0: self.fail("unable to set value to icmp_echo_ignore_broadcasts") cmd = "ip link set %s allmulticast on" % self.iface if process.system(cmd, shell=True, verbose=True, ignore_status=True) != 0: self.fail("unable to set all mulicast option to test interface") cmd = "ip route 
add 224.0.0.0/4 dev %s" % self.peerif output = self.session.cmd(cmd) if not output.exit_status == 0: self.fail("Unable to add route for peer interface") cmd = "timeout 600 ping -I %s 224.0.0.1 -c %s -f" % (self.peerif, self.count) output = self.session.cmd(cmd) if not output.exit_status == 0: self.fail("multicast test failed") def tearDown(self): ''' delete multicast route and turn off multicast option ''' cmd = "ip route del 224.0.0.0/4" output = self.session.cmd(cmd) if not output.exit_status == 0: self.log.info("Unable to delete multicast route added for peer") cmd = "echo 1 > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts" if process.system(cmd, shell=True, verbose=True, ignore_status=True) != 0: self.log.info("unable to reset icmp_echo_ignore_broadcasts") cmd = "ip link set %s allmulticast off" % self.iface if process.system(cmd, shell=True, verbose=True, ignore_status=True) != 0: self.log.info("unable to unset allmulticast option") self.networkinterface.remove_ipaddr(self.ipaddr, self.netmask) try: self.networkinterface.restore_from_backup() except Exception: self.log.info("backup file not available, could not restore file.") self.session.quit()<|fim▁end|>
class ReceiveMulticastTest(Test):
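Stripped of the avocado plumbing, the host-side setup this test's docstring describes comes down to three commands; a hypothetical standalone sketch of just that setup (the interface name is a placeholder — the real test reads it from its YAML parameters):

import subprocess

IFACE = 'eth0'  # placeholder; the test gets this from self.params

def run(cmd):
    # rough stand-in for avocado's process.system(cmd, shell=True)
    return subprocess.call(cmd, shell=True)

run('echo 0 > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts')  # answer multicast/broadcast pings
run('ip link set %s allmulticast on' % IFACE)                   # accept all multicast frames
run('ip route add 224.0.0.0/4 dev %s' % IFACE)                  # route the multicast range

The peer then flood-pings the group (ping -I <peer_iface> 224.0.0.1 -c 500000 -f), which is exactly the command the test builds above.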
<|file_name|>test_table.py<|end_file_name|><|fim▁begin|>import unittest from datetime import date from binder.col import * from binder.table import Table, SqlCondition, SqlSort, AND, OR from bindertest.tabledefs import Foo, Bar class TableTest(unittest.TestCase): def test_init_2_AutoIdCols(self): # Table can have only 1 AutoIdCol try: Table("xyz", AutoIdCol("id1"), IntCol("x"), AutoIdCol("id2")) except AssertionError, e: self.assertEquals("Table 'xyz' has more than one AutoIdCol", str(e)) else: self.fail() def test_init_duplicate_col_name(self): try: Table("xyz", AutoIdCol("id1"), IntCol("x"), UnicodeCol("x", 20)) except AssertionError, e: self.assertEquals("Table 'xyz' has more than one column with name 'x'", str(e)) else: self.fail() def test_cols(self): expected = ["foo_id", "i1", "s1", "d1"] actual = [col.col_name for col in Foo.cols] self.assertEquals(expected, actual) expected = ["bi", "bs", "bd", "bdt1", "bb"] actual = [col.col_name for col in Bar.cols] self.assertEquals(expected, actual)<|fim▁hole|> # AutoIdCol field identified by __init__ self.assert_(Foo.auto_id_col is Foo.cols[0]) self.assert_(Bar.auto_id_col is None) def test_new_parse_defaults(self): expected = { "foo_id": None, "i1": 0, "s1": "", "d1": None, } actual = Foo.new() self.assertEquals(expected, actual) actual = Foo.parse() self.assertEquals(expected, actual) expected = { "bi": None, "bs": "", "bd": None, "bdt1": None, "bb": False, } actual = Bar.new() self.assertEquals(expected, actual) actual = Bar.parse() self.assertEquals(expected, actual) def test_parse_auto_id(self): expected = { "foo_id": None, "i1": 0, "s1": "", "d1": None, } actual = Foo.parse(foo_id=None) self.assertEquals(expected, actual) def test_new_parse_all(self): expected = { "foo_id": 42, "i1": 101, "s1": "alpha", "d1": date(2006,6,6), } actual = Foo.new(foo_id=42, i1=101, s1="alpha", d1=date(2006,6,6)) self.assertEquals(expected, actual) actual = Foo.parse(foo_id="42", i1="101", s1="alpha", d1="2006-06-06") self.assertEquals(expected, actual) # parse some fields str actual = Foo.parse(foo_id="42", i1=101, s1="alpha", d1=date(2006,6,6)) self.assertEquals(expected, actual) def test_new_parse_some_fields(self): expected = { "foo_id": 42, "i1": 0, "s1": "alpha", "d1": None, } actual = Foo.new(foo_id=42, s1="alpha") self.assertEquals(expected, actual) actual = Foo.parse(foo_id="42", s1="alpha") self.assertEquals(expected, actual) def test_new_parse_clone(self): # new() and parse() should return a new dictionary expected = { "foo_id": 42, "i1": 0, "s1": "alpha", "d1": None, } actual = Foo.new(**expected) self.assertEquals(expected, actual) self.assertFalse(actual is expected) actual = Foo.parse(**expected) self.assertEquals(expected, actual) self.assertFalse(actual is expected) def test_new_parse_unkown_cols(self): # DONT copy unknown columns expected = { "foo_id": None, "i1": 16, "s1": "", "d1": None, } actual = Foo.new(i1=16, s2="beta") self.assertEquals(expected, actual) actual = Foo.parse(i1="16", s2="beta") self.assertEquals(expected, actual) def test_parse_empty_string(self): # parse() replaces empty strings with default value expected = { "foo_id": None, "i1": 0, "s1": "", "d1": None, } actual = Foo.parse(foo_id="", i1="", s1="", d1="") self.assertEquals(expected, actual) expected = { "bi": None, "bs": "", "bd": None, "bdt1": None, "bb": False, } actual = Bar.parse(bi="", bs="", bd="", bdt1="", bb="") self.assertEquals(expected, actual) def test_new_bad_values(self): # new() does not allow bad values try: Foo.new(i1="bar", s2=1.1) except 
TypeError, e: self.assertEquals("IntCol 'i1': int expected, got str", str(e)) else: self.fail() def test_parse_bad_values(self): # parse() does not allow non-string bad values try: Foo.parse(i1=2.3, s2=1.1) except TypeError, e: self.assertEquals("IntCol 'i1': int expected, got float", str(e)) else: self.fail() def test_parse_error(self): # parse() gives parse error for bad strings try: Foo.parse(i1="2.3", s2=1.1) except ValueError, e: self.assert_( str(e) in [ "invalid literal for int(): 2.3", "invalid literal for int() with base 10: '2.3'", ] ) else: self.fail() def test_check_values(self): # defaults / None foo = Foo.new() auto_id = Foo.check_values(foo) self.assert_(auto_id) # given values / no None foo = { "foo_id": 42, "i1": 101, "s1": "alpha", "d1": date(2006,6,6), } auto_id = Foo.check_values(foo) self.assertFalse(auto_id) # bad value foo = Foo.new() foo["i1"] = "bar" try: Foo.check_values(foo) except TypeError, e: self.assertEquals("IntCol 'i1': int expected, got str", str(e)) else: self.fail() # bad value foo = Foo.new() foo["s1"] = 1.1 try: Foo.check_values(foo) except TypeError, e: self.assertEquals("UnicodeCol 's1': unicode expected, got float", str(e)) else: self.fail() # unknown columns ignored foo = Foo.new(s2=None) foo["s3"] = 1.2 auto_id = Foo.check_values(foo) self.assert_(True, auto_id) def test_q(self): q = Foo.q # existing columns q_foo_id = Foo.q.foo_id q_i1 = Foo.q.i1 # non-existing column try: Foo.q.i2 except AttributeError, e: self.assertEquals("QueryCols instance has no attribute 'i2'", str(e)) else: self.fail() def test_q_ops(self): qexpr = Foo.q.foo_id == 1 self.assert_(isinstance(qexpr, SqlCondition)) qexpr = Foo.q.d1 == None self.assert_(isinstance(qexpr, SqlCondition)) qexpr = Foo.q.d1 > date(2007, 5, 22) self.assert_(isinstance(qexpr, SqlCondition)) qexpr = Foo.q.d1 >= date(2007, 5, 22) self.assert_(isinstance(qexpr, SqlCondition)) qexpr = Foo.q.d1 < date(2007, 5, 22) self.assert_(isinstance(qexpr, SqlCondition)) qexpr = Foo.q.d1 <= date(2007, 5, 22) self.assert_(isinstance(qexpr, SqlCondition)) def test_q_ops_assign(self): try: Foo.q.foo_id = "xyz" except AttributeError: pass else: self.fail() def test_q_ops_check_value(self): try: Foo.q.foo_id == "xyz" except TypeError, e: self.assertEquals("AutoIdCol 'foo_id': int expected, got str", str(e)) else: self.fail() try: Foo.q.s1 > 23 except TypeError, e: self.assertEquals("UnicodeCol 's1': unicode expected, got int", str(e)) else: self.fail() def test_q_ops_auto_id(self): try: Foo.q.foo_id == None except AssertionError, e: self.assertEquals("SqlCondition: cannot use None for AutoIdCol", str(e)) else: self.fail() def test_AND(self): qexpr1 = Foo.q.foo_id == 1 qexpr2 = Foo.q.s1 == 'x' qexpr3 = Foo.q.d1 == None AND(qexpr1, qexpr2) AND(qexpr1, qexpr2, qexpr3) try: AND(qexpr1, "xyz") except AssertionError, e: self.assertEquals("AND: conditions must be SqlCondition", str(e)) else: self.fail() try: AND(qexpr1) except AssertionError, e: self.assertEquals("AND: must have at least 2 conditions", str(e)) else: self.fail() def test_OR(self): qexpr1 = Foo.q.foo_id == 1 qexpr2 = Foo.q.s1 == 'x' qexpr3 = Foo.q.d1 == None OR(qexpr1, qexpr2) OR(qexpr1, qexpr2, qexpr3) try: OR(qexpr1, "xyz") except AssertionError, e: self.assertEquals("OR: conditions must be SqlCondition", str(e)) else: self.fail() try: OR(qexpr1) except AssertionError, e: self.assertEquals("OR: must have at least 2 conditions", str(e)) else: self.fail() def test_q_sort(self): qexpr = Foo.q.foo_id.ASC self.assert_(isinstance(qexpr, SqlSort)) qexpr = 
Foo.q.d1.DESC self.assert_(isinstance(qexpr, SqlSort)) if __name__ == '__main__': unittest.main()<|fim▁end|>
def test_auto_id_col(self):
<|file_name|>(open).js<|end_file_name|><|fim▁begin|>(function (root, undefined) {<|fim▁hole|><|fim▁end|>
"use strict";
<|file_name|>util.rs<|end_file_name|><|fim▁begin|>/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ use crate::{ client_extensions::CLIENT_EXTENSION_DIRECTIVE_NAME, connections::ConnectionMetadataDirective, handle_fields::HANDLE_FIELD_DIRECTIVE_NAME, inline_data_fragment::InlineDirectiveMetadata, react_flight::REACT_FLIGHT_SCALAR_FLIGHT_FIELD_METADATA_KEY, refetchable_fragment::RefetchableMetadata, relay_actor_change::RELAY_ACTOR_CHANGE_DIRECTIVE_FOR_CODEGEN, required_directive::{CHILDREN_CAN_BUBBLE_METADATA_KEY, REQUIRED_DIRECTIVE_NAME}, ModuleMetadata, ReactFlightLocalComponentsMetadata, RefetchableDerivedFromMetadata, RelayClientComponentMetadata, RelayResolverSpreadMetadata, RequiredMetadataDirective, CLIENT_EDGE_GENERATED_FRAGMENT_KEY, CLIENT_EDGE_METADATA_KEY, CLIENT_EDGE_QUERY_METADATA_KEY, DIRECTIVE_SPLIT_OPERATION, INTERNAL_METADATA_DIRECTIVE, }; use graphql_ir::{ Argument, Directive, ProvidedVariableMetadata, Value, ARGUMENT_DEFINITION, UNUSED_LOCAL_VARIABLE_DEPRECATED, }; use intern::string_key::{Intern, StringKey}; use lazy_static::lazy_static; use regex::Regex; use schema::{SDLSchema, Schema, Type}; // A wrapper type that allows comparing pointer equality of references. Two // `PointerAddress` values are equal if they point to the same memory location. // // This type is _sound_, but misuse can easily lead to logical bugs if the memory // of one PointerAddress could not have been freed and reused for a subsequent // PointerAddress. #[derive(Hash, Eq, PartialEq, Clone, Copy)] pub struct PointerAddress(usize); impl PointerAddress { pub fn new<T>(ptr: &T) -> Self { let ptr_address: usize = unsafe { std::mem::transmute(ptr) }; Self(ptr_address) } } /// This function will return a new Vec[...] of directives, /// where one will be missing. The one with `remove_directive_name` name pub fn remove_directive( directives: &[Directive], remove_directive_name: StringKey, ) -> Vec<Directive> { let mut next_directives = Vec::with_capacity(directives.len() - 1); for directive in directives { if directive.name.item != remove_directive_name { next_directives.push(directive.clone()); } } next_directives } /// Function will create a new Vec[...] of directives /// when one of them will be replaced with the `replacement`. If the name of /// `replacement` is matched with the item in the list pub fn replace_directive(directives: &[Directive], replacement: Directive) -> Vec<Directive> { directives .iter() .map(|directive| { if directive.name.item == replacement.name.item { return replacement.to_owned(); } directive.to_owned() }) .collect() } /// The function that will return a variable name for an argument /// it it uses a variable (and it the argument is available) pub fn extract_variable_name(argument: Option<&Argument>) -> Option<StringKey> { match argument { Some(arg) => match &arg.value.item { Value::Variable(var) => Some(var.name.item), _ => None, }, None => None, } } lazy_static! 
{ static ref CUSTOM_METADATA_DIRECTIVES: [StringKey; 22] = [ *CLIENT_EXTENSION_DIRECTIVE_NAME, ConnectionMetadataDirective::directive_name(), *HANDLE_FIELD_DIRECTIVE_NAME, ModuleMetadata::directive_name(), *DIRECTIVE_SPLIT_OPERATION, RefetchableMetadata::directive_name(), RefetchableDerivedFromMetadata::directive_name(), *INTERNAL_METADATA_DIRECTIVE, *ARGUMENT_DEFINITION, *REACT_FLIGHT_SCALAR_FLIGHT_FIELD_METADATA_KEY, ReactFlightLocalComponentsMetadata::directive_name(), *REQUIRED_DIRECTIVE_NAME, RequiredMetadataDirective::directive_name(), *CLIENT_EDGE_METADATA_KEY, *CLIENT_EDGE_QUERY_METADATA_KEY, *CLIENT_EDGE_GENERATED_FRAGMENT_KEY, *CHILDREN_CAN_BUBBLE_METADATA_KEY, RelayResolverSpreadMetadata::directive_name(), RelayClientComponentMetadata::directive_name(), *UNUSED_LOCAL_VARIABLE_DEPRECATED, *RELAY_ACTOR_CHANGE_DIRECTIVE_FOR_CODEGEN, ProvidedVariableMetadata::directive_name(), ]; static ref DIRECTIVES_SKIPPED_IN_NODE_IDENTIFIER: [StringKey; 12] = [ *CLIENT_EXTENSION_DIRECTIVE_NAME, ConnectionMetadataDirective::directive_name(), *HANDLE_FIELD_DIRECTIVE_NAME, RefetchableMetadata::directive_name(), RefetchableDerivedFromMetadata::directive_name(), *INTERNAL_METADATA_DIRECTIVE, *ARGUMENT_DEFINITION, *REACT_FLIGHT_SCALAR_FLIGHT_FIELD_METADATA_KEY, ReactFlightLocalComponentsMetadata::directive_name(), *REQUIRED_DIRECTIVE_NAME, RelayResolverSpreadMetadata::directive_name(), RelayClientComponentMetadata::directive_name(), ]; static ref RELAY_CUSTOM_INLINE_FRAGMENT_DIRECTIVES: [StringKey; 6] = [ *CLIENT_EXTENSION_DIRECTIVE_NAME, ModuleMetadata::directive_name(), InlineDirectiveMetadata::directive_name(), *RELAY_ACTOR_CHANGE_DIRECTIVE_FOR_CODEGEN, *CLIENT_EDGE_METADATA_KEY, "defer".intern(), ]; static ref VALID_PROVIDED_VARIABLE_NAME: Regex = Regex::new(r#"^[A-Za-z0-9_]*$"#).unwrap(); pub static ref INTERNAL_RELAY_VARIABLES_PREFIX: StringKey = "__relay_internal".intern(); } pub struct CustomMetadataDirectives; impl CustomMetadataDirectives { pub fn is_custom_metadata_directive(name: StringKey) -> bool { CUSTOM_METADATA_DIRECTIVES.contains(&name) } pub fn should_skip_in_node_identifier(name: StringKey) -> bool { DIRECTIVES_SKIPPED_IN_NODE_IDENTIFIER.contains(&name) } pub fn is_handle_field_directive(name: StringKey) -> bool { name == *HANDLE_FIELD_DIRECTIVE_NAME } } pub fn is_relay_custom_inline_fragment_directive(directive: &Directive) -> bool { RELAY_CUSTOM_INLINE_FRAGMENT_DIRECTIVES.contains(&directive.name.item) } pub fn generate_abstract_type_refinement_key(schema: &SDLSchema, type_: Type) -> StringKey { format!("__is{}", schema.get_type_name(type_).lookup()).intern() } pub fn get_normalization_operation_name(name: StringKey) -> String { format!("{}$normalization", name) } pub fn get_fragment_filename(fragment_name: StringKey) -> StringKey {<|fim▁hole|> format!( "{}.graphql", get_normalization_operation_name(fragment_name) ) .intern() } pub fn format_provided_variable_name(module_name: StringKey) -> StringKey { if VALID_PROVIDED_VARIABLE_NAME.is_match(module_name.lookup()) { format!( "{}__pv__{}", *INTERNAL_RELAY_VARIABLES_PREFIX, module_name.lookup() ) .intern() } else { let transformed_name = module_name .lookup() .chars() .filter(|c| c.is_ascii_alphanumeric() || *c == '_') .collect::<String>(); format!( "{}__pv__{}", *INTERNAL_RELAY_VARIABLES_PREFIX, transformed_name ) .intern() } }<|fim▁end|>
<|file_name|>ctdav_n_auv.py<|end_file_name|><|fim▁begin|>""" @package mi.dataset.parser @file marine-integrations/mi/dataset/parser/ctdav_n_auv.py @author Jeff Roy @brief Parser and particle Classes and tools for the ctdav_n_auv data Release notes: initial release """ __author__ = 'Jeff Roy' __license__ = 'Apache 2.0' from mi.core.log import get_logger log = get_logger() from mi.dataset.parser.auv_common import \ AuvCommonParticle, \ AuvCommonParser, \ compute_timestamp # The structure below is a list of tuples # Each tuple consists of # parameter name, index into raw data parts list, encoding function CTDAV_N_AUV_PARAM_MAP = [ # message ID is typically index 0 ('mission_epoch', 1, int), ('auv_latitude', 2, float), ('auv_longitude', 3, float), ('mission_time', 4, int), ('m_depth', 5, float), ('ctdav_n_auv_conductivity', 6, float), ('temperature', 7, float), ('salinity', 8, float), ('speed_of_sound', 9, float), ('dissolved_oxygen', 10, float), ('powered_on', 11, int) ] class CtdavNAuvInstrumentParticle(AuvCommonParticle): _auv_param_map = CTDAV_N_AUV_PARAM_MAP # must provide a parameter map for _build_parsed_values class CtdavNAuvTelemeteredParticle(CtdavNAuvInstrumentParticle): # set the data_particle_type for the DataParticle class _data_particle_type = "ctdav_n_auv_instrument" class CtdavNAuvRecoveredParticle(CtdavNAuvInstrumentParticle): # set the data_particle_type for the DataParticle class _data_particle_type = "ctdav_n_auv_instrument_recovered" CTDAV_N_AUV_ID = '1181' # message ID of ctdav_n records CTDAV_N_AUV_FIELD_COUNT = 12 # number of expected fields in an ctdav_n record CTDAV_N_AUV_TELEMETERED_MESSAGE_MAP = [(CTDAV_N_AUV_ID, CTDAV_N_AUV_FIELD_COUNT, compute_timestamp, CtdavNAuvTelemeteredParticle)] CTDAV_N_AUV_RECOVERED_MESSAGE_MAP = [(CTDAV_N_AUV_ID, CTDAV_N_AUV_FIELD_COUNT, compute_timestamp, CtdavNAuvRecoveredParticle)] class CtdavNAuvParser(AuvCommonParser): def __init__(self, stream_handle, exception_callback, is_telemetered): if is_telemetered: message_map = CTDAV_N_AUV_TELEMETERED_MESSAGE_MAP else: message_map = CTDAV_N_AUV_RECOVERED_MESSAGE_MAP # provide message ID and # of fields to parent class super(CtdavNAuvParser, self).__init__(stream_handle, exception_callback,<|fim▁hole|><|fim▁end|>
message_map)
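The (name, index, encoder) triples in CTDAV_N_AUV_PARAM_MAP above are all the parent class needs to turn a split record into particle values; a toy illustration of that lookup, using an abridged copy of the map and a fabricated record:

PARAM_MAP = [  # abridged from CTDAV_N_AUV_PARAM_MAP above
    ('mission_epoch', 1, int),
    ('m_depth', 5, float),
    ('temperature', 7, float),
]

def build_values(parts, param_map):
    # index into the raw parts list, then apply the encoding function
    return {name: encode(parts[idx]) for name, idx, encode in param_map}

parts = ['1181', '1400000000', '0.0', '0.0', '0', '12.5', '3.3', '9.81']  # fabricated record
print(build_values(parts, PARAM_MAP))  # {'mission_epoch': 1400000000, 'm_depth': 12.5, 'temperature': 9.81}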
<|file_name|>to_bits.rs<|end_file_name|><|fim▁begin|>use malachite_base::num::logic::traits::{BitAccess, SignificantBits}; use malachite_nz::integer::Integer;<|fim▁hole|>pub fn to_bits_asc_naive(n: &Integer) -> Vec<bool> { let mut bits = Vec::new(); if *n == 0 { return bits; } for i in 0..n.significant_bits() { bits.push(n.get_bit(i)); } let last_bit = *bits.last().unwrap(); if last_bit != (*n < 0) { bits.push(!last_bit); } bits } pub fn to_bits_desc_naive(n: &Integer) -> Vec<bool> { let mut bits = Vec::new(); if *n == 0 { return bits; } let significant_bits = n.significant_bits(); let last_bit = n.get_bit(significant_bits - 1); if last_bit != (*n < 0) { bits.push(!last_bit); } for i in (0..significant_bits).rev() { bits.push(n.get_bit(i)); } bits }<|fim▁end|>
<|file_name|>glm.py<|end_file_name|><|fim▁begin|>"""Interface to rpy2.glm Copyright 2012 Allen B. Downey License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html """ import rpy2.robjects as robjects r = robjects.r def linear_model(model, print_flag=True): """Submits model to r.lm and returns the result.""" model = r(model) res = r.lm(model) if print_flag: print_summary(res) return res <|fim▁hole|> def logit_model(model, family=robjects.r.binomial(), weights=None, print_flag=True): """Submits model to r.glm and returns the result.""" model = r(model) if weights is None: res = r.glm(model, family=family) else: weight_vector = robjects.FloatVector(weights) res = r.glm(model, family=family, weights=weight_vector) if print_flag: print_summary(res) return res def print_summary(res): """Prints results from r.lm (just the parts we want).""" flag = False lines = r.summary(res) lines = str(lines) for line in lines.split('\n'): # skip everything until we get to coefficients if line.startswith('Coefficients'): flag = True if line.startswith('Signif'): continue if flag: print line print def get_coeffs(res): """Gets just the lines that contain the estimates. res: R glm result object Returns: list of (name, estimate, error, z-value) tuples and AIC """ flag = False lines = r.summary(res) lines = str(lines) res = [] aic = None for line in lines.split('\n'): #print "line: " + str(line) line = line.strip() #print "lineStrip: " + str(line) if line.startswith('---') or line == "": #print "startswith('---')" flag = False if line.startswith('AIC'): #print "startswith('AIC')" t = line.split() aic = float(t[1]) if flag: #print "flag" t = line.split() #print "t: " + str(t) var = t[0] est = float(t[1]) error = float(t[2]) z = float(t[3]) res.append((var, est, error, z)) if line.startswith('Estimate'): #print "startswith('Estimate')" flag = True return res, aic def inject_col_dict(col_dict, prefix=''): """Copies data columns into the R global environment. col_dict: map from attribute name to column of data prefix: string prepended to the attribute names """ for name, col in col_dict.iteritems(): robjects.globalenv[prefix+name] = robjects.FloatVector(col)<|fim▁end|>
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>""" Gauged https://github.com/chriso/gauged (MIT Licensed) Copyright 2014 (c) Chris O'Hara <[email protected]> """ from urlparse import urlparse, parse_qsl from urllib import unquote from .mysql import MySQLDriver from .sqlite import SQLiteDriver from .postgresql import PostgreSQLDriver def parse_dsn(dsn_string): """Parse a connection string and return the associated driver""" dsn = urlparse(dsn_string) scheme = dsn.scheme.split('+')[0] username = password = host = port = None host = dsn.netloc if '@' in host: username, host = host.split('@') if ':' in username: username, password = username.split(':') password = unquote(password) username = unquote(username) if ':' in host: host, port = host.split(':') port = int(port) database = dsn.path.split('?')[0][1:] query = dsn.path.split('?')[1] if '?' in dsn.path else dsn.query kwargs = dict(parse_qsl(query, True)) if scheme == 'sqlite': return SQLiteDriver, [dsn.path], {} elif scheme == 'mysql': kwargs['user'] = username or 'root' kwargs['db'] = database if port: kwargs['port'] = port if host: kwargs['host'] = host if password: kwargs['passwd'] = password return MySQLDriver, [], kwargs elif scheme == 'postgresql': kwargs['user'] = username or 'postgres' kwargs['database'] = database if port: kwargs['port'] = port<|fim▁hole|> if password: kwargs['password'] = password return PostgreSQLDriver, [], kwargs else: raise ValueError('Unknown driver %s' % dsn_string) def get_driver(dsn_string): driver, args, kwargs = parse_dsn(dsn_string) return driver(*args, **kwargs)<|fim▁end|>
if 'unix_socket' in kwargs: kwargs['host'] = kwargs.pop('unix_socket') elif host: kwargs['host'] = host
<|file_name|>template-param-usage-6.rs<|end_file_name|><|fim▁begin|>#![allow( dead_code, non_snake_case, non_camel_case_types, non_upper_case_globals )] #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct DoesNotUseTemplateParameter { pub x: ::std::os::raw::c_int,<|fim▁hole|>pub type DoesNotUseTemplateParameter_ButAliasDoesUseIt<T> = T;<|fim▁end|>
}
<|file_name|>main.js<|end_file_name|><|fim▁begin|>//main.js // const greeter = require('./Greeter.js'); // document.querySelector("#root").appendChild(greeter()); import React from 'react'; import {render} from 'react-dom'; import Greeter from './Greeter';<|fim▁hole|>import './main.css';//use require to import the css file render(<Greeter />, document.getElementById('root'));<|fim▁end|>
<|file_name|>E0663.rs<|end_file_name|><|fim▁begin|>// Copyright 2018 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(asm)] fn main() {<|fim▁hole|> : : "+test"("a") //~ ERROR E0663 ); }<|fim▁end|>
asm!("xor %eax, %eax"
<|file_name|>0003_emailvalidationtoken.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2016-09-16 21:51 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion <|fim▁hole|> class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('user', '0002_profile_validated'), ] operations = [ migrations.CreateModel( name='EmailValidationToken', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('token', models.CharField(max_length=100, unique=True)), ('expire', models.DateTimeField()), ('consumed', models.BooleanField(default=False)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]<|fim▁end|>
<|file_name|>sudoku_solver_level_3_1.py<|end_file_name|><|fim▁begin|># https://www.codewars.com/kata/sudoku-solver/train/python def sudoku(puzzle): import collections print('=== in sudoku ===') # Print arguments print_board(puzzle) # Count the numbers that are currently populated on the sudoku board number_count = collections.Counter(number for row in puzzle for number in row) number_count = dict(number_count) print_dict(number_count)<|fim▁hole|> if 0 in number_count: zero_count = number_count[0] else: zero_count = 0 print('zero_count = {}'.format(zero_count)) if (zero_count > 0): # Get the missing numbers for all the rows rows_missing_numbers = check_rows(puzzle) print('-- rows_missing_numbers --') # print_board(rows_missing_numbers) # Rotate the board to check the columns rotated_board = list(reversed(zip(*puzzle))) # print('-- Rotated Board --') # print_board(rotated_board) # Get the missing numbers for all the columns columns_missing_numbers = list(reversed(check_rows(rotated_board))) print('-- columns_missing_numbers --') # print_board(columns_missing_numbers) # Validate all the inner 3x3 boards grid_missing_numbers = [] # Step 1: Split the rows into 3 columns # Break up the 9x9 board into 3 9x3 boards (i.e. split up all the rows into 3 parts) split_board = [] # Contains original board with all rows split into 3 parts for row in puzzle: # Go through each row in the board single_row = [row[s_index:s_index+3] for s_index in xrange(0, len(row), 3)] # Break it down: # single_row = [] # # for s_index in xrange(0, len(row), 3): # s_index to define where to split the row (every 3 numbers) # split_row = row[s_index:s_index+3] # Get the sub-row (of length 3) from the original row # single_row.append(split_row) # Append that sub-row list to a new list containing the all 3 sub-rows split_board.append(single_row) # Append "row that is split into 3 lists/rows" as a single row into the split board matrix # Rotate the board # Step 2: Split the columns into 3 rows # Converts the 9x3 boards into 3x9 boards (i.e. 
split up all the columns into 3 parts) # Technically, we're putting the 9 rows from the 9x3 board into a single row with 9 1x3 rows rotated_board = list(zip(*split_board)) # Rotate the board, so we can work on the columns as if they were rows # Split the board again # Break up the 3 3x9 boards into 9 3x3 boards for row in rotated_board: # For each row in the rotated board for s_index in range(0, len(row), 3): # Define the an index to split the columns on (step by 3) inner_board = row[s_index:s_index+3] # Every 3 1x3 sub-rows in this row define the inner 3x3 matrix single_row = [[digit for row_3x3 in inner_board for digit in row_3x3]] # Convert the 3x3 matrix into a single nested list [[1, ..., 9]], so we can check it # Break it down: # for row_3x3 in inner_board: # # for digit in row_3x3: # # single_row[0].append(digit) # grid_missing_numbers.append(*check_rows(single_row)) print('-- grid_missing_numbers --') # print_board(grid_missing_numbers) # Loop through the puzzle board, until we find a '0' # Count of zeros print('-- Looking for a 0 --') board = replace_zero(puzzle, rows_missing_numbers, columns_missing_numbers, grid_missing_numbers) print('-- (replaced) board --') print_board(board) sudoku(board) else: return puzzle print_board(puzzle) return puzzle def replace_zero(puzzle, rows_missing_numbers, columns_missing_numbers, grid_missing_numbers): print('-- in replace_zero --') grid_mapping = { '00': 0, '01': 3, '02': 6, '10': 1, '11': 4, '12': 7, '20': 2, '21': 5, '22': 8, } for row in xrange(9): for column in xrange(9): # print('zero_count = {}'.format(zero_count)) if (puzzle[row][column] == 0): # print('row = {}'.format(row)) # print('column = {}'.format(column)) # Determine which grid the 0 is in # Determine the row if (0 <= row <= 2): grid_row = 0 elif (3 <= row <= 5): grid_row = 1 elif (6 <= row <= 8): grid_row = 2 # Determine the column if (0 <= column <= 2): grid_column = 0 elif (3 <= column <= 5): grid_column = 1 elif (6 <= column <= 8): grid_column = 2 grid_key = '{}{}'.format(grid_row, grid_column) grid = grid_mapping[grid_key] # print('grid row, column = ({}, {} --> {})'.format(grid_row, grid_column, grid_key)) # print('rows_missing_numbers[{}]: {}'.format(row, rows_missing_numbers[row])) # print('columns_missing_numbers[{}]: {}'.format(column, columns_missing_numbers[column])) # print('grid_missing_numbers[{}]: {}'.format(grid, grid_missing_numbers[grid])) # print # Intersect the 3 lists to get the common numbers from all lists missing_numbers = list(set.intersection(set(rows_missing_numbers[row]), set(columns_missing_numbers[column]), set(grid_missing_numbers[grid]))) # print('missing_numbers = {}'.format(missing_numbers)) # If there's only 1 missing number, put it into the original # puzzle, and re-run the loop if (len(missing_numbers) == 1): print('++ Replacing these numbers ++') print('missing_numbers = {}'.format(missing_numbers)) print('row = {}'.format(row)) print('column = {}'.format(column)) puzzle[row][column] = missing_numbers[0] # zero_count -= 1 # print_board(puzzle) # print return puzzle def check_rows(board): # Define the list of required numbers required_numbers = range(1,9+1) # List of missing numbers for all rows in the board missing_numbers = [] # Default result result = True # Validate all rows for row in board: # print('Row: <{}>'.format(row)) numbers_to_check = required_numbers[:] for number in row: if number == 0: continue elif number in numbers_to_check: # If the number we're checking hasn't been seen yet numbers_to_check.remove(number) # Them remove 
it from the remaining numbers to check else: # Otherwise, we're seeing a number we do not expect print('???') missing_numbers.append(numbers_to_check) # print('--> numbers_to_check = <{}>'.format(numbers_to_check)) # print return missing_numbers def print_board(board): print('-- print_board --') for row in board: print row def print_dict(my_dict): for key in sorted(my_dict): print('{}: {}'.format(key, my_dict[key]))<|fim▁end|>
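The core move in the solver above is set intersection: a cell's candidates are the numbers missing from its row, its column, and its 3x3 grid, and the cell is filled only when exactly one candidate survives. The same rule in a compact standalone form (equivalently computed as the complement of the numbers already used; board is a 9x9 list of lists with 0 for blanks):

def candidates(board, r, c):
    # numbers already used in this row, column and 3x3 box
    row = set(board[r])
    col = set(board[i][c] for i in range(9))
    br, bc = 3 * (r // 3), 3 * (c // 3)
    box = set(board[i][j] for i in range(br, br + 3) for j in range(bc, bc + 3))
    return set(range(1, 10)) - (row | col | box)

def fill_singletons(board):
    # one pass of the "exactly one candidate" rule; returns True if anything changed
    changed = False
    for r in range(9):
        for c in range(9):
            if board[r][c] == 0:
                cand = candidates(board, r, c)
                if len(cand) == 1:
                    board[r][c] = cand.pop()
                    changed = True
    return changed

Repeating fill_singletons until it returns False reproduces the snippet's solve-then-recurse loop, without the recursion.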
<|file_name|>macro_parser.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Earley-like parser for macros. use ast; use ast::{Matcher, MatchTok, MatchSeq, MatchNonterminal, Ident}; use codemap::{BytePos, mk_sp}; use codemap; use parse::lexer::*; //resolve bug? use parse::ParseSess; use parse::attr::ParserAttr;<|fim▁hole|> use std::rc::Rc; use collections::HashMap; /* This is an Earley-like parser, without support for in-grammar nonterminals, only by calling out to the main rust parser for named nonterminals (which it commits to fully when it hits one in a grammar). This means that there are no completer or predictor rules, and therefore no need to store one column per token: instead, there's a set of current Earley items and a set of next ones. Instead of NTs, we have a special case for Kleene star. The big-O, in pathological cases, is worse than traditional Earley parsing, but it's an easier fit for Macro-by-Example-style rules, and I think the overhead is lower. (In order to prevent the pathological case, we'd need to lazily construct the resulting `NamedMatch`es at the very end. It'd be a pain, and require more memory to keep around old items, but it would also save overhead)*/ /* Quick intro to how the parser works: A 'position' is a dot in the middle of a matcher, usually represented as a dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`. The parser walks through the input a character at a time, maintaining a list of items consistent with the current position in the input string: `cur_eis`. As it processes them, it fills up `eof_eis` with items that would be valid if the macro invocation is now over, `bb_eis` with items that are waiting on a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting on the a particular token. Most of the logic concerns moving the · through the repetitions indicated by Kleene stars. It only advances or calls out to the real Rust parser when no `cur_eis` items remain Example: Start parsing `a a a a b` against [· a $( a )* a b]. Remaining input: `a a a a b` next_eis: [· a $( a )* a b] - - - Advance over an `a`. - - - Remaining input: `a a a b` cur: [a · $( a )* a b] Descend/Skip (first item). next: [a $( · a )* a b] [a $( a )* · a b]. - - - Advance over an `a`. - - - Remaining input: `a a b` cur: [a $( a · )* a b] next: [a $( a )* a · b] Finish/Repeat (first item) next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b] - - - Advance over an `a`. - - - (this looks exactly like the last step) Remaining input: `a b` cur: [a $( a · )* a b] next: [a $( a )* a · b] Finish/Repeat (first item) next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b] - - - Advance over an `a`. - - - (this looks exactly like the last step) Remaining input: `b` cur: [a $( a · )* a b] next: [a $( a )* a · b] Finish/Repeat (first item) next: [a $( a )* · a b] [a $( · a )* a b] - - - Advance over a `b`. - - - Remaining input: `` eof: [a $( a )* a b ·] */ /* to avoid costly uniqueness checks, we require that `MatchSeq` always has a nonempty body. 
*/ #[deriving(Clone)] pub struct MatcherPos { elts: Vec<ast::Matcher> , // maybe should be <'>? Need to understand regions. sep: Option<Token>, idx: uint, up: Option<Box<MatcherPos>>, matches: Vec<Vec<Rc<NamedMatch>>>, match_lo: uint, match_hi: uint, sp_lo: BytePos, } pub fn count_names(ms: &[Matcher]) -> uint { ms.iter().fold(0, |ct, m| { ct + match m.node { MatchTok(_) => 0u, MatchSeq(ref more_ms, _, _, _, _) => { count_names(more_ms.as_slice()) } MatchNonterminal(_, _, _) => 1u }}) } pub fn initial_matcher_pos(ms: Vec<Matcher> , sep: Option<Token>, lo: BytePos) -> Box<MatcherPos> { let mut match_idx_hi = 0u; for elt in ms.iter() { match elt.node { MatchTok(_) => (), MatchSeq(_,_,_,_,hi) => { match_idx_hi = hi; // it is monotonic... } MatchNonterminal(_,_,pos) => { match_idx_hi = pos+1u; // ...so latest is highest } } } let matches = Vec::from_fn(count_names(ms.as_slice()), |_i| Vec::new()); box MatcherPos { elts: ms, sep: sep, idx: 0u, up: None, matches: matches, match_lo: 0u, match_hi: match_idx_hi, sp_lo: lo } } // NamedMatch is a pattern-match result for a single ast::MatchNonterminal: // so it is associated with a single ident in a parse, and all // MatchedNonterminal's in the NamedMatch have the same nonterminal type // (expr, item, etc). All the leaves in a single NamedMatch correspond to a // single matcher_nonterminal in the ast::Matcher that produced it. // // It should probably be renamed, it has more or less exact correspondence to // ast::match nodes, and the in-memory structure of a particular NamedMatch // represents the match that occurred when a particular subset of an // ast::match -- those ast::Matcher nodes leading to a single // MatchNonterminal -- was applied to a particular token tree. // // The width of each MatchedSeq in the NamedMatch, and the identity of the // MatchedNonterminal's, will depend on the token tree it was applied to: each // MatchedSeq corresponds to a single MatchSeq in the originating // ast::Matcher. The depth of the NamedMatch structure will therefore depend // only on the nesting depth of ast::MatchSeq's in the originating // ast::Matcher it was derived from. pub enum NamedMatch { MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span), MatchedNonterminal(Nonterminal) } pub fn nameize(p_s: &ParseSess, ms: &[Matcher], res: &[Rc<NamedMatch>]) -> HashMap<Ident, Rc<NamedMatch>> { fn n_rec(p_s: &ParseSess, m: &Matcher, res: &[Rc<NamedMatch>], ret_val: &mut HashMap<Ident, Rc<NamedMatch>>) { match *m { codemap::Spanned {node: MatchTok(_), .. } => (), codemap::Spanned {node: MatchSeq(ref more_ms, _, _, _, _), .. 
} => { for next_m in more_ms.iter() { n_rec(p_s, next_m, res, ret_val) }; } codemap::Spanned { node: MatchNonterminal(bind_name, _, idx), span } => { if ret_val.contains_key(&bind_name) { let string = token::get_ident(bind_name); p_s.span_diagnostic .span_fatal(span, format!("duplicated bind name: {}", string.get()).as_slice()) } ret_val.insert(bind_name, res[idx].clone()); } } } let mut ret_val = HashMap::new(); for m in ms.iter() { n_rec(p_s, m, res, &mut ret_val) } ret_val } pub enum ParseResult { Success(HashMap<Ident, Rc<NamedMatch>>), Failure(codemap::Span, String), Error(codemap::Span, String) } pub fn parse_or_else(sess: &ParseSess, cfg: ast::CrateConfig, rdr: TtReader, ms: Vec<Matcher> ) -> HashMap<Ident, Rc<NamedMatch>> { match parse(sess, cfg, rdr, ms.as_slice()) { Success(m) => m, Failure(sp, str) => { sess.span_diagnostic.span_fatal(sp, str.as_slice()) } Error(sp, str) => { sess.span_diagnostic.span_fatal(sp, str.as_slice()) } } } // perform a token equality check, ignoring syntax context (that is, an unhygienic comparison) pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool { match (t1,t2) { (&token::IDENT(id1,_),&token::IDENT(id2,_)) | (&token::LIFETIME(id1),&token::LIFETIME(id2)) => id1.name == id2.name, _ => *t1 == *t2 } } pub fn parse(sess: &ParseSess, cfg: ast::CrateConfig, mut rdr: TtReader, ms: &[Matcher]) -> ParseResult { let mut cur_eis = Vec::new(); cur_eis.push(initial_matcher_pos(ms.iter() .map(|x| (*x).clone()) .collect(), None, rdr.peek().sp.lo)); loop { let mut bb_eis = Vec::new(); // black-box parsed by parser.rs let mut next_eis = Vec::new(); // or proceed normally let mut eof_eis = Vec::new(); let TokenAndSpan {tok: tok, sp: sp} = rdr.peek(); /* we append new items to this while we go */ loop { let ei = match cur_eis.pop() { None => break, /* for each Earley Item */ Some(ei) => ei, }; let idx = ei.idx; let len = ei.elts.len(); /* at end of sequence */ if idx >= len { // can't move out of `match`es, so: if ei.up.is_some() { // hack: a matcher sequence is repeating iff it has a // parent (the top level is just a container) // disregard separator, try to go up // (remove this condition to make trailing seps ok) if idx == len { // pop from the matcher position let mut new_pos = ei.up.clone().unwrap(); // update matches (the MBE "parse tree") by appending // each tree as a subtree. // I bet this is a perf problem: we're preemptively // doing a lot of array work that will get thrown away // most of the time. // Only touch the binders we have actually bound for idx in range(ei.match_lo, ei.match_hi) { let sub = (*ei.matches.get(idx)).clone(); new_pos.matches .get_mut(idx) .push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo, sp.hi)))); } new_pos.idx += 1; cur_eis.push(new_pos); } // can we go around again? // the *_t vars are workarounds for the lack of unary move match ei.sep { Some(ref t) if idx == len => { // we need a separator // i'm conflicted about whether this should be hygienic.... // though in this case, if the separators are never legal // idents, it shouldn't matter. if token_name_eq(&tok, t) { //pass the separator let mut ei_t = ei.clone(); ei_t.idx += 1; next_eis.push(ei_t); } } _ => { // we don't need a separator let mut ei_t = ei; ei_t.idx = 0; cur_eis.push(ei_t); } } } else { eof_eis.push(ei); } } else { match ei.elts.get(idx).node.clone() { /* need to descend into sequence */ MatchSeq(ref matchers, ref sep, zero_ok, match_idx_lo, match_idx_hi) => { if zero_ok { let mut new_ei = ei.clone(); new_ei.idx += 1u; //we specifically matched zero repeats. 
for idx in range(match_idx_lo, match_idx_hi) { new_ei.matches .get_mut(idx) .push(Rc::new(MatchedSeq(Vec::new(), sp))); } cur_eis.push(new_ei); } let matches = Vec::from_elem(ei.matches.len(), Vec::new()); let ei_t = ei; cur_eis.push(box MatcherPos { elts: (*matchers).clone(), sep: (*sep).clone(), idx: 0u, up: Some(ei_t), matches: matches, match_lo: match_idx_lo, match_hi: match_idx_hi, sp_lo: sp.lo }); } MatchNonterminal(_,_,_) => { bb_eis.push(ei) } MatchTok(ref t) => { let mut ei_t = ei.clone(); //if (token_name_eq(t,&tok)) { if token::mtwt_token_eq(t,&tok) { ei_t.idx += 1; next_eis.push(ei_t); } } } } } /* error messages here could be improved with links to orig. rules */ if token_name_eq(&tok, &EOF) { if eof_eis.len() == 1u { let mut v = Vec::new(); for dv in eof_eis.get_mut(0).matches.mut_iter() { v.push(dv.pop().unwrap()); } return Success(nameize(sess, ms, v.as_slice())); } else if eof_eis.len() > 1u { return Error(sp, "ambiguity: multiple successful parses".to_string()); } else { return Failure(sp, "unexpected end of macro invocation".to_string()); } } else { if (bb_eis.len() > 0u && next_eis.len() > 0u) || bb_eis.len() > 1u { let nts = bb_eis.iter().map(|ei| { match ei.elts.get(ei.idx).node { MatchNonterminal(bind, name, _) => { (format!("{} ('{}')", token::get_ident(name), token::get_ident(bind))).to_string() } _ => fail!() } }).collect::<Vec<String>>().connect(" or "); return Error(sp, format!( "local ambiguity: multiple parsing options: \ built-in NTs {} or {} other options.", nts, next_eis.len()).to_string()); } else if bb_eis.len() == 0u && next_eis.len() == 0u { return Failure(sp, format!("no rules expected the token `{}`", token::to_str(&tok)).to_string()); } else if next_eis.len() > 0u { /* Now process the next token */ while next_eis.len() > 0u { cur_eis.push(next_eis.pop().unwrap()); } rdr.next_token(); } else /* bb_eis.len() == 1 */ { let mut rust_parser = Parser::new(sess, cfg.clone(), box rdr.clone()); let mut ei = bb_eis.pop().unwrap(); match ei.elts.get(ei.idx).node { MatchNonterminal(_, name, idx) => { let name_string = token::get_ident(name); ei.matches.get_mut(idx).push(Rc::new(MatchedNonterminal( parse_nt(&mut rust_parser, name_string.get())))); ei.idx += 1u; } _ => fail!() } cur_eis.push(ei); for _ in range(0, rust_parser.tokens_consumed) { let _ = rdr.next_token(); } } } assert!(cur_eis.len() > 0u); } } pub fn parse_nt(p: &mut Parser, name: &str) -> Nonterminal { match name { "item" => match p.parse_item(Vec::new()) { Some(i) => token::NtItem(i), None => p.fatal("expected an item keyword") }, "block" => token::NtBlock(p.parse_block()), "stmt" => token::NtStmt(p.parse_stmt(Vec::new())), "pat" => token::NtPat(p.parse_pat()), "expr" => token::NtExpr(p.parse_expr()), "ty" => token::NtTy(p.parse_ty(false /* no need to disambiguate*/)), // this could be handled like a token, since it is one "ident" => match p.token { token::IDENT(sn,b) => { p.bump(); token::NtIdent(box sn,b) } _ => { let token_str = token::to_str(&p.token); p.fatal((format!("expected ident, found {}", token_str.as_slice())).as_slice()) } }, "path" => { token::NtPath(box p.parse_path(LifetimeAndTypesWithoutColons).path) } "meta" => token::NtMeta(p.parse_meta_item()), "tt" => { p.quote_depth += 1u; //but in theory, non-quoted tts might be useful let res = token::NtTT(@p.parse_token_tree()); p.quote_depth -= 1u; res } "matchers" => token::NtMatchers(p.parse_matchers()), _ => { p.fatal(format!("unsupported builtin nonterminal parser: {}", name).as_slice()) } } }<|fim▁end|>
use parse::parser::{LifetimeAndTypesWithoutColons, Parser}; use parse::token::{Token, EOF, Nonterminal}; use parse::token;
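The position-set stepping that the long comment traces (matching a $( a )* a b against a a a a b) can be reproduced in miniature: keep a set of dot positions, advance each one over the next token, and let a position sitting on a Kleene star either loop or skip past it. A deliberately tiny Python sketch — one non-nested star, no named captures or separators:

def match(pattern, tokens):
    # pattern items: a plain token, or ('star', tok) standing for $( tok )*
    star = lambda item: isinstance(item, tuple)

    def closure(states):
        # a dot sitting on a star may also skip over it without consuming input
        out = set(states)
        todo = list(states)
        while todo:
            i = todo.pop()
            if i < len(pattern) and star(pattern[i]) and i + 1 not in out:
                out.add(i + 1)
                todo.append(i + 1)
        return out

    states = closure({0})
    for tok in tokens:
        nxt = set()
        for i in states:
            if i == len(pattern):
                continue
            item = pattern[i]
            if star(item):
                if item[1] == tok:
                    nxt.add(i)       # stay on the star for another repetition
            elif item == tok:
                nxt.add(i + 1)       # consume the token and advance the dot
        states = closure(nxt)
    return len(pattern) in states

assert match(['a', ('star', 'a'), 'a', 'b'], list('aaaab'))   # the comment's worked example
assert not match(['a', ('star', 'a'), 'a', 'b'], list('ab'))  # too short: needs at least a a b

The real matcher also threads match bindings through MatcherPos and calls back into the Rust parser for $x:expr-style nonterminals, which this sketch deliberately omits.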
<|file_name|>context_processors.py<|end_file_name|><|fim▁begin|>def settings(request): """ Add settings (or some) to the templates """ from django.conf import settings <|fim▁hole|> tags['MAP_PROVIDER'] = settings.MAP_PROVIDER if hasattr(settings, 'GOOGLE_ANALYTICS_KEY'): tags['GOOGLE_ANALYTICS_KEY'] = settings.GOOGLE_ANALYTICS_KEY return tags<|fim▁end|>
tags = {} tags['GOOGLE_MAPS_KEY'] = settings.GOOGLE_MAPS_KEY tags['GOOGLE_ANALYTICS_ENABLED'] = getattr(settings, 'GOOGLE_ANALYTICS_ENABLED', True)
<|file_name|>spacer.py<|end_file_name|><|fim▁begin|># # Race Capture App # # Copyright (C) 2014-2017 Autosport Labs # # This file is part of the Race Capture App # # This is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # See the GNU General Public License for more details. You should # have received a copy of the GNU General Public License along with # this code. If not, see <http://www.gnu.org/licenses/>. from kivy.uix.widget import Widget<|fim▁hole|> def __init__(self, **kwargs): super(HorizontalSpacer, self).__init__( **kwargs) self.size_hint_y = None self.height=0 class VerticalSpacer(Widget): def __init__(self, **kwargs): super(VerticalSpacer, self).__init__( **kwargs) self.size_hint_x = None self.width=0<|fim▁end|>
class HorizontalSpacer(Widget):
<|file_name|>LoginStatus.js<|end_file_name|><|fim▁begin|>import React, {PropTypes} from 'react' import styles from './Form.css' export default React.createClass({ propTypes: { username: PropTypes.string.isRequired, onLogout: PropTypes.func }, render() {<|fim▁hole|> <section> <label>当前用户:</label> <input type="text" value={this.props.username} readOnly /> </section> <section> <button onClick={this.props.onLogout} className={`${styles.btn} ${styles.btnBorderOpen} ${styles.btnPurple}`} > 注销 </button> </section> </div> ) } })<|fim▁end|>
return ( <div className={styles.forms}>
<|file_name|>modeltest_runner.py<|end_file_name|><|fim▁begin|>import argparse
import os
import glob
import subprocess

def expand_path(path):
    return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))<|fim▁hole|>
def is_file(path):
<|file_name|>ToXmlUtils.java<|end_file_name|><|fim▁begin|>/* * XAdES4j - A Java library for generation and verification of XAdES signatures. * Copyright (C) 2010 Luis Goncalves. * * XAdES4j is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 3 of the License, or any later version. * * XAdES4j is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details.<|fim▁hole|> */ package xades4j.xml.marshalling; import java.util.EnumMap; import java.util.List; import xades4j.properties.IdentifierType; import xades4j.properties.ObjectIdentifier; import xades4j.properties.data.BaseCertRefsData; import xades4j.properties.data.CertRef; import xades4j.xml.bind.xades.XmlCertIDListType; import xades4j.xml.bind.xades.XmlCertIDType; import xades4j.xml.bind.xades.XmlDigestAlgAndValueType; import xades4j.xml.bind.xades.XmlIdentifierType; import xades4j.xml.bind.xades.XmlObjectIdentifierType; import xades4j.xml.bind.xades.XmlQualifierType; import xades4j.xml.bind.xmldsig.XmlDigestMethodType; import xades4j.xml.bind.xmldsig.XmlX509IssuerSerialType; /** * @author Luís */ class ToXmlUtils { ToXmlUtils() { } private static final EnumMap<IdentifierType, XmlQualifierType> identifierTypeConv; static { identifierTypeConv = new EnumMap(IdentifierType.class); identifierTypeConv.put(IdentifierType.OIDAsURI, XmlQualifierType.OID_AS_URI); identifierTypeConv.put(IdentifierType.OIDAsURN, XmlQualifierType.OID_AS_URN); } static XmlObjectIdentifierType getXmlObjectId(ObjectIdentifier objId) { XmlObjectIdentifierType xmlObjId = new XmlObjectIdentifierType(); // Object identifier XmlIdentifierType xmlId = new XmlIdentifierType(); xmlId.setValue(objId.getIdentifier()); // If it is IdentifierType.URI the converter returns null, which is the // same as not specifying a qualifier. xmlId.setQualifier(identifierTypeConv.get(objId.getIdentifierType())); xmlObjId.setIdentifier(xmlId); return xmlObjId; } /**/ static XmlCertIDListType getXmlCertRefList(BaseCertRefsData certRefsData) { XmlCertIDListType xmlCertRefListProp = new XmlCertIDListType(); List<XmlCertIDType> xmlCertRefList = xmlCertRefListProp.getCert(); XmlDigestAlgAndValueType certDigest; XmlDigestMethodType certDigestMethod; XmlX509IssuerSerialType issuerSerial; XmlCertIDType certID; for (CertRef certRef : certRefsData.getCertRefs()) { certDigestMethod = new XmlDigestMethodType(); certDigestMethod.setAlgorithm(certRef.digestAlgUri); certDigest = new XmlDigestAlgAndValueType(); certDigest.setDigestMethod(certDigestMethod); certDigest.setDigestValue(certRef.digestValue); issuerSerial = new XmlX509IssuerSerialType(); issuerSerial.setX509IssuerName(certRef.issuerDN); issuerSerial.setX509SerialNumber(certRef.serialNumber); certID = new XmlCertIDType(); certID.setCertDigest(certDigest); certID.setIssuerSerial(issuerSerial); xmlCertRefList.add(certID); } return xmlCertRefListProp; } }<|fim▁end|>
* * You should have received a copy of the GNU Lesser General Public License along * with XAdES4j. If not, see <http://www.gnu.org/licenses/>.
<|file_name|>event_test.go<|end_file_name|><|fim▁begin|>package dao<|fim▁hole|> "context" "testing" "go-common/app/admin/main/reply/model" . "github.com/smartystreets/goconvey/convey" ) func TestEvent(t *testing.T) { var ( mid = int64(1) sub = &model.Subject{} rp = &model.Reply{Content: &model.ReplyContent{}} report = &model.Report{} c = context.Background() ) Convey("pub a event", t, WithDao(func(d *Dao) { err := d.PubEvent(c, model.EventReportAdd, mid, sub, rp, report) So(err, ShouldBeNil) })) }<|fim▁end|>
import (
<|file_name|>reflect_test.go<|end_file_name|><|fim▁begin|>package wire import ( "bytes" "fmt" "reflect" "testing" "time" . "github.com/eris-ltd/mint-client/Godeps/_workspace/src/github.com/tendermint/tendermint/common" ) type SimpleStruct struct { String string Bytes []byte Time time.Time } type Animal interface{} const ( AnimalTypeCat = byte(0x01) AnimalTypeDog = byte(0x02) AnimalTypeSnake = byte(0x03) AnimalTypeViper = byte(0x04) ) // Implements Animal type Cat struct { SimpleStruct } // Implements Animal type Dog struct { SimpleStruct } // Implements Animal type Snake []byte // Implements Animal type Viper struct { Bytes []byte } var _ = RegisterInterface( struct{ Animal }{}, ConcreteType{Cat{}, AnimalTypeCat}, ConcreteType{Dog{}, AnimalTypeDog}, ConcreteType{Snake{}, AnimalTypeSnake}, ConcreteType{&Viper{}, AnimalTypeViper}, ) func TestAnimalInterface(t *testing.T) { var foo Animal // Type of pointer to Animal rt := reflect.TypeOf(&foo) fmt.Printf("rt: %v\n", rt) // Type of Animal itself. // NOTE: normally this is acquired through other means // like introspecting on method signatures, or struct fields. rte := rt.Elem() fmt.Printf("rte: %v\n", rte) // Get a new pointer to the interface // NOTE: calling .Interface() is to get the actual value, // instead of reflection values. ptr := reflect.New(rte).Interface() fmt.Printf("ptr: %v", ptr) // Make a binary byteslice that represents a *snake. foo = Snake([]byte("snake")) snakeBytes := BinaryBytes(foo) snakeReader := bytes.NewReader(snakeBytes) // Now you can read it. n, err := new(int64), new(error) it := ReadBinary(foo, snakeReader, n, err).(Animal) fmt.Println(it, reflect.TypeOf(it)) } //------------------------------------- type Constructor func() interface{} type Instantiator func() (o interface{}, ptr interface{}) type Validator func(o interface{}, t *testing.T) type TestCase struct { Constructor Instantiator Validator } //------------------------------------- func constructBasic() interface{} { cat := Cat{ SimpleStruct{ String: "String", Bytes: []byte("Bytes"), Time: time.Unix(123, 0), }, } return cat } func instantiateBasic() (interface{}, interface{}) { return Cat{}, &Cat{} } func validateBasic(o interface{}, t *testing.T) { cat := o.(Cat) if cat.String != "String" { t.Errorf("Expected cat.String == 'String', got %v", cat.String) } if string(cat.Bytes) != "Bytes" { t.Errorf("Expected cat.Bytes == 'Bytes', got %X", cat.Bytes) } if cat.Time.Unix() != 123 { t.Errorf("Expected cat.Time == 'Unix(123)', got %v", cat.Time) } } //------------------------------------- type NilTestStruct struct { IntPtr *int CatPtr *Cat Animal Animal } func constructNilTestStruct() interface{} { return NilTestStruct{} } func instantiateNilTestStruct() (interface{}, interface{}) { return NilTestStruct{}, &NilTestStruct{} } func validateNilTestStruct(o interface{}, t *testing.T) { nts := o.(NilTestStruct) if nts.IntPtr != nil { t.Errorf("Expected nts.IntPtr to be nil, got %v", nts.IntPtr) } if nts.CatPtr != nil { t.Errorf("Expected nts.CatPtr to be nil, got %v", nts.CatPtr) } if nts.Animal != nil { t.Errorf("Expected nts.Animal to be nil, got %v", nts.Animal) } } //------------------------------------- type ComplexStruct struct { Name string Animal Animal } func constructComplex() interface{} { c := ComplexStruct{ Name: "Complex", Animal: constructBasic(), } return c } func instantiateComplex() (interface{}, interface{}) { return ComplexStruct{}, &ComplexStruct{} } func validateComplex(o interface{}, t *testing.T) { c2 := o.(ComplexStruct) if cat, ok := 
c2.Animal.(Cat); ok { validateBasic(cat, t) } else { t.Errorf("Expected c2.Animal to be of type cat, got %v", reflect.ValueOf(c2.Animal).Elem().Type()) } } //------------------------------------- type ComplexStruct2 struct { Cat Cat Dog *Dog Snake Snake Snake2 *Snake Viper Viper Viper2 *Viper } func constructComplex2() interface{} { snake_ := Snake([]byte("hiss")) snakePtr_ := &snake_ c := ComplexStruct2{ Cat: Cat{ SimpleStruct{ String: "String", Bytes: []byte("Bytes"), }, }, Dog: &Dog{ SimpleStruct{ String: "Woof", Bytes: []byte("Bark"), }, }, Snake: Snake([]byte("hiss")), Snake2: snakePtr_, Viper: Viper{Bytes: []byte("hizz")}, Viper2: &Viper{Bytes: []byte("hizz")}, } return c } func instantiateComplex2() (interface{}, interface{}) { return ComplexStruct2{}, &ComplexStruct2{} } func validateComplex2(o interface{}, t *testing.T) { c2 := o.(ComplexStruct2) cat := c2.Cat if cat.String != "String" { t.Errorf("Expected cat.String == 'String', got %v", cat.String) } if string(cat.Bytes) != "Bytes" { t.Errorf("Expected cat.Bytes == 'Bytes', got %X", cat.Bytes) } dog := c2.Dog if dog.String != "Woof" { t.Errorf("Expected dog.String == 'Woof', got %v", dog.String) } if string(dog.Bytes) != "Bark" { t.Errorf("Expected dog.Bytes == 'Bark', got %X", dog.Bytes) } snake := c2.Snake if string(snake) != "hiss" { t.Errorf("Expected string(snake) == 'hiss', got %v", string(snake)) } snake2 := c2.Snake2 if string(*snake2) != "hiss" { t.Errorf("Expected string(snake2) == 'hiss', got %v", string(*snake2)) } viper := c2.Viper if string(viper.Bytes) != "hizz" { t.Errorf("Expected string(viper.Bytes) == 'hizz', got %v", string(viper.Bytes)) } viper2 := c2.Viper2 if string(viper2.Bytes) != "hizz" { t.Errorf("Expected string(viper2.Bytes) == 'hizz', got %v", string(viper2.Bytes)) } } //------------------------------------- type ComplexStructArray struct { Animals []Animal Bytes [5]byte Ints [5]int Array SimpleArray } func constructComplexArray() interface{} { c := ComplexStructArray{ Animals: []Animal{ Cat{ SimpleStruct{ String: "String", Bytes: []byte("Bytes"), }, }, Dog{ SimpleStruct{ String: "Woof", Bytes: []byte("Bark"), }, }, Snake([]byte("hiss")), &Viper{ Bytes: []byte("hizz"), }, }, Bytes: [5]byte{1, 10, 50, 100, 200}, Ints: [5]int{1, 2, 3, 4, 5}, Array: SimpleArray([5]byte{1, 10, 50, 100, 200}), } return c } func instantiateComplexArray() (interface{}, interface{}) { return ComplexStructArray{}, &ComplexStructArray{} } func validateComplexArray(o interface{}, t *testing.T) { c2 := o.(ComplexStructArray) if cat, ok := c2.Animals[0].(Cat); ok { if cat.String != "String" { t.Errorf("Expected cat.String == 'String', got %v", cat.String) } if string(cat.Bytes) != "Bytes" { t.Errorf("Expected cat.Bytes == 'Bytes', got %X", cat.Bytes) } } else { t.Errorf("Expected c2.Animals[0] to be of type cat, got %v", reflect.ValueOf(c2.Animals[0]).Elem().Type()) } if dog, ok := c2.Animals[1].(Dog); ok { if dog.String != "Woof" { t.Errorf("Expected dog.String == 'Woof', got %v", dog.String) } if string(dog.Bytes) != "Bark" { t.Errorf("Expected dog.Bytes == 'Bark', got %X", dog.Bytes) } } else { t.Errorf("Expected c2.Animals[1] to be of type dog, got %v", reflect.ValueOf(c2.Animals[1]).Elem().Type()) } if snake, ok := c2.Animals[2].(Snake); ok { if string(snake) != "hiss" { t.Errorf("Expected string(snake) == 'hiss', got %v", string(snake)) } } else { t.Errorf("Expected c2.Animals[2] to be of type Snake, got %v", reflect.ValueOf(c2.Animals[2]).Elem().Type()) } if viper, ok := c2.Animals[3].(*Viper); ok {<|fim▁hole|> 
t.Errorf("Expected c2.Animals[3] to be of type *Viper, got %v", reflect.ValueOf(c2.Animals[3]).Elem().Type()) } } //----------------------------------------------------------------------------- var testCases = []TestCase{} func init() { testCases = append(testCases, TestCase{constructBasic, instantiateBasic, validateBasic}) testCases = append(testCases, TestCase{constructComplex, instantiateComplex, validateComplex}) testCases = append(testCases, TestCase{constructComplex2, instantiateComplex2, validateComplex2}) testCases = append(testCases, TestCase{constructComplexArray, instantiateComplexArray, validateComplexArray}) testCases = append(testCases, TestCase{constructNilTestStruct, instantiateNilTestStruct, validateNilTestStruct}) } func TestBinary(t *testing.T) { for i, testCase := range testCases { log.Notice(fmt.Sprintf("Running test case %v", i)) // Construct an object o := testCase.Constructor() // Write the object data := BinaryBytes(o) t.Logf("Binary: %X", data) instance, instancePtr := testCase.Instantiator() // Read onto a struct n, err := new(int64), new(error) res := ReadBinary(instance, bytes.NewReader(data), n, err) if *err != nil { t.Fatalf("Failed to read into instance: %v", *err) } // Validate object testCase.Validator(res, t) // Read onto a pointer n, err = new(int64), new(error) res = ReadBinaryPtr(instancePtr, bytes.NewReader(data), n, err) if *err != nil { t.Fatalf("Failed to read into instance: %v", *err) } if res != instancePtr { t.Errorf("Expected pointer to pass through") } // Validate object testCase.Validator(reflect.ValueOf(res).Elem().Interface(), t) } } func TestJSON(t *testing.T) { for i, testCase := range testCases { log.Notice(fmt.Sprintf("Running test case %v", i)) // Construct an object o := testCase.Constructor() // Write the object data := JSONBytes(o) t.Logf("JSON: %v", string(data)) instance, instancePtr := testCase.Instantiator() // Read onto a struct err := new(error) res := ReadJSON(instance, data, err) if *err != nil { t.Fatalf("Failed to read cat: %v", *err) } // Validate object testCase.Validator(res, t) // Read onto a pointer res = ReadJSON(instancePtr, data, err) if *err != nil { t.Fatalf("Failed to read cat: %v", *err) } if res != instancePtr { t.Errorf("Expected pointer to pass through") } // Validate object testCase.Validator(reflect.ValueOf(res).Elem().Interface(), t) } } //------------------------------------------------------------------------------ type Foo struct { FieldA string `json:"fieldA"` // json field name is "fieldA" FieldB string // json field name is "FieldB" fieldC string // not exported, not serialized. } func TestJSONFieldNames(t *testing.T) { for i := 0; i < 20; i++ { // Try to ensure deterministic success. 
foo := Foo{"a", "b", "c"} stringified := string(JSONBytes(foo)) expected := `{"fieldA":"a","FieldB":"b"}` if stringified != expected { t.Fatalf("JSONFieldNames error: expected %v, got %v", expected, stringified) } } } //------------------------------------------------------------------------------ func TestBadAlloc(t *testing.T) { n, err := new(int64), new(error) instance := new([]byte) data := RandBytes(100 * 1024) b := new(bytes.Buffer) // this slice of data claims to be much bigger than it really is WriteUvarint(uint(10000000000000000), b, n, err) b.Write(data) res := ReadBinary(instance, b, n, err) fmt.Println(res, *err) } //------------------------------------------------------------------------------ type SimpleArray [5]byte func TestSimpleArray(t *testing.T) { var foo SimpleArray // Type of pointer to array rt := reflect.TypeOf(&foo) fmt.Printf("rt: %v\n", rt) // *binary.SimpleArray // Type of array itself. // NOTE: normally this is acquired through other means // like introspecting on method signatures, or struct fields. rte := rt.Elem() fmt.Printf("rte: %v\n", rte) // binary.SimpleArray // Get a new pointer to the array // NOTE: calling .Interface() is to get the actual value, // instead of reflection values. ptr := reflect.New(rte).Interface() fmt.Printf("ptr: %v\n", ptr) // &[0 0 0 0 0] // Make a simple int aray fooArray := SimpleArray([5]byte{1, 10, 50, 100, 200}) fooBytes := BinaryBytes(fooArray) fooReader := bytes.NewReader(fooBytes) // Now you can read it. n, err := new(int64), new(error) it := ReadBinary(foo, fooReader, n, err).(SimpleArray) if !bytes.Equal(it[:], fooArray[:]) { t.Errorf("Expected %v but got %v", fooArray, it) } }<|fim▁end|>
if string(viper.Bytes) != "hizz" { t.Errorf("Expected string(viper.Bytes) == 'hizz', got %v", string(viper.Bytes)) } } else {
<|file_name|>game.js<|end_file_name|><|fim▁begin|>document.addEventListener("DOMContentLoaded", function() {
    "use strict";

    // Store game in global variable
    const CASUDOKU = {};

    CASUDOKU.game = (function() {
        // Controls the state of the game

        // Game UI
        let uiStats = document.getElementById("gameStats"),
            uiComplete = document.getElementById("gameComplete"),
            uiNewGame = document.getElementById("gameNew"),

            gamePadKeys = document.querySelectorAll("#gameKeypad li a");

        // Declared up front so the assignments below don't create
        // implicit globals under strict mode
        let start, over;

        // GameKeypad Events
        for (let i = gamePadKeys.length - 1; i >= 0; i--) {
            let key = gamePadKeys[i];

            key.onclick = function(e) {
                e.preventDefault();

                // Parse keycode value
                let number = parseInt(e.currentTarget.innerText, 10);

                if (!number) {
                    CASUDOKU.board.update_cell(0);
                } else {
                    CASUDOKU.board.update_cell(number);
                }
            }
        }

        uiNewGame.onclick = function(e) {
            e.preventDefault();
<|fim▁hole|>
            CASUDOKU.game.start();
        }

        start = function() {
            CASUDOKU.timer.start();
            CASUDOKU.board.new_puzzle();

            uiComplete.style.display = "none";
            uiStats.style.display = "block";
        };

        over = function() {
            CASUDOKU.timer.stop();

            uiComplete.style.display = "block";
            uiStats.style.display = "none";
        };

        // Public api
        return {
            start: start,
            over: over
        };
    }());

    CASUDOKU.timer = (function() {
        let timeout,
            seconds = 0,
            minutes = 0,
            secCounter = document.querySelectorAll(".secCounter"),
            minCounter = document.querySelectorAll(".minCounter");

        let start, stop, setText, timer;

        start = function() {
            if (seconds === 0 && minutes === 0) {
                timer();
            } else {
                stop();
                seconds = 0;
                minutes = 0;
                setText(minCounter, 0);
                timer();
            }
        };

        stop = function() {
            clearTimeout(timeout);
        };

        setText = function(element, time) {
            element.forEach(function(val) {
                val.innerText = time;
            });
        };

        timer = function() {
            timeout = setTimeout(timer, 1000);

            if (seconds === 59) {
                setText(minCounter, ++minutes);
                seconds = 0;
            }

            setText(secCounter, seconds++);
        };

        // Public api
        return {
            start: start,
            stop: stop
        };
    }());

    CASUDOKU.board = (function() {
        // Stores the cells that make up the Sudoku board
        let grid = [],

        // Canvas settings
            canvas = document.getElementById("gameCanvas"),
            context = canvas.getContext("2d"),
            canvasWidth = canvas.offsetWidth,
            canvasHeight = canvas.offsetHeight,

        // Board Settings
            numRows = 9, numCols = 9,
            regionWidth = canvasWidth / 3,
            regionHeight = canvasHeight / 3,

        // Cell Settings
            cellWidth = canvasWidth / numCols,
            cellHeight = canvasHeight / numRows,
            numCells = numRows * numCols,
            selectedCellIndex = 0,

        //Key Codes
            keycode = {
                arrowLeft: 37,
                arrowUp: 38,
                arrowRight: 39,
                arrowDown: 40,
                zero: 48,
                nine: 57
            };
        // End let

        // Board methods, declared here so the strict-mode assignments
        // further down have a binding
        let new_puzzle, make_grid, refresh_board, draw, update_cell;

        // Keyboard & Mouse Events
        canvas.addEventListener("click", function (e) {
            // Calculate position of mouse click and update selected cell
            let xAxis, yAxis,
                canvasOffset = getOffset(),
                cellIndex,
                resultsX = [],
                resultsY = [];

            function getOffset() {
                return {
                    left: canvas.getBoundingClientRect().left + window.scrollX,
                    top: canvas.getBoundingClientRect().top + window.scrollY
                };
            }

            if (e.pageX !== undefined && e.pageY !== undefined) {
                xAxis = e.pageX;
                yAxis = e.pageY;
            } else {
                xAxis = e.clientX + document.body.scrollLeft + document.documentElement.scrollLeft;
                yAxis = e.clientY + document.body.scrollTop + document.documentElement.scrollTop;
            }

            xAxis -= canvasOffset.left;
            yAxis -= canvasOffset.top;

            xAxis = Math.min(xAxis, canvasWidth);
            yAxis = Math.min(yAxis, canvasHeight);

            xAxis = Math.floor(xAxis/cellWidth);
            yAxis = Math.floor(yAxis/cellHeight);

            // Matches clicked coordinates to a cell
            for (let i = 0; i < numCells; i += 1) {
                if (grid[i].col === xAxis && grid[i].row === yAxis) {
                    selectedCellIndex = i;
                    refresh_board();
                }
            }
        });
window.addEventListener("keypress", function (e) { if (e.which >= keycode.zero && e.which <= keycode.nine) { // Subtract 48 to get actual number update_cell(e.which - 48); } }); window.addEventListener("keydown", function (e) { // Arrow Key events for changing the selected cell let pressed = e.which, col = grid[selectedCellIndex].col, row = grid[selectedCellIndex].row; if (pressed >= keycode.arrowLeft && pressed <= keycode.arrowDown) { if (col < (numCols - 1) && pressed == keycode.arrowRight) { selectedCellIndex++; refresh_board(); } if (col > 0 && pressed == keycode.arrowLeft) { selectedCellIndex--; refresh_board(); } if (row < (numRows - 1) && pressed == keycode.arrowDown) { selectedCellIndex += numCols; refresh_board(); } if (row > 0 && pressed == keycode.arrowUp) { selectedCellIndex -= numCols; refresh_board(); } } }); new_puzzle = function() { let workerSudokuSolver = new Worker("js/sudoku-solver.js"), clues = 24, puzzle; workerSudokuSolver.postMessage(clues); workerSudokuSolver.onmessage = function(e) { puzzle = e.data; make_grid(puzzle); }; }; make_grid = function(puzzle) { // Makes a grid array filled with cell instances. Each cell stores // one puzzle value let colCounter = 0, rowCounter = 0; class Cell { constructor() { // set fixed puzzle values this.isDefault = true; this.value = 0; // Store position on the canvas this.x = 0; this.y = 0; this.col = 0; this.row = 0; } }; for (let i = 0; i < puzzle.length; i++) { grid[i] = new Cell(); grid[i].value = puzzle[i]; if (puzzle[i] === 0) { grid[i].isDefault = false; } // Set cell column and row grid[i].col = colCounter; grid[i].row = rowCounter; colCounter++; // change row if ((i + 1) % 9 === 0) { rowCounter++; colCounter = 0; } } refresh_board(); }; refresh_board = function () { let workerSudokuValidator = new Worker("js/sudoku-validator.js"); workerSudokuValidator.postMessage(grid); workerSudokuValidator.onmessage = function(e) { let correct = e.data; if (correct) { CASUDOKU.game.over(); } draw(); }; }; draw = function () { // renders the canvas let regionPosX = 0, regionPosY = 0, cellPosX = 0, cellPosY = 0, textPosX = cellWidth * 0.4, textPosY = cellHeight * 0.65; // board outline context.clearRect(0, 0, canvasWidth, canvasHeight); context.strokeRect(0 , 0, canvasWidth, canvasHeight); context.globalCompositeOperation = "destination-over"; context.lineWidth = 10; // regions for (let x = 0; x < numRows; x++) { context.strokeRect(regionPosX, regionPosY, regionWidth, regionHeight); regionPosX += regionWidth; if (regionPosX == canvasWidth){ regionPosY += regionHeight; regionPosX = 0; } } // Start to draw the Grid context.beginPath(); // vertical lines for (let z = 0; z <= canvasWidth; z += cellWidth) { context.moveTo(0.5 + z, 0); context.lineTo(0.5 + z, canvasWidth); } // horizontal lines for (let y = 0; y <= canvasHeight; y += cellHeight) { context.moveTo(0, 0.5 + y); context.lineTo(canvasHeight, 0.5 + y); } // cell outline context.lineWidth = 2; context.strokeStyle = "black"; context.stroke(); for (let i = 0; i < numCells; i++) { grid[i].x = cellPosX; grid[i].y = cellPosY; // Cell values if (grid[i].isDefault) { context.font = "bold 1.6em Droid Sans, sans-serif"; context.fillStyle = "black"; context.fillText(grid[i].value, textPosX, textPosY); } if (grid[i].value !== 0 && !grid[i].isDefault) { context.font = "1.4em Droid Sans, sans-serif"; context.fillStyle = "grey"; context.fillText(grid[i].value, textPosX, textPosY); } // Cell background colour if (i == selectedCellIndex) { context.fillStyle = "#00B4FF"; } else { context.fillStyle = 
"#EEEEEE"; } // Cell background context.fillRect(cellPosX, cellPosY, cellWidth, cellHeight); cellPosX += cellWidth; textPosX += cellWidth; // Change row if ((i + 1) % numRows === 0) { cellPosX = 0; cellPosY += cellHeight; textPosY += cellHeight; textPosX = cellWidth * 0.4; } } }; update_cell = function (value) { if (!grid[selectedCellIndex].isDefault) { grid[selectedCellIndex].value = value; refresh_board(); } }; // Public api return { new_puzzle: new_puzzle, update_cell: update_cell }; }()); CASUDOKU.game.start(); });<|fim▁end|>
<|file_name|>bitcoin_da.ts<|end_file_name|><|fim▁begin|><TS language="da" version="2.1"> <context> <name>AddressBookPage</name> <message> <source>Right-click to edit address or label</source> <translation>Højreklik for at redigere adresse eller mærkat</translation> </message> <message> <source>Create a new address</source> <translation>Opret en ny adresse</translation> </message> <message> <source>&amp;New</source> <translation>&amp;Ny</translation> </message> <message> <source>Copy the currently selected address to the system clipboard</source> <translation>Kopiér den valgte adresse til systemets udklipsholder</translation> </message> <message> <source>&amp;Copy</source> <translation>&amp;Kopiér</translation> </message> <message> <source>C&amp;lose</source> <translation>&amp;Luk</translation> </message> <message> <source>Delete the currently selected address from the list</source> <translation>Slet den markerede adresse fra listen</translation> </message> <message> <source>Enter address or label to search</source> <translation>Indtast adresse eller mærkat for at søge</translation> </message> <message> <source>Export the data in the current tab to a file</source> <translation>Eksportér dataen i den aktuelle visning til en fil</translation> </message> <message> <source>&amp;Export</source> <translation>&amp;Eksportér</translation> </message> <message> <source>&amp;Delete</source> <translation>&amp;Slet</translation> </message> <message> <source>Choose the address to send coins to</source> <translation>Vælg adresse at sende bitcoins til</translation> </message> <message> <source>Choose the address to receive coins with</source> <translation>Vælg adresse at modtage bitcoins med</translation> </message> <message> <source>C&amp;hoose</source> <translation>&amp;Vælg</translation> </message> <message> <source>Sending addresses</source> <translation>Afsendelsesadresser</translation> </message> <message> <source>Receiving addresses</source> <translation>Modtagelsesadresser</translation> </message> <message> <source>These are your Bitcoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source> <translation>Disse er dine Bitcoin-adresser til afsendelse af betalinger. Tjek altid beløb og modtagelsesadresse, inden du sender bitcoins.</translation> </message> <message> <source>These are your Bitcoin addresses for receiving payments. Use the 'Create new receiving address' button in the receive tab to create new addresses. Signing is only possible with addresses of the type 'legacy'.</source> <translation>Disse er dine Bitcoin adresser til at modtage betalinger. Benyt 'Opret ny modtager adresse' knappen i modtag fanen for at oprette nye adresser.</translation> </message> <message> <source>&amp;Copy Address</source> <translation>&amp;Kopiér adresse</translation> </message> <message> <source>Copy &amp;Label</source> <translation>Kopiér &amp;mærkat</translation> </message> <message> <source>&amp;Edit</source> <translation>&amp;Redigér</translation> </message> <message> <source>Export Address List</source> <translation>Eksportér adresseliste</translation> </message> <message> <source>Comma separated file (*.csv)</source> <translation>Kommasepareret fil (*.csv)</translation> </message> <message> <source>Exporting Failed</source> <translation>Eksport mislykkedes</translation> </message> <message> <source>There was an error trying to save the address list to %1. 
Please try again.</source> <translation>Der opstod en fejl under gemning af adresselisten til %1. Prøv venligst igen.</translation> </message> </context> <context> <name>AddressTableModel</name> <message> <source>Label</source> <translation>Mærkat</translation> </message> <message> <source>Address</source> <translation>Adresse</translation> </message> <message> <source>(no label)</source> <translation>(ingen mærkat)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <source>Passphrase Dialog</source> <translation>Adgangskodedialog</translation> </message> <message> <source>Enter passphrase</source> <translation>Indtast adgangskode</translation> </message> <message> <source>New passphrase</source> <translation>Ny adgangskode</translation> </message> <message> <source>Repeat new passphrase</source> <translation>Gentag ny adgangskode</translation> </message> <message> <source>Show passphrase</source> <translation>Vis adgangskode</translation> </message> <message> <source>Encrypt wallet</source> <translation>Kryptér tegnebog</translation> </message> <message> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>Denne funktion har brug for din tegnebogs adgangskode for at låse tegnebogen op.</translation> </message> <message> <source>Unlock wallet</source> <translation>Lås tegnebog op</translation> </message> <message> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>Denne funktion har brug for din tegnebogs adgangskode for at dekryptere tegnebogen.</translation> </message> <message> <source>Decrypt wallet</source> <translation>Dekryptér tegnebog</translation> </message> <message> <source>Change passphrase</source> <translation>Skift adgangskode</translation> </message> <message> <source>Confirm wallet encryption</source> <translation>Bekræft tegnebogskryptering</translation> </message> <message> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR BITCOINS&lt;/b&gt;!</source> <translation>Advarsel: Hvis du krypterer din tegnebog og mister din adgangskode, vil du &lt;b&gt;MISTE ALLE DINE BITCOINS&lt;/b&gt;!</translation> </message> <message> <source>Are you sure you wish to encrypt your wallet?</source> <translation>Er du sikker på, at du ønsker at kryptere din tegnebog?</translation> </message> <message> <source>Wallet encrypted</source> <translation>Tegnebog krypteret</translation> </message> <message> <source>Enter the new passphrase for the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;ten or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation>Indtast den nye adgangskode til tegnebogen.&lt;br/&gt;Brug venligst en adgangskode på &lt;b&gt;ti eller flere tilfældige tegn&lt;/b&gt; eller &lt;b&gt;otte eller flere ord&lt;/b&gt;.</translation> </message> <message> <source>Enter the old passphrase and new passphrase for the wallet.</source> <translation>Indtast den gamle adgangskode og en ny adgangskode til tegnebogen.</translation> </message> <message> <source>Remember that encrypting your wallet cannot fully protect your bitcoins from being stolen by malware infecting your computer.</source> <translation>Husk, at kryptere din tegnebog vil ikke fuldt ud beskytte dine bitcoins mod at blive stjålet af malware på din computer.</translation> </message> <message> <source>Wallet to be encrypted</source> <translation>Tegnebog, der skal krypteres</translation> </message> 
<message> <source>Your wallet is about to be encrypted. </source> <translation>Din tegnebog krypteres om et øjeblik.</translation> </message> <message> <source>Your wallet is now encrypted. </source> <translation>Din tegnebog er nu krypteret.</translation> </message> <message> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation>VIGTIGT: Enhver tidligere sikkerhedskopi, som du har lavet af tegnebogsfilen, bør blive erstattet af den nyligt genererede, krypterede tegnebogsfil. Af sikkerhedsmæssige årsager vil tidligere sikkerhedskopier af den ikke-krypterede tegnebogsfil blive ubrugelige i det øjeblik, du starter med at anvende den nye, krypterede tegnebog.</translation> </message> <message> <source>Wallet encryption failed</source> <translation>Tegnebogskryptering mislykkedes</translation> </message> <message> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation>Tegnebogskryptering mislykkedes på grund af en intern fejl. Din tegnebog blev ikke krypteret.</translation> </message> <message> <source>The supplied passphrases do not match.</source> <translation>De angivne adgangskoder stemmer ikke overens.</translation> </message> <message> <source>Wallet unlock failed</source> <translation>Tegnebogsoplåsning mislykkedes</translation> </message> <message> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>Den angivne adgangskode for tegnebogsdekrypteringen er forkert.</translation> </message> <message> <source>Wallet decryption failed</source> <translation>Tegnebogsdekryptering mislykkedes</translation> </message> <message> <source>Wallet passphrase was successfully changed.</source> <translation>Tegnebogens adgangskode blev ændret.</translation> </message> <message> <source>Warning: The Caps Lock key is on!</source> <translation>Advarsel: Caps Lock-tasten er aktiveret!</translation> </message> </context> <context> <name>BanTableModel</name> <message> <source>IP/Netmask</source> <translation>IP/Netmaske</translation> </message> <message> <source>Banned Until</source> <translation>Bandlyst indtil</translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <source>Sign &amp;message...</source> <translation>Signér &amp;besked…</translation> </message> <message> <source>Synchronizing with network...</source> <translation>Synkroniserer med netværk…</translation> </message> <message> <source>&amp;Overview</source> <translation>&amp;Oversigt</translation> </message> <message> <source>Show general overview of wallet</source> <translation>Vis generel oversigt over tegnebog</translation> </message> <message> <source>&amp;Transactions</source> <translation>&amp;Transaktioner</translation> </message> <message> <source>Browse transaction history</source> <translation>Gennemse transaktionshistorik</translation> </message> <message> <source>E&amp;xit</source> <translation>&amp;Luk</translation> </message> <message> <source>Quit application</source> <translation>Afslut program</translation> </message> <message> <source>&amp;About %1</source> <translation>&amp;Om %1</translation> </message> <message> <source>Show information about %1</source> <translation>Vis informationer om %1</translation> </message> <message> <source>About 
&amp;Qt</source> <translation>Om &amp;Qt</translation> </message> <message> <source>Show information about Qt</source> <translation>Vis informationer om Qt</translation> </message> <message> <source>&amp;Options...</source> <translation>&amp;Indstillinger…</translation> </message> <message> <source>Modify configuration options for %1</source> <translation>Redigér konfigurationsindstillinger for %1</translation> </message> <message> <source>&amp;Encrypt Wallet...</source> <translation>&amp;Kryptér tegnebog…</translation> </message> <message> <source>&amp;Backup Wallet...</source> <translation>&amp;Sikkerhedskopiér tegnebog…</translation> </message> <message> <source>&amp;Change Passphrase...</source> <translation>&amp;Skift adgangskode…</translation> </message> <message> <source>Open &amp;URI...</source> <translation>&amp;Åbn URI…</translation> </message> <message> <source>Create Wallet...</source> <translation>Opret tegnebog…</translation> </message> <message> <source>Create a new wallet</source> <translation>Opret en ny tegnebog</translation> </message> <message> <source>Wallet:</source> <translation>Tegnebog:</translation> </message> <message> <source>Click to disable network activity.</source> <translation>Klik for at deaktivere netværksaktivitet.</translation> </message> <message> <source>Network activity disabled.</source> <translation>Netværksaktivitet deaktiveret.</translation> </message> <message> <source>Click to enable network activity again.</source> <translation>Klik for a aktivere netværksaktivitet igen.</translation> </message> <message> <source>Syncing Headers (%1%)...</source> <translation>Synkroniserer hoveder (%1%)…</translation> </message> <message> <source>Reindexing blocks on disk...</source> <translation>Genindekserer blokke på disken…</translation> </message> <message> <source>Proxy is &lt;b&gt;enabled&lt;/b&gt;: %1</source> <translation>Proxy er &lt;b&gt;aktiveret&lt;/b&gt;: %1</translation> </message> <message> <source>Send coins to a Bitcoin address</source> <translation>Send bitcoins til en Bitcoin-adresse</translation> </message> <message> <source>Backup wallet to another location</source> <translation>Lav sikkerhedskopi af tegnebogen til et andet sted</translation> </message> <message> <source>Change the passphrase used for wallet encryption</source> <translation>Skift adgangskode anvendt til tegnebogskryptering</translation> </message> <message> <source>&amp;Verify message...</source> <translation>&amp;Verificér besked…</translation> </message> <message> <source>&amp;Send</source> <translation>&amp;Send</translation> </message> <message> <source>&amp;Receive</source> <translation>&amp;Modtag</translation> </message> <message> <source>&amp;Show / Hide</source> <translation>&amp;Vis / skjul</translation> </message> <message> <source>Show or hide the main Window</source> <translation>Vis eller skjul hovedvinduet</translation> </message> <message> <source>Encrypt the private keys that belong to your wallet</source> <translation>Kryptér de private nøgler, der hører til din tegnebog</translation> </message> <message> <source>Sign messages with your Bitcoin addresses to prove you own them</source> <translation>Signér beskeder med dine Bitcoin-adresser for at bevise, at de tilhører dig</translation> </message> <message> <source>Verify messages to ensure they were signed with specified Bitcoin addresses</source> <translation>Verificér beskeder for at sikre, at de er signeret med de angivne Bitcoin-adresser</translation> </message> <message> <source>&amp;File</source> 
<translation>&amp;Fil</translation> </message> <message> <source>&amp;Settings</source> <translation>&amp;Opsætning</translation> </message> <message> <source>&amp;Help</source> <translation>&amp;Hjælp</translation> </message> <message> <source>Tabs toolbar</source> <translation>Faneværktøjslinje</translation> </message> <message> <source>Request payments (generates QR codes and bitcoin: URIs)</source> <translation>Anmod om betalinger (genererer QR-koder og “bitcoin:”-URI'er)</translation> </message> <message> <source>Show the list of used sending addresses and labels</source> <translation>Vis listen over brugte afsendelsesadresser og -mærkater</translation> </message> <message> <source>Show the list of used receiving addresses and labels</source> <translation>Vis listen over brugte modtagelsesadresser og -mærkater</translation> </message> <message> <source>&amp;Command-line options</source> <translation>Tilvalg for &amp;kommandolinje</translation> </message> <message numerus="yes"> <source>%n active connection(s) to Bitcoin network</source> <translation><numerusform>%n aktiv forbindelse til Bitcoin-netværket</numerusform><numerusform>%n aktive forbindelser til Bitcoin-netværket</numerusform></translation> </message> <message> <source>Indexing blocks on disk...</source> <translation>Genindekserer blokke på disken…</translation> </message> <message> <source>Processing blocks on disk...</source> <translation>Bearbejder blokke på disken…</translation> </message> <message numerus="yes"> <source>Processed %n block(s) of transaction history.</source> <translation><numerusform>Bearbejdede %n blok med transaktionshistorik.</numerusform><numerusform>Bearbejdede %n blokke med transaktionshistorik.</numerusform></translation> </message> <message> <source>%1 behind</source> <translation>%1 bagud</translation> </message> <message> <source>Last received block was generated %1 ago.</source> <translation>Senest modtagne blok blev genereret for %1 siden.</translation> </message> <message> <source>Transactions after this will not yet be visible.</source> <translation>Transaktioner herefter vil endnu ikke være synlige.</translation> </message> <message> <source>Error</source> <translation>Fejl</translation> </message> <message> <source>Warning</source> <translation>Advarsel</translation> </message> <message> <source>Information</source> <translation>Information</translation> </message> <message> <source>Up to date</source> <translation>Opdateret</translation> </message> <message> <source>&amp;Load PSBT from file...</source> <translation>&amp;Indlæs PSBT fra fil...</translation> </message> <message> <source>Load Partially Signed Bitcoin Transaction</source> <translation>Indlæs Partvist Signeret Bitcoin-Transaktion</translation> </message> <message> <source>Load PSBT from clipboard...</source> <translation>Indlæs PSBT fra udklipsholder...</translation> </message> <message> <source>Load Partially Signed Bitcoin Transaction from clipboard</source> <translation>Indlæs Partvist Signeret Bitcoin-Transaktion fra udklipsholder</translation> </message> <message> <source>Node window</source> <translation>Knudevindue</translation> </message> <message> <source>Open node debugging and diagnostic console</source> <translation>Åbn knudens fejlsøgningskonsol</translation> </message> <message> <source>&amp;Sending addresses</source> <translation>&amp;Afsenderadresser</translation> </message> <message> <source>&amp;Receiving addresses</source> <translation>&amp;Modtageradresser</translation> </message> <message> <source>Open a 
bitcoin: URI</source> <translation>Åbn en bitcoin:-URI</translation> </message> <message> <source>Open Wallet</source> <translation>Åben Tegnebog</translation> </message> <message> <source>Open a wallet</source> <translation>Åben en tegnebog</translation> </message> <message> <source>Close Wallet...</source> <translation>Luk Tegnebog...</translation> </message> <message> <source>Close wallet</source> <translation>Luk tegnebog</translation> </message> <message> <source>Close All Wallets...</source> <translation>Luk alle tegnebøgerne ...</translation> </message> <message> <source>Close all wallets</source> <translation>Luk alle tegnebøgerne </translation> </message> <message> <source>Show the %1 help message to get a list with possible Bitcoin command-line options</source> <translation>Vis %1 hjælpebesked for at få en liste over mulige tilvalg for Bitcoin kommandolinje</translation> </message> <message> <source>&amp;Mask values</source> <translation>&amp;Maskér værdier</translation> </message> <message> <source>Mask the values in the Overview tab</source> <translation>Maskér værdierne i Oversigt-fanebladet</translation> </message> <message> <source>default wallet</source> <translation>Standard tegnebog</translation> </message> <message> <source>No wallets available</source> <translation>Ingen tegnebøger tilgængelige</translation> </message> <message> <source>&amp;Window</source> <translation>&amp;Vindue</translation> </message> <message> <source>Minimize</source> <translation>Minimér</translation> </message> <message> <source>Zoom</source> <translation>Zoom</translation> </message> <message> <source>Main Window</source> <translation>Hoved Vindue</translation> </message> <message> <source>%1 client</source> <translation>%1-klient</translation> </message> <message> <source>Connecting to peers...</source> <translation>Forbinder til knuder…</translation> </message> <message> <source>Catching up...</source> <translation>Indhenter…</translation> </message> <message> <source>Error: %1</source> <translation>Fejl: %1</translation> </message> <message> <source>Warning: %1</source> <translation>Advarsel: %1</translation> </message> <message> <source>Date: %1 </source> <translation>Dato: %1 </translation> </message> <message> <source>Amount: %1 </source> <translation>Beløb: %1 </translation> </message> <message> <source>Wallet: %1 </source> <translation>Tegnebog: %1 </translation> </message> <message> <source>Type: %1 </source> <translation>Type: %1 </translation> </message> <message> <source>Label: %1 </source> <translation>Mærkat: %1 </translation> </message> <message> <source>Address: %1 </source> <translation>Adresse: %1 </translation> </message> <message> <source>Sent transaction</source> <translation>Afsendt transaktion</translation> </message> <message> <source>Incoming transaction</source> <translation>Indgående transaktion</translation> </message> <message> <source>HD key generation is &lt;b&gt;enabled&lt;/b&gt;</source> <translation>Generering af HD-nøgler er &lt;b&gt;aktiveret&lt;/b&gt;</translation> </message> <message> <source>HD key generation is &lt;b&gt;disabled&lt;/b&gt;</source> <translation>Generering af HD-nøgler er &lt;b&gt;deaktiveret&lt;/b&gt;</translation> </message> <message> <source>Private key &lt;b&gt;disabled&lt;/b&gt;</source> <translation>Private nøgle &lt;b&gt;deaktiveret&lt;/b&gt;</translation> </message> <message> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>Tegnebog er &lt;b&gt;krypteret&lt;/b&gt; og i 
øjeblikket &lt;b&gt;ulåst&lt;/b&gt;</translation> </message> <message> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>Tegnebog er &lt;b&gt;krypteret&lt;/b&gt; og i øjeblikket &lt;b&gt;låst&lt;/b&gt;</translation> </message> <message> <source>Original message:</source> <translation>Original besked:</translation> </message> <message> <source>A fatal error occurred. %1 can no longer continue safely and will quit.</source> <translation>Der skete en fatal fejl. %1 kan ikke længere fortsætte sikkert og vil afslutte.</translation> </message> </context> <context> <name>CoinControlDialog</name> <message> <source>Coin Selection</source> <translation>Coin-styring</translation> </message> <message> <source>Quantity:</source> <translation>Mængde:</translation> </message> <message> <source>Bytes:</source> <translation>Byte:</translation> </message> <message> <source>Amount:</source> <translation>Beløb:</translation> </message> <message> <source>Fee:</source> <translation>Gebyr:</translation> </message> <message> <source>Dust:</source> <translation>Støv:</translation> </message> <message> <source>After Fee:</source> <translation>Efter gebyr:</translation> </message> <message> <source>Change:</source> <translation>Byttepenge:</translation> </message> <message> <source>(un)select all</source> <translation>(af)vælg alle</translation> </message> <message> <source>Tree mode</source> <translation>Trætilstand</translation> </message> <message> <source>List mode</source> <translation>Listetilstand</translation> </message> <message> <source>Amount</source> <translation>Beløb</translation> </message> <message> <source>Received with label</source> <translation>Modtaget med mærkat</translation> </message> <message> <source>Received with address</source> <translation>Modtaget med adresse</translation> </message> <message> <source>Date</source> <translation>Dato</translation> </message> <message> <source>Confirmations</source> <translation>Bekræftelser</translation> </message> <message> <source>Confirmed</source> <translation>Bekræftet</translation> </message> <message> <source>Copy address</source> <translation>Kopiér adresse</translation> </message> <message> <source>Copy label</source> <translation>Kopiér mærkat</translation> </message> <message> <source>Copy amount</source> <translation>Kopiér beløb</translation> </message> <message> <source>Copy transaction ID</source> <translation>Kopiér transaktions-ID</translation> </message> <message> <source>Lock unspent</source> <translation>Fastlås ubrugte</translation> </message> <message> <source>Unlock unspent</source> <translation>Lås ubrugte op</translation> </message> <message> <source>Copy quantity</source> <translation>Kopiér mængde</translation> </message> <message> <source>Copy fee</source> <translation>Kopiér gebyr</translation> </message> <message> <source>Copy after fee</source> <translation>Kopiér eftergebyr</translation> </message> <message> <source>Copy bytes</source> <translation>Kopiér byte</translation> </message> <message> <source>Copy dust</source> <translation>Kopiér støv</translation> </message> <message> <source>Copy change</source> <translation>Kopiér byttepenge</translation> </message> <message> <source>(%1 locked)</source> <translation>(%1 fastlåst)</translation> </message> <message> <source>yes</source> <translation>ja</translation> </message> <message> <source>no</source> <translation>nej</translation> </message> <message> <source>This label turns red if any recipient receives an 
amount smaller than the current dust threshold.</source>
        <translation>Denne mærkat bliver rød, hvis en eller flere modtagere modtager et beløb, der er mindre end den aktuelle støvgrænse.</translation>
    </message>
    <message>
        <source>Can vary +/- %1 satoshi(s) per input.</source>
        <translation>Kan variere med ±%1 satoshi per input.</translation>
    </message>
    <message>
        <source>(no label)</source>
        <translation>(ingen mærkat)</translation>
    </message>
    <message>
        <source>change from %1 (%2)</source>
        <translation>byttepenge fra %1 (%2)</translation>
    </message>
    <message>
        <source>(change)</source>
        <translation>(byttepenge)</translation>
    </message>
</context>
<context>
    <name>CreateWalletActivity</name>
    <message>
        <source>Creating Wallet &lt;b&gt;%1&lt;/b&gt;...</source>
        <translation>Opretter tegnebog &lt;b&gt;%1&lt;/b&gt;…</translation>
    </message>
    <message>
        <source>Create wallet failed</source>
        <translation>Oprettelse af tegnebog mislykkedes</translation>
    </message>
    <message>
        <source>Create wallet warning</source>
        <translation>Advarsel for oprettelse af tegnebog</translation>
    </message>
</context>
<context>
    <name>CreateWalletDialog</name>
    <message>
        <source>Create Wallet</source>
        <translation>Opret tegnebog</translation>
    </message>
    <message>
        <source>Wallet</source>
        <translation>Tegnebog</translation>
    </message>
    <message>
        <source>Wallet Name</source>
        <translation>Navn på tegnebog</translation>
    </message>
    <message>
        <source>Encrypt the wallet. The wallet will be encrypted with a passphrase of your choice.</source>
        <translation>Kryptér tegnebogen. Tegnebogen bliver krypteret med en adgangskode, du vælger.</translation>
    </message>
    <message>
        <source>Encrypt Wallet</source>
        <translation>Kryptér tegnebog</translation>
    </message>
    <message>
        <source>Advanced Options</source>
        <translation>Avancerede Indstillinger</translation>
    </message>
    <message>
        <source>Disable private keys for this wallet. Wallets with private keys disabled will have no private keys and cannot have an HD seed or imported private keys. This is ideal for watch-only wallets.</source>
        <translation>Slå private nøgler fra for denne tegnebog. Tegnebøger med private nøgler slået fra vil ikke have nogen private nøgler og kan ikke have et HD-seed eller importerede private nøgler. Dette er ideelt til kigge-tegnebøger.</translation>
    </message>
    <message>
        <source>Disable Private Keys</source>
        <translation>Slå private nøgler fra</translation>
    </message>
    <message>
        <source>Make a blank wallet. Blank wallets do not initially have private keys or scripts. Private keys and addresses can be imported, or an HD seed can be set, at a later time.</source>
        <translation>Lav en flad tegnebog. Flade tegnebøger har indledningsvist ikke private nøgler eller skripter.
Private nøgler og adresser kan importeres, eller et HD-seed kan indstilles senere.</translation>
    </message>
    <message>
        <source>Make Blank Wallet</source>
        <translation>Lav flad tegnebog</translation>
    </message>
    <message>
        <source>Use descriptors for scriptPubKey management</source>
        <translation>Brug beskrivere til håndtering af scriptPubKey</translation>
    </message>
    <message>
        <source>Descriptor Wallet</source>
        <translation>Beskriver-Pung</translation>
    </message>
    <message>
        <source>Create</source>
        <translation>Opret</translation>
    </message>
    <message>
        <source>Compiled without sqlite support (required for descriptor wallets)</source>
        <translation>Kompileret uden sqlite-understøttelse (krævet til beskriver-punge)</translation>
    </message>
</context>
<context>
    <name>EditAddressDialog</name>
    <message>
        <source>Edit Address</source>
        <translation>Redigér adresse</translation>
    </message>
    <message>
        <source>&amp;Label</source>
        <translation>&amp;Mærkat</translation>
    </message>
    <message>
        <source>The label associated with this address list entry</source>
        <translation>Mærkatet, der er associeret med denne indgang i adresselisten</translation>
    </message>
    <message>
        <source>The address associated with this address list entry. This can only be modified for sending addresses.</source>
        <translation>Adressen, der er associeret med denne indgang i adresselisten. Denne kan kun ændres for afsendelsesadresser.</translation>
    </message>
    <message>
        <source>&amp;Address</source>
        <translation>&amp;Adresse</translation>
    </message>
    <message>
        <source>New sending address</source>
        <translation>Ny afsendelsesadresse</translation>
    </message>
    <message>
        <source>Edit receiving address</source>
        <translation>Redigér modtagelsesadresse</translation>
    </message>
    <message>
        <source>Edit sending address</source>
        <translation>Redigér afsendelsesadresse</translation>
    </message>
    <message>
        <source>The entered address "%1" is not a valid Bitcoin address.</source>
        <translation>Den indtastede adresse “%1” er ikke en gyldig Bitcoin-adresse.</translation>
    </message>
    <message>
        <source>Address "%1" already exists as a receiving address with label "%2" and so cannot be added as a sending address.</source>
        <translation>Adressen "%1" eksisterer allerede som modtagende adresse med mærkat "%2" og kan derfor ikke tilføjes som sende adresse.</translation>
    </message>
    <message>
        <source>The entered address "%1" is already in the address book with label "%2".</source>
        <translation>Den indtastede adresse "%1" er allerede i adresse bogen med mærkat "%2".</translation>
    </message>
    <message>
        <source>Could not unlock wallet.</source>
        <translation>Kunne ikke låse tegnebog op.</translation>
    </message>
    <message>
        <source>New key generation failed.</source>
        <translation>Ny nøglegenerering mislykkedes.</translation>
    </message>
</context>
<context>
    <name>FreespaceChecker</name>
    <message>
        <source>A new data directory will be created.</source>
        <translation>En ny datamappe vil blive oprettet.</translation>
    </message>
    <message>
        <source>name</source>
        <translation>navn</translation>
    </message>
    <message>
        <source>Directory already exists. Add %1 if you intend to create a new directory here.</source>
        <translation>Mappe eksisterer allerede. 
Tilføj %1, hvis du vil oprette en ny mappe her.</translation> </message> <message> <source>Path already exists, and is not a directory.</source> <translation>Sti eksisterer allerede og er ikke en mappe.</translation> </message> <message> <source>Cannot create data directory here.</source> <translation>Kan ikke oprette en mappe her.</translation> </message> </context> <context> <name>HelpMessageDialog</name> <message> <source>version</source> <translation>version</translation> </message> <message> <source>About %1</source> <translation>Om %1</translation> </message> <message> <source>Command-line options</source> <translation>Kommandolinjetilvalg</translation> </message> </context> <context> <name>Intro</name> <message> <source>Welcome</source> <translation>Velkommen</translation> </message> <message> <source>Welcome to %1.</source> <translation>Velkommen til %1.</translation> </message> <message> <source>As this is the first time the program is launched, you can choose where %1 will store its data.</source> <translation>Siden dette er første gang, programmet startes, kan du vælge, hvor %1 skal gemme sin data.</translation> </message> <message> <source>When you click OK, %1 will begin to download and process the full %4 block chain (%2GB) starting with the earliest transactions in %3 when %4 initially launched.</source> <translation>Når du klikker OK, vil %1 begynde at downloade og bearbejde den fulde %4-blokkæde (%2 GB), startende med de tidligste transaktioner i %3, da %4 først startede.</translation> </message> <message> <source>Reverting this setting requires re-downloading the entire blockchain. It is faster to download the full chain first and prune it later. Disables some advanced features.</source> <translation>Ændring af denne indstilling senere kræver gendownload af hele blokkæden. Det er hurtigere at downloade den komplette kæde først og beskære den senere. Slår nogle avancerede funktioner fra.</translation> </message> <message> <source>This initial synchronisation is very demanding, and may expose hardware problems with your computer that had previously gone unnoticed. Each time you run %1, it will continue downloading where it left off.</source> <translation>Denne indledningsvise synkronisering er meget krævende, og den kan potentielt afsløre hardwareproblemer med din computer, som du ellers ikke har lagt mærke til. 
Hver gang, du kører %1, vil den fortsætte med at downloade, hvor den sidst slap.</translation> </message> <message> <source>If you have chosen to limit block chain storage (pruning), the historical data must still be downloaded and processed, but will be deleted afterward to keep your disk usage low.</source> <translation>Hvis du har valgt at begrænse opbevaringen af blokkæden (beskæring/pruning), vil al historisk data stadig skulle downloades og bearbejdes, men vil blive slettet efterfølgende for at holde dit diskforbrug lavt.</translation> </message> <message> <source>Use the default data directory</source> <translation>Brug standardmappen for data</translation> </message> <message> <source>Use a custom data directory:</source> <translation>Brug tilpasset mappe for data:</translation> </message> <message> <source>Bitcoin</source> <translation>Bitcoin</translation> </message> <message> <source>Discard blocks after verification, except most recent %1 GB (prune)</source> <translation>Kassér blokke efter verificering, undtaget de seneste %1 GB (beskær)</translation> </message> <message> <source>At least %1 GB of data will be stored in this directory, and it will grow over time.</source> <translation>Mindst %1 GB data vil blive gemt i denne mappe, og det vil vokse over tid.</translation> </message> <message> <source>Approximately %1 GB of data will be stored in this directory.</source> <translation>Omtrent %1 GB data vil blive gemt i denne mappe.</translation> </message> <message> <source>%1 will download and store a copy of the Bitcoin block chain.</source> <translation>%1 vil downloade og gemme en kopi af Bitcoin-blokkæden.</translation> </message> <message> <source>The wallet will also be stored in this directory.</source> <translation>Tegnebogen vil også blive gemt i denne mappe.</translation> </message> <message> <source>Error: Specified data directory "%1" cannot be created.</source> <translation>Fejl: Angivet datamappe “%1” kan ikke oprettes.</translation> </message> <message> <source>Error</source> <translation>Fejl</translation> </message> <message numerus="yes"> <source>%n GB of free space available</source> <translation><numerusform>%n GB fri plads tilgængelig</numerusform><numerusform>%n GB fri plads tilgængelig</numerusform></translation> </message> <message numerus="yes"> <source>(of %n GB needed)</source> <translation><numerusform>(ud af %n GB nødvendig)</numerusform><numerusform>(ud af %n GB nødvendig)</numerusform></translation> </message> <message numerus="yes"> <source>(%n GB needed for full chain)</source> <translation><numerusform>(%n GB nødvendig for komplet kæde)</numerusform><numerusform>(%n GB nødvendig for komplet kæde)</numerusform></translation> </message> </context> <context> <name>ModalOverlay</name> <message> <source>Form</source> <translation>Formular</translation> </message> <message> <source>Recent transactions may not yet be visible, and therefore your wallet's balance might be incorrect. This information will be correct once your wallet has finished synchronizing with the bitcoin network, as detailed below.</source> <translation>Nylige transaktioner er måske ikke synlige endnu, og derfor kan din tegnebogs saldo være ukorrekt. 
Denne information vil være korrekt, når din tegnebog er færdig med at synkronisere med bitcoin-netværket, som detaljerne herunder viser.</translation> </message> <message> <source>Attempting to spend bitcoins that are affected by not-yet-displayed transactions will not be accepted by the network.</source> <translation>Forsøg på at bruge bitcoin, som er indeholdt i endnu-ikke-viste transaktioner, accepteres ikke af netværket.</translation> </message> <message> <source>Number of blocks left</source> <translation>Antal blokke tilbage</translation> </message> <message> <source>Unknown...</source> <translation>Ukendt…</translation> </message> <message> <source>Last block time</source> <translation>Tidsstempel for seneste blok</translation> </message> <message> <source>Progress</source> <translation>Fremgang</translation> </message> <message> <source>Progress increase per hour</source> <translation>Øgning af fremgang pr. time</translation> </message> <message> <source>calculating...</source> <translation>beregner…</translation> </message> <message> <source>Estimated time left until synced</source> <translation>Estimeret tid tilbage af synkronisering</translation> </message> <message> <source>Hide</source> <translation>Skjul</translation> </message> <message> <source>Esc</source> <translation>Esc</translation> </message> <message> <source>%1 is currently syncing. It will download headers and blocks from peers and validate them until reaching the tip of the block chain.</source> <translation>%1 synkroniserer lige nu. Hoveder og blokke bliver downloadet og valideret fra andre knuder. Processen fortsætter indtil den seneste blok nås.</translation> </message> <message> <source>Unknown. Syncing Headers (%1, %2%)...</source> <translation>Ukendt. Synkroniserer hoveder (%1, %2%)…</translation> </message> </context> <context> <name>OpenURIDialog</name> <message> <source>Open bitcoin URI</source> <translation>Åbn bitcoin-URI</translation> </message> <message> <source>URI:</source> <translation>URI:</translation> </message> </context> <context> <name>OpenWalletActivity</name> <message> <source>Open wallet failed</source> <translation>Åbning af tegnebog mislykkedes</translation> </message> <message> <source>Open wallet warning</source> <translation>Advarsel for åbning af tegnebog</translation> </message> <message> <source>default wallet</source> <translation>standardtegnebog</translation> </message> <message> <source>Opening Wallet &lt;b&gt;%1&lt;/b&gt;...</source> <translation>Åbner tegnebog &lt;b&gt;%1&lt;/b&gt;…</translation> </message> </context> <context> <name>OptionsDialog</name> <message> <source>Options</source> <translation>Indstillinger</translation> </message> <message> <source>&amp;Main</source> <translation>&amp;Generelt</translation> </message> <message> <source>Automatically start %1 after logging in to the system.</source> <translation>Start %1 automatisk, når der logges ind på systemet.</translation> </message> <message> <source>&amp;Start %1 on system login</source> <translation>&amp;Start %1 ved systemlogin</translation> </message> <message> <source>Size of &amp;database cache</source> <translation>Størrelsen på &amp;databasens cache</translation> </message> <message> <source>Number of script &amp;verification threads</source> <translation>Antallet af script&amp;verificeringstråde</translation> </message> <message> <source>IP address of the proxy (e.g. 
IPv4: 127.0.0.1 / IPv6: ::1)</source> <translation>IP-adresse for proxyen (fx IPv4: 127.0.0.1 / IPv6: ::1)</translation> </message> <message> <source>Shows if the supplied default SOCKS5 proxy is used to reach peers via this network type.</source> <translation>Viser om den angivne standard-SOCKS5-proxy bruges til at nå knuder via denne netværkstype.</translation> </message> <message> <source>Hide the icon from the system tray.</source> <translation>Skjul ikonet fra statusfeltet.</translation> </message> <message> <source>&amp;Hide tray icon</source> <translation>&amp;Skjul statusikon</translation> </message> <message> <source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Exit in the menu.</source> <translation>Minimér i stedet for at lukke applikationen, når vinduet lukkes. Når denne indstilling er aktiveret, vil applikationen først blive lukket, når Afslut vælges i menuen.</translation> </message> <message> <source>Third party URLs (e.g. a block explorer) that appear in the transactions tab as context menu items. %s in the URL is replaced by transaction hash. Multiple URLs are separated by vertical bar |.</source> <translation>Tredjeparts-URL'er (fx et blokhåndteringsværktøj), der vises i transaktionsfanen som genvejsmenupunkter. %s i URL'en erstattes med transaktionens hash. Flere URL'er separeres med en lodret streg |.</translation> </message> <message> <source>Open the %1 configuration file from the working directory.</source> <translation>Åbn konfigurationsfilen for %1 fra arbejdsmappen.</translation> </message> <message> <source>Open Configuration File</source> <translation>Åbn konfigurationsfil</translation> </message> <message> <source>Reset all client options to default.</source> <translation>Nulstil alle klientindstillinger til deres standard.</translation> </message> <message> <source>&amp;Reset Options</source> <translation>&amp;Nulstil indstillinger</translation> </message> <message> <source>&amp;Network</source> <translation>&amp;Netværk</translation> </message> <message> <source>Disables some advanced features but all blocks will still be fully validated. Reverting this setting requires re-downloading the entire blockchain. Actual disk usage may be somewhat higher.</source> <translation>Deaktiverer nogle avancerede funktioner, men alle blokke vil stadig blive fuldt validerede. Ændring af denne indstilling senere kræver download af hele blokkæden igen. 
Det faktiske diskforbrug kan være noget højere.</translation> </message> <message> <source>Prune &amp;block storage to</source> <translation>Beskær &amp;blokopbevaring til</translation> </message> <message> <source>GB</source> <translation>GB</translation> </message> <message> <source>Reverting this setting requires re-downloading the entire blockchain.</source> <translation>Ændring af denne indstilling senere kræver download af hele blokkæden igen.</translation> </message> <message> <source>MiB</source> <translation>MiB</translation> </message> <message> <source>(0 = auto, &lt;0 = leave that many cores free)</source> <translation>(0 = auto, &lt;0 = efterlad så mange kerner fri)</translation> </message> <message> <source>W&amp;allet</source> <translation>&amp;Tegnebog</translation> </message> <message> <source>Expert</source> <translation>Ekspert</translation> </message> <message> <source>Enable coin &amp;control features</source> <translation>Aktivér egenskaber for &amp;coin-styring</translation> </message> <message> <source>If you disable the spending of unconfirmed change, the change from a transaction cannot be used until that transaction has at least one confirmation. This also affects how your balance is computed.</source> <translation>Hvis du deaktiverer brug af ubekræftede byttepenge, kan byttepengene fra en transaktion ikke bruges, før pågældende transaktion har mindst én bekræftelse. Dette påvirker også måden hvorpå din saldo beregnes.</translation> </message> <message> <source>&amp;Spend unconfirmed change</source> <translation>&amp;Brug ubekræftede byttepenge</translation> </message> <message> <source>Automatically open the Bitcoin client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation>Åbn automatisk Bitcoin-klientens port på routeren. Dette virker kun, når din router understøtter UPnP, og UPnP er aktiveret.</translation> </message> <message> <source>Map port using &amp;UPnP</source> <translation>Konfigurér port vha. &amp;UPnP</translation> </message> <message> <source>Accept connections from outside.</source> <translation>Acceptér forbindelser udefra.</translation> </message> <message> <source>Allow incomin&amp;g connections</source> <translation>Tillad &amp;indkommende forbindelser</translation> </message> <message> <source>Connect to the Bitcoin network through a SOCKS5 proxy.</source> <translation>Forbind til Bitcoin-netværket gennem en SOCKS5-proxy.</translation> </message> <message> <source>&amp;Connect through SOCKS5 proxy (default proxy):</source> <translation>&amp;Forbind gennem SOCKS5-proxy (standard-proxy):</translation> </message> <message> <source>Proxy &amp;IP:</source> <translation>Proxy-&amp;IP:</translation> </message> <message> <source>&amp;Port:</source> <translation>&amp;Port:</translation> </message> <message> <source>Port of the proxy (e.g. 
9050)</source> <translation>Port for proxyen (fx 9050)</translation> </message> <message> <source>Used for reaching peers via:</source> <translation>Bruges til at nå knuder via:</translation> </message> <message> <source>IPv4</source> <translation>IPv4</translation> </message> <message> <source>IPv6</source> <translation>IPv6</translation> </message> <message> <source>Tor</source> <translation>Tor</translation> </message> <message> <source>&amp;Window</source> <translation>&amp;Vindue</translation> </message> <message> <source>Show only a tray icon after minimizing the window.</source> <translation>Vis kun et statusikon efter minimering af vinduet.</translation> </message> <message> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation>&amp;Minimér til statusfeltet i stedet for proceslinjen</translation> </message> <message> <source>M&amp;inimize on close</source> <translation>M&amp;inimér ved lukning</translation> </message> <message> <source>&amp;Display</source> <translation>&amp;Visning</translation> </message> <message> <source>User Interface &amp;language:</source> <translation>&amp;Sprog for brugergrænseflade:</translation> </message> <message> <source>The user interface language can be set here. This setting will take effect after restarting %1.</source> <translation>Sproget for brugerfladen kan vælges her. Denne indstilling vil træde i kraft efter genstart af %1.</translation> </message> <message> <source>&amp;Unit to show amounts in:</source> <translation>&amp;Enhed, som beløb vises i:</translation> </message> <message> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation>Vælg standard for underopdeling af enhed, som skal vises i brugergrænsefladen og ved afsendelse af bitcoins.</translation> </message> <message> <source>Whether to show coin control features or not.</source> <translation>Hvorvidt egenskaber for coin-styring skal vises eller ej.</translation> </message> <message> <source>Connect to the Bitcoin network through a separate SOCKS5 proxy for Tor onion services.</source> <translation>Opret forbindelse til Bitcoin-netværket gennem en separat SOCKS5-proxy til Tor-onion-tjenester.</translation> </message> <message> <source>Use separate SOCKS&amp;5 proxy to reach peers via Tor onion services:</source> <translation>Brug separat SOCKS&amp;5-proxy for at nå knuder via Tor-onion-tjenester:</translation> </message> <message> <source>&amp;Third party transaction URLs</source> <translation>&amp;Tredjeparts-transaktions-URL'er</translation> </message> <message> <source>Options set in this dialog are overridden by the command line or in the configuration file:</source> <translation>Valgmuligheder sat i denne dialog er overskrevet af kommandolinjen eller i konfigurationsfilen:</translation> </message> <message> <source>&amp;OK</source> <translation>&amp;Ok</translation> </message> <message> <source>&amp;Cancel</source> <translation>&amp;Annullér</translation> </message> <message> <source>default</source> <translation>standard</translation> </message> <message> <source>none</source> <translation>ingen</translation> </message> <message> <source>Confirm options reset</source> <translation>Bekræft nulstilling af indstillinger</translation> </message> <message> <source>Client restart required to activate changes.</source> <translation>Genstart af klienten er nødvendig for at aktivere ændringer.</translation> </message> <message> <source>Client will be shut down. 
Do you want to proceed?</source> <translation>Klienten vil lukke ned. Vil du fortsætte?</translation> </message> <message> <source>Configuration options</source> <translation>Konfigurationsindstillinger</translation> </message> <message> <source>The configuration file is used to specify advanced user options which override GUI settings. Additionally, any command-line options will override this configuration file.</source> <translation>Konfigurationsfilen bruges til at opsætte avancerede brugerindstillinger, som tilsidesætter indstillingerne i den grafiske brugerflade. Derudover vil eventuelle kommandolinjetilvalg tilsidesætte denne konfigurationsfil.</translation> </message> <message> <source>Error</source> <translation>Fejl</translation> </message> <message> <source>The configuration file could not be opened.</source> <translation>Konfigurationsfilen kunne ikke åbnes.</translation> </message> <message> <source>This change would require a client restart.</source> <translation>Denne ændring vil kræve en genstart af klienten.</translation> </message> <message> <source>The supplied proxy address is invalid.</source> <translation>Den angivne proxy-adresse er ugyldig.</translation> </message> </context> <context> <name>OverviewPage</name> <message> <source>Form</source> <translation>Formular</translation> </message> <message> <source>The displayed information may be out of date. Your wallet automatically synchronizes with the Bitcoin network after a connection is established, but this process has not completed yet.</source> <translation>Den viste information kan være forældet. Din tegnebog synkroniserer automatisk med Bitcoin-netværket, når en forbindelse etableres, men denne proces er ikke gennemført endnu.</translation> </message> <message> <source>Watch-only:</source> <translation>Kigge:</translation> </message> <message> <source>Available:</source> <translation>Tilgængelig:</translation> </message> <message> <source>Your current spendable balance</source> <translation>Din nuværende tilgængelige saldo</translation> </message> <message> <source>Pending:</source> <translation>Afventende:</translation> </message> <message> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the spendable balance</source> <translation>Total saldo for transaktioner, som ikke er blevet bekræftet endnu, og som endnu ikke er en del af den tilgængelige saldo</translation> </message> <message> <source>Immature:</source> <translation>Umodne:</translation> </message> <message> <source>Mined balance that has not yet matured</source> <translation>Minet saldo, som endnu ikke er modnet</translation> </message> <message> <source>Balances</source> <translation>Saldi</translation> </message> <message> <source>Total:</source> <translation>Total:</translation> </message> <message> <source>Your current total balance</source> <translation>Din nuværende totale saldo</translation> </message> <message> <source>Your current balance in watch-only addresses</source> <translation>Din nuværende saldo på kigge-adresser</translation> </message> <message> <source>Spendable:</source> <translation>Spendérbar:</translation> </message> <message> <source>Recent transactions</source> <translation>Nylige transaktioner</translation> </message> <message> <source>Unconfirmed transactions to watch-only addresses</source> <translation>Ubekræftede transaktioner til kigge-adresser</translation> </message> <message> <source>Mined balance in watch-only addresses that has not yet matured</source> <translation>Minet saldo 
på kigge-adresser, som endnu ikke er modnet</translation> </message> <message> <source>Current total balance in watch-only addresses</source> <translation>Nuværende totalsaldo på kigge-adresser</translation> </message> <message> <source>Privacy mode activated for the Overview tab. To unmask the values, uncheck Settings-&gt;Mask values.</source> <translation>Privatlivstilstand aktiveret for Oversigt-fanebladet. Fjern flueben fra Indstillinger-&gt;Maskér værdier for at afmaskere værdierne.</translation> </message> </context> <context> <name>PSBTOperationsDialog</name> <message> <source>Dialog</source> <translation>Dialog</translation> </message> <message> <source>Sign Tx</source> <translation>Signér Tx</translation> </message> <message> <source>Broadcast Tx</source> <translation>Udsend Tx</translation> </message> <message> <source>Copy to Clipboard</source> <translation>Kopiér til udklipsholder</translation> </message> <message> <source>Save...</source> <translation>Gem…</translation> </message> <message> <source>Close</source> <translation>Luk</translation> </message> <message> <source>Failed to load transaction: %1</source> <translation>Kunne ikke indlæse transaktion: %1</translation> </message> <message> <source>Failed to sign transaction: %1</source> <translation>Kunne ikke signere transaktion: %1</translation> </message> <message> <source>Could not sign any more inputs.</source> <translation>Kunne ikke signere flere input.</translation> </message> <message> <source>Signed %1 inputs, but more signatures are still required.</source> <translation>Signerede %1 input, men flere signaturer kræves endnu.</translation> </message> <message> <source>Signed transaction successfully. Transaction is ready to broadcast.</source> <translation>Signering af transaktion lykkedes. Transaktion er klar til udsendelse.</translation> </message> <message> <source>Unknown error processing transaction.</source> <translation>Ukendt fejl i behandling af transaktion.</translation> </message> <message> <source>Transaction broadcast successfully! Transaction ID: %1</source> <translation>Udsendelse af transaktion lykkedes! 
Transaktions-ID: %1</translation> </message> <message> <source>Transaction broadcast failed: %1</source> <translation>Udsendelse af transaktion mislykkedes: %1</translation> </message> <message> <source>PSBT copied to clipboard.</source> <translation>PSBT kopieret til udklipsholder.</translation> </message> <message> <source>Save Transaction Data</source> <translation>Gem Transaktionsdata</translation> </message> <message> <source>Partially Signed Transaction (Binary) (*.psbt)</source> <translation>Delvist Signeret Transaktion (Binær) (*.psbt)</translation> </message> <message> <source>PSBT saved to disk.</source> <translation>PSBT gemt på disk.</translation> </message> <message> <source> * Sends %1 to %2</source> <translation> * Sender %1 til %2</translation> </message> <message> <source>Unable to calculate transaction fee or total transaction amount.</source> <translation>Kunne ikke beregne transaktionsgebyr eller totalt transaktionsbeløb.</translation> </message> <message> <source>Pays transaction fee: </source> <translation>Betaler transaktionsgebyr: </translation> </message> <message> <source>Total Amount</source> <translation>Totalt beløb</translation> </message> <message> <source>or</source> <translation>eller</translation> </message> <message> <source>Transaction has %1 unsigned inputs.</source> <translation>Transaktion har %1 usignerede input.</translation> </message> <message> <source>Transaction is missing some information about inputs.</source> <translation>Transaktion mangler noget information om input.</translation> </message> <message> <source>Transaction still needs signature(s).</source> <translation>Transaktion mangler stadig signatur(er).</translation> </message> <message> <source>(But this wallet cannot sign transactions.)</source> <translation>(Men denne tegnebog kan ikke signere transaktioner.)</translation> </message> <message> <source>(But this wallet does not have the right keys.)</source> <translation>(Men denne tegnebog har ikke de rette nøgler.)</translation> </message> <message> <source>Transaction is fully signed and ready for broadcast.</source> <translation>Transaktion er fuldt signeret og klar til udsendelse.</translation> </message> <message> <source>Transaction status is unknown.</source> <translation>Transaktionsstatus er ukendt.</translation> </message> </context> <context> <name>PaymentServer</name> <message> <source>Payment request error</source> <translation>Fejl i betalingsanmodning</translation> </message> <message> <source>Cannot start bitcoin: click-to-pay handler</source> <translation>Kan ikke starte bitcoin: click-to-pay-håndtering</translation> </message> <message> <source>URI handling</source> <translation>URI-håndtering</translation> </message> <message> <source>'bitcoin://' is not a valid URI. Use 'bitcoin:' instead.</source> <translation>'bitcoin://' er ikke et gyldigt URI. 
Brug 'bitcoin:' i stedet.</translation> </message> <message> <source>Cannot process payment request because BIP70 is not supported.</source> <translation>Betalingsanmodningen kan ikke behandles, da BIP70 ikke er understøttet.</translation> </message> <message> <source>Due to widespread security flaws in BIP70 it's strongly recommended that any merchant instructions to switch wallets be ignored.</source> <translation>På grund af udbredte sikkerhedsfejl i BIP70 anbefales det kraftigt, at enhver instruktion fra handlende om at skifte tegnebog ignoreres.</translation> </message> <message> <source>If you are receiving this error you should request the merchant provide a BIP21 compatible URI.</source> <translation>Hvis du modtager denne fejl, bør du anmode den handlende om at give dig en BIP21-kompatibel URI.</translation> </message> <message> <source>Invalid payment address %1</source> <translation>Ugyldig betalingsadresse %1</translation> </message> <message> <source>URI cannot be parsed! This can be caused by an invalid Bitcoin address or malformed URI parameters.</source> <translation>URI kan ikke tolkes! Dette kan skyldes en ugyldig Bitcoin-adresse eller forkert udformede URI-parametre.</translation> </message> <message> <source>Payment request file handling</source> <translation>Filhåndtering for betalingsanmodninger</translation> </message> </context> <context> <name>PeerTableModel</name> <message> <source>User Agent</source> <translation>Brugeragent</translation> </message> <message> <source>Node/Service</source> <translation>Knude/tjeneste</translation> </message> <message> <source>NodeId</source> <translation>Knude-id</translation> </message> <message> <source>Ping</source> <translation>Ping</translation> </message> <message> <source>Sent</source> <translation>Sendt</translation> </message> <message> <source>Received</source> <translation>Modtaget</translation> </message> </context> <context> <name>QObject</name> <message> <source>Amount</source> <translation>Beløb</translation> </message> <message> <source>Enter a Bitcoin address (e.g. 
%1)</source> <translation>Indtast en Bitcoin-adresse (fx %1)</translation> </message> <message> <source>%1 d</source> <translation>%1 d</translation> </message> <message> <source>%1 h</source> <translation>%1 t</translation> </message> <message> <source>%1 m</source> <translation>%1 m</translation> </message> <message> <source>%1 s</source> <translation>%1 s</translation> </message> <message> <source>None</source> <translation>Ingen</translation> </message> <message> <source>N/A</source> <translation>N/A</translation> </message> <message> <source>%1 ms</source> <translation>%1 ms</translation> </message> <message numerus="yes"> <source>%n second(s)</source> <translation><numerusform>%n sekund</numerusform><numerusform>%n sekunder</numerusform></translation> </message> <message numerus="yes"> <source>%n minute(s)</source> <translation><numerusform>%n minut</numerusform><numerusform>%n minutter</numerusform></translation> </message> <message numerus="yes"> <source>%n hour(s)</source> <translation><numerusform>%n time</numerusform><numerusform>%n timer</numerusform></translation> </message> <message numerus="yes"> <source>%n day(s)</source> <translation><numerusform>%n dag</numerusform><numerusform>%n dage</numerusform></translation> </message> <message numerus="yes"> <source>%n week(s)</source> <translation><numerusform>%n uge</numerusform><numerusform>%n uger</numerusform></translation> </message> <message> <source>%1 and %2</source> <translation>%1 og %2</translation> </message> <message numerus="yes"> <source>%n year(s)</source> <translation><numerusform>%n år</numerusform><numerusform>%n år</numerusform></translation> </message> <message> <source>%1 B</source> <translation>%1 B</translation> </message> <message> <source>%1 KB</source> <translation>%1 KB</translation> </message> <message> <source>%1 MB</source> <translation>%1 MB</translation> </message> <message> <source>%1 GB</source> <translation>%1 GB</translation> </message> <message> <source>Error: Specified data directory "%1" does not exist.</source> <translation>Fejl: Angivet datamappe “%1” eksisterer ikke.</translation> </message> <message> <source>Error: Cannot parse configuration file: %1.</source> <translation>Fejl: Kan ikke fortolke konfigurationsfilen: %1.</translation> </message> <message> <source>Error: %1</source> <translation>Fejl: %1</translation> </message> <message> <source>Error initializing settings: %1</source> <translation>Fejl ved initialisering af indstillinger: %1</translation> </message> <message> <source>%1 didn't yet exit safely...</source> <translation>%1 har endnu ikke afsluttet på sikker vis…</translation> </message> <message> <source>unknown</source> <translation>ukendt</translation> </message> </context> <context> <name>QRImageWidget</name> <message> <source>&amp;Save Image...</source> <translation>&amp;Gem billede…</translation> </message> <message> <source>&amp;Copy Image</source> <translation>&amp;Kopiér billede</translation> </message> <message> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation>Resulterende URI var for lang; prøv at forkorte teksten til mærkaten/beskeden.</translation> </message> <message> <source>Error encoding URI into QR Code.</source> <translation>Fejl ved kodning fra URI til QR-kode.</translation> </message> <message> <source>QR code support not available.</source> <translation>QR-kode-understøttelse er ikke tilgængelig.</translation> </message> <message> <source>Save QR Code</source> <translation>Gem QR-kode</translation> </message> 
<message> <source>PNG Image (*.png)</source> <translation>PNG-billede (*.png)</translation> </message> </context> <context> <name>RPCConsole</name> <message> <source>N/A</source> <translation>N/A</translation> </message> <message> <source>Client version</source> <translation>Klientversion</translation> </message> <message> <source>&amp;Information</source> <translation>&amp;Information</translation> </message> <message> <source>General</source> <translation>Generelt</translation> </message> <message> <source>Using BerkeleyDB version</source> <translation>Bruger BerkeleyDB version</translation> </message> <message> <source>Datadir</source> <translation>Datamappe</translation> </message> <message> <source>To specify a non-default location of the data directory use the '%1' option.</source> <translation>For at angive en alternativ placering af mappen med data, skal du bruge tilvalget ‘%1’.</translation> </message> <message> <source>Blocksdir</source> <translation>Blokmappe</translation> </message> <message> <source>To specify a non-default location of the blocks directory use the '%1' option.</source> <translation>For at angive en alternativ placering af mappen med blokke, skal du bruge tilvalget ‘%1’.</translation> </message> <message> <source>Startup time</source> <translation>Opstartstidspunkt</translation> </message> <message> <source>Network</source> <translation>Netværk</translation> </message> <message> <source>Name</source> <translation>Navn</translation> </message> <message> <source>Number of connections</source> <translation>Antal forbindelser</translation> </message> <message> <source>Block chain</source> <translation>Blokkæde</translation> </message> <message> <source>Memory Pool</source> <translation>Hukommelsespulje</translation> </message> <message> <source>Current number of transactions</source> <translation>Aktuelt antal transaktioner</translation> </message> <message> <source>Memory usage</source> <translation>Hukommelsesforbrug</translation> </message> <message> <source>Wallet: </source> <translation>Tegnebog:</translation> </message> <message> <source>(none)</source> <translation>(ingen)</translation> </message> <message> <source>&amp;Reset</source> <translation>&amp;Nulstil</translation> </message> <message> <source>Received</source> <translation>Modtaget</translation> </message> <message> <source>Sent</source> <translation>Sendt</translation> </message> <message> <source>&amp;Peers</source> <translation>Andre &amp;knuder</translation> </message> <message> <source>Banned peers</source> <translation>Bandlyste knuder</translation> </message> <message> <source>Select a peer to view detailed information.</source> <translation>Vælg en anden knude for at se detaljeret information.</translation> </message> <message> <source>Direction</source> <translation>Retning</translation> </message> <message> <source>Version</source> <translation>Version</translation> </message> <message> <source>Starting Block</source> <translation>Startblok</translation> </message> <message> <source>Synced Headers</source> <translation>Synkroniserede hoveder</translation> </message> <message> <source>Synced Blocks</source> <translation>Synkroniserede blokke</translation> </message> <message> <source>The mapped Autonomous System used for diversifying peer selection.</source> <translation>Afbildning fra Autonome Systemer (et Internet-Protocol-rutefindingspræfiks) til IP-adresser, som bruges til at diversificere knudeforbindelser. 
Den engelske betegnelse er "asmap".</translation> </message> <message> <source>Mapped AS</source> <translation>Autonomt-System-afbildning</translation> </message> <message> <source>User Agent</source> <translation>Brugeragent</translation> </message> <message> <source>Node window</source> <translation>Knudevindue</translation> </message> <message> <source>Current block height</source> <translation>Nuværende blokhøjde</translation> </message> <message> <source>Open the %1 debug log file from the current data directory. This can take a few seconds for large log files.</source> <translation>Åbn %1s fejlsøgningslogfil fra den aktuelle datamappe. Dette kan tage nogle få sekunder for store logfiler.</translation> </message> <message> <source>Decrease font size</source> <translation>Formindsk skrifttypestørrelse</translation> </message> <message> <source>Increase font size</source> <translation>Forstør skrifttypestørrelse</translation> </message> <message> <source>Permissions</source> <translation>Tilladelser</translation> </message> <message> <source>Services</source> <translation>Tjenester</translation> </message> <message> <source>Connection Time</source> <translation>Forbindelsestid</translation> </message> <message> <source>Last Send</source> <translation>Seneste afsendelse</translation> </message> <message> <source>Last Receive</source> <translation>Seneste modtagelse</translation> </message> <message> <source>Ping Time</source> <translation>Ping-tid</translation> </message> <message> <source>The duration of a currently outstanding ping.</source> <translation>Varigheden af den aktuelt igangværende ping.</translation> </message> <message> <source>Ping Wait</source> <translation>Ping-ventetid</translation> </message> <message> <source>Min Ping</source> <translation>Minimum ping</translation> </message> <message> <source>Time Offset</source> <translation>Tidsforskydning</translation> </message> <message> <source>Last block time</source> <translation>Tidsstempel for seneste blok</translation> </message> <message> <source>&amp;Open</source> <translation>&amp;Åbn</translation> </message> <message> <source>&amp;Console</source> <translation>&amp;Konsol</translation> </message> <message> <source>&amp;Network Traffic</source> <translation>&amp;Netværkstrafik</translation> </message> <message> <source>Totals</source> <translation>Totaler</translation> </message> <message> <source>In:</source> <translation>Indkommende:</translation> </message> <message> <source>Out:</source> <translation>Udgående:</translation> </message> <message> <source>Debug log file</source> <translation>Fejlsøgningslogfil</translation> </message> <message> <source>Clear console</source> <translation>Ryd konsol</translation> </message> <message> <source>1 &amp;hour</source> <translation>1 &amp;time</translation> </message> <message> <source>1 &amp;day</source> <translation>1 &amp;dag</translation> </message> <message> <source>1 &amp;week</source> <translation>1 &amp;uge</translation> </message> <message> <source>1 &amp;year</source> <translation>1 &amp;år</translation> </message> <message> <source>&amp;Disconnect</source> <translation>&amp;Afbryd forbindelse</translation> </message> <message> <source>Ban for</source> <translation>Bandlys i</translation> </message> <message> <source>&amp;Unban</source> <translation>&amp;Fjern bandlysning</translation> </message> <message> <source>Welcome to the %1 RPC console.</source> <translation>Velkommen til %1s RPC-konsol.</translation> </message> <message> <source>Use up and down arrows to 
navigate history, and %1 to clear screen.</source> <translation>Brug op- og nedpilene til at navigere i historikken og %1 til at rydde skærmen.</translation> </message> <message> <source>Type %1 for an overview of available commands.</source> <translation>Tast %1 for en oversigt over de tilgængelige kommandoer.</translation> </message> <message> <source>For more information on using this console type %1.</source> <translation>For mere information om at bruge denne konsol, tast %1.</translation> </message> <message> <source>WARNING: Scammers have been active, telling users to type commands here, stealing their wallet contents. Do not use this console without fully understanding the ramifications of a command.</source> <translation>ADVARSEL: Svindlere har tidligere aktivt bedt brugere om at indtaste kommandoer her for at stjæle indholdet af deres tegnebøger. Brug ikke denne konsol uden fuldt ud at forstå følgerne af en kommando.</translation> </message> <message> <source>Network activity disabled</source> <translation>Netværksaktivitet deaktiveret</translation> </message> <message> <source>Executing command without any wallet</source> <translation>Udfører kommando uden en tegnebog</translation> </message> <message> <source>Executing command using "%1" wallet</source> <translation>Udfører kommando med tegnebogen "%1"</translation> </message> <message> <source>(node id: %1)</source> <translation>(knude-id: %1)</translation> </message> <message> <source>via %1</source> <translation>via %1</translation> </message> <message> <source>never</source> <translation>aldrig</translation> </message> <message> <source>Inbound</source> <translation>Indkommende</translation> </message> <message> <source>Outbound</source> <translation>Udgående</translation> </message> <message> <source>Unknown</source> <translation>Ukendt</translation> </message> </context> <context> <name>ReceiveCoinsDialog</name> <message> <source>&amp;Amount:</source> <translation>&amp;Beløb:</translation> </message> <message> <source>&amp;Label:</source> <translation>&amp;Mærkat:</translation> </message> <message> <source>&amp;Message:</source> <translation>&amp;Besked:</translation> </message> <message> <source>An optional message to attach to the payment request, which will be displayed when the request is opened. Note: The message will not be sent with the payment over the Bitcoin network.</source> <translation>En valgfri besked, der føjes til betalingsanmodningen, og som vil vises, når anmodningen åbnes. Bemærk: Beskeden vil ikke sendes sammen med betalingen over Bitcoin-netværket.</translation> </message> <message> <source>An optional label to associate with the new receiving address.</source> <translation>Et valgfrit mærkat, der associeres med den nye modtagelsesadresse.</translation> </message> <message> <source>Use this form to request payments. All fields are &lt;b&gt;optional&lt;/b&gt;.</source> <translation>Brug denne formular for at anmode om betalinger. Alle felter er &lt;b&gt;valgfri&lt;/b&gt;.</translation> </message> <message> <source>An optional amount to request. Leave this empty or zero to not request a specific amount.</source> <translation>Et valgfrit beløb til anmodning. Lad dette felt være tomt eller indeholde nul for at anmode om et ikke-specifikt beløb.</translation> </message> <message> <source>An optional label to associate with the new receiving address (used by you to identify an invoice). 
It is also attached to the payment request.</source> <translation>Et valgfrit mærkat, der associeres med den nye modtagelsesadresse. Det bruges til at identificere en faktura. Det er også indlejret i betalingsanmodningen.</translation> </message> <message> <source>An optional message that is attached to the payment request and may be displayed to the sender.</source> <translation>En valgfri meddelelse som er indlejret i betalingsanmodningen og som kan blive vist til afsenderen.</translation> </message> <message> <source>&amp;Create new receiving address</source> <translation>&amp;Opret ny modtagelsesadresse</translation> </message> <message> <source>Clear all fields of the form.</source> <translation>Ryd alle felter i formularen.</translation> </message> <message> <source>Clear</source> <translation>Ryd</translation> </message> <message> <source>Native segwit addresses (aka Bech32 or BIP-173) reduce your transaction fees later on and offer better protection against typos, but old wallets don't support them. When unchecked, an address compatible with older wallets will be created instead.</source> <translation>Rene segwit-adresser (kendt som Bech32 eller BIP-173) reducerer dine transaktionsgebyrer i det lange løb og giver bedre beskyttelse imod tastefejl, men gamle tegnebøger understøtter dem ikke. Hvis dette ikke vælges, vil i stedet en adresse, der fungerer med ældre tegnebøger, oprettes.</translation> </message> <message> <source>Generate native segwit (Bech32) address</source> <translation>Generér rene segwit-adresser (Bech32)</translation> </message> <message> <source>Requested payments history</source> <translation>Historik over betalingsanmodninger</translation> </message> <message> <source>Show the selected request (does the same as double clicking an entry)</source> <translation>Vis den valgte anmodning (gør det samme som dobbeltklik på en indgang)</translation> </message> <message> <source>Show</source> <translation>Vis</translation> </message> <message> <source>Remove the selected entries from the list</source> <translation>Fjern de valgte indgange fra listen</translation> </message> <message> <source>Remove</source> <translation>Fjern</translation> </message> <message> <source>Copy URI</source> <translation>Kopiér URI</translation> </message> <message> <source>Copy label</source> <translation>Kopiér mærkat</translation> </message> <message> <source>Copy message</source> <translation>Kopiér besked</translation> </message> <message> <source>Copy amount</source> <translation>Kopiér beløb</translation> </message> <message> <source>Could not unlock wallet.</source> <translation>Kunne ikke låse tegnebog op.</translation> </message> <message> <source>Could not generate new %1 address</source> <translation>Kunne ikke generere ny %1-adresse</translation> </message> </context> <context> <name>ReceiveRequestDialog</name> <message> <source>Request payment to ...</source> <translation>Anmod om betaling til …</translation> </message> <message> <source>Address:</source> <translation>Adresse:</translation> </message> <message> <source>Amount:</source> <translation>Beløb:</translation> </message> <message> <source>Label:</source> <translation>Mærkat:</translation> </message> <message> <source>Message:</source> <translation>Besked:</translation> </message> <message> <source>Wallet:</source> <translation>Tegnebog:</translation> </message> <message> <source>Copy &amp;URI</source> <translation>Kopiér &amp;URI</translation> </message> <message> <source>Copy &amp;Address</source> <translation>Kopiér 
&amp;adresse</translation> </message> <message> <source>&amp;Save Image...</source> <translation>&amp;Gem billede…</translation> </message> <message> <source>Request payment to %1</source> <translation>Anmod om betaling til %1</translation> </message> <message> <source>Payment information</source> <translation>Betalingsinformation</translation> </message> </context> <context> <name>RecentRequestsTableModel</name> <message> <source>Date</source> <translation>Dato</translation> </message> <message> <source>Label</source> <translation>Mærkat</translation> </message> <message> <source>Message</source> <translation>Besked</translation> </message> <message> <source>(no label)</source> <translation>(ingen mærkat)</translation> </message> <message> <source>(no message)</source> <translation>(ingen besked)</translation> </message> <message> <source>(no amount requested)</source> <translation>(intet anmodet beløb)</translation> </message> <message> <source>Requested</source> <translation>Anmodet</translation> </message> </context> <context> <name>SendCoinsDialog</name> <message> <source>Send Coins</source> <translation>Send bitcoins</translation> </message> <message> <source>Coin Control Features</source> <translation>Egenskaber for coin-styring</translation> </message> <message> <source>Inputs...</source> <translation>Inputs…</translation> </message> <message> <source>automatically selected</source> <translation>valgt automatisk</translation> </message> <message> <source>Insufficient funds!</source> <translation>Utilstrækkelige midler!</translation> </message> <message> <source>Quantity:</source> <translation>Mængde:</translation> </message> <message> <source>Bytes:</source> <translation>Byte:</translation> </message> <message> <source>Amount:</source> <translation>Beløb:</translation> </message> <message> <source>Fee:</source> <translation>Gebyr:</translation> </message> <message> <source>After Fee:</source> <translation>Efter gebyr:</translation> </message> <message> <source>Change:</source> <translation>Byttepenge:</translation> </message> <message> <source>If this is activated, but the change address is empty or invalid, change will be sent to a newly generated address.</source> <translation>Hvis dette aktiveres, men byttepengeadressen er tom eller ugyldig, vil byttepenge blive sendt til en nygenereret adresse.</translation> </message> <message> <source>Custom change address</source> <translation>Tilpasset byttepengeadresse</translation> </message> <message> <source>Transaction Fee:</source> <translation>Transaktionsgebyr:</translation> </message> <message> <source>Choose...</source> <translation>Vælg…</translation> </message> <message> <source>Using the fallbackfee can result in sending a transaction that will take several hours or days (or never) to confirm. Consider choosing your fee manually or wait until you have validated the complete chain.</source> <translation>Brug af tilbagefaldsgebyret kan resultere i en transaktion, der tager adskillige timer eller dage (eller aldrig) at bekræfte. Overvej at vælge dit gebyr manuelt eller at vente indtil du har valideret hele kæden.</translation> </message> <message> <source>Warning: Fee estimation is currently not possible.</source> <translation>Advarsel: Gebyrestimering er ikke mulig i øjeblikket.</translation> </message> <message> <source>Specify a custom fee per kB (1,000 bytes) of the transaction's virtual size. 
Note: Since the fee is calculated on a per-byte basis, a fee of "100 satoshis per kB" for a transaction size of 500 bytes (half of 1 kB) would ultimately yield a fee of only 50 satoshis.</source> <translation>Angiv et brugerdefineret gebyr pr. kB (1.000 byte) af transaktionens virtuelle størrelse. Bemærk: Da gebyret beregnes pr. byte, vil et gebyr på "100 satoshi pr. kB" for en transaktionsstørrelse på 500 byte (halvdelen af 1 kB) i sidste ende kun give et gebyr på 50 satoshi.</translation> </message> <message> <source>per kilobyte</source> <translation>pr. kilobyte</translation> </message> <message> <source>Hide</source> <translation>Skjul</translation> </message> <message> <source>Recommended:</source> <translation>Anbefalet:</translation> </message> <message> <source>Custom:</source> <translation>Brugertilpasset:</translation> </message> <message> <source>(Smart fee not initialized yet. This usually takes a few blocks...)</source> <translation>(Smart-gebyr er ikke initialiseret endnu. Dette tager typisk nogle få blokke…)</translation> </message> <message> <source>Send to multiple recipients at once</source> <translation>Send til flere modtagere på en gang</translation> </message> <message> <source>Add &amp;Recipient</source> <translation>Tilføj &amp;modtager</translation> </message> <message> <source>Clear all fields of the form.</source> <translation>Ryd alle felter i formularen.</translation> </message> <message> <source>Dust:</source> <translation>Støv:</translation> </message> <message> <source>Hide transaction fee settings</source> <translation>Skjul indstillinger for transaktionsgebyr</translation> </message> <message> <source>When there is less transaction volume than space in the blocks, miners as well as relaying nodes may enforce a minimum fee. Paying only this minimum fee is just fine, but be aware that this can result in a never confirming transaction once there is more demand for bitcoin transactions than the network can process.</source> <translation>På tidspunkter, hvor der er færre transaktioner, end der er plads til i nye blokke, kan minere og videresendende knuder gennemtvinge et minimumsgebyr. Du kan vælge kun at betale dette minimumsgebyr, men vær opmærksom på, at det kan resultere i en transaktion, der aldrig bliver bekræftet, hvis mængden af nye bitcoin-transaktioner stiger til mere, end hvad netværket kan behandle ad gangen.</translation> </message> <message> <source>A too low fee might result in a never confirming transaction (read the tooltip)</source> <translation>Et for lavt gebyr kan resultere i en transaktion, der aldrig bekræftes (læs værktøjstippet)</translation> </message> <message> <source>Confirmation time target:</source> <translation>Mål for bekræftelsestid:</translation> </message> <message> <source>Enable Replace-By-Fee</source> <translation>Aktivér erstat-med-gebyr (RBF)</translation> </message> <message> <source>With Replace-By-Fee (BIP-125) you can increase a transaction's fee after it is sent. Without this, a higher fee may be recommended to compensate for increased transaction delay risk.</source> <translation>Med erstat-med-gebyr (Replace-By-Fee, BIP-125) kan du øge en transaktions gebyr, efter den er sendt. 
Uden dette kan et højere gebyr anbefales for at kompensere for øget risiko for at transaktionen bliver forsinket.</translation> </message> <message> <source>Clear &amp;All</source> <translation>Ryd &amp;alle</translation> </message> <message> <source>Balance:</source> <translation>Saldo:</translation> </message> <message> <source>Confirm the send action</source> <translation>Bekræft afsendelsen</translation> </message> <message> <source>S&amp;end</source> <translation>&amp;Afsend</translation> </message> <message> <source>Copy quantity</source> <translation>Kopiér mængde</translation> </message> <message> <source>Copy amount</source> <translation>Kopiér beløb</translation> </message> <message> <source>Copy fee</source> <translation>Kopiér gebyr</translation> </message> <message> <source>Copy after fee</source> <translation>Kopiér eftergebyr</translation> </message> <message> <source>Copy bytes</source> <translation>Kopiér byte</translation> </message> <message> <source>Copy dust</source> <translation>Kopiér støv</translation> </message> <message> <source>Copy change</source> <translation>Kopiér byttepenge</translation> </message> <message> <source>%1 (%2 blocks)</source> <translation>%1 (%2 blokke)</translation> </message> <message> <source>Cr&amp;eate Unsigned</source> <translation>L&amp;av usigneret</translation> </message> <message> <source> from wallet '%1'</source> <translation> fra tegnebog '%1'</translation> </message> <message> <source>%1 to '%2'</source> <translation>%1 til '%2'</translation> </message> <message> <source>%1 to %2</source> <translation>%1 til %2</translation> </message> <message> <source>Do you want to draft this transaction?</source> <translation>Vil du lave et udkast til denne transaktion?</translation> </message> <message> <source>Are you sure you want to send?</source> <translation>Er du sikker på, at du vil sende?</translation> </message> <message> <source>Create Unsigned</source> <translation>Opret Usigneret</translation> </message> <message> <source>Save Transaction Data</source> <translation>Gem Transaktionsdata</translation> </message> <message> <source>Partially Signed Transaction (Binary) (*.psbt)</source> <translation>Delvist Signeret Transaktion (Binær) (*.psbt)</translation> </message> <message> <source>PSBT saved</source> <translation>PSBT gemt</translation> </message> <message> <source>or</source> <translation>eller</translation> </message> <message> <source>You can increase the fee later (signals Replace-By-Fee, BIP-125).</source> <translation>Du kan øge gebyret senere (signalerer erstat-med-gebyr, BIP-125).</translation> </message> <message> <source>Please, review your transaction proposal. This will produce a Partially Signed Bitcoin Transaction (PSBT) which you can save or copy and then sign with e.g. an offline %1 wallet, or a PSBT-compatible hardware wallet.</source> <translation>Gennemse venligst dit transaktionsforslag. Dette vil producere en Delvist Signeret Bitcoin-Transaktion (PSBT), som du kan gemme eller kopiere, og så signere med f.eks. 
en offline %1-tegnebog eller en PSBT-kompatibel hardware-tegnebog.</translation> </message> <message> <source>Please, review your transaction.</source> <translation>Gennemse venligst din transaktion.</translation> </message> <message> <source>Transaction fee</source> <translation>Transaktionsgebyr</translation> </message> <message> <source>Not signalling Replace-By-Fee, BIP-125.</source> <translation>Signalerer ikke erstat-med-gebyr, BIP-125.</translation> </message> <message> <source>Total Amount</source> <translation>Totalt beløb</translation> </message> <message> <source>To review recipient list click "Show Details..."</source> <translation>For at gennemse modtagerlisten, tryk "Vis detaljer..."</translation> </message> <message> <source>Confirm send coins</source> <translation>Bekræft afsendelse af bitcoins</translation> </message> <message> <source>Confirm transaction proposal</source> <translation>Bekræft transaktionsudkast</translation> </message> <message> <source>Send</source> <translation>Afsend</translation> </message> <message> <source>Watch-only balance:</source> <translation>Kiggebalance:</translation> </message> <message> <source>The recipient address is not valid. Please recheck.</source> <translation>Modtageradressen er ikke gyldig. Tjek venligst igen.</translation> </message> <message> <source>The amount to pay must be larger than 0.</source> <translation>Beløbet til betaling skal være større end 0.</translation> </message> <message> <source>The amount exceeds your balance.</source> <translation>Beløbet overstiger din saldo.</translation> </message> <message> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation>Totalen overstiger din saldo, når transaktionsgebyret på %1 er inkluderet.</translation> </message> <message> <source>Duplicate address found: addresses should only be used once each.</source> <translation>Adressegenganger fundet: adresser bør kun bruges én gang hver.</translation> </message> <message> <source>Transaction creation failed!</source> <translation>Oprettelse af transaktion mislykkedes!</translation> </message> <message> <source>A fee higher than %1 is considered an absurdly high fee.</source> <translation>Et gebyr højere end %1 opfattes som et absurd højt gebyr.</translation> </message> <message> <source>Payment request expired.</source> <translation>Betalingsanmodning er udløbet.</translation> </message> <message numerus="yes"> <source>Estimated to begin confirmation within %n block(s).</source> <translation><numerusform>Bekræftelse estimeret til at begynde om %n blok.</numerusform><numerusform>Bekræftelse estimeret til at begynde om %n blokke.</numerusform></translation> </message> <message> <source>Warning: Invalid Bitcoin address</source> <translation>Advarsel: Ugyldig Bitcoin-adresse</translation> </message> <message> <source>Warning: Unknown change address</source> <translation>Advarsel: Ukendt byttepengeadresse</translation> </message> <message> <source>Confirm custom change address</source> <translation>Bekræft tilpasset byttepengeadresse</translation> </message> <message> <source>The address you selected for change is not part of this wallet. Any or all funds in your wallet may be sent to this address. Are you sure?</source> <translation>Den adresse, du har valgt til byttepenge, er ikke en del af denne tegnebog. Nogle af eller alle penge i din tegnebog kan blive sendt til denne adresse. 
Er du sikker?</translation> </message> <message> <source>(no label)</source> <translation>(ingen mærkat)</translation> </message> </context> <context> <name>SendCoinsEntry</name> <message> <source>A&amp;mount:</source> <translation>&amp;Beløb:</translation> </message> <message> <source>Pay &amp;To:</source> <translation>Betal &amp;til:</translation> </message> <message> <source>&amp;Label:</source> <translation>&amp;Mærkat:</translation> </message> <message> <source>Choose previously used address</source> <translation>Vælg tidligere brugt adresse</translation> </message> <message> <source>The Bitcoin address to send the payment to</source> <translation>Bitcoin-adresse, som betalingen skal sendes til</translation> </message> <message> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <source>Paste address from clipboard</source> <translation>Indsæt adresse fra udklipsholderen</translation> </message> <message> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <source>Remove this entry</source> <translation>Fjern denne indgang</translation> </message> <message> <source>The amount to send in the selected unit</source> <translation>Beløbet der skal afsendes i den valgte enhed</translation> </message> <message> <source>The fee will be deducted from the amount being sent. The recipient will receive less bitcoins than you enter in the amount field. If multiple recipients are selected, the fee is split equally.</source> <translation>Gebyret vil blive trukket fra det sendte beløb. Modtageren vil modtage færre bitcoin, end du indtaster i beløbsfeltet. Hvis flere modtagere vælges, vil gebyret deles ligeligt.</translation> </message> <message> <source>S&amp;ubtract fee from amount</source> <translation>&amp;Træk gebyr fra beløb</translation> </message> <message> <source>Use available balance</source> <translation>Brug tilgængelig saldo</translation> </message> <message> <source>Message:</source> <translation>Besked:</translation> </message> <message> <source>This is an unauthenticated payment request.</source> <translation>Dette er en uautentificeret betalingsanmodning.</translation> </message> <message> <source>This is an authenticated payment request.</source> <translation>Dette er en autentificeret betalingsanmodning.</translation> </message> <message> <source>Enter a label for this address to add it to the list of used addresses</source> <translation>Indtast et mærkat for denne adresse for at føje den til listen over brugte adresser</translation> </message> <message> <source>A message that was attached to the bitcoin: URI which will be stored with the transaction for your reference. Note: This message will not be sent over the Bitcoin network.</source> <translation>En besked, som blev føjet til “bitcoin:”-URI'en, som vil gemmes med transaktionen til din reference. 
Bemærk: Denne besked vil ikke blive sendt over Bitcoin-netværket.</translation> </message> <message> <source>Pay To:</source> <translation>Betal til:</translation> </message> <message> <source>Memo:</source> <translation>Memo:</translation> </message> </context> <context> <name>ShutdownWindow</name> <message> <source>%1 is shutting down...</source> <translation>%1 lukker ned…</translation> </message> <message> <source>Do not shut down the computer until this window disappears.</source> <translation>Luk ikke computeren ned, før dette vindue forsvinder.</translation> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <source>Signatures - Sign / Verify a Message</source> <translation>Signaturer – Underskriv/verificér en besked</translation> </message> <message> <source>&amp;Sign Message</source> <translation>&amp;Signér besked</translation> </message> <message> <source>You can sign messages/agreements with your addresses to prove you can receive bitcoins sent to them. Be careful not to sign anything vague or random, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation>Du kan signere beskeder/aftaler med dine adresser for at bevise, at du kan modtage bitcoin, der bliver sendt til adresserne. Vær forsigtig med ikke at signere noget vagt eller tilfældigt, da eventuelle phishing-angreb kan snyde dig til at overlade din identitet til dem. Signér kun fuldt ud detaljerede udsagn, som du er enig i.</translation> </message> <message> <source>The Bitcoin address to sign the message with</source> <translation>Bitcoin-adresse, som beskeden skal signeres med</translation> </message> <message> <source>Choose previously used address</source> <translation>Vælg tidligere brugt adresse</translation> </message> <message> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <source>Paste address from clipboard</source> <translation>Indsæt adresse fra udklipsholderen</translation> </message> <message> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <source>Enter the message you want to sign here</source> <translation>Indtast her beskeden, du ønsker at signere</translation> </message> <message> <source>Signature</source> <translation>Signatur</translation> </message> <message> <source>Copy the current signature to the system clipboard</source> <translation>Kopiér den nuværende signatur til systemets udklipsholder</translation> </message> <message> <source>Sign the message to prove you own this Bitcoin address</source> <translation>Signér denne besked for at bevise, at Bitcoin-adressen tilhører dig</translation> </message> <message> <source>Sign &amp;Message</source> <translation>Signér &amp;besked</translation> </message> <message> <source>Reset all sign message fields</source> <translation>Nulstil alle “signér besked”-felter</translation> </message> <message> <source>Clear &amp;All</source> <translation>Ryd &amp;alle</translation> </message> <message> <source>&amp;Verify Message</source> <translation>&amp;Verificér besked</translation> </message> <message> <source>Enter the receiver's address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack. 
Note that this only proves the signing party receives with the address, it cannot prove sendership of any transaction!</source> <translation>Indtast modtagerens adresse, besked (vær sikker på at kopiere linjeskift, mellemrum, tabuleringer, etc. præcist) og signatur herunder for at verificere beskeden. Vær forsigtig med ikke at læse noget ud fra signaturen, som ikke står i selve beskeden, for at undgå at blive snydt af et eventuelt man-in-the-middle-angreb. Bemærk, at dette kun beviser, at den signerende person kan modtage med adressen; det kan ikke bevise hvem der har sendt en given transaktion!</translation> </message> <message> <source>The Bitcoin address the message was signed with</source> <translation>Bitcoin-adressen, som beskeden blev signeret med</translation> </message> <message> <source>The signed message to verify</source> <translation>Den signerede meddelelse som skal verificeres</translation> </message> <message> <source>The signature given when the message was signed</source> <translation>Signaturen som blev givet da meddelelsen blev signeret</translation> </message> <message> <source>Verify the message to ensure it was signed with the specified Bitcoin address</source> <translation>Verificér beskeden for at sikre, at den er signeret med den angivne Bitcoin-adresse</translation> </message> <message> <source>Verify &amp;Message</source> <translation>Verificér &amp;besked</translation> </message> <message> <source>Reset all verify message fields</source> <translation>Nulstil alle “verificér besked”-felter</translation> </message> <message> <source>Click "Sign Message" to generate signature</source> <translation>Klik “Signér besked” for at generere signaturen</translation> </message> <message> <source>The entered address is invalid.</source> <translation>Den indtastede adresse er ugyldig.</translation> </message> <message> <source>Please check the address and try again.</source> <translation>Tjek venligst adressen og forsøg igen.</translation> </message> <message> <source>The entered address does not refer to a key.</source> <translation>Den indtastede adresse henviser ikke til en nøgle.</translation> </message> <message> <source>Wallet unlock was cancelled.</source> <translation>Tegnebogsoplåsning annulleret.</translation> </message> <message> <source>No error</source> <translation>Ingen fejl</translation> </message> <message> <source>Private key for the entered address is not available.</source> <translation>Den private nøgle for den indtastede adresse er ikke tilgængelig.</translation> </message> <message> <source>Message signing failed.</source> <translation>Signering af besked mislykkedes.</translation> </message> <message> <source>Message signed.</source> <translation>Besked signeret.</translation> </message> <message> <source>The signature could not be decoded.</source> <translation>Signaturen kunne ikke afkodes.</translation> </message> <message> <source>Please check the signature and try again.</source> <translation>Tjek venligst signaturen og forsøg igen.</translation> </message> <message> <source>The signature did not match the message digest.</source> <translation>Signaturen stemmer ikke overens med beskedens indhold.</translation> </message> <message> <source>Message verification failed.</source> <translation>Verificering af besked mislykkedes.</translation> </message> <message> <source>Message verified.</source> <translation>Besked verificeret.</translation> </message> </context> <context> <name>TrafficGraphWidget</name> <message> <source>KB/s</source> 
<translation>KB/s</translation> </message> </context> <context> <name>TransactionDesc</name> <message numerus="yes"> <source>Open for %n more block(s)</source> <translation><numerusform>Åben i %n yderligere blok</numerusform><numerusform>Åben i %n yderligere blokke</numerusform></translation> </message> <message> <source>Open until %1</source> <translation>Åben indtil %1</translation> </message> <message> <source>conflicted with a transaction with %1 confirmations</source> <translation>i konflikt med en transaktion, der har %1 bekræftelser</translation> </message> <message> <source>0/unconfirmed, %1</source> <translation>0/ubekræftet, %1</translation> </message> <message> <source>in memory pool</source> <translation>i hukommelsespulje</translation> </message> <message> <source>not in memory pool</source> <translation>ikke i hukommelsespulje</translation> </message> <message> <source>abandoned</source> <translation>opgivet</translation> </message> <message> <source>%1/unconfirmed</source> <translation>%1/ubekræftet</translation> </message> <message> <source>%1 confirmations</source> <translation>%1 bekræftelser</translation> </message> <message> <source>Status</source> <translation>Status</translation> </message> <message> <source>Date</source> <translation>Dato</translation> </message> <message> <source>Source</source> <translation>Kilde</translation> </message> <message> <source>Generated</source> <translation>Genereret</translation> </message> <message> <source>From</source> <translation>Fra</translation> </message> <message> <source>unknown</source> <translation>ukendt</translation> </message> <message> <source>To</source> <translation>Til</translation> </message> <message> <source>own address</source> <translation>egen adresse</translation> </message> <message> <source>watch-only</source> <translation>kigge</translation> </message> <message> <source>label</source> <translation>mærkat</translation> </message> <message> <source>Credit</source> <translation>Kredit</translation> </message> <message numerus="yes"> <source>matures in %n more block(s)</source> <translation><numerusform>modner om %n blok</numerusform><numerusform>modner om %n blokke</numerusform></translation> </message> <message> <source>not accepted</source> <translation>ikke accepteret</translation> </message> <message> <source>Debit</source> <translation>Debet</translation> </message> <message> <source>Total debit</source> <translation>Total debet</translation> </message> <message> <source>Total credit</source> <translation>Total kredit</translation> </message> <message> <source>Transaction fee</source> <translation>Transaktionsgebyr</translation> </message> <message> <source>Net amount</source> <translation>Nettobeløb</translation> </message> <message> <source>Message</source> <translation>Besked</translation> </message> <message> <source>Comment</source> <translation>Kommentar</translation> </message> <message> <source>Transaction ID</source> <translation>Transaktions-ID</translation> </message> <message> <source>Transaction total size</source> <translation>Totalstørrelse af transaktion</translation> </message> <message> <source>Transaction virtual size</source> <translation>Transaktionens virtuelle størrelse</translation> </message> <message> <source>Output index</source> <translation>Outputindeks</translation> </message> <message> <source> (Certificate was not verified)</source> <translation>(certifikat er ikke verificeret)</translation> </message> <message> <source>Merchant</source> 
<translation>Forretningsdrivende</translation> </message> <message> <source>Generated coins must mature %1 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation>Minede bitcoins skal modne %1 blokke, før de kan bruges. Da du genererede denne blok, blev den transmitteret til netværket for at blive føjet til blokkæden. Hvis det ikke lykkes at få den i kæden, vil dens tilstand ændres til “ikke accepteret”, og den vil ikke kunne bruges. Dette kan ske nu og da, hvis en anden knude udvinder en blok inden for nogle få sekunder fra din.</translation> </message> <message> <source>Debug information</source> <translation>Fejlsøgningsinformation</translation> </message> <message> <source>Transaction</source> <translation>Transaktion</translation> </message> <message> <source>Inputs</source> <translation>Input</translation> </message> <message> <source>Amount</source> <translation>Beløb</translation> </message> <message> <source>true</source> <translation>sand</translation> </message> <message> <source>false</source> <translation>falsk</translation> </message> </context> <context> <name>TransactionDescDialog</name> <message> <source>This pane shows a detailed description of the transaction</source> <translation>Denne rude viser en detaljeret beskrivelse af transaktionen</translation> </message> <message> <source>Details for %1</source> <translation>Detaljer for %1</translation> </message> </context> <context> <name>TransactionTableModel</name> <message> <source>Date</source> <translation>Dato</translation> </message> <message> <source>Type</source> <translation>Type</translation> </message> <message> <source>Label</source> <translation>Mærkat</translation> </message> <message numerus="yes"> <source>Open for %n more block(s)</source> <translation><numerusform>Åben i %n yderligere blok</numerusform><numerusform>Åben i %n yderligere blokke</numerusform></translation> </message> <message> <source>Open until %1</source> <translation>Åben indtil %1</translation> </message> <message> <source>Unconfirmed</source> <translation>Ubekræftet</translation> </message> <message> <source>Abandoned</source> <translation>Opgivet</translation> </message> <message> <source>Confirming (%1 of %2 recommended confirmations)</source> <translation>Bekræfter (%1 af %2 anbefalede bekræftelser)</translation> </message> <message> <source>Confirmed (%1 confirmations)</source> <translation>Bekræftet (%1 bekræftelser)</translation> </message> <message> <source>Conflicted</source> <translation>Konflikt</translation> </message> <message> <source>Immature (%1 confirmations, will be available after %2)</source> <translation>Umoden (%1 bekræftelser; vil være tilgængelig efter %2)</translation> </message> <message> <source>Generated but not accepted</source> <translation>Genereret, men ikke accepteret</translation> </message> <message> <source>Received with</source> <translation>Modtaget med</translation> </message> <message> <source>Received from</source> <translation>Modtaget fra</translation> </message> <message> <source>Sent to</source> <translation>Sendt til</translation> </message> <message> <source>Payment to yourself</source> <translation>Betaling til dig selv</translation> </message> <message> <source>Mined</source> 
<translation>Minet</translation> </message> <message> <source>watch-only</source> <translation>kigge</translation> </message> <message> <source>(n/a)</source> <translation>(n/a)</translation> </message> <message> <source>(no label)</source> <translation>(ingen mærkat)</translation> </message> <message> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation>Transaktionsstatus. Hold musen over dette felt for at vise antallet af bekræftelser.</translation> </message> <message> <source>Date and time that the transaction was received.</source> <translation>Dato og klokkeslæt for modtagelse af transaktionen.</translation> </message> <message> <source>Type of transaction.</source> <translation>Transaktionstype.</translation> </message> <message> <source>Whether or not a watch-only address is involved in this transaction.</source> <translation>Afgør hvorvidt en kigge-adresse er involveret i denne transaktion.</translation> </message> <message> <source>User-defined intent/purpose of the transaction.</source> <translation>Brugerdefineret hensigt/formål med transaktionen.</translation> </message> <message> <source>Amount removed from or added to balance.</source> <translation>Beløb trukket fra eller tilføjet balance.</translation> </message> </context> <context> <name>TransactionView</name> <message> <source>All</source> <translation>Alle</translation> </message> <message> <source>Today</source> <translation>I dag</translation> </message> <message> <source>This week</source> <translation>Denne uge</translation> </message> <message> <source>This month</source> <translation>Denne måned</translation> </message> <message> <source>Last month</source> <translation>Sidste måned</translation> </message> <message> <source>This year</source> <translation>I år</translation> </message> <message> <source>Range...</source> <translation>Interval…</translation> </message> <message> <source>Received with</source> <translation>Modtaget med</translation> </message> <message> <source>Sent to</source> <translation>Sendt til</translation> </message> <message> <source>To yourself</source> <translation>Til dig selv</translation> </message> <message> <source>Mined</source> <translation>Minet</translation> </message> <message> <source>Other</source> <translation>Andet</translation> </message> <message> <source>Enter address, transaction id, or label to search</source> <translation>Indtast adresse, transaktions-ID eller mærkat for at søge</translation> </message> <message> <source>Min amount</source> <translation>Minimumsbeløb</translation> </message> <message> <source>Abandon transaction</source> <translation>Opgiv transaktion</translation> </message> <message> <source>Increase transaction fee</source> <translation>Forøg transaktionsgebyr</translation> </message> <message> <source>Copy address</source> <translation>Kopiér adresse</translation> </message> <message> <source>Copy label</source> <translation>Kopiér mærkat</translation> </message> <message> <source>Copy amount</source> <translation>Kopiér beløb</translation> </message> <message> <source>Copy transaction ID</source> <translation>Kopiér transaktions-ID</translation> </message> <message> <source>Copy raw transaction</source> <translation>Kopiér rå transaktion</translation> </message> <message> <source>Copy full transaction details</source> <translation>Kopiér komplette transaktionsdetaljer</translation> </message> <message> <source>Edit label</source> <translation>Redigér mærkat</translation> </message> <message> 
<source>Show transaction details</source> <translation>Vis transaktionsdetaljer</translation> </message> <message> <source>Export Transaction History</source> <translation>Eksportér transaktionshistorik</translation> </message> <message> <source>Comma separated file (*.csv)</source> <translation>Kommasepareret fil (*.csv)</translation> </message> <message> <source>Confirmed</source> <translation>Bekræftet</translation> </message> <message> <source>Watch-only</source> <translation>Kigge</translation> </message> <message> <source>Date</source> <translation>Dato</translation> </message> <message> <source>Type</source> <translation>Type</translation> </message> <message> <source>Label</source> <translation>Mærkat</translation> </message> <message> <source>Address</source> <translation>Adresse</translation> </message> <message> <source>ID</source> <translation>ID</translation> </message> <message> <source>Exporting Failed</source> <translation>Eksport mislykkedes</translation> </message> <message> <source>There was an error trying to save the transaction history to %1.</source> <translation>En fejl opstod under gemning af transaktionshistorik til %1.</translation> </message> <message> <source>Exporting Successful</source> <translation>Eksport problemfri</translation> </message> <message> <source>The transaction history was successfully saved to %1.</source> <translation>Transaktionshistorikken blev gemt til %1.</translation> </message> <message> <source>Range:</source> <translation>Interval:</translation> </message> <message> <source>to</source> <translation>til</translation> </message> </context> <context> <name>UnitDisplayStatusBarControl</name> <message> <source>Unit to show amounts in. Click to select another unit.</source> <translation>Enhed, som beløb vises i. Klik for at vælge en anden enhed.</translation> </message> </context> <context> <name>WalletController</name> <message> <source>Close wallet</source> <translation>Luk tegnebog</translation> </message> <message> <source>Are you sure you wish to close the wallet &lt;i&gt;%1&lt;/i&gt;?</source> <translation>Er du sikker på, at du ønsker at lukke tegnebog &lt;i&gt;%1&lt;/i&gt;?</translation> </message> <message> <source>Closing the wallet for too long can result in having to resync the entire chain if pruning is enabled.</source> <translation>Lukning af tegnebogen i for lang tid kan resultere i, at hele kæden skal synkroniseres forfra, hvis beskæring er aktiveret.</translation> </message> <message> <source>Close all wallets</source> <translation>Luk alle tegnebøger</translation> </message> <message> <source>Are you sure you wish to close all wallets?</source> <translation>Er du sikker på, at du vil lukke alle tegnebøger?</translation> </message> </context> <context> <name>WalletFrame</name> <message> <source>No wallet has been loaded. Go to File &gt; Open Wallet to load a wallet. - OR -</source> <translation>Ingen pung er blevet indlæst. Gå til Fil &gt; Åbn Pung for at indlæse en pung. 
- ELLER -</translation> </message> <message> <source>Create a new wallet</source> <translation>Opret en ny tegnebog</translation> </message> </context> <context> <name>WalletModel</name> <message> <source>Send Coins</source> <translation>Send bitcoins</translation> </message> <message> <source>Fee bump error</source> <translation>Fejl ved gebyrforøgelse</translation> </message> <message> <source>Increasing transaction fee failed</source> <translation>Forøgelse af transaktionsgebyr mislykkedes</translation> </message> <message> <source>Do you want to increase the fee?</source> <translation>Vil du forøge gebyret?</translation> </message> <message> <source>Do you want to draft a transaction with fee increase?</source> <translation>Vil du lave et transaktionsudkast med øget gebyr?</translation> </message> <message> <source>Current fee:</source> <translation>Aktuelt gebyr:</translation> </message> <message> <source>Increase:</source> <translation>Forøgelse:</translation> </message> <message> <source>New fee:</source> <translation>Nyt gebyr:</translation> </message> <message> <source>Confirm fee bump</source> <translation>Bekræft gebyrforøgelse</translation> </message> <message> <source>Can't draft transaction.</source> <translation>Kan ikke lave transaktionsudkast.</translation> </message> <message> <source>PSBT copied</source> <translation>PSBT kopieret</translation> </message> <message> <source>Can't sign transaction.</source> <translation>Kan ikke signere transaktionen.</translation> </message> <message> <source>Could not commit transaction</source> <translation>Kunne ikke gennemføre transaktionen</translation> </message> <message> <source>default wallet</source> <translation>standardtegnebog</translation> </message> </context> <context> <name>WalletView</name> <message> <source>&amp;Export</source> <translation>&amp;Eksportér</translation> </message> <message> <source>Export the data in the current tab to a file</source> <translation>Eksportér den aktuelle visning til en fil</translation> </message> <message> <source>Error</source> <translation>Fejl</translation> </message> <message> <source>Unable to decode PSBT from clipboard (invalid base64)</source> <translation>Kan ikke afkode PSBT fra udklipsholder (ugyldigt base64)</translation> </message> <message> <source>Load Transaction Data</source> <translation>Indlæs transaktionsdata</translation> </message> <message> <source>Partially Signed Transaction (*.psbt)</source> <translation>Delvist signeret transaktion (*.psbt)</translation> </message> <message> <source>PSBT file must be smaller than 100 MiB</source> <translation>PSBT-fil skal være mindre end 100 MiB</translation> </message> <message> <source>Unable to decode PSBT</source> <translation>Kunne ikke afkode PSBT</translation> </message> <message> <source>Backup Wallet</source> <translation>Sikkerhedskopiér tegnebog</translation> </message> <message> <source>Wallet Data (*.dat)</source> <translation>Tegnebogsdata (*.dat)</translation> </message> <message> <source>Backup Failed</source> <translation>Sikkerhedskopiering mislykkedes</translation> </message> <message> <source>There was an error trying to save the wallet data to %1.</source> <translation>Der skete en fejl under gemning af tegnebogsdata til %1.</translation> </message> <message> <source>Backup Successful</source> <translation>Sikkerhedskopiering problemfri</translation> </message> <message> <source>The wallet data was successfully saved to %1.</source> <translation>Tegnebogsdata blev gemt til %1.</translation> </message> 
<message> <source>Cancel</source> <translation>Annullér</translation> </message> </context> <context> <name>bitcoin-core</name> <message> <source>Distributed under the MIT software license, see the accompanying file %s or %s</source> <translation>Distribueret under MIT-softwarelicensen; se den vedlagte fil %s eller %s</translation> </message> <message> <source>Prune configured below the minimum of %d MiB. Please use a higher number.</source> <translation>Beskæring er sat under minimumsgrænsen på %d MiB. Brug venligst et større tal.</translation> </message> <message> <source>Prune: last wallet synchronisation goes beyond pruned data. You need to -reindex (download the whole blockchain again in case of pruned node)</source> <translation>Beskæring: Seneste synkronisering rækker udover beskårne data. Du er nødt til at bruge -reindex (downloade hele blokkæden igen i fald af beskåret knude)</translation> </message> <message> <source>Pruning blockstore...</source> <translation>Beskærer bloklager…</translation> </message> <message> <source>Unable to start HTTP server. See debug log for details.</source> <translation>Kunne ikke starte HTTP-server. Se fejlretningslog for detaljer.</translation> </message> <message> <source>The %s developers</source> <translation>Udviklerne af %s</translation> </message> <message> <source>Cannot obtain a lock on data directory %s. %s is probably already running.</source> <translation>Kan ikke opnå en lås på datamappe %s. %s kører sandsynligvis allerede.</translation> </message> <message> <source>Cannot provide specific connections and have addrman find outgoing connections at the same.</source> <translation>Kan ikke give specifikke forbindelser og få addrman til at finde udgående forbindelser på samme tid.</translation> </message> <message> <source>Error reading %s! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation>Fejl under læsning af %s! Alle nøgler blev læst korrekt, men transaktionsdata eller indgange i adressebogen kan mangle eller være ukorrekte.</translation> </message> <message> <source>More than one onion bind address is provided. Using %s for the automatically created Tor onion service.</source> <translation>Mere end én onion-bindingsadresse er angivet. Bruger %s til den automatiske oprettelse af Tor-onion-tjeneste.</translation> </message> <message> <source>Please check that your computer's date and time are correct! If your clock is wrong, %s will not work properly.</source> <translation>Undersøg venligst, at din computers dato og klokkeslæt er korrekt indstillet! Hvis der er fejl i disse, vil %s ikke fungere korrekt.</translation> </message> <message> <source>Please contribute if you find %s useful. Visit %s for further information about the software.</source> <translation>Overvej venligst at bidrage til udviklingen, hvis du finder %s brugbar. Besøg %s for yderligere information om softwaren.</translation> </message> <message> <source>SQLiteDatabase: Failed to prepare the statement to fetch sqlite wallet schema version: %s</source> <translation>SQLiteDatabase: Forberedelse af udtrykket til at hente sqlite-pung-skemaversion mislykkedes: %s</translation> </message> <message> <source>SQLiteDatabase: Failed to prepare the statement to fetch the application id: %s</source> <translation>SQLiteDatabase: Forberedelse af udtrykket til at hente applikations-ID mislykkedes: %s</translation> </message> <message> <source>SQLiteDatabase: Unknown sqlite wallet schema version %d. 
Only version %d is supported</source> <translation>SQLiteDatabase: Ukendt sqlite-pung-skemaversion %d. Kun version %d understøttes</translation> </message> <message> <source>The block database contains a block which appears to be from the future. This may be due to your computer's date and time being set incorrectly. Only rebuild the block database if you are sure that your computer's date and time are correct</source> <translation>Blokdatabasen indeholder en blok, som ser ud til at være fra fremtiden. Dette kan skyldes, at din computers dato og tid ikke er sat korrekt. Genopbyg kun blokdatabasen, hvis du er sikker på, at din computers dato og tid er korrekt</translation> </message> <message> <source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source> <translation>Dette er en foreløbig testudgivelse – brug på eget ansvar – brug ikke til mining eller handelsprogrammer</translation> </message> <message> <source>This is the transaction fee you may discard if change is smaller than dust at this level</source> <translation>Dette er det transaktionsgebyr, du kan kassere, hvis byttepengene er mindre end støv på dette niveau</translation> </message> <message> <source>Unable to replay blocks. You will need to rebuild the database using -reindex-chainstate.</source> <translation>Kan ikke genafspille blokke. Du er nødt til at genopbygge databasen ved hjælp af -reindex-chainstate.</translation> </message> <message> <source>Unable to rewind the database to a pre-fork state. You will need to redownload the blockchain</source> <translation>Kan ikke spole databasen tilbage til en tilstand inden en splitning. Du er nødt til at downloade blokkæden igen</translation> </message> <message> <source>Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.</source> <translation>Advarsel: Netværket ser ikke ud til at være fuldt ud enigt! Enkelte minere ser ud til at opleve problemer.</translation> </message> <message> <source>Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.</source> <translation>Advarsel: Vi ser ikke ud til at være fuldt ud enige med andre knuder! 
Du kan være nødt til at opgradere, eller andre knuder kan være nødt til at opgradere.</translation> </message> <message> <source>-maxmempool must be at least %d MB</source> <translation>-maxmempool skal være mindst %d MB</translation> </message> <message> <source>Cannot resolve -%s address: '%s'</source> <translation>Kan ikke finde -%s-adressen: “%s”</translation> </message> <message> <source>Change index out of range</source> <translation>Byttepengeindeks uden for interval</translation> </message> <message> <source>Config setting for %s only applied on %s network when in [%s] section.</source> <translation>Opsætningen af %s bliver kun udført på %s-netværk under [%s]-sektionen.</translation> </message> <message> <source>Copyright (C) %i-%i</source> <translation>Ophavsret © %i-%i</translation> </message> <message> <source>Corrupted block database detected</source> <translation>Ødelagt blokdatabase opdaget</translation> </message> <message> <source>Could not find asmap file %s</source> <translation>Kan ikke finde asmap-filen %s</translation> </message> <message> <source>Could not parse asmap file %s</source> <translation>Kan ikke fortolke asmap-filen %s</translation> </message> <message> <source>Do you want to rebuild the block database now?</source> <translation>Ønsker du at genopbygge blokdatabasen nu?</translation> </message> <message> <source>Error initializing block database</source> <translation>Klargøring af blokdatabase mislykkedes</translation> </message> <message> <source>Error initializing wallet database environment %s!</source> <translation>Klargøring af tegnebogsdatabasemiljøet %s mislykkedes!</translation> </message> <message> <source>Error loading %s</source> <translation>Fejl under indlæsning af %s</translation> </message> <message> <source>Error loading %s: Private keys can only be disabled during creation</source> <translation>Fejl ved indlæsning af %s: Private nøgler kan kun deaktiveres under oprettelse</translation> </message> <message> <source>Error loading %s: Wallet corrupted</source> <translation>Fejl under indlæsning af %s: Tegnebog ødelagt</translation> </message> <message> <source>Error loading %s: Wallet requires newer version of %s</source> <translation>Fejl under indlæsning af %s: Tegnebog kræver nyere version af %s</translation> </message> <message> <source>Error loading block database</source> <translation>Indlæsning af blokdatabase mislykkedes</translation> </message> <message> <source>Error opening block database</source> <translation>Åbning af blokdatabase mislykkedes</translation> </message> <message> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation>Lytning på enhver port mislykkedes. Brug -listen=0, hvis du ønsker dette.</translation> </message> <message> <source>Failed to rescan the wallet during initialization</source> <translation>Genindlæsning af tegnebogen under initialisering mislykkedes</translation> </message> <message> <source>Failed to verify database</source> <translation>Kunne ikke verificere databasen</translation> </message> <message> <source>Ignoring duplicate -wallet %s.</source> <translation>Ignorerer duplikeret -wallet %s.</translation> </message> <message> <source>Importing...</source> <translation>Importerer…</translation> </message> <message> <source>Incorrect or no genesis block found. Wrong datadir for network?</source> <translation>Ukorrekt eller ingen tilblivelsesblok fundet. Forkert datamappe for netværk?</translation> </message> <message> <source>Initialization sanity check failed. 
%s is shutting down.</source> <translation>Sundhedstjek under initialisering mislykkedes. %s lukker ned.</translation> </message> <message> <source>Invalid P2P permission: '%s'</source> <translation>Ugyldig P2P-tilladelse: '%s'</translation> </message> <message> <source>Invalid amount for -%s=&lt;amount&gt;: '%s'</source> <translation>Ugyldigt beløb for -%s=&lt;beløb&gt;: “%s”</translation> </message> <message> <source>Invalid amount for -discardfee=&lt;amount&gt;: '%s'</source> <translation>Ugyldigt beløb for -discardfee=&lt;beløb&gt;: “%s”</translation> </message> <message> <source>Invalid amount for -fallbackfee=&lt;amount&gt;: '%s'</source> <translation>Ugyldigt beløb for -fallbackfee=&lt;beløb&gt;: “%s”</translation> </message> <message> <source>SQLiteDatabase: Failed to execute statement to verify database: %s</source> <translation>SQLiteDatabase: Udførelse af udtryk for at bekræfte database mislykkedes: %s</translation> </message> <message> <source>SQLiteDatabase: Failed to prepare statement to verify database: %s</source> <translation>SQLiteDatabase: Forberedelse af udtryk til at bekræfte database mislykkedes: %s</translation> </message> <message> <source>SQLiteDatabase: Failed to read database verification error: %s</source> <translation>SQLiteDatabase: Indlæsning af database-bekræftelsesfejl mislykkedes: %s</translation> </message> <message> <source>SQLiteDatabase: Unexpected application id. Expected %u, got %u</source> <translation>SQLiteDatabase: Uventet applikations-ID. Ventede %u, fik %u</translation> </message> <message> <source>Specified blocks directory "%s" does not exist.</source> <translation>Angivet blokmappe “%s” eksisterer ikke.</translation> </message> <message> <source>Unknown address type '%s'</source> <translation>Ukendt adressetype ‘%s’</translation> </message> <message> <source>Unknown change type '%s'</source> <translation>Ukendt byttepengetype ‘%s’</translation> </message> <message> <source>Upgrading txindex database</source> <translation>Opgraderer txindex-database</translation> </message> <message> <source>Loading P2P addresses...</source> <translation>Indlæser P2P-adresser…</translation> </message> <message> <source>Loading banlist...</source> <translation>Indlæser bandlysningsliste…</translation> </message> <message> <source>Not enough file descriptors available.</source> <translation>For få tilgængelige fildeskriptorer.</translation> </message> <message> <source>Prune cannot be configured with a negative value.</source> <translation>Beskæring kan ikke opsættes med en negativ værdi.</translation> </message> <message> <source>Prune mode is incompatible with -txindex.</source> <translation>Beskæringstilstand er ikke kompatibel med -txindex.</translation> </message> <message> <source>Replaying blocks...</source> <translation>Genafspiller blokke…</translation> </message> <message> <source>Rewinding blocks...</source> <translation>Spoler blokke tilbage…</translation> </message> <message> <source>The source code is available from %s.</source> <translation>Kildekoden er tilgængelig fra %s.</translation> </message> <message> <source>Transaction fee and change calculation failed</source> <translation>Beregning af transaktionsgebyr og byttepenge mislykkedes</translation> </message> <message> <source>Unable to bind to %s on this computer. %s is probably already running.</source> <translation>Ikke i stand til at tildele til %s på denne computer. 
%s kører formodentlig allerede.</translation> </message> <message> <source>Unable to generate keys</source> <translation>Ude af stand til at generere nøgler</translation> </message> <message> <source>Unsupported logging category %s=%s.</source> <translation>Ikke understøttet logningskategori %s=%s.</translation> </message> <message> <source>Upgrading UTXO database</source> <translation>Opgraderer UTXO-database</translation> </message> <message> <source>User Agent comment (%s) contains unsafe characters.</source> <translation>Brugeragent-kommentar (%s) indeholder usikre tegn.</translation> </message> <message> <source>Verifying blocks...</source> <translation>Verificerer blokke…</translation> </message> <message> <source>Wallet needed to be rewritten: restart %s to complete</source> <translation>Det var nødvendigt at genskrive tegnebogen: Genstart %s for at gennemføre</translation> </message> <message> <source>Error: Listening for incoming connections failed (listen returned error %s)</source> <translation>Fejl: Lytning efter indkommende forbindelser mislykkedes (lytning resulterede i fejl %s)</translation> </message> <message> <source>%s corrupt. Try using the wallet tool bitcoin-wallet to salvage or restoring a backup.</source> <translation>%s beskadiget. Prøv at bruge pung-værktøjet bitcoin-wallet til at bjærge eller gendanne en sikkerhedskopi.</translation> </message> <message> <source>Cannot upgrade a non HD split wallet without upgrading to support pre split keypool. Please use version 169900 or no version specified.</source> <translation>Kan ikke opgradere en ikke-HD-splittet pung uden at opgradere til at understøtte præ-split-nøglepøl. Brug venligst version 169900 eller ingen specificeret version.</translation> </message> <message> <source>Invalid amount for -maxtxfee=&lt;amount&gt;: '%s' (must be at least the minrelay fee of %s to prevent stuck transactions)</source> <translation>Ugyldigt beløb for -maxtxfee=&lt;beløb&gt;: “%s” (skal være på mindst minrelay-gebyret på %s for at undgå hængende transaktioner)</translation> </message> <message> <source>The transaction amount is too small to send after the fee has been deducted</source> <translation>Transaktionsbeløbet er for lille til at sende, når gebyret er trukket fra</translation> </message> <message> <source>This error could occur if this wallet was not shutdown cleanly and was last loaded using a build with a newer version of Berkeley DB. If so, please use the software that last loaded this wallet</source> <translation>Denne fejl kunne finde sted, hvis denne pung ikke blev lukket rent ned og sidst blev indlæst vha. en udgave med en nyere version af Berkeley DB. Brug i så fald venligst det program, som sidst indlæste denne pung</translation> </message> <message> <source>This is the maximum transaction fee you pay (in addition to the normal fee) to prioritize partial spend avoidance over regular coin selection.</source> <translation>Dette er det maksimale transaktionsgebyr, du betaler (ud over det normale gebyr) for at prioritere undgåelse af delvise forbrug frem for almindelig møntudvælgelse.</translation> </message> <message> <source>Transaction needs a change address, but we can't generate it. Please call keypoolrefill first.</source> <translation>Transaktionen har brug for en byttepengeadresse, men vi kan ikke generere den. Kald venligst keypoolrefill først.</translation> </message> <message> <source>You need to rebuild the database using -reindex to go back to unpruned mode. 
This will redownload the entire blockchain</source> <translation>Du er nødt til at genopbygge databasen ved hjælp af -reindex for at gå tilbage til ikke-beskåret tilstand. Dette vil downloade hele blokkæden igen</translation> </message> <message> <source>A fatal internal error occurred, see debug.log for details</source> <translation>Der er sket en fatal intern fejl, se debug.log for detaljer</translation> </message> <message> <source>Cannot set -peerblockfilters without -blockfilterindex.</source> <translation>Kan ikke indstille -peerblockfilters uden -blockfilterindex.</translation> </message> <message> <source>Disk space is too low!</source> <translation>Diskpladsen er for lav!</translation> </message> <message> <source>Error reading from database, shutting down.</source> <translation>Fejl under læsning fra database; lukker ned.</translation> </message> <message> <source>Error upgrading chainstate database</source> <translation>Fejl under opgradering af kædetilstandsdatabase</translation> </message> <message> <source>Error: Disk space is low for %s</source> <translation>Fejl: Diskplads er lav for %s</translation> </message> <message> <source>Error: Keypool ran out, please call keypoolrefill first</source> <translation>Fejl: Nøglepøl løb tør, kald venligst keypoolrefill først</translation> </message> <message> <source>Fee rate (%s) is lower than the minimum fee rate setting (%s)</source> <translation>Gebyrrate (%s) er lavere end den minimale gebyrrate-indstilling (%s)</translation> </message> <message> <source>Invalid -onion address or hostname: '%s'</source> <translation>Ugyldig -onion-adresse eller værtsnavn: “%s”</translation> </message> <message> <source>Invalid -proxy address or hostname: '%s'</source> <translation>Ugyldig -proxy-adresse eller værtsnavn: “%s”</translation> </message> <message> <source>Invalid amount for -paytxfee=&lt;amount&gt;: '%s' (must be at least %s)</source> <translation>Ugyldigt beløb for -paytxfee=&lt;beløb&gt;: “%s” (skal være mindst %s)</translation> </message> <message> <source>Invalid netmask specified in -whitelist: '%s'</source> <translation>Ugyldig netmaske angivet i -whitelist: “%s”</translation> </message> <message> <source>Need to specify a port with -whitebind: '%s'</source> <translation>Nødt til at angive en port med -whitebind: “%s”</translation> </message> <message> <source>No proxy server specified. Use -proxy=&lt;ip&gt; or -proxy=&lt;ip:port&gt;.</source> <translation>Ingen proxyserver specificeret. 
Brug -proxy=&lt;ip&gt; eller -proxy=&lt;ip:port&gt;.</translation> </message> <message> <source>Prune mode is incompatible with -blockfilterindex.</source> <translation>Beskæringstilstand er ikke kompatibel med -blockfilterindex.</translation> </message> <message> <source>Reducing -maxconnections from %d to %d, because of system limitations.</source> <translation>Reducerer -maxconnections fra %d til %d på grund af systembegrænsninger.</translation> </message> <message> <source>Section [%s] is not recognized.</source> <translation>Sektion [%s] er ikke genkendt.</translation> </message> <message> <source>Signing transaction failed</source> <translation>Signering af transaktion mislykkedes</translation> </message> <message> <source>Specified -walletdir "%s" does not exist</source> <translation>Angivet -walletdir “%s” eksisterer ikke</translation> </message> <message> <source>Specified -walletdir "%s" is a relative path</source> <translation>Angivet -walletdir “%s” er en relativ sti</translation> </message> <message> <source>Specified -walletdir "%s" is not a directory</source> <translation>Angivet -walletdir “%s” er ikke en mappe</translation> </message> <message> <source>The specified config file %s does not exist </source> <translation>Den specificerede konfigurationsfil %s eksisterer ikke. </translation> </message> <message> <source>The transaction amount is too small to pay the fee</source> <translation>Transaktionsbeløbet er for lille til at betale gebyret</translation> </message> <message> <source>This is experimental software.</source> <translation>Dette er eksperimentel software.</translation> </message> <message> <source>Transaction amount too small</source> <translation>Transaktionsbeløb er for lavt</translation> </message> <message> <source>Transaction too large</source> <translation>Transaktionen er for stor</translation> </message> <message> <source>Unable to bind to %s on this computer (bind returned error %s)</source> <translation>Ikke i stand til at tildele til %s på denne computer (bind returnerede fejl %s)</translation> </message> <message> <source>Unable to create the PID file '%s': %s</source> <translation>Ikke i stand til at oprette PID-fil '%s': %s</translation> </message> <message> <source>Unable to generate initial keys</source> <translation>Kan ikke generere indledningsvise nøgler</translation> </message> <message> <source>Unknown -blockfilterindex value %s.</source> <translation>Ukendt -blockfilterindex-værdi %s.</translation> </message> <message> <source>Verifying wallet(s)...</source> <translation>Verificerer tegnebøger…</translation> </message> <message> <source>Warning: unknown new rules activated (versionbit %i)</source> <translation>Advarsel: Ukendte nye regler aktiveret (versionsbit %i)</translation> </message> <message> <source>-maxtxfee is set very high! Fees this large could be paid on a single transaction.</source> <translation>-maxtxfee er sat meget højt! Så store gebyrer risikerer at blive betalt på en enkelt transaktion.</translation> </message> <message> <source>This is the transaction fee you may pay when fee estimates are not available.</source> <translation>Dette er transaktionsgebyret, du kan betale, når gebyrestimeringer ikke er tilgængelige.</translation> </message> <message> <source>Total length of network version string (%i) exceeds maximum length (%i). Reduce the number or size of uacomments.</source> <translation>Den totale længde på netværksversionsstrengen (%i) overstiger maksimallængden (%i). 
Reducér antallet af eller størrelsen på uacomments.</translation> </message> <message> <source>%s is set very high!</source> <translation>%s er sat meget højt!</translation> </message> <message> <source>Starting network threads...</source> <translation>Starter netværkstråde…</translation> </message> <message> <source>The wallet will avoid paying less than the minimum relay fee.</source> <translation>Tegnebogen vil undgå at betale mindre end minimum-videresendelsesgebyret.</translation> </message> <message> <source>This is the minimum transaction fee you pay on every transaction.</source> <translation>Dette er det transaktionsgebyr, du som minimum betaler for hver transaktion.</translation> </message> <message> <source>This is the transaction fee you will pay if you send a transaction.</source> <translation>Dette er transaktionsgebyret, som du betaler, når du sender en transaktion.</translation> </message> <message> <source>Transaction amounts must not be negative</source> <translation>Transaktionsbeløb må ikke være negative</translation> </message> <message> <source>Transaction has too long of a mempool chain</source> <translation>Transaktionen har en for lang hukommelsespuljekæde</translation> </message> <message> <source>Transaction must have at least one recipient</source> <translation>Transaktionen skal have mindst én modtager</translation> </message> <message> <source>Unknown network specified in -onlynet: '%s'</source> <translation>Ukendt netværk anført i -onlynet: “%s”</translation> </message> <message> <source>Insufficient funds</source> <translation>Manglende dækning</translation> </message> <message> <source>Fee estimation failed. Fallbackfee is disabled. Wait a few blocks or enable -fallbackfee.</source> <translation>Estimering af gebyr mislykkedes. Tilbagefaldsgebyr er deaktiveret. Vent et par blokke eller aktivér -fallbackfee.</translation> </message> <message> <source>Warning: Private keys detected in wallet {%s} with disabled private keys</source> <translation>Advarsel: Private nøgler opdaget i tegnebog {%s} med deaktiverede private nøgler</translation> </message> <message> <source>Cannot write to data directory '%s'; check permissions.</source> <translation>Kan ikke skrive til datamappe '%s'; tjek tilladelser.</translation> </message> <message> <source>Loading block index...</source> <translation>Indlæser blokindeks…</translation> </message> <message> <source>Loading wallet...</source> <translation>Indlæser tegnebog…</translation> </message> <message> <source>Cannot downgrade wallet</source> <translation>Kan ikke nedgradere tegnebog</translation> </message> <message> <source>Rescanning...</source> <translation>Genindlæser…</translation> </message> <message> <source>Done loading</source> <translation>Indlæsning gennemført</translation> </message> </context> </TS><|fim▁end|>
<message> <source>You can increase the fee later (signals Replace-By-Fee, BIP-125).</source>
<|file_name|>pow.js<|end_file_name|><|fim▁begin|>/* Copyright (c) 2012 Greg Reimer ( http://obadger.com/ ) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ (function(w, d, $){ // defaults var defaultArgs = { rays: 16, originX: '50%', originY: '50%', bgColorStart: 'rgba(0,0,0,0.1)', bgColorEnd: 'rgba(0,0,0,0.2)', rayColorStart: 'hsla(0,0%,100%,0.2)', rayColorEnd: 'hsla(0,0%,100%,0.3)', sizingRatio: 1 }; $.fn.pow = (function(){ return function(args){ // bail if none if (this.length === 0) { return; } // set defaults args = $.extend({}, defaultArgs, args); // set vars and grab a few values to use later var $el = this.eq(0); var width = $el.outerWidth(); var height = $el.outerHeight(); var offset = $el.offset(); var originX = (parseFloat(args.originX) || 0) / 100; var originY = (parseFloat(args.originY) || 0) / 100; <|fim▁hole|> if (args.originEl) { var $oel = $(args.originEl); if ($oel.length) { var oOffset = $oel.offset(); var oWidth = $oel.outerWidth(); var oHeight = $oel.outerHeight(); originX = (((oOffset.left - offset.left) + (oWidth / 2)) / width); originY = (((oOffset.top - offset.top) + (oHeight / 2)) / height); } } // convert to absolute lengths originX = width * originX; originY = height * originY; // find maximum distance to a corner var radius = Math.max.apply(Math, [ {x:0,y:0}, {x:width,y:0}, {x:0,y:height}, {x:width,y:height} ].map(function(c){ // use the pythagorean theorem, luke return Math.sqrt(Math.pow(c.x - originX, 2) + Math.pow(c.y - originY, 2)); })); try{ var canvas = $('<canvas width="'+width+'" height="'+height+'" style="position:fixed;top:-999999px"></canvas>').appendTo(d.body).get(0); var ctx = canvas.getContext('2d'); } catch(err) { return; } // build the background gradient var bgGrad = ctx.createRadialGradient( originX, originY, 0, // inner circle, infinitely small originX, originY, radius // outer circle, will just cover canvas area ); bgGrad.addColorStop(0, args.bgColorStart); bgGrad.addColorStop(1, args.bgColorEnd); // build the foreground gradient var rayGrad = ctx.createRadialGradient( originX, originY, 0, // inner circle, infinitely small originX, originY, radius // outer circle, will just cover canvas area ); rayGrad.addColorStop(0, args.rayColorStart); rayGrad.addColorStop(1, args.rayColorEnd); // fill in bg ctx.fillStyle = bgGrad; ctx.fillRect(0,0,width,height); // draw rays ctx.fillStyle = rayGrad; ctx.beginPath(); var spokeCount = args.rays * 2; ctx.moveTo(originX, originY); for (var i=0; i<args.rays; i++){ for (var j=0; j<2; j++) { var thisSpoke = i * 2 + j; var traversal = thisSpoke / 
spokeCount; var ax = originX + radius * 1.5 * Math.cos(traversal * 2 * Math.PI); var ay = originY + radius * 1.5 * Math.sin(traversal * 2 * Math.PI); ctx.lineTo(ax,ay); } ctx.lineTo(originX, originY); } ctx.fill(); // set the data as css to the element var data = canvas.toDataURL("image/png"); $(canvas).remove(); $el.css({ 'background-image':'url("'+data+'")', 'background-repeat':'no-repeat', 'background-position':'50% 50%', 'background-size':'cover' }); }; })(); })(window, document, jQuery);<|fim▁end|>
// center rays on a given element
<|file_name|>solution.py<|end_file_name|><|fim▁begin|>from collections import deque N = int(input()) d = deque() i = 0<|fim▁hole|> command = input().split() if command[0] == 'append': d.append(command[1]) elif command[0] == 'appendleft': d.appendleft(command[1]) elif command[0] == 'pop': d.pop() else: d.popleft() i += 1 [print(i, end=' ') for i in d]<|fim▁end|>
while i < N:
<|file_name|>test.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
zhangyu
<|file_name|>mobilenet_tflite.ts<|end_file_name|><|fim▁begin|>/** * @license * Copyright 2021 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS,<|fim▁hole|> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ import {ModelItem} from '../model_item'; /** MobileNet TFLite task model. */ export class MobileNetTfLite extends ModelItem { constructor() { super('mobilenet_tflite'); } }<|fim▁end|>
<|file_name|>Transponder.py<|end_file_name|><|fim▁begin|>from enigma import eDVBFrontendParametersSatellite, eDVBFrontendParametersCable, eDVBFrontendParametersTerrestrial from Components.NimManager import nimmanager def ConvertToHumanReadable(tp, type = None): ret = { } if type is None: type = tp.get("tuner_type", "None") if type == "DVB-S": ret["tuner_type"] = _("Satellite") ret["inversion"] = { eDVBFrontendParametersSatellite.Inversion_Unknown : _("Auto"), eDVBFrontendParametersSatellite.Inversion_On : _("On"), eDVBFrontendParametersSatellite.Inversion_Off : _("Off")}[tp["inversion"]] ret["fec_inner"] = { eDVBFrontendParametersSatellite.FEC_None : _("None"), eDVBFrontendParametersSatellite.FEC_Auto : _("Auto"), eDVBFrontendParametersSatellite.FEC_1_2 : "1/2", eDVBFrontendParametersSatellite.FEC_2_3 : "2/3", eDVBFrontendParametersSatellite.FEC_3_4 : "3/4", eDVBFrontendParametersSatellite.FEC_5_6 : "5/6", eDVBFrontendParametersSatellite.FEC_7_8 : "7/8", eDVBFrontendParametersSatellite.FEC_3_5 : "3/5", eDVBFrontendParametersSatellite.FEC_4_5 : "4/5", eDVBFrontendParametersSatellite.FEC_8_9 : "8/9", eDVBFrontendParametersSatellite.FEC_9_10 : "9/10"}.get(tp.get("fec_inner", _("Auto"))) ret["modulation"] = { eDVBFrontendParametersSatellite.Modulation_Auto : _("Auto"), eDVBFrontendParametersSatellite.Modulation_QPSK : "QPSK", eDVBFrontendParametersSatellite.Modulation_QAM16 : "QAM16", eDVBFrontendParametersSatellite.Modulation_8PSK : "8PSK"}[tp["modulation"]] ret["orbital_position"] = nimmanager.getSatName(int(tp["orbital_position"]))<|fim▁hole|> eDVBFrontendParametersSatellite.Polarisation_Horizontal : _("Horizontal"), eDVBFrontendParametersSatellite.Polarisation_Vertical : _("Vertical"), eDVBFrontendParametersSatellite.Polarisation_CircularLeft : _("Circular left"), eDVBFrontendParametersSatellite.Polarisation_CircularRight : _("Circular right")}[tp["polarization"]] ret["system"] = { eDVBFrontendParametersSatellite.System_DVB_S : "DVB-S", eDVBFrontendParametersSatellite.System_DVB_S2 : "DVB-S2"}[tp["system"]] if ret["system"] == "DVB-S2": ret["rolloff"] = { eDVBFrontendParametersSatellite.RollOff_alpha_0_35 : "0.35", eDVBFrontendParametersSatellite.RollOff_alpha_0_25 : "0.25", eDVBFrontendParametersSatellite.RollOff_alpha_0_20 : "0.20"}.get(tp.get("rolloff", "auto")) ret["pilot"] = { eDVBFrontendParametersSatellite.Pilot_Unknown : _("Auto"), eDVBFrontendParametersSatellite.Pilot_On : _("On"), eDVBFrontendParametersSatellite.Pilot_Off : _("Off")}[tp["pilot"]] elif type == "DVB-C": ret["tuner_type"] = _("Cable") ret["modulation"] = { eDVBFrontendParametersCable.Modulation_Auto: _("Auto"), eDVBFrontendParametersCable.Modulation_QAM16 : "QAM16", eDVBFrontendParametersCable.Modulation_QAM32 : "QAM32", eDVBFrontendParametersCable.Modulation_QAM64 : "QAM64", eDVBFrontendParametersCable.Modulation_QAM128 : "QAM128", eDVBFrontendParametersCable.Modulation_QAM256 : "QAM256"}[tp["modulation"]] ret["inversion"] = { eDVBFrontendParametersCable.Inversion_Unknown : _("Auto"), eDVBFrontendParametersCable.Inversion_On : _("On"), eDVBFrontendParametersCable.Inversion_Off : _("Off")}[tp["inversion"]] ret["fec_inner"] = { eDVBFrontendParametersCable.FEC_None : _("None"), eDVBFrontendParametersCable.FEC_Auto : _("Auto"), eDVBFrontendParametersCable.FEC_1_2 : "1/2", eDVBFrontendParametersCable.FEC_2_3 : "2/3", eDVBFrontendParametersCable.FEC_3_4 : "3/4", eDVBFrontendParametersCable.FEC_5_6 : "5/6", eDVBFrontendParametersCable.FEC_7_8 : "7/8", eDVBFrontendParametersCable.FEC_8_9 : "8/9"}[tp["fec_inner"]] elif type 
== "DVB-T": ret["tuner_type"] = _("Terrestrial") ret["bandwidth"] = { eDVBFrontendParametersTerrestrial.Bandwidth_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.Bandwidth_10MHz : "10 MHz", eDVBFrontendParametersTerrestrial.Bandwidth_8MHz : "8 MHz", eDVBFrontendParametersTerrestrial.Bandwidth_7MHz : "7 MHz", eDVBFrontendParametersTerrestrial.Bandwidth_6MHz : "6 MHz", eDVBFrontendParametersTerrestrial.Bandwidth_5MHz : "5 MHz", eDVBFrontendParametersTerrestrial.Bandwidth_1_712MHz : "1.172 MHz"}.get(tp.get("bandwidth", " ")) ret["code_rate_lp"] = { eDVBFrontendParametersTerrestrial.FEC_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.FEC_1_2 : "1/2", eDVBFrontendParametersTerrestrial.FEC_2_3 : "2/3", eDVBFrontendParametersTerrestrial.FEC_3_4 : "3/4", eDVBFrontendParametersTerrestrial.FEC_5_6 : "5/6", eDVBFrontendParametersTerrestrial.FEC_6_7 : "6/7", eDVBFrontendParametersTerrestrial.FEC_7_8 : "7/8", eDVBFrontendParametersTerrestrial.FEC_8_9 : "8/9"}.get(tp.get("code_rate_lp", " ")) ret["code_rate_hp"] = { eDVBFrontendParametersTerrestrial.FEC_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.FEC_1_2 : "1/2", eDVBFrontendParametersTerrestrial.FEC_2_3 : "2/3", eDVBFrontendParametersTerrestrial.FEC_3_4 : "3/4", eDVBFrontendParametersTerrestrial.FEC_5_6 : "5/6", eDVBFrontendParametersTerrestrial.FEC_6_7 : "6/7", eDVBFrontendParametersTerrestrial.FEC_7_8 : "7/8", eDVBFrontendParametersTerrestrial.FEC_8_9 : "8/9"}.get(tp.get("code_rate_hp", " ")) ret["constellation"] = { eDVBFrontendParametersTerrestrial.Modulation_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.Modulation_QPSK : "QPSK", eDVBFrontendParametersTerrestrial.Modulation_QAM16 : "QAM16", eDVBFrontendParametersTerrestrial.Modulation_QAM64 : "QAM64", eDVBFrontendParametersTerrestrial.Modulation_QAM256 : "QAM256"}.get(tp.get("constellation", " ")) ret["transmission_mode"] = { eDVBFrontendParametersTerrestrial.TransmissionMode_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.TransmissionMode_1k : "1k", eDVBFrontendParametersTerrestrial.TransmissionMode_2k : "2k", eDVBFrontendParametersTerrestrial.TransmissionMode_4k : "4k", eDVBFrontendParametersTerrestrial.TransmissionMode_8k : "8k", eDVBFrontendParametersTerrestrial.TransmissionMode_16k : "16k", eDVBFrontendParametersTerrestrial.TransmissionMode_32k : "32k"}.get(tp.get("transmission_mode", " ")) ret["guard_interval"] = { eDVBFrontendParametersTerrestrial.GuardInterval_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.GuardInterval_19_256 : "19/256", eDVBFrontendParametersTerrestrial.GuardInterval_19_128 : "19/128", eDVBFrontendParametersTerrestrial.GuardInterval_1_128 : "1/128", eDVBFrontendParametersTerrestrial.GuardInterval_1_32 : "1/32", eDVBFrontendParametersTerrestrial.GuardInterval_1_16 : "1/16", eDVBFrontendParametersTerrestrial.GuardInterval_1_8 : "1/8", eDVBFrontendParametersTerrestrial.GuardInterval_1_4 : "1/4"}.get(tp.get("guard_interval", " ")) ret["hierarchy_information"] = { eDVBFrontendParametersTerrestrial.Hierarchy_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.Hierarchy_None : _("None"), eDVBFrontendParametersTerrestrial.Hierarchy_1 : "1", eDVBFrontendParametersTerrestrial.Hierarchy_2 : "2", eDVBFrontendParametersTerrestrial.Hierarchy_4 : "4"}.get(tp.get("hierarchy_information", " ")) ret["inversion"] = { eDVBFrontendParametersTerrestrial.Inversion_Unknown : _("Auto"), eDVBFrontendParametersTerrestrial.Inversion_On : _("On"), eDVBFrontendParametersTerrestrial.Inversion_Off : _("Off")}.get(tp.get("inversion", " ")) ret["system"] = { 
eDVBFrontendParametersTerrestrial.System_DVB_T : "DVB-T", eDVBFrontendParametersTerrestrial.System_DVB_T2 : "DVB-T2"}[tp.get("system")] else: print("ConvertToHumanReadable: no or unknown type in tpdata dict!") for x in tp.keys(): if x not in ret: ret[x] = tp[x] return ret<|fim▁end|>
ret["polarization"] = {
<|file_name|>factory.js<|end_file_name|><|fim▁begin|>angular.module('factoria', ['firebase']) .factory('fireService', ['$firebaseArray', function($firebaseArray){ var firebaseRef = ""; var setFirebaseSource = function(url){ firebaseRef = new Firebase(url); }; var getFirebaseRoot = function(){ return firebaseRef; }; var addData = function(data){ // persist our data to firebase via the injected $firebaseArray service var ref = getFirebaseRoot(); return $firebaseArray(ref).$add(data); }; var getData = function(callback){ var ref = getFirebaseRoot(); //TODO: //Call koaRender to update new elements style return $firebaseArray(ref); }; var service = { setFirebaseSource : setFirebaseSource,<|fim▁hole|> getFirebaseRoot : getFirebaseRoot }; return service; }]);<|fim▁end|>
addData : addData, getData : getData,
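Each row in this dump pairs a prompt carrying a <|fim▁hole|> marker with the completion that fills it. Assuming the prompt is simply prefix + marker + suffix (an inference from the rows above, not a documented guarantee of the dataset layout), a pair can be reassembled like this:

# Rebuild the original source from a fill-in-the-middle (FIM) pair.
# Assumes prompt == prefix + HOLE + suffix, as the rows above suggest.
HOLE = "<|fim▁hole|>"

def reassemble(prompt, completion):
    prefix, marker, suffix = prompt.partition(HOLE)
    if not marker:
        raise ValueError("prompt contains no hole marker")
    return prefix + completion + suffix

assert reassemble("a = 1\n" + HOLE + "c = 3\n", "b = 2\n") == "a = 1\nb = 2\nc = 3\n"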
<|file_name|>depthwise_conv_test.cc<|end_file_name|><|fim▁begin|>/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/micro/kernels/kernel_runner.h" #include "tensorflow/lite/micro/test_helpers.h" #include "tensorflow/lite/micro/testing/micro_test.h" namespace tflite { namespace testing { namespace { #if !defined(XTENSA) // Needed to avoid build errors from unused variables. constexpr int kMaxFilterChannels = 64; constexpr int kMaxBiasChannels = 64; #endif // !defined(XTENSA) // Index of the output tensor in context->tensors, specific to // DepthwiseConv. constexpr int kOutputTensorIndex = 3; // Creates a DepthwiseConv operator, calls it with the provided input tensors // and some default parameters, and compares the output with // expected_output_data. // // The tensors parameter contains both the input tensors as well as a // preallocated output tensor into which the output is stored.<|fim▁hole|> TfLiteTensor* tensors) { int inputs_array_data[] = {3, 0, 1, 2}; TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data); int outputs_array_data[] = {1, 3}; TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data); const TfLiteRegistration registration = Register_DEPTHWISE_CONV_2D(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, reinterpret_cast<void*>(conv_params)); int input_depth = tensors[0].dims->data[3]; int output_depth = tensors[1].dims->data[3]; int depth_mul = output_depth / input_depth; conv_params->padding = kTfLitePaddingValid; conv_params->stride_height = 1; conv_params->stride_width = 1; conv_params->depth_multiplier = depth_mul; const char* init_data = reinterpret_cast<const char*>(conv_params); // TODO(b/154240825): Use a test macro here which fails and returns. TfLiteStatus status = runner.InitAndPrepare(init_data); if (status != kTfLiteOk) { return status; } TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); const T* output_data = tflite::GetTensorData<T>(&tensors[kOutputTensorIndex]); for (int i = 0; i < output_length; ++i) { TF_LITE_MICRO_EXPECT_NEAR(expected_output_data[i], output_data[i], tolerance); } return kTfLiteOk; } #if !defined(XTENSA) // Needed to avoid build errors from unused functions.
void TestDepthwiseConvFloat(const int* input_dims_data, const float* input_data, const int* filter_dims_data, const float* filter_data, const int* bias_dims_data, const float* bias_data, const float* expected_output_data, const int* output_dims_data, TfLiteDepthwiseConvParams* conv_params, float* output_data) { TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data); TfLiteIntArray* filter_dims = IntArrayFromInts(filter_dims_data); TfLiteIntArray* bias_dims = IntArrayFromInts(bias_dims_data); TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data); const int output_dims_count = ElementCount(*output_dims); constexpr int inputs_size = 3; constexpr int outputs_size = 1; constexpr int tensors_size = inputs_size + outputs_size; TfLiteTensor tensors[tensors_size] = { CreateTensor(input_data, input_dims), CreateTensor(filter_data, filter_dims), CreateTensor(bias_data, bias_dims), CreateTensor(output_data, output_dims), }; ValidateDepthwiseConvGoldens(expected_output_data, output_dims_count, conv_params, 1e-5, tensors_size, tensors); } void TestDepthwiseConvQuantizedPerChannel( const int* input_dims_data, const float* input_data, int8_t* input_quantized, float input_scale, int input_zero_point, const int* filter_dims_data, const float* filter_data, int8_t* filter_data_quantized, const int* bias_dims_data, const float* bias_data, int32_t* bias_data_quantized, const int* output_dims_data, const float* expected_output_data, int8_t* expected_output_data_quantized, int8_t* output_data, float output_scale, int output_zero_point, TfLiteDepthwiseConvParams* conv_params) { TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data); TfLiteIntArray* filter_dims = IntArrayFromInts(filter_dims_data); TfLiteIntArray* bias_dims = IntArrayFromInts(bias_dims_data); TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data); const int output_dims_count = ElementCount(*output_dims); int filter_zero_points[kMaxFilterChannels]; float filter_scales[kMaxFilterChannels]; int bias_zero_points[kMaxBiasChannels]; float bias_scales[kMaxBiasChannels]; TfLiteAffineQuantization filter_quant; TfLiteAffineQuantization bias_quant; TfLiteTensor input_tensor = CreateQuantizedTensor( input_data, input_quantized, input_dims, input_scale, input_zero_point); TfLiteTensor filter_tensor = CreateSymmetricPerChannelQuantizedTensor( filter_data, filter_data_quantized, filter_dims, filter_scales, filter_zero_points, &filter_quant, 3 /* quantized dimension */ ); TfLiteTensor bias_tensor = CreatePerChannelQuantizedBiasTensor( bias_data, bias_data_quantized, bias_dims, input_scale, &filter_scales[1], bias_scales, bias_zero_points, &bias_quant, 3 /* quantized dimension */ ); TfLiteTensor output_tensor = CreateQuantizedTensor( output_data, output_dims, output_scale, output_zero_point); // TODO(njeff): Affine Quantization Params should be set on tensor creation.
float input_scales[] = {1, input_scale}; int input_zero_points[] = {1, input_zero_point}; TfLiteAffineQuantization input_quant = {FloatArrayFromFloats(input_scales), IntArrayFromInts(input_zero_points), 0}; input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant}; float output_scales[] = {1, output_scale}; int output_zero_points[] = {1, output_zero_point}; TfLiteAffineQuantization output_quant = {FloatArrayFromFloats(output_scales), IntArrayFromInts(output_zero_points), 0}; output_tensor.quantization = {kTfLiteAffineQuantization, &output_quant}; constexpr int inputs_size = 3; constexpr int outputs_size = 1; constexpr int tensors_size = inputs_size + outputs_size; TfLiteTensor tensors[tensors_size] = { input_tensor, filter_tensor, bias_tensor, output_tensor, }; Quantize(expected_output_data, expected_output_data_quantized, output_dims_count, output_scale, output_zero_point); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, ValidateDepthwiseConvGoldens(expected_output_data_quantized, output_dims_count, conv_params, 1.0, tensors_size, tensors)); } #endif // !defined(XTENSA) } // namespace } // namespace testing } // namespace tflite TF_LITE_MICRO_TESTS_BEGIN #if !defined(XTENSA) // TODO(b/170322965): xtensa kernels are less general than // reference kernels and we ifdef out test cases that are // currently known to fail. TF_LITE_MICRO_TEST(SimpleTest) { const int input_shape[] = {4, 1, 3, 2, 2}; const float input_values[] = {1, 2, 7, 8, 3, 4, 9, 10, 5, 6, 11, 12}; const int filter_shape[] = {4, 1, 2, 2, 4}; const float filter_values[] = {1, 2, 3, 4, -9, 10, -11, 12, 5, 6, 7, 8, 13, -14, 15, -16}; const int bias_shape[] = {4, 1, 1, 1, 4}; const float bias_values[] = {1, 2, 3, 4}; const float golden[] = { 71, -34, 99, -20, 91, -26, 127, -4, }; const int output_shape[] = {4, 1, 2, 1, 4}; const int output_dims_count = 8; float output_data[output_dims_count]; TfLiteDepthwiseConvParams conv_params; conv_params.activation = kTfLiteActNone; conv_params.dilation_width_factor = 1; conv_params.dilation_height_factor = 1; tflite::testing::TestDepthwiseConvFloat( input_shape, input_values, filter_shape, filter_values, bias_shape, bias_values, golden, output_shape, &conv_params, output_data); } TF_LITE_MICRO_TEST(SimpleTestRelu) { const int input_shape[] = {4, 1, 3, 2, 2}; const float input_values[] = {1, 2, 7, 8, 3, 4, 9, 10, 5, 6, 11, 12}; const int filter_shape[] = {4, 1, 2, 2, 4}; const float filter_values[] = {1, 2, 3, 4, -9, 10, -11, 12, 5, 6, 7, 8, 13, -14, 15, -16}; const int bias_shape[] = {4, 1, 1, 1, 4}; const float bias_values[] = {1, 2, 3, 4}; const int output_shape[] = {4, 1, 2, 1, 4}; const int output_dims_count = 8; const float golden_relu[] = {71, 0, 99, 0, 91, 0, 127, 0}; float output_data[output_dims_count]; TfLiteDepthwiseConvParams conv_params; conv_params.activation = kTfLiteActRelu; conv_params.dilation_width_factor = 1; conv_params.dilation_height_factor = 1; tflite::testing::TestDepthwiseConvFloat( input_shape, input_values, filter_shape, filter_values, bias_shape, bias_values, golden_relu, output_shape, &conv_params, output_data); } TF_LITE_MICRO_TEST(SimpleTestQuantizedPerChannel) { const int input_elements = 12; const int input_shape[] = {4, 1, 3, 2, 2}; const float input_values[] = {1, 2, 7, 8, 3, 4, 9, 10, 5, 6, 11, 12}; const int filter_elements = 16; const int filter_shape[] = {4, 1, 2, 2, 4}; const float filter_values[] = {1, 2, 3, 4, -9, 10, -11, 12, 5, 6, 7, 8, 13, -14, 15, -16}; const int bias_elements = 4; const int bias_shape[] = {4, 1, 1, 1, 4}; const int 
output_elements = 8; const float bias_values[] = {1, 2, 3, 4}; const float golden[] = { 71, -34, 99, -20, 91, -26, 127, -4, }; const int output_shape[] = {4, 1, 2, 1, 4}; const int output_dims_count = 8; int8_t output_data[output_dims_count]; const float input_scale = 0.5; const float output_scale = 1.0f; const int input_zero_point = 0; const int output_zero_point = 0; int8_t input_quantized[input_elements]; int8_t filter_quantized[filter_elements]; int32_t bias_quantized[bias_elements]; int8_t golden_quantized[output_elements]; TfLiteDepthwiseConvParams conv_params; conv_params.activation = kTfLiteActNone; conv_params.dilation_width_factor = 1; conv_params.dilation_height_factor = 1; tflite::testing::TestDepthwiseConvQuantizedPerChannel( input_shape, input_values, input_quantized, input_scale, input_zero_point, filter_shape, filter_values, filter_quantized, bias_shape, bias_values, bias_quantized, output_shape, golden, golden_quantized, output_data, output_scale, output_zero_point, &conv_params); } TF_LITE_MICRO_TEST(SimpleTestQuantizedPerChannelDepthMultiplier1) { const int input_elements = 12; const int input_shape[] = {4, 1, 3, 2, 2}; const float input_values[] = {1, 2, 7, 8, 3, 4, 9, 10, 5, 6, 11, 12}; const int filter_elements = 8; const int filter_shape[] = {4, 1, 2, 2, 2}; const float filter_values[] = {1, 2, 3, 4, -9, 10, -11, 12}; const int bias_elements = 2; const int bias_shape[] = {4, 1, 1, 1, 2}; const int output_elements = 4; const float bias_values[] = {1, 2}; const float golden[] = { -103, 127, -128, 127, }; const int output_shape[] = {4, 1, 2, 1, 2}; const int output_dims_count = 4; int8_t output_data[output_dims_count]; const float input_scale = 1.0f; const float output_scale = 1.0f; const int input_zero_point = 0; const int output_zero_point = 0; int8_t input_quantized[input_elements]; int8_t filter_quantized[filter_elements]; int32_t bias_quantized[bias_elements]; int8_t golden_quantized[output_elements]; TfLiteDepthwiseConvParams conv_params; conv_params.activation = kTfLiteActNone; conv_params.dilation_width_factor = 1; conv_params.dilation_height_factor = 1; tflite::testing::TestDepthwiseConvQuantizedPerChannel( input_shape, input_values, input_quantized, input_scale, input_zero_point, filter_shape, filter_values, filter_quantized, bias_shape, bias_values, bias_quantized, output_shape, golden, golden_quantized, output_data, output_scale, output_zero_point, &conv_params); } TF_LITE_MICRO_TEST(TestQuantizedPerChannelDepthMultiplier1Relu6) { const int input_elements = 24; const int input_shape[] = {4, 1, 3, 2, 4}; const float input_values[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; const int filter_elements = 16; const int filter_shape[] = {4, 1, 2, 2, 4}; const float filter_values[] = {0, 1, 8, -2, -1, 2, -10, 0, -1, 3, -18, 0, 0, 4, 20, -3}; const int bias_elements = 4; const int bias_shape[] = {4, 1, 1, 1, 4}; const int output_elements = 8; const float bias_values[] = {1, 2, 3, 4}; const float golden[] = { 0, 6, 3, 0, 0, 6, 3, 0, }; const int output_shape[] = {4, 1, 2, 1, 4}; int8_t output_data[output_elements]; const float input_scale = 0.023529f; const float output_scale = 0.023529f; const int input_zero_point = -128; const int output_zero_point = -128; int8_t input_quantized[input_elements]; int8_t filter_quantized[filter_elements]; int32_t bias_quantized[bias_elements]; int8_t golden_quantized[output_elements]; TfLiteDepthwiseConvParams conv_params; conv_params.activation = kTfLiteActRelu6; conv_params.dilation_width_factor = 1; 
conv_params.dilation_height_factor = 1; tflite::testing::TestDepthwiseConvQuantizedPerChannel( input_shape, input_values, input_quantized, input_scale, input_zero_point, filter_shape, filter_values, filter_quantized, bias_shape, bias_values, bias_quantized, output_shape, golden, golden_quantized, output_data, output_scale, output_zero_point, &conv_params); } TF_LITE_MICRO_TEST(SimpleTestDilatedQuantizedPerChannel) { const int input_elements = 48; const int input_shape[] = {4, 1, 4, 6, 2}; const float input_values[] = {1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, // h = 0 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, // h = 1 1, 2, 3, 4, 5, 6, 2, 6, 2, 4, 4, 2, // h = 2 3, 2, 6, 5, 1, 4, 1, 2, 1, 4, 6, 3}; // h = 3 const int filter_elements = 16; const int filter_shape[] = {4, 1, 2, 2, 4}; const float filter_values[] = {1, 2, 3, 4, -9, 10, -11, 12, 5, 6, 7, 8, 13, -14, 15, -16}; const int bias_elements = 4; const int bias_shape[] = {4, 1, 1, 1, 4}; const int output_elements = 24; const float bias_values[] = {1, 2, 3, 4}; const float golden[] = { 15, 2, 88, -48, 25, 14, 72, 0, 61, -2, 56, 48, // h = 0 -4, 52, 12, 48, 11, 70, 63, 40, 51, -30, 41, 48 // h = 1 }; const int output_shape[] = {4, 1, 2, 3, 4}; int8_t output_data[output_elements]; const float input_scale = 0.5; const float output_scale = 1.0f; const int input_zero_point = 0; const int output_zero_point = 0; int8_t input_quantized[input_elements]; int8_t filter_quantized[filter_elements]; int32_t bias_quantized[bias_elements]; int8_t golden_quantized[output_elements]; TfLiteDepthwiseConvParams conv_params; conv_params.activation = kTfLiteActNone; conv_params.dilation_width_factor = 3; conv_params.dilation_height_factor = 2; tflite::testing::TestDepthwiseConvQuantizedPerChannel( input_shape, input_values, input_quantized, input_scale, input_zero_point, filter_shape, filter_values, filter_quantized, bias_shape, bias_values, bias_quantized, output_shape, golden, golden_quantized, output_data, output_scale, output_zero_point, &conv_params); } TF_LITE_MICRO_TEST(TestQuantizedPerChannelCompareWithFloat) { const int input_dims[] = {4, 1, 2, 3, 2}; const float input_data[] = {3, 2, 1, -1, -2, -3, 4, 3, 2, -2, -3, -4}; const int filter_dims[] = {4, 1, 2, 2, 4}; const float filter_data[] = {1, 2, 3, 4, 3, 4, 5, 6, 7, 8, 5, 6, 3, 4, 1, 2}; const int bias_dims[] = {4, 1, 1, 1, 4}; const float bias_data[] = {3, -2, 4, 6}; const int output_dims[] = {4, 1, 1, 2, 4}; const float golden[] = {43, 48, 18, 22, 3, -4, -28, -36}; const int input_size = 12; const int filter_size = 16; const int output_size = 8; const int bias_size = 4; int8_t input_quantized[input_size]; int8_t filter_quantized[filter_size]; int32_t bias_quantized[bias_size]; int8_t golden_quantized[output_size]; int8_t output_data[output_size]; float output_float[output_size]; const float input_scale = 0.5; const float output_scale = 1.0; const int input_zero_point = 0; const int output_zero_point = 0; TfLiteDepthwiseConvParams conv_params; conv_params.activation = kTfLiteActNone; conv_params.dilation_width_factor = 1; conv_params.dilation_height_factor = 1; tflite::testing::TestDepthwiseConvQuantizedPerChannel( input_dims, input_data, input_quantized, input_scale, input_zero_point, filter_dims, filter_data, filter_quantized, bias_dims, bias_data, bias_quantized, output_dims, golden, golden_quantized, output_data, output_scale, output_zero_point, &conv_params); tflite::testing::TestDepthwiseConvFloat( input_dims, input_data, filter_dims, filter_data, bias_dims, bias_data, golden, output_dims, &conv_params, 
output_float); } TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) { const float input_scale = 1.0f; const float filter_scale = 1.0f; const float output_scale = 1.0f; const int input_elements = 12; const int input_shape[] = {4, 1, 3, 2, 2}; const float input_values[] = {1, 2, 7, 8, 3, 4, 9, 10, 5, 6, 11, 12}; const int filter_elements = 16; const int filter_shape[] = {4, 1, 2, 2, 4}; const float filter_values[] = {1, 2, 3, 4, -9, 10, -11, 12, 5, 6, 7, 8, 13, -14, 15, -16}; const int bias_elements = 4; const int bias_shape[] = {4, 1, 1, 1, 4}; const int output_elements = 8; const float bias_values[] = {1, 2, 3, 4}; const float golden[] = { 71, -34, 99, -20, 91, -26, 127, -4, }; const int output_shape[] = {4, 1, 2, 1, 4}; const int output_dims_count = 8; int8_t output_data[output_dims_count]; int8_t input_quantized[input_elements]; int8_t filter_quantized[filter_elements]; int32_t bias_quantized[bias_elements]; int8_t golden_quantized[output_elements]; TfLiteIntArray* input_dims = tflite::testing::IntArrayFromInts(input_shape); TfLiteIntArray* filter_dims = tflite::testing::IntArrayFromInts(filter_shape); TfLiteIntArray* bias_dims = tflite::testing::IntArrayFromInts(bias_shape); TfLiteIntArray* output_dims = tflite::testing::IntArrayFromInts(output_shape); // Create per-layer quantized int8_t input tensor. TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor( input_values, input_quantized, input_dims, input_scale, 0); int input_zero_points[2] = {1, 0}; float input_scales[2] = {1, input_scale}; TfLiteAffineQuantization input_quant = { tflite::testing::FloatArrayFromFloats(input_scales), tflite::testing::IntArrayFromInts(input_zero_points), 0}; input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant}; // Create per-layer quantized int8_t filter tensor. TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor( filter_values, filter_quantized, filter_dims, filter_scale, 0); int filter_zero_points[2] = {1, 0}; float filter_scales[2] = {1, filter_scale}; TfLiteAffineQuantization filter_quant = { tflite::testing::FloatArrayFromFloats(filter_scales), tflite::testing::IntArrayFromInts(filter_zero_points), 0}; filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant}; // Create per-layer quantized int32_t bias tensor. tflite::SymmetricQuantize(bias_values, bias_quantized, bias_elements, input_scale * output_scale); TfLiteTensor bias_tensor = tflite::testing::CreateTensor(bias_quantized, bias_dims); int bias_zero_points[2] = {1, 0}; float bias_scales[2] = {1, input_scale * filter_scale}; TfLiteAffineQuantization bias_quant = { tflite::testing::FloatArrayFromFloats(bias_scales), tflite::testing::IntArrayFromInts(bias_zero_points), 0}; bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant}; // Create per-layer quantized int8_t output tensor. 
TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor( output_data, output_dims, output_scale, 0); int output_zero_points[2] = {1, 0}; float output_scales[2] = {1, output_scale}; TfLiteAffineQuantization output_quant = { tflite::testing::FloatArrayFromFloats(output_scales), tflite::testing::IntArrayFromInts(output_zero_points), 0}; output_tensor.quantization = {kTfLiteAffineQuantization, &output_quant}; constexpr int inputs_size = 3; constexpr int outputs_size = 1; constexpr int tensors_size = inputs_size + outputs_size; TfLiteTensor tensors[tensors_size] = { input_tensor, filter_tensor, bias_tensor, output_tensor, }; tflite::Quantize(golden, golden_quantized, output_dims_count, output_scale, 0); TfLiteDepthwiseConvParams conv_params; conv_params.activation = kTfLiteActNone; conv_params.dilation_width_factor = 1; conv_params.dilation_height_factor = 1; TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, tflite::testing::ValidateDepthwiseConvGoldens( golden_quantized, output_dims_count, &conv_params, 1e-5, tensors_size, tensors)); } #endif // !defined(XTENSA) TF_LITE_MICRO_TEST(FilterDimsNotMatchingAffineQuantization) { const int input_shape[] = {4, 1, 2, 3, 2}; const float input_data[] = {3, 2, 1, -1, -2, -3, 4, 3, 2, -2, -3, -4}; const int filter_shape[] = {4, 1, 2, 2, 4}; const float filter_data[] = {1, 2, 3, 4, 3, 4, 5, 6, 7, 8, 5, 6, 3, 4, 1, 2}; const int bias_shape[] = {4, 1, 1, 1, 4}; const float bias_data[] = {3, -2, 4, 6}; const int output_shape[] = {4, 1, 1, 2, 4}; const int input_size = 12; const int filter_size = 16; const int output_size = 8; const int bias_size = 4; int8_t input_quantized[input_size]; int8_t filter_quantized[filter_size]; int32_t bias_quantized[bias_size]; int8_t golden_quantized[output_size]; int zero_points[bias_size + 1]; float scales[bias_size + 1]; int8_t output_data[output_size]; const float input_scale = 0.5; const float output_scale = 1.0; const int input_zero_point = 0; const int output_zero_point = 0; TfLiteIntArray* input_dims = tflite::testing::IntArrayFromInts(input_shape); TfLiteIntArray* filter_dims = tflite::testing::IntArrayFromInts(filter_shape); TfLiteIntArray* bias_dims = tflite::testing::IntArrayFromInts(bias_shape); TfLiteIntArray* output_dims = tflite::testing::IntArrayFromInts(output_shape); int filter_zero_points[5]; float filter_scales[5]; TfLiteAffineQuantization filter_quant; TfLiteAffineQuantization bias_quant; TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor( input_data, input_quantized, input_dims, input_scale, input_zero_point); TfLiteTensor filter_tensor = tflite::testing::CreateSymmetricPerChannelQuantizedTensor( filter_data, filter_quantized, filter_dims, filter_scales, filter_zero_points, &filter_quant, 0 /* quantized dimension */); TfLiteTensor bias_tensor = tflite::testing::CreatePerChannelQuantizedBiasTensor( bias_data, bias_quantized, bias_dims, input_scale, &filter_scales[1], scales, zero_points, &bias_quant, 0); TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor( output_data, output_dims, output_scale, output_zero_point); float input_scales[] = {1, input_scale}; int input_zero_points[] = {1, input_zero_point}; TfLiteAffineQuantization input_quant = { tflite::testing::FloatArrayFromFloats(input_scales), tflite::testing::IntArrayFromInts(input_zero_points), 0}; input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant}; constexpr int inputs_size = 3; constexpr int outputs_size = 1; constexpr int tensors_size = inputs_size + outputs_size; TfLiteTensor tensors[tensors_size] = { 
input_tensor, filter_tensor, bias_tensor, output_tensor, }; TfLiteDepthwiseConvParams conv_params; conv_params.activation = kTfLiteActNone; conv_params.dilation_width_factor = 1; conv_params.dilation_height_factor = 1; // Set filter quant to mismatched dimension. TfLiteAffineQuantization* quant = reinterpret_cast<TfLiteAffineQuantization*>( filter_tensor.quantization.params); quant->scale->size = 2; TF_LITE_MICRO_EXPECT_EQ(kTfLiteError, tflite::testing::ValidateDepthwiseConvGoldens( golden_quantized, output_size, &conv_params, 1e-5, tensors_size, tensors)); // Set scale back to correct dimension, and make zero point array too short. quant->scale->size = filter_shape[0]; quant->zero_point->size = 2; TF_LITE_MICRO_EXPECT_EQ(kTfLiteError, tflite::testing::ValidateDepthwiseConvGoldens( golden_quantized, output_size, &conv_params, 1e-5, tensors_size, tensors)); } TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) { const int input_elements = 32 * 4; const int filter_elements = 32 * 4; const int bias_elements = 32; const int output_elements = 32; const int input_shape[] = {4, 1, 4, 1, 32}; const int filter_shape[] = {4, 1, 4, 1, 32}; const int bias_shape[] = {1, 32}; const int output_shape[] = {4, 1, 1, 1, 32}; const float input_values[] = { 11.0589, 10.8824, 11.1766, 11.5295, 10.8236, 9.5295, 9.5295, 10.0001, 11.2354, 10.8824, 9.1765, 9.0589, 9.6471, 8.9412, 7.9412, 9.0001, 9.3530, 7.5295, 9.2354, 9.5883, 7.5883, 8.1765, 7.5883, 9.2942, 9.1177, 8.5883, 8.2354, 8.6471, 8.0589, 8.0001, 7.4118, 7.3530, 11.0001, 11.1177, 11.0589, 11.2354, 10.5883, 9.2942, 9.2942, 10.1177, 11.2354, 10.8824, 8.9412, 8.8236, 9.2354, 8.8824, 7.0001, 9.1177, 9.5883, 8.2354, 9.1765, 9.5295, 7.4118, 8.5883, 8.1177, 9.1765, 9.0001, 9.0589, 8.9412, 8.2942, 7.8824, 8.4118, 7.2942, 7.2354, 10.4118, 10.8824, 11.1177, 11.0001, 10.0001, 9.7060, 9.7648, 10.1766, 11.1766, 10.6471, 8.6471, 8.5295, 9.5295, 9.0001, 7.0001, 9.4118, 9.8236, 8.0001, 9.2354, 9.5883, 7.5295, 9.0001, 8.5295, 9.0589, 8.9412, 9.1177, 8.9412, 8.0001, 8.0589, 8.8824, 7.0589, 7.3530, 11.3530, 11.0589, 10.7060, 10.7648, 9.9413, 9.1177, 9.1177, 9.7648, 10.7060, 10.2354, 8.5883, 8.8236, 9.7648, 9.2942, 7.5295, 9.2354, 9.7060, 8.1177, 9.2942, 9.5883, 7.7648, 9.6471, 9.1177, 9.4707, 9.3530, 8.8236, 8.5295, 8.0589, 8.6471, 9.5883, 7.4118, 7.5883}; const float filter_values[] = { -0.1617, -0.1948, 0.1419, -0.2311, -0.0891, 0.1551, 0.0033, 0.3037, -0.1683, 0.1353, 0.1518, -0.1683, -0.1386, 0.1452, 0.1816, 0.1716, -0.1948, 0.2080, 0.2245, -0.1981, -0.2410, 0.1849, 0.1981, 0.1584, 0.2509, 0.1783, -0.2146, -0.1518, 0.2080, -0.2872, 0.2014, 0.2476, -0.4126, -0.0561, -0.3235, -0.0594, -0.0957, 0.2014, -0.1056, 0.1386, -0.2542, -0.1617, 0.1287, -0.1816, -0.0363, 0.1419, -0.0594, 0.2344, -0.0099, 0.4192, 0.1287, -0.2311, -0.2212, -0.0528, -0.2080, 0.1816, -0.1452, 0.1221, 0.1254, -0.1056, -0.0759, 0.1221, 0.1023, 0.1485, 0.2707, 0.1716, -0.1882, -0.1783, 0.1650, -0.2740, 0.1915, 0.2080, -0.2971, -0.2575, -0.3169, 0.0198, -0.0231, 0.2410, -0.0429, 0.0660, -0.1816, 0.1981, 0.2014, -0.1386, -0.1915, 0.1716, 0.1320, 0.1419, 0.1320, 0.1353, -0.1386, -0.1716, 0.1320, -0.1650, 0.1386, 0.0825, -0.1419, -0.1023, 0.1783, 0.0462, 0.2047, -0.2179, -0.1518, -0.1551, 0.1518, 0.3334, 0.3103, -0.2047, -0.2047, -0.0957, -0.1650, 0.1221, 0.0990, 0.1353, -0.1617, -0.1485, 0.1650, -0.1816, 0.1518, 0.1254, -0.0363, -0.1254, 0.1386, 0.0429, 0.2113, -0.2839, -0.1056, -0.2278}; const float bias_values[] = { 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000}; const float golden[] = { -5.1194, -2.0075, -2.1751, -4.7958, 1.7073, -1.2963, -0.4641, 5.0416, -6.4424, 0.3836, 2.4684, -4.7643, -3.8913, 3.8382, -0.5164, 5.4304, -2.7400, 7.7016, 3.6115, -6.8545, -3.6290, 0.8509, 2.3247, 5.6117, 1.8215, 2.7645, -0.7032, -3.2156, 3.9689, -5.4583, 2.4346, 1.7731}; // Quantization parameters. The scales and zero points below map the // quantized values closely onto the float data, which makes them easy to // reason about. const float input_scale = 0.058824; const float filter_scale = 0.003301; const float output_scale = 0.092596; const int input_zero_point = -128; const int output_zero_point = 0; TfLiteIntArray* input_dims = tflite::testing::IntArrayFromInts(input_shape); TfLiteIntArray* filter_dims = tflite::testing::IntArrayFromInts(filter_shape); TfLiteIntArray* bias_dims = tflite::testing::IntArrayFromInts(bias_shape); TfLiteIntArray* output_dims = tflite::testing::IntArrayFromInts(output_shape); // Create per-tensor quantized int8_t input tensor. int8_t input_quantized[input_elements]; TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor( input_values, input_quantized, input_dims, input_scale, input_zero_point); // Set zero point and scale arrays with a single element for each. int input_zero_points[] = {1, input_zero_point}; float input_scales[] = {1, input_scale}; TfLiteAffineQuantization input_quant = { tflite::testing::FloatArrayFromFloats(input_scales), tflite::testing::IntArrayFromInts(input_zero_points), 0}; input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant}; // Create per-tensor quantized int8_t filter tensor. int8_t filter_quantized[filter_elements]; TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor( filter_values, filter_quantized, filter_dims, filter_scale, 0); // Set zero point and scale arrays with a single element for each. int filter_zero_points[] = {1, 0}; float filter_scales[] = {1, filter_scale}; TfLiteAffineQuantization filter_quant = { tflite::testing::FloatArrayFromFloats(filter_scales), tflite::testing::IntArrayFromInts(filter_zero_points), 0}; filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant}; // Create per-tensor quantized int32_t bias tensor. int32_t bias_quantized[bias_elements]; // See https://www.tensorflow.org/lite/performance/quantization_spec for a // detailed explanation of why bias scale is input_scale * filter_scale. tflite::SymmetricQuantize(bias_values, bias_quantized, bias_elements, input_scale * filter_scale); TfLiteTensor bias_tensor = tflite::testing::CreateTensor(bias_quantized, bias_dims); // Set zero point and scale arrays with a single element for each. int bias_zero_points[] = {1, 0}; float bias_scales[] = {1, input_scale * filter_scale}; TfLiteAffineQuantization bias_quant = { tflite::testing::FloatArrayFromFloats(bias_scales), tflite::testing::IntArrayFromInts(bias_zero_points), 0}; bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant}; // Create per-tensor quantized int8_t output tensor. int8_t output_quantized[output_elements]; TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor( output_quantized, output_dims, output_scale, output_zero_point); // Set zero point and scale arrays with a single element for each.
int output_zero_points[] = {1, output_zero_point}; float output_scales[] = {1, output_scale}; TfLiteAffineQuantization output_quant = { tflite::testing::FloatArrayFromFloats(output_scales), tflite::testing::IntArrayFromInts(output_zero_points), 0}; output_tensor.quantization = {kTfLiteAffineQuantization, &output_quant}; // The 3 inputs include the input, filter and bias tensors. constexpr int kInputsSize = 3; constexpr int kOutputsSize = 1; constexpr int kTensorsSize = kInputsSize + kOutputsSize; TfLiteTensor tensors[kTensorsSize] = { input_tensor, filter_tensor, bias_tensor, output_tensor, }; int8_t golden_quantized[output_elements]; tflite::Quantize(golden, golden_quantized, output_elements, output_scale, 0); // Errors due to quantization should not exceed 1. constexpr int kQuantizationTolerance = 1; TfLiteDepthwiseConvParams conv_params; conv_params.activation = kTfLiteActNone; conv_params.dilation_width_factor = 1; conv_params.dilation_height_factor = 1; tflite::testing::ValidateDepthwiseConvGoldens( golden_quantized, output_elements, &conv_params, kQuantizationTolerance, kTensorsSize, tensors); } TF_LITE_MICRO_TESTS_END<|fim▁end|>
template <typename T> TfLiteStatus ValidateDepthwiseConvGoldens( const T* expected_output_data, int output_length, TfLiteDepthwiseConvParams* conv_params, float tolerance, int tensors_size,
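The depthwise_conv_test.cc row exercises symmetric per-channel quantization: the filter gets one int8 scale per output channel, and the bias scale is input_scale * filter_scale, per the TFLite quantization spec cited in the test comments. A NumPy sketch of that arithmetic follows; shapes and function names are chosen for illustration and are not the TFLite Micro API.

import numpy as np

def quantize_filter_per_channel(filter_f32):
    # Symmetric int8 quantization with one scale per output channel
    # (the last axis of a [1, h, w, channels] depthwise filter).
    max_abs = np.abs(filter_f32).max(axis=(0, 1, 2))
    scales = np.maximum(max_abs / 127.0, 1e-7)  # guard against all-zero channels
    q = np.round(filter_f32 / scales).clip(-127, 127).astype(np.int8)
    return q, scales

def quantize_bias(bias_f32, input_scale, filter_scales):
    # Bias is stored as int32 with scale input_scale * filter_scale per channel.
    return np.round(bias_f32 / (input_scale * filter_scales)).astype(np.int32)

f = np.arange(16, dtype=np.float32).reshape(1, 2, 2, 4) - 8.0
q, s = quantize_filter_per_channel(f)
b_q = quantize_bias(np.array([1.0, 2.0, 3.0, 4.0], np.float32), 0.5, s)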
<|file_name|>_route_filter_rules_operations.py<|end_file_name|><|fim▁begin|># coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class RouteFilterRulesOperations: """RouteFilterRulesOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2018_06_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def _delete_initial( self, resource_group_name: str, route_filter_name: str, rule_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'), 'ruleName': self._serialize.url("rule_name", rule_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore async def begin_delete( self, resource_group_name: str, route_filter_name: str, rule_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Deletes the specified rule from a route filter. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param route_filter_name: The name of the route filter. :type route_filter_name: str :param rule_name: The name of the rule. :type rule_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, route_filter_name=route_filter_name, rule_name=rule_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'), 'ruleName': self._serialize.url("rule_name", rule_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore async def get( self, resource_group_name: str, route_filter_name: str, rule_name: str, **kwargs: Any ) -> "_models.RouteFilterRule": """Gets the specified rule from a route filter. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param route_filter_name: The name of the route filter. :type route_filter_name: str :param rule_name: The name of the rule. 
:type rule_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: RouteFilterRule, or the result of cls(response) :rtype: ~azure.mgmt.network.v2018_06_01.models.RouteFilterRule :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'), 'ruleName': self._serialize.url("rule_name", rule_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('RouteFilterRule', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore async def _create_or_update_initial( self, resource_group_name: str, route_filter_name: str, rule_name: str, route_filter_rule_parameters: "_models.RouteFilterRule", **kwargs: Any ) -> "_models.RouteFilterRule": cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'), 'ruleName': self._serialize.url("rule_name", rule_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", 
accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(route_filter_rule_parameters, 'RouteFilterRule') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('RouteFilterRule', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('RouteFilterRule', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore async def begin_create_or_update( self, resource_group_name: str, route_filter_name: str, rule_name: str, route_filter_rule_parameters: "_models.RouteFilterRule", **kwargs: Any ) -> AsyncLROPoller["_models.RouteFilterRule"]: """Creates or updates a route in the specified route filter. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param route_filter_name: The name of the route filter. :type route_filter_name: str :param rule_name: The name of the route filter rule. :type rule_name: str :param route_filter_rule_parameters: Parameters supplied to the create or update route filter rule operation. :type route_filter_rule_parameters: ~azure.mgmt.network.v2018_06_01.models.RouteFilterRule :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_06_01.models.RouteFilterRule] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, route_filter_name=route_filter_name, rule_name=rule_name, route_filter_rule_parameters=route_filter_rule_parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('RouteFilterRule', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'), 'ruleName': self._serialize.url("rule_name", rule_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore async def _update_initial( self, resource_group_name: str, route_filter_name: str, rule_name: str, route_filter_rule_parameters: "_models.PatchRouteFilterRule", **kwargs: Any ) -> "_models.RouteFilterRule": cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._update_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'), 'ruleName': self._serialize.url("rule_name", rule_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(route_filter_rule_parameters, 'PatchRouteFilterRule') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('RouteFilterRule', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore async def begin_update( self, resource_group_name: str, route_filter_name: str, rule_name: str, route_filter_rule_parameters: "_models.PatchRouteFilterRule", **kwargs: Any ) -> AsyncLROPoller["_models.RouteFilterRule"]: """Updates a route in the specified route filter. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param route_filter_name: The name of the route filter. :type route_filter_name: str :param rule_name: The name of the route filter rule. :type rule_name: str :param route_filter_rule_parameters: Parameters supplied to the update route filter rule operation. :type route_filter_rule_parameters: ~azure.mgmt.network.v2018_06_01.models.PatchRouteFilterRule :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_06_01.models.RouteFilterRule] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._update_initial( resource_group_name=resource_group_name, route_filter_name=route_filter_name, rule_name=rule_name, route_filter_rule_parameters=route_filter_rule_parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('RouteFilterRule', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized <|fim▁hole|> 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore def list_by_route_filter( self, resource_group_name: str, route_filter_name: str, **kwargs: Any ) -> AsyncIterable["_models.RouteFilterRuleListResult"]: """Gets all RouteFilterRules in a route filter. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param route_filter_name: The name of the route filter. 
:type route_filter_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either RouteFilterRuleListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_06_01.models.RouteFilterRuleListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRuleListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_by_route_filter.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('RouteFilterRuleListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_by_route_filter.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules'} # type: ignore<|fim▁end|>
path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'), 'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
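For illustration, a hypothetical call site for the poller-based update above (the client fixture, resource names and parameters object are invented; this assumes the async NetworkManagementClient from azure.mgmt.network.aio):

# Sketch only: drive the AsyncLROPoller returned by begin_update.
async def update_rule(client, parameters):
    poller = await client.route_filter_rules.begin_update(
        resource_group_name='rg1',                # made-up names
        route_filter_name='filter1',
        rule_name='rule1',
        route_filter_rule_parameters=parameters,  # a PatchRouteFilterRule
    )
    return await poller.result()                  # waits for the LRO to finish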
<|file_name|>update.ts<|end_file_name|><|fim▁begin|>import { CommandInstanceInfo, CommandLineInputs, CommandLineOptions, CommandMetadata, CommandPreRun } from '../../definitions'; import { input } from '../../lib/color'; import { CapacitorCommand } from './base'; export class UpdateCommand extends CapacitorCommand implements CommandPreRun { async getMetadata(): Promise<CommandMetadata> { return { name: 'update', type: 'project', summary: 'Update Capacitor native platforms, install Capacitor/Cordova plugins',<|fim▁hole|>${input('ionic capacitor update')} will do the following: - Update Capacitor native platform(s) and dependencies - Install any discovered Capacitor or Cordova plugins `, inputs: [ { name: 'platform', summary: `The platform to update (e.g. ${['android', 'ios'].map(v => input(v)).join(', ')})`, }, ], }; } async preRun(inputs: CommandLineInputs, options: CommandLineOptions, runinfo: CommandInstanceInfo): Promise<void> { await this.preRunChecks(runinfo); if (inputs[0]) { await this.checkForPlatformInstallation(inputs[0]); } } async run(inputs: CommandLineInputs, options: CommandLineOptions): Promise<void> { const [ platform ] = inputs; const args = ['update']; if (platform) { args.push(platform); } await this.runCapacitor(args); } }<|fim▁end|>
description: `
<|file_name|>result_functions_file.py<|end_file_name|><|fim▁begin|>import os
import datetime
import pymysql
import lib.maglib as MSG


# This is a library for preliminary processing of the scraped results
# It is used to split out the scraped content, the author and the posting time
# The scraping results should be stored in the client root directory and named "result"
# In the test setup, the result file is results.txt


# Important global variables
PATH_SUFFIX = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
print(PATH_SUFFIX)
PATH_SUFFIX = PATH_SUFFIX[::-1]
PATH_SUFFIX = PATH_SUFFIX[PATH_SUFFIX.find('\\'):]
PATH_SUFFIX = PATH_SUFFIX[::-1]
print(PATH_SUFFIX)

PATH_RESULT_FILE = PATH_SUFFIX + "\\datasource.ini"

DBSETTINGS = {'H':'',   #HOST
              'U':'',   #USER
              'P':'',   #PASSWORD
              'D':''}   #DATABASE_NAME

# Reads the data source configuration
# Return value: the raw configuration text
def loadDataSource():
    print("Loading data source configuration:", PATH_RESULT_FILE)
    f = open(PATH_RESULT_FILE,'rb')
    data = f.read()
    f.close()
    data = data.decode('gbk', 'ignore')
    dbl = data.split("\r\n")
    for db in dbl:
        DBSETTINGS[db[0]] = db[db.find('=')+1:].replace('\'','').replace(' ','')
    return data

loadDataSource()

DBCONN = pymysql.connect(host=DBSETTINGS['H'], port=3306,user=DBSETTINGS['U'],passwd=DBSETTINGS['P'],db=DBSETTINGS['D'],charset='UTF8')
DBCUR = DBCONN.cursor()


# Query the database for all records whose content contains the given word
# Return value: list of records containing the given word
def queryWordContainPostListbyKeyword(word):
    SEL = "select CONTENT from `postdata` where CONTENT like('%" + word +"%')"
    DBCUR.execute("SET names 'utf8mb4'")
    DBCUR.execute(SEL)
    DBCONN.commit()
    datalist = DBCUR.fetchall()
    return datalist

# Query the database for all posts by the given author
# Return value: all reply records of the given author
#        [ [thread link, forum name, author, post content, post time, reply-to, page], [......], ..... ]
def queryPostdataListbyAuthor(author):
    SEL = "select * from `postdata` where AUTHOR=\"" + author +"\""
    DBCUR.execute("SET names 'utf8mb4'")
    DBCUR.execute(SEL)
    DBCONN.commit()
    datalist = DBCUR.fetchall()
    return datalist

# Query the database for the latest date
# Return value: the latest date
def queryDatasourceLatestTime():
    SEL = "select MAX(DATE) from `postdata`"
    DBCUR.execute("SET names 'utf8mb4'")
    DBCUR.execute(SEL)
    DBCONN.commit()
    datalist = DBCUR.fetchall()
    return datalist[0][0]

# Query the database for the earliest date
# Return value: the earliest date
def queryDatasourceEarlyTime():
    SEL = "select MIN(DATE) from `postdata`"
    DBCUR.execute("SET names 'utf8mb4'")<|fim▁hole|>
    DBCUR.execute(SEL)
    DBCONN.commit()
    datalist = DBCUR.fetchall()
    return datalist[0][0]

# Query the database for the given author's records after the given date
# Return value: list of records after the given date
#        [ [thread link, forum name, author, post content, post time, reply-to, page], [......], ..... ]
def queryPostdataListAfterTime(author,earlydatestr):
    SEL = "select * from `postdata` where AUTHOR=\"" + author + "\" and DATE>'" + earlydatestr + "'"
    DBCUR.execute("SET names 'utf8mb4'")
    DBCUR.execute(SEL)
    DBCONN.commit()
    datalist = DBCUR.fetchall()
    print(len(datalist))
    return datalist<|fim▁end|>
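The query helpers above build SQL by string concatenation, which is vulnerable to SQL injection. A minimal sketch of the safer parameter-binding form, reusing the module-level DBCUR cursor from above (the helper name is illustrative):

# Sketch only: let the driver escape values instead of concatenating them.
def query_posts_containing(word):
    sql = "select CONTENT from `postdata` where CONTENT like %s"
    DBCUR.execute(sql, ('%' + word + '%',))   # pymysql binds %s placeholders
    return DBCUR.fetchall()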
<|file_name|>f32-as-u32.rs<|end_file_name|><|fim▁begin|>fn main() { let a: f32 = 42.42; let frankentype: u32 = unsafe { std::mem::transmute(a)<|fim▁hole|> println!("{:032b}", frankentype); }<|fim▁end|>
};
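The same bit-for-bit reinterpretation can be expressed without unsafe: Rust itself offers f32::to_bits for exactly this, and the Python equivalent (a sketch) round-trips the four IEEE-754 bytes through struct:

import struct

def f32_bits(x):
    # Pack as little-endian IEEE-754 single precision, then read the same
    # four bytes back as an unsigned 32-bit integer.
    (bits,) = struct.unpack('<I', struct.pack('<f', x))
    return bits

print(format(f32_bits(42.42), '032b'))  # same 32-bit pattern the Rust program prints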
<|file_name|>rest_api_test_course.py<|end_file_name|><|fim▁begin|>'''
Testing class for database API's course related functions.

Authors: Ari Kairala, Petteri Ponsimaa
Originally adapted from Ivan's exercise 1 test class.
'''

import unittest, hashlib
import re, base64, copy, json, server

from database_api_test_common import BaseTestCase, db
from flask import json, jsonify
from exam_archive import ExamDatabaseErrorNotFound, ExamDatabaseErrorExists
from unittest import TestCase
from resources_common import COLLECTIONJSON, PROBLEMJSON, COURSE_PROFILE, API_VERSION

class RestCourseTestCase(BaseTestCase):
    '''
    RestCourseTestCase contains course related unit tests of the database API.
    '''

    # List of user credentials in exam_archive_data_dump.sql for testing purposes
    super_user = "bigboss"
    super_pw = hashlib.sha256("ultimatepw").hexdigest()
    admin_user = "antti.admin"
    admin_pw = hashlib.sha256("qwerty1234").hexdigest()
    basic_user = "testuser"
    basic_pw = hashlib.sha256("testuser").hexdigest()
    wrong_pw = "wrong-pw"

    test_course_template_1 = {"template": {
        "data": [
            {"name": "archiveId", "value": 1},
            {"name": "courseCode", "value": "810136P"},
            {"name": "name", "value": "Johdatus tietojenk\u00e4sittelytieteisiin"},
            {"name": "description", "value": "Lorem ipsum"},
            {"name": "inLanguage", "value": "fi"},
            {"name": "creditPoints", "value": 4},
            {"name": "teacherId", "value": 1}]
    }
    }
    test_course_template_2 = {"template": {
        "data": [
            {"name": "archiveId", "value": 1},
            {"name": "courseCode", "value": "810137P"},
            {"name": "name", "value": "Introduction to Information Processing Sciences"},
            {"name": "description", "value": "Aaa Bbbb"},
            {"name": "inLanguage", "value": "en"},
            {"name": "creditPoints", "value": 5},
            {"name": "teacherId", "value": 2}]
    }
    }

    course_resource_url = '/exam_archive/api/archives/1/courses/1/'
    course_resource_not_allowed_url = '/exam_archive/api/archives/2/courses/1/'
    courselist_resource_url = '/exam_archive/api/archives/1/courses/'

    # Set a ready header for authorized admin user
    header_auth = {'Authorization': 'Basic ' + base64.b64encode(super_user + ":" + super_pw)}

    # Define a list of the sample contents of the database, so we can later compare it to the test results

    @classmethod
    def setUpClass(cls):
        print "Testing ", cls.__name__

    def test_user_not_authorized(self):
        '''
        Check that user is not able to get course list without authenticating. 
''' print '(' + self.test_user_not_authorized.__name__ + ')', \ self.test_user_not_authorized.__doc__ # Test CourseList/GET rv = self.app.get(self.courselist_resource_url) self.assertEquals(rv.status_code,401) self.assertEquals(PROBLEMJSON,rv.mimetype) # Test CourseList/POST rv = self.app.post(self.courselist_resource_url) self.assertEquals(rv.status_code,401) self.assertEquals(PROBLEMJSON,rv.mimetype) # Test Course/GET rv = self.app.get(self.course_resource_url) self.assertEquals(rv.status_code,401) self.assertEquals(PROBLEMJSON,rv.mimetype) # Test Course/PUT rv = self.app.put(self.course_resource_url) self.assertEquals(rv.status_code,401) self.assertEquals(PROBLEMJSON,rv.mimetype) # Test Course/DELETE rv = self.app.put(self.course_resource_url) self.assertEquals(rv.status_code,401) self.assertEquals(PROBLEMJSON,rv.mimetype) # Try to Course/POST when not admin or super user rv = self.app.post(self.courselist_resource_url, headers={'Authorization': 'Basic ' + \ base64.b64encode(self.basic_user + ":" + self.basic_pw)}) self.assertEquals(rv.status_code,403) self.assertEquals(PROBLEMJSON,rv.mimetype) # Try to delete course, when not admin or super user rv = self.app.delete(self.course_resource_url, headers={'Authorization': 'Basic ' + \ base64.b64encode(self.basic_user + ":" + self.basic_pw)}) self.assertEquals(rv.status_code,403) self.assertEquals(PROBLEMJSON,rv.mimetype) # Try to get Course list as basic user from unallowed archive rv = self.app.get(self.course_resource_not_allowed_url, headers={'Authorization': 'Basic ' + \ base64.b64encode(self.basic_user + ":" + self.basic_pw)}) self.assertEquals(rv.status_code,403) self.assertEquals(PROBLEMJSON,rv.mimetype) # Try to get Course list as super user with wrong password rv = self.app.get(self.courselist_resource_url, headers={'Authorization': 'Basic ' + \ base64.b64encode(self.super_user + ":" + self.wrong_pw)}) self.assertEquals(rv.status_code,401) self.assertEquals(PROBLEMJSON,rv.mimetype) def test_user_authorized(self): ''' Check that authenticated user is able to get course list. ''' print '(' + self.test_user_authorized.__name__ + ')', \ self.test_user_authorized.__doc__ # Try to get Course list as basic user from the correct archive rv = self.app.get(self.course_resource_url, headers={'Authorization': 'Basic ' + \ base64.b64encode(self.basic_user + ":" + self.basic_pw)}) self.assertEquals(rv.status_code,200) self.assertEquals(COLLECTIONJSON+";"+COURSE_PROFILE,rv.content_type) # User authorized as super user rv = self.app.get(self.courselist_resource_url, headers={'Authorization': 'Basic ' + \ base64.b64encode(self.super_user + ":" + self.super_pw)}) self.assertEquals(rv.status_code,200) self.assertEquals(COLLECTIONJSON+";"+COURSE_PROFILE,rv.content_type) def test_course_get(self): ''' Check data consistency of Course/GET and CourseList/GET. ''' print '(' + self.test_course_get.__name__ + ')', \ self.test_course_get.__doc__ # Test CourseList/GET self._course_get(self.courselist_resource_url) # Test single course Course/GET self._course_get(self.course_resource_url) def _course_get(self, resource_url): ''' Check data consistency of CourseList/GET. 
''' # Get all the courses from database courses = db.browse_courses(1) # Get all the courses from API rv = self.app.get(resource_url, headers=self.header_auth) self.assertEquals(rv.status_code,200) self.assertEquals(COLLECTIONJSON+";"+COURSE_PROFILE,rv.content_type) input = json.loads(rv.data) assert input # Go through the data data = input['collection'] items = data['items'] self.assertEquals(data['href'], resource_url) self.assertEquals(data['version'], API_VERSION) for item in items: obj = self._create_dict(item['data']) course = db.get_course(obj['courseId']) assert self._isIdentical(obj, course) def test_course_post(self): ''' Check that a new course can be created. ''' print '(' + self.test_course_post.__name__ + ')', \ self.test_course_post.__doc__ resource_url = self.courselist_resource_url new_course = self.test_course_template_1.copy() # Test CourseList/POST rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(new_course)) self.assertEquals(rv.status_code,201) # Post returns the address of newly created resource URL in header, in 'location'. Get the identifier of # the just created item, fetch it from database and compare. location = rv.location location_match = re.match('.*courses/([^/]+)/', location) self.assertIsNotNone(location_match) new_id = location_match.group(1) # Fetch the item from database and set it to course_id_db, and convert the filled post template data above to # similar format by replacing the keys with post data attributes. course_in_db = db.get_course(new_id) course_posted = self._convert(new_course) # Compare the data in database and the post template above. self.assertDictContainsSubset(course_posted, course_in_db) # Next, try to add the same course twice - there should be conflict rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(new_course)) self.assertEquals(rv.status_code,409) # Next check that by posting invalid JSON data we get status code 415 invalid_json = "INVALID " + json.dumps(new_course) rv = self.app.post(resource_url, headers=self.header_auth, data=invalid_json) self.assertEquals(rv.status_code,415) # Check that template structure is validated invalid_json = json.dumps(new_course['template']) rv = self.app.post(resource_url, headers=self.header_auth, data=invalid_json) self.assertEquals(rv.status_code,400) # Check for the missing required field by removing the third row in array (course name) invalid_template = copy.deepcopy(new_course) invalid_template['template']['data'].pop(2) rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(invalid_template)) self.assertEquals(rv.status_code,400) # Lastly, delete the item rv = self.app.delete(location, headers=self.header_auth) self.assertEquals(rv.status_code,204) def test_course_put(self): ''' Check that an existing course can be modified. 
'''
        print '(' + self.test_course_put.__name__ + ')', \
            self.test_course_put.__doc__

        resource_url = self.courselist_resource_url
        new_course = self.test_course_template_1
        edited_course = self.test_course_template_2

        # First create the course
        rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(new_course))
        self.assertEquals(rv.status_code,201)
        location = rv.location
        self.assertIsNotNone(location)

        # Then try to edit the course
        rv = self.app.put(location, headers=self.header_auth, data=json.dumps(edited_course))
        self.assertEquals(rv.status_code,200)
        location = rv.location
        self.assertIsNotNone(location)

        # Put returns the address of newly created resource URL in header, in 'location'. Get the identifier of
        # the just created item, fetch it from database and compare.
        location = rv.location
        location_match = re.match('.*courses/([^/]+)/', location)
        self.assertIsNotNone(location_match)
        new_id = location_match.group(1)

        # Fetch the item from database and set it to course_id_db, and convert the filled post template data above to
        # similar format by replacing the keys with post data attributes.
        course_in_db = db.get_course(new_id)
        course_posted = self._convert(edited_course)

        # Compare the data in database and the post template above.
        self.assertDictContainsSubset(course_posted, course_in_db)

        # Next check that by posting invalid JSON data we get status code 415
        invalid_json = "INVALID " + json.dumps(new_course)
        rv = self.app.put(location, headers=self.header_auth, data=invalid_json)
        self.assertEquals(rv.status_code,415)

        # Check that template structure is validated
        invalid_json = json.dumps(new_course['template'])
        rv = self.app.put(location, headers=self.header_auth, data=invalid_json)
        self.assertEquals(rv.status_code,400)

        # Lastly, we delete the course
        rv = self.app.delete(location, headers=self.header_auth)
        self.assertEquals(rv.status_code,204)

    def test_course_delete(self):
        '''
        Check that an existing course can be deleted.
        '''
        print '(' + self.test_course_delete.__name__ + ')', \
            self.test_course_delete.__doc__

        # First create the course
        resource_url = self.courselist_resource_url
        rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(self.test_course_template_2))
        self.assertEquals(rv.status_code,201)
        location = rv.location
        self.assertIsNotNone(location)

        # Get the identifier of the just created item, fetch it from database and compare.
        location = rv.location
        location_match = re.match('.*courses/([^/]+)/', location)
        self.assertIsNotNone(location_match)
        new_id = location_match.group(1)

        # Then, we delete the course
        rv = self.app.delete(location, headers=self.header_auth)
        self.assertEquals(rv.status_code,204)

        # Try to fetch the deleted course from database - expect to fail
        self.assertIsNone(db.get_course(new_id))

    def test_for_method_not_allowed(self):
        '''
        Check that unsupported methods return 405, method not allowed.
        '''
        print '(' + self.test_for_method_not_allowed.__name__ + ')', \
            self.test_for_method_not_allowed.__doc__

        # CourseList/PUT should not exist
        rv = self.app.put(self.courselist_resource_url, headers=self.header_auth)
        self.assertEquals(rv.status_code,405)

        # CourseList/DELETE should not exist
        rv = self.app.delete(self.courselist_resource_url, headers=self.header_auth)
        self.assertEquals(rv.status_code,405)

        # Course/POST should not exist
        rv = self.app.post(self.course_resource_url, headers=self.header_auth)
        self.assertEquals(rv.status_code,405)

    def _isIdentical(self, api_item, db_item):
        '''
        Check whether template data corresponds to data stored in the database. 
''' return api_item['courseId'] == db_item['course_id'] and \ api_item['name'] == db_item['course_name'] and \ api_item['archiveId'] == db_item['archive_id'] and \ api_item['description'] == db_item['description'] and \ api_item['inLanguage'] == db_item['language_id'] and \ api_item['creditPoints'] == db_item['credit_points'] and \ api_item['courseCode'] == db_item['course_code'] def _convert(self, template_data): ''' Convert template data to a dictionary representing the format the data is saved in the database. ''' trans_table = {"name":"course_name", "url":"url", "archiveId":"archive_id", "courseCode":"course_code", "dateModified": "modified_date", "modifierId":"modifier_id", "courseId":"course_id", "description":"description", "inLanguage":"language_id", "creditPoints":"credit_points", "teacherId":"teacher_id", "teacherName":"teacher_name"} data = self._create_dict(template_data['template']['data']) db_item = {} <|fim▁hole|> return db_item def _create_dict(self,item): ''' Create a dictionary from template data for easier handling. ''' dict = {} for f in item: dict[f['name']] = f['value'] return dict if __name__ == '__main__': print 'Start running tests' unittest.main()<|fim▁end|>
for key, val in data.items(): db_item[trans_table[key]] = val
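To make the _create_dict/_convert round trip above concrete, here is a trimmed, standalone sketch of the same mapping (the template and translation table are shortened copies of the ones above):

template = {"template": {"data": [
    {"name": "courseCode", "value": "810136P"},
    {"name": "creditPoints", "value": 4},
]}}
trans_table = {"courseCode": "course_code", "creditPoints": "credit_points"}

data = {f["name"]: f["value"] for f in template["template"]["data"]}
db_item = {trans_table[k]: v for k, v in data.items()}
print(db_item)  # {'course_code': '810136P', 'credit_points': 4}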
<|file_name|>page.py<|end_file_name|><|fim▁begin|>import os import shutil from jinja2 import Environment, PackageLoader import html import xml.etree.ElementTree as et class moodle_module: def __init__(self, **kwargs): self.backup = kwargs['backup'] self.temp_dir = kwargs['temp_dir'] self.db = kwargs['db'] self.directory = kwargs['directory'] self.final_dir = kwargs['working_dir'] self.db_cursor = self.db.cursor() query = "CREATE TABLE IF NOT EXISTS pages (activityid int, moduleid int, contextid int, name text, content text)" self.db_cursor.execute(query) self.db.commit() self.env = Environment(loader=PackageLoader( 'mbzextract.plugins.page', 'templates')) def parse(self): page_xml = et.parse(self.backup.open( self.directory + "/page.xml")).getroot() inforef_xml = et.parse(self.backup.open( self.directory + "/inforef.xml")).getroot() page = (page_xml.get('id'), page_xml.get('moduleid'), page_xml.get('contextid'), page_xml.find('./page/name').text,<|fim▁hole|> html.unescape(page_xml.find('./page/content').text)) self.name = page_xml.find('./page/name').text self.db_cursor.execute( "INSERT INTO pages VALUES(?,?,?,?,?)", page) self.db.commit() self.current_id = page_xml.get('id') # create a list of files self.files = self.backup.list_files(inforef_xml, self.db_cursor) def extract(self): self.db_cursor.execute('SELECT name,content FROM pages WHERE activityid=?',(self.current_id,)) results = self.db_cursor.fetchone() template = self.env.get_template('page.html') output = (template.render(name=results[0],content=results[1])) path = os.path.join(self.final_dir, self.backup.stripped(self.name)) if os.path.exists(path) == False: os.makedirs(path) os.chdir(path) # write the page f = open("page.html",'w+') f.write(output) f.close() # files for fileid in self.files: self.db_cursor.execute( 'SELECT contenthash,filename FROM files WHERE filename != "." AND id=?', (fileid,)) results = self.db_cursor.fetchone() if results is not None: os.chdir(self.temp_dir) self.backup.extract_file( results[0], os.path.join(path, results[1]))<|fim▁end|>
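The hole above turns on html.unescape, which reverses HTML entity escaping when the page content is read back from the backup; a quick illustration:

import html

print(html.unescape('Fish &amp; Chips &lt;b&gt;bold&lt;/b&gt;'))
# Fish & Chips <b>bold</b>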
<|file_name|>KenLanguageModel.java<|end_file_name|><|fim▁begin|>package edu.stanford.nlp.mt.lm; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReentrantLock; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import edu.stanford.nlp.mt.util.IString; import edu.stanford.nlp.mt.util.Sequence; import edu.stanford.nlp.mt.util.TokenUtils; import edu.stanford.nlp.mt.util.Vocabulary; /** * KenLM language model support via JNI. * * @author daniel cer * @author Spence Green * @author Kenneth Heafield * */ public class KenLanguageModel implements LanguageModel<IString> { private static final Logger logger = LogManager.getLogger(KenLanguageModel.class.getName()); private static final int[] EMPTY_INT_ARRAY = new int[0]; private static final KenLMState ZERO_LENGTH_STATE = new KenLMState(0.0f, EMPTY_INT_ARRAY, 0); public static final String KENLM_LIBRARY_NAME = "PhrasalKenLM"; static { try { System.loadLibrary(KENLM_LIBRARY_NAME); logger.info("Loaded KenLM JNI library."); } catch (java.lang.UnsatisfiedLinkError e) { logger.fatal("KenLM has not been compiled!", e); System.exit(-1); } } private final KenLM model; private final String name; private AtomicReference<int[]> istringIdToKenLMId; private final ReentrantLock preventDuplicateWork = new ReentrantLock(); /** * Constructor for multi-threaded queries. * * @param filename */ public KenLanguageModel(String filename) { model = new KenLM(filename); name = String.format("KenLM(%s)", filename); initializeIdTable(); } /** * Create the mapping between IString word ids and KenLM word ids. */ private void initializeIdTable() { // Don't remove this line!! Sanity check to make sure that start and end load before // building the index. logger.info("Special tokens: start: {} end: {}", TokenUtils.START_TOKEN, TokenUtils.END_TOKEN); int[] table = new int[Vocabulary.systemSize()]; for (int i = 0; i < table.length; ++i) { table[i] = model.index(Vocabulary.systemGet(i)); } istringIdToKenLMId = new AtomicReference<int[]>(table); } /** * Maps the IString id to a kenLM id. If the IString * id is out of range, update the vocab mapping. * @param token * @return kenlm id of the string */ private int toKenLMId(IString token) { { int[] map = istringIdToKenLMId.get(); if (token.id < map.length) { return map[token.id]; } } // Rare event: we have to expand the vocabulary. // In principle, this doesn't need to be a lock, but it does // prevent unnecessary work duplication. if (preventDuplicateWork.tryLock()) { // This thread is responsible for updating the mapping. try { // Maybe another thread did the work for us? int[] oldTable = istringIdToKenLMId.get(); if (token.id < oldTable.length) { return oldTable[token.id]; } int[] newTable = new int[Vocabulary.systemSize()]; System.arraycopy(oldTable, 0, newTable, 0, oldTable.length); for (int i = oldTable.length; i < newTable.length; ++i) { newTable[i] = model.index(Vocabulary.systemGet(i)); } istringIdToKenLMId.set(newTable); return newTable[token.id]; } finally { preventDuplicateWork.unlock(); } } // Another thread is working. Lookup directly. 
return model.index(token.toString()); } @Override public IString getStartToken() { return TokenUtils.START_TOKEN; } @Override public IString getEndToken() { return TokenUtils.END_TOKEN; } @Override public String getName() { return name; } @Override public int order() { return model.order(); } @Override public LMState score(Sequence<IString> sequence, int startIndex, LMState priorState) { if (sequence.size() == 0) { // Source deletion rule return priorState == null ? ZERO_LENGTH_STATE : priorState; } // Extract prior state final int[] state = priorState == null ? EMPTY_INT_ARRAY : ((KenLMState) priorState).getState(); final int[] ngramIds = makeKenLMInput(sequence, state); if (sequence.size() == 1 && priorState == null && sequence.get(0).equals(TokenUtils.START_TOKEN)) { // Special case: Source deletion rule (e.g., from the OOV model) at the start of a string assert ngramIds.length == 1; return new KenLMState(0.0f, ngramIds, ngramIds.length); } // Reverse the start index for KenLM final int kenLMStartIndex = ngramIds.length - state.length - startIndex - 1; assert kenLMStartIndex >= 0; // Execute the query (via JNI) and construct the return state final long got = model.scoreSeqMarshalled(ngramIds, kenLMStartIndex); return new KenLMState(KenLM.scoreFromMarshalled(got), ngramIds, KenLM.rightStateFromMarshalled(got)); } /** * Convert a Sequence and an optional state to an input for KenLM. * * @param sequence * @param priorState * @return */ private int[] makeKenLMInput(Sequence<IString> sequence, int[] priorState) { final int sequenceSize = sequence.size(); int[] ngramIds = new int[sequenceSize + priorState.length]; if (priorState.length > 0) { System.arraycopy(priorState, 0, ngramIds, sequenceSize, priorState.length); } for (int i = 0; i < sequenceSize; i++) { // Notice: ngramids are in reverse order vv. the Sequence ngramIds[sequenceSize-1-i] = toKenLMId(sequence.get(i)); } return ngramIds; } // TODO(spenceg) This never yielded an improvement.... // private static final int DEFAULT_CACHE_SIZE = 10000;<|fim▁hole|>// // private static class KenLMCache { // private final long[] keys; // private final long[] values; // private final int mask; // public KenLMCache(int size) { // this.keys = new long[size]; // this.values = new long[size]; // this.mask = size - 1; // } // // public Long get(int[] kenLMInput, int startIndex) { // long hashValue = MurmurHash2.hash64(kenLMInput, kenLMInput.length, startIndex); // int k = ideal(hashValue); // return keys[k] == hashValue ? values[k] : null; // } // private int ideal(long hashed) { // return ((int)hashed) & mask; // } // public void insert(int[] kenLMInput, int startIndex, long value) { // long hashValue = MurmurHash2.hash64(kenLMInput, kenLMInput.length, startIndex); // int k = ideal(hashValue); // keys[k] = hashValue; // values[k] = value; // } // } }<|fim▁end|>
// private static final ThreadLocal<KenLMCache> threadLocalCache = // new ThreadLocal<KenLMCache>();
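The toKenLMId scheme above (lock-free reads, growth under a try-lock, a direct lookup as fallback) is language-independent; a rough Python sketch of the same copy-on-write pattern, with illustrative names:

import threading

class GrowOnlyTable(object):
    """Copy-on-write id table: reads never block, growth is serialized."""

    def __init__(self, compute):
        self._compute = compute          # maps index -> value
        self._table = []                 # replaced atomically as a whole
        self._lock = threading.Lock()

    def get(self, idx):
        table = self._table              # snapshot the reference
        if idx < len(table):
            return table[idx]
        if self._lock.acquire(False):    # try-lock: only one thread grows
            try:
                old = self._table
                if idx < len(old):       # another thread may have grown it
                    return old[idx]
                new = old + [self._compute(i) for i in range(len(old), idx + 1)]
                self._table = new        # publish the new table
                return new[idx]
            finally:
                self._lock.release()
        return self._compute(idx)        # grower busy: answer directly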
<|file_name|>KittenRestService.java<|end_file_name|><|fim▁begin|>package fables.kotlin.jee.rest; import fables.kotlin.jee.business.KittenBusinessService; import fables.kotlin.jee.business.KittenEntity; import javax.inject.Inject; import javax.ws.rs.*; /** * JSON REST CRud service. * JEE will first create one noarg instance, and then injected instances. * * @author Zeljko Trogrlic */ @Path("kitten") public class KittenRestService { @Inject protected KittenBusinessService kittenBusinessService; @GET @Path("{id}") @Produces({"application/json"})<|fim▁hole|> .find(id) .map(kittenEntity -> new KittenRest(kittenEntity.getName(), kittenEntity.getCuteness())) .orElseThrow(() -> new NotFoundException("ID " + id + " not found")); } @POST @Produces({"application/json"}) public Integer add(KittenRest kittenRest) { KittenEntity kittenEntity = new KittenEntity(kittenRest.getName(), kittenRest.getCuteness()); return kittenBusinessService.add(kittenEntity); } }<|fim▁end|>
public KittenRest find( @PathParam("id") final int id ) { return kittenBusinessService
<|file_name|>IPhoneAddressView.java<|end_file_name|><|fim▁begin|>package com.rx.mvp.cn.view.iface;

import com.rx.mvp.cn.base.IBaseView;
import com.rx.mvp.cn.model.bean.AddressBean;

/**
 * View interface for the phone number location page
 *
 * @author ZhongDaFeng
 */<|fim▁hole|>
public interface IPhoneAddressView extends IBaseView {

    // Display the query result
    void showResult(AddressBean bean);

}<|fim▁end|>
<|file_name|>provider.js<|end_file_name|><|fim▁begin|>/* * provider.js: Abstraction providing an interface into pluggable configuration storage. * * (C) 2011, Nodejitsu Inc. * */ var async = require('async'), common = require('./common'); // // ### function Provider (options) // #### @options {Object} Options for this instance. // Constructor function for the Provider object responsible // for exposing the pluggable storage features of `nconf`. // var Provider = exports.Provider = function (options) { // // Setup default options for working with `stores`, // `overrides`, `process.env` and `process.argv`. // options = options || {}; this.stores = {}; this.sources = []; this.init(options); }; // // Define wrapper functions for using basic stores // in this instance // ['argv', 'env', 'file'].forEach(function (type) { Provider.prototype[type] = function (options) { return this.add(type, options); }; }); // // Define wrapper functions for using // overrides and defaults // ['defaults', 'overrides'].forEach(function (type) { Provider.prototype[type] = function (options) { return this.add('literal', options); }; }); // // ### function use (name, options) // #### @type {string} Type of the nconf store to use. // #### @options {Object} Options for the store instance. // Adds (or replaces) a new store with the specified `name` // and `options`. If `options.type` is not set, then `name` // will be used instead: // // provider.use('file'); // provider.use('file', { type: 'file', filename: '/path/to/userconf' }) // Provider.prototype.use = function (name, options) { options = options || {}; var type = options.type || name; function sameOptions (store) { return Object.keys(options).every(function (key) { return options[key] === store[key]; }); } var store = this.stores[name], update = store && !sameOptions(store); if (!store || update) { if (update) { this.remove(name); } this.add(name, options); } return this; }; // // ### function add (name, options) // #### @name {string} Name of the store to add to this instance // #### @options {Object} Options for the store to create // Adds a new store with the specified `name` and `options`. If `options.type` // is not set, then `name` will be used instead: // // provider.add('memory'); // provider.add('userconf', { type: 'file', filename: '/path/to/userconf' }) // Provider.prototype.add = function (name, options) { options = options || {}; var type = options.type || name; if (!require('../nconf')[common.capitalize(type)]) { throw new Error('Cannot add store with unknown type: ' + type); } this.stores[name] = this.create(type, options); if (this.stores[name].loadSync) { this.stores[name].loadSync(); } return this; }; // // ### function remove (name) // #### @name {string} Name of the store to remove from this instance // Removes a store with the specified `name` from this instance. Users // are allowed to pass in a type argument (e.g. `memory`) as name if // this was used in the call to `.add()`. // Provider.prototype.remove = function (name) { delete this.stores[name]; return this; }; // // ### function create (type, options) // #### @type {string} Type of the nconf store to use. // #### @options {Object} Options for the store instance. // Creates a store of the specified `type` using the // specified `options`. // Provider.prototype.create = function (type, options) { return new (require('../nconf')[common.capitalize(type.toLowerCase())])(options); }; // // ### function init (options) // #### @options {Object} Options to initialize this instance with. 
// Initializes this instance with additional `stores` or `sources` in the // `options` supplied. // Provider.prototype.init = function (options) { var self = this; // // Add any stores passed in through the options // to this instance. // if (options.type) { this.add(options.type, options); } else if (options.store) { this.add(options.store.name || options.store.type, options.store); } else if (options.stores) { Object.keys(options.stores).forEach(function (name) { var store = options.stores[name]; self.add(store.name || name || store.type, store); }); } // // Add any read-only sources to this instance // if (options.source) { this.sources.push(this.create(options.source.type || options.source.name, options.source)); } else if (options.sources) { Object.keys(options.sources).forEach(function (name) { var source = options.sources[name]; self.sources.push(self.create(source.type || source.name || name, source)); }); } }; // // ### function get (key, callback) // #### @key {string} Key to retrieve for this instance. // #### @callback {function} **Optional** Continuation to respond to when complete. // Retrieves the value for the specified key (if any). // Provider.prototype.get = function (key, callback) { // // If there is no callback we can short-circuit into the default // logic for traversing stores. // if (!callback) { return this._execute('get', 1, key, callback); } // // Otherwise the asynchronous, hierarchical `get` is // slightly more complicated because we do not need to traverse // the entire set of stores, but up until there is a defined value. // var current = 0, names = Object.keys(this.stores), self = this, response; async.whilst(function () { return typeof response === 'undefined' && current < names.length; }, function (next) { var store = self.stores[names[current]]; current++; if (store.get.length >= 2) { return store.get(key, function (err, value) { if (err) { return next(err); } response = value; next(); }); } response = store.get(key); next(); }, function (err) {<|fim▁hole|>}; // // ### function set (key, value, callback) // #### @key {string} Key to set in this instance // #### @value {literal|Object} Value for the specified key // #### @callback {function} **Optional** Continuation to respond to when complete. // Sets the `value` for the specified `key` in this instance. // Provider.prototype.set = function (key, value, callback) { return this._execute('set', 2, key, value, callback); }; // // ### function reset (callback) // #### @callback {function} **Optional** Continuation to respond to when complete. // Clears all keys associated with this instance. // Provider.prototype.reset = function (callback) { return this._execute('reset', 0, callback); }; // // ### function clear (key, callback) // #### @key {string} Key to remove from this instance // #### @callback {function} **Optional** Continuation to respond to when complete. // Removes the value for the specified `key` from this instance. // Provider.prototype.clear = function (key, callback) { return this._execute('clear', 1, key, callback); }; // // ### function merge ([key,] value [, callback]) // #### @key {string} Key to merge the value into // #### @value {literal|Object} Value to merge into the key // #### @callback {function} **Optional** Continuation to respond to when complete. // Merges the properties in `value` into the existing object value at `key`. // // 1. If the existing value `key` is not an Object, it will be completely overwritten. // 2. 
If `key` is not supplied, then the `value` will be merged into the root. // Provider.prototype.merge = function () { var self = this, args = Array.prototype.slice.call(arguments), callback = typeof args[args.length - 1] === 'function' && args.pop(), value = args.pop(), key = args.pop(); function mergeProperty (prop, next) { return self._execute('merge', 2, prop, value[prop], next); } if (!key) { if (Array.isArray(value) || typeof value !== 'object') { return onError(new Error('Cannot merge non-Object into top-level.'), callback); } return async.forEach(Object.keys(value), mergeProperty, callback || function () { }) } return this._execute('merge', 2, key, value, callback); }; // // ### function load (callback) // #### @callback {function} Continuation to respond to when complete. // Responds with an Object representing all keys associated in this instance. // Provider.prototype.load = function (callback) { var self = this; function getStores () { return Object.keys(self.stores).map(function (name) { return self.stores[name]; }); } function loadStoreSync(store) { if (!store.loadSync) { throw new Error('nconf store ' + store.type + ' has no loadSync() method'); } return store.loadSync(); } function loadStore(store, next) { if (!store.load && !store.loadSync) { return next(new Error('nconf store ' + store.type + ' has no load() method')); } return store.loadSync ? next(null, store.loadSync()) : store.load(next); } function loadBatch (targets, done) { if (!done) { return common.merge(targets.map(loadStoreSync)); } async.map(targets, loadStore, function (err, objs) { return err ? done(err) : done(null, common.merge(objs)); }); } function mergeSources (data) { // // If `data` was returned then merge it into // the system store. // if (data && typeof data === 'object') { self.use('sources', { type: 'literal', store: data }); } } function loadSources () { // // If we don't have a callback and the current // store is capable of loading synchronously // then do so. // if (!callback) { mergeSources(loadBatch(self.sources)); return loadBatch(getStores()); } loadBatch(self.sources, function (err, data) { if (err) { return callback(err); } mergeSources(data); return loadBatch(getStores(), callback); }); } return self.sources.length ? loadSources() : loadBatch(getStores(), callback); }; // // ### function save (value, callback) // #### @value {Object} **Optional** Config object to set for this instance // #### @callback {function} Continuation to respond to when complete. // Removes any existing configuration settings that may exist in this // instance and then adds all key-value pairs in `value`. // Provider.prototype.save = function (value, callback) { if (!callback && typeof value === 'function') { callback = value; value = null; } var self = this, names = Object.keys(this.stores); function saveStoreSync(name) { var store = self.stores[name]; // // If the `store` doesn't have a `saveSync` method, // just ignore it and continue. // return store.saveSync ? store.saveSync() : null; } function saveStore(name, next) { var store = self.stores[name]; // // If the `store` doesn't have a `save` or saveSync` // method(s), just ignore it and continue. // if (!store.save && !store.saveSync) { return next(); } return store.saveSync ? next(null, store.saveSync()) : store.save(next); } // // If we don't have a callback and the current // store is capable of saving synchronously // then do so. 
// if (!callback) { return common.merge(names.map(saveStoreSync)); } async.map(names, saveStore, function (err, objs) { return err ? callback(err) : callback(); }); }; // // ### @private function _execute (action, syncLength, [arguments]) // #### @action {string} Action to execute on `this.store`. // #### @syncLength {number} Function length of the sync version. // #### @arguments {Array} Arguments array to apply to the action // Executes the specified `action` on all stores for this instance, ensuring a callback supplied // to a synchronous store function is still invoked. // Provider.prototype._execute = function (action, syncLength /* [arguments] */) { var args = Array.prototype.slice.call(arguments, 2), callback = typeof args[args.length - 1] === 'function' && args.pop(), destructive = ['set', 'clear', 'merge'].indexOf(action) !== -1, self = this, response; function runAction (name, next) { var store = self.stores[name]; if (destructive && store.readOnly) { return next(); } return store[action].length > syncLength ? store[action].apply(store, args.concat(next)) : next(null, store[action].apply(store, args)); } if (callback) { return async.forEach(Object.keys(this.stores), runAction, function (err) { return err ? callback(err) : callback(); }); } Object.keys(this.stores).forEach(function (name) { if (typeof response === 'undefined') { var store = self.stores[name]; if (destructive && store.readOnly) { return; } response = store[action].apply(store, args); } }); return response; } // // Throw the `err` if a callback is not supplied // function onError(err, callback) { if (callback) { return callback(err); } throw err; }<|fim▁end|>
return err ? callback(err) : callback(null, response); });
<|file_name|>animation.js<|end_file_name|><|fim▁begin|>goog.provide('ol.animation'); goog.require('ol'); goog.require('ol.PreRenderFunction'); goog.require('ol.ViewHint'); goog.require('ol.coordinate'); goog.require('ol.easing'); /** * Generate an animated transition that will "bounce" the resolution as it * approaches the final value. * @param {olx.animation.BounceOptions} options Bounce options. * @return {ol.PreRenderFunction} Pre-render function. * @api */ ol.animation.bounce = function(options) { var resolution = options.resolution; var start = options.start ? options.start : Date.now(); var duration = options.duration !== undefined ? options.duration : 1000; var easing = options.easing ? options.easing : ol.easing.upAndDown;<|fim▁hole|> * @param {ol.Map} map Map. * @param {?olx.FrameState} frameState Frame state. * @return {boolean} Run this function in the next frame. */ function(map, frameState) { if (frameState.time < start) { frameState.animate = true; frameState.viewHints[ol.ViewHint.ANIMATING] += 1; return true; } else if (frameState.time < start + duration) { var delta = easing((frameState.time - start) / duration); var deltaResolution = resolution - frameState.viewState.resolution; frameState.animate = true; frameState.viewState.resolution += delta * deltaResolution; frameState.viewHints[ol.ViewHint.ANIMATING] += 1; return true; } else { return false; } }); }; /** * Generate an animated transition while updating the view center. * @param {olx.animation.PanOptions} options Pan options. * @return {ol.PreRenderFunction} Pre-render function. * @api */ ol.animation.pan = function(options) { var source = options.source; var start = options.start ? options.start : Date.now(); var sourceX = source[0]; var sourceY = source[1]; var duration = options.duration !== undefined ? options.duration : 1000; var easing = options.easing ? options.easing : ol.easing.inAndOut; return ( /** * @param {ol.Map} map Map. * @param {?olx.FrameState} frameState Frame state. * @return {boolean} Run this function in the next frame. */ function(map, frameState) { if (frameState.time < start) { frameState.animate = true; frameState.viewHints[ol.ViewHint.ANIMATING] += 1; return true; } else if (frameState.time < start + duration) { var delta = 1 - easing((frameState.time - start) / duration); var deltaX = sourceX - frameState.viewState.center[0]; var deltaY = sourceY - frameState.viewState.center[1]; frameState.animate = true; frameState.viewState.center[0] += delta * deltaX; frameState.viewState.center[1] += delta * deltaY; frameState.viewHints[ol.ViewHint.ANIMATING] += 1; return true; } else { return false; } }); }; /** * Generate an animated transition while updating the view rotation. * @param {olx.animation.RotateOptions} options Rotate options. * @return {ol.PreRenderFunction} Pre-render function. * @api */ ol.animation.rotate = function(options) { var sourceRotation = options.rotation ? options.rotation : 0; var start = options.start ? options.start : Date.now(); var duration = options.duration !== undefined ? options.duration : 1000; var easing = options.easing ? options.easing : ol.easing.inAndOut; var anchor = options.anchor ? options.anchor : null; return ( /** * @param {ol.Map} map Map. * @param {?olx.FrameState} frameState Frame state. * @return {boolean} Run this function in the next frame. 
*/ function(map, frameState) { if (frameState.time < start) { frameState.animate = true; frameState.viewHints[ol.ViewHint.ANIMATING] += 1; return true; } else if (frameState.time < start + duration) { var delta = 1 - easing((frameState.time - start) / duration); var deltaRotation = (sourceRotation - frameState.viewState.rotation) * delta; frameState.animate = true; frameState.viewState.rotation += deltaRotation; if (anchor) { var center = frameState.viewState.center; ol.coordinate.sub(center, anchor); ol.coordinate.rotate(center, deltaRotation); ol.coordinate.add(center, anchor); } frameState.viewHints[ol.ViewHint.ANIMATING] += 1; return true; } else { return false; } }); }; /** * Generate an animated transition while updating the view resolution. * @param {olx.animation.ZoomOptions} options Zoom options. * @return {ol.PreRenderFunction} Pre-render function. * @api */ ol.animation.zoom = function(options) { var sourceResolution = options.resolution; var start = options.start ? options.start : Date.now(); var duration = options.duration !== undefined ? options.duration : 1000; var easing = options.easing ? options.easing : ol.easing.inAndOut; return ( /** * @param {ol.Map} map Map. * @param {?olx.FrameState} frameState Frame state. * @return {boolean} Run this function in the next frame. */ function(map, frameState) { if (frameState.time < start) { frameState.animate = true; frameState.viewHints[ol.ViewHint.ANIMATING] += 1; return true; } else if (frameState.time < start + duration) { var delta = 1 - easing((frameState.time - start) / duration); var deltaResolution = sourceResolution - frameState.viewState.resolution; frameState.animate = true; frameState.viewState.resolution += delta * deltaResolution; frameState.viewHints[ol.ViewHint.ANIMATING] += 1; return true; } else { return false; } }); };<|fim▁end|>
return ( /**
<|file_name|>app.js<|end_file_name|><|fim▁begin|>(function(app, undefined) { 'use strict'; if(!app) throw new Error('Application "app" namespace not found.'); //---------------------------------------------------------------------------- console.log( 'hello world' ); console.log( 'Application Running...' ); //---------------------------------------------------------------------------- // @begin: renders app.render.jquery(); app.render.vanilla(); // @end: renders //---------------------------------------------------------------------------- // @begin: to_jquery app.to_jquery.run(); // @end: to_jquery //---------------------------------------------------------------------------- // @begin: mustache app.menu.render();<|fim▁hole|> app.menu.option.reset(); // @begin: mustache //---------------------------------------------------------------------------- })(window.app);<|fim▁end|>
<|file_name|>client.py<|end_file_name|><|fim▁begin|>"""tcprocd client."""
from __future__ import unicode_literals, print_function, absolute_import

from tcprocd.protocol import Protocol

import socket
import select
import sys

if sys.version_info[0] < 3:
    str_types = (str, unicode)  # noqa
else:
    str_types = (str, bytes)  # noqa


<|fim▁hole|>


class AuthenticationError(Exception):
    """Exception raised when authentication fails."""

    pass


class ServerError(Exception):
    """Exception raised when the server answers with an error."""

    pass


class Client(object):
    """
    A class to connect to a tcprocd server.

    :param server_address: tuple of host and port or the path to the socket file
    """

    def __init__(self, server_address):
        """Initialize client."""
        self.server_address = server_address

        if isinstance(server_address, str_types):
            self.is_unix_domain = True
            sock_type = socket.AF_UNIX
        else:
            self.is_unix_domain = False
            sock_type = socket.AF_INET

        self.socket = socket.socket(sock_type, socket.SOCK_STREAM)
        self.protocol = Protocol(self.socket)
        self.server_version = None
        self.attached_to = None

    def connect(self, username=None, password=None, username_callback=None, password_callback=None):
        """Connect to the server."""
        if self.is_unix_domain:
            try:
                SO_PASSCRED = socket.SO_PASSCRED
            except AttributeError:
                SO_PASSCRED = 16

            self.socket.setsockopt(socket.SOL_SOCKET, SO_PASSCRED, 1)

        self.socket.connect(self.server_address)
        self.server_version = self.protocol.recv_part(3)
        answer = self.protocol.recv_part(2)

        # TCP connections always require username and password.
        # A unix domain socket does not accept a username and
        # only requires a password if the connecting user has one. 
# TODO: return 'authentication required' and let the caller authenticate on its own if answer == self.protocol.AUTHENTICATION_REQUIRED: if not self.is_unix_domain: if username is None: username = username_callback() self.protocol.send_part(2, username) if password is None: password = password_callback() self.protocol.send_part(2, password) answer = self.protocol.recv_part(2) if answer != self.protocol.OK: raise AuthenticationError() elif answer != self.protocol.OK: raise ServerError(answer) def close(self): """Close the connection.""" self.socket.close() def __enter__(self): """Connect when used as context manager.""" self.connect() return self def __exit__(self, exc_type, exc_val, exc_tb): """Disconnect afterwards, when used as context manager.""" self.close() def list(self): """List servers.""" self.protocol.send_part(2, 'list') answer = self.protocol.recv_part(2) data = [] if answer == self.protocol.OK: data = self.protocol.recv_part(6).split('\n') return answer, data def cat(self, name, start=0): """Get output of given process. :param name: :class:`str` - Name of the process. :param start: :class:`int` - Start at this line. (Default: ``0``) :return: :class:`str` - Multi-line output of the process. """ self.protocol.send_part(2, 'cat') self.protocol.send_part(2, name) self.protocol.send_part(1, str(start)) answer = self.protocol.recv_part(2) data = [] if answer == self.protocol.OK: data = self.protocol.recv_part(6).split('\n') return answer, data def start(self, name, command, path=''): """Create a new process with the given ``name`` and ``command``. :param name: :class:`str` - Name of the process. :param command: :class:`str` - The command to run the process. :param path: :class:`str` - The (remote) path to execute the command in. (Default: ``None``) :return: :class:`str` - Status message """ self.protocol.send_part(2, 'start') self.protocol.send_part(2, name) self.protocol.send_part(3, command) self.protocol.send_part(3, path) return self.protocol.recv_part(2) def kill(self, name): """Kill the given process. :param name: :class:`str` - Name of the process. :return: :class:`str` - Status message """ self.protocol.send_part(2, 'kill') self.protocol.send_part(2, name) return self.protocol.recv_part(2) def command(self, name, command): """Write the given command to the given process's stdin. .. Note: Use ``cat`` to see the process's stdout! :param name: :class:`str` - Name of the process. :param command: :class:`str` - The command to send to the process. :return: :class:`str` - Status message """ self.protocol.send_part(2, 'command') self.protocol.send_part(2, name) self.protocol.send_part(3, command) return self.protocol.recv_part(2) def attach(self, name): """ Attach to the given process's shell. :param name: :class:`str` - Name of the process. :return: :class:`str` - Status message """ self.protocol.send_part(2, 'attach') self.protocol.send_part(2, name) return self.protocol.recv_part(2)<|fim▁end|>
class SocketShell(object):
    """
    A class to connect to a process's thread.

    Adds a line buffer for the socket; process output is handled by
    ``on_socket_ready`` and lines typed by the user by ``on_stdin_ready``.

    :param client: :class:`tcprocd.client.Client` - The client to use for the connection.
    """

    def __init__(self, client):
        """Initialize shell."""
        self.client = client
        self.sockets = [sys.stdin, client.socket]
        self._do_stop = False

    def on_stdin_ready(self):
        """Called when some input is ready."""
        line = sys.stdin.readline().strip()

        if line == 'exit':
            return True

        self.client.protocol.sendline(line + '\n')

    def on_socket_ready(self):
        """Called when receiving some process output."""
        line = self.client.protocol.readline()

        if line == 'exit':
            return True

        sys.stdout.write(line + '\n')
        sys.stdout.flush()

    def run(self):
        """Start waiting for input/output."""
        try:
            while not self._do_stop:
                ready = select.select(self.sockets, [], [])[0]

                for s in ready:
                    if s == self.client.socket:
                        # message by server
                        if self.on_socket_ready():
                            self._do_stop = True
                    else:
                        # message by user
                        if self.on_stdin_ready():
                            self._do_stop = True
        finally:
            self.client.socket.close()
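A hypothetical end-to-end use of Client and SocketShell above (the socket path and process name are invented):

from tcprocd.client import Client, SocketShell

with Client('/run/tcprocd.sock') as client:        # unix domain socket
    status, names = client.list()
    print(status, names)
    if client.attach('myserver') == client.protocol.OK:
        SocketShell(client).run()                  # type 'exit' to detach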
<|file_name|>test_bigmem.py<|end_file_name|><|fim▁begin|>from test import test_support
from test.test_support import bigmemtest, _1G, _2G, _4G, precisionbigmemtest

import unittest
import operator
import string
import sys

# Bigmem testing houserules:
#
#  - Try not to allocate too many large objects. It's okay to rely on
#    refcounting semantics, but don't forget that 's = create_largestring()'
#    doesn't release the old 's' (if it exists) until well after its new
#    value has been created. Use 'del s' before the create_largestring call.
#
#  - Do *not* compare large objects using assertEquals or similar. It's a
#    lengthy operation and the error message will be utterly useless due to
#    its size. To make sure whether a result has the right contents, better
#    to use the strip or count methods, or compare meaningful slices.
#
#  - Don't forget to test for large indices, offsets and results and such,
#    in addition to large sizes.
#
#  - When repeating an object (say, a substring, or a small list) to create
#    a large object, make the subobject of a length that is not a power of
#    2. That way, int-wrapping problems are more easily detected.
#
#  - While the bigmemtest decorator speaks of 'minsize', all tests will
#    actually be called with a much smaller number too, in the normal
#    test run (5Kb currently.) This is so the tests themselves get frequent
#    testing. Consequently, always make all large allocations based on the
#    passed-in 'size', and don't rely on the size being very large. Also,
#    memuse-per-size should remain sane (less than a few thousand); if your
#    test uses more, adjust 'size' upward, instead.

class StrTest(unittest.TestCase):
    @bigmemtest(minsize=_2G, memuse=2)
    def test_capitalize(self, size):
        SUBSTR = ' abc def ghi'
        s = '-' * size + SUBSTR
        caps = s.capitalize()
        self.assertEquals(caps[-len(SUBSTR):],
                          SUBSTR.capitalize())
        self.assertEquals(caps.lstrip('-'), SUBSTR)

    @bigmemtest(minsize=_2G + 10, memuse=1)
    def test_center(self, size):
        SUBSTR = ' abc def ghi'
        s = SUBSTR.center(size)
        self.assertEquals(len(s), size)
        lpadsize = rpadsize = (len(s) - len(SUBSTR)) // 2
        if len(s) % 2:
            lpadsize += 1
        self.assertEquals(s[lpadsize:-rpadsize], SUBSTR)
        self.assertEquals(s.strip(), SUBSTR.strip())

    @precisionbigmemtest(size=_2G - 1, memuse=1)
    def test_center_unicode(self, size):
        SUBSTR = u' abc def ghi'
        try:
            s = SUBSTR.center(size)
        except OverflowError:
            pass # acceptable on 32-bit
        else:
            self.assertEquals(len(s), size)
            lpadsize = rpadsize = (len(s) - len(SUBSTR)) // 2
            if len(s) % 2:
                lpadsize += 1
            self.assertEquals(s[lpadsize:-rpadsize], SUBSTR)
            self.assertEquals(s.strip(), SUBSTR.strip())
            del s

    @bigmemtest(minsize=_2G, memuse=2)
    def test_count(self, size):
        SUBSTR = ' abc def ghi'
        s = '.' * size + SUBSTR
        self.assertEquals(s.count('.'), size)
        s += '.'
        self.assertEquals(s.count('.'), size + 1)
        self.assertEquals(s.count(' '), 3)
        self.assertEquals(s.count('i'), 1)
        self.assertEquals(s.count('j'), 0)

    @bigmemtest(minsize=_2G + 2, memuse=3)
    def test_decode(self, size):
        s = '.' 
* size self.assertEquals(len(s.decode('utf-8')), size) def basic_encode_test(self, size, enc, c=u'.', expectedsize=None): if expectedsize is None: expectedsize = size s = c * size self.assertEquals(len(s.encode(enc)), expectedsize) @bigmemtest(minsize=_2G + 2, memuse=3) def test_encode(self, size): return self.basic_encode_test(size, 'utf-8') @precisionbigmemtest(size=_4G // 6 + 2, memuse=2) def test_encode_raw_unicode_escape(self, size): try: return self.basic_encode_test(size, 'raw_unicode_escape') except MemoryError: pass # acceptable on 32-bit @precisionbigmemtest(size=_4G // 5 + 70, memuse=3) def test_encode_utf7(self, size): try: return self.basic_encode_test(size, 'utf7') except MemoryError: pass # acceptable on 32-bit @precisionbigmemtest(size=_4G // 4 + 5, memuse=6) def test_encode_utf32(self, size): try: return self.basic_encode_test(size, 'utf32', expectedsize=4*size+4) except MemoryError: pass # acceptable on 32-bit @precisionbigmemtest(size=_2G-1, memuse=2) def test_decodeascii(self, size): return self.basic_encode_test(size, 'ascii', c='A') @precisionbigmemtest(size=_4G // 5, memuse=6+2) def test_unicode_repr_oflw(self, size): try: s = u"\uAAAA"*size r = repr(s) except MemoryError: pass # acceptable on 32-bit else: self.failUnless(s == eval(r)) @bigmemtest(minsize=_2G, memuse=2) def test_endswith(self, size): SUBSTR = ' abc def ghi' s = '-' * size + SUBSTR self.failUnless(s.endswith(SUBSTR)) self.failUnless(s.endswith(s)) s2 = '...' + s self.failUnless(s2.endswith(s)) self.failIf(s.endswith('a' + SUBSTR)) self.failIf(SUBSTR.endswith(s)) @bigmemtest(minsize=_2G + 10, memuse=2) def test_expandtabs(self, size): s = '-' * size tabsize = 8 self.assertEquals(s.expandtabs(), s) del s slen, remainder = divmod(size, tabsize) s = ' \t' * slen s = s.expandtabs(tabsize) self.assertEquals(len(s), size - remainder) self.assertEquals(len(s.strip(' ')), 0) @bigmemtest(minsize=_2G, memuse=2) def test_find(self, size): SUBSTR = ' abc def ghi' sublen = len(SUBSTR) s = ''.join([SUBSTR, '-' * size, SUBSTR]) self.assertEquals(s.find(' '), 0) self.assertEquals(s.find(SUBSTR), 0) self.assertEquals(s.find(' ', sublen), sublen + size) self.assertEquals(s.find(SUBSTR, len(SUBSTR)), sublen + size) self.assertEquals(s.find('i'), SUBSTR.find('i')) self.assertEquals(s.find('i', sublen), sublen + size + SUBSTR.find('i')) self.assertEquals(s.find('i', size), sublen + size + SUBSTR.find('i')) self.assertEquals(s.find('j'), -1) @bigmemtest(minsize=_2G, memuse=2) def test_index(self, size): SUBSTR = ' abc def ghi' sublen = len(SUBSTR) s = ''.join([SUBSTR, '-' * size, SUBSTR]) self.assertEquals(s.index(' '), 0) self.assertEquals(s.index(SUBSTR), 0) self.assertEquals(s.index(' ', sublen), sublen + size) self.assertEquals(s.index(SUBSTR, sublen), sublen + size) self.assertEquals(s.index('i'), SUBSTR.index('i')) self.assertEquals(s.index('i', sublen), sublen + size + SUBSTR.index('i')) self.assertEquals(s.index('i', size), sublen + size + SUBSTR.index('i')) self.assertRaises(ValueError, s.index, 'j') @bigmemtest(minsize=_2G, memuse=2) def test_isalnum(self, size): SUBSTR = '123456' s = 'a' * size + SUBSTR self.failUnless(s.isalnum()) s += '.' self.failIf(s.isalnum()) @bigmemtest(minsize=_2G, memuse=2) def test_isalpha(self, size): SUBSTR = 'zzzzzzz' s = 'a' * size + SUBSTR self.failUnless(s.isalpha()) s += '.' 
self.failIf(s.isalpha()) @bigmemtest(minsize=_2G, memuse=2) def test_isdigit(self, size): SUBSTR = '123456' s = '9' * size + SUBSTR self.failUnless(s.isdigit()) s += 'z' self.failIf(s.isdigit()) @bigmemtest(minsize=_2G, memuse=2) def test_islower(self, size): chars = ''.join([ chr(c) for c in range(255) if not chr(c).isupper() ]) repeats = size // len(chars) + 2 s = chars * repeats self.failUnless(s.islower()) s += 'A' self.failIf(s.islower()) @bigmemtest(minsize=_2G, memuse=2) def test_isspace(self, size): whitespace = ' \f\n\r\t\v' repeats = size // len(whitespace) + 2 s = whitespace * repeats self.failUnless(s.isspace()) s += 'j' self.failIf(s.isspace()) @bigmemtest(minsize=_2G, memuse=2) def test_istitle(self, size): SUBSTR = '123456' s = ''.join(['A', 'a' * size, SUBSTR]) self.failUnless(s.istitle()) s += 'A' self.failUnless(s.istitle()) s += 'aA' self.failIf(s.istitle()) @bigmemtest(minsize=_2G, memuse=2) def test_isupper(self, size): chars = ''.join([ chr(c) for c in range(255) if not chr(c).islower() ]) repeats = size // len(chars) + 2 s = chars * repeats self.failUnless(s.isupper()) s += 'a' self.failIf(s.isupper()) @bigmemtest(minsize=_2G, memuse=2) def test_join(self, size): s = 'A' * size x = s.join(['aaaaa', 'bbbbb']) self.assertEquals(x.count('a'), 5) self.assertEquals(x.count('b'), 5) self.failUnless(x.startswith('aaaaaA')) self.failUnless(x.endswith('Abbbbb')) @bigmemtest(minsize=_2G + 10, memuse=1) def test_ljust(self, size): SUBSTR = ' abc def ghi' s = SUBSTR.ljust(size) self.failUnless(s.startswith(SUBSTR + ' ')) self.assertEquals(len(s), size) self.assertEquals(s.strip(), SUBSTR.strip()) @bigmemtest(minsize=_2G + 10, memuse=2) def test_lower(self, size): s = 'A' * size s = s.lower() self.assertEquals(len(s), size) self.assertEquals(s.count('a'), size) @bigmemtest(minsize=_2G + 10, memuse=1) def test_lstrip(self, size): SUBSTR = 'abc def ghi' s = SUBSTR.rjust(size) self.assertEquals(len(s), size) self.assertEquals(s.lstrip(), SUBSTR.lstrip()) del s s = SUBSTR.ljust(size) self.assertEquals(len(s), size) stripped = s.lstrip() self.failUnless(stripped is s) @bigmemtest(minsize=_2G + 10, memuse=2) def test_replace(self, size): replacement = 'a' s = ' ' * size s = s.replace(' ', replacement) self.assertEquals(len(s), size) self.assertEquals(s.count(replacement), size) s = s.replace(replacement, ' ', size - 4) self.assertEquals(len(s), size) self.assertEquals(s.count(replacement), 4) self.assertEquals(s[-10:], ' aaaa') @bigmemtest(minsize=_2G, memuse=2) def test_rfind(self, size): SUBSTR = ' abc def ghi' sublen = len(SUBSTR) s = ''.join([SUBSTR, '-' * size, SUBSTR]) self.assertEquals(s.rfind(' '), sublen + size + SUBSTR.rfind(' ')) self.assertEquals(s.rfind(SUBSTR), sublen + size) self.assertEquals(s.rfind(' ', 0, size), SUBSTR.rfind(' ')) self.assertEquals(s.rfind(SUBSTR, 0, sublen + size), 0) self.assertEquals(s.rfind('i'), sublen + size + SUBSTR.rfind('i')) self.assertEquals(s.rfind('i', 0, sublen), SUBSTR.rfind('i')) self.assertEquals(s.rfind('i', 0, sublen + size), SUBSTR.rfind('i')) self.assertEquals(s.rfind('j'), -1) @bigmemtest(minsize=_2G, memuse=2) def test_rindex(self, size): SUBSTR = ' abc def ghi' sublen = len(SUBSTR) s = ''.join([SUBSTR, '-' * size, SUBSTR]) self.assertEquals(s.rindex(' '), sublen + size + SUBSTR.rindex(' ')) self.assertEquals(s.rindex(SUBSTR), sublen + size) self.assertEquals(s.rindex(' ', 0, sublen + size - 1), SUBSTR.rindex(' ')) self.assertEquals(s.rindex(SUBSTR, 0, sublen + size), 0) self.assertEquals(s.rindex('i'), sublen + size + 
SUBSTR.rindex('i')) self.assertEquals(s.rindex('i', 0, sublen), SUBSTR.rindex('i')) self.assertEquals(s.rindex('i', 0, sublen + size), SUBSTR.rindex('i')) self.assertRaises(ValueError, s.rindex, 'j') @bigmemtest(minsize=_2G + 10, memuse=1) def test_rjust(self, size): SUBSTR = ' abc def ghi' s = SUBSTR.ljust(size) self.failUnless(s.startswith(SUBSTR + ' ')) self.assertEquals(len(s), size) self.assertEquals(s.strip(), SUBSTR.strip()) @bigmemtest(minsize=_2G + 10, memuse=1) def test_rstrip(self, size): SUBSTR = ' abc def ghi' s = SUBSTR.ljust(size) self.assertEquals(len(s), size) self.assertEquals(s.rstrip(), SUBSTR.rstrip()) del s s = SUBSTR.rjust(size) self.assertEquals(len(s), size) stripped = s.rstrip() self.failUnless(stripped is s) # The test takes about size bytes to build a string, and then about # sqrt(size) substrings of sqrt(size) in size and a list to # hold sqrt(size) items. It's close but just over 2x size. @bigmemtest(minsize=_2G, memuse=2.1) def test_split_small(self, size): # Crudely calculate an estimate so that the result of s.split won't # take up an inordinate amount of memory chunksize = int(size ** 0.5 + 2) SUBSTR = 'a' + ' ' * chunksize s = SUBSTR * chunksize l = s.split() self.assertEquals(len(l), chunksize) self.assertEquals(set(l), set(['a'])) del l l = s.split('a') self.assertEquals(len(l), chunksize + 1) self.assertEquals(set(l), set(['', ' ' * chunksize])) # Allocates a string of twice size (and briefly two) and a list of # size. Because of internal affairs, the s.split() call produces a # list of size times the same one-character string, so we only # suffer for the list size. (Otherwise, it'd cost another 48 times # size in bytes!) Nevertheless, a list of size takes # 8*size bytes. @bigmemtest(minsize=_2G + 5, memuse=10) def test_split_large(self, size): s = ' a' * size + ' ' l = s.split() self.assertEquals(len(l), size) self.assertEquals(set(l), set(['a'])) del l l = s.split('a') self.assertEquals(len(l), size + 1) self.assertEquals(set(l), set([' '])) @bigmemtest(minsize=_2G, memuse=2.1) def test_splitlines(self, size): # Crudely calculate an estimate so that the result of s.split won't # take up an inordinate amount of memory chunksize = int(size ** 0.5 + 2) // 2 SUBSTR = ' ' * chunksize + '\n' + ' ' * chunksize + '\r\n' s = SUBSTR * chunksize l = s.splitlines() self.assertEquals(len(l), chunksize * 2) self.assertEquals(set(l), set([' ' * chunksize])) @bigmemtest(minsize=_2G, memuse=2) def test_startswith(self, size): SUBSTR = ' abc def ghi' s = '-' * size + SUBSTR self.failUnless(s.startswith(s)) self.failUnless(s.startswith('-' * size)) self.failIf(s.startswith(SUBSTR)) @bigmemtest(minsize=_2G, memuse=1) def test_strip(self, size): SUBSTR = ' abc def ghi ' s = SUBSTR.rjust(size) self.assertEquals(len(s), size) self.assertEquals(s.strip(), SUBSTR.strip()) del s s = SUBSTR.ljust(size) self.assertEquals(len(s), size) self.assertEquals(s.strip(), SUBSTR.strip()) @bigmemtest(minsize=_2G, memuse=2) def test_swapcase(self, size): SUBSTR = "aBcDeFG12.'\xa9\x00" sublen = len(SUBSTR) repeats = size // sublen + 2 s = SUBSTR * repeats s = s.swapcase() self.assertEquals(len(s), sublen * repeats) self.assertEquals(s[:sublen * 3], SUBSTR.swapcase() * 3) self.assertEquals(s[-sublen * 3:], SUBSTR.swapcase() * 3) @bigmemtest(minsize=_2G, memuse=2) def test_title(self, size): SUBSTR = 'SpaaHAaaAaham' s = SUBSTR * (size // len(SUBSTR) + 2) s = s.title() self.failUnless(s.startswith((SUBSTR * 3).title())) self.failUnless(s.endswith(SUBSTR.lower() * 3)) @bigmemtest(minsize=_2G, 
memuse=2) def test_translate(self, size): trans = string.maketrans('.aZ', '-!$') SUBSTR = 'aZz.z.Aaz.' sublen = len(SUBSTR) repeats = size // sublen + 2 s = SUBSTR * repeats s = s.translate(trans) self.assertEquals(len(s), repeats * sublen) self.assertEquals(s[:sublen], SUBSTR.translate(trans)) self.assertEquals(s[-sublen:], SUBSTR.translate(trans)) self.assertEquals(s.count('.'), 0) self.assertEquals(s.count('!'), repeats * 2) self.assertEquals(s.count('z'), repeats * 3) @bigmemtest(minsize=_2G + 5, memuse=2) def test_upper(self, size): s = 'a' * size s = s.upper() self.assertEquals(len(s), size) self.assertEquals(s.count('A'), size) @bigmemtest(minsize=_2G + 20, memuse=1) def test_zfill(self, size): SUBSTR = '-568324723598234' s = SUBSTR.zfill(size) self.failUnless(s.endswith('0' + SUBSTR[1:])) self.failUnless(s.startswith('-0')) self.assertEquals(len(s), size) self.assertEquals(s.count('0'), size - len(SUBSTR)) @bigmemtest(minsize=_2G + 10, memuse=2) def test_format(self, size): s = '-' * size sf = '%s' % (s,) self.failUnless(s == sf) del sf sf = '..%s..' % (s,) self.assertEquals(len(sf), len(s) + 4) self.failUnless(sf.startswith('..-')) self.failUnless(sf.endswith('-..')) del s, sf size //= 2 edge = '-' * size s = ''.join([edge, '%s', edge]) del edge s = s % '...' self.assertEquals(len(s), size * 2 + 3) self.assertEquals(s.count('.'), 3) self.assertEquals(s.count('-'), size * 2) @bigmemtest(minsize=_2G + 10, memuse=2) def test_repr_small(self, size): s = '-' * size s = repr(s) self.assertEquals(len(s), size + 2) self.assertEquals(s[0], "'") self.assertEquals(s[-1], "'") self.assertEquals(s.count('-'), size) del s # repr() will create a string four times as large as this 'binary # string', but we don't want to allocate much more than twice # size in total. (We do extra testing in test_repr_large()) size = size // 5 * 2 s = '\x00' * size s = repr(s) self.assertEquals(len(s), size * 4 + 2) self.assertEquals(s[0], "'") self.assertEquals(s[-1], "'") self.assertEquals(s.count('\\'), size) <|fim▁hole|> @bigmemtest(minsize=_2G + 10, memuse=5) def test_repr_large(self, size): s = '\x00' * size s = repr(s) self.assertEquals(len(s), size * 4 + 2) self.assertEquals(s[0], "'") self.assertEquals(s[-1], "'") self.assertEquals(s.count('\\'), size) self.assertEquals(s.count('0'), size * 2) @bigmemtest(minsize=2**32 // 5, memuse=6+2) def test_unicode_repr(self, size): s = u"\uAAAA" * size self.failUnless(len(repr(s)) > size) # This test is meaningful even with size < 2G, as long as the # doubled string is > 2G (but it tests more if both are > 2G :) @bigmemtest(minsize=_1G + 2, memuse=3) def test_concat(self, size): s = '.' * size self.assertEquals(len(s), size) s = s + s self.assertEquals(len(s), size * 2) self.assertEquals(s.count('.'), size * 2) # This test is meaningful even with size < 2G, as long as the # repeated string is > 2G (but it tests more if both are > 2G :) @bigmemtest(minsize=_1G + 2, memuse=3) def test_repeat(self, size): s = '.' 
* size self.assertEquals(len(s), size) s = s * 2 self.assertEquals(len(s), size * 2) self.assertEquals(s.count('.'), size * 2) @bigmemtest(minsize=_2G + 20, memuse=1) def test_slice_and_getitem(self, size): SUBSTR = '0123456789' sublen = len(SUBSTR) s = SUBSTR * (size // sublen) stepsize = len(s) // 100 stepsize = stepsize - (stepsize % sublen) for i in range(0, len(s) - stepsize, stepsize): self.assertEquals(s[i], SUBSTR[0]) self.assertEquals(s[i:i + sublen], SUBSTR) self.assertEquals(s[i:i + sublen:2], SUBSTR[::2]) if i > 0: self.assertEquals(s[i + sublen - 1:i - 1:-3], SUBSTR[sublen::-3]) # Make sure we do some slicing and indexing near the end of the # string, too. self.assertEquals(s[len(s) - 1], SUBSTR[-1]) self.assertEquals(s[-1], SUBSTR[-1]) self.assertEquals(s[len(s) - 10], SUBSTR[0]) self.assertEquals(s[-sublen], SUBSTR[0]) self.assertEquals(s[len(s):], '') self.assertEquals(s[len(s) - 1:], SUBSTR[-1]) self.assertEquals(s[-1:], SUBSTR[-1]) self.assertEquals(s[len(s) - sublen:], SUBSTR) self.assertEquals(s[-sublen:], SUBSTR) self.assertEquals(len(s[:]), len(s)) self.assertEquals(len(s[:len(s) - 5]), len(s) - 5) self.assertEquals(len(s[5:-5]), len(s) - 10) self.assertRaises(IndexError, operator.getitem, s, len(s)) self.assertRaises(IndexError, operator.getitem, s, len(s) + 1) self.assertRaises(IndexError, operator.getitem, s, len(s) + 1<<31) @bigmemtest(minsize=_2G, memuse=2) def test_contains(self, size): SUBSTR = '0123456789' edge = '-' * (size // 2) s = ''.join([edge, SUBSTR, edge]) del edge self.failUnless(SUBSTR in s) self.failIf(SUBSTR * 2 in s) self.failUnless('-' in s) self.failIf('a' in s) s += 'a' self.failUnless('a' in s) @bigmemtest(minsize=_2G + 10, memuse=2) def test_compare(self, size): s1 = '-' * size s2 = '-' * size self.failUnless(s1 == s2) del s2 s2 = s1 + 'a' self.failIf(s1 == s2) del s2 s2 = '.' * size self.failIf(s1 == s2) @bigmemtest(minsize=_2G + 10, memuse=1) def test_hash(self, size): # Not sure if we can do any meaningful tests here... Even if we # start relying on the exact algorithm used, the result will be # different depending on the size of the C 'long int'. Even this # test is dodgy (there's no *guarantee* that the two things should # have a different hash, even if they, in the current # implementation, almost always do.) s = '\x00' * size h1 = hash(s) del s s = '\x00' * (size + 1) self.failIf(h1 == hash(s)) class TupleTest(unittest.TestCase): # Tuples have a small, fixed-size head and an array of pointers to # data. Since we're testing 64-bit addressing, we can assume that the # pointers are 8 bytes, and thus that the tuples take up 8 bytes # per size. # As a side-effect of testing long tuples, these tests happen to test # having more than 2<<31 references to any given object. Hence the # use of different types of objects as contents in different tests. @bigmemtest(minsize=_2G + 2, memuse=16) def test_compare(self, size): t1 = (u'',) * size t2 = (u'',) * size self.failUnless(t1 == t2) del t2 t2 = (u'',) * (size + 1) self.failIf(t1 == t2) del t2 t2 = (1,) * size self.failIf(t1 == t2) # Test concatenating into a single tuple of more than 2G in length, # and concatenating a tuple of more than 2G in length separately, so # the smaller test still gets run even if there isn't memory for the # larger test (but we still let the tester know the larger test is # skipped, in verbose mode.) 
def basic_concat_test(self, size): t = ((),) * size self.assertEquals(len(t), size) t = t + t self.assertEquals(len(t), size * 2) @bigmemtest(minsize=_2G // 2 + 2, memuse=24) def test_concat_small(self, size): return self.basic_concat_test(size) @bigmemtest(minsize=_2G + 2, memuse=24) def test_concat_large(self, size): return self.basic_concat_test(size) @bigmemtest(minsize=_2G // 5 + 10, memuse=8 * 5) def test_contains(self, size): t = (1, 2, 3, 4, 5) * size self.assertEquals(len(t), size * 5) self.failUnless(5 in t) self.failIf((1, 2, 3, 4, 5) in t) self.failIf(0 in t) @bigmemtest(minsize=_2G + 10, memuse=8) def test_hash(self, size): t1 = (0,) * size h1 = hash(t1) del t1 t2 = (0,) * (size + 1) self.failIf(h1 == hash(t2)) @bigmemtest(minsize=_2G + 10, memuse=8) def test_index_and_slice(self, size): t = (None,) * size self.assertEquals(len(t), size) self.assertEquals(t[-1], None) self.assertEquals(t[5], None) self.assertEquals(t[size - 1], None) self.assertRaises(IndexError, operator.getitem, t, size) self.assertEquals(t[:5], (None,) * 5) self.assertEquals(t[-5:], (None,) * 5) self.assertEquals(t[20:25], (None,) * 5) self.assertEquals(t[-25:-20], (None,) * 5) self.assertEquals(t[size - 5:], (None,) * 5) self.assertEquals(t[size - 5:size], (None,) * 5) self.assertEquals(t[size - 6:size - 2], (None,) * 4) self.assertEquals(t[size:size], ()) self.assertEquals(t[size:size+5], ()) # Like test_concat, split in two. def basic_test_repeat(self, size): t = ('',) * size self.assertEquals(len(t), size) t = t * 2 self.assertEquals(len(t), size * 2) @bigmemtest(minsize=_2G // 2 + 2, memuse=24) def test_repeat_small(self, size): return self.basic_test_repeat(size) @bigmemtest(minsize=_2G + 2, memuse=24) def test_repeat_large(self, size): return self.basic_test_repeat(size) @bigmemtest(minsize=_1G - 1, memuse=12) def test_repeat_large_2(self, size): return self.basic_test_repeat(size) @precisionbigmemtest(size=_1G - 1, memuse=9) def test_from_2G_generator(self, size): try: t = tuple(xrange(size)) except MemoryError: pass # acceptable on 32-bit else: count = 0 for item in t: self.assertEquals(item, count) count += 1 self.assertEquals(count, size) @precisionbigmemtest(size=_1G - 25, memuse=9) def test_from_almost_2G_generator(self, size): try: t = tuple(xrange(size)) count = 0 for item in t: self.assertEquals(item, count) count += 1 self.assertEquals(count, size) except MemoryError: pass # acceptable, expected on 32-bit # Like test_concat, split in two. def basic_test_repr(self, size): t = (0,) * size s = repr(t) # The repr of a tuple of 0's is exactly three times the tuple length. self.assertEquals(len(s), size * 3) self.assertEquals(s[:5], '(0, 0') self.assertEquals(s[-5:], '0, 0)') self.assertEquals(s.count('0'), size) @bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3) def test_repr_small(self, size): return self.basic_test_repr(size) @bigmemtest(minsize=_2G + 2, memuse=8 + 3) def test_repr_large(self, size): return self.basic_test_repr(size) class ListTest(unittest.TestCase): # Like tuples, lists have a small, fixed-sized head and an array of # pointers to data, so 8 bytes per size. Also like tuples, we make the # lists hold references to various objects to test their refcount # limits. 
@bigmemtest(minsize=_2G + 2, memuse=16) def test_compare(self, size): l1 = [u''] * size l2 = [u''] * size self.failUnless(l1 == l2) del l2 l2 = [u''] * (size + 1) self.failIf(l1 == l2) del l2 l2 = [2] * size self.failIf(l1 == l2) # Test concatenating into a single list of more than 2G in length, # and concatenating a list of more than 2G in length separately, so # the smaller test still gets run even if there isn't memory for the # larger test (but we still let the tester know the larger test is # skipped, in verbose mode.) def basic_test_concat(self, size): l = [[]] * size self.assertEquals(len(l), size) l = l + l self.assertEquals(len(l), size * 2) @bigmemtest(minsize=_2G // 2 + 2, memuse=24) def test_concat_small(self, size): return self.basic_test_concat(size) @bigmemtest(minsize=_2G + 2, memuse=24) def test_concat_large(self, size): return self.basic_test_concat(size) def basic_test_inplace_concat(self, size): l = [sys.stdout] * size l += l self.assertEquals(len(l), size * 2) self.failUnless(l[0] is l[-1]) self.failUnless(l[size - 1] is l[size + 1]) @bigmemtest(minsize=_2G // 2 + 2, memuse=24) def test_inplace_concat_small(self, size): return self.basic_test_inplace_concat(size) @bigmemtest(minsize=_2G + 2, memuse=24) def test_inplace_concat_large(self, size): return self.basic_test_inplace_concat(size) @bigmemtest(minsize=_2G // 5 + 10, memuse=8 * 5) def test_contains(self, size): l = [1, 2, 3, 4, 5] * size self.assertEquals(len(l), size * 5) self.failUnless(5 in l) self.failIf([1, 2, 3, 4, 5] in l) self.failIf(0 in l) @bigmemtest(minsize=_2G + 10, memuse=8) def test_hash(self, size): l = [0] * size self.failUnlessRaises(TypeError, hash, l) @bigmemtest(minsize=_2G + 10, memuse=8) def test_index_and_slice(self, size): l = [None] * size self.assertEquals(len(l), size) self.assertEquals(l[-1], None) self.assertEquals(l[5], None) self.assertEquals(l[size - 1], None) self.assertRaises(IndexError, operator.getitem, l, size) self.assertEquals(l[:5], [None] * 5) self.assertEquals(l[-5:], [None] * 5) self.assertEquals(l[20:25], [None] * 5) self.assertEquals(l[-25:-20], [None] * 5) self.assertEquals(l[size - 5:], [None] * 5) self.assertEquals(l[size - 5:size], [None] * 5) self.assertEquals(l[size - 6:size - 2], [None] * 4) self.assertEquals(l[size:size], []) self.assertEquals(l[size:size+5], []) l[size - 2] = 5 self.assertEquals(len(l), size) self.assertEquals(l[-3:], [None, 5, None]) self.assertEquals(l.count(5), 1) self.assertRaises(IndexError, operator.setitem, l, size, 6) self.assertEquals(len(l), size) l[size - 7:] = [1, 2, 3, 4, 5] size -= 2 self.assertEquals(len(l), size) self.assertEquals(l[-7:], [None, None, 1, 2, 3, 4, 5]) l[:7] = [1, 2, 3, 4, 5] size -= 2 self.assertEquals(len(l), size) self.assertEquals(l[:7], [1, 2, 3, 4, 5, None, None]) del l[size - 1] size -= 1 self.assertEquals(len(l), size) self.assertEquals(l[-1], 4) del l[-2:] size -= 2 self.assertEquals(len(l), size) self.assertEquals(l[-1], 2) del l[0] size -= 1 self.assertEquals(len(l), size) self.assertEquals(l[0], 2) del l[:2] size -= 2 self.assertEquals(len(l), size) self.assertEquals(l[0], 4) # Like test_concat, split in two. 
def basic_test_repeat(self, size): l = [] * size self.failIf(l) l = [''] * size self.assertEquals(len(l), size) l = l * 2 self.assertEquals(len(l), size * 2) @bigmemtest(minsize=_2G // 2 + 2, memuse=24) def test_repeat_small(self, size): return self.basic_test_repeat(size) @bigmemtest(minsize=_2G + 2, memuse=24) def test_repeat_large(self, size): return self.basic_test_repeat(size) def basic_test_inplace_repeat(self, size): l = [''] l *= size self.assertEquals(len(l), size) self.failUnless(l[0] is l[-1]) del l l = [''] * size l *= 2 self.assertEquals(len(l), size * 2) self.failUnless(l[size - 1] is l[-1]) @bigmemtest(minsize=_2G // 2 + 2, memuse=16) def test_inplace_repeat_small(self, size): return self.basic_test_inplace_repeat(size) @bigmemtest(minsize=_2G + 2, memuse=16) def test_inplace_repeat_large(self, size): return self.basic_test_inplace_repeat(size) def basic_test_repr(self, size): l = [0] * size s = repr(l) # The repr of a list of 0's is exactly three times the list length. self.assertEquals(len(s), size * 3) self.assertEquals(s[:5], '[0, 0') self.assertEquals(s[-5:], '0, 0]') self.assertEquals(s.count('0'), size) @bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3) def test_repr_small(self, size): return self.basic_test_repr(size) @bigmemtest(minsize=_2G + 2, memuse=8 + 3) def test_repr_large(self, size): return self.basic_test_repr(size) # list overallocates ~1/8th of the total size (on first expansion) so # the single list.append call puts memuse at 9 bytes per size. @bigmemtest(minsize=_2G, memuse=9) def test_append(self, size): l = [object()] * size l.append(object()) self.assertEquals(len(l), size+1) self.failUnless(l[-3] is l[-2]) self.failIf(l[-2] is l[-1]) @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5) def test_count(self, size): l = [1, 2, 3, 4, 5] * size self.assertEquals(l.count(1), size) self.assertEquals(l.count("1"), 0) def basic_test_extend(self, size): l = [file] * size l.extend(l) self.assertEquals(len(l), size * 2) self.failUnless(l[0] is l[-1]) self.failUnless(l[size - 1] is l[size + 1]) @bigmemtest(minsize=_2G // 2 + 2, memuse=16) def test_extend_small(self, size): return self.basic_test_extend(size) @bigmemtest(minsize=_2G + 2, memuse=16) def test_extend_large(self, size): return self.basic_test_extend(size) @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5) def test_index(self, size): l = [1L, 2L, 3L, 4L, 5L] * size size *= 5 self.assertEquals(l.index(1), 0) self.assertEquals(l.index(5, size - 5), size - 1) self.assertEquals(l.index(5, size - 5, size), size - 1) self.assertRaises(ValueError, l.index, 1, size - 4, size) self.assertRaises(ValueError, l.index, 6L) # This test suffers from overallocation, just like test_append. 
@bigmemtest(minsize=_2G + 10, memuse=9) def test_insert(self, size): l = [1.0] * size l.insert(size - 1, "A") size += 1 self.assertEquals(len(l), size) self.assertEquals(l[-3:], [1.0, "A", 1.0]) l.insert(size + 1, "B") size += 1 self.assertEquals(len(l), size) self.assertEquals(l[-3:], ["A", 1.0, "B"]) l.insert(1, "C") size += 1 self.assertEquals(len(l), size) self.assertEquals(l[:3], [1.0, "C", 1.0]) self.assertEquals(l[size - 3:], ["A", 1.0, "B"]) @bigmemtest(minsize=_2G // 5 + 4, memuse=8 * 5) def test_pop(self, size): l = [u"a", u"b", u"c", u"d", u"e"] * size size *= 5 self.assertEquals(len(l), size) item = l.pop() size -= 1 self.assertEquals(len(l), size) self.assertEquals(item, u"e") self.assertEquals(l[-2:], [u"c", u"d"]) item = l.pop(0) size -= 1 self.assertEquals(len(l), size) self.assertEquals(item, u"a") self.assertEquals(l[:2], [u"b", u"c"]) item = l.pop(size - 2) size -= 1 self.assertEquals(len(l), size) self.assertEquals(item, u"c") self.assertEquals(l[-2:], [u"b", u"d"]) @bigmemtest(minsize=_2G + 10, memuse=8) def test_remove(self, size): l = [10] * size self.assertEquals(len(l), size) l.remove(10) size -= 1 self.assertEquals(len(l), size) # Because of the earlier l.remove(), this append doesn't trigger # a resize. l.append(5) size += 1 self.assertEquals(len(l), size) self.assertEquals(l[-2:], [10, 5]) l.remove(5) size -= 1 self.assertEquals(len(l), size) self.assertEquals(l[-2:], [10, 10]) @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5) def test_reverse(self, size): l = [1, 2, 3, 4, 5] * size l.reverse() self.assertEquals(len(l), size * 5) self.assertEquals(l[-5:], [5, 4, 3, 2, 1]) self.assertEquals(l[:5], [5, 4, 3, 2, 1]) @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5) def test_sort(self, size): l = [1, 2, 3, 4, 5] * size l.sort() self.assertEquals(len(l), size * 5) self.assertEquals(l.count(1), size) self.assertEquals(l[:10], [1] * 10) self.assertEquals(l[-10:], [5] * 10) class BufferTest(unittest.TestCase): @precisionbigmemtest(size=_1G, memuse=4) def test_repeat(self, size): try: with test_support._check_py3k_warnings(): b = buffer("AAAA")*size except MemoryError: pass # acceptable on 32-bit else: count = 0 for c in b: self.assertEquals(c, 'A') count += 1 self.assertEquals(count, size*4) def test_main(): test_support.run_unittest(StrTest, TupleTest, ListTest, BufferTest) if __name__ == '__main__': if len(sys.argv) > 1: test_support.set_memlimit(sys.argv[1]) test_main()<|fim▁end|>
self.assertEquals(s.count('0'), size * 2)
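The house rules at the top of test_bigmem.py above warn against two patterns: comparing huge objects wholesale, and keeping two large allocations alive at once. A minimal sketch of both rules, assuming a stand-in `size` value and plain asserts in place of the unittest methods:

size = 5000  # stand-in; the real tests receive 'size' from the bigmemtest decorator

s = '-' * size + ' abc'
# Check contents via count() and small slices rather than comparing the
# whole large string with assertEquals.
assert s.count('-') == size
assert s[-4:] == ' abc'

# Release the old large string before binding the name to a new one;
# otherwise both allocations are briefly alive at the same time.
del s
s = '.' * (size + 1)
assert len(s) == size + 1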
<|file_name|>control_tower_resources.py<|end_file_name|><|fim▁begin|>from collections import namedtuple from model.flyweight import Flyweight from model.static.database import database class ControlTowerResource(Flyweight): def __init__(self,control_tower_type_id): #prevents reinitializing if "_inited" in self.__dict__: return self._inited = None<|fim▁hole|> self.control_tower_type_id = control_tower_type_id cursor = database.get_cursor( "select * from invControlTowerResources where controlTowerTypeID={};".format(self.control_tower_type_id)) self.resources = list() resource_tuple = namedtuple("resource_tuple", "resource_type_id purpose quantity min_security_level faction_id ") for row in cursor: self.resources.append(resource_tuple( resource_type_id=row["resourceTypeID"], purpose=row["purpose"], quantity=row["quantity"], min_security_level=row["minSecurityLevel"], faction_id=row["factionID"])) cursor.close()<|fim▁end|>
#prevents reinitializing
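The `_inited` guard in `ControlTowerResource.__init__` above only makes sense if the `Flyweight` base class (imported from model.flyweight but not shown here) hands back cached instances from `__new__`, so a repeated construction call must skip the expensive database query. A hedged sketch of that assumed mechanism; the class and attribute names below are illustrative stand-ins:

class FlyweightSketch(object):
    # Hypothetical stand-in for model.flyweight.Flyweight: one cached
    # instance per constructor key.
    _cache = {}

    def __new__(cls, key):
        if key not in cls._cache:
            cls._cache[key] = super(FlyweightSketch, cls).__new__(cls)
        return cls._cache[key]

class ResourceSketch(FlyweightSketch):
    def __init__(self, key):
        # Prevents reinitializing: __new__ may return an already
        # initialized instance, so bail out instead of re-querying.
        if "_inited" in self.__dict__:
            return
        self._inited = None
        self.key = key

a = ResourceSketch(42)
b = ResourceSketch(42)
assert a is b  # same cached instance; the __init__ body ran only once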
<|file_name|>pipeline_structure.ts<|end_file_name|><|fim▁begin|>/* * Copyright 2021 ThoughtWorks, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import _ from "lodash"; import Stream from "mithril/stream"; import {Origin, OriginJSON} from "models/origin"; interface Nameable { name: string; } export interface DependentPipeline { dependent_pipeline_name: string; depends_on_stage: string; } export interface PipelineJSON extends Nameable { origin: OriginJSON; stages: StageJSON[]; template_name?: string; environment: string | null; dependant_pipelines: DependentPipeline[]; } export interface PipelineGroupJSON extends Nameable { pipelines: PipelineJSON[]; } export interface PipelineGroupsJSON { groups: PipelineGroupJSON[]; } export interface PipelineStructureJSON extends PipelineGroupsJSON { groups: PipelineGroupJSON[]; templates: TemplateJSON[]; } export interface TemplateJSON extends Nameable { stages: StageJSON[]; parameters: string[]; } export interface StageJSON extends Nameable { jobs: JobJSON[]; } export interface JobJSON extends Nameable { is_elastic: boolean; } export class Pipeline { name: Stream<string>; constructor(name: string) { this.name = Stream(name); } static fromJSON(data: PipelineJSON) { return new Pipeline(data.name); } } export class PipelineWithOrigin extends Pipeline { readonly origin: Stream<Origin>; readonly templateName: Stream<string | undefined>; readonly stages: Stream<Stages>; readonly environment: Stream<string | null>; readonly dependantPipelines: Stream<DependentPipeline[]>; constructor(name: string, templateName: string | undefined, origin: Origin, stages: Stages, environment: string | null, dependantPipelines: DependentPipeline[]) { super(name); this.origin = Stream(origin); this.templateName = Stream(templateName); this.stages = Stream(stages); this.environment = Stream(environment); this.dependantPipelines = Stream(dependantPipelines); } static fromJSON(data: PipelineJSON) { return new PipelineWithOrigin(data.name, data.template_name, Origin.fromJSON(data.origin), Stages.fromJSON(data.stages), data.environment, data.dependant_pipelines); } usesTemplate() { return !_.isEmpty(this.templateName()); } clone() { return new PipelineWithOrigin(this.name(), this.templateName(), this.origin(), this.stages().map((s) => s.clone()), this.environment(), this.dependantPipelines()); } isDefinedRemotely() { return this.origin().isDefinedInConfigRepo(); } canBeDeleted(): boolean { const envNotDefined = this.environment() === undefined || this.environment() === null; const noDependantPipelines = this.dependantPipelines() === undefined || this.dependantPipelines()!.length === 0; if (!envNotDefined) { return false; } if (!noDependantPipelines) { return false; } return !this.isDefinedRemotely(); } } export class Pipelines extends Array<PipelineWithOrigin> { //in an ideal world this class should not be extending array and should completely rely on internal hashmap. 
//todo: get rid of the array extension private readonly pipelinesAsMap: Map<string, PipelineWithOrigin> = new Map(); constructor(...items: PipelineWithOrigin[]) { super(...items); Object.setPrototypeOf(this, Object.create(Pipelines.prototype)); this.forEach(ele => this.pipelinesAsMap.set(ele.name(), ele)); } static fromJSON(pipelines: PipelineJSON[] = []) { return new Pipelines(...pipelines.map(PipelineWithOrigin.fromJSON)); } containsPipeline(name: string): boolean { return !!this.pipelinesAsMap.get(name); } //todo: once the array is dropped and only the map is kept, this method simply becomes `this.pipelinesAsMap.set` add(pipeline: PipelineWithOrigin) { this.push(pipeline); this.pipelinesAsMap.set(pipeline.name(), pipeline); } //todo: once the array is dropped and only the map is kept, this method simply becomes `this.pipelinesAsMap.delete` remove(pipeline: PipelineWithOrigin) { _.remove(this, (p) => p.name() === pipeline.name()); this.pipelinesAsMap.delete(pipeline.name()); } findByName(name: string): PipelineWithOrigin | undefined { return this.pipelinesAsMap.get(name); } clone() { return new Pipelines(...this.map((p) => p.clone())); } } export class PipelineGroup { readonly name: Stream<string>; readonly pipelines: Stream<Pipelines>; constructor(name: string, pipelines: Pipelines) { this.name = Stream(name); this.pipelines = Stream(pipelines); } static fromJSON(data: PipelineGroupJSON) { return new PipelineGroup(data.name, Pipelines.fromJSON(data.pipelines)); } isEmpty() { return _.isEmpty(this.pipelines()); } hasPipelines() { return !this.isEmpty(); } containsRemotelyDefinedPipelines() { return this.pipelines().some((pipeline) => pipeline.isDefinedRemotely()); } matches(textToMatch: string): boolean { if (!textToMatch) { return true; } const searchableStrings = [this.name()]; searchableStrings.push(...this.pipelines().map((pipeline) => pipeline.name())); return searchableStrings.some((value) => value ? 
value.toLowerCase().includes(textToMatch.toLowerCase()) : false); } } export class PipelineGroups extends Array<PipelineGroup> { constructor(...pipelines: PipelineGroup[]) { super(...pipelines); Object.setPrototypeOf(this, Object.create(PipelineGroups.prototype)); } static fromJSON(data: PipelineGroupJSON[] = []) { return new PipelineGroups(...data.map(PipelineGroup.fromJSON)); } } export class Job { readonly name: Stream<string>; readonly isElastic: Stream<boolean>; constructor(name: string, isElastic: boolean) { this.name = Stream(name); this.isElastic = Stream(isElastic); } static fromJSON(data: JobJSON) { return new Job(data.name, data.is_elastic); } clone() { return new Job(this.name(), this.isElastic()); } } export class Jobs extends Array<Job> { constructor(...items: Job[]) { super(...items); Object.setPrototypeOf(this, Object.create(Jobs.prototype)); } static fromJSON(jobs: JobJSON[] = []) { return new Jobs(...jobs.map(Job.fromJSON)); } } export class Stage { readonly name: Stream<string>; readonly jobs: Stream<Jobs>; constructor(name: string, jobs: Jobs) { this.name = Stream(name); this.jobs = Stream(jobs); } static fromJSON(data: StageJSON) { return new Stage(data.name, Jobs.fromJSON(data.jobs)); } clone() { return new Stage(this.name(), new Jobs(...this.jobs().map((j) => j.clone()))); } } export class Stages extends Array<Stage> { constructor(...items: Stage[]) { super(...items); Object.setPrototypeOf(this, Object.create(Stages.prototype)); } static fromJSON(stages: StageJSON[] = []) { return new Stages(...stages.map(Stage.fromJSON)); } } export class Template { readonly name: Stream<string>; readonly stages: Stream<Stages>; readonly parameters: Stream<string[]>; constructor(name: string, stages: Stages, parameters: string[]) { this.name = Stream(name); this.stages = Stream(stages); this.parameters = Stream(parameters || []); } static fromJSON(data: TemplateJSON) { return new Template(data.name, Stages.fromJSON(data.stages), data.parameters || []); } } export class Templates extends Array<Template> { constructor(...items: Template[]) { super(...items); Object.setPrototypeOf(this, Object.create(Templates.prototype)); } static fromJSON(templates: TemplateJSON[] = []) { return new Templates(...templates.map(Template.fromJSON)); } } export class PipelineStructure { readonly groups: Stream<PipelineGroups>; readonly templates: Stream<Templates>; constructor(groups: PipelineGroups, templates: Templates) { this.groups = Stream(groups); this.templates = Stream(templates); } static fromJSON(data: PipelineStructureJSON) { return new PipelineStructure(PipelineGroups.fromJSON(data.groups), Templates.fromJSON(data.templates)); } /** * Returns `undefined` if the pipeline is not present, typically when you don't have appropriate access to it. 
*/ findPipeline(name: string) { for (const eachGroup of this.groups()) {<|fim▁hole|> } } } getAllConfigPipelinesNotUsingTemplates() { const result: string[] = []; this.groups().forEach((eachGroup) => { eachGroup.pipelines().forEach((eachPipeline) => { if (_.isEmpty(eachPipeline.templateName()) && !eachPipeline.origin().isDefinedInConfigRepo()) { result.push(eachPipeline.name()); } }); }); return result; } } export interface PipelineStructureWithAdditionalInfoJSON extends PipelineStructureJSON { additional_info: AdditionalInfoJSON; } interface AdditionalInfoJSON { users: string[]; roles: string[]; } class AdditionalInfo { users: string[]; roles: string[]; constructor(users: string[], roles: string[]) { this.users = users; this.roles = roles; } static fromJSON(data: AdditionalInfoJSON): AdditionalInfo { return new AdditionalInfo(data.users, data.roles); } } export class PipelineStructureWithAdditionalInfo { pipelineStructure: PipelineStructure; additionalInfo: AdditionalInfo; constructor(pipelineStructure: PipelineStructure, additionalInfo: AdditionalInfo) { this.pipelineStructure = pipelineStructure; this.additionalInfo = additionalInfo; } static fromJSON(data: PipelineStructureWithAdditionalInfoJSON): PipelineStructureWithAdditionalInfo { return new PipelineStructureWithAdditionalInfo(PipelineStructure.fromJSON(data), AdditionalInfo.fromJSON(data.additional_info)); } }<|fim▁end|>
for (const eachPipeline of eachGroup.pipelines()) { if (eachPipeline.name().toLowerCase() === name.toLowerCase()) { return eachPipeline; }
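The `Pipelines` class above deliberately keeps two views of the same data in sync: the inherited array (iteration order for existing callers) and `pipelinesAsMap` (constant-time lookup by name), with the todos noting that only the map should eventually survive. The same dual-index bookkeeping, sketched in Python purely for illustration:

class DualIndex(object):
    # Mirrors the array + Map pair kept in sync by the TypeScript
    # Pipelines class above.
    def __init__(self):
        self._items = []    # preserves insertion order
        self._by_name = {}  # O(1) lookup by name

    def add(self, name, item):
        self._items.append(item)
        self._by_name[name] = item

    def remove(self, name):
        item = self._by_name.pop(name, None)
        if item is not None:
            self._items.remove(item)  # both views must change together

    def find_by_name(self, name):
        return self._by_name.get(name)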
<|file_name|>MipsMCCodeEmitter.cpp<|end_file_name|><|fim▁begin|>//===-- MipsMCCodeEmitter.cpp - Convert Mips code to machine code ---------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the MipsMCCodeEmitter class. // //===----------------------------------------------------------------------===// // #define DEBUG_TYPE "mccodeemitter" #include "MCTargetDesc/MipsBaseInfo.h" #include "MCTargetDesc/MipsFixupKinds.h" #include "MCTargetDesc/MipsMCTargetDesc.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/Statistic.h" #include "llvm/MC/MCCodeEmitter.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h"<|fim▁hole|>#include "llvm/Support/raw_ostream.h" using namespace llvm; namespace { class MipsMCCodeEmitter : public MCCodeEmitter { MipsMCCodeEmitter(const MipsMCCodeEmitter &); // DO NOT IMPLEMENT void operator=(const MipsMCCodeEmitter &); // DO NOT IMPLEMENT const MCInstrInfo &MCII; const MCSubtargetInfo &STI; MCContext &Ctx; public: MipsMCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti, MCContext &ctx) : MCII(mcii), STI(sti) , Ctx(ctx) {} ~MipsMCCodeEmitter() {} void EmitByte(unsigned char C, raw_ostream &OS) const { OS << (char)C; } void EmitInstruction(uint64_t Val, unsigned Size, raw_ostream &OS) const { // Output the instruction encoding in little endian byte order. for (unsigned i = 0; i != Size; ++i) { EmitByte(Val & 255, OS); Val >>= 8; } } void EncodeInstruction(const MCInst &MI, raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups) const; // getBinaryCodeForInstr - TableGen'erated function for getting the // binary encoding for an instruction. unsigned getBinaryCodeForInstr(const MCInst &MI, SmallVectorImpl<MCFixup> &Fixups) const; // getJumpTargetOpValue - Return binary encoding of the jump // target operand. If the machine operand requires relocation, // record the relocation and return zero. unsigned getJumpTargetOpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const; // getBranchTargetOpValue - Return binary encoding of the branch // target operand. If the machine operand requires relocation, // record the relocation and return zero. unsigned getBranchTargetOpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const; // getMachineOpValue - Return binary encoding of operand. If the machine // operand requires relocation, record the relocation and return zero. unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO, SmallVectorImpl<MCFixup> &Fixups) const; unsigned getMemEncoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const; unsigned getSizeExtEncoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const; unsigned getSizeInsEncoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const; }; // class MipsMCCodeEmitter } // namespace MCCodeEmitter *llvm::createMipsMCCodeEmitter(const MCInstrInfo &MCII, const MCSubtargetInfo &STI, MCContext &Ctx) { return new MipsMCCodeEmitter(MCII, STI, Ctx); } /// EncodeInstruction - Emit the instruction. /// Size the instruction (currently only 4 bytes). void MipsMCCodeEmitter:: EncodeInstruction(const MCInst &MI, raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups) const { uint32_t Binary = getBinaryCodeForInstr(MI, Fixups); // Check for unimplemented opcodes. 
// Unfortunately in MIPS both NOT and SLL will come in with Binary == 0 // so we have to special check for them. unsigned Opcode = MI.getOpcode(); if ((Opcode != Mips::NOP) && (Opcode != Mips::SLL) && !Binary) llvm_unreachable("unimplemented opcode in EncodeInstruction()"); const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); uint64_t TSFlags = Desc.TSFlags; // Pseudo instructions don't get encoded and shouldn't be here // in the first place! if ((TSFlags & MipsII::FormMask) == MipsII::Pseudo) llvm_unreachable("Pseudo opcode found in EncodeInstruction()"); // For now all instructions are 4 bytes int Size = 4; // FIXME: Have Desc.getSize() return the correct value! EmitInstruction(Binary, Size, OS); } /// getBranchTargetOpValue - Return binary encoding of the branch /// target operand. If the machine operand requires relocation, /// record the relocation and return zero. unsigned MipsMCCodeEmitter:: getBranchTargetOpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const { const MCOperand &MO = MI.getOperand(OpNo); assert(MO.isExpr() && "getBranchTargetOpValue expects only expressions"); const MCExpr *Expr = MO.getExpr(); Fixups.push_back(MCFixup::Create(0, Expr, MCFixupKind(Mips::fixup_Mips_PC16))); return 0; } /// getJumpTargetOpValue - Return binary encoding of the jump /// target operand. If the machine operand requires relocation, /// record the relocation and return zero. unsigned MipsMCCodeEmitter:: getJumpTargetOpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const { const MCOperand &MO = MI.getOperand(OpNo); assert(MO.isExpr() && "getJumpTargetOpValue expects only expressions"); const MCExpr *Expr = MO.getExpr(); Fixups.push_back(MCFixup::Create(0, Expr, MCFixupKind(Mips::fixup_Mips_26))); return 0; } /// getMachineOpValue - Return binary encoding of operand. If the machine /// operand requires relocation, record the relocation and return zero. 
unsigned MipsMCCodeEmitter:: getMachineOpValue(const MCInst &MI, const MCOperand &MO, SmallVectorImpl<MCFixup> &Fixups) const { if (MO.isReg()) { unsigned Reg = MO.getReg(); unsigned RegNo = getMipsRegisterNumbering(Reg); return RegNo; } else if (MO.isImm()) { return static_cast<unsigned>(MO.getImm()); } else if (MO.isFPImm()) { return static_cast<unsigned>(APFloat(MO.getFPImm()) .bitcastToAPInt().getHiBits(32).getLimitedValue()); } else if (MO.isExpr()) { const MCExpr *Expr = MO.getExpr(); MCExpr::ExprKind Kind = Expr->getKind(); unsigned Ret = 0; if (Kind == MCExpr::Binary) { const MCBinaryExpr *BE = static_cast<const MCBinaryExpr*>(Expr); Expr = BE->getLHS(); Kind = Expr->getKind(); const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(BE->getRHS()); assert((Kind == MCExpr::SymbolRef) && CE && "Binary expression must be sym+const."); Ret = CE->getValue(); } if (Kind == MCExpr::SymbolRef) { Mips::Fixups FixupKind; switch(cast<MCSymbolRefExpr>(Expr)->getKind()) { case MCSymbolRefExpr::VK_Mips_GPREL: FixupKind = Mips::fixup_Mips_GPREL16; break; case MCSymbolRefExpr::VK_Mips_GOT_CALL: FixupKind = Mips::fixup_Mips_CALL16; break; case MCSymbolRefExpr::VK_Mips_GOT: FixupKind = Mips::fixup_Mips_GOT16; break; case MCSymbolRefExpr::VK_Mips_ABS_HI: FixupKind = Mips::fixup_Mips_HI16; break; case MCSymbolRefExpr::VK_Mips_ABS_LO: FixupKind = Mips::fixup_Mips_LO16; break; case MCSymbolRefExpr::VK_Mips_TLSGD: FixupKind = Mips::fixup_Mips_TLSGD; break; case MCSymbolRefExpr::VK_Mips_GOTTPREL: FixupKind = Mips::fixup_Mips_GOTTPREL; break; case MCSymbolRefExpr::VK_Mips_TPREL_HI: FixupKind = Mips::fixup_Mips_TPREL_HI; break; case MCSymbolRefExpr::VK_Mips_TPREL_LO: FixupKind = Mips::fixup_Mips_TPREL_LO; break; default: return Ret; } // switch Fixups.push_back(MCFixup::Create(0, Expr, MCFixupKind(FixupKind))); } // if SymbolRef // All of the information is in the fixup. return Ret; } llvm_unreachable("Unable to encode MCOperand!"); // Not reached return 0; } /// getMemEncoding - Return binary encoding of memory related operand. /// If the offset operand requires relocation, record the relocation. unsigned MipsMCCodeEmitter::getMemEncoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const { // Base register is encoded in bits 20-16, offset is encoded in bits 15-0. assert(MI.getOperand(OpNo).isReg()); unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo),Fixups) << 16; unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups); return (OffBits & 0xFFFF) | RegBits; } unsigned MipsMCCodeEmitter::getSizeExtEncoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const { assert(MI.getOperand(OpNo).isImm()); unsigned szEncoding = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups); return szEncoding - 1; } // FIXME: should be called getMSBEncoding // unsigned MipsMCCodeEmitter::getSizeInsEncoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const { assert(MI.getOperand(OpNo-1).isImm()); assert(MI.getOperand(OpNo).isImm()); unsigned pos = getMachineOpValue(MI, MI.getOperand(OpNo-1), Fixups); unsigned sz = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups); return pos + sz - 1; } #include "MipsGenMCCodeEmitter.inc"<|fim▁end|>
#include "llvm/MC/MCInstrInfo.h" #include "llvm/MC/MCRegisterInfo.h" #include "llvm/MC/MCSubtargetInfo.h"
<|file_name|>angular-animate.js<|end_file_name|><|fim▁begin|>/** * @license AngularJS v1.3.0-build.2480+sha.fb6062f * (c) 2010-2014 Google, Inc. http://angularjs.org * License: MIT */ (function(window, angular, undefined) {'use strict'; /* jshint maxlen: false */ /** * @ngdoc module * @name ngAnimate * @description * * # ngAnimate * * The `ngAnimate` module provides support for JavaScript, CSS3 transition and CSS3 keyframe animation hooks within existing core and custom directives. * * * <div doc-module-components="ngAnimate"></div> * * # Usage * * To see animations in action, all that is required is to define the appropriate CSS classes * or to register a JavaScript animation via the myModule.animation() function. The directives that support animation automatically are: * `ngRepeat`, `ngInclude`, `ngIf`, `ngSwitch`, `ngShow`, `ngHide`, `ngView` and `ngClass`. Custom directives can take advantage of animation * by using the `$animate` service. * * Below is a more detailed breakdown of the supported animation events provided by pre-existing ng directives: * * | Directive | Supported Animations | * |---------------------------------------------------------- |----------------------------------------------------| * | {@link ng.directive:ngRepeat#usage_animations ngRepeat} | enter, leave and move | * | {@link ngRoute.directive:ngView#usage_animations ngView} | enter and leave | * | {@link ng.directive:ngInclude#usage_animations ngInclude} | enter and leave | * | {@link ng.directive:ngSwitch#usage_animations ngSwitch} | enter and leave | * | {@link ng.directive:ngIf#usage_animations ngIf} | enter and leave | * | {@link ng.directive:ngClass#usage_animations ngClass} | add and remove | * | {@link ng.directive:ngShow#usage_animations ngShow & ngHide} | add and remove (the ng-hide class value) | * | {@link ng.directive:form#usage_animations form} | add and remove (dirty, pristine, valid, invalid & all other validations) | * | {@link ng.directive:ngModel#usage_animations ngModel} | add and remove (dirty, pristine, valid, invalid & all other validations) | * * You can find out more information about animations upon visiting each directive page. * * Below is an example of how to apply animations to a directive that supports animation hooks: * * ```html * <style type="text/css"> * .slide.ng-enter, .slide.ng-leave { * -webkit-transition:0.5s linear all; * transition:0.5s linear all; * } * * .slide.ng-enter { } /&#42; starting animations for enter &#42;/ * .slide.ng-enter-active { } /&#42; terminal animations for enter &#42;/ * .slide.ng-leave { } /&#42; starting animations for leave &#42;/ * .slide.ng-leave-active { } /&#42; terminal animations for leave &#42;/ * </style> * * <!-- * the animate service will automatically add .ng-enter and .ng-leave to the element * to trigger the CSS transition/animations * --> * <ANY class="slide" ng-include="..."></ANY> * ``` * * Keep in mind that if an animation is running, any child elements cannot be animated until the parent element's * animation has completed. * * <h2>CSS-defined Animations</h2> * The animate service will automatically apply two CSS classes to the animated element and these two CSS classes * are designed to contain the start and end CSS styling. Both CSS transitions and keyframe animations are supported * and can be used to play along with this naming structure. 
* * The following code demonstrates how to perform animations using **CSS transitions** with Angular: * * ```html * <style type="text/css"> * /&#42; * The animate class is a part of the element and the ng-enter class * is attached to the element once the enter animation event is triggered * &#42;/ * .reveal-animation.ng-enter { * -webkit-transition: 1s linear all; /&#42; Safari/Chrome &#42;/ * transition: 1s linear all; /&#42; All other modern browsers and IE10+ &#42;/ * * /&#42; The animation preparation code &#42;/ * opacity: 0; * } * * /&#42; * Keep in mind that you want to combine both CSS * classes together to avoid any CSS-specificity * conflicts * &#42;/ * .reveal-animation.ng-enter.ng-enter-active { * /&#42; The animation code itself &#42;/ * opacity: 1; * } * </style> * * <div class="view-container"> * <div ng-view class="reveal-animation"></div> * </div> * ``` * * The following code demonstrates how to perform animations using **CSS animations** with Angular: * * ```html * <style type="text/css"> * .reveal-animation.ng-enter { * -webkit-animation: enter_sequence 1s linear; /&#42; Safari/Chrome &#42;/ * animation: enter_sequence 1s linear; /&#42; IE10+ and Future Browsers &#42;/<|fim▁hole|> * from { opacity:0; } * to { opacity:1; } * } * &#64keyframes enter_sequence { * from { opacity:0; } * to { opacity:1; } * } * </style> * * <div class="view-container"> * <div ng-view class="reveal-animation"></div> * </div> * ``` * * Both CSS3 animations and transitions can be used together and the animate service will figure out the correct duration and delay timing. * * Upon DOM mutation, the event class is added first (something like `ng-enter`), then the browser prepares itself to add * the active class (in this case `ng-enter-active`) which then triggers the animation. The animation module will automatically * detect the CSS code to determine when the animation ends. Once the animation is over, both CSS classes will be * removed from the DOM. If a browser does not support CSS transitions or CSS animations then the animation will start and end * immediately, resulting in a DOM element that is at its final state. This final state is when the DOM element * has no CSS transition/animation classes applied to it. * * <h3>CSS Staggering Animations</h3> * A staggering animation is a collection of animations that are issued with a slight delay between each successive operation, resulting in a * curtain-like effect. The ngAnimate module, as of 1.2.0, supports staggering animations and the stagger effect can be * performed by creating a **ng-EVENT-stagger** CSS class and attaching that class to the base CSS class used for * the animation. The style property expected within the stagger class can either be a **transition-delay** or an * **animation-delay** property (or both if your animation contains both transitions and keyframe animations). 
* * ```css * .my-animation.ng-enter { * /&#42; standard transition code &#42;/ * -webkit-transition: 1s linear all; * transition: 1s linear all; * opacity:0; * } * .my-animation.ng-enter-stagger { * /&#42; this will have a 100ms delay between each successive leave animation &#42;/ * -webkit-transition-delay: 0.1s; * transition-delay: 0.1s; * * /&#42; in case the stagger doesn't work then these two values * must be set to 0 to avoid an accidental CSS inheritance &#42;/ * -webkit-transition-duration: 0s; * transition-duration: 0s; * } * .my-animation.ng-enter.ng-enter-active { * /&#42; standard transition styles &#42;/ * opacity:1; * } * ``` * * Staggering animations work by default in ngRepeat (so long as the CSS class is defined). Outside of ngRepeat, to use staggering animations * on your own, they can be triggered by firing multiple calls to the same event on $animate. However, the restrictions surrounding this * are that each of the elements must have the same CSS className value as well as the same parent element. A stagger operation * will also be reset if more than 10ms has passed after the last animation has been fired. * * The following code will issue the **ng-leave-stagger** event on the element provided: * * ```js * var kids = parent.children(); * * $animate.leave(kids[0]); //stagger index=0 * $animate.leave(kids[1]); //stagger index=1 * $animate.leave(kids[2]); //stagger index=2 * $animate.leave(kids[3]); //stagger index=3 * $animate.leave(kids[4]); //stagger index=4 * * $timeout(function() { * //stagger has reset itself * $animate.leave(kids[5]); //stagger index=0 * $animate.leave(kids[6]); //stagger index=1 * }, 100, false); * ``` * * Stagger animations are currently only supported within CSS-defined animations. * * <h2>JavaScript-defined Animations</h2> * In the event that you do not want to use CSS3 transitions or CSS3 animations or if you wish to offer animations on browsers that do not * yet support CSS transitions/animations, then you can make use of JavaScript animations defined inside of your AngularJS module. * * ```js * //!annotate="YourApp" Your AngularJS Module|Replace this or ngModule with the module that you used to define your application. * var ngModule = angular.module('YourApp', ['ngAnimate']); * ngModule.animation('.my-crazy-animation', function() { * return { * enter: function(element, done) { * //run the animation here and call done when the animation is complete * return function(cancelled) { * //this (optional) function will be called when the animation * //completes or when the animation is cancelled (the cancelled * //flag will be set to true if cancelled). * }; * }, * leave: function(element, done) { }, * move: function(element, done) { }, * * //animation that can be triggered before the class is added * beforeAddClass: function(element, className, done) { }, * * //animation that can be triggered after the class is added * addClass: function(element, className, done) { }, * * //animation that can be triggered before the class is removed * beforeRemoveClass: function(element, className, done) { }, * * //animation that can be triggered after the class is removed * removeClass: function(element, className, done) { } * }; * }); * ``` * * JavaScript-defined animations are created with a CSS-like class selector and a collection of events which are set to run * a javascript callback function. 
When an animation is triggered, $animate will look for a matching animation which fits * the element's CSS class attribute value and then run the matching animation event function (if found). * In other words, if the CSS classes present on the animated element match any of the JavaScript animations then the callback function will * be executed. It should also be noted that only simple, single class selectors are allowed (compound class selectors are not supported). * * Within a JavaScript animation, an object containing various event callback animation functions is expected to be returned. * As explained above, these callbacks are triggered based on the animation event. Therefore, if an enter animation is run, * and the JavaScript animation is found, then the enter callback will handle that animation (in addition to the CSS keyframe animation * or transition code that is defined via a stylesheet). * */ angular.module('ngAnimate', ['ng']) /** * @ngdoc provider * @name $animateProvider * @description * * The `$animateProvider` allows developers to register JavaScript animation event handlers directly inside of a module. * When an animation is triggered, the $animate service will query the $animateProvider to find any animations that match * the provided name value. * * Requires the {@link ngAnimate `ngAnimate`} module to be installed. * * Please visit the {@link ngAnimate `ngAnimate`} module overview page to learn more about how to use animations in your application. * */ //this private service is only used within CSS-enabled animations //IE8 + IE9 do not support rAF natively, but that is fine since they //also don't support transitions and keyframes which means that the code //below will never be used by the two browsers. .factory('$$animateReflow', ['$$rAF', '$document', function($$rAF, $document) { var bod = $document[0].body; return function(fn) { //the returned function acts as the cancellation function return $$rAF(function() { //the line below will force the browser to perform a repaint //so that all the animated elements within the animation frame //will be properly updated and drawn on screen. This is //required to perform multi-class CSS based animations with //Firefox. DO NOT REMOVE THIS LINE. var a = bod.offsetWidth + 1; fn(); }); }; }]) .config(['$provide', '$animateProvider', function($provide, $animateProvider) { var noop = angular.noop; var forEach = angular.forEach; var selectors = $animateProvider.$$selectors; var ELEMENT_NODE = 1; var NG_ANIMATE_STATE = '$$ngAnimateState'; var NG_ANIMATE_CLASS_NAME = 'ng-animate'; var rootAnimateState = {running: true}; function extractElementNode(element) { for(var i = 0; i < element.length; i++) { var elm = element[i]; if(elm.nodeType == ELEMENT_NODE) { return elm; } } } function stripCommentsFromElement(element) { return angular.element(extractElementNode(element)); } function isMatchingElement(elm1, elm2) { return extractElementNode(elm1) == extractElementNode(elm2); } $provide.decorator('$animate', ['$delegate', '$injector', '$sniffer', '$rootElement', '$$asyncCallback', '$rootScope', '$document', function($delegate, $injector, $sniffer, $rootElement, $$asyncCallback, $rootScope, $document) { var globalAnimationCounter = 0; $rootElement.data(NG_ANIMATE_STATE, rootAnimateState); // disable animations during bootstrap, but once we have bootstrapped, wait again // for another digest until enabling animations. 
The reason why we digest twice // is because all structural animations (enter, leave and move) all perform a // post digest operation before animating. If we only wait for a single digest // to pass then the structural animation would render its animation on page load. // (which is what we're trying to avoid when the application first boots up.) $rootScope.$$postDigest(function() { $rootScope.$$postDigest(function() { rootAnimateState.running = false; }); }); var classNameFilter = $animateProvider.classNameFilter(); var isAnimatableClassName = !classNameFilter ? function() { return true; } : function(className) { return classNameFilter.test(className); }; function lookup(name) { if (name) { var matches = [], flagMap = {}, classes = name.substr(1).split('.'); //the empty string value is the default animation //operation which performs CSS transition and keyframe //animations sniffing. This is always included for each //element animation procedure if the browser supports //transitions and/or keyframe animations if ($sniffer.transitions || $sniffer.animations) { classes.push(''); } for(var i=0; i < classes.length; i++) { var klass = classes[i], selectorFactoryName = selectors[klass]; if(selectorFactoryName && !flagMap[klass]) { matches.push($injector.get(selectorFactoryName)); flagMap[klass] = true; } } return matches; } } function animationRunner(element, animationEvent, className) { //transcluded directives may sometimes fire an animation using only comment nodes //best to catch this early on to prevent any animation operations from occurring var node = element[0]; if(!node) { return; } var isSetClassOperation = animationEvent == 'setClass'; var isClassBased = isSetClassOperation || animationEvent == 'addClass' || animationEvent == 'removeClass'; var classNameAdd, classNameRemove; if(angular.isArray(className)) { classNameAdd = className[0]; classNameRemove = className[1]; className = classNameAdd + ' ' + classNameRemove; } var currentClassName = element.attr('class'); var classes = currentClassName + ' ' + className; if(!isAnimatableClassName(classes)) { return; } var beforeComplete = noop, beforeCancel = [], before = [], afterComplete = noop, afterCancel = [], after = []; var animationLookup = (' ' + classes).replace(/\s+/g,'.'); forEach(lookup(animationLookup), function(animationFactory) { var created = registerAnimation(animationFactory, animationEvent); if(!created && isSetClassOperation) { registerAnimation(animationFactory, 'addClass'); registerAnimation(animationFactory, 'removeClass'); } }); function registerAnimation(animationFactory, event) { var afterFn = animationFactory[event]; var beforeFn = animationFactory['before' + event.charAt(0).toUpperCase() + event.substr(1)]; if(afterFn || beforeFn) { if(event == 'leave') { beforeFn = afterFn; //when set as null then animation knows to skip this phase afterFn = null; } after.push({ event : event, fn : afterFn }); before.push({ event : event, fn : beforeFn }); return true; } } function run(fns, cancellations, allCompleteFn) { var animations = []; forEach(fns, function(animation) { animation.fn && animations.push(animation); }); var count = 0; function afterAnimationComplete(index) { if(cancellations) { (cancellations[index] || noop)(); if(++count < animations.length) return; cancellations = null; } allCompleteFn(); } //The code below adds directly to the array in order to work with //both sync and async animations. Sync animations are when the done() //operation is called right away. DO NOT REFACTOR! 
forEach(animations, function(animation, index) { var progress = function() { afterAnimationComplete(index); }; switch(animation.event) { case 'setClass': cancellations.push(animation.fn(element, classNameAdd, classNameRemove, progress)); break; case 'addClass': cancellations.push(animation.fn(element, classNameAdd || className, progress)); break; case 'removeClass': cancellations.push(animation.fn(element, classNameRemove || className, progress)); break; default: cancellations.push(animation.fn(element, progress)); break; } }); if(cancellations && cancellations.length === 0) { allCompleteFn(); } } return { node : node, event : animationEvent, className : className, isClassBased : isClassBased, isSetClassOperation : isSetClassOperation, before : function(allCompleteFn) { beforeComplete = allCompleteFn; run(before, beforeCancel, function() { beforeComplete = noop; allCompleteFn(); }); }, after : function(allCompleteFn) { afterComplete = allCompleteFn; run(after, afterCancel, function() { afterComplete = noop; allCompleteFn(); }); }, cancel : function() { if(beforeCancel) { forEach(beforeCancel, function(cancelFn) { (cancelFn || noop)(true); }); beforeComplete(true); } if(afterCancel) { forEach(afterCancel, function(cancelFn) { (cancelFn || noop)(true); }); afterComplete(true); } } }; } /** * @ngdoc service * @name $animate * @function * * @description * The `$animate` service provides animation detection support while performing DOM operations (enter, leave and move) as well as during addClass and removeClass operations. * When any of these operations are run, the $animate service * will examine any JavaScript-defined animations (which are defined by using the $animateProvider provider object) * as well as any CSS-defined animations against the CSS classes present on the element once the DOM operation is run. * * The `$animate` service is used behind the scenes with pre-existing directives and animation with these directives * will work out of the box without any extra configuration. * * Requires the {@link ngAnimate `ngAnimate`} module to be installed. * * Please visit the {@link ngAnimate `ngAnimate`} module overview page learn more about how to use animations in your application. * */ return { /** * @ngdoc method * @name $animate#enter * @function * * @description * Appends the element to the parentElement element that resides in the document and then runs the enter animation. Once * the animation is started, the following CSS classes will be present on the element for the duration of the animation: * * Below is a breakdown of each step that occurs during enter animation: * * | Animation Step | What the element class attribute looks like | * |----------------------------------------------------------------------------------------------|---------------------------------------------| * | 1. $animate.enter(...) is called | class="my-animation" | * | 2. element is inserted into the parentElement element or beside the afterElement element | class="my-animation" | * | 3. $animate runs any JavaScript-defined animations on the element | class="my-animation ng-animate" | * | 4. the .ng-enter class is added to the element | class="my-animation ng-animate ng-enter" | * | 5. $animate scans the element styles to get the CSS transition/animation duration and delay | class="my-animation ng-animate ng-enter" | * | 6. $animate waits for 10ms (this performs a reflow) | class="my-animation ng-animate ng-enter" | * | 7. 
the .ng-enter-active and .ng-animate-active classes are added (this triggers the CSS transition/animation) | class="my-animation ng-animate ng-animate-active ng-enter ng-enter-active" | * | 8. $animate waits for X milliseconds for the animation to complete | class="my-animation ng-animate ng-animate-active ng-enter ng-enter-active" | * | 9. The animation ends and all generated CSS classes are removed from the element | class="my-animation" | * | 10. The doneCallback() callback is fired (if provided) | class="my-animation" | * * @param {DOMElement} element the element that will be the focus of the enter animation * @param {DOMElement} parentElement the parent element of the element that will be the focus of the enter animation * @param {DOMElement} afterElement the sibling element (which is the previous element) of the element that will be the focus of the enter animation * @param {function()=} doneCallback the callback function that will be called once the animation is complete */ enter : function(element, parentElement, afterElement, doneCallback) { this.enabled(false, element); $delegate.enter(element, parentElement, afterElement); $rootScope.$$postDigest(function() { element = stripCommentsFromElement(element); performAnimation('enter', 'ng-enter', element, parentElement, afterElement, noop, doneCallback); }); }, /** * @ngdoc method * @name $animate#leave * @function * * @description * Runs the leave animation operation and, upon completion, removes the element from the DOM. Once * the animation is started, the following CSS classes will be added for the duration of the animation: * * Below is a breakdown of each step that occurs during leave animation: * * | Animation Step | What the element class attribute looks like | * |----------------------------------------------------------------------------------------------|---------------------------------------------| * | 1. $animate.leave(...) is called | class="my-animation" | * | 2. $animate runs any JavaScript-defined animations on the element | class="my-animation ng-animate" | * | 3. the .ng-leave class is added to the element | class="my-animation ng-animate ng-leave" | * | 4. $animate scans the element styles to get the CSS transition/animation duration and delay | class="my-animation ng-animate ng-leave" | * | 5. $animate waits for 10ms (this performs a reflow) | class="my-animation ng-animate ng-leave" | * | 6. the .ng-leave-active and .ng-animate-active classes is added (this triggers the CSS transition/animation) | class="my-animation ng-animate ng-animate-active ng-leave ng-leave-active" | * | 7. $animate waits for X milliseconds for the animation to complete | class="my-animation ng-animate ng-animate-active ng-leave ng-leave-active" | * | 8. The animation ends and all generated CSS classes are removed from the element | class="my-animation" | * | 9. The element is removed from the DOM | ... | * | 10. The doneCallback() callback is fired (if provided) | ... 
| * * @param {DOMElement} element the element that will be the focus of the leave animation * @param {function()=} doneCallback the callback function that will be called once the animation is complete */ leave : function(element, doneCallback) { cancelChildAnimations(element); this.enabled(false, element); $rootScope.$$postDigest(function() { performAnimation('leave', 'ng-leave', stripCommentsFromElement(element), null, null, function() { $delegate.leave(element); }, doneCallback); }); }, /** * @ngdoc method * @name $animate#move * @function * * @description * Fires the move DOM operation. Just before the animation starts, the animate service will either append it into the parentElement container or * add the element directly after the afterElement element if present. Then the move animation will be run. Once * the animation is started, the following CSS classes will be added for the duration of the animation: * * Below is a breakdown of each step that occurs during move animation: * * | Animation Step | What the element class attribute looks like | * |----------------------------------------------------------------------------------------------|---------------------------------------------| * | 1. $animate.move(...) is called | class="my-animation" | * | 2. element is moved into the parentElement element or beside the afterElement element | class="my-animation" | * | 3. $animate runs any JavaScript-defined animations on the element | class="my-animation ng-animate" | * | 4. the .ng-move class is added to the element | class="my-animation ng-animate ng-move" | * | 5. $animate scans the element styles to get the CSS transition/animation duration and delay | class="my-animation ng-animate ng-move" | * | 6. $animate waits for 10ms (this performs a reflow) | class="my-animation ng-animate ng-move" | * | 7. the .ng-move-active and .ng-animate-active classes is added (this triggers the CSS transition/animation) | class="my-animation ng-animate ng-animate-active ng-move ng-move-active" | * | 8. $animate waits for X milliseconds for the animation to complete | class="my-animation ng-animate ng-animate-active ng-move ng-move-active" | * | 9. The animation ends and all generated CSS classes are removed from the element | class="my-animation" | * | 10. The doneCallback() callback is fired (if provided) | class="my-animation" | * * @param {DOMElement} element the element that will be the focus of the move animation * @param {DOMElement} parentElement the parentElement element of the element that will be the focus of the move animation * @param {DOMElement} afterElement the sibling element (which is the previous element) of the element that will be the focus of the move animation * @param {function()=} doneCallback the callback function that will be called once the animation is complete */ move : function(element, parentElement, afterElement, doneCallback) { cancelChildAnimations(element); this.enabled(false, element); $delegate.move(element, parentElement, afterElement); $rootScope.$$postDigest(function() { element = stripCommentsFromElement(element); performAnimation('move', 'ng-move', element, parentElement, afterElement, noop, doneCallback); }); }, /** * @ngdoc method * @name $animate#addClass * * @description * Triggers a custom animation event based off the className variable and then attaches the className value to the element as a CSS class. 
* Unlike the other animation methods, the animate service will suffix the className value with {@type -add} in order to provide * the animate service the setup and active CSS classes in order to trigger the animation (this will be skipped if no CSS transitions * or keyframes are defined on the -add or base CSS class). * * Below is a breakdown of each step that occurs during addClass animation: * * | Animation Step | What the element class attribute looks like | * |------------------------------------------------------------------------------------------------|---------------------------------------------| * | 1. $animate.addClass(element, 'super') is called | class="my-animation" | * | 2. $animate runs any JavaScript-defined animations on the element | class="my-animation ng-animate" | * | 3. the .super-add class are added to the element | class="my-animation ng-animate super-add" | * | 4. $animate scans the element styles to get the CSS transition/animation duration and delay | class="my-animation ng-animate super-add" | * | 5. $animate waits for 10ms (this performs a reflow) | class="my-animation ng-animate super-add" | * | 6. the .super, .super-add-active and .ng-animate-active classes are added (this triggers the CSS transition/animation) | class="my-animation ng-animate ng-animate-active super super-add super-add-active" | * | 7. $animate waits for X milliseconds for the animation to complete | class="my-animation super super-add super-add-active" | * | 8. The animation ends and all generated CSS classes are removed from the element | class="my-animation super" | * | 9. The super class is kept on the element | class="my-animation super" | * | 10. The doneCallback() callback is fired (if provided) | class="my-animation super" | * * @param {DOMElement} element the element that will be animated * @param {string} className the CSS class that will be added to the element and then animated * @param {function()=} doneCallback the callback function that will be called once the animation is complete */ addClass : function(element, className, doneCallback) { element = stripCommentsFromElement(element); performAnimation('addClass', className, element, null, null, function() { $delegate.addClass(element, className); }, doneCallback); }, /** * @ngdoc method * @name $animate#removeClass * * @description * Triggers a custom animation event based off the className variable and then removes the CSS class provided by the className value * from the element. Unlike the other animation methods, the animate service will suffix the className value with {@type -remove} in * order to provide the animate service the setup and active CSS classes in order to trigger the animation (this will be skipped if * no CSS transitions or keyframes are defined on the -remove or base CSS classes). * * Below is a breakdown of each step that occurs during removeClass animation: * * | Animation Step | What the element class attribute looks like | * |-----------------------------------------------------------------------------------------------|---------------------------------------------| * | 1. $animate.removeClass(element, 'super') is called | class="my-animation super" | * | 2. $animate runs any JavaScript-defined animations on the element | class="my-animation super ng-animate" | * | 3. the .super-remove class are added to the element | class="my-animation super ng-animate super-remove"| * | 4. 
$animate scans the element styles to get the CSS transition/animation duration and delay | class="my-animation super ng-animate super-remove" | * | 5. $animate waits for 10ms (this performs a reflow) | class="my-animation super ng-animate super-remove" | * | 6. the .super-remove-active and .ng-animate-active classes are added and .super is removed (this triggers the CSS transition/animation) | class="my-animation ng-animate ng-animate-active super-remove super-remove-active" | * | 7. $animate waits for X milliseconds for the animation to complete | class="my-animation ng-animate ng-animate-active super-remove super-remove-active" | * | 8. The animation ends and all generated CSS classes are removed from the element | class="my-animation" | * | 9. The doneCallback() callback is fired (if provided) | class="my-animation" | * * * @param {DOMElement} element the element that will be animated * @param {string} className the CSS class that will be animated and then removed from the element * @param {function()=} doneCallback the callback function that will be called once the animation is complete */ removeClass : function(element, className, doneCallback) { element = stripCommentsFromElement(element); performAnimation('removeClass', className, element, null, null, function() { $delegate.removeClass(element, className); }, doneCallback); }, /** * * @ngdoc function * @name $animate#setClass * @function * @description Adds and/or removes the given CSS classes to and from the element. * Once complete, the done() callback will be fired (if provided). * @param {DOMElement} element the element which will it's CSS classes changed * removed from it * @param {string} add the CSS classes which will be added to the element * @param {string} remove the CSS class which will be removed from the element * @param {Function=} done the callback function (if provided) that will be fired after the * CSS classes have been set on the element */ setClass : function(element, add, remove, doneCallback) { element = stripCommentsFromElement(element); performAnimation('setClass', [add, remove], element, null, null, function() { $delegate.setClass(element, add, remove); }, doneCallback); }, /** * @ngdoc method * @name $animate#enabled * @function * * @param {boolean=} value If provided then set the animation on or off. * @param {DOMElement=} element If provided then the element will be used to represent the enable/disable operation * @return {boolean} Current animation state. * * @description * Globally enables/disables animations. * */ enabled : function(value, element) { switch(arguments.length) { case 2: if(value) { cleanup(element); } else { var data = element.data(NG_ANIMATE_STATE) || {}; data.disabled = true; element.data(NG_ANIMATE_STATE, data); } break; case 1: rootAnimateState.disabled = !value; break; default: value = !rootAnimateState.disabled; break; } return !!value; } }; /* all animations call this shared animation triggering function internally. The animationEvent variable refers to the JavaScript animation event that will be triggered and the className value is the name of the animation that will be applied within the CSS code. Element, parentElement and afterElement are provided DOM elements for the animation and the onComplete callback will be fired once the animation is fully complete. 
*/ function performAnimation(animationEvent, className, element, parentElement, afterElement, domOperation, doneCallback) { var runner = animationRunner(element, animationEvent, className); if(!runner) { fireDOMOperation(); fireBeforeCallbackAsync(); fireAfterCallbackAsync(); closeAnimation(); return; } className = runner.className; var elementEvents = angular.element._data(runner.node); elementEvents = elementEvents && elementEvents.events; if (!parentElement) { parentElement = afterElement ? afterElement.parent() : element.parent(); } var ngAnimateState = element.data(NG_ANIMATE_STATE) || {}; var runningAnimations = ngAnimateState.active || {}; var totalActiveAnimations = ngAnimateState.totalActive || 0; var lastAnimation = ngAnimateState.last; //only allow animations if the currently running animation is not structural //or if there is no animation running at all var skipAnimations = runner.isClassBased ? ngAnimateState.disabled || (lastAnimation && !lastAnimation.isClassBased) : false; //skip the animation if animations are disabled, a parent is already being animated, //the element is not currently attached to the document body or then completely close //the animation if any matching animations are not found at all. //NOTE: IE8 + IE9 should close properly (run closeAnimation()) in case an animation was found. if (skipAnimations || animationsDisabled(element, parentElement)) { fireDOMOperation(); fireBeforeCallbackAsync(); fireAfterCallbackAsync(); closeAnimation(); return; } var skipAnimation = false; if(totalActiveAnimations > 0) { var animationsToCancel = []; if(!runner.isClassBased) { if(animationEvent == 'leave' && runningAnimations['ng-leave']) { skipAnimation = true; } else { //cancel all animations when a structural animation takes place for(var klass in runningAnimations) { animationsToCancel.push(runningAnimations[klass]); cleanup(element, klass); } runningAnimations = {}; totalActiveAnimations = 0; } } else if(lastAnimation.event == 'setClass') { animationsToCancel.push(lastAnimation); cleanup(element, className); } else if(runningAnimations[className]) { var current = runningAnimations[className]; if(current.event == animationEvent) { skipAnimation = true; } else { animationsToCancel.push(current); cleanup(element, className); } } if(animationsToCancel.length > 0) { forEach(animationsToCancel, function(operation) { operation.cancel(); }); } } if(runner.isClassBased && !runner.isSetClassOperation && !skipAnimation) { skipAnimation = (animationEvent == 'addClass') == element.hasClass(className); //opposite of XOR } if(skipAnimation) { fireBeforeCallbackAsync(); fireAfterCallbackAsync(); fireDoneCallbackAsync(); return; } if(animationEvent == 'leave') { //there's no need to ever remove the listener since the element //will be removed (destroyed) after the leave animation ends or //is cancelled midway element.one('$destroy', function(e) { var element = angular.element(this); var state = element.data(NG_ANIMATE_STATE); if(state) { var activeLeaveAnimation = state.active['ng-leave']; if(activeLeaveAnimation) { activeLeaveAnimation.cancel(); cleanup(element, 'ng-leave'); } } }); } //the ng-animate class does nothing, but it's here to allow for //parent animations to find and cancel child animations when needed element.addClass(NG_ANIMATE_CLASS_NAME); var localAnimationCount = globalAnimationCounter++; totalActiveAnimations++; runningAnimations[className] = runner; element.data(NG_ANIMATE_STATE, { last : runner, active : runningAnimations, index : localAnimationCount, totalActive : 
totalActiveAnimations }); //first we run the before animations and when all of those are complete //then we perform the DOM operation and run the next set of animations fireBeforeCallbackAsync(); runner.before(function(cancelled) { var data = element.data(NG_ANIMATE_STATE); cancelled = cancelled || !data || !data.active[className] || (runner.isClassBased && data.active[className].event != animationEvent); fireDOMOperation(); if(cancelled === true) { closeAnimation(); } else { fireAfterCallbackAsync(); runner.after(closeAnimation); } }); function fireDOMCallback(animationPhase) { var eventName = '$animate:' + animationPhase; if(elementEvents && elementEvents[eventName] && elementEvents[eventName].length > 0) { $$asyncCallback(function() { element.triggerHandler(eventName, { event : animationEvent, className : className }); }); } } function fireBeforeCallbackAsync() { fireDOMCallback('before'); } function fireAfterCallbackAsync() { fireDOMCallback('after'); } function fireDoneCallbackAsync() { fireDOMCallback('close'); if(doneCallback) { $$asyncCallback(function() { doneCallback(); }); } } //it is less complicated to use a flag than managing and canceling //timeouts containing multiple callbacks. function fireDOMOperation() { if(!fireDOMOperation.hasBeenRun) { fireDOMOperation.hasBeenRun = true; domOperation(); } } function closeAnimation() { if(!closeAnimation.hasBeenRun) { closeAnimation.hasBeenRun = true; var data = element.data(NG_ANIMATE_STATE); if(data) { /* only structural animations wait for reflow before removing an animation, but class-based animations don't. An example of this failing would be when a parent HTML tag has a ng-class attribute causing ALL directives below to skip animations during the digest */ if(runner && runner.isClassBased) { cleanup(element, className); } else { $$asyncCallback(function() { var data = element.data(NG_ANIMATE_STATE) || {}; if(localAnimationCount == data.index) { cleanup(element, className, animationEvent); } }); element.data(NG_ANIMATE_STATE, data); } } fireDoneCallbackAsync(); } } } function cancelChildAnimations(element) { var node = extractElementNode(element); if (node) { var nodes = angular.isFunction(node.getElementsByClassName) ? node.getElementsByClassName(NG_ANIMATE_CLASS_NAME) : node.querySelectorAll('.' + NG_ANIMATE_CLASS_NAME); forEach(nodes, function(element) { element = angular.element(element); var data = element.data(NG_ANIMATE_STATE); if(data && data.active) { forEach(data.active, function(runner) { runner.cancel(); }); } }); } } function cleanup(element, className) { if(isMatchingElement(element, $rootElement)) { if(!rootAnimateState.disabled) { rootAnimateState.running = false; rootAnimateState.structural = false; } } else if(className) { var data = element.data(NG_ANIMATE_STATE) || {}; var removeAnimations = className === true; if(!removeAnimations && data.active && data.active[className]) { data.totalActive--; delete data.active[className]; } if(removeAnimations || !data.totalActive) { element.removeClass(NG_ANIMATE_CLASS_NAME); element.removeData(NG_ANIMATE_STATE); } } } function animationsDisabled(element, parentElement) { if (rootAnimateState.disabled) return true; if(isMatchingElement(element, $rootElement)) { return rootAnimateState.disabled || rootAnimateState.running; } do { //the element did not reach the root element which means that it //is not apart of the DOM. 
Therefore there is no reason to do //any animations on it if(parentElement.length === 0) break; var isRoot = isMatchingElement(parentElement, $rootElement); var state = isRoot ? rootAnimateState : parentElement.data(NG_ANIMATE_STATE); var result = state && (!!state.disabled || state.running || state.totalActive > 0); if(isRoot || result) { return result; } if(isRoot) return true; } while(parentElement = parentElement.parent()); return true; } }]); $animateProvider.register('', ['$window', '$sniffer', '$timeout', '$$animateReflow', function($window, $sniffer, $timeout, $$animateReflow) { // Detect proper transitionend/animationend event names. var CSS_PREFIX = '', TRANSITION_PROP, TRANSITIONEND_EVENT, ANIMATION_PROP, ANIMATIONEND_EVENT; // If unprefixed events are not supported but webkit-prefixed are, use the latter. // Otherwise, just use W3C names, browsers not supporting them at all will just ignore them. // Note: Chrome implements `window.onwebkitanimationend` and doesn't implement `window.onanimationend` // but at the same time dispatches the `animationend` event and not `webkitAnimationEnd`. // Register both events in case `window.onanimationend` is not supported because of that, // do the same for `transitionend` as Safari is likely to exhibit similar behavior. // Also, the only modern browser that uses vendor prefixes for transitions/keyframes is webkit // therefore there is no reason to test anymore for other vendor prefixes: http://caniuse.com/#search=transition if (window.ontransitionend === undefined && window.onwebkittransitionend !== undefined) { CSS_PREFIX = '-webkit-'; TRANSITION_PROP = 'WebkitTransition'; TRANSITIONEND_EVENT = 'webkitTransitionEnd transitionend'; } else { TRANSITION_PROP = 'transition'; TRANSITIONEND_EVENT = 'transitionend'; } if (window.onanimationend === undefined && window.onwebkitanimationend !== undefined) { CSS_PREFIX = '-webkit-'; ANIMATION_PROP = 'WebkitAnimation'; ANIMATIONEND_EVENT = 'webkitAnimationEnd animationend'; } else { ANIMATION_PROP = 'animation'; ANIMATIONEND_EVENT = 'animationend'; } var DURATION_KEY = 'Duration'; var PROPERTY_KEY = 'Property'; var DELAY_KEY = 'Delay'; var ANIMATION_ITERATION_COUNT_KEY = 'IterationCount'; var NG_ANIMATE_PARENT_KEY = '$$ngAnimateKey'; var NG_ANIMATE_CSS_DATA_KEY = '$$ngAnimateCSS3Data'; var NG_ANIMATE_BLOCK_CLASS_NAME = 'ng-animate-block-transitions'; var ELAPSED_TIME_MAX_DECIMAL_PLACES = 3; var CLOSING_TIME_BUFFER = 1.5; var ONE_SECOND = 1000; var lookupCache = {}; var parentCounter = 0; var animationReflowQueue = []; var cancelAnimationReflow; function afterReflow(element, callback) { if(cancelAnimationReflow) { cancelAnimationReflow(); } animationReflowQueue.push(callback); cancelAnimationReflow = $$animateReflow(function() { forEach(animationReflowQueue, function(fn) { fn(); }); animationReflowQueue = []; cancelAnimationReflow = null; lookupCache = {}; }); } var closingTimer = null; var closingTimestamp = 0; var animationElementQueue = []; function animationCloseHandler(element, totalTime) { var node = extractElementNode(element); element = angular.element(node); //this item will be garbage collected by the closing //animation timeout animationElementQueue.push(element); //but it may not need to cancel out the existing timeout //if the timestamp is less than the previous one var futureTimestamp = Date.now() + (totalTime * 1000); if(futureTimestamp <= closingTimestamp) { return; } $timeout.cancel(closingTimer); closingTimestamp = futureTimestamp; closingTimer = $timeout(function() { 
closeAllAnimations(animationElementQueue); animationElementQueue = []; }, totalTime, false); } function closeAllAnimations(elements) { forEach(elements, function(element) { var elementData = element.data(NG_ANIMATE_CSS_DATA_KEY); if(elementData) { (elementData.closeAnimationFn || noop)(); } }); } function getElementAnimationDetails(element, cacheKey) { var data = cacheKey ? lookupCache[cacheKey] : null; if(!data) { var transitionDuration = 0; var transitionDelay = 0; var animationDuration = 0; var animationDelay = 0; var transitionDelayStyle; var animationDelayStyle; var transitionDurationStyle; var transitionPropertyStyle; //we want all the styles defined before and after forEach(element, function(element) { if (element.nodeType == ELEMENT_NODE) { var elementStyles = $window.getComputedStyle(element) || {}; transitionDurationStyle = elementStyles[TRANSITION_PROP + DURATION_KEY]; transitionDuration = Math.max(parseMaxTime(transitionDurationStyle), transitionDuration); transitionPropertyStyle = elementStyles[TRANSITION_PROP + PROPERTY_KEY]; transitionDelayStyle = elementStyles[TRANSITION_PROP + DELAY_KEY]; transitionDelay = Math.max(parseMaxTime(transitionDelayStyle), transitionDelay); animationDelayStyle = elementStyles[ANIMATION_PROP + DELAY_KEY]; animationDelay = Math.max(parseMaxTime(animationDelayStyle), animationDelay); var aDuration = parseMaxTime(elementStyles[ANIMATION_PROP + DURATION_KEY]); if(aDuration > 0) { aDuration *= parseInt(elementStyles[ANIMATION_PROP + ANIMATION_ITERATION_COUNT_KEY], 10) || 1; } animationDuration = Math.max(aDuration, animationDuration); } }); data = { total : 0, transitionPropertyStyle: transitionPropertyStyle, transitionDurationStyle: transitionDurationStyle, transitionDelayStyle: transitionDelayStyle, transitionDelay: transitionDelay, transitionDuration: transitionDuration, animationDelayStyle: animationDelayStyle, animationDelay: animationDelay, animationDuration: animationDuration }; if(cacheKey) { lookupCache[cacheKey] = data; } } return data; } function parseMaxTime(str) { var maxValue = 0; var values = angular.isString(str) ? str.split(/\s*,\s*/) : []; forEach(values, function(value) { maxValue = Math.max(parseFloat(value) || 0, maxValue); }); return maxValue; } function getCacheKey(element) { var parentElement = element.parent(); var parentID = parentElement.data(NG_ANIMATE_PARENT_KEY); if(!parentID) { parentElement.data(NG_ANIMATE_PARENT_KEY, ++parentCounter); parentID = parentCounter; } return parentID + '-' + extractElementNode(element).className; } function animateSetup(animationEvent, element, className, calculationDecorator) { var cacheKey = getCacheKey(element); var eventCacheKey = cacheKey + ' ' + className; var itemIndex = lookupCache[eventCacheKey] ? 
++lookupCache[eventCacheKey].total : 0; var stagger = {}; if(itemIndex > 0) { var staggerClassName = className + '-stagger'; var staggerCacheKey = cacheKey + ' ' + staggerClassName; var applyClasses = !lookupCache[staggerCacheKey]; applyClasses && element.addClass(staggerClassName); stagger = getElementAnimationDetails(element, staggerCacheKey); applyClasses && element.removeClass(staggerClassName); } /* the animation itself may need to add/remove special CSS classes * before calculating the anmation styles */ calculationDecorator = calculationDecorator || function(fn) { return fn(); }; element.addClass(className); var formerData = element.data(NG_ANIMATE_CSS_DATA_KEY) || {}; var timings = calculationDecorator(function() { return getElementAnimationDetails(element, eventCacheKey); }); var transitionDuration = timings.transitionDuration; var animationDuration = timings.animationDuration; if(transitionDuration === 0 && animationDuration === 0) { element.removeClass(className); return false; } element.data(NG_ANIMATE_CSS_DATA_KEY, { running : formerData.running || 0, itemIndex : itemIndex, stagger : stagger, timings : timings, closeAnimationFn : noop }); //temporarily disable the transition so that the enter styles //don't animate twice (this is here to avoid a bug in Chrome/FF). var isCurrentlyAnimating = formerData.running > 0 || animationEvent == 'setClass'; if(transitionDuration > 0) { blockTransitions(element, className, isCurrentlyAnimating); } //staggering keyframe animations work by adjusting the `animation-delay` CSS property //on the given element, however, the delay value can only calculated after the reflow //since by that time $animate knows how many elements are being animated. Therefore, //until the reflow occurs the element needs to be blocked (where the keyframe animation //is set to `none 0s`). This blocking mechanism should only be set for when a stagger //animation is detected and when the element item index is greater than 0. if(animationDuration > 0 && stagger.animationDelay > 0 && stagger.animationDuration === 0) { blockKeyframeAnimations(element); } return true; } function isStructuralAnimation(className) { return className == 'ng-enter' || className == 'ng-move' || className == 'ng-leave'; } function blockTransitions(element, className, isAnimating) { if(isStructuralAnimation(className) || !isAnimating) { extractElementNode(element).style[TRANSITION_PROP + PROPERTY_KEY] = 'none'; } else { element.addClass(NG_ANIMATE_BLOCK_CLASS_NAME); } } function blockKeyframeAnimations(element) { extractElementNode(element).style[ANIMATION_PROP] = 'none 0s'; } function unblockTransitions(element, className) { var prop = TRANSITION_PROP + PROPERTY_KEY; var node = extractElementNode(element); if(node.style[prop] && node.style[prop].length > 0) { node.style[prop] = ''; } element.removeClass(NG_ANIMATE_BLOCK_CLASS_NAME); } function unblockKeyframeAnimations(element) { var prop = ANIMATION_PROP; var node = extractElementNode(element); if(node.style[prop] && node.style[prop].length > 0) { node.style[prop] = ''; } } function animateRun(animationEvent, element, className, activeAnimationComplete) { var node = extractElementNode(element); var elementData = element.data(NG_ANIMATE_CSS_DATA_KEY); if(node.className.indexOf(className) == -1 || !elementData) { activeAnimationComplete(); return; } var activeClassName = ''; forEach(className.split(' '), function(klass, i) { activeClassName += (i > 0 ? 
' ' : '') + klass + '-active'; }); var stagger = elementData.stagger; var timings = elementData.timings; var itemIndex = elementData.itemIndex; var maxDuration = Math.max(timings.transitionDuration, timings.animationDuration); var maxDelay = Math.max(timings.transitionDelay, timings.animationDelay); var maxDelayTime = maxDelay * ONE_SECOND; var startTime = Date.now(); var css3AnimationEvents = ANIMATIONEND_EVENT + ' ' + TRANSITIONEND_EVENT; var style = '', appliedStyles = []; if(timings.transitionDuration > 0) { var propertyStyle = timings.transitionPropertyStyle; if(propertyStyle.indexOf('all') == -1) { style += CSS_PREFIX + 'transition-property: ' + propertyStyle + ';'; style += CSS_PREFIX + 'transition-duration: ' + timings.transitionDurationStyle + ';'; appliedStyles.push(CSS_PREFIX + 'transition-property'); appliedStyles.push(CSS_PREFIX + 'transition-duration'); } } if(itemIndex > 0) { if(stagger.transitionDelay > 0 && stagger.transitionDuration === 0) { var delayStyle = timings.transitionDelayStyle; style += CSS_PREFIX + 'transition-delay: ' + prepareStaggerDelay(delayStyle, stagger.transitionDelay, itemIndex) + '; '; appliedStyles.push(CSS_PREFIX + 'transition-delay'); } if(stagger.animationDelay > 0 && stagger.animationDuration === 0) { style += CSS_PREFIX + 'animation-delay: ' + prepareStaggerDelay(timings.animationDelayStyle, stagger.animationDelay, itemIndex) + '; '; appliedStyles.push(CSS_PREFIX + 'animation-delay'); } } if(appliedStyles.length > 0) { //the element being animated may sometimes contain comment nodes in //the jqLite object, so we're safe to use a single variable to house //the styles since there is always only one element being animated var oldStyle = node.getAttribute('style') || ''; node.setAttribute('style', oldStyle + ' ' + style); } element.on(css3AnimationEvents, onAnimationProgress); element.addClass(activeClassName); elementData.closeAnimationFn = function() { onEnd(); activeAnimationComplete(); }; var staggerTime = itemIndex * (Math.max(stagger.animationDelay, stagger.transitionDelay) || 0); var animationTime = (maxDelay + maxDuration) * CLOSING_TIME_BUFFER; var totalTime = (staggerTime + animationTime) * ONE_SECOND; elementData.running++; animationCloseHandler(element, totalTime); return onEnd; // This will automatically be called by $animate so // there is no need to attach this internally to the // timeout done method. function onEnd(cancelled) { element.off(css3AnimationEvents, onAnimationProgress); element.removeClass(activeClassName); animateClose(element, className); var node = extractElementNode(element); for (var i in appliedStyles) { node.style.removeProperty(appliedStyles[i]); } } function onAnimationProgress(event) { event.stopPropagation(); var ev = event.originalEvent || event; var timeStamp = ev.$manualTimeStamp || ev.timeStamp || Date.now(); /* Firefox (or possibly just Gecko) likes to not round values up * when a ms measurement is used for the animation */ var elapsedTime = parseFloat(ev.elapsedTime.toFixed(ELAPSED_TIME_MAX_DECIMAL_PLACES)); /* $manualTimeStamp is a mocked timeStamp value which is set * within browserTrigger(). This is only here so that tests can * mock animations properly. Real events fallback to event.timeStamp, * or, if they don't, then a timeStamp is automatically created for them. 
* We're checking to see if the timeStamp surpasses the expected delay, * but we're using elapsedTime instead of the timeStamp on the 2nd * pre-condition since animations sometimes close off early */ if(Math.max(timeStamp - startTime, 0) >= maxDelayTime && elapsedTime >= maxDuration) { activeAnimationComplete(); } } } function prepareStaggerDelay(delayStyle, staggerDelay, index) { var style = ''; forEach(delayStyle.split(','), function(val, i) { style += (i > 0 ? ',' : '') + (index * staggerDelay + parseInt(val, 10)) + 's'; }); return style; } function animateBefore(animationEvent, element, className, calculationDecorator) { if(animateSetup(animationEvent, element, className, calculationDecorator)) { return function(cancelled) { cancelled && animateClose(element, className); }; } } function animateAfter(animationEvent, element, className, afterAnimationComplete) { if(element.data(NG_ANIMATE_CSS_DATA_KEY)) { return animateRun(animationEvent, element, className, afterAnimationComplete); } else { animateClose(element, className); afterAnimationComplete(); } } function animate(animationEvent, element, className, animationComplete) { //If the animateSetup function doesn't bother returning a //cancellation function then it means that there is no animation //to perform at all var preReflowCancellation = animateBefore(animationEvent, element, className); if(!preReflowCancellation) { animationComplete(); return; } //There are two cancellation functions: one is before the first //reflow animation and the second is during the active state //animation. The first function will take care of removing the //data from the element which will not make the 2nd animation //happen in the first place var cancel = preReflowCancellation; afterReflow(element, function() { unblockTransitions(element, className); unblockKeyframeAnimations(element); //once the reflow is complete then we point cancel to //the new cancellation function which will remove all of the //animation properties from the active animation cancel = animateAfter(animationEvent, element, className, animationComplete); }); return function(cancelled) { (cancel || noop)(cancelled); }; } function animateClose(element, className) { element.removeClass(className); var data = element.data(NG_ANIMATE_CSS_DATA_KEY); if(data) { if(data.running) { data.running--; } if(!data.running || data.running === 0) { element.removeData(NG_ANIMATE_CSS_DATA_KEY); } } } return { enter : function(element, animationCompleted) { return animate('enter', element, 'ng-enter', animationCompleted); }, leave : function(element, animationCompleted) { return animate('leave', element, 'ng-leave', animationCompleted); }, move : function(element, animationCompleted) { return animate('move', element, 'ng-move', animationCompleted); }, beforeSetClass : function(element, add, remove, animationCompleted) { var className = suffixClasses(remove, '-remove') + ' ' + suffixClasses(add, '-add'); var cancellationMethod = animateBefore('setClass', element, className, function(fn) { /* when classes are removed from an element then the transition style * that is applied is the transition defined on the element without the * CSS class being there. This is how CSS3 functions outside of ngAnimate. 
* http://plnkr.co/edit/j8OzgTNxHTb4n3zLyjGW?p=preview */ var klass = element.attr('class'); element.removeClass(remove); element.addClass(add); var timings = fn(); element.attr('class', klass); return timings; }); if(cancellationMethod) { afterReflow(element, function() { unblockTransitions(element, className); unblockKeyframeAnimations(element); animationCompleted(); }); return cancellationMethod; } animationCompleted(); }, beforeAddClass : function(element, className, animationCompleted) { var cancellationMethod = animateBefore('addClass', element, suffixClasses(className, '-add'), function(fn) { /* when a CSS class is added to an element then the transition style that * is applied is the transition defined on the element when the CSS class * is added at the time of the animation. This is how CSS3 functions * outside of ngAnimate. */ element.addClass(className); var timings = fn(); element.removeClass(className); return timings; }); if(cancellationMethod) { afterReflow(element, function() { unblockTransitions(element, className); unblockKeyframeAnimations(element); animationCompleted(); }); return cancellationMethod; } animationCompleted(); }, setClass : function(element, add, remove, animationCompleted) { remove = suffixClasses(remove, '-remove'); add = suffixClasses(add, '-add'); var className = remove + ' ' + add; return animateAfter('setClass', element, className, animationCompleted); }, addClass : function(element, className, animationCompleted) { return animateAfter('addClass', element, suffixClasses(className, '-add'), animationCompleted); }, beforeRemoveClass : function(element, className, animationCompleted) { var cancellationMethod = animateBefore('removeClass', element, suffixClasses(className, '-remove'), function(fn) { /* when classes are removed from an element then the transition style * that is applied is the transition defined on the element without the * CSS class being there. This is how CSS3 functions outside of ngAnimate. * http://plnkr.co/edit/j8OzgTNxHTb4n3zLyjGW?p=preview */ var klass = element.attr('class'); element.removeClass(className); var timings = fn(); element.attr('class', klass); return timings; }); if(cancellationMethod) { afterReflow(element, function() { unblockTransitions(element, className); unblockKeyframeAnimations(element); animationCompleted(); }); return cancellationMethod; } animationCompleted(); }, removeClass : function(element, className, animationCompleted) { return animateAfter('removeClass', element, suffixClasses(className, '-remove'), animationCompleted); } }; function suffixClasses(classes, suffix) { var className = ''; classes = angular.isArray(classes) ? classes : classes.split(/\s+/); forEach(classes, function(klass, i) { if(klass && klass.length > 0) { className += (i > 0 ? ' ' : '') + klass + suffix; } }); return className; } }]); }]); })(window, window.angular);<|fim▁end|>
 * } * &#64;-webkit-keyframes enter_sequence {
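The fallback close-timer arithmetic in the CSS driver above (`animateRun` feeding `animationCloseHandler`) is easy to sanity-check outside the DOM. Below is a minimal sketch, written in Python for brevity rather than JavaScript; the two constants mirror `CLOSING_TIME_BUFFER = 1.5` and `ONE_SECOND = 1000` from the module, while the sample index and durations are made up.

```python
# Illustrative restatement of ngAnimate's close-timer math (not part of the module).
CLOSING_TIME_BUFFER = 1.5   # same safety multiplier as in the JS source
ONE_SECOND = 1000           # milliseconds per second, as in the JS source

def total_close_time_ms(item_index, stagger_delay_s, max_delay_s, max_duration_s):
    """Milliseconds after which the fallback $timeout force-closes the animation."""
    stagger_time = item_index * stagger_delay_s                      # per-element stagger offset
    animation_time = (max_delay_s + max_duration_s) * CLOSING_TIME_BUFFER
    return (stagger_time + animation_time) * ONE_SECOND

# Hypothetical numbers: third element, 0.1s stagger, no delay, 1s duration.
assert total_close_time_ms(2, 0.1, 0.0, 1.0) == 1700.0
```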
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
""" pyvalence """ __version__ = '0.0.1.3'
<|file_name|>jinja_helper.py<|end_file_name|><|fim▁begin|># Copyright (c) 2016 Dustin Doloff # Licensed under Apache License v2.0 import jinja2 import os MESSAGE_FILL = '`' AUTO_GEN_MESSAGE = """ `````````````````````````````````````````````````````` `````````````````````````````````````````````````````` ````````______________________________________ `````` ```````/ /\ ````` ``````/ /..\ ```` `````/ AUTO-GENERATED FILE. DO NOT EDIT /....\ ``` ````/ /______\ `` ```/_____________________________________/```````````` `````````````````````````````````````````````````````` `````````````````````````````````````````````````````` """ def reverse(v): """ Reverses any iterable value """ return v[::-1] def auto_gen_message(open, fill, close): """ Produces the auto-generated warning header with language-spcific syntax open - str - The language-specific opening of the comment fill - str - The values to fill the background with close - str - The language-specific closing of the comment """ assert open or fill or close message = AUTO_GEN_MESSAGE.strip() if open: message = message.replace(MESSAGE_FILL * len(open), open, 1) if close: message = reverse(reverse(message).replace(MESSAGE_FILL * len(close), close[::-1], 1)) if fill: message = message.replace(MESSAGE_FILL * len(fill), fill) return message def generate(template, config, out_file, pretty=False): path, ext = os.path.splitext(out_file.name) ext = ext[1:] if pretty: if ext == 'py': out_file.write(auto_gen_message('#', '#', '')) elif ext == 'html':<|fim▁hole|> template_path, template_filename = os.path.split(template) env = jinja2.Environment(loader = jinja2.FileSystemLoader([template_path])) template = env.get_template(template_filename) template.stream(config).dump(out_file) # There needs to be an extra line at the end to make it a valid text file. Jinja strips trailing # whitespace if pretty: out_file.write(os.linesep)<|fim▁end|>
out_file.write(auto_gen_message('<!--', '-', '-->'))
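A minimal usage sketch for `generate` above; the template contents, file names and config are all hypothetical, and it assumes the module is importable as `jinja_helper`. With an `.html` output file and `pretty=True`, the banner produced by `auto_gen_message('<!--', '-', '-->')` is written first:

```python
# Hypothetical driver for jinja_helper.generate(); paths and config are invented.
import jinja_helper

with open('./page.html.jinja2', 'w') as t:
    t.write('<title>{{ title }}</title>')

with open('./page.html', 'w') as out:
    # pretty=True prepends the ASCII AUTO-GENERATED banner wrapped in <!-- ... -->
    jinja_helper.generate('./page.html.jinja2', {'title': 'Demo'}, out, pretty=True)
```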
<|file_name|>test_Sphere.py<|end_file_name|><|fim▁begin|>import batoid import numpy as np from test_helpers import timer, do_pickle, all_obj_diff, init_gpu, rays_allclose @timer def test_properties(): rng = np.random.default_rng(5) for i in range(100): R = rng.normal(0.0, 0.3) # negative allowed sphere = batoid.Sphere(R) assert sphere.R == R do_pickle(sphere) @timer def test_sag(): rng = np.random.default_rng(57) for i in range(100): R = 1./rng.normal(0.0, 0.3) sphere = batoid.Sphere(R) for j in range(10): x = rng.uniform(-0.7*abs(R), 0.7*abs(R)) y = rng.uniform(-0.7*abs(R), 0.7*abs(R)) result = sphere.sag(x, y) np.testing.assert_allclose( result, R*(1-np.sqrt(1.0-(x*x + y*y)/R/R)) ) # Check that it returned a scalar float and not an array assert isinstance(result, float) # Check 0,0 np.testing.assert_allclose(sphere.sag(0, 0), 0.0, rtol=0, atol=1e-17) # Check vectorization x = rng.uniform(-0.7*abs(R), 0.7*abs(R), size=(10, 10)) y = rng.uniform(-0.7*abs(R), 0.7*abs(R), size=(10, 10)) np.testing.assert_allclose( sphere.sag(x, y), R*(1-np.sqrt(1.0-(x*x + y*y)/R/R)) ) # Make sure non-unit stride arrays also work np.testing.assert_allclose( sphere.sag(x[::5,::2], y[::5,::2]), R*(1-np.sqrt(1.0-(x*x + y*y)/R/R))[::5,::2] ) do_pickle(sphere) @timer def test_normal(): rng = np.random.default_rng(577) for i in range(100): R = 1./rng.normal(0.0, 0.3) sphere = batoid.Sphere(R) for j in range(10): x = rng.uniform(-0.7*abs(R), 0.7*abs(R)) y = rng.uniform(-0.7*abs(R), 0.7*abs(R)) result = sphere.normal(x, y) r = np.hypot(x, y) rat = r/R dzdr = rat/np.sqrt(1-rat*rat) nz = 1/np.sqrt(1+dzdr*dzdr) normal = np.array([-x/r*dzdr*nz, -y/r*dzdr*nz, nz]) np.testing.assert_allclose(result, normal) # Check 0,0 np.testing.assert_equal(sphere.normal(0, 0), np.array([0, 0, 1])) # Check vectorization x = rng.uniform(-0.7*abs(R), 0.7*abs(R), size=(10, 10)) y = rng.uniform(-0.7*abs(R), 0.7*abs(R), size=(10, 10)) r = np.hypot(x, y) rat = r/R dzdr = rat/np.sqrt(1-rat*rat) nz = 1/np.sqrt(1+dzdr*dzdr) normal = np.dstack([-x/r*dzdr*nz, -y/r*dzdr*nz, nz]) np.testing.assert_allclose( sphere.normal(x, y), normal ) # Make sure non-unit stride arrays also work np.testing.assert_allclose( sphere.normal(x[::5,::2], y[::5,::2]), normal[::5, ::2] ) @timer def test_intersect(): rng = np.random.default_rng(5772) size = 10_000 for i in range(100): R = 1./rng.normal(0.0, 0.3) sphereCoordSys = batoid.CoordSys(origin=[0, 0, -1]) sphere = batoid.Sphere(R) x = rng.uniform(-0.3*abs(R), 0.3*abs(R), size=size) y = rng.uniform(-0.3*abs(R), 0.3*abs(R), size=size) z = np.full_like(x, -2*abs(R)) # If we shoot rays straight up, then it's easy to predict the intersection vx = np.zeros_like(x) vy = np.zeros_like(x) vz = np.ones_like(x) rv = batoid.RayVector(x, y, z, vx, vy, vz) np.testing.assert_allclose(rv.z, -2*abs(R)) rv2 = batoid.intersect(sphere, rv.copy(), sphereCoordSys) assert rv2.coordSys == sphereCoordSys rv2 = rv2.toCoordSys(batoid.CoordSys()) np.testing.assert_allclose(rv2.x, x) np.testing.assert_allclose(rv2.y, y) np.testing.assert_allclose(rv2.z, sphere.sag(x, y)-1, rtol=0, atol=1e-9) # Check default intersect coordTransform rv2 = rv.copy().toCoordSys(sphereCoordSys) batoid.intersect(sphere, rv2) assert rv2.coordSys == sphereCoordSys rv2 = rv2.toCoordSys(batoid.CoordSys()) np.testing.assert_allclose(rv2.x, x) np.testing.assert_allclose(rv2.y, y) np.testing.assert_allclose(rv2.z, sphere.sag(x, y)-1, rtol=0, atol=1e-9) @timer def test_reflect(): rng = np.random.default_rng(57721) size = 10_000<|fim▁hole|> x = rng.uniform(-0.3*abs(R), 
0.3*abs(R), size=size) y = rng.uniform(-0.3*abs(R), 0.3*abs(R), size=size) z = np.full_like(x, -2*abs(R)) vx = rng.uniform(-1e-5, 1e-5, size=size) vy = rng.uniform(-1e-5, 1e-5, size=size) vz = np.full_like(x, 1) rv = batoid.RayVector(x, y, z, vx, vy, vz) rvr = batoid.reflect(sphere, rv.copy()) rvr2 = sphere.reflect(rv.copy()) rays_allclose(rvr, rvr2) # print(f"{np.sum(rvr.failed)/len(rvr)*100:.2f}% failed") normal = sphere.normal(rvr.x, rvr.y) # Test law of reflection a0 = np.einsum("ad,ad->a", normal, rv.v)[~rvr.failed] a1 = np.einsum("ad,ad->a", normal, -rvr.v)[~rvr.failed] np.testing.assert_allclose( a0, a1, rtol=0, atol=1e-12 ) # Test that rv.v, rvr.v and normal are all in the same plane np.testing.assert_allclose( np.einsum( "ad,ad->a", np.cross(normal, rv.v), rv.v )[~rvr.failed], 0.0, rtol=0, atol=1e-12 ) @timer def test_refract(): rng = np.random.default_rng(577215) size = 10_000 for i in range(100): R = 1./rng.normal(0.0, 0.3) sphere = batoid.Sphere(R) m0 = batoid.ConstMedium(rng.normal(1.2, 0.01)) m1 = batoid.ConstMedium(rng.normal(1.3, 0.01)) x = rng.uniform(-0.3*abs(R), 0.3*abs(R), size=size) y = rng.uniform(-0.3*abs(R), 0.3*abs(R), size=size) z = np.full_like(x, -2*abs(R)) vx = rng.uniform(-1e-5, 1e-5, size=size) vy = rng.uniform(-1e-5, 1e-5, size=size) vz = np.sqrt(1-vx*vx-vy*vy)/m0.n rv = batoid.RayVector(x, y, z, vx, vy, vz) rvr = batoid.refract(sphere, rv.copy(), m0, m1) rvr2 = sphere.refract(rv.copy(), m0, m1) rays_allclose(rvr, rvr2) # print(f"{np.sum(rvr.failed)/len(rvr)*100:.2f}% failed") normal = sphere.normal(rvr.x, rvr.y) # Test Snell's law s0 = np.sum(np.cross(normal, rv.v*m0.n)[~rvr.failed], axis=-1) s1 = np.sum(np.cross(normal, rvr.v*m1.n)[~rvr.failed], axis=-1) np.testing.assert_allclose( m0.n*s0, m1.n*s1, rtol=0, atol=1e-9 ) # Test that rv.v, rvr.v and normal are all in the same plane np.testing.assert_allclose( np.einsum( "ad,ad->a", np.cross(normal, rv.v), rv.v )[~rvr.failed], 0.0, rtol=0, atol=1e-12 ) @timer def test_ne(): objs = [ batoid.Sphere(1.0), batoid.Sphere(2.0), batoid.Plane() ] all_obj_diff(objs) @timer def test_fail(): sphere = batoid.Sphere(1.0) rv = batoid.RayVector(0, 10, 0, 0, 0, -1) # Too far to side rv2 = batoid.intersect(sphere, rv.copy()) np.testing.assert_equal(rv2.failed, np.array([True])) # This one passes rv = batoid.RayVector(0, 0, 0, 0, 0, -1) rv2 = batoid.intersect(sphere, rv.copy()) np.testing.assert_equal(rv2.failed, np.array([False])) if __name__ == '__main__': test_properties() test_sag() test_normal() test_intersect() test_reflect() test_refract() test_ne() test_fail()<|fim▁end|>
for i in range(100): R = 1./rng.normal(0.0, 0.3) sphere = batoid.Sphere(R)
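The two identities asserted in `test_reflect` (equal angles against the surface normal, and coplanarity of the incoming ray, reflected ray and normal) can be checked with numpy alone. A minimal sketch, assuming the standard mirror formula v' = v - 2(v.n)n; the ray and normal values are made up:

```python
import numpy as np

n = np.array([0.0, 0.0, 1.0])             # unit surface normal (arbitrary choice)
v = np.array([1e-5, -2e-5, 1.0])          # incoming direction (made-up values)

v_reflected = v - 2.0 * np.dot(v, n) * n  # standard mirror-reflection formula

# The same two checks the test performs: equal angles, and coplanarity.
assert np.isclose(np.dot(n, v), np.dot(n, -v_reflected))
assert np.isclose(np.dot(np.cross(n, v), v), 0.0)
```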
<|file_name|>browserclient.ts<|end_file_name|><|fim▁begin|>/** * MHub client library using native browser WebSocket. */ import * as events from "events"; import { BaseClient, BaseClientOptions, Connection } from "./baseclient"; import * as protocol from "./protocol"; const DEFAULT_PORT_WS = 13900; const DEFAULT_PORT_WSS = 13901; /** * Options to be passed to MClient constructor. */ // tslint:disable-next-line:no-empty-interface export interface MClientOptions extends BaseClientOptions {} export const defaultClientOptions: MClientOptions = {}; <|fim▁hole|> constructor(url: string) { super(); this._socket = new WebSocket(url); this._socket.addEventListener("error", (e: any) => this.emit("error", e) ); this._socket.addEventListener("open", () => { this.emit("open"); }); this._socket.addEventListener("close", () => { this.emit("close"); }); this._socket.addEventListener("message", (event: MessageEvent) => { if (!event.data) { // Ignore empty 'lines' return; } const response = JSON.parse(event.data); this.emit("message", response); }); } /** * Transmit data object. * @return Promise that resolves when transmit is accepted (i.e. not necessarily * arrived at other side, can be e.g. queued). */ public send(data: protocol.Command): Promise<void> { return new Promise<void>((resolve) => { this._socket.send(JSON.stringify(data)); resolve(undefined); }); } /** * Gracefully close connection, i.e. allow pending transmissions * to be completed. * @return Promise that resolves when connection is succesfully closed. */ public close(code?: number): Promise<void> { let result: Promise<void>; if (!this._connected) { result = Promise.resolve(); } else { result = new Promise<void>((resolve) => { this._socket.addEventListener("close", () => resolve(undefined) ); }); } this._socket.close(code); return result; } /** * Forcefully close connection. * @return Promise that resolves when connection is succesfully closed. */ public terminate(): Promise<void> { return this.close(CLOSE_GOING_AWAY); } private get _connected(): boolean { return ( this._socket.readyState === WebSocket.CONNECTING || this._socket.readyState === WebSocket.OPEN ); } } /** * MHub client using server-side WebSocket. * * Allows subscribing and publishing to MHub server nodes. * * @event open() Emitted when connection was established. * @event close() Emitted when connection was closed. * @event error(e: Error) Emitted when there was a connection, server or protocol error. * @event message(m: Message) Emitted when message was received (due to subscription). */ export class MClient extends BaseClient { private _url: string; /** * Create new connection to MServer. * @param url Websocket URL of MServer, e.g. ws://localhost:13900 * @param options Optional options, see `MClientOptions`. */ constructor(url: string, options?: MClientOptions) { // Ensure options is an object and fill in defaults options = { ...defaultClientOptions, ...options }; // Prefix URL with "ws://" if needed if (url.indexOf("://") < 0) { url = "ws://" + url; } // Append default port if necessary if (!url.match(":\\d+$")) { const useTls = url.indexOf("wss://") === 0; url = url + ":" + (useTls ? DEFAULT_PORT_WSS : DEFAULT_PORT_WS); } super(() => new WebSocketConnection(url), options); this._url = url; } /** * Full URL of MHub connection. */ public get url(): string { return this._url; } } export default MClient;<|fim▁end|>
const CLOSE_GOING_AWAY = 1001; // https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent#Status_codes class WebSocketConnection extends events.EventEmitter implements Connection { private _socket: WebSocket;
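The `MClient` constructor in the row above normalizes its URL before connecting: a missing scheme is prefixed with `ws://`, and if no explicit port is present, the default port matching the scheme is appended. A hypothetical Python port of just that normalization step (the function name is made up; the port constants mirror `DEFAULT_PORT_WS`/`DEFAULT_PORT_WSS`):

```python
# Sketch of the URL normalization done in the MClient constructor above.
import re

DEFAULT_PORT_WS = 13900
DEFAULT_PORT_WSS = 13901

def normalize_mhub_url(url: str) -> str:
    if "://" not in url:
        url = "ws://" + url                       # default to plain WebSocket
    if not re.search(r":\d+$", url):              # no explicit port given
        use_tls = url.startswith("wss://")
        url += ":" + str(DEFAULT_PORT_WSS if use_tls else DEFAULT_PORT_WS)
    return url

assert normalize_mhub_url("localhost") == "ws://localhost:13900"
assert normalize_mhub_url("wss://example.com") == "wss://example.com:13901"
```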
<|file_name|>layout_debug.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Supports writing a trace file created during each layout scope //! that can be viewed by an external tool to make layout debugging easier. // for thread_local #![allow(unsafe_code)] use flow; use flow_ref::FlowRef; use rustc_serialize::json; use std::borrow::ToOwned; use std::cell::RefCell; use std::fs::File; use std::io::Write; #[cfg(debug_assertions)] use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize, Ordering}; thread_local!(static STATE_KEY: RefCell<Option<State>> = RefCell::new(None)); #[cfg(debug_assertions)] static DEBUG_ID_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT; pub struct Scope; #[macro_export] macro_rules! layout_debug_scope( ($($arg:tt)*) => ( if cfg!(debug_assertions) { layout_debug::Scope::new(format!($($arg)*))<|fim▁hole|> } else { layout_debug::Scope } ) ); #[derive(RustcEncodable)] struct ScopeData { name: String, pre: String, post: String, children: Vec<Box<ScopeData>>, } impl ScopeData { fn new(name: String, pre: String) -> ScopeData { ScopeData { name: name, pre: pre, post: String::new(), children: vec!(), } } } struct State { flow_root: FlowRef, scope_stack: Vec<Box<ScopeData>>, } /// A layout debugging scope. The entire state of the flow tree /// will be output at the beginning and end of this scope. impl Scope { pub fn new(name: String) -> Scope { STATE_KEY.with(|ref r| { match *r.borrow_mut() { Some(ref mut state) => { let flow_trace = json::encode(&flow::base(&*state.flow_root)).unwrap(); let data = box ScopeData::new(name.clone(), flow_trace); state.scope_stack.push(data); } None => {} } }); Scope } } #[cfg(debug_assertions)] impl Drop for Scope { fn drop(&mut self) { STATE_KEY.with(|ref r| { match *r.borrow_mut() { Some(ref mut state) => { let mut current_scope = state.scope_stack.pop().unwrap(); current_scope.post = json::encode(&flow::base(&*state.flow_root)).unwrap(); let previous_scope = state.scope_stack.last_mut().unwrap(); previous_scope.children.push(current_scope); } None => {} } }); } } /// Generate a unique ID. This is used for items such as Fragment /// which are often reallocated but represent essentially the /// same data. #[cfg(debug_assertions)] pub fn generate_unique_debug_id() -> u16 { DEBUG_ID_COUNTER.fetch_add(1, Ordering::SeqCst) as u16 } /// Begin a layout debug trace. If this has not been called, /// creating debug scopes has no effect. pub fn begin_trace(flow_root: FlowRef) { assert!(STATE_KEY.with(|ref r| r.borrow().is_none())); STATE_KEY.with(|ref r| { let flow_trace = json::encode(&flow::base(&*flow_root)).unwrap(); let state = State { scope_stack: vec![box ScopeData::new("root".to_owned(), flow_trace)], flow_root: flow_root.clone(), }; *r.borrow_mut() = Some(state); }); } /// End the debug layout trace. This will write the layout /// trace to disk in the current directory. The output /// file can then be viewed with an external tool. 
pub fn end_trace(generation: u32) { let mut thread_state = STATE_KEY.with(|ref r| r.borrow_mut().take().unwrap()); assert!(thread_state.scope_stack.len() == 1); let mut root_scope = thread_state.scope_stack.pop().unwrap(); root_scope.post = json::encode(&flow::base(&*thread_state.flow_root)).unwrap(); let result = json::encode(&root_scope).unwrap(); let mut file = File::create(format!("layout_trace-{}.json", generation)).unwrap(); file.write_all(result.as_bytes()).unwrap(); }<|fim▁end|>
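`layout_debug.rs` above keeps a thread-local stack of scopes: opening a scope snapshots the flow tree as JSON, and dropping the scope attaches the closing snapshot and nests the scope under its parent. A rough Python analogue of that push/snapshot/pop pattern, assuming nothing beyond the standard library:

```python
# Minimal sketch (invented names) of the scope-stack tracing pattern in
# layout_debug.rs: snapshot on enter, snapshot on exit, nest under parent.
import json
from contextlib import contextmanager

_scope_stack = [{"name": "root", "pre": None, "post": None, "children": []}]

@contextmanager
def trace_scope(name, snapshot):
    scope = {"name": name, "pre": snapshot(), "post": None, "children": []}
    _scope_stack.append(scope)
    try:
        yield
    finally:
        scope["post"] = snapshot()            # closing snapshot
        _scope_stack.pop()
        _scope_stack[-1]["children"].append(scope)

state = {"n": 0}
with trace_scope("layout", lambda: dict(state)):
    state["n"] += 1
print(json.dumps(_scope_stack[0], indent=2))
```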
<|file_name|>context_tip_test.go<|end_file_name|><|fim▁begin|>// Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package contexttip import ( "context" "fmt" "net/http" "net/http/httptest" "os" "strings" "testing" "cloud.google.com/go/pubsub" ) func TestPublishMessage(t *testing.T) { // TODO: Use testutil to get the project. projectID = os.Getenv("GOLANG_SAMPLES_PROJECT_ID") if projectID == "" { t.Skip("Missing GOLANG_SAMPLES_PROJECT_ID.") } ctx := context.Background() var err error client, err = pubsub.NewClient(ctx, projectID) if err != nil { t.Fatalf("pubsub.NewClient: %v", err) } topicName := os.Getenv("FUNCTIONS_TOPIC_NAME") if topicName == "" { topicName = "functions-test-topic" } topic := client.Topic(topicName) exists, err := topic.Exists(ctx) if err != nil { t.Fatalf("topic(%s).Exists: %v", topicName, err) } if !exists { _, err = client.CreateTopic(context.Background(), topicName) if err != nil { t.Fatalf("topic(%s).CreateTopic: %v", topicName, err) }<|fim▁hole|> rr := httptest.NewRecorder() req := httptest.NewRequest("GET", "/", payload) PublishMessage(rr, req) if rr.Code != http.StatusOK { t.Errorf("PublishMessage: got response code %v, want %v", rr.Code, http.StatusOK) } want := "published" if got := rr.Body.String(); !strings.Contains(got, want) { t.Errorf("PublishMessage: got %q, want to contain %q", got, want) } }<|fim▁end|>
} payload := strings.NewReader(fmt.Sprintf(`{"topic":%q, "message": %q}`, topicName, "my_message"))
<|file_name|>app.js<|end_file_name|><|fim▁begin|>var path = require('path'); var ndir = require('ndir'); var express = require('express'); var MongoStore = require('connect-mongo')(express); var ejs = require('ejs'); var routes = require('./routes'); var config = require('./config').config; var adminFilter = require('./filter/adminFilter'); var businessFilter = require('./filter/businessFilter'); var home = require('./controllers/open/home'); var maxAge = 3600000 * 24 * 30; var staticDir = path.join(__dirname, 'assets'); // root directory for static files. config.upload_temp_dir = config.upload_temp_dir || path.join(__dirname, 'assets', 'user_data'); // ensure upload dir exists <|fim▁hole|> if (err) { throw err; } }); config.upload_img_dir = config.upload_img_dir || path.join(__dirname, 'assets', 'user_data', 'images'); ndir.mkdir(config.upload_img_dir, function(err) { // create the upload file directory if (err) { throw err; } }); var app = express(); // all environments app.configure(function() { app.set('title', 'Zero App'); app.set('port', 80); app.set('env', 'production'); ejs.open = '{{'; ejs.close = '}}'; app.engine('.html', ejs.__express); app.set('view engine', 'html'); app.set('views', path.join(__dirname, 'views')); // directory for html files app.use(express.cookieParser()); require('./model'); var mongoose = require('mongoose'); app.use(express.session({ secret: config.session_secret, store: new MongoStore({ mongooseConnection: mongoose.connection }), cookie: { maxAge: 3600000 } })); // --- set up middleware --- app.use(express.favicon(path.join(__dirname, 'assets/img/favicon.ico'))); app.use('/assets', express.static(staticDir)); app.use(express.logger('dev')); app.use('/admin', adminFilter.userNeeded); app.use('/business', businessFilter.bserNeeded); app.use('/', home.init); app.use(express.bodyParser({ uploadDir: config.upload_temp_dir })); app.use(express.methodOverride()); // configure routes routes(app); }) // development only app.configure('development', function() { app.use(express.errorHandler()); }) // production only app.configure('production', function() { //FIXME: // app.use(function(err, req, res, next) { // console.error(err.stack); // res.redirect('/500'); // }); }); app.listen(app.get('port'), function() { console.log('Express server listening on port ' + app.get('port')); });<|fim▁end|>
ndir.mkdir(config.upload_temp_dir, function(err) { // create the upload file directory
<|file_name|>rules.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ //! Bindings for CSS Rule objects use crate::counter_style::{self, CounterBound}; use crate::gecko_bindings::structs::{self, nsCSSValue}; use crate::gecko_bindings::sugar::ns_css_value::ToNsCssValue; impl<'a> ToNsCssValue for &'a counter_style::System { fn convert(self, nscssvalue: &mut nsCSSValue) { use crate::counter_style::System::*; match *self { Cyclic => nscssvalue.set_enum(structs::NS_STYLE_COUNTER_SYSTEM_CYCLIC as i32), Numeric => nscssvalue.set_enum(structs::NS_STYLE_COUNTER_SYSTEM_NUMERIC as i32), Alphabetic => nscssvalue.set_enum(structs::NS_STYLE_COUNTER_SYSTEM_ALPHABETIC as i32), Symbolic => nscssvalue.set_enum(structs::NS_STYLE_COUNTER_SYSTEM_SYMBOLIC as i32), Additive => nscssvalue.set_enum(structs::NS_STYLE_COUNTER_SYSTEM_ADDITIVE as i32), Fixed { ref first_symbol_value, } => { let mut a = nsCSSValue::null(); let mut b = nsCSSValue::null(); a.set_enum(structs::NS_STYLE_COUNTER_SYSTEM_FIXED as i32); b.set_integer(first_symbol_value.map_or(1, |v| v.value())); nscssvalue.set_pair(&a, &b); }, Extends(ref other) => { let mut a = nsCSSValue::null(); let mut b = nsCSSValue::null(); a.set_enum(structs::NS_STYLE_COUNTER_SYSTEM_EXTENDS as i32); b.set_atom_ident(other.0.clone()); nscssvalue.set_pair(&a, &b); }, } } } impl<'a> ToNsCssValue for &'a counter_style::Negative { fn convert(self, nscssvalue: &mut nsCSSValue) { if let Some(ref second) = self.1 { let mut a = nsCSSValue::null(); let mut b = nsCSSValue::null(); a.set_from(&self.0); b.set_from(second); nscssvalue.set_pair(&a, &b); } else { nscssvalue.set_from(&self.0) } } } impl<'a> ToNsCssValue for &'a counter_style::Symbol { fn convert(self, nscssvalue: &mut nsCSSValue) { match *self { counter_style::Symbol::String(ref s) => nscssvalue.set_string(s), counter_style::Symbol::Ident(ref s) => nscssvalue.set_ident_from_atom(&s.0), } } } impl<'a> ToNsCssValue for &'a counter_style::Ranges { fn convert(self, nscssvalue: &mut nsCSSValue) { if self.0.is_empty() { nscssvalue.set_auto(); } else { nscssvalue.set_pair_list(self.0.iter().map(|range| { fn set_bound(bound: CounterBound, nscssvalue: &mut nsCSSValue) { if let CounterBound::Integer(finite) = bound { nscssvalue.set_integer(finite.value()) } else { nscssvalue.set_enum(structs::NS_STYLE_COUNTER_RANGE_INFINITE as i32) } } let mut start = nsCSSValue::null(); let mut end = nsCSSValue::null(); set_bound(range.start, &mut start); set_bound(range.end, &mut end); (start, end) })); } } } impl<'a> ToNsCssValue for &'a counter_style::Pad { fn convert(self, nscssvalue: &mut nsCSSValue) { let mut min_length = nsCSSValue::null(); let mut pad_with = nsCSSValue::null(); min_length.set_integer(self.0.value()); pad_with.set_from(&self.1); nscssvalue.set_pair(&min_length, &pad_with); } } impl<'a> ToNsCssValue for &'a counter_style::Fallback { fn convert(self, nscssvalue: &mut nsCSSValue) { nscssvalue.set_atom_ident(self.0 .0.clone()) } } impl<'a> ToNsCssValue for &'a counter_style::Symbols { fn convert(self, nscssvalue: &mut nsCSSValue) { nscssvalue.set_list(self.0.iter().map(|item| { let mut value = nsCSSValue::null(); value.set_from(item); value })); } } impl<'a> ToNsCssValue for &'a counter_style::AdditiveSymbols { fn convert(self, nscssvalue: &mut nsCSSValue) { nscssvalue.set_pair_list(self.0.iter().map(|tuple| { let mut weight = 
nsCSSValue::null(); let mut symbol = nsCSSValue::null(); weight.set_integer(tuple.weight.value()); symbol.set_from(&tuple.symbol); (weight, symbol) })); } }<|fim▁hole|> match *self { Auto => nscssvalue.set_auto(), Bullets => nscssvalue.set_enum(structs::NS_STYLE_COUNTER_SPEAKAS_BULLETS as i32), Numbers => nscssvalue.set_enum(structs::NS_STYLE_COUNTER_SPEAKAS_NUMBERS as i32), Words => nscssvalue.set_enum(structs::NS_STYLE_COUNTER_SPEAKAS_WORDS as i32), Other(ref other) => nscssvalue.set_atom_ident(other.0.clone()), } } }<|fim▁end|>
impl<'a> ToNsCssValue for &'a counter_style::SpeakAs { fn convert(self, nscssvalue: &mut nsCSSValue) { use crate::counter_style::SpeakAs::*;
<|file_name|>MIDI_nl_NL.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" encoding="utf-8"?> <!DOCTYPE TS> <TS version="2.0" language="nl_NL"> <context> <name>ConfigureMidiPlugin</name> <message> <location filename="common/configuremidiplugin.ui" line="14"/> <source>Configure MIDI Plugin</source> <translation>Configureer MIDI Plugin</translation> </message> <message> <location filename="common/configuremidiplugin.ui" line="47"/> <source>Name</source> <translation>Naam</translation> </message> <message> <location filename="common/configuremidiplugin.ui" line="52"/> <source>MIDI Channel</source> <translation>MIDI kanaal</translation> </message> <message> <location filename="common/configuremidiplugin.ui" line="57"/> <source>Mode</source> <translation>Modus</translation> </message> <message> <location filename="common/configuremidiplugin.ui" line="62"/> <source>Init Message</source> <translation>Init Message</translation> </message> <message> <location filename="common/configuremidiplugin.ui" line="20"/> <source>Refresh</source> <translation>Ververs</translation> </message> <message> <location filename="common/configuremidiplugin.cpp" line="127"/> <source>Outputs</source> <translation>Outputs</translation> </message> <message> <location filename="common/configuremidiplugin.cpp" line="150"/> <source>Inputs</source> <translation>Inputs</translation> </message> <message> <location filename="common/configuremidiplugin.cpp" line="214"/> <source>None</source> <translation>Geen</translation> </message> </context> <context> <name>MidiPlugin</name> <message> <location filename="common/midiplugin.cpp" line="139"/> <source>This plugin provides input/output support for MIDI devices.</source> <translation>Deze plugin verzorgt input/output voor MIDI apparaten.</translation> </message> <message> <location filename="common/midiplugin.cpp" line="153"/> <source>No output support available.</source> <translation>Output niet ondersteund.</translation> </message> <message> <location filename="common/midiplugin.cpp" line="161"/> <source>Output</source> <translation>Output</translation> </message> <message> <location filename="common/midiplugin.cpp" line="164"/> <location filename="common/midiplugin.cpp" line="272"/> <source>Open</source> <translation>Open</translation> </message> <message> <location filename="common/midiplugin.cpp" line="166"/> <location filename="common/midiplugin.cpp" line="274"/> <source>Not Open</source> <translation>Ongeopend</translation> </message> <message> <location filename="common/midiplugin.cpp" line="167"/> <location filename="common/midiplugin.cpp" line="275"/> <source>Status</source> <translation>Status</translation> </message> <message> <location filename="common/midiplugin.cpp" line="173"/> <source>Invalid Output</source> <translation>Ongeldige output</translation> </message> <message> <location filename="common/midiplugin.cpp" line="261"/> <source>No input support available.</source> <translation>Input niet ondersteund.</translation> </message> <message> <location filename="common/midiplugin.cpp" line="269"/> <source>Input</source> <translation>Input</translation> </message> <message> <location filename="common/midiplugin.cpp" line="281"/> <source>Invalid Input</source> <translation>Input niet ondersteund</translation> </message> </context> <context> <name>QObject</name> <message> <location filename="../../engine/src/qlcfile.cpp" line="195"/> <source>No error occurred.</source><|fim▁hole|> </message> <message> <location filename="../../engine/src/qlcfile.cpp" line="197"/> 
<source>An error occurred when reading from the file.</source> <translation>Er is een fout opgetreden tijdens het lezen van het bestand.</translation> </message> <message> <location filename="../../engine/src/qlcfile.cpp" line="199"/> <source>An error occurred when writing to the file.</source> <translation>Er is een fout opgetreden tijdens het schrijven naar het bestand.</translation> </message> <message> <location filename="../../engine/src/qlcfile.cpp" line="201"/> <source>A fatal error occurred.</source> <translation>Er is een fatale fout opgetreden.</translation> </message> <message> <location filename="../../engine/src/qlcfile.cpp" line="203"/> <source>Resource error occurred.</source> <translation>Er is een resource fout opgetreden.</translation> </message> <message> <location filename="../../engine/src/qlcfile.cpp" line="205"/> <source>The file could not be opened.</source> <translation>Het bestand kan niet geopend worden.</translation> </message> <message> <location filename="../../engine/src/qlcfile.cpp" line="207"/> <source>The operation was aborted.</source> <translation>De bewerking is afgebroken.</translation> </message> <message> <location filename="../../engine/src/qlcfile.cpp" line="209"/> <source>A timeout occurred.</source> <translation>Er is een time-out opgetreden.</translation> </message> <message> <location filename="../../engine/src/qlcfile.cpp" line="211"/> <source>An unspecified error occurred.</source> <translation>Er is een onbekende fout opgetreden. </translation> </message> <message> <location filename="../../engine/src/qlcfile.cpp" line="213"/> <source>The file could not be removed.</source> <translation>Het bestand kan niet worden verwijderd.</translation> </message> <message> <location filename="../../engine/src/qlcfile.cpp" line="215"/> <source>The file could not be renamed.</source> <translation>Het bestand kan niet hernoemd worden.</translation> </message> <message> <location filename="../../engine/src/qlcfile.cpp" line="217"/> <source>The position in the file could not be changed.</source> <translation>De positie in het bestand kan niet veranderd worden.</translation> </message> <message> <location filename="../../engine/src/qlcfile.cpp" line="219"/> <source>The file could not be resized.</source> <translation>Het bestand kan niet van grootte veranderd worden.</translation> </message> <message> <location filename="../../engine/src/qlcfile.cpp" line="221"/> <source>The file could not be accessed.</source> <translation>Het bestand kan niet benaderd worden.</translation> </message> <message> <location filename="../../engine/src/qlcfile.cpp" line="223"/> <source>The file could not be copied.</source> <translation>Het bestand kan niet gekopieerd worden.</translation> </message> <message> <location filename="../../engine/src/qlcfile.cpp" line="225"/> <source>An unknown error occurred.</source> <translation>Er is een onbekende fout opgetreden.</translation> </message> </context> </TS><|fim▁end|>
<translation>Er is geen fout opgetreden.</translation>
<|file_name|>depart.rs<|end_file_name|><|fim▁begin|>// Copyright (c) 2017 Chef Software Inc. and/or applicable contributors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::thread; use std::time;<|fim▁hole|> use error::{Error, Result}; pub fn run( ui: &mut UI, member_id: &str, peers: Vec<String>, ring_key: Option<SymKey>, ) -> Result<()> { ui.begin( format!("Permanently marking {} as departed", member_id), )?; ui.status( Status::Creating, format!("service configuration"), )?; for peer in peers.into_iter() { ui.status(Status::Applying, format!("to peer {}", peer))?; let mut client = Client::new(peer, ring_key.clone()).map_err(|e| { Error::ButterflyError(e.to_string()) })?; client.send_departure(member_id).map_err(|e| { Error::ButterflyError(e.to_string()) })?; // please take a moment to weep over the following line // of code. We must sleep to allow messages to be sent // before freeing the socket to prevent loss. // see https://github.com/zeromq/libzmq/issues/1264 thread::sleep(time::Duration::from_millis(100)); } ui.end("Departure recorded.")?; Ok(()) }<|fim▁end|>
use butterfly::client::Client; use common::ui::{Status, UI}; use hcore::crypto::SymKey;
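The `run` function in the depart row loops over the peers, sends the departure message to each, and sleeps briefly before the client is dropped so queued messages are flushed first (the libzmq issue cited in its comment). A schematic Python version of that loop; the `send` callback and message shape are invented for illustration:

```python
# Schematic peer loop (hypothetical client API): send, then pause so the
# message leaves the socket before it is freed.
import time

def depart(member_id, peers, send):
    for peer in peers:
        send(peer, {"type": "departure", "member": member_id})
        time.sleep(0.1)  # allow the message to be sent before teardown

sent = []
depart("m-1", ["10.0.0.1", "10.0.0.2"], lambda p, m: sent.append((p, m)))
print(sent)
```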
<|file_name|>diagnostics.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(non_snake_case)] register_long_diagnostics! { E0373: r##" This error occurs when an attempt is made to use data captured by a closure, when that data may no longer exist. It's most commonly seen when attempting to return a closure: ``` fn foo() -> Box<Fn(u32) -> u32> { let x = 0u32; Box::new(|y| x + y) } ``` Notice that `x` is stack-allocated by `foo()`. By default, Rust captures closed-over data by reference. This means that once `foo()` returns, `x` no longer exists. An attempt to access `x` within the closure would thus be unsafe. Another situation where this might be encountered is when spawning threads: ``` fn foo() { let x = 0u32; let y = 1u32; let thr = std::thread::spawn(|| { x + y }); } ``` Since our new thread runs in parallel, the stack frame containing `x` and `y` may well have disappeared by the time we try to use them. Even if we call `thr.join()` within foo (which blocks until `thr` has completed, ensuring the stack frame won't disappear), we will not succeed: the compiler cannot prove that this behaviour is safe, and so won't let us do it. The solution to this problem is usually to switch to using a `move` closure. This approach moves (or copies, where possible) data into the closure, rather than taking references to it. For example: ``` fn foo() -> Box<Fn(u32) -> u32> { let x = 0u32; Box::new(move |y| x + y) } ``` Now that the closure has its own copy of the data, there's no need to worry about safety. "##, E0381: r##" It is not allowed to use or capture an uninitialized variable. For example: ``` fn main() { let x: i32; let y = x; // error, use of possibly uninitialized variable } ``` To fix this, ensure that any declared variables are initialized before being used. "##, E0382: r##" This error occurs when an attempt is made to use a variable after its contents have been moved elsewhere. For example: ``` struct MyStruct { s: u32 } fn main() { let mut x = MyStruct{ s: 5u32 }; let y = x; x.s = 6; println!("{}", x.s); } ``` Since `MyStruct` is a type that is not marked `Copy`, the data gets moved out of `x` when we set `y`. This is fundamental to Rust's ownership system: outside of workarounds like `Rc`, a value cannot be owned by more than one variable. If we own the type, the easiest way to address this problem is to implement `Copy` and `Clone` on it, as shown below. This allows `y` to copy the information in `x`, while leaving the original version owned by `x`. Subsequent changes to `x` will not be reflected when accessing `y`. 
``` #[derive(Copy, Clone)] struct MyStruct { s: u32 } fn main() { let mut x = MyStruct{ s: 5u32 }; let y = x; x.s = 6; println!("{}", x.s); } ``` Alternatively, if we don't control the struct's definition, or mutable shared ownership is truly required, we can use `Rc` and `RefCell`: ``` use std::cell::RefCell; use std::rc::Rc; struct MyStruct { s: u32 } fn main() { let mut x = Rc::new(RefCell::new(MyStruct{ s: 5u32 })); let y = x.clone(); x.borrow_mut().s = 6; println!("{}", x.borrow().s); } ``` With this approach, x and y share ownership of the data via the `Rc` (reference count type). `RefCell` essentially performs runtime borrow checking: ensuring that at most one writer or multiple readers can access the data at any one time. If you wish to learn more about ownership in Rust, start with the chapter in the Book: https://doc.rust-lang.org/book/ownership.html "##, E0384: r##" This error occurs when an attempt is made to reassign an immutable variable. For example: 
``` fn main(){ let x = 3; x = 5; // error, reassignment of immutable variable
<|file_name|>DoInterestManager.py<|end_file_name|><|fim▁begin|>""" The DoInterestManager keeps track of which parent/zones that we currently have interest in. When you want to "look" into a zone you add an interest to that zone. When you want to get rid of, or ignore, the objects in that zone, remove interest in that zone. p.s. A great deal of this code is just code moved from ClientRepository.py. """ from panda3d.core import * from panda3d.direct import * from .MsgTypes import * from direct.showbase.PythonUtil import * from direct.showbase import DirectObject from .PyDatagram import PyDatagram from direct.directnotify.DirectNotifyGlobal import directNotify import types from direct.showbase.PythonUtil import report class InterestState: StateActive = 'Active' StatePendingDel = 'PendingDel' def __init__(self, desc, state, context, event, parentId, zoneIdList, eventCounter, auto=False): self.desc = desc self.state = state self.context = context # We must be ready to keep track of multiple events. If somebody # requested an interest to be removed and we get a second request # for removal of the same interest before we get a response for the # first interest removal, we now have two parts of the codebase # waiting for a response on the removal of a single interest. self.events = [] self.eventCounter = eventCounter if event: self.addEvent(event) self.parentId = parentId self.zoneIdList = zoneIdList self.auto = auto def addEvent(self, event): self.events.append(event) self.eventCounter.num += 1 def getEvents(self): return list(self.events) def clearEvents(self): self.eventCounter.num -= len(self.events) assert self.eventCounter.num >= 0 self.events = [] def sendEvents(self): for event in self.events: messenger.send(event) self.clearEvents() def setDesc(self, desc): self.desc = desc def isPendingDelete(self): return self.state == InterestState.StatePendingDel def __repr__(self): return 'InterestState(desc=%s, state=%s, context=%s, event=%s, parentId=%s, zoneIdList=%s)' % ( self.desc, self.state, self.context, self.events, self.parentId, self.zoneIdList) class InterestHandle: """This class helps to ensure that valid handles get passed in to DoInterestManager funcs""" def __init__(self, id): self._id = id def asInt(self): return self._id def __eq__(self, other): if type(self) == type(other): return self._id == other._id return self._id == other def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self._id) # context value for interest changes that have no complete event NO_CONTEXT = 0 class DoInterestManager(DirectObject.DirectObject): """ Top level Interest Manager """ notify = directNotify.newCategory("DoInterestManager") InterestDebug = ConfigVariableBool('interest-debug', False) # 'handle' is a number that represents a single interest set that the # client has requested; the interest set may be modified _HandleSerialNum = 0 # high bit is reserved for server interests _HandleMask = 0x7FFF # 'context' refers to a single request to change an interest set _ContextIdSerialNum = 100 _ContextIdMask = 0x3FFFFFFF # avoid making Python create a long _interests = {} if __debug__: _debug_interestHistory = [] _debug_maxDescriptionLen = 40 _SerialGen = SerialNumGen() _SerialNum = serialNum() def __init__(self): assert DoInterestManager.notify.debugCall() DirectObject.DirectObject.__init__(self) self._addInterestEvent = uniqueName('DoInterestManager-Add') self._removeInterestEvent = uniqueName('DoInterestManager-Remove') self._noNewInterests = False self._completeDelayedCallback = None # keep track 
of request contexts that have not completed self._completeEventCount = ScratchPad(num=0) self._allInterestsCompleteCallbacks = [] def __verbose(self): return self.InterestDebug.getValue() or self.getVerbose() def _getAnonymousEvent(self, desc): return 'anonymous-%s-%s' % (desc, DoInterestManager._SerialGen.next()) def setNoNewInterests(self, flag): self._noNewInterests = flag def noNewInterests(self): return self._noNewInterests def setAllInterestsCompleteCallback(self, callback): if ((self._completeEventCount.num == 0) and (self._completeDelayedCallback is None)): callback() else: self._allInterestsCompleteCallbacks.append(callback) def getAllInterestsCompleteEvent(self): return 'allInterestsComplete-%s' % DoInterestManager._SerialNum def resetInterestStateForConnectionLoss(self): DoInterestManager._interests.clear() self._completeEventCount = ScratchPad(num=0) if __debug__: self._addDebugInterestHistory("RESET", "", 0, 0, 0, []) def isValidInterestHandle(self, handle): # pass in a handle (or anything else) and this will return true if it is # still a valid interest handle if not isinstance(handle, InterestHandle): return False return handle.asInt() in DoInterestManager._interests def updateInterestDescription(self, handle, desc): iState = DoInterestManager._interests.get(handle.asInt()) if iState: iState.setDesc(desc) def addInterest(self, parentId, zoneIdList, description, event=None): """ Look into a (set of) zone(s). """ assert DoInterestManager.notify.debugCall() handle = self._getNextHandle() # print 'base.cr.addInterest(',description,',',handle,'):',globalClock.getFrameCount() if self._noNewInterests: DoInterestManager.notify.warning( "addInterest: addingInterests on delete: %s" % (handle)) return # make sure we've got parenting rules set in the DC if parentId not in (self.getGameDoId(),): parent = self.getDo(parentId) if not parent: DoInterestManager.notify.error( 'addInterest: attempting to add interest under unknown object %s' % parentId) else: if not parent.hasParentingRules(): DoInterestManager.notify.error( 'addInterest: no setParentingRules defined in the DC for object %s (%s)' '' % (parentId, parent.__class__.__name__)) if event: contextId = self._getNextContextId() else: contextId = 0 # event = self._getAnonymousEvent('addInterest') DoInterestManager._interests[handle] = InterestState( description, InterestState.StateActive, contextId, event, parentId, zoneIdList, self._completeEventCount) if self.__verbose(): print('CR::INTEREST.addInterest(handle=%s, parentId=%s, zoneIdList=%s, description=%s, event=%s)' % ( handle, parentId, zoneIdList, description, event)) self._sendAddInterest(handle, contextId, parentId, zoneIdList, description) if event: messenger.send(self._getAddInterestEvent(), [event]) assert self.printInterestsIfDebug() return InterestHandle(handle) def addAutoInterest(self, parentId, zoneIdList, description): """ Look into a (set of) zone(s). 
""" assert DoInterestManager.notify.debugCall() handle = self._getNextHandle() if self._noNewInterests: DoInterestManager.notify.warning( "addInterest: addingInterests on delete: %s" % (handle)) return # make sure we've got parenting rules set in the DC if parentId not in (self.getGameDoId(),): parent = self.getDo(parentId) if not parent: DoInterestManager.notify.error( 'addInterest: attempting to add interest under unknown object %s' % parentId) else: if not parent.hasParentingRules(): DoInterestManager.notify.error( 'addInterest: no setParentingRules defined in the DC for object %s (%s)' '' % (parentId, parent.__class__.__name__)) DoInterestManager._interests[handle] = InterestState( description, InterestState.StateActive, 0, None, parentId, zoneIdList, self._completeEventCount, True) if self.__verbose(): print('CR::INTEREST.addInterest(handle=%s, parentId=%s, zoneIdList=%s, description=%s)' % ( handle, parentId, zoneIdList, description)) assert self.printInterestsIfDebug() return InterestHandle(handle) def removeInterest(self, handle, event = None): """ Stop looking in a (set of) zone(s) """ # print 'base.cr.removeInterest(',handle,'):',globalClock.getFrameCount() assert DoInterestManager.notify.debugCall() assert isinstance(handle, InterestHandle) existed = False if not event: event = self._getAnonymousEvent('removeInterest') handle = handle.asInt() if handle in DoInterestManager._interests: existed = True intState = DoInterestManager._interests[handle] if event: messenger.send(self._getRemoveInterestEvent(), [event, intState.parentId, intState.zoneIdList]) if intState.isPendingDelete(): self.notify.warning( 'removeInterest: interest %s already pending removal' % handle) # this interest is already pending delete, so let's just tack this # callback onto the list if event is not None: intState.addEvent(event) else: if len(intState.events) > 0: # we're not pending a removal, but we have outstanding events? # probably we are waiting for an add/alter complete. # should we send those events now? assert self.notify.warning('removeInterest: abandoning events: %s' % intState.events) intState.clearEvents() intState.state = InterestState.StatePendingDel contextId = self._getNextContextId() intState.context = contextId if event: intState.addEvent(event) self._sendRemoveInterest(handle, contextId) if not event: self._considerRemoveInterest(handle) if self.__verbose(): print('CR::INTEREST.removeInterest(handle=%s, event=%s)' % ( handle, event)) else: DoInterestManager.notify.warning( "removeInterest: handle not found: %s" % (handle)) assert self.printInterestsIfDebug() return existed def removeAutoInterest(self, handle): """ Stop looking in a (set of) zone(s) """ assert DoInterestManager.notify.debugCall() assert isinstance(handle, InterestHandle) existed = False handle = handle.asInt() if handle in DoInterestManager._interests: existed = True intState = DoInterestManager._interests[handle] if intState.isPendingDelete(): self.notify.warning( 'removeInterest: interest %s already pending removal' % handle) # this interest is already pending delete, so let's just tack this # callback onto the list else: if len(intState.events) > 0: # we're not pending a removal, but we have outstanding events? # probably we are waiting for an add/alter complete. # should we send those events now? 
self.notify.warning('removeInterest: abandoning events: %s' % intState.events) intState.clearEvents() intState.state = InterestState.StatePendingDel self._considerRemoveInterest(handle) if self.__verbose(): print('CR::INTEREST.removeAutoInterest(handle=%s)' % (handle)) else: DoInterestManager.notify.warning( "removeInterest: handle not found: %s" % (handle)) assert self.printInterestsIfDebug() return existed @report(types = ['args'], dConfigParam = 'guildmgr') def removeAIInterest(self, handle): """ handle is NOT an InterestHandle. It's just a bare integer representing an AI opened interest. We're making the client close down this interest since the AI has trouble removing interests(that its opened) when the avatar goes offline. See GuildManager(UD) for how it's being used. """ self._sendRemoveAIInterest(handle) def alterInterest(self, handle, parentId, zoneIdList, description=None, event=None): """ Removes old interests and adds new interests. Note that when an interest is changed, only the most recent change's event will be triggered. Previous events are abandoned. If this is a problem, consider opening multiple interests. """ assert DoInterestManager.notify.debugCall() assert isinstance(handle, InterestHandle) #assert not self._noNewInterests handle = handle.asInt() if self._noNewInterests: DoInterestManager.notify.warning( "alterInterest: addingInterests on delete: %s" % (handle)) return exists = False if event is None: event = self._getAnonymousEvent('alterInterest') if handle in DoInterestManager._interests: if description is not None: DoInterestManager._interests[handle].desc = description else: description = DoInterestManager._interests[handle].desc # are we overriding an existing change? if DoInterestManager._interests[handle].context != NO_CONTEXT: DoInterestManager._interests[handle].clearEvents() contextId = self._getNextContextId() DoInterestManager._interests[handle].context = contextId DoInterestManager._interests[handle].parentId = parentId DoInterestManager._interests[handle].zoneIdList = zoneIdList DoInterestManager._interests[handle].addEvent(event) if self.__verbose(): print('CR::INTEREST.alterInterest(handle=%s, parentId=%s, zoneIdList=%s, description=%s, event=%s)' % ( handle, parentId, zoneIdList, description, event)) self._sendAddInterest(handle, contextId, parentId, zoneIdList, description, action='modify') exists = True assert self.printInterestsIfDebug() else: DoInterestManager.notify.warning( "alterInterest: handle not found: %s" % (handle)) return exists def openAutoInterests(self, obj): if hasattr(obj, '_autoInterestHandle'): # must be multiple inheritance self.notify.debug('openAutoInterests(%s): interests already open' % obj.__class__.__name__) return autoInterests = obj.getAutoInterests() obj._autoInterestHandle = None if not len(autoInterests): return obj._autoInterestHandle = self.addAutoInterest(obj.doId, autoInterests, '%s-autoInterest' % obj.__class__.__name__) def closeAutoInterests(self, obj): if not hasattr(obj, '_autoInterestHandle'): # must be multiple inheritance self.notify.debug('closeAutoInterests(%s): interests already closed' % obj) return if obj._autoInterestHandle is not None: self.removeAutoInterest(obj._autoInterestHandle) del obj._autoInterestHandle # events for InterestWatcher def _getAddInterestEvent(self): return self._addInterestEvent def _getRemoveInterestEvent(self): return self._removeInterestEvent def _getInterestState(self, handle): return DoInterestManager._interests[handle] def _getNextHandle(self): handle = 
DoInterestManager._HandleSerialNum while True: handle = (handle + 1) & DoInterestManager._HandleMask # skip handles that are already in use if handle not in DoInterestManager._interests: break DoInterestManager.notify.warning( 'interest %s already in use' % handle) DoInterestManager._HandleSerialNum = handle return DoInterestManager._HandleSerialNum def _getNextContextId(self): contextId = DoInterestManager._ContextIdSerialNum while True: contextId = (contextId + 1) & DoInterestManager._ContextIdMask # skip over the 'no context' id if contextId != NO_CONTEXT: break DoInterestManager._ContextIdSerialNum = contextId return DoInterestManager._ContextIdSerialNum def _considerRemoveInterest(self, handle): """ Consider whether we should cull the interest set. """ assert DoInterestManager.notify.debugCall() if handle in DoInterestManager._interests: if DoInterestManager._interests[handle].isPendingDelete(): # make sure there is no pending event for this interest if DoInterestManager._interests[handle].context == NO_CONTEXT: assert len(DoInterestManager._interests[handle].events) == 0 del DoInterestManager._interests[handle] if __debug__: def printInterestsIfDebug(self): if DoInterestManager.notify.getDebug(): self.printInterests() return 1 # for assert def _addDebugInterestHistory(self, action, description, handle, contextId, parentId, zoneIdList): if description is None: description = '' DoInterestManager._debug_interestHistory.append( (action, description, handle, contextId, parentId, zoneIdList)) DoInterestManager._debug_maxDescriptionLen = max( DoInterestManager._debug_maxDescriptionLen, len(description)) def printInterestHistory(self): print("***************** Interest History *************") format = '%9s %' + str(DoInterestManager._debug_maxDescriptionLen) + 's %6s %6s %9s %s' print(format % ( "Action", "Description", "Handle", "Context", "ParentId", "ZoneIdList")) for i in DoInterestManager._debug_interestHistory: print(format % tuple(i)) print("Note: interests with a Context of 0 do not get" \ " done/finished notices.") def printInterestSets(self): print("******************* Interest Sets **************") format = '%6s %' + str(DoInterestManager._debug_maxDescriptionLen) + 's %11s %11s %8s %8s %8s' print(format % ( "Handle", "Description", "ParentId", "ZoneIdList", "State", "Context", "Event")) for id, state in DoInterestManager._interests.items(): if len(state.events) == 0: event = '' elif len(state.events) == 1: event = state.events[0] else: event = state.events print(format % (id, state.desc, state.parentId, state.zoneIdList, state.state, state.context, event)) print("************************************************") def printInterests(self): self.printInterestHistory() self.printInterestSets() def _sendAddInterest(self, handle, contextId, parentId, zoneIdList, description, action=None): """ Part of the new otp-server code. handle is a client-side created number that refers to a set of interests. The same handle number doesn't necessarily have any relationship to the same handle on another client. 
""" assert DoInterestManager.notify.debugCall() if __debug__: if isinstance(zoneIdList, list): zoneIdList.sort() if action is None: action = 'add' self._addDebugInterestHistory( action, description, handle, contextId, parentId, zoneIdList) if parentId == 0: DoInterestManager.notify.error( 'trying to set interest to invalid parent: %s' % parentId) datagram = PyDatagram() # Add message type if isinstance(zoneIdList, list): vzl = list(zoneIdList) vzl.sort() uniqueElements(vzl) datagram.addUint16(CLIENT_ADD_INTEREST_MULTIPLE) datagram.addUint32(contextId) datagram.addUint16(handle) datagram.addUint32(parentId) datagram.addUint16(len(vzl)) for zone in vzl: datagram.addUint32(zone) else: datagram.addUint16(CLIENT_ADD_INTEREST) datagram.addUint32(contextId) datagram.addUint16(handle) datagram.addUint32(parentId) datagram.addUint32(zoneIdList) self.send(datagram) def _sendRemoveInterest(self, handle, contextId): """ handle is a client-side created number that refers to a set of interests. The same handle number doesn't necessarily have any relationship to the same handle on another client. """ assert DoInterestManager.notify.debugCall() assert handle in DoInterestManager._interests datagram = PyDatagram() # Add message type datagram.addUint16(CLIENT_REMOVE_INTEREST) datagram.addUint32(contextId) datagram.addUint16(handle) self.send(datagram) if __debug__: state = DoInterestManager._interests[handle] self._addDebugInterestHistory( "remove", state.desc, handle, contextId, state.parentId, state.zoneIdList) def _sendRemoveAIInterest(self, handle): """ handle is a bare int, NOT an InterestHandle. Use this to close an AI opened interest. """ datagram = PyDatagram() # Add message type datagram.addUint16(CLIENT_REMOVE_INTEREST) datagram.addUint16((1<<15) + handle) self.send(datagram) def cleanupWaitAllInterestsComplete(self): if self._completeDelayedCallback is not None: self._completeDelayedCallback.destroy() self._completeDelayedCallback = None def queueAllInterestsCompleteEvent(self, frames=5): # wait for N frames, if no new interests, send out all-done event # calling this is OK even if there are no pending interest completes def checkMoreInterests(): # if there are new interests, cancel this delayed callback, another # will automatically be scheduled when all interests complete # print 'checkMoreInterests(',self._completeEventCount.num,'):',globalClock.getFrameCount() return self._completeEventCount.num > 0 def sendEvent(): messenger.send(self.getAllInterestsCompleteEvent()) for callback in self._allInterestsCompleteCallbacks: callback() self._allInterestsCompleteCallbacks = [] self.cleanupWaitAllInterestsComplete() self._completeDelayedCallback = FrameDelayedCall( 'waitForAllInterestCompletes', callback=sendEvent, frames=frames, cancelFunc=checkMoreInterests) checkMoreInterests = None sendEvent = None def handleInterestDoneMessage(self, di): """ This handles the interest done messages and may dispatch an event """ assert DoInterestManager.notify.debugCall() contextId = di.getUint32() handle = di.getUint16() if self.__verbose(): print('CR::INTEREST.interestDone(handle=%s)' % handle) DoInterestManager.notify.debug( "handleInterestDoneMessage--> Received handle %s, context %s" % ( handle, contextId)) if handle in DoInterestManager._interests: eventsToSend = [] # if the context matches, send out the event if contextId == DoInterestManager._interests[handle].context: DoInterestManager._interests[handle].context = NO_CONTEXT # the event handlers may call back into the interest manager. 
Send out # the events after we're once again in a stable state. #DoInterestManager._interests[handle].sendEvents() eventsToSend = list(DoInterestManager._interests[handle].getEvents()) DoInterestManager._interests[handle].clearEvents() else: DoInterestManager.notify.debug( "handleInterestDoneMessage--> handle: %s: Expecting context %s, got %s" % ( handle, DoInterestManager._interests[handle].context, contextId)) if __debug__: state = DoInterestManager._interests[handle] self._addDebugInterestHistory( "finished", state.desc, handle, contextId, state.parentId, state.zoneIdList) self._considerRemoveInterest(handle) for event in eventsToSend: messenger.send(event) else: DoInterestManager.notify.warning( "handleInterestDoneMessage: handle not found: %s" % (handle)) # if there are no more outstanding interest-completes, send out global all-done event if self._completeEventCount.num == 0: self.queueAllInterestsCompleteEvent() assert self.printInterestsIfDebug() if __debug__: import unittest class AsyncTestCase(unittest.TestCase): def setCompleted(self): self._async_completed = True def isCompleted(self): return getattr(self, '_async_completed', False) class AsyncTestSuite(unittest.TestSuite): pass class AsyncTestLoader(unittest.TestLoader): suiteClass = AsyncTestSuite class AsyncTextTestRunner(unittest.TextTestRunner): def run(self, testCase): result = self._makeResult() startTime = time.time() testCase(result) stopTime = time.time() timeTaken = stopTime - startTime result.printErrors() self.stream.writeln(result.separator2) run = result.testsRun<|fim▁hole|> (run, run != 1 and "s" or "", timeTaken)) self.stream.writeln() if not result.wasSuccessful(): self.stream.write("FAILED (") failed, errored = map(len, (result.failures, result.errors)) if failed: self.stream.write("failures=%d" % failed) if errored: if failed: self.stream.write(", ") self.stream.write("errors=%d" % errored) self.stream.writeln(")") else: self.stream.writeln("OK") return result class TestInterestAddRemove(AsyncTestCase, DirectObject.DirectObject): def testInterestAdd(self): event = uniqueName('InterestAdd') self.acceptOnce(event, self.gotInterestAddResponse) self.handle = base.cr.addInterest(base.cr.GameGlobalsId, 100, 'TestInterest', event=event) def gotInterestAddResponse(self): event = uniqueName('InterestRemove') self.acceptOnce(event, self.gotInterestRemoveResponse) base.cr.removeInterest(self.handle, event=event) def gotInterestRemoveResponse(self): self.setCompleted() def runTests(): suite = unittest.makeSuite(TestInterestAddRemove) unittest.AsyncTextTestRunner(verbosity=2).run(suite)<|fim▁end|>
self.stream.writeln("Ran %d test%s in %.3fs" %
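`_getNextHandle` and `_getNextContextId` in the row above allocate IDs by advancing a serial number under a bit mask and skipping reserved or in-use values, which bounds the ID width while avoiding collisions. A small sketch of that allocation scheme (the mask constants are copied from the class; the helper names are invented):

```python
# Sketch of the wrap-around ID allocation used by DoInterestManager.
HANDLE_MASK = 0x7FFF      # high bit reserved for server interests
CONTEXT_MASK = 0x3FFFFFFF
NO_CONTEXT = 0            # context 0 means "no completion event expected"

def next_handle(current, in_use):
    handle = current
    while True:
        handle = (handle + 1) & HANDLE_MASK
        if handle not in in_use:      # skip handles still in use
            return handle

def next_context(current):
    ctx = current
    while True:
        ctx = (ctx + 1) & CONTEXT_MASK
        if ctx != NO_CONTEXT:         # skip the reserved "no context" id
            return ctx

print(next_handle(0x7FFF, {0}))  # wraps to 0, which is in use, so yields 1
```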
<|file_name|>cli_caller.py<|end_file_name|><|fim▁begin|>from api.callers.api_caller import ApiCaller from exceptions import ResponseTextContentTypeError from colors import Color import os from cli.arguments_builders.default_cli_arguments import DefaultCliArguments import datetime from cli.cli_file_writer import CliFileWriter from cli.formatter.cli_json_formatter import CliJsonFormatter from constants import CALLED_SCRIPT class CliCaller: api_object = None action_name = None help_description = '' given_args = {} result_msg_for_files = 'Response contains files. They were saved in the output folder ({}).' result_msg_for_json = '{}' cli_output_folder = '' args_to_prevent_from_being_send = ['chosen_action', 'verbose', 'quiet'] def __init__(self, api_object: ApiCaller, action_name: str): self.api_object = api_object self.action_name = action_name self.help_description = self.help_description.format(self.api_object.endpoint_url) def init_verbose_mode(self): self.result_msg_for_json = 'JSON:\n\n{}' def build_argument_builder(self, child_parser): return DefaultCliArguments(child_parser) def add_parser_args(self, child_parser): parser_argument_builder = self.build_argument_builder(child_parser) parser_argument_builder.add_verbose_arg() parser_argument_builder.add_help_opt() parser_argument_builder.add_quiet_opt() return parser_argument_builder def attach_args(self, args): self.given_args = args.copy() args_to_send = args.copy() for arg_to_remove in self.args_to_prevent_from_being_send: if arg_to_remove in args_to_send: del args_to_send[arg_to_remove] if 'output' in args: self.cli_output_folder = args['output'] del args_to_send['output'] args_to_send = {k: v for k, v in args_to_send.items() if v not in [None, '']} # Removing some 'empty' elements from dictionary if 'file' in args: del args_to_send['file'] # attaching file is handled by separated method if self.api_object.request_method_name == ApiCaller.CONST_REQUEST_METHOD_GET: self.api_object.attach_params(args_to_send) else: # POST self.api_object.attach_data(args_to_send) def attach_file(self, file): if isinstance(file, str): file = open(file, 'rb') self.api_object.attach_files({'file': file}) # it's already stored as file handler def get_colored_response_status_code(self): response_code = self.api_object.get_response_status_code() return Color.success(response_code) if self.api_object.if_request_success() is True else Color.error(response_code) def get_colored_prepared_response_msg(self): response_msg = self.api_object.get_prepared_response_msg() return Color.success(response_msg) if self.api_object.if_request_success() is True else Color.error(response_msg) def get_result_msg(self): if self.api_object.api_response.headers['Content-Type'] == 'text/html': raise ResponseTextContentTypeError('Can\'t print result, since it\'s \'text/html\' instead of expected content type with \'{}\' on board.'.format(self.api_object.api_expected_data_type)) if self.api_object.api_expected_data_type == ApiCaller.CONST_EXPECTED_DATA_TYPE_JSON: return self.result_msg_for_json.format(CliJsonFormatter.format_to_pretty_string(self.api_object.get_response_json())) elif self.api_object.api_expected_data_type == ApiCaller.CONST_EXPECTED_DATA_TYPE_FILE: if self.api_object.if_request_success() is True: return self.get_result_msg_for_files() else: error_msg = 'Error has occurred and your files were not saved.' if self.given_args['verbose'] is False: error_msg += ' To get more information, please run command in verbose mode. 
(add \'-v\')' return error_msg def get_processed_output_path(self): output_path = self.cli_output_folder<|fim▁hole|> final_output_path = output_path else: path_parts = os.path.dirname(os.path.realpath(__file__)).split('/')[:-2] called_script_dir = os.path.dirname(CALLED_SCRIPT) # It's about a case when user is calling script from not root directory.€ if called_script_dir != 'vxapi.py': new_path_parts = [] bad_parts = called_script_dir.split('/') for part in reversed(path_parts): if part in bad_parts: bad_parts.remove(part) continue new_path_parts.append(part) new_path_parts.reverse() path_parts = new_path_parts prepared_file_path = path_parts + [self.cli_output_folder] final_output_path = '/'.join(prepared_file_path) if not final_output_path.startswith('/'): final_output_path = '/' + final_output_path return final_output_path def get_result_msg_for_files(self): return self.result_msg_for_files.format(self.get_processed_output_path()) def do_post_processing(self): if self.api_object.api_expected_data_type == ApiCaller.CONST_EXPECTED_DATA_TYPE_FILE and self.api_object.if_request_success() is True: self.save_files() def get_date_string(self): now = datetime.datetime.now() return '{}_{}_{}_{}_{}_{}'.format(now.year, now.month, now.day, now.hour, now.minute, now.second) def convert_file_hashes_to_array(self, args, file_arg='hash_list', key_of_array_arg='hashes'): with args[file_arg] as file: hashes = file.read().splitlines() if not hashes: raise Exception('Given file does not contain any data.') for key, value in enumerate(hashes): args['{}[{}]'.format(key_of_array_arg, key)] = value del args[file_arg] return args def save_files(self): api_response = self.api_object.api_response identifier = None if 'id' in self.given_args: identifier = self.given_args['id'] elif 'sha256' in self.given_args: identifier = self.given_args['sha256'] filename = '{}-{}-{}'.format(self.action_name, identifier, api_response.headers['Vx-Filename']) if identifier is not None else '{}-{}'.format(self.action_name, api_response.headers['Vx-Filename']) return CliFileWriter.write(self.get_processed_output_path(), filename, api_response.content)<|fim▁end|>
if output_path.startswith('/') is True: # Given path is absolute final_output_path = output_path else: path_parts = os.path.dirname(os.path.realpath(__file__)).split('/')[:-2] called_script_dir = os.path.dirname(CALLED_SCRIPT) # This handles the case when the user is calling the script from outside the root directory. if called_script_dir != 'vxapi.py': new_path_parts = [] bad_parts = called_script_dir.split('/') for part in reversed(path_parts): if part in bad_parts: bad_parts.remove(part) continue new_path_parts.append(part) new_path_parts.reverse() path_parts = new_path_parts prepared_file_path = path_parts + [self.cli_output_folder] final_output_path = '/'.join(prepared_file_path) if not final_output_path.startswith('/'): final_output_path = '/' + final_output_path return final_output_path def get_result_msg_for_files(self): return self.result_msg_for_files.format(self.get_processed_output_path()) def do_post_processing(self): if self.api_object.api_expected_data_type == ApiCaller.CONST_EXPECTED_DATA_TYPE_FILE and self.api_object.if_request_success() is True: self.save_files() def get_date_string(self): now = datetime.datetime.now() return '{}_{}_{}_{}_{}_{}'.format(now.year, now.month, now.day, now.hour, now.minute, now.second) def convert_file_hashes_to_array(self, args, file_arg='hash_list', key_of_array_arg='hashes'): with args[file_arg] as file: hashes = file.read().splitlines() if not hashes: raise Exception('Given file does not contain any data.') for key, value in enumerate(hashes): args['{}[{}]'.format(key_of_array_arg, key)] = value del args[file_arg] return args def save_files(self): api_response = self.api_object.api_response identifier = None if 'id' in self.given_args: identifier = self.given_args['id'] elif 'sha256' in self.given_args: identifier = self.given_args['sha256'] filename = '{}-{}-{}'.format(self.action_name, identifier, api_response.headers['Vx-Filename']) if identifier is not None else '{}-{}'.format(self.action_name, api_response.headers['Vx-Filename']) return CliFileWriter.write(self.get_processed_output_path(), filename, api_response.content)<|fim▁end|>
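`convert_file_hashes_to_array` in the cli_caller row expands a file of hashes into indexed form fields (`hashes[0]`, `hashes[1]`, ...) before the request is sent. A standalone sketch of that expansion with a hypothetical helper name:

```python
# Sketch of the hash-list expansion performed by convert_file_hashes_to_array.
def expand_hash_list(lines, key="hashes"):
    hashes = [line.strip() for line in lines if line.strip()]
    if not hashes:
        raise ValueError("Given file does not contain any data.")
    return {"{}[{}]".format(key, i): h for i, h in enumerate(hashes)}

print(expand_hash_list(["abc123\n", "def456\n"]))
# {'hashes[0]': 'abc123', 'hashes[1]': 'def456'}
```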
<|file_name|>Gruntfile.js<|end_file_name|><|fim▁begin|>module.exports = function(grunt) { // Initializing the configuration object grunt.initConfig({ copy: { main: { files: [ // includes files within path { expand: true, flatten: true, src: ['bower_components/bootstrap/fonts/*'], dest: './fonts/', filter: 'isFile' }, { expand: true, flatten: true, src: ['bower_components/fontawesome/fonts/*'], dest: './fonts/', filter: 'isFile' }, ] } }, // Task configuration<|fim▁hole|> compress: true, // minifying the result }, files: { // compiling styles.less into styles.css './css/main.min.css': './src/less/main.less' } } }, concat: { options: { separator: ';', }, js_frontend: { src: [ './bower_components/jquery/dist/jquery.js', './bower_components/bootstrap/dist/js/bootstrap.min.js', './src/js/plugins.js', './src/js/main.js' ], dest: './js/main.js', }, }, uglify: { options: { mangle: false // Use if you want the names of your functions and variables // unchanged. }, frontend: { files: { './js/main.min.js' : './js/main.js', } }, }, watch: { js_frontend: { files: [ // watched files './bower_components/bootstrap/dist/js/bootstrap.js', './src/js/main.js', './src/js/plugins.js', ], // tasks to run tasks: ['concat:js_frontend', 'uglify:frontend'], }, less: { files: ['./src/less/*.less', './src/less/**/*.less' ], // watched files tasks: ['less'], // tasks to run }, } }); // Plugin loading grunt.loadNpmTasks('grunt-contrib-concat'); grunt.loadNpmTasks('grunt-contrib-watch'); grunt.loadNpmTasks('grunt-contrib-less'); grunt.loadNpmTasks('grunt-contrib-uglify'); grunt.loadNpmTasks('grunt-contrib-copy'); // Task definition grunt.registerTask('default', ['watch']); };<|fim▁end|>
less: { development: { options: {
<|file_name|>authorization_code.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python # encoding: utf-8 from __future__ import absolute_import from .base import AuthenticationMixinBase from . import GrantFailed # We need to get urlencode from urllib.parse in Python 3, but fall back to # urllib in Python 2 try: from urllib.parse import urlencode except ImportError: from urllib import urlencode try: basestring except NameError: basestring = str class AuthorizationCodeMixin(AuthenticationMixinBase): """Implement helpers for the Authorization Code grant for OAuth2.""" def auth_url(self, scope, redirect, state): """Get the url to direct a user to authenticate.""" url = self.API_ROOT + "/oauth/authorize?" query = { "response_type": "code", "client_id": self.app_info[0] } if scope: if not isinstance(scope, basestring): scope = ' '.join(scope) <|fim▁hole|> query['redirect_uri'] = redirect if state: query['state'] = state return url + urlencode(query) def exchange_code(self, code, redirect): """Perform the exchange step for the code from the redirected user.""" code, headers, resp = self.call_grant( '/oauth/access_token', { "grant_type": "authorization_code", "code": code, "redirect_uri": redirect }) if not code == 200: raise GrantFailed() self.token = resp['access_token'] return self.token, resp['user'], resp['scope']<|fim▁end|>
query['scope'] = scope if redirect:
<|file_name|>mucc.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # Multiple URL Command Client # # Combine a list of mv, cp, rm, and put commands on URLs into a single commit # # To read the help for this program, type python mucc.py --help import os from csvn.core import * from csvn.repos import RemoteRepository, RepositoryURI from csvn.auth import User from optparse import OptionParser usage = """python mucc.py [OPTION]... [ACTION]... Actions: cp REV URL1 URL2 copy URL1@REV to URL2 mkdir URL create new directory URL mv URL1 URL2 move URL1 to URL2 rm URL delete URL put SRC-FILE URL add or modify file URL with contents copied from SRC-FILE propset NAME VAL URL Set property NAME on URL to value VAL propdel NAME URL Delete property NAME from URL """ # Read and parse options parser = OptionParser(usage=usage) parser.add_option("-m", "--message", dest="message", help="use MESSAGE as a log message") parser.add_option("-F", "--file", dest="file", help="read log message from FILE") parser.add_option("-u", "--username", dest="username", help="commit the changes as USERNAME") parser.add_option("-p", "--password", dest="password", help="use password PASSWORD") parser.add_option("-U", "--root-url", dest="root_url", help="Interpret all action URLs as relative to ROOT_URL") parser.add_option("-r", "--revision", dest="rev", help="Use REV as baseline for changes") parser.add_option("-X", "--extra-args ARG", dest="extra_args", help='append arguments from file EXTRA_ARGS (one per line; ' 'use "-" to read from standard input)') (options, args) = parser.parse_args() # Read any extra arguments if options.extra_args: f = file(options.extra_args) for line in f: args.append(line.strip()) if not args:<|fim▁hole|> # Initialize variables root_url = options.root_url actions = [] svn_cmdline_init("", stderr) pool = Pool() action = None if root_url: anchor = RepositoryURI(root_url) else: anchor = None states = None ancestor = None # A list of the arguments accepted by each command cmds = { "cp": [ "rev", "url", "url" ], "mkdir": [ "url" ], "mv": [ "url", "url" ], "rm": [ "url" ], "put": [ "file", "url" ], "propset": [ "name", "val", "url" ], "propdel": [ "name", "url" ], } # Build up a list of the actions we want to perform for arg in args: if not states: action = [arg] actions.append((arg, action)) states = list(cmds[arg]) states.reverse() else: state = states.pop() if state == "rev": action.append(arg.upper() != "HEAD" and int(arg) or None) elif state == "url": arg = RepositoryURI(arg) if anchor: arg = anchor.join(arg) action.append(arg) # It's legal to make a copy of the repository root, # so, we should treat copyfrom paths as possible # repository roots may_be_root = (len(action) == 2 and action[0] == "cp") if not may_be_root: arg = arg.dirname() if ancestor: ancestor = ancestor.longest_ancestor(arg) else: ancestor = arg else: action.append(arg) session = RemoteRepository(ancestor, user=User(username=options.username)) txn = session.txn() # Carry out the transaction for action, args in actions: if action == "cp": txn.copy(src_rev=args[1], src_path=args[2], dest_path=args[3]) elif action == "mv": txn.delete(str(args[1])) txn.copy(src_path=args[1], dest_path=args[2]) elif action == "rm": txn.delete(args[1]) elif action == "mkdir": txn.mkdir(args[1]) elif action == "put": txn.upload(local_path=args[1], remote_path=args[2]) elif action == "propset": txn.propset(key=args[1], value=args[2], path=args[3]) elif action == "propdel": txn.propdel(key=args[1], path=args[2]) # Get the log message message = 
options.message if options.file: message = file(options.file).read() # Finally commit txn.commit(message) print("r%ld committed by %s at %s" % (txn.committed_rev, options.username, txn.committed_date))<|fim▁end|>
parser.print_help() sys.exit(1)
<|file_name|>latlon-spherical.js<|end_file_name|><|fim▁begin|>/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* Latitude/longitude spherical geodesy formulae & scripts (c) Chris Veness 2002-2015 */ /* - www.movable-type.co.uk/scripts/latlong.html MIT Licence */ /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ 'use strict'; /** * Creates a LatLon point on the earth's surface at the specified latitude / longitude. * * @classdesc Tools for geodetic calculations * @requires Dms from 'dms.js' * * @constructor * @param {number} lat - Latitude in degrees. * @param {number} lon - Longitude in degrees. * * @example * var p1 = new LatLon(52.205, 0.119); */ function LatLon(lat, lon) { // allow instantiation without 'new' if (!(this instanceof LatLon)) return new LatLon(lat, lon); this.lat = Number(lat); this.lon = Number(lon); } /** * Returns the distance from 'this' point to destination point (using haversine formula). * * @param {LatLon} point - Latitude/longitude of destination point. * @param {number} [radius=6371e3] - (Mean) radius of earth (defaults to radius in metres). * @returns {number} Distance between this point and destination point, in same units as radius. * * @example * var p1 = new LatLon(52.205, 0.119), p2 = new LatLon(48.857, 2.351); * var d = p1.distanceTo(p2); // Number(d.toPrecision(4)): 404300 */ LatLon.prototype.distanceTo = function(point, radius) { if (!(point instanceof LatLon)) throw new TypeError('point is not LatLon object'); radius = (radius === undefined) ? 6371e3 : Number(radius); var R = radius; var φ1 = this.lat.toRadians(), λ1 = this.lon.toRadians(); var φ2 = point.lat.toRadians(), λ2 = point.lon.toRadians(); var Δφ = φ2 - φ1; var Δλ = λ2 - λ1; var a = Math.sin(Δφ/2) * Math.sin(Δφ/2) + Math.cos(φ1) * Math.cos(φ2) * Math.sin(Δλ/2) * Math.sin(Δλ/2); var c = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1-a)); var d = R * c; return d; }; /** * Returns the (initial) bearing from 'this' point to destination point. * * @param {LatLon} point - Latitude/longitude of destination point. * @returns {number} Initial bearing in degrees from north. * * @example * var p1 = new LatLon(52.205, 0.119), p2 = new LatLon(48.857, 2.351); * var b1 = p1.bearingTo(p2); // b1.toFixed(1): 156.2 */ LatLon.prototype.bearingTo = function(point) { if (!(point instanceof LatLon)) throw new TypeError('point is not LatLon object'); var φ1 = this.lat.toRadians(), φ2 = point.lat.toRadians(); var Δλ = (point.lon-this.lon).toRadians(); // see http://mathforum.org/library/drmath/view/55417.html var y = Math.sin(Δλ) * Math.cos(φ2); var x = Math.cos(φ1)*Math.sin(φ2) - Math.sin(φ1)*Math.cos(φ2)*Math.cos(Δλ); var θ = Math.atan2(y, x); return (θ.toDegrees()+360) % 360; }; /** * Returns final bearing arriving at destination destination point from 'this' point; the final bearing * will differ from the initial bearing by varying degrees according to distance and latitude. * * @param {LatLon} point - Latitude/longitude of destination point. * @returns {number} Final bearing in degrees from north. 
* * @example * var p1 = new LatLon(52.205, 0.119), p2 = new LatLon(48.857, 2.351); * var b2 = p1.finalBearingTo(p2); // b2.toFixed(1): 157.9 */ LatLon.prototype.finalBearingTo = function(point) { if (!(point instanceof LatLon)) throw new TypeError('point is not LatLon object'); // get initial bearing from destination point to this point & reverse it by adding 180° return ( point.bearingTo(this)+180 ) % 360; }; /** * Returns the midpoint between 'this' point and the supplied point. * * @param {LatLon} point - Latitude/longitude of destination point. * @returns {LatLon} Midpoint between this point and the supplied point. * * @example * var p1 = new LatLon(52.205, 0.119), p2 = new LatLon(48.857, 2.351); * var pMid = p1.midpointTo(p2); // pMid.toString(): 50.5363°N, 001.2746°E */ LatLon.prototype.midpointTo = function(point) { if (!(point instanceof LatLon)) throw new TypeError('point is not LatLon object'); // see http://mathforum.org/library/drmath/view/51822.html for derivation var φ1 = this.lat.toRadians(), λ1 = this.lon.toRadians(); var φ2 = point.lat.toRadians(); var Δλ = (point.lon-this.lon).toRadians(); var Bx = Math.cos(φ2) * Math.cos(Δλ); var By = Math.cos(φ2) * Math.sin(Δλ); var φ3 = Math.atan2(Math.sin(φ1)+Math.sin(φ2), Math.sqrt( (Math.cos(φ1)+Bx)*(Math.cos(φ1)+Bx) + By*By) ); var λ3 = λ1 + Math.atan2(By, Math.cos(φ1) + Bx); λ3 = (λ3+3*Math.PI) % (2*Math.PI) - Math.PI; // normalise to -180..+180° return new LatLon(φ3.toDegrees(), λ3.toDegrees()); }; /** * Returns the destination point from 'this' point having travelled the given distance on the * given initial bearing (bearing normally varies around path followed). * * @param {number} distance - Distance travelled, in same units as earth radius (default: metres). * @param {number} bearing - Initial bearing in degrees from north. * @param {number} [radius=6371e3] - (Mean) radius of earth (defaults to radius in metres). * @returns {LatLon} Destination point. * * @example * var p1 = new LatLon(51.4778, -0.0015); * var p2 = p1.destinationPoint(7794, 300.7); // p2.toString(): 51.5135°N, 000.0983°W */ LatLon.prototype.destinationPoint = function(distance, bearing, radius) { radius = (radius === undefined) ? 6371e3 : Number(radius); // see http://williams.best.vwh.net/avform.htm#LL var δ = Number(distance) / radius; // angular distance in radians var θ = Number(bearing).toRadians(); var φ1 = this.lat.toRadians(); var λ1 = this.lon.toRadians(); var φ2 = Math.asin( Math.sin(φ1)*Math.cos(δ) + Math.cos(φ1)*Math.sin(δ)*Math.cos(θ) ); var λ2 = λ1 + Math.atan2(Math.sin(θ)*Math.sin(δ)*Math.cos(φ1), Math.cos(δ)-Math.sin(φ1)*Math.sin(φ2)); λ2 = (λ2+3*Math.PI) % (2*Math.PI) - Math.PI; // normalise to -180..+180° return new LatLon(φ2.toDegrees(), λ2.toDegrees()); }; /** * Returns the point of intersection of two paths defined by point and bearing. * * @param {LatLon} p1 - First point. * @param {number} brng1 - Initial bearing from first point. * @param {LatLon} p2 - Second point. * @param {number} brng2 - Initial bearing from second point. * @returns {LatLon} Destination point (null if no unique intersection defined). 
* * @example * var p1 = LatLon(51.8853, 0.2545), brng1 = 108.547; * var p2 = LatLon(49.0034, 2.5735), brng2 = 32.435; * var pInt = LatLon.intersection(p1, brng1, p2, brng2); // pInt.toString(): 50.9078°N, 004.5084°E */ LatLon.intersection = function(p1, brng1, p2, brng2) { if (!(p1 instanceof LatLon)) throw new TypeError('p1 is not LatLon object'); if (!(p2 instanceof LatLon)) throw new TypeError('p2 is not LatLon object'); // see http://williams.best.vwh.net/avform.htm#Intersection var φ1 = p1.lat.toRadians(), λ1 = p1.lon.toRadians(); var φ2 = p2.lat.toRadians(), λ2 = p2.lon.toRadians(); var θ13 = Number(brng1).toRadians(), θ23 = Number(brng2).toRadians(); var Δφ = φ2-φ1, Δλ = λ2-λ1; var δ12 = 2*Math.asin( Math.sqrt( Math.sin(Δφ/2)*Math.sin(Δφ/2) + Math.cos(φ1)*Math.cos(φ2)*Math.sin(Δλ/2)*Math.sin(Δλ/2) ) ); if (δ12 == 0) return null; // initial/final bearings between points var θ1 = Math.acos( ( Math.sin(φ2) - Math.sin(φ1)*Math.cos(δ12) ) / ( Math.sin(δ12)*Math.cos(φ1) ) ); if (isNaN(θ1)) θ1 = 0; // protect against rounding var θ2 = Math.acos( ( Math.sin(φ1) - Math.sin(φ2)*Math.cos(δ12) ) / ( Math.sin(δ12)*Math.cos(φ2) ) ); var θ12, θ21; if (Math.sin(λ2-λ1) > 0) { θ12 = θ1; θ21 = 2*Math.PI - θ2; } else { θ12 = 2*Math.PI - θ1; θ21 = θ2; } var α1 = (θ13 - θ12 + Math.PI) % (2*Math.PI) - Math.PI; // angle 2-1-3 var α2 = (θ21 - θ23 + Math.PI) % (2*Math.PI) - Math.PI; // angle 1-2-3 if (Math.sin(α1)==0 && Math.sin(α2)==0) return null; // infinite intersections if (Math.sin(α1)*Math.sin(α2) < 0) return null; // ambiguous intersection //α1 = Math.abs(α1); //α2 = Math.abs(α2); // ... Ed Williams takes abs of α1/α2, but seems to break calculation? var α3 = Math.acos( -Math.cos(α1)*Math.cos(α2) + Math.sin(α1)*Math.sin(α2)*Math.cos(δ12) ); var δ13 = Math.atan2( Math.sin(δ12)*Math.sin(α1)*Math.sin(α2), Math.cos(α2)+Math.cos(α1)*Math.cos(α3) ); var φ3 = Math.asin( Math.sin(φ1)*Math.cos(δ13) + Math.cos(φ1)*Math.sin(δ13)*Math.cos(θ13) ); var Δλ13 = Math.atan2( Math.sin(θ13)*Math.sin(δ13)*Math.cos(φ1), Math.cos(δ13)-Math.sin(φ1)*Math.sin(φ3) ); var λ3 = λ1 + Δλ13; λ3 = (λ3+3*Math.PI) % (2*Math.PI) - Math.PI; // normalise to -180..+180° return new LatLon(φ3.toDegrees(), λ3.toDegrees()); }; /** * Returns (signed) distance from ‘this’ point to great circle defined by start-point and end-point. * * @param {LatLon} pathStart - Start point of great circle path. * @param {LatLon} pathEnd - End point of great circle path. * @param {number} [radius=6371e3] - (Mean) radius of earth (defaults to radius in metres). * @returns {number} Distance to great circle (-ve if to left, +ve if to right of path). * * @example * var pCurrent = new LatLon(53.2611, -0.7972); * var p1 = new LatLon(53.3206, -1.7297), p2 = new LatLon(53.1887, 0.1334); * var d = pCurrent.crossTrackDistanceTo(p1, p2); // Number(d.toPrecision(4)): -307.5 */ LatLon.prototype.crossTrackDistanceTo = function(pathStart, pathEnd, radius) { if (!(pathStart instanceof LatLon)) throw new TypeError('pathStart is not LatLon object'); if (!(pathEnd instanceof LatLon)) throw new TypeError('pathEnd is not LatLon object'); radius = (radius === undefined) ? 
6371e3 : Number(radius); var δ13 = pathStart.distanceTo(this, radius)/radius; var θ13 = pathStart.bearingTo(this).toRadians(); var θ12 = pathStart.bearingTo(pathEnd).toRadians(); var dxt = Math.asin( Math.sin(δ13) * Math.sin(θ13-θ12) ) * radius; return dxt; }; /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /** * Returns the distance travelling from 'this' point to destination point along a rhumb line. * * @param {LatLon} point - Latitude/longitude of destination point. * @param {number} [radius=6371e3] - (Mean) radius of earth (defaults to radius in metres). * @returns {number} Distance in km between this point and destination point (same units as radius). * * @example * var p1 = new LatLon(51.127, 1.338), p2 = new LatLon(50.964, 1.853); * var d = p1.distanceTo(p2); // Number(d.toPrecision(4)): 40310 */ LatLon.prototype.rhumbDistanceTo = function(point, radius) { if (!(point instanceof LatLon)) throw new TypeError('point is not LatLon object'); radius = (radius === undefined) ? 6371e3 : Number(radius); // see http://williams.best.vwh.net/avform.htm#Rhumb var R = radius; var φ1 = this.lat.toRadians(), φ2 = point.lat.toRadians(); var Δφ = φ2 - φ1; var Δλ = Math.abs(point.lon-this.lon).toRadians(); // if dLon over 180° take shorter rhumb line across the anti-meridian: if (Math.abs(Δλ) > Math.PI) Δλ = Δλ>0 ? -(2*Math.PI-Δλ) : (2*Math.PI+Δλ); // on Mercator projection, longitude distances shrink by latitude; q is the 'stretch factor' // q becomes ill-conditioned along E-W line (0/0); use empirical tolerance to avoid it var Δψ = Math.log(Math.tan(φ2/2+Math.PI/4)/Math.tan(φ1/2+Math.PI/4)); var q = Math.abs(Δψ) > 10e-12 ? Δφ/Δψ : Math.cos(φ1); // distance is pythagoras on 'stretched' Mercator projection var δ = Math.sqrt(Δφ*Δφ + q*q*Δλ*Δλ); // angular distance in radians var dist = δ * R; return dist; }; /** * Returns the bearing from 'this' point to destination point along a rhumb line. * * @param {LatLon} point - Latitude/longitude of destination point. * @returns {number} Bearing in degrees from north. * * @example * var p1 = new LatLon(51.127, 1.338), p2 = new LatLon(50.964, 1.853); * var d = p1.rhumbBearingTo(p2); // d.toFixed(1): 116.7 */ LatLon.prototype.rhumbBearingTo = function(point) { if (!(point instanceof LatLon)) throw new TypeError('point is not LatLon object'); var φ1 = this.lat.toRadians(), φ2 = point.lat.toRadians(); var Δλ = (point.lon-this.lon).toRadians(); // if dLon over 180° take shorter rhumb line across the anti-meridian: if (Math.abs(Δλ) > Math.PI) Δλ = Δλ>0 ? -(2*Math.PI-Δλ) : (2*Math.PI+Δλ); var Δψ = Math.log(Math.tan(φ2/2+Math.PI/4)/Math.tan(φ1/2+Math.PI/4)); var θ = Math.atan2(Δλ, Δψ); return (θ.toDegrees()+360) % 360; }; /** * Returns the destination point having travelled along a rhumb line from 'this' point the given * distance on the given bearing. * * @param {number} distance - Distance travelled, in same units as earth radius (default: metres). * @param {number} bearing - Bearing in degrees from north. * @param {number} [radius=6371e3] - (Mean) radius of earth (defaults to radius in metres). * @returns {LatLon} Destination point. * * @example * var p1 = new LatLon(51.127, 1.338); * var p2 = p1.rhumbDestinationPoint(40300, 116.7); // p2.toString(): 50.9642°N, 001.8530°E */ LatLon.prototype.rhumbDestinationPoint = function(distance, bearing, radius) { radius = (radius === undefined) ? 
6371e3 : Number(radius); var δ = Number(distance) / radius; // angular distance in radians var φ1 = this.lat.toRadians(), λ1 = this.lon.toRadians(); var θ = Number(bearing).toRadians(); var Δφ = δ * Math.cos(θ); var φ2 = φ1 + Δφ; // check for some daft bugger going past the pole, normalise latitude if so if (Math.abs(φ2) > Math.PI/2) φ2 = φ2>0 ? Math.PI-φ2 : -Math.PI-φ2; var Δψ = Math.log(Math.tan(φ2/2+Math.PI/4)/Math.tan(φ1/2+Math.PI/4)); var q = Math.abs(Δψ) > 10e-12 ? Δφ / Δψ : Math.cos(φ1); // E-W course becomes ill-conditioned with 0/0 var Δλ = δ*Math.sin(θ)/q; var λ2 = λ1 + Δλ; λ2 = (λ2 + 3*Math.PI) % (2*Math.PI) - Math.PI; // normalise to -180..+180° return new LatLon(φ2.toDegrees(), λ2.toDegrees()); }; /** * Returns the loxodromic midpoint (along a rhumb line) between 'this' point and second point. * * @param {LatLon} point - Latitude/longitude of second point. * @returns {LatLon} Midpoint between this point and second point. * * @example * var p1 = new LatLon(51.127, 1.338), p2 = new LatLon(50.964, 1.853); * var p2 = p1.rhumbMidpointTo(p2); // p2.toString(): 51.0455°N, 001.5957°E */ LatLon.prototype.rhumbMidpointTo = function(point) { if (!(point instanceof LatLon)) throw new TypeError('point is not LatLon object'); // http://mathforum.org/kb/message.jspa?messageID=148837 var φ1 = this.lat.toRadians(), λ1 = this.lon.toRadians(); var φ2 = point.lat.toRadians(), λ2 = point.lon.toRadians(); if (Math.abs(λ2-λ1) > Math.PI) λ1 += 2*Math.PI; // crossing anti-meridian var φ3 = (φ1+φ2)/2; var f1 = Math.tan(Math.PI/4 + φ1/2); var f2 = Math.tan(Math.PI/4 + φ2/2); var f3 = Math.tan(Math.PI/4 + φ3/2); var λ3 = ( (λ2-λ1)*Math.log(f3) + λ1*Math.log(f2) - λ2*Math.log(f1) ) / Math.log(f2/f1); if (!isFinite(λ3)) λ3 = (λ1+λ2)/2; // parallel of latitude λ3 = (λ3 + 3*Math.PI) % (2*Math.PI) - Math.PI; // normalise to -180..+180° return new LatLon(φ3.toDegrees(), λ3.toDegrees()); }; /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /**<|fim▁hole|> * * @param {string} [format=dms] - Format point as 'd', 'dm', 'dms'. * @param {number} [dp=0|2|4] - Number of decimal places to use - default 0 for dms, 2 for dm, 4 for d. * @returns {string} Comma-separated latitude/longitude. */ LatLon.prototype.toString = function(format, dp) { if (format === undefined) format = 'dms'; return Dms.toLat(this.lat, format, dp) + ', ' + Dms.toLon(this.lon, format, dp); }; /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /** Extend Number object with method to convert numeric degrees to radians */ if (Number.prototype.toRadians === undefined) { Number.prototype.toRadians = function() { return this * Math.PI / 180; }; } /** Extend Number object with method to convert radians to numeric (signed) degrees */ if (Number.prototype.toDegrees === undefined) { Number.prototype.toDegrees = function() { return this * 180 / Math.PI; }; }<|fim▁end|>
* Returns a string representation of 'this' point, formatted as degrees, degrees+minutes, or * degrees+minutes+seconds.
<|file_name|>message_file_reader.cc<|end_file_name|><|fim▁begin|>// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <limits.h> #include <stddef.h> #include <stdint.h> #include "base/files/file_path.h" #include "base/files/memory_mapped_file.h" #include "base/logging.h" #include "base/macros.h" #include "base/memory/ptr_util.h" #include "base/strings/string_piece.h" #include "ipc/ipc_message.h" #include "tools/ipc_fuzzer/message_lib/message_cracker.h" #include "tools/ipc_fuzzer/message_lib/message_file.h" #include "tools/ipc_fuzzer/message_lib/message_file_format.h" #include "tools/ipc_fuzzer/message_lib/message_names.h" namespace ipc_fuzzer { namespace { // Helper class to read IPC message file into a MessageVector and // fix message types. class Reader { public: Reader(const base::FilePath& path); bool Read(MessageVector* messages); private: template <typename T> bool CutObject(const T** object); // Reads the header, checks magic and version. bool ReadHeader(); bool MapFile(); bool ReadMessages(); // Last part of the file is a string table for message names. bool ReadStringTable(); // Reads type <-> name mapping into name_map_. References string table. bool ReadNameTable(); // Removes obsolete messages from the vector. bool RemoveUnknownMessages(); // Does type -> name -> correct_type fixup. void FixMessageTypes(); // Raw data. base::FilePath path_; base::MemoryMappedFile mapped_file_; base::StringPiece file_data_; base::StringPiece string_table_; // Parsed data. const FileHeader* header_; MessageVector* messages_; MessageNames name_map_; DISALLOW_COPY_AND_ASSIGN(Reader); }; Reader::Reader(const base::FilePath& path) : path_(path), header_(NULL), messages_(NULL) { } template <typename T> bool Reader::CutObject(const T** object) { if (file_data_.size() < sizeof(T)) { LOG(ERROR) << "Unexpected EOF."; return false; } *object = reinterpret_cast<const T*>(file_data_.data()); file_data_.remove_prefix(sizeof(T)); return true; } bool Reader::ReadHeader() { if (!CutObject<FileHeader>(&header_)) return false; if (header_->magic != FileHeader::kMagicValue) { LOG(ERROR) << path_.value() << " is not an IPC message file."; return false; } if (header_->version != FileHeader::kCurrentVersion) { LOG(ERROR) << "Wrong version for message file " << path_.value() << ". " << "File version is " << header_->version << ", " << "current version is " << FileHeader::kCurrentVersion << "."; return false; } return true; } bool Reader::MapFile() { if (!mapped_file_.Initialize(path_)) { LOG(ERROR) << "Failed to map testcase: " << path_.value(); return false; } const char* data = reinterpret_cast<const char*>(mapped_file_.data()); file_data_ = base::StringPiece(data, mapped_file_.length()); return true; } bool Reader::ReadMessages() { for (size_t i = 0; i < header_->message_count; ++i) { const char* begin = file_data_.begin(); const char* end = file_data_.end(); IPC::Message::NextMessageInfo info; IPC::Message::FindNext(begin, end, &info); if (!info.message_found) { LOG(ERROR) << "Failed to parse message."; return false; } CHECK_EQ(info.message_end, info.pickle_end); size_t msglen = info.message_end - begin; if (msglen > INT_MAX) { LOG(ERROR) << "Message too large."; return false; } // Copy is necessary to fix message type later. 
IPC::Message const_message(begin, msglen); messages_->push_back(std::make_unique<IPC::Message>(const_message)); file_data_.remove_prefix(msglen); } return true; } bool Reader::ReadStringTable() { size_t name_count = header_->name_count; if (!name_count) return true; if (name_count > file_data_.size() / sizeof(NameTableEntry)) { LOG(ERROR) << "Invalid name table size: " << name_count; return false; } size_t string_table_offset = name_count * sizeof(NameTableEntry); string_table_ = file_data_.substr(string_table_offset); if (string_table_.empty()) { LOG(ERROR) << "Missing string table."; return false; } if (string_table_.end()[-1] != '\0') { LOG(ERROR) << "String table doesn't end with NUL."; return false; } return true; } bool Reader::ReadNameTable() { for (size_t i = 0; i < header_->name_count; ++i) { const NameTableEntry* entry; if (!CutObject<NameTableEntry>(&entry)) return false; size_t offset = entry->string_table_offset; if (offset >= string_table_.size()) { LOG(ERROR) << "Invalid string table offset: " << offset; return false; } name_map_.Add(entry->type, string_table_.data() + offset); } return true; } bool Reader::RemoveUnknownMessages() { MessageVector::iterator it = messages_->begin(); while (it != messages_->end()) { uint32_t type = (*it)->type(); if (!name_map_.TypeExists(type)) { LOG(ERROR) << "Missing name table entry for type " << type; return false; } const std::string& name = name_map_.TypeToName(type); if (!MessageNames::GetInstance()->NameExists(name)) { LOG(WARNING) << "Unknown message " << name; it = messages_->erase(it); } else { ++it; } } return true; } // Message types are based on line numbers, so a minor edit of *_messages.h // changes the types of messages in that file. The types are fixed here to // increase the lifetime of message files. This is only a partial fix because // message arguments and structure layouts can change as well. void Reader::FixMessageTypes() { for (const auto& message : *messages_) { uint32_t type = message->type(); const std::string& name = name_map_.TypeToName(type); uint32_t correct_type = MessageNames::GetInstance()->NameToType(name); if (type != correct_type) MessageCracker::SetMessageType(message.get(), correct_type); } }<|fim▁hole|>bool Reader::Read(MessageVector* messages) { messages_ = messages; if (!MapFile()) return false; if (!ReadHeader()) return false; if (!ReadMessages()) return false; if (!ReadStringTable()) return false; if (!ReadNameTable()) return false; if (!RemoveUnknownMessages()) return false; FixMessageTypes(); return true; } } // namespace bool MessageFile::Read(const base::FilePath& path, MessageVector* messages) { Reader reader(path); return reader.Read(messages); } } // namespace ipc_fuzzer<|fim▁end|>
<|file_name|>to-oblivion-and-beyond.rs<|end_file_name|><|fim▁begin|><|fim▁hole|> loop { i += 1000; print!("{}..", i); if i % 10_000 == 0 { print!{"\n"} } } }<|fim▁end|>
fn main() { let mut i: u16 = 0; print!("{}..", i);
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python from distutils.core import setup, Extension poppler_install_path = '/usr' import multivio setup( name='multivio', version=multivio.__version__, description='Multivio server.', long_description='''Multivio is a project...''', license=multivio.__license__, url='http://www.multivio.org', ext_modules=[Extension('multivio/poppler/_mypoppler', ['multivio/poppler/mypoppler.i'], swig_opts=['-c++', '-modern', '-I%s/include' % poppler_install_path], extra_compile_args=['-I%s/include/poppler' % poppler_install_path], extra_link_args=['-lpoppler'])], py_modules=['multivio.poppler.mypoppler'], packages=[ 'multivio' ], scripts=[ 'tools/multivio_server.py', 'tools/mvo_config_example.py' ], keywords=['multivio'], classifiers=[ 'Development Status :: Beta', 'Environment :: Console', 'Intended Audience :: Developers',<|fim▁hole|> 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Internal', ], install_requires=[ 'PIL>=1.1.7' ] )<|fim▁end|>
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>#[macro_use] extern crate serde_derive; extern crate clap; extern crate hyper; extern crate reqwest; extern crate serde_json; mod json_response; use crate::json_response::{BringResponse, ErrorConsignmentSet, Eventset, Packageset}; use clap::{App, AppSettings, Arg}; use reqwest::Error; #[tokio::main] async fn main() -> Result<(), reqwest::Error> { let input = get_content(parse_input()).await; deserialize(input.unwrap()).await; Ok(()) } async fn get_content(url: String) -> Result<String, Error> { let body = reqwest::get(&url).await?.text().await?; Ok(body) } fn parse_input() -> String { let matches = App::new("Rosten") .version("0.1.1") .author("Stian Eklund. <[email protected]>") .about("Get shipment status of your Bring & Posten packages") .setting(AppSettings::ArgRequiredElseHelp) .arg( Arg::with_name("track") .short("t") .long("track") .help("Get package status") .takes_value(true), ) .get_matches(); let input = matches.value_of("track").unwrap(); String::from(format!( "https://tracking.bring.com/api/v2/tracking.json?q={}", input ))<|fim▁hole|> match deserialized { Ok(deserialized) => { let sets = deserialized.consignment_set; for i in 0..sets.len() { let consignment_set = &sets[i]; for x in 0..consignment_set.package_set.len() { let package_set = &consignment_set.package_set[x]; match consignment_set.package_set[x] { Packageset { product_name: Some(ref product_name), package_number: Some(ref package_number), .. } => println!( "Product Name: {}\nPackage number: {}", product_name, package_number ), _ => println!("Not covered"), } for n in 0..package_set.event_set.len() { match package_set.event_set[n] { Eventset { description: Some(ref description), status: Some(ref status), .. } => println!("Description: {}\nStatus: {}", description, status), _ => println!("Not covered"), } } } } } Err(_) => deserialize_err(&buf).await, } } async fn deserialize_err(buf: &String) { let deserialized: Result<ErrorConsignmentSet, serde_json::Error> = serde_json::from_str(&buf); match deserialized { Ok(deserialized) => { eprintln!( "Error: {}, Code:{}", deserialized.error.message, deserialized.error.code ); } Err(e) => eprintln!( "Error while deserializing, please check if your tracking number is valid. {}", e ), } }<|fim▁end|>
} async fn deserialize(buf: String) { let deserialized: Result<BringResponse, serde_json::Error> = serde_json::from_str(buf.trim());
<|file_name|>WikidataInterfaceTest.java<|end_file_name|><|fim▁begin|>package com.formulasearchengine.mathosphere.mlp.text; import com.google.common.collect.Lists; import org.junit.Assert; import org.junit.Test; import java.io.File; import java.nio.charset.Charset; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; /** * Created by Moritz on 12.12.2015. */ public class WikidataInterfaceTest { String[] qIds = new String[] {"Q7913892", "Q12503", "Q3176558", "Q36161", "Q739925", "Q49008", "Q12503", "Q5156597", "Q11567", "Q1413083", "Q50700", "Q50701", "Q935944", "Q50701", "Q935944", "Q1144319", "Q50700", "Q3150667", "Q2256802", "Q729113", "Q21199", "Q33456", "Q44946", "Q230883", "Q21199", "Q21199", "Q50700", "Q50700", "Q50700", "Q50700", "Q378201", "Q302462", "Q3913", "Q3913", "Q3913", "Q12916", "Q12916", "Q11352", "Q2303886", "Q526719", "Q11348", "Q1027788", "Q12916", "Q12916", "Q946764", "Q19033", "Q126017", "Q230963", "Q2303886", "Q168698", "Q917476", "Q17285", "Q1663694", "Q1663694", "Q1663694", "Q1663694", "Q5597315", "Q5597315", "Q2303886", "Q46276", "Q2140940", "Q36253", "Q1096885", "Q189569", "Q3176558", "Q188889", "Q188889", "Q13824", "Q2111", "Q174102", "Q1440227", "Q167", "Q1515261", "Q1128317", "Q111059", "Q111059", "Q43260", "Q3150667", "Q43260", "Q11567", "Q2095069", "Q21199", "Q21199", "Q2303886", "Q2303886", "Q1137759", "Q193796", "Q12916", "Q6520159", "Q11471", "Q167", "Q12916", "Q12916", "Q21199", "Q21199", "Q3686031", "Q11471", "Q9492", "Q12916", "Q4440864", "Q12916", "Q18373", "Q2111", "Q1289248", "Q876346", "Q1289248", "Q464794", "Q193794", "Q192826", "Q11471", "Q929043", "Q2518235", "Q782566", "Q1074380", "Q1413083", "Q1413083", "Q1008943", "Q1256787", "Q13471665", "Q1289248", "Q2337858", "Q11348", "Q11348", "Q11348", "Q11471", "Q2918589", "Q1045555", "Q21199", "Q82580", "Q18848", "Q18848", "Q1952404", "Q11703678", "Q11703678", "Q2303886", "Q1096885", "Q4440864", "Q2362761", "Q11471", "Q3176558", "Q30006", "Q11567", "Q3258885", "Q131030", "Q21406831", "Q131030", "Q186290", "Q1591095", "Q11348", "Q3150667", "Q474715", "Q379825", "Q379825", "Q192704",<|fim▁hole|> "Q12916", "Q2627460", "Q2627460", "Q190109", "Q83478", "Q18848", "Q379825", "Q844128", "Q2608202", "Q29539", "Q11465", "Q176737", "Q176737", "Q176737", "Q1413083", "Q1759756", "Q900231", "Q39297", "Q39297", "Q39552", "Q39297", "Q1948412", "Q3554818", "Q21199", "Q12916", "Q168698", "Q50701", "Q11053", "Q12916", "Q12916", "Q12916", "Q12503", "Q12503", "Q176623", "Q10290214", "Q10290214", "Q505735", "Q1057607", "Q11471", "Q1057607", "Q5227327", "Q6901742", "Q159375", "Q2858846", "Q1134404", "Q12916", "Q4440864", "Q838611", "Q44946", "Q173817", "Q12916", "Q21199", "Q12916", "Q190056", "Q10290214", "Q10290214", "Q506041", "Q2858846"}; @Test public void testGetAliases() throws Exception { for (String qid : qIds) { Path file = Paths.get(File.createTempFile("temp", Long.toString(System.nanoTime())).getPath()); List<String> aliases = WikidataInterface.getAliases(qid); aliases = aliases.stream().map(a -> "\"" + a + "\"").collect(Collectors.toList()); Files.write(file, aliases, Charset.forName("UTF-8")); } } @Test public void testGetEntities() throws Exception { final ArrayList<String> expected = Lists.newArrayList("Q12916"); Assert.assertEquals(expected.get(0), WikidataInterface.getEntities("real number").get(0)); } }<|fim▁end|>
"Q44432", "Q44432", "Q319913", "Q12916",
<|file_name|>kerio-api.js<|end_file_name|><|fim▁begin|>class Client { constructor(http_client){ this.http_client = http_client this.method_list = [] } xyz() { return this.http_client } } function chainable_client () {<|fim▁hole|> chainable_method = require('./chainable_method.js') return chainable_method(new Client(http_client), true) } module.exports = chainable_client<|fim▁end|>
HttpClient = require('./http_client.js') http_client = new HttpClient(arguments[0])
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 # Copyright (c) 2014-2016 Thomas Jost and the Contributors # # This file is part of git-annex-remote-hubic. # # git-annex-remote-hubic is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) any # later version. # # git-annex-remote-hubic is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # git-annex-remote-hubic. If not, see <http://www.gnu.org/licenses/>. from setuptools import setup, find_packages setup(name="git-annex-remote-hubic", version="0.3.2", description="A git-annex special remote for hubiC", long_description=open("README.md", "r").read(), author="Thomas Jost", author_email="[email protected]", url="https://github.com/Schnouki/git-annex-remote-hubic", packages=find_packages(), install_requires=[ "python-dateutil",<|fim▁hole|> ], entry_points={ "console_scripts": [ "git-annex-remote-hubic = hubic_remote.main:main", "git-annex-remote-hubic-migrate = hubic_remote.migrate:main", ], }, classifiers=[ "Development Status :: 4 - Beta", "Environment :: Plugins", "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", "Programming Language :: Python :: 2", "Topic :: System :: Archiving", ], )<|fim▁end|>
"python-swiftclient>=2.1.0", "rauth>=0.7",
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Copyright 2015-2018 Benjamin Fry <[email protected]> // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. //! `Server` component for hosting a domain name servers operations.<|fim▁hole|> #[cfg(feature = "dns-over-https")] mod https_handler; mod request_handler; mod response_handler; mod server_future; mod timeout_stream; pub use self::request_handler::{Request, RequestHandler}; pub use self::response_handler::{ResponseHandle, ResponseHandler}; pub use self::server_future::ServerFuture; pub use self::timeout_stream::TimeoutStream;<|fim▁end|>
<|file_name|>database.hpp<|end_file_name|><|fim▁begin|>/* * database.hpp * * Created on: Sep 22, 2016 * Author: dan */ #ifndef SRC_TURBO_BROCCOLI_DATABASE_HPP_ #define SRC_TURBO_BROCCOLI_DATABASE_HPP_ #include <boost/filesystem.hpp> #include <turbo_broccoli/type/key.hpp> #include <turbo_broccoli/type/value.hpp> #include <turbo_broccoli/type/blob.hpp> #include <turbo_broccoli/type/tags.hpp> #include <turbo_broccoli/type/tagged_records.hpp> #include <turbo_broccoli/type/result_find.hpp> #include <turbo_broccoli/type/result_key.hpp> #include <turbo_broccoli/detail/utils.hpp> namespace turbo_broccoli { using types::blob; using types::db_key; using types::result_key; using types::result_find; struct database { database(const std::string& path) : path_(path) { namespace fs = boost::filesystem; if(!fs::exists(path_) ) { if(!fs::create_directories(path_)) { throw std::runtime_error("cannot open db, cannot create directory: " + path_.generic_string()); } }<|fim▁hole|> throw std::runtime_error("cannot open db, is not a directory: " + path_.generic_string()); } } } result_find find(const std::string& key) { return find(detail::calculate_key(key)); } result_find find(const db_key& key) { result_find result{}; result.success = false; if(!record_exists(key)) { std::cout << "no record with key" << types::to_string(key) << std::endl; return result; } auto record = read_record(key); if( is_blob(record)) { result.success = true; result.results.push_back(detail::deserialize<blob>(record.data)); } if(is_tag_list(record)) { auto records = detail::deserialize<types::tagged_records>(record.data); for(auto& t : records.keys) { auto k = types::string_to_key(t); if(record_exists(k)) { auto r = read_record(k); if( is_blob(r)) { result.success = true; result.results.push_back(detail::deserialize<blob>(r.data)); } else { std::cout << "inconsistent: record is not blob " << t << std::endl; } } else { std::cout << "inconsistent no record from tag list " << t << std::endl; } } } return result; } result_key store(const blob& new_blob) { static const result_key failed_result{false, turbo_broccoli::types::nil_key() }; if(record_exists(new_blob)) { /* * read all tags and update them! 
*/ auto r = read_record(new_blob.key_hash()); auto old_blob = detail::deserialize<blob>(r.data); types::tag_list::list_type to_delete = diff( old_blob.tags().tags, new_blob.tags().tags); types::tag_list::list_type to_add = diff( new_blob.tags().tags, old_blob.tags().tags); for(auto& t : to_add ) { update_tag_add(t, types::to_string(new_blob.key_hash())); } for(auto& t : to_delete ) { update_tag_remove(t, types::to_string(new_blob.key_hash())); } } else { detail::create_folder(path_, new_blob.key_hash()); for(auto& t : new_blob.tags().tags ) { update_tag_add(t, types::to_string(new_blob.key_hash())); } } write_blob(new_blob); return {true, new_blob.key_hash()}; return failed_result; } private: inline bool record_exists(const blob& b) { namespace fs = boost::filesystem; return fs::exists(detail::to_filename(path_, b.key_hash())); } inline bool record_exists(const db_key& k) { namespace fs = boost::filesystem; return fs::exists(detail::to_filename(path_, k)); } inline void write_blob(const blob& b) { namespace fs = boost::filesystem; types::value_t v; v.data = detail::serialize(b); v.reccord_type = types::value_type::blob; v.key = b.key(); detail::create_folder(path_, b.key_hash()); detail::write_file(detail::to_filename(path_, b.key_hash()).generic_string(), detail::serialize(v)); } inline types::value_t read_record(const db_key& k) { namespace fs = boost::filesystem; auto tmp = detail::read_file(detail::to_filename(path_, k).generic_string() ); return detail::deserialize<types::value_t>(tmp); } inline void update_tag_add(const std::string& tag_name, const std::string& record_key) { auto tag_key = detail::calculate_key(tag_name); types::value_t v; types::tagged_records records; if(record_exists(tag_key)) { v = read_record(tag_key); if(types::is_tag_list(v)) { records = detail::deserialize<types::tagged_records>(v.data); for(auto& r : records.keys) { if(record_key.compare(r) == 0) { return; } } records.keys.push_back(record_key); } else { throw std::runtime_error("record exissts and is not a tagged_list: " + tag_name); } } else { records.keys.push_back(record_key); v.key = tag_name; v.reccord_type = types::value_type::tag_list; v.data = detail::serialize(records); detail::create_folder(path_, tag_key); } v.data = detail::serialize(records); detail::write_file(detail::to_filename(path_, tag_key).generic_string(), detail::serialize(v)); } inline void update_tag_remove(const std::string& tag_name, const std::string& record_key) { auto tag_key = detail::calculate_key(tag_name); types::value_t v = read_record(tag_key); if(types::is_tag_list(v)) { types::tagged_records records = detail::deserialize<types::tagged_records>(v.data); records.keys.erase(std::remove(records.keys.begin(), records.keys.end(), record_key), records.keys.end()); v.data = detail::serialize(records); detail::write_file(detail::to_filename(path_, tag_key).generic_string(), detail::serialize(v)); } } /* * \brief return list of all elements that are only in a * a{0, 1, 2, 3, 4} * b{3, 4, 5, 6, 7} * d{0, 1, 2} */ inline std::vector<std::string> diff(const std::vector<std::string>& a, const std::vector<std::string>& b) { std::vector<std::string> d; for(auto& a_i : a) { bool contains_b_i{false}; for(auto& b_i : b) { if(a_i.compare(b_i) == 0) { contains_b_i = true; break; } } if(!contains_b_i) { d.push_back(a_i); } } return d; } using path_t = boost::filesystem::path; path_t path_; }; } #endif /* SRC_TURBO_BROCCOLI_DATABASE_HPP_ */<|fim▁end|>
else { if(!fs::is_directory(path_)) {
<|file_name|>recorder_window.js<|end_file_name|><|fim▁begin|>const { BrowserWindow } = require('electron'); const path = require('path'); class RecorderWindow { constructor() {<|fim▁hole|> let htmlPath = 'file://' + path.join(__dirname, '..') + '/pages/recorder_window.html' this.window = new BrowserWindow({ show: false, height: 400, width: 600, minHeight: 200, minWidth: 200, frame: false, hasShadow: false, alwaysOnTop: true, transparent: true, resizable: true }); this.window.loadURL(htmlPath); } disable() { this.window.setResizable(false); this.window.setIgnoreMouseEvents(true); } enable() { this.window.setResizable(true); this.window.setIgnoreMouseEvents(false); } } module.exports = RecorderWindow;<|fim▁end|>
<|file_name|>dialogs_jy.py<|end_file_name|><|fim▁begin|># Copyright 2008-2015 Nokia Solutions and Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time from java.awt import GridLayout from java.awt.event import WindowAdapter from javax.swing import JLabel, JOptionPane, JPanel, JPasswordField, JTextField from javax.swing.JOptionPane import PLAIN_MESSAGE, UNINITIALIZED_VALUE, \ YES_NO_OPTION, OK_CANCEL_OPTION, OK_OPTION, DEFAULT_OPTION class _SwingDialog(object): def __init__(self, pane): self._pane = pane def show(self): self._show_dialog(self._pane) return self._get_value(self._pane) def _show_dialog(self, pane): dialog = pane.createDialog(None, 'Robot Framework') dialog.setModal(False) dialog.setAlwaysOnTop(True) dialog.addWindowFocusListener(pane.focus_listener) dialog.show() while dialog.isShowing(): time.sleep(0.2) dialog.dispose() def _get_value(self, pane): value = pane.getInputValue() return value if value != UNINITIALIZED_VALUE else None class MessageDialog(_SwingDialog): def __init__(self, message): pane = WrappedOptionPane(message, PLAIN_MESSAGE, DEFAULT_OPTION) _SwingDialog.__init__(self, pane) class InputDialog(_SwingDialog): def __init__(self, message, default, hidden=False): self._input_field = JPasswordField() if hidden else JTextField() self._input_field.setText(default) self._input_field.selectAll() panel = JPanel(layout=GridLayout(2, 1)) panel.add(JLabel(message)) panel.add(self._input_field) pane = WrappedOptionPane(panel, PLAIN_MESSAGE, OK_CANCEL_OPTION) pane.set_focus_listener(self._input_field) _SwingDialog.__init__(self, pane) def _get_value(self, pane): if pane.getValue() != OK_OPTION: return None return self._input_field.getText() class SelectionDialog(_SwingDialog): def __init__(self, message, options): pane = WrappedOptionPane(message, PLAIN_MESSAGE, OK_CANCEL_OPTION) pane.setWantsInput(True) pane.setSelectionValues(options) _SwingDialog.__init__(self, pane) class PassFailDialog(_SwingDialog): def __init__(self, message): pane = WrappedOptionPane(message, PLAIN_MESSAGE, YES_NO_OPTION, None, ['PASS', 'FAIL'], 'PASS') _SwingDialog.__init__(self, pane) def _get_value(self, pane): return pane.getValue() == 'PASS' class WrappedOptionPane(JOptionPane): focus_listener = None def getMaxCharactersPerLineCount(self): return 120 def set_focus_listener(self, component): self.focus_listener = WindowFocusListener(component) class WindowFocusListener(WindowAdapter): def __init__(self, component): self.component = component def windowGainedFocus(self, event):<|fim▁hole|><|fim▁end|>
self.component.requestFocusInWindow()
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright © 2015, Peter Atashian // Licensed under the MIT License <LICENSE.md> //! FFI bindings to difxapi. #![no_std] #![experimental] extern crate winapi; use winapi::*; extern "system" {<|fim▁hole|><|fim▁end|>
}
<|file_name|>embedder.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ //! Implements the global methods required by Servo (not window/gl/compositor related). use crate::events_loop::EventsLoop; use servo::compositing::windowing::EmbedderMethods; use servo::embedder_traits::{EmbedderProxy, EventLoopWaker}; use servo::servo_config::pref; use std::cell::RefCell; use std::rc::Rc; use webxr::glwindow::GlWindowDiscovery; pub struct EmbedderCallbacks { events_loop: Rc<RefCell<EventsLoop>>, xr_discovery: Option<GlWindowDiscovery>, } impl EmbedderCallbacks { pub fn new( events_loop: Rc<RefCell<EventsLoop>>, xr_discovery: Option<GlWindowDiscovery>, ) -> EmbedderCallbacks { EmbedderCallbacks { events_loop, xr_discovery, } } } impl EmbedderMethods for EmbedderCallbacks { fn create_event_loop_waker(&mut self) -> Box<dyn EventLoopWaker> { self.events_loop.borrow().create_event_loop_waker() } fn register_webxr(<|fim▁hole|> ) { if pref!(dom.webxr.test) { xr.register_mock(webxr::headless::HeadlessMockDiscovery::new()); } else if let Some(xr_discovery) = self.xr_discovery.take() { xr.register(xr_discovery); } } }<|fim▁end|>
&mut self, xr: &mut webxr::MainThreadRegistry, _embedder_proxy: EmbedderProxy,
<|file_name|>wav_to_mp3.py<|end_file_name|><|fim▁begin|># YouTube Video: https://www.youtube.com/watch?v=4E7N7W1lUkU import os import pydub import glob<|fim▁hole|> mp3_file = os.path.splitext(wav_file)[0] + '.mp3' sound = pydub.AudioSegment.from_wav(wav_file) sound.export(mp3_file, format="mp3") os.remove(wav_file) print("Conversion Complete")<|fim▁end|>
wav_files = glob.glob('./*.wav') for wav_file in wav_files:
<|file_name|>get-site-comments.js<|end_file_name|><|fim▁begin|>import { createSelector } from '@automattic/state-utils'; import { filter, orderBy } from 'lodash'; import 'calypso/state/comments/init'; <|fim▁hole|>function filterCommentsByStatus( comments, status ) { return 'all' === status ? filter( comments, ( comment ) => 'approved' === comment.status || 'unapproved' === comment.status ) : filter( comments, ( comment ) => status === comment.status ); } /** * Returns list of loaded comments for a given site, filtered by status * * @param {object} state Redux state * @param {number} siteId Site for whose comments to find * @param {string} [status] Status to filter comments * @param {string} [order=asc] Order in which to sort filtered comments * @returns {Array<object>} Available comments for site, filtered by status */ export const getSiteComments = createSelector( ( state, siteId, status, order = 'asc' ) => { const comments = state.comments.items ?? {}; const parsedComments = Object.keys( comments ) .filter( ( key ) => parseInt( key.split( '-', 1 ), 10 ) === siteId ) .reduce( ( list, key ) => [ ...list, ...comments[ key ] ], [] ); return status ? orderBy( filterCommentsByStatus( parsedComments, status ), 'date', order ) : orderBy( parsedComments, 'date', order ); }, ( state ) => [ state.comments.items ] );<|fim▁end|>
<|file_name|>FnParameterisedHolderTest.py<|end_file_name|><|fim▁begin|>########################################################################## # # Copyright (c) 2008-2015, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## from __future__ import with_statement import os import unittest import maya.cmds import maya.OpenMaya import IECore import IECoreMaya class TestOp( IECore.Op ) : def __init__( self ) : IECore.Op.__init__( self, "Tests stuff", IECore.IntParameter( name = "result", description = "", defaultValue = 0 ) ) self.parameters().addParameters( [ IECore.IntParameter( name = "i", description = "i", defaultValue = 1 ), ] ) def doOperation( self, args ) : return IECore.IntData( 10 ) class FnParameterisedHolderTest( IECoreMaya.TestCase ) : def test( self ) : node = maya.cmds.createNode( "ieOpHolderNode" ) fnPH = IECoreMaya.FnParameterisedHolder( node ) self.assertEqual( fnPH.getParameterised(), ( None, "", 0, "" ) ) op = TestOp() fnPH.setParameterised( op ) parameterisedTuple = fnPH.getParameterised() self.assert_( parameterisedTuple[0].isSame( op ) ) self.assertEqual( parameterisedTuple[1:], ( "", 0, "" ) ) self.assertEqual( parameterisedTuple[0](), IECore.IntData( 10 ) ) iPlug = fnPH.parameterPlug( op["i"] ) self.assert_( isinstance( iPlug, maya.OpenMaya.MPlug ) ) self.assert_( iPlug.asInt(), 1 ) self.assert_( fnPH.plugParameter( iPlug ).isSame( op["i"] ) ) self.assert_( fnPH.plugParameter( iPlug.name() ).isSame( op["i"] ) ) iPlug.setInt( 2 ) fnPH.setParameterisedValue( op["i"] ) self.assert_( op["i"].getNumericValue(), 2 ) op["i"].setNumericValue( 3 ) fnPH.setNodeValue( op["i"] ) self.assert_( iPlug.asInt(), 3 ) iPlug.setInt( 10 ) fnPH.setParameterisedValues() self.assert_( op["i"].getNumericValue(), 10 ) op["i"].setNumericValue( 11 ) fnPH.setNodeValues() self.assert_( iPlug.asInt(), 11 ) def testFullPathName( self ) : node = maya.cmds.createNode( "ieOpHolderNode" ) fnPH = IECoreMaya.FnParameterisedHolder( node ) self.assertEqual( node, 
fnPH.fullPathName() ) procedural = maya.cmds.createNode( "ieProceduralHolder" ) fnPH = IECoreMaya.FnParameterisedHolder( procedural ) self.assertEqual( maya.cmds.ls( procedural, long=True )[0], fnPH.fullPathName() ) def testPlugParameterWithNonUniqueNames( self ) : node = maya.cmds.createNode( "ieProceduralHolder" ) node2 = maya.cmds.createNode( "ieProceduralHolder" ) node = maya.cmds.ls( maya.cmds.rename( node, "iAmNotUnique" ), long=True )[0] node2 = maya.cmds.ls( maya.cmds.rename( node2, "iAmNotUnique" ), long=True )[0] fnPH = IECoreMaya.FnProceduralHolder( node ) proc = IECore.ReadProcedural() fnPH.setParameterised( proc ) self.assert_( fnPH.getParameterised()[0].isSame( proc ) ) fnPH2 = IECoreMaya.FnProceduralHolder( node2 ) proc2 = IECore.ReadProcedural() fnPH2.setParameterised( proc2 ) self.assert_( fnPH2.getParameterised()[0].isSame( proc2 ) ) # check that each function set references a different node. self.assert_( fnPH.object()!=fnPH2.object() ) self.assert_( fnPH.fullPathName()!=fnPH2.fullPathName() ) plug = fnPH.parameterPlug( proc["motion"]["blur"] ) plug2 = fnPH2.parameterPlug( proc2["motion"]["blur"] ) self.assertEqual( plug.node(), fnPH.object() ) self.assertEqual( plug2.node(), fnPH2.object() ) self.assertEqual( fnPH.parameterPlugPath( proc["motion"]["blur"] ), "|transform1|iAmNotUnique.parm_motion_blur" ) self.assertEqual( fnPH2.parameterPlugPath( proc2["motion"]["blur"] ), "|transform2|iAmNotUnique.parm_motion_blur" ) self.assert_( maya.cmds.isConnected( "time1.outTime", fnPH.parameterPlugPath( proc["files"]["frame"] ), iuc=True ) ) self.assert_( maya.cmds.isConnected( "time1.outTime", fnPH2.parameterPlugPath( proc2["files"]["frame"] ), iuc=True ) ) def testSetNodeValuesUndo( self ) : # make an opholder ########################################################################## node = maya.cmds.createNode( "ieOpHolderNode" ) fnPH = IECoreMaya.FnParameterisedHolder( node ) op = IECore.ClassLoader.defaultOpLoader().load( "parameterTypes", 1 )() op.parameters().removeParameter( "m" ) # no color4f support in maya fnPH.setParameterised( op ) # check we have the starting values we expect ########################################################################### self.assertEqual( op["a"].getNumericValue(), 1 ) aPlug = fnPH.parameterPlug( op["a"] ) self.assertEqual( aPlug.asInt(), 1 ) self.assertEqual( op["b"].getNumericValue(), 2 ) bPlug = fnPH.parameterPlug( op["b"] ) self.assertEqual( bPlug.asFloat(), 2 ) self.assertEqual( op["c"].getNumericValue(), 3 ) cPlug = fnPH.parameterPlug( op["c"] ) self.assertEqual( cPlug.asDouble(), 3 ) self.assertEqual( op["d"].getTypedValue(), "ssss" ) dPlug = fnPH.parameterPlug( op["d"] ) self.assertEqual( dPlug.asString(), "ssss" ) self.assertEqual( op["e"].getValue(), IECore.IntVectorData( [ 4, -1, 2 ] ) ) ePlug = fnPH.parameterPlug( op["e"] ) fnE = maya.OpenMaya.MFnIntArrayData( ePlug.asMObject() ) self.assertEqual( fnE[0], 4 ) self.assertEqual( fnE[1], -1 ) self.assertEqual( fnE[2], 2 ) self.assertEqual( fnE.length(), 3 ) self.assertEqual( op["f"].getValue(), IECore.StringVectorData( [ "one", "two", "three" ] ) ) fPlug = fnPH.parameterPlug( op["f"] ) fnF = maya.OpenMaya.MFnStringArrayData( fPlug.asMObject() ) fList = [] fnF.copyTo( fList ) self.assertEqual( fList, [ "one", "two", "three" ] ) self.assertEqual( op["g"].getTypedValue(), IECore.V2f( 1, 2 ) ) gPlug = fnPH.parameterPlug( op["g"] ) self.assertEqual( gPlug.child( 0 ).asFloat(), 1 ) self.assertEqual( gPlug.child( 1 ).asFloat(), 2 ) self.assertEqual( op["h"].getTypedValue(), 
IECore.V3f( 1, 1, 1 ) ) hPlug = fnPH.parameterPlug( op["h"] ) self.assertEqual( hPlug.child( 0 ).asFloat(), 1 ) self.assertEqual( hPlug.child( 1 ).asFloat(), 1 ) self.assertEqual( hPlug.child( 2 ).asFloat(), 1 ) self.assertEqual( op["q"].getTypedValue(), False ) qPlug = fnPH.parameterPlug( op["q"] ) self.assertEqual( qPlug.asBool(), False ) self.assertEqual( op["t"].getTypedValue(), IECore.Box3f( IECore.V3f( -1 ), IECore.V3f( 1 ) ) ) tPlug = fnPH.parameterPlug( op["t"] ) self.assertEqual( tPlug.child( 0 ).child( 0 ).asFloat(), -1 ) self.assertEqual( tPlug.child( 0 ).child( 1 ).asFloat(), -1 ) self.assertEqual( tPlug.child( 0 ).child( 2 ).asFloat(), -1 ) self.assertEqual( tPlug.child( 1 ).child( 0 ).asFloat(), 1 ) self.assertEqual( tPlug.child( 1 ).child( 1 ).asFloat(), 1 ) self.assertEqual( tPlug.child( 1 ).child( 2 ).asFloat(), 1 ) # change all the node values, making sure undo is enabled ############################################################################# self.assert_( maya.cmds.undoInfo( query=True, state=True ) ) # change the parameters op["a"].setNumericValue( 10 ) op["b"].setNumericValue( 100 ) op["c"].setNumericValue( 12 ) op["d"].setTypedValue( "a" ) op["e"].setValue( IECore.IntVectorData( [ 1, 2, 3, 4 ] ) ) op["f"].setValue( IECore.StringVectorData( [ "hi" ] ) ) op["g"].setTypedValue( IECore.V2f( 10, 100 ) ) op["h"].setTypedValue( IECore.V3f( -1, -2, -3 ) ) op["q"].setTypedValue( True ) op["t"].setTypedValue( IECore.Box3f( IECore.V3f( -10 ), IECore.V3f( 0 ) ) ) # check they are changed self.assertEqual( op["a"].getNumericValue(), 10 ) self.assertEqual( op["b"].getNumericValue(), 100 ) self.assertEqual( op["c"].getNumericValue(), 12 ) self.assertEqual( op["d"].getTypedValue(), "a" ) self.assertEqual( op["e"].getValue(), IECore.IntVectorData( [ 1, 2, 3, 4 ] ) ) self.assertEqual( op["f"].getValue(), IECore.StringVectorData( [ "hi" ] ) ) self.assertEqual( op["g"].getTypedValue(), IECore.V2f( 10, 100 ) ) self.assertEqual( op["h"].getTypedValue(), IECore.V3f( -1, -2, -3 ) ) self.assertEqual( op["q"].getTypedValue(), True ) self.assertEqual( op["t"].getTypedValue(), IECore.Box3f( IECore.V3f( -10 ), IECore.V3f( 0 ) ) ) # push the changes onto the node fnPH.setNodeValues() # check the node values are changed ############################################################################# self.assertEqual( aPlug.asInt(), 10 ) self.assertEqual( bPlug.asFloat(), 100 ) self.assertEqual( cPlug.asDouble(), 12 ) self.assertEqual( dPlug.asString(), "a" ) fnE = maya.OpenMaya.MFnIntArrayData( ePlug.asMObject() ) self.assertEqual( fnE[0], 1 ) self.assertEqual( fnE[1], 2 ) self.assertEqual( fnE[2], 3 ) self.assertEqual( fnE[3], 4 ) self.assertEqual( fnE.length(), 4 ) fnF = maya.OpenMaya.MFnStringArrayData( fPlug.asMObject() ) fList = [] fnF.copyTo( fList ) self.assertEqual( fList, [ "hi" ] ) self.assertEqual( gPlug.child( 0 ).asFloat(), 10 ) self.assertEqual( gPlug.child( 1 ).asFloat(), 100 ) self.assertEqual( hPlug.child( 0 ).asFloat(), -1 ) self.assertEqual( hPlug.child( 1 ).asFloat(), -2 ) self.assertEqual( hPlug.child( 2 ).asFloat(), -3 ) self.assertEqual( qPlug.asBool(), True ) self.assertEqual( tPlug.child( 0 ).child( 0 ).asFloat(), -10 ) self.assertEqual( tPlug.child( 0 ).child( 1 ).asFloat(), -10 ) self.assertEqual( tPlug.child( 0 ).child( 2 ).asFloat(), -10 ) self.assertEqual( tPlug.child( 1 ).child( 0 ).asFloat(), 0 ) self.assertEqual( tPlug.child( 1 ).child( 1 ).asFloat(), 0 ) self.assertEqual( tPlug.child( 1 ).child( 2 ).asFloat(), 0 ) # check that the parameter values are unchanged 
in the process of # pushing them to maya ############################################################################# self.assertEqual( op["a"].getNumericValue(), 10 ) self.assertEqual( op["b"].getNumericValue(), 100 ) self.assertEqual( op["c"].getNumericValue(), 12 ) self.assertEqual( op["d"].getTypedValue(), "a" ) self.assertEqual( op["e"].getValue(), IECore.IntVectorData( [ 1, 2, 3, 4 ] ) ) self.assertEqual( op["f"].getValue(), IECore.StringVectorData( [ "hi" ] ) ) self.assertEqual( op["g"].getTypedValue(), IECore.V2f( 10, 100 ) ) self.assertEqual( op["h"].getTypedValue(), IECore.V3f( -1, -2, -3 ) ) self.assertEqual( op["q"].getTypedValue(), True ) self.assertEqual( op["t"].getTypedValue(), IECore.Box3f( IECore.V3f( -10 ), IECore.V3f( 0 ) ) ) # undo, and check the node values are back to before ############################################################################# maya.cmds.undo() self.assertEqual( aPlug.asInt(), 1 ) self.assertEqual( bPlug.asFloat(), 2 ) self.assertEqual( cPlug.asDouble(), 3 ) self.assertEqual( dPlug.asString(), "ssss" ) fnE = maya.OpenMaya.MFnIntArrayData( ePlug.asMObject() ) self.assertEqual( fnE[0], 4 ) self.assertEqual( fnE[1], -1 ) self.assertEqual( fnE[2], 2 ) self.assertEqual( fnE.length(), 3 ) fnF = maya.OpenMaya.MFnStringArrayData( fPlug.asMObject() ) fList = [] fnF.copyTo( fList ) self.assertEqual( fList, [ "one", "two", "three" ] ) self.assertEqual( gPlug.child( 0 ).asFloat(), 1 ) self.assertEqual( gPlug.child( 1 ).asFloat(), 2 ) self.assertEqual( hPlug.child( 0 ).asFloat(), 1 ) self.assertEqual( hPlug.child( 1 ).asFloat(), 1 ) self.assertEqual( hPlug.child( 2 ).asFloat(), 1 ) self.assertEqual( qPlug.asBool(), False ) self.assertEqual( tPlug.child( 0 ).child( 0 ).asFloat(), -1 ) self.assertEqual( tPlug.child( 0 ).child( 1 ).asFloat(), -1 ) self.assertEqual( tPlug.child( 0 ).child( 2 ).asFloat(), -1 ) self.assertEqual( tPlug.child( 1 ).child( 0 ).asFloat(), 1 ) self.assertEqual( tPlug.child( 1 ).child( 1 ).asFloat(), 1 ) self.assertEqual( tPlug.child( 1 ).child( 2 ).asFloat(), 1 ) # check that the parameter values are unchanged in the undo process ############################################################################# self.assertEqual( op["a"].getNumericValue(), 10 ) self.assertEqual( op["b"].getNumericValue(), 100 ) self.assertEqual( op["c"].getNumericValue(), 12 ) self.assertEqual( op["d"].getTypedValue(), "a" ) self.assertEqual( op["e"].getValue(), IECore.IntVectorData( [ 1, 2, 3, 4 ] ) ) self.assertEqual( op["f"].getValue(), IECore.StringVectorData( [ "hi" ] ) ) self.assertEqual( op["g"].getTypedValue(), IECore.V2f( 10, 100 ) ) self.assertEqual( op["h"].getTypedValue(), IECore.V3f( -1, -2, -3 ) ) self.assertEqual( op["q"].getTypedValue(), True ) self.assertEqual( op["t"].getTypedValue(), IECore.Box3f( IECore.V3f( -10 ), IECore.V3f( 0 ) ) ) # redo, and check they are changed again ############################################################################# maya.cmds.redo() self.assertEqual( aPlug.asInt(), 10 ) self.assertEqual( bPlug.asFloat(), 100 ) self.assertEqual( cPlug.asDouble(), 12 ) self.assertEqual( dPlug.asString(), "a" ) fnE = maya.OpenMaya.MFnIntArrayData( ePlug.asMObject() ) self.assertEqual( fnE[0], 1 ) self.assertEqual( fnE[1], 2 ) self.assertEqual( fnE[2], 3 ) self.assertEqual( fnE[3], 4 ) self.assertEqual( fnE.length(), 4 ) fnF = maya.OpenMaya.MFnStringArrayData( fPlug.asMObject() ) fList = [] fnF.copyTo( fList ) self.assertEqual( fList, [ "hi" ] ) self.assertEqual( gPlug.child( 0 ).asFloat(), 10 ) self.assertEqual( 
gPlug.child( 1 ).asFloat(), 100 ) self.assertEqual( hPlug.child( 0 ).asFloat(), -1 )<|fim▁hole|> self.assertEqual( qPlug.asBool(), True ) self.assertEqual( tPlug.child( 0 ).child( 0 ).asFloat(), -10 ) self.assertEqual( tPlug.child( 0 ).child( 1 ).asFloat(), -10 ) self.assertEqual( tPlug.child( 0 ).child( 2 ).asFloat(), -10 ) self.assertEqual( tPlug.child( 1 ).child( 0 ).asFloat(), 0 ) self.assertEqual( tPlug.child( 1 ).child( 1 ).asFloat(), 0 ) self.assertEqual( tPlug.child( 1 ).child( 2 ).asFloat(), 0 ) # check that the parameter values are unchanged in the redo process ############################################################################# self.assertEqual( op["a"].getNumericValue(), 10 ) self.assertEqual( op["b"].getNumericValue(), 100 ) self.assertEqual( op["c"].getNumericValue(), 12 ) self.assertEqual( op["d"].getTypedValue(), "a" ) self.assertEqual( op["e"].getValue(), IECore.IntVectorData( [ 1, 2, 3, 4 ] ) ) self.assertEqual( op["f"].getValue(), IECore.StringVectorData( [ "hi" ] ) ) self.assertEqual( op["g"].getTypedValue(), IECore.V2f( 10, 100 ) ) self.assertEqual( op["h"].getTypedValue(), IECore.V3f( -1, -2, -3 ) ) self.assertEqual( op["q"].getTypedValue(), True ) self.assertEqual( op["t"].getTypedValue(), IECore.Box3f( IECore.V3f( -10 ), IECore.V3f( 0 ) ) ) def testSetNodeValueUndo( self ) : p = IECore.Parameterised( "" ) p.parameters().addParameters( [ IECore.IntParameter( "i", "", 1 ), IECore.FloatParameter( "f", "", 2 ) ] ) node = maya.cmds.createNode( "ieParameterisedHolderLocator" ) fnOH = IECoreMaya.FnParameterisedHolder( node ) fnOH.setParameterised( p ) # check the start values are as expected self.assertEqual( p["i"].getNumericValue(), 1 ) self.assertEqual( p["f"].getNumericValue(), 2 ) self.assertEqual( fnOH.parameterPlug( p["i"] ).asInt(), 1 ) self.assertEqual( fnOH.parameterPlug( p["f"] ).asInt(), 2 ) # change both parameters self.assert_( maya.cmds.undoInfo( query=True, state=True ) ) p["i"].setNumericValue( 10 ) p["f"].setNumericValue( 11 ) self.assertEqual( p["i"].getNumericValue(), 10 ) self.assertEqual( p["f"].getNumericValue(), 11 ) self.assertEqual( fnOH.parameterPlug( p["i"] ).asInt(), 1 ) self.assertEqual( fnOH.parameterPlug( p["f"] ).asInt(), 2 ) # but push only one into maya fnOH.setNodeValue( p["i"] ) # and check we see what we expect self.assertEqual( p["i"].getNumericValue(), 10 ) self.assertEqual( p["f"].getNumericValue(), 11 ) self.assertEqual( fnOH.parameterPlug( p["i"] ).asInt(), 10 ) self.assertEqual( fnOH.parameterPlug( p["f"] ).asInt(), 2 ) # undo and check maya.cmds.undo() self.assertEqual( p["i"].getNumericValue(), 10 ) self.assertEqual( p["f"].getNumericValue(), 11 ) self.assertEqual( fnOH.parameterPlug( p["i"] ).asInt(), 1 ) self.assertEqual( fnOH.parameterPlug( p["f"] ).asInt(), 2 ) # redo and check maya.cmds.redo() self.assertEqual( p["i"].getNumericValue(), 10 ) self.assertEqual( p["f"].getNumericValue(), 11 ) self.assertEqual( fnOH.parameterPlug( p["i"] ).asInt(), 10 ) self.assertEqual( fnOH.parameterPlug( p["f"] ).asInt(), 2 ) def testExcessReferenceEdits( self ) : IECoreMaya.FnOpHolder.create( "testOp", "maths/multiply", 2 ) # Save the scene out so we can reference it maya.cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "referenceEditCounts.ma" ) ) referenceScene = maya.cmds.file( force = True, type = "mayaAscii", save = True ) # New scene, and read it in. 
maya.cmds.file( new = True, force = True ) maya.cmds.file( referenceScene, reference = True, namespace = "ns1" ) # Check there are no reference edits fnOH = IECoreMaya.FnOpHolder( 'ns1:testOp' ) op = fnOH.getOp() aPath = fnOH.parameterPlugPath( op["a"] ) bPath = fnOH.parameterPlugPath( op["b"] ) self.assertEqual( maya.cmds.getAttr( aPath ), 1 ) self.assertEqual( maya.cmds.getAttr( bPath ), 2 ) self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 0 ) # set values, but with no changes with fnOH.parameterModificationContext() : op["a"].setNumericValue( 1 ) op["b"].setNumericValue( 2 ) # Check the values are the same, and there are still no reference edits self.assertEqual( maya.cmds.getAttr( aPath ), 1 ) self.assertEqual( maya.cmds.getAttr( bPath ), 2 ) self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 0 ) # change a value to a genuinely new value with fnOH.parameterModificationContext() : op["a"].setNumericValue( 100 ) # Check the maya value is updated and there is 1 reference edit self.assertEqual( maya.cmds.getAttr( aPath ), 100 ) self.assertEqual( maya.cmds.getAttr( bPath ), 2 ) self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 1 ) # Undo and check there is still 1 reference edit. Ideally there would be none but # maya isn't that clever. maya.cmds.undo() self.assertEqual( maya.cmds.getAttr( aPath ), 1 ) self.assertEqual( maya.cmds.getAttr( bPath ), 2 ) self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 1 ) def testExcessClassParameterReferenceEdits( self ) : # Save a scene with a ClassParameter in it fnOH = IECoreMaya.FnOpHolder.create( "node", "classParameterTest", 1 ) op = fnOH.getOp() with fnOH.parameterModificationContext() : op["cp"].setClass( "maths/multiply", 1, "IECORE_OP_PATHS" ) self.assertEqual( op["cp"].getClass( True )[1:], ( "maths/multiply", 1, "IECORE_OP_PATHS" ) ) maya.cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "referenceEditCounts.ma" ) ) referenceScene = maya.cmds.file( force = True, type = "mayaAscii", save = True ) # And reference it back in to a new scene maya.cmds.file( new = True, force = True ) maya.cmds.file( referenceScene, reference = True, namespace = "ns1" ) self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 0 ) # Make a modification which does nothing and check that there are no reference edits fnOH = IECoreMaya.FnOpHolder( "ns1:node" ) with fnOH.parameterModificationContext() : pass self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 0 ) # Make a modification which happens to set things to the values they're already at and # check that there are no reference edits op = fnOH.getOp() with fnOH.parameterModificationContext() : op["cp"].setClass( "maths/multiply", 1, "IECORE_OP_PATHS" ) self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 0 ) def testExcessClassVectorParameterReferenceEdits( self ) : # Save a scene with a ClassParameter in it fnOH = IECoreMaya.FnOpHolder.create( "node", "classVectorParameterTest", 1 ) op = fnOH.getOp() with fnOH.parameterModificationContext() : op["cv"].setClasses( [ ( "mult", "maths/multiply", 1 ), ( "coIO", "compoundObjectInOut", 1 ), ] ) maya.cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "referenceEditCounts.ma" ) ) referenceScene = maya.cmds.file( force = True, type = "mayaAscii", save = True ) # And reference it back in to a new scene 
maya.cmds.file( new = True, force = True ) maya.cmds.file( referenceScene, reference = True, namespace = "ns1" ) self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 0 ) # Make a modification which does nothing and check that there are no reference edits fnOH = IECoreMaya.FnOpHolder( "ns1:node" ) with fnOH.parameterModificationContext() : pass self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 0 ) # Make a modification which happens to set things to the values they're already at and # check that there are no reference edits with fnOH.parameterModificationContext() : op["cv"].setClasses( [ ( "mult", "maths/multiply", 1 ), ( "coIO", "compoundObjectInOut", 1 ), ] ) self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 0 ) def testSetParameterValuesUsingContext( self ) : fnOH = IECoreMaya.FnOpHolder.create( "testOp", "maths/multiply", 2 ) op = fnOH.getOp() aPath = fnOH.parameterPlugPath( op["a"] ) self.assertEqual( maya.cmds.getAttr( aPath ), 1 ) with fnOH.parameterModificationContext() : op["a"].setNumericValue( 10023 ) self.assertEqual( maya.cmds.getAttr( aPath ), 10023 ) maya.cmds.undo() self.assertEqual( maya.cmds.getAttr( aPath ), 1 ) maya.cmds.redo() self.assertEqual( maya.cmds.getAttr( aPath ), 10023 ) def testSetParameterValuesAndClassesUsingContext( self ) : fnOH = IECoreMaya.FnOpHolder.create( "node", "classParameterTest", 1 ) with fnOH.parameterModificationContext() as op : op["cp"].setClass( "maths/multiply", 1, "IECORE_OP_PATHS" ) op["cp"]["a"].setNumericValue( 10101 ) aPath = fnOH.parameterPlugPath( op["cp"]["a"] ) self.assertEqual( maya.cmds.getAttr( aPath ), 10101 ) maya.cmds.undo() self.assertEqual( op["cp"].getClass(), None ) maya.cmds.redo() self.assertEqual( op["cp"].getClass( True )[1:], ( "maths/multiply", 1, "IECORE_OP_PATHS" ) ) self.assertEqual( maya.cmds.getAttr( aPath ), 10101 ) def testBoxDefaultValue( self ) : node = maya.cmds.createNode( "ieOpHolderNode" ) fnPH = IECoreMaya.FnParameterisedHolder( node ) op = IECore.ClassLoader.defaultOpLoader().load( "parameterTypes", 1 )() op.parameters().removeParameter( "m" ) # no color4f support in maya fnPH.setParameterised( op ) node, plug = fnPH.parameterPlugPath( op["s"] ).split( "." ) self.assertEqual( maya.cmds.attributeQuery( plug + "Min", listDefault=True, node=node ), [ -1.0, -1.0 ] ) self.assertEqual( maya.cmds.attributeQuery( plug + "Max", listDefault=True, node=node ), [ 1.0, 1.0 ] ) node, plug = fnPH.parameterPlugPath( op["t"] ).split( "." 
) self.assertEqual( maya.cmds.attributeQuery( plug + "Min", listDefault=True, node=node ), [ -1.0, -1.0, -1.0 ] ) self.assertEqual( maya.cmds.attributeQuery( plug + "Max", listDefault=True, node=node ), [ 1.0, 1.0, 1.0 ] ) def testArrayPlugCreation( self ) : op = IECore.Op( 'test op', IECore.IntParameter( 'result', '', 0 ) ) op.parameters().addParameters( [ IECore.V3fVectorParameter( 'v3fVector', '', IECore.V3fVectorData() ), IECore.V3dVectorParameter( 'v3dVector', '', IECore.V3dVectorData() ), IECore.StringVectorParameter( 'stringVector', '', IECore.StringVectorData() ), IECore.DoubleVectorParameter( 'doubleVector', '', IECore.DoubleVectorData() ), IECore.FloatVectorParameter( 'floatVector', '', IECore.FloatVectorData() ), IECore.IntVectorParameter( 'intVector', '', IECore.IntVectorData() ), IECore.BoolVectorParameter( 'boolVector', '', IECore.BoolVectorData() ), IECore.M44fVectorParameter( 'm44fVector', '', IECore.M44fVectorData() ), IECore.M44dVectorParameter( 'm44dVector', '', IECore.M44dVectorData() ), ] ) node = maya.cmds.createNode( 'ieOpHolderNode' ) fnPH = IECoreMaya.FnParameterisedHolder( node ) self.assert_( not maya.cmds.objExists( node+'.parm_v3fVector' ) ) self.assert_( not maya.cmds.objExists( node+'.parm_v3dVector' ) ) self.assert_( not maya.cmds.objExists( node+'.parm_stringVector' ) ) self.assert_( not maya.cmds.objExists( node+'.parm_doubleVector' ) ) self.assert_( not maya.cmds.objExists( node+'.parm_floatVector' ) ) self.assert_( not maya.cmds.objExists( node+'.parm_intVector' ) ) self.assert_( not maya.cmds.objExists( node+'.parm_boolVector' ) ) self.assert_( not maya.cmds.objExists( node+'.parm_m44fVector' ) ) self.assert_( not maya.cmds.objExists( node+'.parm_m44dVector' ) ) fnPH.setParameterised( op ) self.assert_( maya.cmds.objExists( node+'.parm_v3fVector' ) ) self.assert_( maya.cmds.objExists( node+'.parm_v3dVector' ) ) self.assert_( maya.cmds.objExists( node+'.parm_stringVector' ) ) self.assert_( maya.cmds.objExists( node+'.parm_doubleVector' ) ) self.assert_( maya.cmds.objExists( node+'.parm_floatVector' ) ) self.assert_( maya.cmds.objExists( node+'.parm_intVector' ) ) self.assert_( maya.cmds.objExists( node+'.parm_boolVector' ) ) self.assert_( maya.cmds.objExists( node+'.parm_m44fVector' ) ) self.assert_( maya.cmds.objExists( node+'.parm_m44dVector' ) ) self.assertEqual( maya.cmds.getAttr( node+'.parm_v3fVector', type=True ), 'vectorArray' ) self.assertEqual( maya.cmds.getAttr( node+'.parm_v3dVector', type=True ), 'vectorArray' ) self.assertEqual( maya.cmds.getAttr( node+'.parm_stringVector', type=True ), 'stringArray' ) self.assertEqual( maya.cmds.getAttr( node+'.parm_doubleVector', type=True ), 'doubleArray' ) self.assertEqual( maya.cmds.getAttr( node+'.parm_floatVector', type=True ), 'doubleArray' ) self.assertEqual( maya.cmds.getAttr( node+'.parm_intVector', type=True ), 'Int32Array' ) self.assertEqual( maya.cmds.getAttr( node+'.parm_boolVector', type=True ), 'Int32Array' ) self.assertEqual( maya.cmds.getAttr( node+'.parm_m44fVector', type=True ), 'doubleArray' ) self.assertEqual( maya.cmds.getAttr( node+'.parm_m44dVector', type=True ), 'doubleArray' ) def testMatrixVectorPlugs( self ) : m44fVector = IECore.M44fVectorData( [ IECore.M44f( 1 ), IECore.M44f( 2 ), IECore.M44f( 3 ) ] ) m44dVector = IECore.M44dVectorData( [ IECore.M44d( 1 ), IECore.M44d( 2 ), IECore.M44d( 3 ) ] ) reverseM44fVector = IECore.M44fVectorData( [ IECore.M44f( 3 ), IECore.M44f( 2 ), IECore.M44f( 1 ) ] ) reverseM44dVector = IECore.M44dVectorData( [ IECore.M44d( 3 ), IECore.M44d( 2 ), 
IECore.M44d( 1 ) ] ) mayaArray = [] for i in range( 0, 3 ) : for j in range( 0, 16 ) : mayaArray.append( i+1 ) reverseMayaArray = list(mayaArray) reverseMayaArray.reverse() op = IECore.Op( 'test op', IECore.IntParameter( 'result', '', 0 ) ) op.parameters().addParameters( [ IECore.M44fVectorParameter( 'm44fVector', '', IECore.M44fVectorData() ), IECore.M44dVectorParameter( 'm44dVector', '', IECore.M44dVectorData() ), ] ) node = maya.cmds.createNode( 'ieOpHolderNode' ) fnPH = IECoreMaya.FnParameterisedHolder( node ) fnPH.setParameterised( op ) # set from cortex to maya self.assertNotEqual( mayaArray, maya.cmds.getAttr( node+'.parm_m44fVector' ) ) self.assertNotEqual( mayaArray, maya.cmds.getAttr( node+'.parm_m44dVector' ) ) fnPH.getParameterised()[0].parameters()['m44fVector'].setValue( m44fVector ) fnPH.getParameterised()[0].parameters()['m44dVector'].setValue( m44dVector ) fnPH.setNodeValues() self.assertEqual( mayaArray, maya.cmds.getAttr( node+'.parm_m44fVector' ) ) self.assertEqual( mayaArray, maya.cmds.getAttr( node+'.parm_m44dVector' ) ) # set from maya to cortex self.assertNotEqual( reverseM44fVector, fnPH.getParameterised()[0].parameters()['m44fVector'].getValue() ) self.assertNotEqual( reverseM44dVector, fnPH.getParameterised()[0].parameters()['m44dVector'].getValue() ) maya.cmds.setAttr( node+'.parm_m44fVector', reverseMayaArray, type="doubleArray" ) maya.cmds.setAttr( node+'.parm_m44dVector', reverseMayaArray, type="doubleArray" ) fnPH.setParameterisedValues() self.assertEqual( reverseM44fVector, fnPH.getParameterised()[0].parameters()['m44fVector'].getValue() ) self.assertEqual( reverseM44dVector, fnPH.getParameterised()[0].parameters()['m44dVector'].getValue() ) # set to incorrect length from maya maya.cmds.setAttr( node+'.parm_m44fVector', [0,1,2], type="doubleArray" ) maya.cmds.setAttr( node+'.parm_m44dVector', [0,1,2], type="doubleArray" ) fnPH.setParameterisedValues() self.assertEqual( None, fnPH.getParameterised()[0].parameters()['m44fVector'].getValue() ) self.assertEqual( None, fnPH.getParameterised()[0].parameters()['m44dVector'].getValue() ) def testResultAttrSaveLoad( self ) : node = maya.cmds.createNode( "ieOpHolderNode" ) fnPH = IECoreMaya.FnOpHolder( node ) fnPH.setOp( "floatParameter" ) self.assertNotEqual( maya.cmds.getAttr( node + ".parm_f" ), 50.5 ) self.assertNotEqual( maya.cmds.getAttr( node + ".result" ), 50.5 ) maya.cmds.setAttr( node + ".parm_f", 50.5 ) self.assertEqual( maya.cmds.getAttr( node + ".parm_f" ), 50.5 ) self.assertEqual( maya.cmds.getAttr( node + ".result" ), 50.5 ) maya.cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "resultAttrLoadTest.ma" ) ) testScene = maya.cmds.file( force = True, type = "mayaAscii", save = True ) maya.cmds.file( testScene, f = True, o = True ) self.assertEqual( maya.cmds.getAttr( node + ".parm_f" ), 50.5 ) self.assertEqual( maya.cmds.getAttr( node + ".result" ), 50.5 ) @unittest.skipIf( maya.OpenMaya.MGlobal.apiVersion() < 201600, "Inactive node state causes a seg fault prior to Maya 2016" ) def testResultAttrSaveLoadMeshConnections( self ) : box = maya.cmds.listRelatives( maya.cmds.polyCube(), shapes=True )[0] torus = maya.cmds.listRelatives( maya.cmds.polyTorus(), shapes=True )[0] node = maya.cmds.createNode( "ieOpHolderNode" ) fnPH = IECoreMaya.FnOpHolder( node ) fnPH.setOp( "meshMerge" ) maya.cmds.connectAttr( box + ".outMesh", node + ".parm_input" ) maya.cmds.connectAttr( torus + ".outMesh", node + ".parm_mesh" ) mesh = maya.cmds.createNode( "mesh" ) maya.cmds.connectAttr( node + ".result", 
mesh + ".inMesh" ) quads = maya.cmds.polyQuad( mesh )[0] joint = maya.cmds.createNode( "joint" ) cluster = maya.cmds.skinCluster( mesh, joint ) fnMesh = maya.OpenMaya.MFnMesh( IECoreMaya.dependencyNodeFromString( mesh ) ) self.assertEqual( fnMesh.numVertices(), 408 ) self.assertEqual( fnMesh.numPolygons(), 406 ) maya.cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "resultAttrLoadTest.ma" ) ) testScene = maya.cmds.file( force = True, type = "mayaAscii", save = True ) maya.cmds.file( testScene, f = True, o = True ) fnMesh = maya.OpenMaya.MFnMesh( IECoreMaya.dependencyNodeFromString( mesh ) ) self.assertEqual( fnMesh.numVertices(), 408 ) self.assertEqual( fnMesh.numPolygons(), 406 ) self.assertEqual( maya.cmds.getAttr( quads + ".nodeState" ), 0 ) def testParameterPlugForMissingPlug( self ) : ## Make sure that null plugs are returned from the parameterPlug() method # if no plug exists. node = maya.cmds.createNode( "ieOpHolderNode" ) fnPH = IECoreMaya.FnOpHolder( node ) fnPH.setOp( "floatParameter" ) op = fnPH.getOp() plug = fnPH.parameterPlug( op.parameters() ) self.failUnless( isinstance( plug, maya.OpenMaya.MPlug ) ) self.failUnless( plug.isNull() ) def testLsMethods( self ) : # create a couple of holders: opHolderNode = maya.cmds.createNode( "ieOpHolderNode" ) fnOH = IECoreMaya.FnOpHolder( opHolderNode ) fnOH.setOp( "floatParameter" ) converterHolderNode = maya.cmds.createNode( "ieConverterHolder" ) fnCH = IECoreMaya.FnConverterHolder( converterHolderNode ) #fnCH.setOp( "floatParameter" ) node = maya.cmds.createNode( "ieProceduralHolder" ) node2 = maya.cmds.createNode( "ieProceduralHolder" ) fnPH = IECoreMaya.FnProceduralHolder( node ) proc = IECore.ReadProcedural() fnPH.setParameterised( proc ) fnPH2 = IECoreMaya.FnProceduralHolder( node2 ) # do an ls on the op holders: should only be one opHolders = IECoreMaya.FnOpHolder.ls() self.assertEqual( len( opHolders ), 1 ) self.failUnless( isinstance( opHolders[0], IECoreMaya.FnOpHolder ) ) self.assertEqual( opHolders[0].fullPathName(), opHolderNode ) # do an ls on the procedural holders: should be two self.assertEqual( len( IECoreMaya.FnProceduralHolder.ls() ), 2 ) # do an ls on the procedural holders containing IECore.ReadProcedurals: should be one self.assertEqual( len( IECoreMaya.FnProceduralHolder.ls( classType=IECore.ReadProcedural ) ), 1 ) # find full path name of node holding ReadProcedural, and check it's the same as the one returned by ls: node = maya.cmds.ls( node, l=True )[0] self.assertEqual( IECoreMaya.FnProceduralHolder.ls( classType=IECore.ReadProcedural )[0].fullPathName(), node ) # do an ls on the converter holders, this time just returning node names: converterHolders = IECoreMaya.FnConverterHolder.ls( fnSets=False ) self.assertEqual( len( converterHolders ), 1 ) self.assertEqual( converterHolders[0], converterHolderNode ) def tearDown( self ) : for f in [ "test/IECoreMaya/referenceEditCounts.ma", "test/IECoreMaya/resultAttrLoadTest.ma", "test/IECoreMaya/resultGetParameterisedTest.ma", ] : if os.path.exists( f ) : os.remove( f ) if __name__ == "__main__": IECoreMaya.TestProgram( plugins = [ "ieCore" ] )<|fim▁end|>
self.assertEqual( hPlug.child( 1 ).asFloat(), -2 ) self.assertEqual( hPlug.child( 2 ).asFloat(), -3 )
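# A minimal sketch of the round-trip pattern the holder tests above exercise:
# edit a Cortex parameter, push it to the Maya node with setNodeValue(), and
# rely on Maya's undo stack to restore the plug while the parameter keeps its
# new value. Assumes a Maya session with the IECoreMaya plugin loaded and the
# "maths/multiply" test op on the op path; illustrative outside that setup.
import maya.cmds
import IECoreMaya

fnOH = IECoreMaya.FnOpHolder.create("exampleOp", "maths/multiply", 2)
op = fnOH.getOp()
aPath = fnOH.parameterPlugPath(op["a"])

op["a"].setNumericValue(5)   # change the Cortex-side value...
fnOH.setNodeValue(op["a"])   # ...and push it to the node as a single undo item
assert maya.cmds.getAttr(aPath) == 5

maya.cmds.undo()             # only the plug reverts; the parameter keeps 5
assert maya.cmds.getAttr(aPath) == 1
assert op["a"].getNumericValue() == 5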
<|file_name|>BeanValidatorComponentAutoConfiguration.java<|end_file_name|><|fim▁begin|>/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.component.bean.validator.springboot; import java.util.HashMap; import java.util.List; import java.util.Map; import javax.annotation.Generated; import org.apache.camel.CamelContext; import org.apache.camel.component.bean.validator.BeanValidatorComponent; import org.apache.camel.spi.ComponentCustomizer; import org.apache.camel.spi.HasId; import org.apache.camel.spring.boot.CamelAutoConfiguration; import org.apache.camel.spring.boot.ComponentConfigurationProperties; import org.apache.camel.spring.boot.util.CamelPropertiesHelper; import org.apache.camel.spring.boot.util.ConditionalOnCamelContextAndAutoConfigurationBeans; import org.apache.camel.spring.boot.util.GroupCondition; import org.apache.camel.spring.boot.util.HierarchicalPropertiesEvaluator;<|fim▁hole|>import org.apache.camel.support.IntrospectionSupport; import org.apache.camel.util.ObjectHelper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.autoconfigure.AutoConfigureAfter; import org.springframework.boot.autoconfigure.condition.ConditionalOnBean; import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; import org.springframework.boot.context.properties.EnableConfigurationProperties; import org.springframework.context.ApplicationContext; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Conditional; import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.Lazy; /** * Generated by camel-package-maven-plugin - do not edit this file! 
*/ @Generated("org.apache.camel.maven.packaging.SpringBootAutoConfigurationMojo") @Configuration @Conditional({ConditionalOnCamelContextAndAutoConfigurationBeans.class, BeanValidatorComponentAutoConfiguration.GroupConditions.class}) @AutoConfigureAfter(CamelAutoConfiguration.class) @EnableConfigurationProperties({ComponentConfigurationProperties.class, BeanValidatorComponentConfiguration.class}) public class BeanValidatorComponentAutoConfiguration { private static final Logger LOGGER = LoggerFactory .getLogger(BeanValidatorComponentAutoConfiguration.class); @Autowired private ApplicationContext applicationContext; @Autowired private CamelContext camelContext; @Autowired private BeanValidatorComponentConfiguration configuration; @Autowired(required = false) private List<ComponentCustomizer<BeanValidatorComponent>> customizers; static class GroupConditions extends GroupCondition { public GroupConditions() { super("camel.component", "camel.component.bean-validator"); } } @Lazy @Bean(name = "bean-validator-component") @ConditionalOnMissingBean(BeanValidatorComponent.class) public BeanValidatorComponent configureBeanValidatorComponent() throws Exception { BeanValidatorComponent component = new BeanValidatorComponent(); component.setCamelContext(camelContext); Map<String, Object> parameters = new HashMap<>(); IntrospectionSupport.getProperties(configuration, parameters, null, false); for (Map.Entry<String, Object> entry : parameters.entrySet()) { Object value = entry.getValue(); Class<?> paramClass = value.getClass(); if (paramClass.getName().endsWith("NestedConfiguration")) { Class nestedClass = null; try { nestedClass = (Class) paramClass.getDeclaredField( "CAMEL_NESTED_CLASS").get(null); HashMap<String, Object> nestedParameters = new HashMap<>(); IntrospectionSupport.getProperties(value, nestedParameters, null, false); Object nestedProperty = nestedClass.newInstance(); CamelPropertiesHelper.setCamelProperties(camelContext, nestedProperty, nestedParameters, false); entry.setValue(nestedProperty); } catch (NoSuchFieldException e) { } } } CamelPropertiesHelper.setCamelProperties(camelContext, component, parameters, false); if (ObjectHelper.isNotEmpty(customizers)) { for (ComponentCustomizer<BeanValidatorComponent> customizer : customizers) { boolean useCustomizer = (customizer instanceof HasId) ? HierarchicalPropertiesEvaluator.evaluate( applicationContext.getEnvironment(), "camel.component.customizer", "camel.component.bean-validator.customizer", ((HasId) customizer).getId()) : HierarchicalPropertiesEvaluator.evaluate( applicationContext.getEnvironment(), "camel.component.customizer", "camel.component.bean-validator.customizer"); if (useCustomizer) { LOGGER.debug("Configure component {}, with customizer {}", component, customizer); customizer.customize(component); } } } return component; } }<|fim▁end|>
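# The Java auto-configuration above applies a customizer only when
# HierarchicalPropertiesEvaluator approves it. The Python sketch below (Python
# is used for all added examples in this file) approximates that lookup as
# "most specific enabled flag wins, default true"; this is a reading of the
# behaviour, not the actual Camel implementation.
def evaluate(env, *prefixes):
    for prefix in reversed(prefixes):          # most specific prefix last
        value = env.get(prefix + ".enabled")
        if value is not None:
            return str(value).lower() == "true"
    return True                                # nothing configured: apply it

env = {
    "camel.component.customizer.enabled": "true",
    "camel.component.bean-validator.customizer.enabled": "false",
}
assert evaluate(env, "camel.component.customizer",
                "camel.component.bean-validator.customizer") is False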
<|file_name|>asyncInjectors.test.js<|end_file_name|><|fim▁begin|>/** * Test async injectors */ import { memoryHistory } from 'react-router'; import { put } from 'redux-saga/effects'; import { fromJS } from 'immutable'; import configureStore from 'store'; import {<|fim▁hole|> // Fixtures const initialState = fromJS({ reduced: 'soon' }); const reducer = (state = initialState, action) => { switch (action.type) { case 'TEST': return state.set('reduced', action.payload); default: return state; } }; function* testSaga() { yield put({ type: 'TEST', payload: 'yup' }); } const sagas = [ testSaga, ]; describe('asyncInjectors', () => { let store; describe('getAsyncInjectors', () => { beforeAll(() => { store = configureStore({}, memoryHistory); }); it('given a store, should return all async injectors', () => { const { injectReducer, injectSagas } = getAsyncInjectors(store); injectReducer('test', reducer); injectSagas(sagas); const actual = store.getState().get('test'); const expected = initialState.merge({ reduced: 'yup' }); expect(actual.toJS()).toEqual(expected.toJS()); }); it('should throw if passed invalid store shape', () => { let result = false; Reflect.deleteProperty(store, 'dispatch'); try { getAsyncInjectors(store); } catch (err) { result = err.name === 'Invariant Violation'; } expect(result).toEqual(true); }); }); describe('helpers', () => { beforeAll(() => { store = configureStore({}, memoryHistory); }); describe('injectAsyncReducer', () => { it('given a store, it should provide a function to inject a reducer', () => { const injectReducer = injectAsyncReducer(store); injectReducer('test', reducer); const actual = store.getState().get('test'); const expected = initialState; expect(actual.toJS()).toEqual(expected.toJS()); }); it('should not assign reducer if already existing', () => { const injectReducer = injectAsyncReducer(store); injectReducer('test', reducer); injectReducer('test', () => {}); expect(store.asyncReducers.test.toString()).toEqual(reducer.toString()); }); it('should throw if passed invalid name', () => { let result = false; const injectReducer = injectAsyncReducer(store); try { injectReducer('', reducer); } catch (err) { result = err.name === 'Invariant Violation'; } try { injectReducer(999, reducer); } catch (err) { result = err.name === 'Invariant Violation'; } expect(result).toEqual(true); }); it('should throw if passed invalid reducer', () => { let result = false; const injectReducer = injectAsyncReducer(store); try { injectReducer('bad', 'nope'); } catch (err) { result = err.name === 'Invariant Violation'; } try { injectReducer('coolio', 12345); } catch (err) { result = err.name === 'Invariant Violation'; } expect(result).toEqual(true); }); }); describe('injectAsyncSagas', () => { it('given a store, it should provide a function to inject a saga', () => { const injectSagas = injectAsyncSagas(store); injectSagas(sagas); const actual = store.getState().get('test'); const expected = initialState.merge({ reduced: 'yup' }); expect(actual.toJS()).toEqual(expected.toJS()); }); it('should throw if passed invalid saga', () => { let result = false; const injectSagas = injectAsyncSagas(store); try { injectSagas({ testSaga }); } catch (err) { result = err.name === 'Invariant Violation'; } try { injectSagas(testSaga); } catch (err) { result = err.name === 'Invariant Violation'; } expect(result).toEqual(true); }); }); }); });<|fim▁end|>
injectAsyncReducer, injectAsyncSagas, getAsyncInjectors, } from '../asyncInjectors';
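# A compact Python analogue of the contract the JavaScript tests above pin
# down: named reducers can be injected into a live store, inputs are
# validated, and re-injecting an existing name is a no-op. The Store class is
# hypothetical, for illustration only.
class Store(object):
    def __init__(self):
        self.async_reducers = {}
        self.state = {}

    def inject_reducer(self, name, reducer):
        if not isinstance(name, str) or not name:
            raise ValueError("reducer name must be a non-empty string")
        if not callable(reducer):
            raise ValueError("reducer must be callable")
        if name in self.async_reducers:   # already injected: keep the original
            return
        self.async_reducers[name] = reducer
        self.state[name] = reducer(None, {"type": "@@INIT"})

store = Store()
store.inject_reducer("test", lambda state, action: state or {"reduced": "soon"})
store.inject_reducer("test", lambda state, action: {})  # ignored, as in the tests
assert store.state["test"] == {"reduced": "soon"}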
<|file_name|>facets_with_custom_projection.py<|end_file_name|><|fim▁begin|><|fim▁hole|>""" FacetGrid with custom projection ================================ _thumb: .33, .5 """ import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns sns.set() # Generate an example radial dataset r = np.linspace(0, 10, num=100) df = pd.DataFrame({'r': r, 'slow': r, 'medium': 2 * r, 'fast': 4 * r}) # Convert the dataframe to long-form or "tidy" format df = pd.melt(df, id_vars=['r'], var_name='speed', value_name='theta') # Set up a grid of axes with a polar projection g = sns.FacetGrid(df, col="speed", hue="speed", subplot_kws=dict(projection='polar'), size=4.5, sharex=False, sharey=False, despine=False) # Draw a scatterplot onto each axes in the grid g.map(plt.scatter, "theta", "r")<|fim▁end|>
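# One portability note on the example above: seaborn 0.9 renamed FacetGrid's
# "size" keyword to "height". A version-tolerant variant of the grid
# construction could look like this; a sketch only, and in practice pinning
# the seaborn version is preferable to branching at runtime.
import numpy as np
import pandas as pd
import seaborn as sns

r = np.linspace(0, 10, num=100)
df = pd.melt(pd.DataFrame({'r': r, 'slow': r, 'medium': 2 * r, 'fast': 4 * r}),
             id_vars=['r'], var_name='speed', value_name='theta')
kwargs = dict(col="speed", hue="speed", subplot_kws=dict(projection='polar'),
              sharex=False, sharey=False, despine=False)
try:
    g = sns.FacetGrid(df, height=4.5, **kwargs)  # seaborn >= 0.9
except TypeError:
    g = sns.FacetGrid(df, size=4.5, **kwargs)    # older seaborn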
<|file_name|>classgr__interleave.js<|end_file_name|><|fim▁begin|>var classgr__interleave = [ [ "~gr_interleave", "classgr__interleave.html#ae342ba63322b78359ee71de113e41fc1", null ], [ "check_topology", "classgr__interleave.html#ade74f196c0fc8a91ca4f853a2d1202e1", null ], [ "work", "classgr__interleave.html#a44664518c86559da58b3feccb9e45d7f", null ], [ "gr_make_interleave", "classgr__interleave.html#acf7153a343a7bfbf2687bcc4c98d410e", null ]<|fim▁hole|><|fim▁end|>
];
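# The doxygen index above documents GNU Radio's gr_interleave block, which
# merges several input streams into one by alternating items. As a rough
# illustration of that operation (not the GNU Radio implementation):
def interleave(*streams):
    # round-robin one item from each input stream, truncating to the shortest
    return [item for group in zip(*streams) for item in group]

assert interleave([1, 3, 5], [2, 4, 6]) == [1, 2, 3, 4, 5, 6]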
<|file_name|>swarm.go<|end_file_name|><|fim▁begin|>// package swarm implements a connection muxer with a pair of channels // to synchronize all network communication. package swarm import ( "fmt" "sync" "time" metrics "github.com/ipfs/go-ipfs/metrics" inet "github.com/ipfs/go-ipfs/p2p/net" addrutil "github.com/ipfs/go-ipfs/p2p/net/swarm/addr" peer "github.com/ipfs/go-ipfs/p2p/peer" eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog" ctxgroup "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-ctxgroup" ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" ps "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-peerstream" psy "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-peerstream/transport/yamux" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" ) var log = eventlog.Logger("swarm2") var PSTransport = psy.DefaultTransport // Swarm is a connection muxer, allowing connections to other peers to // be opened and closed, while still using the same Chan for all // communication. The Chan sends/receives Messages, which note the // destination or source Peer. // // Uses peerstream.Swarm type Swarm struct { swarm *ps.Swarm local peer.ID peers peer.Peerstore connh ConnHandler dsync dialsync backf dialbackoff dialT time.Duration // mainly for tests notifmu sync.RWMutex notifs map[inet.Notifiee]ps.Notifiee cg ctxgroup.ContextGroup bwc metrics.Reporter } // NewSwarm constructs a Swarm, with a Chan. func NewSwarm(ctx context.Context, listenAddrs []ma.Multiaddr, local peer.ID, peers peer.Peerstore, bwc metrics.Reporter) (*Swarm, error) { listenAddrs, err := filterAddrs(listenAddrs) if err != nil { return nil, err } s := &Swarm{ swarm: ps.NewSwarm(PSTransport), local: local, peers: peers, cg: ctxgroup.WithContext(ctx), dialT: DialTimeout, notifs: make(map[inet.Notifiee]ps.Notifiee), bwc: bwc, } // configure Swarm s.cg.SetTeardown(s.teardown) s.SetConnHandler(nil) // make sure to setup our own conn handler. return s, s.listen(listenAddrs) } func (s *Swarm) teardown() error { return s.swarm.Close() } // filterAddrs filters out unusable listen addresses, erroring if none remain func filterAddrs(listenAddrs []ma.Multiaddr) ([]ma.Multiaddr, error) { if len(listenAddrs) > 0 { filtered := addrutil.FilterUsableAddrs(listenAddrs) if len(filtered) < 1 { return nil, fmt.Errorf("swarm cannot use any addr in: %s", listenAddrs) } listenAddrs = filtered } return listenAddrs, nil } // Listen sets up listeners on each of the given addresses func (s *Swarm) Listen(addrs ...ma.Multiaddr) error { addrs, err := filterAddrs(addrs) if err != nil { return err } return s.listen(addrs) } // CtxGroup returns the Context Group of the swarm func (s *Swarm) CtxGroup() ctxgroup.ContextGroup { return s.cg } // Close stops the Swarm. func (s *Swarm) Close() error { return s.cg.Close() } // StreamSwarm returns the underlying peerstream.Swarm func (s *Swarm) StreamSwarm() *ps.Swarm { return s.swarm } // SetConnHandler assigns the handler for new connections. // See peerstream. You will rarely use this. See SetStreamHandler func (s *Swarm) SetConnHandler(handler ConnHandler) { // handler is nil if user wants to clear the old handler. if handler == nil { s.swarm.SetConnHandler(func(psconn *ps.Conn) { s.connHandler(psconn) }) return } s.swarm.SetConnHandler(func(psconn *ps.Conn) { // sc is nil if closed in our handler. if sc := s.connHandler(psconn); sc != nil { // call the user's handler. in a goroutine for sync safety.
go handler(sc) } }) } // SetStreamHandler assigns the handler for new streams. // See peerstream. func (s *Swarm) SetStreamHandler(handler inet.StreamHandler) { s.swarm.SetStreamHandler(func(s *ps.Stream) { handler(wrapStream(s)) }) } // NewStreamWithPeer creates a new stream on any available connection to p func (s *Swarm) NewStreamWithPeer(p peer.ID) (*Stream, error) { // if we have no connections, try connecting. if len(s.ConnectionsToPeer(p)) == 0 { log.Debug("Swarm: NewStreamWithPeer no connections. Attempting to connect...") if _, err := s.Dial(context.Background(), p); err != nil { return nil, err } } log.Debug("Swarm: NewStreamWithPeer...") st, err := s.swarm.NewStreamWithGroup(p) return wrapStream(st), err } // StreamsWithPeer returns all the live Streams to p func (s *Swarm) StreamsWithPeer(p peer.ID) []*Stream {<|fim▁hole|> // ConnectionsToPeer returns all the live connections to p func (s *Swarm) ConnectionsToPeer(p peer.ID) []*Conn { return wrapConns(ps.ConnsWithGroup(p, s.swarm.Conns())) } // Connections returns a slice of all connections. func (s *Swarm) Connections() []*Conn { return wrapConns(s.swarm.Conns()) } // CloseConnection removes a given peer from swarm + closes the connection func (s *Swarm) CloseConnection(p peer.ID) error { conns := s.swarm.ConnsWithGroup(p) // boom. for _, c := range conns { c.Close() } return nil } // Peers returns a copy of the set of peers swarm is connected to. func (s *Swarm) Peers() []peer.ID { conns := s.Connections() seen := make(map[peer.ID]struct{}) peers := make([]peer.ID, 0, len(conns)) for _, c := range conns { p := c.RemotePeer() if _, found := seen[p]; found { continue } seen[p] = struct{}{} peers = append(peers, p) } return peers } // LocalPeer returns the local peer swarm is associated to. func (s *Swarm) LocalPeer() peer.ID { return s.local } // notifyAll sends a signal to all Notifiees func (s *Swarm) notifyAll(notify func(inet.Notifiee)) { s.notifmu.RLock() for f := range s.notifs { go notify(f) } s.notifmu.RUnlock() } // Notify signs up Notifiee to receive signals when events happen func (s *Swarm) Notify(f inet.Notifiee) { // wrap with our notifiee, to translate function calls n := &ps2netNotifee{net: (*Network)(s), not: f} s.notifmu.Lock() s.notifs[f] = n s.notifmu.Unlock() // register for notifications in the peer swarm. s.swarm.Notify(n) } // StopNotify unregisters Notifiee fromr receiving signals func (s *Swarm) StopNotify(f inet.Notifiee) { s.notifmu.Lock() n, found := s.notifs[f] if found { delete(s.notifs, f) } s.notifmu.Unlock() if found { s.swarm.StopNotify(n) } } type ps2netNotifee struct { net *Network not inet.Notifiee } func (n *ps2netNotifee) Connected(c *ps.Conn) { n.not.Connected(n.net, inet.Conn((*Conn)(c))) } func (n *ps2netNotifee) Disconnected(c *ps.Conn) { n.not.Disconnected(n.net, inet.Conn((*Conn)(c))) } func (n *ps2netNotifee) OpenedStream(s *ps.Stream) { n.not.OpenedStream(n.net, inet.Stream((*Stream)(s))) } func (n *ps2netNotifee) ClosedStream(s *ps.Stream) { n.not.ClosedStream(n.net, inet.Stream((*Stream)(s))) }<|fim▁end|>
return wrapStreams(ps.StreamsWithGroup(p, s.swarm.Streams())) }
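# A Python rendering (matching the language of the other added sketches) of the
# notification fan-out in swarm.go above: registration is guarded by a mutex,
# and notifyAll fires each callback on its own thread the way the Go code
# spawns `go notify(f)`. Illustrative only, not the go-ipfs code.
import threading

class Notifier(object):
    def __init__(self):
        self._mu = threading.Lock()    # stand-in for Go's sync.RWMutex
        self._notifs = set()

    def notify(self, notifiee):
        with self._mu:
            self._notifs.add(notifiee)

    def stop_notify(self, notifiee):
        with self._mu:
            self._notifs.discard(notifiee)

    def notify_all(self, fire):
        with self._mu:                 # hold the lock while spawning, as in Go
            for n in self._notifs:
                threading.Thread(target=fire, args=(n,)).start()

notifier = Notifier()
notifier.notify(lambda peer_id: print("connected to", peer_id))
notifier.notify_all(lambda notifiee: notifiee("QmPeer123"))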
<|file_name|>test_console.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- import io import pytest import re from collections import namedtuple from unittest import mock from toot import console, User, App, http from toot.exceptions import ConsoleError from tests.utils import MockResponse app = App('habunek.com', 'https://habunek.com', 'foo', 'bar') user = User('habunek.com', '[email protected]', 'xxx') MockUuid = namedtuple("MockUuid", ["hex"]) def uncolorize(text): """Remove ANSI color sequences from a string""" return re.sub(r'\x1b[^m]*m', '', text) def test_print_usage(capsys): console.print_usage() out, err = capsys.readouterr() assert "toot - a Mastodon CLI client" in out @mock.patch('uuid.uuid4') @mock.patch('toot.http.post') def test_post_defaults(mock_post, mock_uuid, capsys): mock_uuid.return_value = MockUuid("rock-on") mock_post.return_value = MockResponse({ 'url': 'https://habunek.com/@ihabunek/1234567890' }) console.run_command(app, user, 'post', ['Hello world']) mock_post.assert_called_once_with(app, user, '/api/v1/statuses', { 'status': 'Hello world', 'visibility': 'public', 'media_ids[]': [], 'sensitive': "false", 'spoiler_text': None, 'in_reply_to_id': None, 'language': None, 'scheduled_at': None, }, headers={"Idempotency-Key": "rock-on"}) out, err = capsys.readouterr() assert 'Toot posted' in out assert 'https://habunek.com/@ihabunek/1234567890' in out assert not err @mock.patch('uuid.uuid4') @mock.patch('toot.http.post') def test_post_with_options(mock_post, mock_uuid, capsys): mock_uuid.return_value = MockUuid("up-the-irons") args = [ 'Hello world', '--visibility', 'unlisted', '--sensitive', '--spoiler-text', 'Spoiler!', '--reply-to', '123a', '--language', 'hrv', ] mock_post.return_value = MockResponse({ 'url': 'https://habunek.com/@ihabunek/1234567890' }) console.run_command(app, user, 'post', args) mock_post.assert_called_once_with(app, user, '/api/v1/statuses', { 'status': 'Hello world', 'media_ids[]': [], 'visibility': 'unlisted', 'sensitive': "true", 'spoiler_text': "Spoiler!", 'in_reply_to_id': '123a', 'language': 'hrv', 'scheduled_at': None, }, headers={"Idempotency-Key": "up-the-irons"}) out, err = capsys.readouterr() assert 'Toot posted' in out assert 'https://habunek.com/@ihabunek/1234567890' in out assert not err def test_post_invalid_visibility(capsys): args = ['Hello world', '--visibility', 'foo'] with pytest.raises(SystemExit): console.run_command(app, user, 'post', args) out, err = capsys.readouterr() assert "invalid visibility value: 'foo'" in err def test_post_invalid_media(capsys): args = ['Hello world', '--media', 'does_not_exist.jpg'] with pytest.raises(SystemExit): console.run_command(app, user, 'post', args) out, err = capsys.readouterr() assert "can't open 'does_not_exist.jpg'" in err @mock.patch('toot.http.delete') def test_delete(mock_delete, capsys): console.run_command(app, user, 'delete', ['12321']) mock_delete.assert_called_once_with(app, user, '/api/v1/statuses/12321') out, err = capsys.readouterr() assert 'Status deleted' in out assert not err @mock.patch('toot.http.get') def test_timeline(mock_get, monkeypatch, capsys): mock_get.return_value = MockResponse([{ 'id': '111111111111111111', 'account': { 'display_name': 'Frank Zappa 🎸', 'acct': 'fz' }, 'created_at': '2017-04-12T15:53:18.174Z', 'content': "<p>The computer can&apos;t tell you the emotional story. 
It can give you the exact mathematical design, but what's missing is the eyebrows.</p>", 'reblog': None, 'in_reply_to_id': None, 'media_attachments': [], }]) console.run_command(app, user, 'timeline', ['--once']) mock_get.assert_called_once_with(app, user, '/api/v1/timelines/home?limit=10', None) out, err = capsys.readouterr() lines = out.split("\n") assert "Frank Zappa 🎸" in lines[1] assert "@fz" in lines[1] assert "2017-04-12 15:53" in lines[1] assert ( "The computer can't tell you the emotional story. It can give you the " "exact mathematical design, but\nwhat's missing is the eyebrows." in out) assert "111111111111111111" in lines[-3] assert err == "" @mock.patch('toot.http.get') def test_timeline_with_re(mock_get, monkeypatch, capsys): mock_get.return_value = MockResponse([{ 'id': '111111111111111111', 'created_at': '2017-04-12T15:53:18.174Z', 'account': { 'display_name': 'Frank Zappa', 'acct': 'fz' }, 'reblog': { 'account': { 'display_name': 'Johnny Cash', 'acct': 'jc' }, 'content': "<p>The computer can&apos;t tell you the emotional story. It can give you the exact mathematical design, but what's missing is the eyebrows.</p>", 'media_attachments': [], }, 'in_reply_to_id': '111111111111111110', 'media_attachments': [], }]) console.run_command(app, user, 'timeline', ['--once']) mock_get.assert_called_once_with(app, user, '/api/v1/timelines/home?limit=10', None) out, err = capsys.readouterr() lines = out.split("\n") assert "Frank Zappa" in lines[1] assert "@fz" in lines[1] assert "2017-04-12 15:53" in lines[1] assert ( "The computer can't tell you the emotional story. It can give you the " "exact mathematical design, but\nwhat's missing is the eyebrows." in out)<|fim▁hole|> assert "↻ Reblogged @jc" in lines[-3] assert err == "" @mock.patch('toot.http.get') def test_thread(mock_get, monkeypatch, capsys): mock_get.side_effect = [ MockResponse({ 'id': '111111111111111111', 'account': { 'display_name': 'Frank Zappa', 'acct': 'fz' }, 'created_at': '2017-04-12T15:53:18.174Z', 'content': "my response in the middle", 'reblog': None, 'in_reply_to_id': '111111111111111110', 'media_attachments': [], }), MockResponse({ 'ancestors': [{ 'id': '111111111111111110', 'account': { 'display_name': 'Frank Zappa', 'acct': 'fz' }, 'created_at': '2017-04-12T15:53:18.174Z', 'content': "original content", 'media_attachments': [], 'reblog': None, 'in_reply_to_id': None}], 'descendants': [{ 'id': '111111111111111112', 'account': { 'display_name': 'Frank Zappa', 'acct': 'fz' }, 'created_at': '2017-04-12T15:53:18.174Z', 'content': "response message", 'media_attachments': [], 'reblog': None, 'in_reply_to_id': '111111111111111111'}], }), ] console.run_command(app, user, 'thread', ['111111111111111111']) calls = [ mock.call(app, user, '/api/v1/statuses/111111111111111111'), mock.call(app, user, '/api/v1/statuses/111111111111111111/context'), ] mock_get.assert_has_calls(calls, any_order=False) out, err = capsys.readouterr() assert not err # Display order assert out.index('original content') < out.index('my response in the middle') assert out.index('my response in the middle') < out.index('response message') assert "original content" in out assert "my response in the middle" in out assert "response message" in out assert "Frank Zappa" in out assert "@fz" in out assert "111111111111111111" in out assert "In reply to" in out @mock.patch('toot.http.get') def test_reblogged_by(mock_get, monkeypatch, capsys): mock_get.return_value = MockResponse([{ 'display_name': 'Terry Bozzio', 'acct': '[email protected]', }, { 
'display_name': 'Dweezil', 'acct': '[email protected]', }]) console.run_command(app, user, 'reblogged_by', ['111111111111111111']) calls = [ mock.call(app, user, '/api/v1/statuses/111111111111111111/reblogged_by'), ] mock_get.assert_has_calls(calls, any_order=False) out, err = capsys.readouterr() # Display order expected = "\n".join([ "Terry Bozzio", " @[email protected]", "Dweezil", " @[email protected]", "", ]) assert out == expected @mock.patch('toot.http.post') def test_upload(mock_post, capsys): mock_post.return_value = MockResponse({ 'id': 123, 'url': 'https://bigfish.software/123/456', 'preview_url': 'https://bigfish.software/789/012', 'text_url': 'https://bigfish.software/345/678', 'type': 'image', }) console.run_command(app, user, 'upload', [__file__]) assert mock_post.call_count == 1 args, kwargs = http.post.call_args assert args == (app, user, '/api/v1/media') assert isinstance(kwargs['files']['file'], io.BufferedReader) out, err = capsys.readouterr() assert "Uploading media" in out assert __file__ in out @mock.patch('toot.http.get') def test_search(mock_get, capsys): mock_get.return_value = MockResponse({ 'hashtags': [ { 'history': [], 'name': 'foo', 'url': 'https://mastodon.social/tags/foo' }, { 'history': [], 'name': 'bar', 'url': 'https://mastodon.social/tags/bar' }, { 'history': [], 'name': 'baz', 'url': 'https://mastodon.social/tags/baz' }, ], 'accounts': [{ 'acct': 'thequeen', 'display_name': 'Freddy Mercury' }, { 'acct': '[email protected]', 'display_name': 'Mercury Freddy' }], 'statuses': [], }) console.run_command(app, user, 'search', ['freddy']) mock_get.assert_called_once_with(app, user, '/api/v2/search', { 'q': 'freddy', 'resolve': False, }) out, err = capsys.readouterr() assert "Hashtags:\n#foo, #bar, #baz" in out assert "Accounts:" in out assert "@thequeen Freddy Mercury" in out assert "@[email protected] Mercury Freddy" in out @mock.patch('toot.http.post') @mock.patch('toot.http.get') def test_follow(mock_get, mock_post, capsys): mock_get.return_value = MockResponse([ {'id': 123, 'acct': '[email protected]'}, {'id': 321, 'acct': 'blixa'}, ]) mock_post.return_value = MockResponse() console.run_command(app, user, 'follow', ['blixa']) mock_get.assert_called_once_with(app, user, '/api/v1/accounts/search', {'q': 'blixa'}) mock_post.assert_called_once_with(app, user, '/api/v1/accounts/321/follow') out, err = capsys.readouterr() assert "You are now following blixa" in out @mock.patch('toot.http.get') def test_follow_not_found(mock_get, capsys): mock_get.return_value = MockResponse() with pytest.raises(ConsoleError) as ex: console.run_command(app, user, 'follow', ['blixa']) mock_get.assert_called_once_with(app, user, '/api/v1/accounts/search', {'q': 'blixa'}) assert "Account not found" == str(ex.value) @mock.patch('toot.http.post') @mock.patch('toot.http.get') def test_unfollow(mock_get, mock_post, capsys): mock_get.return_value = MockResponse([ {'id': 123, 'acct': '[email protected]'}, {'id': 321, 'acct': 'blixa'}, ]) mock_post.return_value = MockResponse() console.run_command(app, user, 'unfollow', ['blixa']) mock_get.assert_called_once_with(app, user, '/api/v1/accounts/search', {'q': 'blixa'}) mock_post.assert_called_once_with(app, user, '/api/v1/accounts/321/unfollow') out, err = capsys.readouterr() assert "You are no longer following blixa" in out @mock.patch('toot.http.get') def test_unfollow_not_found(mock_get, capsys): mock_get.return_value = MockResponse([]) with pytest.raises(ConsoleError) as ex: console.run_command(app, user, 'unfollow', ['blixa'])
mock_get.assert_called_once_with(app, user, '/api/v1/accounts/search', {'q': 'blixa'}) assert "Account not found" == str(ex.value) @mock.patch('toot.http.get') def test_whoami(mock_get, capsys): mock_get.return_value = MockResponse({ 'acct': 'ihabunek', 'avatar': 'https://files.mastodon.social/accounts/avatars/000/046/103/original/6a1304e135cac514.jpg?1491312434', 'avatar_static': 'https://files.mastodon.social/accounts/avatars/000/046/103/original/6a1304e135cac514.jpg?1491312434', 'created_at': '2017-04-04T13:23:09.777Z', 'display_name': 'Ivan Habunek', 'followers_count': 5, 'following_count': 9, 'header': '/headers/original/missing.png', 'header_static': '/headers/original/missing.png', 'id': 46103, 'locked': False, 'note': 'A developer.', 'statuses_count': 19, 'url': 'https://mastodon.social/@ihabunek', 'username': 'ihabunek' }) console.run_command(app, user, 'whoami', []) mock_get.assert_called_once_with(app, user, '/api/v1/accounts/verify_credentials') out, err = capsys.readouterr() out = uncolorize(out) assert "@ihabunek Ivan Habunek" in out assert "A developer." in out assert "https://mastodon.social/@ihabunek" in out assert "ID: 46103" in out assert "Since: 2017-04-04 @ 13:23:09" in out assert "Followers: 5" in out assert "Following: 9" in out assert "Statuses: 19" in out @mock.patch('toot.http.get') def test_notifications(mock_get, capsys): mock_get.return_value = MockResponse([{ 'id': '1', 'type': 'follow', 'created_at': '2019-02-16T07:01:20.714Z', 'account': { 'display_name': 'Frank Zappa', 'acct': '[email protected]', }, }, { 'id': '2', 'type': 'mention', 'created_at': '2017-01-12T12:12:12.0Z', 'account': { 'display_name': 'Dweezil Zappa', 'acct': '[email protected]', }, 'status': { 'id': '111111111111111111', 'account': { 'display_name': 'Dweezil Zappa', 'acct': '[email protected]', }, 'created_at': '2017-04-12T15:53:18.174Z', 'content': "<p>We still have fans in 2017 @fan123</p>", 'reblog': None, 'in_reply_to_id': None, 'media_attachments': [], }, }, { 'id': '3', 'type': 'reblog', 'created_at': '1983-11-03T03:03:03.333Z', 'account': { 'display_name': 'Terry Bozzio', 'acct': '[email protected]', }, 'status': { 'id': '1234', 'account': { 'display_name': 'Zappa Fan', 'acct': '[email protected]' }, 'created_at': '1983-11-04T15:53:18.174Z', 'content': "<p>The Black Page, a masterpiece</p>", 'reblog': None, 'in_reply_to_id': None, 'media_attachments': [], }, }, { 'id': '4', 'type': 'favourite', 'created_at': '1983-12-13T01:02:03.444Z', 'account': { 'display_name': 'Zappa Old Fan', 'acct': '[email protected]', }, 'status': { 'id': '1234', 'account': { 'display_name': 'Zappa Fan', 'acct': '[email protected]' }, 'created_at': '1983-11-04T15:53:18.174Z', 'content': "<p>The Black Page, a masterpiece</p>", 'reblog': None, 'in_reply_to_id': None, 'media_attachments': [], }, }]) console.run_command(app, user, 'notifications', []) mock_get.assert_called_once_with(app, user, '/api/v1/notifications', {'exclude_types[]': [], 'limit': 20}) out, err = capsys.readouterr() out = uncolorize(out) width = 100 assert not err assert out == "\n".join([ "─" * width, "Frank Zappa @[email protected] now follows you", "─" * width, "Dweezil Zappa @[email protected] mentioned you in", "Dweezil Zappa @[email protected] 2017-04-12 15:53", "", "We still have fans in 2017 @fan123", "", "ID 111111111111111111 ", "─" * width, "Terry Bozzio @[email protected] reblogged your status", "Zappa Fan @[email protected] 1983-11-04 15:53", "", "The Black Page, a masterpiece", "", "ID 1234 ", "─" * width, "Zappa Old Fan 
@[email protected] favourited your status", "Zappa Fan @[email protected] 1983-11-04 15:53", "", "The Black Page, a masterpiece", "", "ID 1234 ", "─" * width, "", ]) @mock.patch('toot.http.get') def test_notifications_empty(mock_get, capsys): mock_get.return_value = MockResponse([]) console.run_command(app, user, 'notifications', []) mock_get.assert_called_once_with(app, user, '/api/v1/notifications', {'exclude_types[]': [], 'limit': 20}) out, err = capsys.readouterr() out = uncolorize(out) assert not err assert out == "No notification\n" @mock.patch('toot.http.post') def test_notifications_clear(mock_post, capsys): console.run_command(app, user, 'notifications', ['--clear']) out, err = capsys.readouterr() out = uncolorize(out) mock_post.assert_called_once_with(app, user, '/api/v1/notifications/clear') assert not err assert out == 'Cleared notifications\n' def u(user_id, access_token="abc"): username, instance = user_id.split("@") return { "instance": instance, "username": username, "access_token": access_token, } @mock.patch('toot.config.save_config') @mock.patch('toot.config.load_config') def test_logout(mock_load, mock_save, capsys): mock_load.return_value = { "users": { "[email protected]": u("[email protected]"), "[email protected]": u("[email protected]"), }, "active_user": "[email protected]", } console.run_command(app, user, "logout", ["[email protected]"]) mock_save.assert_called_once_with({ 'users': { '[email protected]': u("[email protected]") }, 'active_user': None }) out, err = capsys.readouterr() assert "✓ User [email protected] logged out" in out @mock.patch('toot.config.save_config') @mock.patch('toot.config.load_config') def test_activate(mock_load, mock_save, capsys): mock_load.return_value = { "users": { "[email protected]": u("[email protected]"), "[email protected]": u("[email protected]"), }, "active_user": "[email protected]", } console.run_command(app, user, "activate", ["[email protected]"]) mock_save.assert_called_once_with({ 'users': { "[email protected]": u("[email protected]"), '[email protected]': u("[email protected]") }, 'active_user': "[email protected]" }) out, err = capsys.readouterr() assert "✓ User [email protected] active" in out<|fim▁end|>
assert "111111111111111111" in lines[-3]