prompt (large_string): lengths 70 to 991k
completion (large_string): lengths 0 to 1.02k
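The records below are fill-in-the-middle (FIM) code-completion examples: each prompt wraps a source file in sentinel tokens, with <|fim▁hole|> marking the span to be filled in, and the completion column appears to hold that span's text (consistent with the column stats above, where completions can be empty). The following is a minimal sketch of how one such record could be decomposed and reassembled, assuming the DeepSeek-style layout implied by the sentinels; FIM_RE and reassemble are illustrative names, not part of any published loader.

# Sketch only: assumes each prompt has the exact shape
#   <|file_name|>NAME<|end_file_name|><|fim▁begin|>PREFIX<|fim▁hole|>SUFFIX<|fim▁end|>
# and that the paired completion column holds the text elided at <|fim▁hole|>.
import re

FIM_RE = re.compile(
    r"<\|file_name\|>(?P<name>.*?)<\|end_file_name\|>"
    r"<\|fim▁begin\|>(?P<prefix>.*?)"
    r"<\|fim▁hole\|>(?P<suffix>.*?)<\|fim▁end\|>",
    re.DOTALL,
)

def reassemble(prompt: str, completion: str) -> tuple:
    """Return (file_name, full_source) with the completion spliced back into the hole."""
    m = FIM_RE.fullmatch(prompt.strip())
    if m is None:
        raise ValueError("prompt does not match the expected FIM layout")
    return m.group("name"), m.group("prefix") + completion + m.group("suffix")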
<|file_name|>ParseDepends.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including<|fim▁hole|># without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" import os.path import string import TestSCons _python_ = TestSCons._python_ test = TestSCons.TestSCons() test.subdir('subdir', 'sub2') test.write('build.py', r""" import sys contents = open(sys.argv[2], 'rb').read() + open(sys.argv[3], 'rb').read() file = open(sys.argv[1], 'wb') file.write(contents) file.close() """) test.write('SConstruct', """ Foo = Builder(action = r'%(_python_)s build.py $TARGET $SOURCES subdir/foo.dep') Bar = Builder(action = r'%(_python_)s build.py $TARGET $SOURCES subdir/bar.dep') env = Environment(BUILDERS = { 'Foo' : Foo, 'Bar' : Bar }, SUBDIR='subdir') env.ParseDepends('foo.d') env.ParseDepends('bar.d') env.Foo(target = 'f1.out', source = 'f1.in') env.Foo(target = 'f2.out', source = 'f2.in') env.Bar(target = 'subdir/f3.out', source = 'f3.in') SConscript('subdir/SConscript', "env") env.Foo(target = 'f5.out', source = 'f5.in') env.Bar(target = 'sub2/f6.out', source = 'f6.in') """ % locals()) test.write('foo.d', "f1.out f2.out: %s\n" % os.path.join('subdir', 'foo.dep')) test.write('bar.d', "%s: %s\nf5.out: sub2" % (os.path.join('subdir', 'f3.out'), os.path.join('subdir', 'bar.dep'))) test.write(['subdir', 'SConscript'], """ Import("env") ParseDepends('bar.d') env.Bar(target = 'f4.out', source = 'f4.in') """) test.write(['subdir', 'bar.d'], "f4.out: bar.dep\n") test.write('f1.in', "f1.in\n") test.write('f2.in', "f2.in\n") test.write('f3.in', "f3.in\n") test.write(['subdir', 'f4.in'], "subdir/f4.in\n") test.write('f5.in', "f5.in\n") test.write('f6.in', "f6.in\n") test.write(['subdir', 'foo.dep'], "subdir/foo.dep 1\n") test.write(['subdir', 'bar.dep'], "subdir/bar.dep 1\n") test.run(arguments = '.') test.must_match('f1.out', "f1.in\nsubdir/foo.dep 1\n") test.must_match('f2.out', "f2.in\nsubdir/foo.dep 1\n") test.must_match(['subdir', 'f3.out'], "f3.in\nsubdir/bar.dep 1\n") test.must_match(['subdir', 'f4.out'], "subdir/f4.in\nsubdir/bar.dep 1\n") test.must_match('f5.out', "f5.in\nsubdir/foo.dep 1\n") test.must_match(['sub2', 'f6.out'], "f6.in\nsubdir/bar.dep 1\n") # test.write(['subdir', 'foo.dep'], "subdir/foo.dep 2\n") test.write(['subdir', 'bar.dep'], "subdir/bar.dep 2\n") test.write('f6.in', "f6.in 2\n") test.run(arguments = '.') test.must_match('f1.out', "f1.in\nsubdir/foo.dep 2\n") test.must_match('f2.out', "f2.in\nsubdir/foo.dep 2\n") 
test.must_match(['subdir', 'f3.out'], "f3.in\nsubdir/bar.dep 2\n") test.must_match(['subdir', 'f4.out'], "subdir/f4.in\nsubdir/bar.dep 2\n") test.must_match('f5.out', "f5.in\nsubdir/foo.dep 2\n") test.must_match(['sub2', 'f6.out'], "f6.in 2\nsubdir/bar.dep 2\n") # test.write(['subdir', 'foo.dep'], "subdir/foo.dep 3\n") test.run(arguments = '.') test.must_match('f1.out', "f1.in\nsubdir/foo.dep 3\n") test.must_match('f2.out', "f2.in\nsubdir/foo.dep 3\n") test.must_match(['subdir', 'f3.out'], "f3.in\nsubdir/bar.dep 2\n") test.must_match(['subdir', 'f4.out'], "subdir/f4.in\nsubdir/bar.dep 2\n") test.must_match('f5.out', "f5.in\nsubdir/foo.dep 2\n") test.must_match(['sub2', 'f6.out'], "f6.in 2\nsubdir/bar.dep 2\n") # test.write(['subdir', 'bar.dep'], "subdir/bar.dep 3\n") test.run(arguments = '.') test.must_match('f1.out', "f1.in\nsubdir/foo.dep 3\n") test.must_match('f2.out', "f2.in\nsubdir/foo.dep 3\n") test.must_match(['subdir', 'f3.out'], "f3.in\nsubdir/bar.dep 3\n") test.must_match(['subdir', 'f4.out'], "subdir/f4.in\nsubdir/bar.dep 3\n") test.must_match('f5.out', "f5.in\nsubdir/foo.dep 2\n") test.must_match(['sub2', 'f6.out'], "f6.in 2\nsubdir/bar.dep 2\n") # test.write('f6.in', "f6.in 3\n") test.run(arguments = '.') test.must_match('f1.out', "f1.in\nsubdir/foo.dep 3\n") test.must_match('f2.out', "f2.in\nsubdir/foo.dep 3\n") test.must_match(['subdir', 'f3.out'], "f3.in\nsubdir/bar.dep 3\n") test.must_match(['subdir', 'f4.out'], "subdir/f4.in\nsubdir/bar.dep 3\n") test.must_match('f5.out', "f5.in\nsubdir/foo.dep 3\n") test.must_match(['sub2', 'f6.out'], "f6.in 3\nsubdir/bar.dep 3\n") test.write('SConstruct', """ ParseDepends('nonexistent_file') """) test.run() test.write('SConstruct', """ ParseDepends('nonexistent_file', must_exist=1) """) test.run(status=2, stderr=None) test.fail_test(string.find(test.stderr(), "No such file or directory") == -1) test.pass_test()<|fim▁end|>
<|file_name|>DoubleMatrix1D.java<|end_file_name|><|fim▁begin|>/** * Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies * * Please see distribution for license. */ package com.opengamma.analytics.math.matrix; import java.util.Arrays; import org.apache.commons.lang.Validate; /** * A minimal implementation of a vector (in the mathematical sense) that contains doubles. */ public class DoubleMatrix1D implements Matrix<Double> { private final double[] _data; private final int _elements; /** Empty vector. */ public static final DoubleMatrix1D EMPTY_MATRIX = new DoubleMatrix1D(new double[0]); /** * @param data * The data, not null */ public DoubleMatrix1D(final Double[] data) { Validate.notNull(data); _elements = data.length; _data = new double[_elements]; for (int i = 0; i < _elements; i++) { _data[i] = data[i]; } }<|fim▁hole|> /** * @param data * The data, not null */ public DoubleMatrix1D(final double... data) { Validate.notNull(data); _elements = data.length; _data = Arrays.copyOf(data, _elements); } /** * Create an vector of length n with all entries equal to value. * * @param n * number of elements * @param value * value of elements */ public DoubleMatrix1D(final int n, final double value) { _elements = n; _data = new double[_elements]; Arrays.fill(_data, value); } /** * Returns the underlying vector data. If this is changed so is the vector. * * @see #toArray to get a copy of data * @return An array containing the vector elements */ public double[] getData() { return _data; } /** * Convert the vector to a double array. As its elements are copied, the array is independent from the vector data. * * @return An array containing a copy of vector elements */ public double[] toArray() { return Arrays.copyOf(_data, _elements); } /** * {@inheritDoc} */ @Override public int getNumberOfElements() { return _elements; } /** * {@inheritDoc} This method expects one index - any subsequent indices will be ignored. */ @Override public Double getEntry(final int... index) { return _data[index[0]]; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + Arrays.hashCode(_data); return result; } @Override public boolean equals(final Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (getClass() != obj.getClass()) { return false; } final DoubleMatrix1D other = (DoubleMatrix1D) obj; if (!Arrays.equals(_data, other._data)) { return false; } return true; } @Override public String toString() { final StringBuffer sb = new StringBuffer(); final int n = _data.length; sb.append(" ("); for (int i = 0; i < n - 1; i++) { sb.append(_data[i] + ", "); } sb.append(_data[n - 1] + ") "); return sb.toString(); } }<|fim▁end|>
<|file_name|>vm.py<|end_file_name|><|fim▁begin|>import sys import os import pty, shlex import signal import subprocess import socket import time import atexit import re import string import logging import random from .vif import VirtualInterface from .vnet import VirtualNet from .riotnative import RIOT from string import Template import libvirt import hashlib all_domains = None class VMException(Exception): def __init__(s, msg=None): if not msg: s.message = "Unknown VM Error." else: s.message = msg class VM(): def __init__(self, name, nodeType, nics=None, binary=None, vmgroup_name=""): self.name = name self.nodeType = nodeType self.binary = binary<|fim▁hole|> self.vmgroup_name = vmgroup_name self.vm_instance = None self.fullname = self.name if self.vmgroup_name: self.fullname = "%s_%s" % (self.vmgroup_name, name) def lookup(self, conn=None): global all_domains if self.nodeType == "meshrouter": if not all_domains: all_domains = {} for id in conn.listDomainsID(): dom = conn.lookupByID(id) all_domains[dom.name()] = dom for id in conn.listDefinedDomains(): all_domains[id] = conn.lookupByName(id) try: self.vm_instance = all_domains[self.fullname] logging.getLogger("").debug("Domain %s already defined." % self.fullname) self.conn = conn return True except libvirt.libvirtError: return False except KeyError: return False elif self.nodeType == "riot_native": logging.getLogger("Looking up this node") self.vm_instance = RIOT(self.fullname, self.binary, self.vmgroup_name, self.nics[0].tap) return True def define(self, conn=None): if self.nodeType == "meshrouter": if not self.lookup(conn): logging.getLogger("").info("Defining VM %s" %(self.fullname)) self.vm_instance = conn.defineXML(self.create_vm_xml()) else: logging.getLogger("").info("Defining RIOT native process %s" % (self.fullname)) if not self.binary: logging.getLogger("").error("No binary for RIOT native given. Exiting...") sys.exit(1) self.vm_instance = RIOT(self.fullname, self.binary, self.vmgroup_name, self.nics[0].tap) def undefine(self, conn=None): # TODO: needs here anything to be done for RIOT native? 
if self.nodeType == "meshrouter": if self.vm_instance or self.lookup(conn): self.vm_instance.undefine() def start(self): if self.vm_instance: if not self.vm_instance.isActive(): self.vm_instance.create() def stop(self): if self.vm_instance: logging.getLogger("").debug("stopping %s (%s)" % (self.name, self.vm_instance.pid)) if self.vm_instance.isActive(): logging.getLogger("").debug("destroying %s" % self.vm_instance.pid) self.vm_instance.destroy() def getType(self): return self.nodeType def create_interfaces_xml(self): if len(self.nics)<1: return "" ifxml = "" nic_options = '' for nic in self.nics: macaddr = "" if nic.macaddr: macaddr = macaddr_template.substitute(mac=nic.macaddr) ifxml = ifxml + if_tmpl.substitute(mac=macaddr,tap=nic.tap) return ifxml def create_vm_xml(self): ifxml = self.create_interfaces_xml() return vm_xml_tmpl.substitute(name=self.fullname,memory=262144,interfaces=ifxml) vm_xml_tmpl = Template(''' <domain type='kvm'> <name>$name</name> <memory>$memory</memory> <vcpu>1</vcpu> <os> <type arch='i686'>hvm</type> <boot dev='hd'/> </os> <features> <acpi/> <pae/> </features> <clock offset='utc'/> <on_poweroff>restart</on_poweroff> <on_reboot>restart</on_reboot> <on_crash>restart</on_crash> <devices> <disk type='file' device='disk'> <source file='/usr/local/share/qemu/gpxe-serial.bin'/> <target dev='hda' bus='ide'/> </disk> <controller type='ide' index='0'/> $interfaces <serial type='pty'> <target port='0'/> </serial> <console type='pty'> <target port='0'/> </console> </devices> </domain> ''') if_tmpl = Template(''' <interface type='ethernet'> $mac <target dev='$tap'/> <model type='e1000'/> <script path='/bin/true'/> </interface> '''); # if_tmpl = Template(''' # <interface type='bridge'> # <source bridge='$bridge'/> # $mac # <target dev='$tap'/> # <model type='e1000'/> # </interface> # '''); macaddr_template = Template(''' <mac address='$mac'/> ''');<|fim▁end|>
        self.nics = nics
        if not nics:
            self.nics = []
<|file_name|>daltonparser.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # # Copyright (c) 2016, the cclib development team # # This file is part of cclib (http://cclib.github.io) and is distributed under # the terms of the BSD 3-Clause License. """Parser for DALTON output files""" from __future__ import print_function import numpy from . import logfileparser from . import utils class DALTON(logfileparser.Logfile): """A DALTON log file.""" def __init__(self, *args, **kwargs): # Call the __init__ method of the superclass super(DALTON, self).__init__(logname="DALTON", *args, **kwargs) def __str__(self): """Return a string representation of the object.""" return "DALTON log file %s" % (self.filename) def __repr__(self): """Return a representation of the object.""" return 'DALTON("%s")' % (self.filename) def normalisesym(self, label): """Normalise the symmetries used by DALTON.""" # It appears that DALTON is using the correct labels. return label def before_parsing(self): # Used to decide whether to wipe the atomcoords clean. self.firststdorient = True # Use to track which section/program output we are parsing, # since some programs print out the same headers, which we # would like to use as triggers. self.section = None # If there is no symmetry, assume this. self.symlabels = ['Ag'] # Is the basis set from a single library file? This is true # when the first line is BASIS, false for INTGRL/ATOMBASIS. self.basislibrary = True def parse_geometry(self, lines): """Parse DALTON geometry lines into an atomcoords array.""" coords = [] for lin in lines: # Without symmetry there are simply four columns, and with symmetry # an extra label is printed after the atom type. cols = lin.split() if cols[1][0] == "_": xyz = cols[2:] else: xyz = cols[1:] # The assumption is that DALTON always print in atomic units. xyz = [utils.convertor(float(x), 'bohr', 'Angstrom') for x in xyz] coords.append(xyz) return coords def extract(self, inputfile, line): """Extract information from the file object inputfile.""" # extract the version number first if line[4:30] == "This is output from DALTON": if line.split()[5] == "release" or line.split()[5] == "(Release": self.metadata["package_version"] = line.split()[6][6:] else: self.metadata["package_version"] = line.split()[5] # Is the basis set from a single library file, or is it # manually specified? See before_parsing(). if line[:6] == 'INTGRL'or line[:9] == 'ATOMBASIS': self.basislibrary = False # This section at the start of geometry optimization jobs gives us information # about optimization targets (geotargets) and possibly other things as well. # Notice how the number of criteria required to converge is set to 2 here, # but this parameter can (probably) be tweaked in the input. # # Chosen parameters for *OPTIMI : # ------------------------------- # # Default 1st order method will be used: BFGS update. # Optimization will be performed in redundant internal coordinates (by default). # Model Hessian will be used as initial Hessian. # The model Hessian parameters of Roland Lindh will be used. # # # Trust region method will be used to control step (default). 
# # Convergence threshold for gradient set to : 1.00D-04 # Convergence threshold for energy set to : 1.00D-06 # Convergence threshold for step set to : 1.00D-04 # Number of convergence criteria set to : 2 # if line.strip()[:25] == "Convergence threshold for": if not hasattr(self, 'geotargets'): self.geotargets = [] self.geotargets_names = [] target = self.float(line.split()[-1]) name = line.strip()[25:].split()[0] self.geotargets.append(target) self.geotargets_names.append(name) # This is probably the first place where atomic symmetry labels are printed, # somewhere afer the SYMGRP point group information section. We need to know # which atom is in which symmetry, since this influences how some things are # print later on. We can also get some generic attributes along the way. # # Isotopic Masses # --------------- # # C _1 12.000000 # C _2 12.000000 # C _1 12.000000 # C _2 12.000000 # ... # # Note that when there is no symmetry there are only two columns here. # # It is also a good idea to keep in mind that DALTON, with symmetry on, operates # in a specific point group, so symmetry atoms have no internal representation. # Therefore only atoms marked as "_1" or "#1" in other places are actually # represented in the model. The symmetry atoms (higher symmetry indices) are # generated on the fly when writing the output. We will save the symmetry indices # here for later use. # # Additional note: the symmetry labels are printed only for atoms that have # symmetry images... so assume "_1" if a label is missing. For example, there will # be no label for atoms on an axes, such as the oxygen in water in C2v: # # O 15.994915 # H _1 1.007825 # H _2 1.007825 # if line.strip() == "Isotopic Masses": self.skip_lines(inputfile, ['d', 'b']) # Since some symmetry labels may be missing, read in all lines first. lines = [] line = next(inputfile) while line.strip(): lines.append(line) line = next(inputfile) # Split lines into columsn and dd any missing symmetry labels, if needed. lines = [l.split() for l in lines] if any([len(l) == 3 for l in lines]): for il, l in enumerate(lines): if len(l) == 2: lines[il] = [l[0], "_1", l[1]] atomnos = [] symmetry_atoms = [] atommasses = [] for cols in lines: cols0 = ''.join([i for i in cols[0] if not i.isdigit()]) #remove numbers atomnos.append(self.table.number[cols0]) if len(cols) == 3: symmetry_atoms.append(int(cols[1][1])) atommasses.append(float(cols[2])) else: atommasses.append(float(cols[1])) self.set_attribute('atomnos', atomnos) self.set_attribute('atommasses', atommasses) self.set_attribute('natom', len(atomnos)) self.set_attribute('natom', len(atommasses)) # Save this for later if there were any labels. self.symmetry_atoms = symmetry_atoms or None # This section is close to the beginning of the file, and can be used # to parse natom, nbasis and atomnos. We also construct atombasis here, # although that is symmetry-dependent (see inline comments). Note that # DALTON operates on the idea of atom type, which are not necessarily # unique element-wise. # # Atoms and basis sets # -------------------- # # Number of atom types : 6 # Total number of atoms: 20 # # Basis set used is "STO-3G" from the basis set library. 
# # label atoms charge prim cont basis # ---------------------------------------------------------------------- # C 6 6.0000 15 5 [6s3p|2s1p] # H 4 1.0000 3 1 [3s|1s] # C 2 6.0000 15 5 [6s3p|2s1p] # H 2 1.0000 3 1 [3s|1s] # C 2 6.0000 15 5 [6s3p|2s1p] # H 4 1.0000 3 1 [3s|1s] # ---------------------------------------------------------------------- # total: 20 70.0000 180 60 # ---------------------------------------------------------------------- # # Threshold for neglecting AO integrals: 1.00D-12 # if line.strip() == "Atoms and basis sets": self.skip_lines(inputfile, ['d', 'b']) line = next(inputfile) assert "Number of atom types" in line self.ntypes = int(line.split()[-1]) line = next(inputfile) assert "Total number of atoms:" in line self.set_attribute("natom", int(line.split()[-1])) # When using the INTGRL keyword and not pulling from the # basis set library, the "Basis set used" line doesn't # appear. if not self.basislibrary: self.skip_line(inputfile, 'b') else: #self.skip_lines(inputfile, ['b', 'basisname', 'b']) line = next(inputfile) line = next(inputfile) self.metadata["basis_set"] = line.split()[4].strip('\"') line = next(inputfile) line = next(inputfile) cols = line.split() # Detecting which columns things are in will be somewhat more robust # to formatting changes in the future. iatoms = cols.index('atoms') icharge = cols.index('charge') icont = cols.index('cont') self.skip_line(inputfile, 'dashes') atomnos = [] atombasis = [] nbasis = 0 for itype in range(self.ntypes): line = next(inputfile) cols = line.split() atoms = int(cols[iatoms]) charge = float(cols[icharge]) assert int(charge) == charge charge = int(charge) cont = int(cols[icont]) for at in range(atoms): atomnos.append(charge) # If symmetry atoms are present, these will have basis functions # printed immediately after the one unique atom, so for all # practical purposes cclib can assume the ordering in atombasis # follows this out-of order scheme to match the output. if self.symmetry_atoms: # So we extend atombasis only for the unique atoms (with a # symmetry index of 1), interleaving the basis functions # for this atoms with basis functions for all symmetry atoms. if self.symmetry_atoms[at] == 1: nsyms = 1 while (at + nsyms < self.natom) and self.symmetry_atoms[at + nsyms] == nsyms + 1: nsyms += 1 for isym in range(nsyms): istart = nbasis + isym iend = nbasis + cont*nsyms + isym atombasis.append(list(range(istart, iend, nsyms))) nbasis += cont*nsyms else: atombasis.append(list(range(nbasis, nbasis + cont))) nbasis += cont self.set_attribute('atomnos', atomnos) self.set_attribute('atombasis', atombasis) self.set_attribute('nbasis', nbasis) self.skip_line(inputfile, 'dashes') line = next(inputfile) self.set_attribute('natom', int(line.split()[iatoms])) self.set_attribute('nbasis', int(line.split()[icont])) self.skip_line(inputfile, 'dashes') # The Gaussian exponents and contraction coefficients are printed for each primitive # and then the contraction information is printed separately (see below) Both segmented # and general contractions are used, but we can parse them the same way since zeros are # inserted for primitives that are not used. However, no atom index is printed here # so we don't really know when a new atom is started without using information # from other section (we should already have atombasis parsed at this point). # # Orbital exponents and contraction coefficients # ---------------------------------------------- # # # C #1 1s 1 71.616837 0.1543 0.0000 # seg. cont. 
2 13.045096 0.5353 0.0000 # 3 3.530512 0.4446 0.0000 # 4 2.941249 0.0000 -0.1000 # ... # # Here is a corresponding fragment for general contractions: # # C 1s 1 33980.000000 0.0001 -0.0000 0.0000 0.0000 0.0000 # 0.0000 0.0000 0.0000 0.0000 # gen. cont. 2 5089.000000 0.0007 -0.0002 0.0000 0.0000 0.0000 # 0.0000 0.0000 0.0000 0.0000 # 3 1157.000000 0.0037 -0.0008 0.0000 0.0000 0.0000 # 0.0000 0.0000 0.0000 0.0000 # 4 326.600000 0.0154 -0.0033 0.0000 0.0000 0.0000 # ... # if line.strip() == "Orbital exponents and contraction coefficients": self.skip_lines(inputfile, ['d', 'b', 'b']) # Here we simply want to save the numbers defining each primitive for later use, # where the first number is the exponent, and the rest are coefficients which # should be zero if the primitive is not used in a contraction. This list is # symmetry agnostic, although primitives/contractions are not generally. self.primitives = [] prims = [] line = next(inputfile) while line.strip(): # Each contraction/section is separated by a blank line, and at the very # end there is an extra blank line. while line.strip(): # For generalized contraction it is typical to see the coefficients wrapped # to new lines, so we must collect them until we are sure a primitive starts. if line[:30].strip(): if prims: self.primitives.append(prims) prims = [] prims += [float(x) for x in line[20:].split()] line = next(inputfile) line = next(inputfile) # At the end we have the final primitive to save. self.primitives.append(prims) # This is the corresponding section to the primitive definitions parsed above, so we # assume those numbers are available in the variable 'primitives'. Here we read in the # indicies of primitives, which we use to construct gbasis. # # Contracted Orbitals # ------------------- # # 1 C 1s 1 2 3 4 5 6 7 8 9 10 11 12 # 2 C 1s 1 2 3 4 5 6 7 8 9 10 11 12 # 3 C 1s 10 # 4 C 1s 11 # ... # # Here is an fragment with symmetry labels: # # ... # 1 C #1 1s 1 2 3 # 2 C #2 1s 7 8 9 # 3 C #1 1s 4 5 6 # ... # if line.strip() == "Contracted Orbitals": self.skip_lines(inputfile, ['d', 'b']) # This is the reverse of atombasis, so that we can easily map from a basis functions # to the corresponding atom for use in the loop below. basisatoms = [None for i in range(self.nbasis)] for iatom in range(self.natom): for ibasis in self.atombasis[iatom]: basisatoms[ibasis] = iatom # Since contractions are not generally given in order (when there is symmetry), # start with an empty list for gbasis. gbasis = [[] for i in range(self.natom)] # This will hold the number of contractions already printed for each orbital, # counting symmetry orbitals separately. orbitalcount = {} for ibasis in range(self.nbasis): <|fim▁hole|> cols = line.split() # The first columns is always the basis function index, which we can assert. assert int(cols[0]) == ibasis + 1 # The number of columns is differnet when symmetry is used. If there are further # complications, it may be necessary to use exact slicing, since the formatting # of this section seems to be fixed (although columns can be missing). Notice how # We subtract one from the primitive indices here already to match cclib's # way of counting from zero in atombasis. 
if '#' in line: sym = cols[2] orbital = cols[3] prims = [int(i) - 1 for i in cols[4:]] else: sym = None orbital = cols[2] prims = [int(i) - 1 for i in cols[3:]] shell = orbital[0] subshell = orbital[1].upper() iatom = basisatoms[ibasis] # We want to count the number of contractiong already parsed for each orbital, # but need to make sure to differentiate between atoms and symmetry atoms. orblabel = str(iatom) + '.' + orbital + (sym or "") orbitalcount[orblabel] = orbitalcount.get(orblabel, 0) + 1 # Here construct the actual primitives for gbasis, which should be a list # of 2-tuples containing an exponent an coefficient. Note how we are indexing # self.primitives from zero although the printed numbering starts from one. primitives = [] for ip in prims: p = self.primitives[ip] exponent = p[0] coefficient = p[orbitalcount[orblabel]] primitives.append((exponent, coefficient)) contraction = (subshell, primitives) if contraction not in gbasis[iatom]: gbasis[iatom].append(contraction) self.skip_line(inputfile, 'blank') self.set_attribute('gbasis', gbasis) # Since DALTON sometimes uses symmetry labels (Ag, Au, etc.) and sometimes # just the symmetry group index, we need to parse and keep a mapping between # these two for later use. # # Symmetry Orbitals # ----------------- # # Number of orbitals in each symmetry: 25 5 25 5 # # # Symmetry Ag ( 1) # # 1 C 1s 1 + 2 # 2 C 1s 3 + 4 # ... # if line.strip() == "Symmetry Orbitals": self.skip_lines(inputfile, ['d', 'b']) line = inputfile.next() self.symcounts = [int(c) for c in line.split(':')[1].split()] self.symlabels = [] for sc in self.symcounts: self.skip_lines(inputfile, ['b', 'b']) # If the number of orbitals for a symmetry is zero, the printout # is different (see MP2 unittest logfile for an example). line = inputfile.next() if sc == 0: assert "No orbitals in symmetry" in line else: assert line.split()[0] == "Symmetry" self.symlabels.append(line.split()[1]) self.skip_line(inputfile, 'blank') for i in range(sc): orbital = inputfile.next() if "Starting in Wave Function Section (SIRIUS)" in line: self.section = "SIRIUS" # Orbital specifications # ====================== # Abelian symmetry species All | 1 2 3 4 # | Ag Au Bu Bg # --- | --- --- --- --- # Total number of orbitals 60 | 25 5 25 5 # Number of basis functions 60 | 25 5 25 5 # # ** Automatic occupation of RKS orbitals ** # # -- Initial occupation of symmetries is determined from extended Huckel guess. # -- Initial occupation of symmetries is : # @ Occupied SCF orbitals 35 | 15 2 15 3 # # Maximum number of Fock iterations 0 # Maximum number of DIIS iterations 60 # Maximum number of QC-SCF iterations 60 # Threshold for SCF convergence 1.00D-05 # This is a DFT calculation of type: B3LYP # ... 
# if "Total number of orbitals" in line: # DALTON 2015 adds a @ in front of number of orbitals chomp = line.split() index = 4 if "@" in chomp: index = 5 self.set_attribute("nbasis", int(chomp[index])) self.nmo_per_symmetry = list(map(int, chomp[index+2:])) assert self.nbasis == sum(self.nmo_per_symmetry) if "Threshold for SCF convergence" in line: if not hasattr(self, "scftargets"): self.scftargets = [] scftarget = self.float(line.split()[-1]) self.scftargets.append([scftarget]) # Wave function specification # ============================ # @ Wave function type >>> KS-DFT <<< # @ Number of closed shell electrons 70 # @ Number of electrons in active shells 0 # @ Total charge of the molecule 0 # # @ Spin multiplicity and 2 M_S 1 0 # @ Total number of symmetries 4 (point group: C2h) # @ Reference state symmetry 1 (irrep name : Ag ) # # This is a DFT calculation of type: B3LYP # ... # if line.strip() == "Wave function specification": self.skip_line(inputfile, 'e') line = next(inputfile) # Must be a coupled cluster calculation. if line.strip() == '': self.skip_lines(inputfile, ['b', 'Coupled Cluster', 'b']) else: assert "wave function" in line.lower() line = next(inputfile) assert "Number of closed shell electrons" in line self.paired_electrons = int(line.split()[-1]) line = next(inputfile) assert "Number of electrons in active shells" in line self.unpaired_electrons = int(line.split()[-1]) line = next(inputfile) assert "Total charge of the molecule" in line self.set_attribute("charge", int(line.split()[-1])) self.skip_line(inputfile, 'b') line = next(inputfile) assert "Spin multiplicity and 2 M_S" in line self.set_attribute("mult", int(line.split()[-2])) # Dalton only has ROHF, no UHF if self.mult != 1: self.metadata["unrestricted"] = True if not hasattr(self, 'homos'): self.set_attribute('homos', [(self.paired_electrons // 2) - 1]) if self.unpaired_electrons > 0: self.homos.append(self.homos[0]) self.homos[0] += self.unpaired_electrons # ********************************************* # ***** DIIS optimization of Hartree-Fock ***** # ********************************************* # # C1-DIIS algorithm; max error vectors = 8 # # Automatic occupation of symmetries with 70 electrons. # # Iter Total energy Error norm Delta(E) SCF occupation # ----------------------------------------------------------------------------- # K-S energy, electrons, error : -46.547567739269 69.9999799123 -2.01D-05 # @ 1 -381.645762476 4.00D+00 -3.82D+02 15 2 15 3 # Virial theorem: -V/T = 2.008993 # @ MULPOP C _1 0.15; C _2 0.15; C _1 0.12; C _2 0.12; C _1 0.11; C _2 0.11; H _1 -0.15; H _2 -0.15; H _1 -0.14; H _2 -0.14; # @ C _1 0.23; C _2 0.23; H _1 -0.15; H _2 -0.15; C _1 0.08; C _2 0.08; H _1 -0.12; H _2 -0.12; H _1 -0.13; H _2 -0.13; # ----------------------------------------------------------------------------- # K-S energy, electrons, error : -46.647668038900 69.9999810430 -1.90D-05 # @ 2 -381.949410128 1.05D+00 -3.04D-01 15 2 15 3 # Virial theorem: -V/T = 2.013393 # ... # # With and without symmetry, the "Total energy" line is shifted a little. 
if self.section == "SIRIUS" and "Iter" in line and "Total energy" in line: iteration = 0 converged = False values = [] if not hasattr(self, "scfvalues"): self.scfvalues = [] while not converged: try: line = next(inputfile) except StopIteration: self.logger.warning('File terminated before end of last SCF!') break # each iteration is bracketed by "-------------" if "-------------------" in line: iteration += 1 continue # the first hit of @ n where n is the current iteration strcompare = "@{0:>3d}".format(iteration) if strcompare in line: temp = line.split() error_norm = self.float(temp[3]) values.append([error_norm]) if line[0] == "@" and "converged in" in line: converged = True # It seems DALTON does change the SCF convergence criteria during a # geometry optimization, but also does not print them. So, assume they # are unchanged and copy the initial values after the first step. However, # it would be good to check up on this - perhaps it is possible to print. self.scfvalues.append(values) if len(self.scfvalues) > 1: self.scftargets.append(self.scftargets[-1]) # DALTON organizes the energies by symmetry, so we need to parse first, # and then sort the energies (and labels) before we store them. # # The formatting varies depending on RHF/DFT and/or version. Here is # an example from a DFT job: # # *** SCF orbital energy analysis *** # # Only the five lowest virtual orbital energies printed in each symmetry. # # Number of electrons : 70 # Orbital occupations : 15 2 15 3 # # Sym Kohn-Sham orbital energies # # 1 Ag -10.01616533 -10.00394288 -10.00288640 -10.00209612 -9.98818062 # -0.80583154 -0.71422407 -0.58487249 -0.55551093 -0.50630125 # ... # # Here is an example from an RHF job that only has symmetry group indices: # # *** SCF orbital energy analysis *** # # Only the five lowest virtual orbital energies printed in each symmetry. # # Number of electrons : 70 # Orbital occupations : 15 2 15 3 # # Sym Hartree-Fock orbital energies # # 1 -11.04052518 -11.03158921 -11.02882211 -11.02858563 -11.01747921 # -1.09029777 -0.97492511 -0.79988247 -0.76282547 -0.69677619 # ... # if self.section == "SIRIUS" and "*** SCF orbital energy analysis ***" in line: # to get ALL orbital energies, the .PRINTLEVELS keyword needs # to be at least 0,10 (up from 0,5). I know, obvious, right? # this, however, will conflict with the scfvalues output that # changes into some weird form of DIIS debug output. mosyms = [] moenergies = [] self.skip_line(inputfile, 'blank') line = next(inputfile) # There is some extra text between the section header and # the number of electrons for open-shell calculations. while "Number of electrons" not in line: line = next(inputfile) nelectrons = int(line.split()[-1]) line = next(inputfile) occupations = [int(o) for o in line.split()[3:]] nsym = len(occupations) self.skip_lines(inputfile, ['b', 'header', 'b']) # now parse nsym symmetries for isym in range(nsym): # For unoccupied symmetries, nothing is printed here. if occupations[isym] == 0: continue # When there are exactly five energies printed (on just one line), it seems # an extra blank line is printed after a block. line = next(inputfile) if not line.strip(): line = next(inputfile) cols = line.split() # The first line has the orbital symmetry information, but sometimes # it's the label and sometimes it's the index. There are always five # energies per line, though, so we can deduce if we have the labels or # not just the index. In the latter case, we depend on the labels # being read earlier into the list `symlabels`. 
Finally, if no symlabels # were read that implies there is only one symmetry, namely Ag. if 'A' in cols[1] or 'B' in cols[1]: sym = self.normalisesym(cols[1]) energies = [float(t) for t in cols[2:]] else: if hasattr(self, 'symlabels'): sym = self.normalisesym(self.symlabels[int(cols[0]) - 1]) else: assert cols[0] == '1' sym = "Ag" energies = [float(t) for t in cols[1:]] while len(energies) > 0: moenergies.extend(energies) mosyms.extend(len(energies)*[sym]) line = next(inputfile) energies = [float(col) for col in line.split()] # now sort the data about energies and symmetries. see the following post for the magic # http://stackoverflow.com/questions/19339/a-transpose-unzip-function-in-python-inverse-of-zip sdata = sorted(zip(moenergies, mosyms), key=lambda x: x[0]) moenergies, mosyms = zip(*sdata) self.moenergies = [[]] self.moenergies[0] = [utils.convertor(moenergy, 'hartree', 'eV') for moenergy in moenergies] self.mosyms = [[]] self.mosyms[0] = mosyms if not hasattr(self, "nmo"): self.nmo = self.nbasis if len(self.moenergies[0]) != self.nmo: self.set_attribute('nmo', len(self.moenergies[0])) # .-----------------------------------. # | >>> Final results from SIRIUS <<< | # `-----------------------------------' # # # @ Spin multiplicity: 1 # @ Spatial symmetry: 1 ( irrep Ag in C2h ) # @ Total charge of molecule: 0 # # @ Final DFT energy: -382.050716652387 # @ Nuclear repulsion: 445.936979976608 # @ Electronic energy: -827.987696628995 # # @ Final gradient norm: 0.000003746706 # ... # if "Final HF energy" in line and not (hasattr(self, "mpenergies") or hasattr(self, "ccenergies")): self.metadata["methods"].append("HF") if "Final DFT energy" in line: self.metadata["methods"].append("DFT") if "This is a DFT calculation of type" in line: self.metadata["functional"] = line.split()[-1] if "Final DFT energy" in line or "Final HF energy" in line: if not hasattr(self, "scfenergies"): self.scfenergies = [] temp = line.split() self.scfenergies.append(utils.convertor(float(temp[-1]), "hartree", "eV")) if "@ = MP2 second order energy" in line: self.metadata["methods"].append("MP2") energ = utils.convertor(float(line.split()[-1]), 'hartree', 'eV') if not hasattr(self, "mpenergies"): self.mpenergies = [] self.mpenergies.append([]) self.mpenergies[-1].append(energ) if "Total CCSD energy:" in line: self.metadata["methods"].append("CCSD") energ = utils.convertor(float(line.split()[-1]), 'hartree', 'eV') if not hasattr(self, "ccenergies"): self.ccenergies = [] self.ccenergies.append(energ) if "Total energy CCSD(T)" in line: self.metadata["methods"].append("CCSD(T)") energ = utils.convertor(float(line.split()[-1]), 'hartree', 'eV') if not hasattr(self, "ccenergies"): self.ccenergies = [] self.ccenergies.append(energ) # The molecular geometry requires the use of .RUN PROPERTIES in the input. # Note that the second column is not the nuclear charge, but the atom type # index used internally by DALTON. # # Molecular geometry (au) # ----------------------- # # C _1 1.3498778652 2.3494125195 0.0000000000 # C _2 -1.3498778652 -2.3494125195 0.0000000000 # C _1 2.6543517307 0.0000000000 0.0000000000 # ... 
# if "Molecular geometry (au)" in line: if not hasattr(self, "atomcoords"): self.atomcoords = [] if self.firststdorient: self.firststdorient = False self.skip_lines(inputfile, ['d', 'b']) lines = [next(inputfile) for i in range(self.natom)] atomcoords = self.parse_geometry(lines) self.atomcoords.append(atomcoords) if "Optimization Control Center" in line: self.section = "OPT" assert set(next(inputfile).strip()) == set(":") # During geometry optimizations the geometry is printed in the section # that is titles "Optimization Control Center". Note that after an optimizations # finishes, DALTON normally runs another "static property section (ABACUS)", # so the final geometry will be repeated in atomcoords. # # Next geometry (au) # ------------------ # # C _1 1.3203201560 2.3174808341 0.0000000000 # C _2 -1.3203201560 -2.3174808341 0.0000000000 # ... if self.section == "OPT" and line.strip() == "Next geometry (au)": self.skip_lines(inputfile, ['d', 'b']) lines = [next(inputfile) for i in range(self.natom)] coords = self.parse_geometry(lines) self.atomcoords.append(coords) # This section contains data for optdone and geovalues, although we could use # it to double check some atttributes that were parsed before. # # Optimization information # ------------------------ # # Iteration number : 4 # End of optimization : T # Energy at this geometry is : -379.777956 # Energy change from last geom. : -0.000000 # Predicted change : -0.000000 # Ratio, actual/predicted change : 0.952994 # Norm of gradient : 0.000058 # Norm of step : 0.000643 # Updated trust radius : 0.714097 # Total Hessian index : 0 # if self.section == "OPT" and line.strip() == "Optimization information": self.skip_lines(inputfile, ['d', 'b']) line = next(inputfile) assert 'Iteration number' in line iteration = int(line.split()[-1]) line = next(inputfile) assert 'End of optimization' in line if not hasattr(self, 'optdone'): self.optdone = [] self.optdone.append(line.split()[-1] == 'T') # We need a way to map between lines here and the targets stated at the # beginning of the file in 'Chosen parameters for *OPTIMI (see above), # and this dictionary facilitates that. The keys are target names parsed # in that initial section after input processing, and the values are # substrings that should appear in the lines in this section. Make an # exception for the energy at iteration zero where there is no gradient, # and take the total energy for geovalues. targets_labels = { 'gradient': 'Norm of gradient', 'energy': 'Energy change from last', 'step': 'Norm of step', } values = [numpy.nan] * len(self.geotargets) while line.strip(): if iteration == 0 and "Energy at this geometry" in line: index = self.geotargets_names.index('energy') values[index] = self.float(line.split()[-1]) for tgt, lbl in targets_labels.items(): if lbl in line and tgt in self.geotargets_names: index = self.geotargets_names.index(tgt) values[index] = self.float(line.split()[-1]) line = next(inputfile) # If we're missing something above, throw away the partial geovalues since # we don't want artificial NaNs getting into cclib. Instead, fix the dictionary # to make things work. 
if not numpy.nan in values: if not hasattr(self, 'geovalues'): self.geovalues = [] self.geovalues.append(values) # ------------------------------------------------- # extract the center of mass line if "Center-of-mass coordinates (a.u.):" in line: temp = line.split() reference = [utils.convertor(float(temp[i]), "bohr", "Angstrom") for i in [3, 4, 5]] if not hasattr(self, 'moments'): self.moments = [reference] # ------------------------------------------------- # Extract the dipole moment if "Dipole moment components" in line: dipole = numpy.zeros(3) line = next(inputfile) line = next(inputfile) line = next(inputfile) if not "zero by symmetry" in line: line = next(inputfile) line = next(inputfile) temp = line.split() for i in range(3): dipole[i] = float(temp[2]) # store the Debye value if hasattr(self, 'moments'): self.moments.append(dipole) ## 'vibfreqs', 'vibirs', and 'vibsyms' appear in ABACUS. # Vibrational Frequencies and IR Intensities # ------------------------------------------ # # mode irrep frequency IR intensity # ============================================================ # cm-1 hartrees km/mol (D/A)**2/amu # ------------------------------------------------------------ # 1 A 3546.72 0.016160 0.000 0.0000 # 2 A 3546.67 0.016160 0.024 0.0006 # ... if "Vibrational Frequencies and IR Intensities" in line: self.skip_lines(inputfile, ['dashes', 'blank']) line = next(inputfile) assert line.strip() == "mode irrep frequency IR intensity" self.skip_line(inputfile, 'equals') line = next(inputfile) assert line.strip() == "cm-1 hartrees km/mol (D/A)**2/amu" self.skip_line(inputfile, 'dashes') line = next(inputfile) # The normal modes are in order of decreasing IR # frequency, so they can't be added directly to # attributes; they must be grouped together first, sorted # in order of increasing frequency, then added to their # respective attributes. vibdata = [] while line.strip(): sline = line.split() vibsym = sline[1] vibfreq = float(sline[2]) vibir = float(sline[4]) vibdata.append((vibfreq, vibir, vibsym)) line = next(inputfile) vibdata.sort(key=lambda normalmode: normalmode[0]) self.vibfreqs = [normalmode[0] for normalmode in vibdata] self.vibirs = [normalmode[1] for normalmode in vibdata] self.vibsyms = [normalmode[2] for normalmode in vibdata] # Now extract the normal mode displacements. self.skip_lines(inputfile, ['b', 'b']) line = next(inputfile) assert line.strip() == "Normal Coordinates (bohrs*amu**(1/2)):" # Normal Coordinates (bohrs*amu**(1/2)): # -------------------------------------- # # # 1 3547 2 3547 3 3474 4 3471 5 3451 # ---------------------------------------------------------------------- # # C x -0.000319 -0.000314 0.002038 0.000003 -0.001599 # C y -0.000158 -0.000150 -0.001446 0.003719 -0.002576 # C z 0.000000 -0.000000 -0.000000 0.000000 -0.000000 # # C x 0.000319 -0.000315 -0.002038 0.000003 0.001600 # C y 0.000157 -0.000150 0.001448 0.003717 0.002577 # ... self.skip_line(inputfile, 'd') line = next(inputfile) vibdisps = numpy.empty(shape=(len(self.vibirs), self.natom, 3)) ndisps = 0 while ndisps < len(self.vibirs): # Skip two blank lines. line = next(inputfile) line = next(inputfile) # Use the header with the normal mode indices and # frequencies to update where we are. ndisps_block = (len(line.split()) // 2) mode_min, mode_max = ndisps, ndisps + ndisps_block # Skip a line of dashes and a blank line. 
line = next(inputfile) line = next(inputfile) for w in range(self.natom): for coord in range(3): line = next(inputfile) vibdisps[mode_min:mode_max, w, coord] = [float(i) for i in line.split()[2:]] # Skip a blank line. line = next(inputfile) ndisps += ndisps_block # The vibrational displacements are in the wrong order; # reverse them. self.vibdisps = vibdisps[::-1, :, :] ## 'vibramans' # Raman related properties for freq. 0.000000 au = Infinity nm # --------------------------------------------------------------- # # Mode Freq. Alpha**2 Beta(a)**2 Pol.Int. Depol.Int. Dep. Ratio # # 1 3546.72 0.379364 16.900089 84.671721 50.700268 0.598786 # 2 3546.67 0.000000 0.000000 0.000000 0.000000 0.599550 if "Raman related properties for freq." in line: self.skip_lines(inputfile, ['d', 'b']) line = next(inputfile) assert line[1:76] == "Mode Freq. Alpha**2 Beta(a)**2 Pol.Int. Depol.Int. Dep. Ratio" self.skip_line(inputfile, 'b') line = next(inputfile) vibramans = [] # The Raman intensities appear under the "Pol.Int." # (polarization intensity) column. for m in range(len(self.vibfreqs)): vibramans.append(float(line.split()[4])) line = next(inputfile) # All vibrational properties in DALTON appear in reverse # order. self.vibramans = vibramans[::-1] # Static polarizability from **PROPERTIES/.POLARI. if line.strip() == "Static polarizabilities (au)": if not hasattr(self, 'polarizabilities'): self.polarizabilities = [] polarizability = [] self.skip_lines(inputfile, ['d', 'b', 'directions', 'b']) for _ in range(3): line = next(inputfile) polarizability.append(line.split()[1:]) self.polarizabilities.append(numpy.array(polarizability)) # Static and dynamic polarizability from **PROPERTIES/.ALPHA/*ABALNR. if "Polarizability tensor for frequency" in line: if not hasattr(self, 'polarizabilities'): self.polarizabilities = [] polarizability = [] self.skip_lines(inputfile, ['d', 'directions', 'b']) for _ in range(3): line = next(inputfile) polarizability.append(line.split()[1:]) self.polarizabilities.append(numpy.array(polarizability)) # Static and dynamic polarizability from **RESPONSE/*LINEAR. # This section is *very* general and will need to be expanded later. # For now, only form the matrix from dipole (length gauge) values. if "@ FREQUENCY INDEPENDENT SECOND ORDER PROPERTIES" in line: coord_to_idx = {'X': 0, 'Y': 1, 'Z': 2} self.skip_line(inputfile, 'b') line = next(inputfile) polarizability_diplen = numpy.empty(shape=(3, 3)) while "Time used in linear response calculation is" not in line: tokens = line.split() if line.count("DIPLEN") == 2: assert len(tokens) == 8 if not hasattr(self, 'polarizabilities'): self.polarizabilities = [] i, j = coord_to_idx[tokens[2][0]], coord_to_idx[tokens[4][0]] polarizability_diplen[i, j] = self.float(tokens[7]) line = next(inputfile) polarizability_diplen = utils.symmetrize(polarizability_diplen, use_triangle='upper') if hasattr(self, 'polarizabilities'): self.polarizabilities.append(polarizability_diplen) # Electronic excitations: single residues of the linear # response equations. 
if "Linear Response single residue calculation" in line: etsyms = [] etenergies = [] # etoscs = [] etsecs = [] symmap = {"T": "Triplet", "F": "Singlet"} while "End of Dynamic Property Section (RESPONS)" not in line: line = next(inputfile) if "Operator symmetry" in line: do_triplet = line[-2] if "@ Excited state no:" in line: etsym = line.split()[9] # -2 etsyms.append(symmap[do_triplet] + "-" + etsym) self.skip_lines(inputfile, ['d', 'b', 'Excitation energy in a.u.']) line = next(inputfile) etenergy = float(line.split()[1]) etenergies.append(etenergy) while "The dominant contributions" not in line: line = next(inputfile) self.skip_line(inputfile, 'b') line = next(inputfile) # [0] is the starting (occupied) MO # [1] is the ending (unoccupied) MO # [2] and [3] are the excitation/deexcitation coefficients # [4] is the orbital overlap # [5] is the ... # [6] is the ... # [7] is the ... assert "I A K_IA K_AI <|I|*|A|> <I^2*A^2> Weight Contrib" in line self.skip_line(inputfile, 'b') line = next(inputfile) sec = [] while line.strip(): chomp = line.split() startidx = int(chomp[0]) - 1 endidx = int(chomp[1]) - 1 contrib = float(chomp[2]) # Since DALTON is restricted open-shell only, # there is not distinction between alpha and # beta spin. sec.append([(startidx, 0), (endidx, 0), contrib]) line = next(inputfile) etsecs.append(sec) self.set_attribute('etsyms', etsyms) self.set_attribute('etenergies', etenergies) # self.set_attribute('etoscs', etoscs) self.set_attribute('etsecs', etsecs) # TODO: # aonames # aooverlaps # atomcharges # atomspins # coreelectrons # enthalpy # entropy # etoscs # etrotats # freeenergy # grads # hessian # mocoeffs # nocoeffs # nooccnos # scancoords # scanenergies # scannames # scanparm # temperature # vibanharms # N/A: # fonames # fooverlaps # fragnames # frags if __name__ == "__main__": import doctest, daltonparser, sys if len(sys.argv) == 1: doctest.testmod(daltonparser, verbose=False) if len(sys.argv) >= 2: parser = daltonparser.DALTON(sys.argv[1]) data = parser.parse() if len(sys.argv) > 2: for i in range(len(sys.argv[2:])): if hasattr(data, sys.argv[2 + i]): print(getattr(data, sys.argv[2 + i]))<|fim▁end|>
line = next(inputfile)
<|file_name|>testEth4.js<|end_file_name|><|fim▁begin|>/******************************************************************************** * Ledger Node JS API * (c) 2016-2017 Ledger * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at<|fim▁hole|>* * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ********************************************************************************/ function runTest(comm, ledger, timeout) { return comm.create_async(timeout, true).then(function (comm) { var eth = new ledger.eth(comm); return eth.signPersonalMessage_async("44'/60'/0'/0'/0", Buffer.from('test').toString('hex')).then(function (result) { var v = result['v'] - 27; v = v.toString(16); if (v.length < 2) { v = "0" + v; } console.log("Signature 0x" + result['r'] + result['s'] + v); }) }) } module.exports = runTest;<|fim▁end|>
<|file_name|>test_parsers_module.py<|end_file_name|><|fim▁begin|>import pytest from collections import OrderedDict from insights.parsers import (calc_offset, keyword_search, optlist_to_dict, parse_delimited_table, parse_fixed_table, split_kv_pairs, unsplit_lines, ParseException, SkipException) SPLIT_TEST_1 = """ # Comment line keyword1 = value1 # Inline comments # Comment indented keyword3 # Key with no separator keyword2 = value2a=True, value2b=100M """.strip() SPLIT_TEST_1_OD = OrderedDict([ ('keyword1', 'value1'), ('keyword3', ''), ('keyword2', 'value2a=True, value2b=100M') ]) SPLIT_TEST_2 = """ @ Comment line keyword1: value1 @ Inline comments keyword2 : value2a=True, value2b=100M @ Comment indented keyword3 @ Key with no separator """.strip() OFFSET_CONTENT_1 = """ data 1 line data 2 line """.strip() OFFSET_CONTENT_2 = """ # Warning line Error line data 1 line data 2 line Trailing line Blank line above Another trailing line Yet another trailing line Yet yet another trailing line """.strip() def test_split_kv_pairs(): kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines()) assert len(kv_pairs) == 2 assert kv_pairs == { 'keyword1': 'value1', 'keyword2': 'value2a=True, value2b=100M' } kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines(), filter_string='value2') assert len(kv_pairs) == 1 assert kv_pairs == { 'keyword2': 'value2a=True, value2b=100M' } kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines(), use_partition=True) assert len(kv_pairs) == 3 assert kv_pairs == { 'keyword1': 'value1', 'keyword2': 'value2a=True, value2b=100M', 'keyword3': '' } kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines(), use_partition=True, ordered=True) assert len(kv_pairs) == 3 assert kv_pairs == SPLIT_TEST_1_OD kv_pairs = split_kv_pairs(SPLIT_TEST_2.splitlines(), comment_char='@', split_on=':') assert len(kv_pairs) == 2 assert kv_pairs == { 'keyword1': 'value1', 'keyword2': 'value2a=True, value2b=100M' } kv_pairs = split_kv_pairs(SPLIT_TEST_2.splitlines(), comment_char='@', split_on=':', filter_string='value2') assert len(kv_pairs) == 1 assert kv_pairs == { 'keyword2': 'value2a=True, value2b=100M' } kv_pairs = split_kv_pairs(SPLIT_TEST_2.splitlines(), comment_char='@', split_on=':', use_partition=True) assert len(kv_pairs) == 3 assert kv_pairs == { 'keyword1': 'value1', 'keyword2': 'value2a=True, value2b=100M', 'keyword3': '' } SPLIT_LINES = """ Line one Line two part 1 \\ line two part 2\\ line two part 3 Line three """.strip() SPLIT_LINES_2 = """ Line one Line two part 1 ^ line two part 2^ line two part 3 Line three^ """.strip() SPLIT_LINES_3 = """ web.default_taskmaster_tasks = RHN::Task::SessionCleanup, RHN::Task::ErrataQueue, RHN::Task::ErrataEngine, RHN::Task::DailySummary, RHN::Task::SummaryPopulation, RHN::Task::RHNProc, RHN::Task::PackageCleanup db_host =""" def test_unsplit_lines(): lines = list(unsplit_lines(SPLIT_LINES.splitlines())) assert len(lines) == 3 assert lines[0] == 'Line one' assert lines[1] == 'Line two part 1 line two part 2 line two part 3' assert lines[2] == 'Line three' lines = list(unsplit_lines(SPLIT_LINES_2.splitlines(), cont_char='^')) assert len(lines) == 3 assert lines[0] == 'Line one' assert lines[1] == 'Line two part 1 line two part 2 line two part 3' assert lines[2] == 'Line three' # test continuation on last line # Test keeping continuation character on line lines = list(unsplit_lines( SPLIT_LINES_3.splitlines(), cont_char=',', keep_cont_char=True )) assert len(lines) == 4 assert lines[0] == '' assert lines[1] == 'web.default_taskmaster_tasks = RHN::Task::SessionCleanup, 
RHN::Task::ErrataQueue, RHN::Task::ErrataEngine, RHN::Task::DailySummary, RHN::Task::SummaryPopulation, RHN::Task::RHNProc, RHN::Task::PackageCleanup'
    assert lines[2] == ''
    assert lines[3] == 'db_host ='


def test_calc_offset():
    assert calc_offset(OFFSET_CONTENT_1.splitlines(), target=[]) == 0
    assert calc_offset(OFFSET_CONTENT_1.splitlines(), target=[None]) == 0
    assert calc_offset(OFFSET_CONTENT_1.splitlines(), target=['data ']) == 0
    with pytest.raises(ValueError):
        calc_offset(OFFSET_CONTENT_1.splitlines(), target=['xdata '])
    with pytest.raises(ValueError):
        calc_offset(OFFSET_CONTENT_1.splitlines(), target=['data '], invert_search=True)
    assert calc_offset(OFFSET_CONTENT_1.splitlines(), target=['Trailing', 'Blank', 'Another '], invert_search=True) == 0
    assert calc_offset(OFFSET_CONTENT_2.splitlines(), target=[]) == 0
    assert calc_offset(OFFSET_CONTENT_2.splitlines(), target=['data ']) == 3
    assert calc_offset(reversed(OFFSET_CONTENT_2.splitlines()), target=['Trailing', 'Blank', 'Another ', 'Yet'], invert_search=True) == 6
    assert calc_offset(OFFSET_CONTENT_2.splitlines(), target=['data', '2']) == 3
    assert calc_offset(OFFSET_CONTENT_2.splitlines(), target=['data', '2'], require_all=True) == 4
    assert calc_offset(
        reversed(OFFSET_CONTENT_2.splitlines()),
        target=['Trailing', 'Blank', 'Another ', 'Yet'],
        invert_search=True) == 6
    assert calc_offset(
        reversed(OFFSET_CONTENT_2.splitlines()),
        target=['Trailing', 'Blank', 'Another ', 'Yet'],
        invert_search=True, require_all=True) == 6


FIXED_CONTENT_1 = """
Column1    Column2    Column3
data1      data 2     data 3
data4      data5      data6
data 7                data 9
""".strip()

FIXED_CONTENT_1A = """
WARNING
Column1    Column2    Column3
data1      data 2     data 3
data4      data5      data6
data 7                data 9
""".strip()

FIXED_CONTENT_1B = """
Column1    Column2    Column3
data1      data 2
data4      data5      data6
data 7                data 9
""".strip()

FIXED_CONTENT_2 = """
WARNING WARNING WARNING
Some message
Another message
Column1    Column2    Column3
data1      data 2     data 3
data4      data5      data6
data 7                data 9
""".strip()

FIXED_CONTENT_3 = """
WARNING WARNING WARNING
Some message
Another message
Column1    Column2    Column3
data1      data 2     data 3
data4      data5      data6
data 7                data 9
Trailing non-data line
Another trailing non-data line
""".strip()

FIXED_CONTENT_4 = """
WARNING WARNING WARNING
Some message
Another message
Column1  Column 2   Column 3
data1    data 2     data 3
data4    data5      data6
data 7              data 9
data10
Trailing non-data line
 Another trailing non-data line
""".strip()

FIXED_CONTENT_5 = """
Column1  Column 2   Column 3
data1    data 2     data 3
data 7              data 9
data10
""".strip()

FIXED_CONTENT_DUP_HEADER_PREFIXES = """
NAMESPACE  NAME  LABELS
default    foo   app=superawesome
""".strip()


def test_parse_fixed_table():
    data = parse_fixed_table(FIXED_CONTENT_1.splitlines())
    assert len(data) == 3
    assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}
    assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}
    assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'}

    data = parse_fixed_table(FIXED_CONTENT_1A.splitlines(), heading_ignore=['Column1 '])
    assert len(data) == 3
    assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}
    assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}
    assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'}

    data = parse_fixed_table(FIXED_CONTENT_1B.splitlines())
    assert len(data) == 3
    assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': ''}
    assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}
    assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'}

    data = parse_fixed_table(FIXED_CONTENT_2.splitlines(), heading_ignore=['Column1 '])
    assert len(data) == 3
    assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}
    assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}
    assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'}

    data = parse_fixed_table(FIXED_CONTENT_3.splitlines(), heading_ignore=['Column1 '],
                             trailing_ignore=['Trailing', 'Another'])
    assert len(data) == 3
    assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}
    assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}
    assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'}

    data = parse_fixed_table(FIXED_CONTENT_4.splitlines(), heading_ignore=['Column1 '],
                             header_substitute=[('Column 2', 'Column_2'), ('Column 3', 'Column_3')],
                             trailing_ignore=['Trailing', 'Another'])
    assert len(data) == 4
    assert data[0] == {'Column1': 'data1', 'Column_2': 'data 2', 'Column_3': 'data 3'}
    assert data[1] == {'Column1': 'data4', 'Column_2': 'data5', 'Column_3': 'data6'}
    assert data[2] == {'Column1': 'data 7', 'Column_2': '', 'Column_3': 'data 9'}
    assert data[3] == {'Column1': 'data10', 'Column_2': '', 'Column_3': ''}

    # Test that if we search for trailing data that is always found, then we
    # should get the whole thing parsed as a table from the header line
    data = parse_fixed_table(
        ['foo' + line for line in FIXED_CONTENT_4.splitlines()],
        heading_ignore=['fooColumn1 '],
        header_substitute=[('fooColumn1', 'Column1'), ('Column 2', 'Column_2'), ('Column 3', 'Column_3')],
        trailing_ignore=['foo']
    )
    assert len(data) == 6
    assert data[4] == {'Column1': 'fooTrailing', 'Column_2': 'non-data li', 'Column_3': 'ne'}
    assert data[5] == {'Column1': 'foo Another', 'Column_2': 'trailing no', 'Column_3': 'n-data line'}

    data = parse_fixed_table(FIXED_CONTENT_DUP_HEADER_PREFIXES.splitlines())
    assert data[0] == {'NAMESPACE': 'default', 'NAME': 'foo', 'LABELS': 'app=superawesome'}

    data = parse_fixed_table(FIXED_CONTENT_5.splitlines())
    assert len(data) == 3


def test_parse_fixed_table_empty_exception():
    with pytest.raises(ParseException) as pe:
        parse_fixed_table(FIXED_CONTENT_1B.splitlines(), empty_exception=True)
    assert "Incorrect line:" in str(pe.value)


def test_optlist_standard():
    d = optlist_to_dict('key1,key2=val2,key1=val1,key3')
    assert sorted(d.keys()) == sorted(['key1', 'key2', 'key3'])
    assert d['key1'] == 'val1'
    assert d['key2'] == 'val2'
    assert d['key3'] is True


def test_optlist_no_vals():
    d = optlist_to_dict('key1,key2=val2,key1=val1,key3', kv_sep=None)
    assert sorted(d.keys()) == sorted(['key1', 'key1=val1', 'key2=val2', 'key3'])
    assert d['key1'] is True
    assert d['key1=val1'] is True
    assert d['key2=val2'] is True
    assert d['key3'] is True


def test_optlist_strip_quotes():
    d = optlist_to_dict(
        '''key1="foo",key2='bar',key3="mismatched quotes',key4="inner'quotes"''',
        strip_quotes=True
    )
    assert sorted(d.keys()) == sorted(['key1', 'key2', 'key3', 'key4'])
    assert d['key1'] == 'foo'
    assert d['key2'] == 'bar'
    assert d['key3'] == '"mismatched quotes\''
    assert d['key4'] == "inner'quotes"


def test_optlist_with_spaces():
    d = optlist_to_dict(
        '''key1=foo, key2=bar'''
    )
    assert 'key1' in d
    assert 'key2' in d


PS_AUX_TEST = """
USER       PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
root         1  0.0  0.0  19356  1544 ?        Ss   May31   0:01 /sbin/init
root      1821  0.0  0.0      0     0 ?        S    May31   0:25 [kondemand/0]
root      1864  0.0  0.0  18244   668 ?        Ss   May31   0:05 irqbalance --pid=/var/run/irqbalance.pid
user1    20160  0.0  0.0 108472  1896 pts/3    Ss   10:09   0:00 bash
root     20357  0.0  0.0   9120   760 ?        Ss   10:09   0:00 /sbin/dhclient -1 -q -lf /var/lib/dhclient/dhclient-extbr0.leases -pf /var/run/dhclient-extbr0.pid extbr0
qemu     22673  0.8 10.2 1618556 805636 ?      Sl   11:38   1:07 /usr/libexec/qemu-kvm -name rhel7 -S -M rhel6.5.0 -enable-kvm -m 1024 -smp 2,sockets=2,cores=1,threads=1 -uuid 13798ffc-bc1e-d437-4f3f-2e0fa6c923ad
"""

MISSING_DATA_TEST = """
WARNING: Locking disabled. Be careful! This could corrupt your metadata.
LVM2_PV_FMT|LVM2_PV_UUID|LVM2_DEV_SIZE|LVM2_PV_NAME|LVM2_PV_MAJOR|LVM2_PV_MINOR|LVM2_PV_MDA_FREE|LVM2_PV_MDA_SIZE|LVM2_PV_EXT_VSN|LVM2_PE_START|LVM2_PV_SIZE|LVM2_PV_FREE|LVM2_PV_USED|LVM2_PV_ATTR|LVM2_PV_ALLOCATABLE|LVM2_PV_EXPORTED|LVM2_PV_MISSING|LVM2_PV_PE_COUNT|LVM2_PV_PE_ALLOC_COUNT|LVM2_PV_TAGS|LVM2_PV_MDA_COUNT|LVM2_PV_MDA_USED_COUNT|LVM2_PV_BA_START|LVM2_PV_BA_SIZE|LVM2_PV_IN_USE|LVM2_PV_DUPLICATE|LVM2_VG_NAME
WARNING: Locking disabled. Be careful! This could corrupt your metadata.
"""

SUBSTITUTE_HEADERS_TEST = """
address,port,state,read-only
0.0.0.0,3000,LISTEN,N
10.76.19.184,37500,ESTAB,Y
""".strip()

POSTGRESQL_LOG = """
 schema | table | rows
 public | rhnsnapshotpackage | 47428950
 public | rhnpackagefile | 32174333
 public | rhnpackagecapability | 12934215
 public | rhnpackagechangelogrec | 11269933
 public | rhnchecksum | 10129746
 public | rhnactionconfigrevision | 2894957
 public | rhnpackageprovides | 2712442
 public | rhnpackagerequires | 2532861
 public | rhn_command_target | 1009152
 public | rhnconfigfilename | 0
 public | rhnxccdfidentsystem | 0
 public | rhndistchannelmap | 0
 public | rhnactionvirtshutdown | 0
 public | rhnpublicchannelfamily | 0
(402 rows)
""".strip()

# Normally has a --- separator line, which is ignored using get_active_lines
TABLE1 = """
THIS IS A HEADER
 this is some content_with_blank_prefix
This is more content
""".strip()

TABLE2 = [
    "SID   Nr   Instance    SAPLOCALHOST                        Version                 DIR_EXECUTABLE",
    "HA2|  16|       D16|         lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/D16/exe",
    "HA2|  22|       D22|         lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/D22/exe"
]

TABLE3 = """
THIS | IS | A | HEADER
this ^ is ^ some ^ content
This ^ is ^ more ^ content
""".strip()


def test_parse_delimited_table():
    # No content? No table.
    assert parse_delimited_table([]) == []

    # Test maximum splits and header 'ignore', which should actually be
    # called 'header_startswith'
    tbl = parse_delimited_table(
        PS_AUX_TEST.splitlines(), max_splits=10, heading_ignore=['USER']
    )
    assert tbl
    assert isinstance(tbl, list)
    assert len(tbl) == 6
    assert isinstance(tbl[0], dict)
    assert tbl[0] == {
        '%MEM': '0.0', 'TTY': '?', 'VSZ': '19356', 'PID': '1', '%CPU': '0.0',
        'START': 'May31', 'COMMAND': '/sbin/init', 'USER': 'root',
        'STAT': 'Ss', 'TIME': '0:01', 'RSS': '1544'
    }
    assert tbl[5]['COMMAND'] == \
        '/usr/libexec/qemu-kvm -name rhel7 -S -M rhel6.5.0 -enable-kvm -m 1024 -smp 2,sockets=2,cores=1,threads=1 -uuid 13798ffc-bc1e-d437-4f3f-2e0fa6c923ad'

    # Test trailing ignore not found
    tbl = parse_delimited_table(
        MISSING_DATA_TEST.splitlines(), delim='|',
        heading_ignore=['LVM2_PV_FMT'],
        trailing_ignore=['WARNING', 'ERROR', 'Cannot get lock']
    )
    assert isinstance(tbl, list)
    assert len(tbl) == 0

    # Header substitution
    tbl = parse_delimited_table(
        SUBSTITUTE_HEADERS_TEST.splitlines(), delim=',', strip=False,
        header_substitute=[('read-only', 'read_only')]
    )
    assert tbl
    assert isinstance(tbl, list)
    assert len(tbl) == 2
    assert isinstance(tbl[1], dict)
    assert tbl[1] == {
        'address': '10.76.19.184', 'port': '37500', 'state': 'ESTAB', 'read_only': 'Y'
    }

    # Test change of delimiter and trailing_ignore
    tbl = parse_delimited_table(POSTGRESQL_LOG.splitlines(), delim='|', trailing_ignore=['('])
    assert isinstance(tbl, list)
    assert len(tbl) == 14
    assert isinstance(tbl[0], dict)
    assert tbl[0] == {
        'schema': 'public', 'table': 'rhnsnapshotpackage', 'rows': '47428950'
    }

    # Test using different header delimiter
    result = parse_delimited_table(TABLE3.splitlines(), delim="^", header_delim="|")
    assert isinstance(result, list)
    assert len(result) == 2
    assert isinstance(result[0], dict)
    expected = [{"THIS": "this", "IS": "is", "A": "some", "HEADER": "content"},
                {"THIS": "This", "IS": "is", "A": "more", "HEADER": "content"}]
    assert expected == result

    # Test explicit None as header delimiter, different from content delimiter
    result = parse_delimited_table(TABLE2, delim='|', header_delim=None)
    assert isinstance(result, list)
    assert len(result) == 2
    assert isinstance(result[0], dict)
    expected = [{"SID": "HA2", "Nr": "16", "Instance": "D16", "SAPLOCALHOST": "lu0417",
                 "Version": "749, patch 10, changelist 1698137",
                 "DIR_EXECUTABLE": "/usr/sap/HA2/D16/exe"},
                {"SID": "HA2", "Nr": "22", "Instance": "D22", "SAPLOCALHOST": "lu0417",
                 "Version": "749, patch 10, changelist 1698137",
                 "DIR_EXECUTABLE": "/usr/sap/HA2/D22/exe"}]
    assert expected == result

    # Test raw_line_key
    TABLE1_SP = TABLE1.splitlines()
    result = parse_delimited_table(TABLE1_SP, raw_line_key='raw_line')
    assert isinstance(result, list)
    assert len(result) == 2
    assert isinstance(result[0], dict)
    # Get the RAW line
    assert result[0]['raw_line'] == TABLE1_SP[1]


DATA_LIST = [
    {'name': 'test 1', 'role': 'server', 'memory_gb': 16, 'ssd': True},
    {'name': 'test 2', 'role': 'server', 'memory_gb': 256, 'ssd': False},
    {'name': 'test 3', 'role': 'server', 'memory_gb': 16, 'ssd': False},
    {'name': 'test 4', 'role': 'embedded', 'memory_gb': 1, 'ssd': False},
    {'name': 'test 5', 'role': 'workstation', 'memory_gb': 16, 'ssd': True},
]

CERT_LIST = [
    {
        'status': 'MONITORING',
        'stuck': 'no',
        'key pair storage': "type=NSSDB,location='/etc/dirsrv/slapd-LDAP-EXAMPLE-COM',nickname='Server-Cert',token='NSS Certificate DB',pinfile='/etc/dirsrv/slapd-LDAP-EXAMPLE-COM/pwdfile.txt'",
        'certificate': {
            'type': 'NSSDB',
            'location': '/etc/dirsrv/slapd-LDAP-EXAMPLE-COM',
            'nickname': 'Server-Cert',
            'token': 'NSS Certificate DB',
        },
        'CA': 'IPA',
        'issuer': 'CN=Certificate Authority,O=LDAP.EXAMPLE.COM',
        'subject': 'CN=master.LDAP.EXAMPLE.COM,O=LDAP.EXAMPLE.COM',
        'expires': '2017-06-28 12:52:12 UTC',
        'eku': 'id-kp-serverAuth,id-kp-clientAuth',
        'pre-save command': '',
        'post-save command': '/usr/lib64/ipa/certmonger/restart_dirsrv LDAP-EXAMPLE-COM',
        'track': 'yes',
        'auto-renew': 'yes',
    },
    {
        'status': 'MONITORING',
        'stuck': 'no',
        'key pair storage': "type=NSSDB,location='/etc/dirsrv/slapd-PKI-IPA',nickname='Server-Cert',token='NSS Certificate DB',pinfile='/etc/dirsrv/slapd-PKI-IPA/pwdfile.txt'",
        'certificate': {
            'type': 'NSSDB',
            'location': '/etc/dirsrv/slapd-PKI-IPA',
            'nickname': 'Server-Cert',
            'token': 'NSS Certificate DB',
        },
        'CA': 'IPA',
        'issuer': 'CN=Certificate Authority,O=EXAMPLE.COM',
        'subject': 'CN=ldap.EXAMPLE.COM,O=EXAMPLE.COM',
        'expires': '2017-06-28 12:52:13 UTC',
        'eku': 'id-kp-serverAuth,id-kp-clientAuth',
        'pre-save command': '',
        'post-save command': '/usr/lib64/ipa/certmonger/restart_dirsrv PKI-IPA',
        'track': 'yes',
        'auto-renew': 'yes',
        'dash- space': 'tested',
    }
]
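
# Behaviour assumed by the tests below (derived from the assertions, not from
# parser internals): keyword_search takes matching criteria as keyword
# arguments, spaces and dashes in the underlying keys map to underscores
# (e.g. 'pre-save command' -> pre_save_command), and a double-underscore
# suffix such as __contains, __startswith, __lower_value or __default selects
# a matcher instead of an exact comparison.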
def test_keyword_search():
    # No keywords, no result
    assert len(keyword_search(DATA_LIST)) == 0<|fim▁hole|>
    # Search on present but non-matching keyword produces empty list
    assert keyword_search(DATA_LIST, memory_gb=8) == []
    # Single result - search on string
    results = keyword_search(DATA_LIST, role='embedded')
    assert len(results) == 1
    assert results[0] == DATA_LIST[3]
    # Multiple results, name has underscore - search on integer
    results = keyword_search(DATA_LIST, memory_gb=16)
    assert len(results) == 3
    assert results == [DATA_LIST[i] for i in (0, 2, 4)]
    # Search on boolean
    results = keyword_search(DATA_LIST, ssd=False)
    assert len(results) == 3
    assert results == [DATA_LIST[i] for i in (1, 2, 3)]
    # No data, no results.
    assert len(keyword_search([], role='server')) == 0
    # Search with contains
    results = keyword_search(DATA_LIST, role__contains='e')
    assert len(results) == 4
    assert results == [DATA_LIST[i] for i in (0, 1, 2, 3)]
    # Search with startswith
    results = keyword_search(DATA_LIST, role__startswith='e')
    assert len(results) == 1
    assert results[0] == DATA_LIST[3]
    # Search for multiple keys, with spaces and dashes, and search operators
    results = keyword_search(
        CERT_LIST,
        pre_save_command='',
        key_pair_storage__startswith="type=NSSDB,location='/etc/dirsrv/slapd-PKI-IPA'"
    )
    assert len(results) == 1
    assert results[0] == CERT_LIST[1]
    # Make sure contains can also apply to keys with dashes and spaces
    results = keyword_search(
        CERT_LIST,
        post_save_command__contains='PKI-IPA',
    )
    assert len(results) == 1
    assert results[0] == CERT_LIST[1]
    # Lower case value matching
    results = keyword_search(
        CERT_LIST,
        status__lower_value='Monitoring',
    )
    assert len(results) == 2
    assert results == CERT_LIST
    # Check that searches for keys with two underscores that aren't matcher
    # suffixes still work
    results = keyword_search(
        CERT_LIST,
        dash__space='tested',
    )
    assert len(results) == 1
    assert results[0] == CERT_LIST[1]
    # Check that we can use contains to check the contents of a dictionary
    # in a value
    results = keyword_search(
        CERT_LIST,
        certificate__contains='type'
    )
    assert len(results) == 2
    assert results == CERT_LIST
    assert keyword_search(
        CERT_LIST,
        certificate__contains='encryption'
    ) == []


PS_LIST = [
    {'PID': '692', 'PPID': '2', 'COMMAND': 'kdmflush', '_line': ' 692 2 kdmflush'},
    {'PID': '701', 'PPID': '2', 'COMMAND': 'kdmflush', '_line': ' 701 2 kdmflush'},
    {'PID': '725', 'PPID': '2', 'COMMAND': 'xfsalloc', '_line': ' 725 2 xfsalloc'},
    {'PID': '726', 'PPID': '2', 'COMMAND': None, '_line': ' 726 2 grep -F xx'},
]


def test_keyword_search_None():
    # Normal search
    assert keyword_search(PS_LIST, COMMAND__default=None)[0]['PID'] == '726'
    assert keyword_search(PS_LIST, _line__contains='alloc')[0]['PID'] == '725'
    assert keyword_search(PS_LIST, COMMAND__startswith='xfs')[0]['PID'] == '725'
    assert len(keyword_search(PS_LIST, COMMAND__lower_value='KDMFLUSH')) == 2
    # Check that searches for non-existing keys
    assert keyword_search(PS_LIST, NONE__default=None) == []
    assert keyword_search(PS_LIST, NONE__startswith='xfs') == []


def test_parse_exception():
    with pytest.raises(ParseException) as e_info:
        raise ParseException('This is a parse exception')
    assert 'This is a parse exception' == str(e_info.value)


def test_skip_exception():
    with pytest.raises(SkipException) as e_info:
        raise SkipException('This is a skip exception')
    assert 'This is a skip exception' == str(e_info.value)<|fim▁end|>
    # Search on absent keywords produces empty list
    assert keyword_search(DATA_LIST, cpu_count=4) == []
<|file_name|>IProxy.java<|end_file_name|><|fim▁begin|>package com.Deoda.MCMBTools.proxy;

public interface IProxy {<|fim▁hole|><|fim▁end|>
}
<|file_name|>doRPC.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python

import os, sys
from AnnotationLib import *
from optparse import OptionParser
import copy
import math


# BASED ON WIKIPEDIA VERSION
#
# n - number of nodes
# C - capacity matrix
# F - flow matrix
# s - source
# t - sink
# sumC - sum over rows of C (to speed up computation)
def edmonds_karp(n, C, s, t, sumC):
    # Residual capacity from u to v is C[u][v] - F[u][v]
    F = [[0] * n for i in xrange(n)]
    while True:
        P = [-1] * n  # Parent table
        P[s] = s
        M = [0] * n  # Capacity of path to node
        M[s] = float('infinity')
        Q = [s]  # BFS queue
        while Q:
            u = Q.pop(0)
            for v in xrange(n):
                # There is available capacity,
                # and v is not seen before in search
                if C[u][v] - F[u][v] > 0 and P[v] == -1:
                    P[v] = u
                    M[v] = min(M[u], C[u][v] - F[u][v])
                    if v != t:
                        if(sumC[u] > 0):
                            Q.append(v)
                    else:
                        # Backtrack search, and write flow
                        while P[v] != v:
                            u = P[v]
                            F[u][v] += M[t]
                            F[v][u] -= M[t]
                            v = u
                        Q = None
                        break
        if P[t] == -1:
            # We did not find a path to t
            return (F)
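
# Illustrative usage sketch of edmonds_karp (hypothetical toy data, not part
# of the original script): a 4-node graph with source 0 and sink 3.
#
#   C = [[0, 1, 1, 0],
#        [1, 0, 0, 1],
#        [1, 0, 0, 1],
#        [0, 1, 1, 0]]
#   F = edmonds_karp(4, C, 0, 3, [sum(row) for row in C])
#
# The maximum flow value is then sum(F[0]) == 2; the AnnoGraph class below
# uses exactly this quantity to count matched bounding boxes.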
class AnnoGraph:
    def __init__(self, anno, det, ignore, style, minCover, minOverlap, maxDistance, ignoreOverlap):
        # setting rects
        #print anno.imageName
        self.anno = anno
        self.det = det
        self.det.sortByScore("descending")

        # generate initial graph
        self.n = len(det.rects)
        self.m = len(anno.rects)

        # Number of nodes = number of detections + number of GT + source + sink
        self.a = self.n + self.m + 2

        # Flow matrix
        self.F = [[0] * self.a for i in xrange(self.a)]
        # Capacity matrix
        self.C = [[0] * self.a for i in xrange(self.a)]

        # Connect source to all detections
        for i in range(1, self.n + 1):
            self.C[0][i] = 1
            self.C[i][0] = 1

        # Connect sink to all GT
        for i in range(self.n + 1, self.a - 1):
            self.C[i][self.a - 1] = 1
            self.C[self.a - 1][i] = 1

        # Overall flow
        self.full_flow = 0
        self.ignore_flow = 0

        # match rects / Adjacency matrix
        self.M = [[] for i in xrange(self.n)]
        self.match(style, minCover, minOverlap, maxDistance)
        self.nextN = 0

        # Deactivate All Non Matching detections
        # Save row sums for capacity matrix
        self.sumC = []
        self.sumC.append(self.n)
        for q in [len(self.M[j]) for j in xrange(len(self.M))]:
            self.sumC.append(q)
        for q in [1] * self.m:
            self.sumC.append(q)

        # Initially no links are active
        self.sumC_active = []
        self.sumC_active.append(self.n)
        for q in [len(self.M[j]) for j in xrange(len(self.M))]:
            self.sumC_active.append(0)
        for q in [1] * self.m:
            self.sumC_active.append(q)

        #
        self.ignore = [0] * self.m
        for ig in ignore.rects:
            for i, r in enumerate(anno.rects):
                if(ig.overlap_pascal(r) > ignoreOverlap):
                    self.ignore[i] = 1

    def match(self, style, minCover, minOverlap, maxDistance):
        for i in xrange(self.n):
            detRect = self.det.rects[i]
            for j in xrange(self.m):
                annoRect = self.anno.rects[j]
                # Bastian Leibe's matching style
                if(style == 0):
                    if detRect.isMatchingStd(annoRect, minCover, minOverlap, maxDistance):
                        self.M[i].append(self.n + 1 + j)
                # Pascal Matching style
                if(style == 1):
                    if (detRect.isMatchingPascal(annoRect, minOverlap)):
                        self.M[i].append(self.n + 1 + j)

    def decreaseScore(self, score):
        capacity_change = False
        for i in xrange(self.nextN, self.n):
            if (self.det.rects[i].score >= score):
                capacity_change = self.insertIntoC(i + 1) or capacity_change
                self.nextN += 1
            else:
                break
        if capacity_change:
            self.F = edmonds_karp(self.a, self.C, 0, self.a - 1, self.sumC_active)
            self.full_flow = sum([self.F[0][i] for i in xrange(self.a)])
            self.ignore_flow = sum([self.F[i][self.a - 1] * self.ignore[i - 1 - self.n] for i in range(1 + self.n, 1 + self.n + self.m)])
        return capacity_change

    def addBB(self, rect):
        self.nextN += 1
        capacity_change = self.insertIntoC(rect.boxIndex + 1)
        if capacity_change:
            self.F = edmonds_karp(self.a, self.C, 0, self.a - 1, self.sumC_active)
            self.full_flow = sum([self.F[0][i] for i in xrange(self.a)])
            self.ignore_flow = sum([self.F[i][self.a - 1] * self.ignore[i - 1 - self.n] for i in range(1 + self.n, 1 + self.n + self.m)])
        return capacity_change

    def insertIntoC(self, i):
        #print "Inserting node", i, self.det.rects[i-1].score, "of image", self.anno.imageName
        for match in self.M[i - 1]:
            #print " match: ", match
            self.C[i][match] = 1
            self.C[match][i] = 1
        self.sumC_active[i] = self.sumC[i]
        return self.sumC[i] > 0

    def maxflow(self):
        return self.full_flow - self.ignore_flow

    def consideredDets(self):
        return self.nextN - self.ignore_flow

    def ignoredFlow(self):
        return self.ignore_flow

    def getTruePositives(self):
        ret = copy.copy(self.anno)
        ret.rects = []
        #iterate over GT
        for i in xrange(self.n + 1, self.a - 1):
            #Flow to sink > 0
            if(self.F[i][self.a - 1] > 0 and self.ignore[i - self.n - 1] == 0):
                #Find associated det
                for j in xrange(1, self.n + 1):
                    if(self.F[j][i] > 0):
                        ret.rects.append(self.det[j - 1])
                        break
        return ret

    def getIgnoredTruePositives(self):
        ret = copy.copy(self.anno)
        ret.rects = []
        #iterate over GT
        for i in xrange(self.n + 1, self.a - 1):
            #Flow to sink > 0
            if(self.F[i][self.a - 1] > 0 and self.ignore[i - self.n - 1] == 1):
                #Find associated det
                for j in xrange(1, self.n + 1):
                    if(self.F[j][i] > 0):
                        ret.rects.append(self.det[j - 1])
                        break
        return ret

    def getMissingRecall(self):
        ret = copy.copy(self.anno)
        ret.rects = []
        for i in xrange(self.n + 1, self.a - 1):
            if(self.F[i][self.a - 1] == 0 and self.ignore[i - self.n - 1] == 0):
                ret.rects.append(self.anno.rects[i - self.n - 1])
        return ret

    def getFalsePositives(self):
        ret = copy.copy(self.det)
        ret.rects = []
        for i in xrange(1, self.n + 1):
            if(self.F[0][i] == 0):
                ret.rects.append(self.det[i - 1])
        return ret


def asort(idlGT, idlDet, minWidth, minHeight, style, minCover, minOverlap, maxDistance, maxWidth=float('inf'), maxHeight=float('inf')):
    #Asort too small object in ground truth
    for x,anno in enumerate(idlGT):
        imageFound = False
        filterIndex = -1
        for i,filterAnno in enumerate(idlDet):
            if (suffixMatch(anno.imageName, filterAnno.imageName) and anno.frameNr == filterAnno.frameNr):
                filterIndex = i
                imageFound = True
                break
        if(not imageFound):
            continue

        validGTRects = []
        for j in anno.rects:
            if (j.width() >= minWidth) and (j.height() >= minHeight) and (j.width() <= maxWidth) and (j.height() <= maxHeight):
                validGTRects.append(j)
            else:
                # Sort out detections that would have matched
                matchingIndexes = []
                for m,frect in enumerate(idlDet[filterIndex].rects):
                    if(style == 0):
                        if (j.isMatchingStd(frect, minCover,minOverlap, maxDistance)):
                            overlap = j.overlap_pascal(frect)
                            matchingIndexes.append((m,overlap))
                    if(style == 1):
                        if(j.isMatchingPascal(frect, minOverlap)):
                            overlap = j.overlap_pascal(frect)
                            matchingIndexes.append((m, overlap))

                for m in xrange(len(matchingIndexes) - 1, -1, -1):
                    matching_rect = idlDet[filterIndex].rects[matchingIndexes[m][0]]
                    matching_overlap = matchingIndexes[m][1]
                    better_overlap_found = False
                    for l in anno.rects:
                        if l.overlap_pascal(matching_rect) > matching_overlap:
                            better_overlap_found = True
                    if better_overlap_found:
                        continue
                    del idlDet[filterIndex].rects[matchingIndexes[m][0]]

        idlGT[x].rects = validGTRects

    #Sort out too small false positives
    for x,anno in enumerate(idlDet):
        imageFound = False
        filterIndex = -1
        for i,filterAnno in enumerate(idlGT):
            if (suffixMatch(anno.imageName, filterAnno.imageName) and anno.frameNr == filterAnno.frameNr):
                filterIndex = i
                imageFound = True
                break
        if(not imageFound):
            continue

        validDetRects = []
        for j in anno.rects:
            if (j.width() >= minWidth) and (j.height() >= minHeight) and (j.width() <= maxWidth) and (j.height() <= maxHeight):
                validDetRects.append(j)
            else:
                for frect in idlGT[filterIndex].rects:
                    if(style == 0):
                        if j.isMatchingStd(frect, minCover,minOverlap, maxDistance):
                            validDetRects.append(j)
                    if(style == 1):
                        if(j.isMatchingPascal(frect, minOverlap)):
                            validDetRects.append(j)

        idlDet[x].rects = validDetRects


def main():
    parser = OptionParser(usage="usage: %prog [options] <groundTruthIdl> <detectionIdl>")
    parser.add_option("-o", "--outFile", action="store", type="string", dest="outFile")
    parser.add_option("-a", "--analysisFiles", action="store", type="string", dest="analysisFile")
    parser.add_option("-s", "--minScore", action="store", type="float", dest="minScore")
    parser.add_option("-w", "--minWidth", action="store", type="int", dest="minWidth", default=0)
    parser.add_option("-u", "--minHeight", action="store", type="int", dest="minHeight",default=0)
    parser.add_option("--maxWidth", action="store", type="float", dest="maxWidth", default=float('inf'))
    parser.add_option("--maxHeight", action="store", type="float", dest="maxHeight", default=float('inf'))
    parser.add_option("-r", "--fixAspectRatio", action="store", type="float", dest="aspectRatio")
    parser.add_option("-p", "--Pascal-Style", action="store_true", dest="pascalStyle")
    parser.add_option("-l", "--Leibe-Seemann-Matching-Style", action="store_true", dest="leibeStyle")
    parser.add_option("--minCover", action="store", type="float", dest="minCover", default=0.5)
    parser.add_option("--maxDistance", action="store", type="float", dest="maxDistance", default=0.5)
    parser.add_option("--minOverlap", action="store", type="float", dest="minOverlap", default=0.5)
    parser.add_option("--clipToImageWidth", action="store", type="float", dest="clipWidth", default= None)
    parser.add_option("--clipToImageHeight", action="store", type="float", dest="clipHeight", default= None)
    parser.add_option("-d", "--dropFirst", action="store_true", dest="dropFirst")
    #parser.add_option("-c", "--class", action="store", type="int", dest="classID", default=-1)
    parser.add_option("-c", "--class", action="store", type="int", dest="classID", default = None)
    parser.add_option("-i", "--ignore", action="store", type="string", dest="ignoreFile")
    parser.add_option("--ignoreOverlap", action="store", type="float", dest="ignoreOverlap", default = 0.9)
    (options, args) = parser.parse_args()

    if (len(args) < 2):
        print "Please specify annotation and detection as arguments!"
        parser.print_help()
        sys.exit(1)

    annoFile = args[0]

    # First figure out the minimum height and width we are dealing with
    minWidth = options.minWidth
    minHeight = options.minHeight
    maxWidth = options.maxWidth
    maxHeight = options.maxHeight
    print "Minimum width: %d height: %d" % (minWidth, minHeight)

    # Load files
    annoIDL = parse(annoFile)
    detIDL = []
    for dets in args[1:]:
        detIDL += parse(dets)

    if options.ignoreFile != None:
        ignoreIDL = parse(options.ignoreFile)
    else:
        ignoreIDL = copy.deepcopy(annoIDL)
        for anno in ignoreIDL:
            anno.rects = []

    if(options.classID is not None):
        for anno in annoIDL:<|fim▁hole|>
        for anno in ignoreIDL:
            anno.rects = [rect for rect in anno.rects if (rect.classID == options.classID or rect.classID == -1)]

    # prevent division by zero when fixing aspect ratio
    for anno in annoIDL:
        anno.rects = [rect for rect in anno.rects if rect.width() > 0 and rect.height() > 0]
    for anno in detIDL:
        anno.rects = [rect for rect in anno.rects if rect.width() > 0 and rect.height() > 0]
    for anno in ignoreIDL:
        anno.rects = [rect for rect in anno.rects if rect.width() > 0 and rect.height() > 0]

    # Fix aspect ratio
    if (not options.aspectRatio == None):
        forceAspectRatio(annoIDL, options.aspectRatio)
        forceAspectRatio(detIDL, options.aspectRatio)
        forceAspectRatio(ignoreIDL, options.aspectRatio)

    # Deselect detections with too low score
    if (not options.minScore == None):
        for i,anno in enumerate(detIDL):
            validRects = []
            for rect in anno.rects:
                if (rect.score >= options.minScore):
                    validRects.append(rect)
            anno.rects = validRects

    # Clip detections to the image dimensions
    if(options.clipWidth != None or options.clipHeight != None):
        min_x = -float('inf')
        min_y = -float('inf')
        max_x = float('inf')
        max_y = float('inf')
        if(options.clipWidth != None):
            min_x = 0
            max_x = options.clipWidth
        if(options.clipHeight != None):
            min_y = 0
            max_y = options.clipHeight
        print "Clipping width: (%.02f-%.02f); clipping height: (%.02f-%.02f)" % (min_x, max_x, min_y, max_y)
        for anno in annoIDL:
            for rect in anno:
                rect.clipToImage(min_x, max_x, min_y, max_y)
        for anno in detIDL:
            for rect in anno:
                rect.clipToImage(min_x, max_x, min_y, max_y)

    # Setup matching style; standard is Pascal
    # style
    matchingStyle = 1 # Pascal style
    if (options.pascalStyle == True):
        matchingStyle = 1
    if (options.leibeStyle == True):
        matchingStyle = 0
    if (options.pascalStyle and options.leibeStyle):
        print "Conflicting matching styles!"
        sys.exit(1)

    if (options.dropFirst == True):
        print "Drop first frame of each sequence..."
        newIDL = []
        for i, anno in enumerate(detIDL):
            if (i > 1 and detIDL[i].frameNr == detIDL[i-1].frameNr + 1 and detIDL[i].frameNr == detIDL[i-2].frameNr + 2 and detIDL[i].frameNr == detIDL[i-3].frameNr + 3 and detIDL[i].frameNr == detIDL[i-4].frameNr + 4):
                newIDL.append(anno)
        detIDL = newIDL

    # Asort detections which are too small/too big
    print "Asorting too large/ too small detections"
    asort(annoIDL, detIDL, minWidth, minHeight, matchingStyle, options.minCover, options.minOverlap, options.maxDistance, maxWidth, maxHeight)

    #Debugging asort
    #saveIDL("testGT.idl", annoIDL)
    #saveIDL("testDET.idl", detIDL)

    noAnnotations = 0
    for anno in annoIDL:
        for j,detAnno in enumerate(detIDL):
            if (suffixMatch(anno.imageName, detIDL[j].imageName) and anno.frameNr == detIDL[j].frameNr):
                noAnnotations = noAnnotations + len(anno.rects)
                break
    print "#Annotations:", noAnnotations

    ###--- set up graphs ---###
    print "Setting up graphs ..."
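    # Each AnnoGraph below is a small flow network for one annotated frame:
    # source -> detections -> ground-truth rectangles -> sink, with unit
    # capacities, so its max flow equals the number of ground-truth
    # rectangles matched by the detections accepted so far.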
    graphs = []
    allRects = []
    missingFrames = 0
    for i in xrange(len(annoIDL)):
        imageFound = False
        filterIndex = -1
        for j, detAnno in enumerate(detIDL):
            if (suffixMatch(annoIDL[i].imageName, detIDL[j].imageName) and annoIDL[i].frameNr == detIDL[j].frameNr):
                filterIndex = j
                imageFound = True
                break
        if(not imageFound):
            print "No annotation/detection pair found for: " + annoIDL[i].imageName + " frame: " + str(annoIDL[i].frameNr)
            missingFrames += 1
            continue;

        graphs.append(AnnoGraph(annoIDL[i], detIDL[filterIndex], ignoreIDL[i], matchingStyle, options.minCover, options.minOverlap, options.maxDistance, options.ignoreOverlap))

        for j,rect in enumerate(detIDL[filterIndex]):
            newRect = detAnnoRect()
            newRect.imageName = anno.imageName
            newRect.frameNr = anno.frameNr
            newRect.rect = rect
            newRect.imageIndex = i - missingFrames
            newRect.boxIndex = j
            allRects.append(newRect)

    print "missingFrames: ", missingFrames
    print "Number of detections on annotated frames: " , len(allRects)

    ###--- get scores from all rects ---###
    print "Sorting scores ..."
    allRects.sort(cmpDetAnnoRectsByScore)
    allRects.reverse()

    ###--- gradually decrease score ---###
    print "Gradually decrease score ..."
    lastScore = float('infinity')
    precs = [1.0]
    recalls = [0.0]
    #fppi = [ 10**(math.floor(math.log(1.0 / float(len(annoIDL)))/math.log(10) * 10.0) / 10.0) ]
    fppi = [ 1.0 / float(len(annoIDL)) ]
    scores = [lastScore]

    numDet = len(allRects)
    sf = lastsf = 0
    cd = lastcd = 0
    iflow = lastiflow = 0
    changed = False
    firstFP = True
    for i,nextrect in enumerate(allRects):
        score = nextrect.rect.score;

        # updating true and false positive counts
        sf = sf - graphs[nextrect.imageIndex].maxflow()
        cd = cd - graphs[nextrect.imageIndex].consideredDets()
        iflow = iflow - graphs[nextrect.imageIndex].ignoredFlow()

        #changed = changed or graphs[nextrect.imageIndex].decreaseScore(score)
        changed = graphs[nextrect.imageIndex].addBB(nextrect) or changed

        sf = sf + graphs[nextrect.imageIndex].maxflow()
        cd = cd + graphs[nextrect.imageIndex].consideredDets()
        iflow = iflow + graphs[nextrect.imageIndex].ignoredFlow()

        if(firstFP and cd - sf != 0):
            firstFP = False
            changed = True

        if (i == numDet - 1 or score != allRects[i + 1].rect.score or firstFP or i == len(allRects)):
            if(changed or i == numDet - 1 or i == len(allRects)):
                if(lastcd > 0):
                    scores.append(lastScore)
                    recalls.append(float(lastsf) / float(noAnnotations - lastiflow))
                    precs.append(float(lastsf) / float(lastcd))
                    fppi.append(float(lastcd - lastsf) / float(len(annoIDL)))
                if (cd > 0):
                    scores.append(score)
                    recalls.append(float(sf) / float(noAnnotations - iflow))
                    precs.append(float(sf) / float(cd))
                    fppi.append(float(cd - sf) / float(len(annoIDL)))
                changed = False

        lastScore = score
        lastsf = sf
        lastcd = cd
        lastiflow = iflow

    ###--- output to file ---###
    outfilename = options.outFile
    if outfilename is None:
        outputDir = os.path.dirname(os.path.abspath(args[1]))
        outputFile = os.path.basename(os.path.abspath(args[1]))
        [base, ext] = idlBase(outputFile)
        outfilename = outputDir + "/rpc-" + base +".txt"

    print "saving " + outfilename;
    file = open(outfilename, 'w')
    for i in xrange(len(precs)):
        file.write(str(precs[i])+" "+str(recalls[i])+" "+str(scores[i])+ " " + str(fppi[i])+ "\n")
    file.close()

    # Extracting failure cases
    if(options.analysisFile != None):
        anaPrefix = options.analysisFile

        falsePositives = []
        truePositives = []
        missingRecall = []
        ignoredTruePositives = []
        for i in xrange(len(graphs)):
            falsePositives.append(graphs[i].getFalsePositives())
            truePositives.append(graphs[i].getTruePositives())
            truePositives[-1].imageName = falsePositives[-1].imageName
            truePositives[-1].imagePath = falsePositives[-1].imagePath
            missingRecall.append(graphs[i].getMissingRecall())
            missingRecall[-1].imageName = falsePositives[-1].imageName
            missingRecall[-1].imagePath = falsePositives[-1].imagePath
            if options.ignoreFile != None:
                ignoredTruePositives.append(graphs[i].getIgnoredTruePositives())

        saveIDL(anaPrefix + "-falsePositives.idl.gz", falsePositives);
        sortedFP = annoAnalyze(falsePositives);
        saveIDL(anaPrefix + "-falsePositives-sortedByScore.idl.gz", sortedFP);
        saveIDL(anaPrefix + "-truePositives.idl.gz", truePositives);
        sortedFP = annoAnalyze(truePositives);
        saveIDL(anaPrefix + "-truePositives-sortedByScore.idl.gz", sortedFP);
        if options.ignoreFile != None:
            saveIDL(anaPrefix + "-ignoredTruePositives.idl.gz", ignoredTruePositives)
        saveIDL(anaPrefix + "-missingRecall.idl.gz", missingRecall);

if __name__ == "__main__":
    main()<|fim▁end|>
            anno.rects = [rect for rect in anno.rects if (rect.classID == options.classID or rect.classID == -1)]
        for anno in detIDL:
            anno.rects = [rect for rect in anno.rects if (rect.classID == options.classID or rect.classID == -1)]
<|file_name|>main.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from bottle import run, get, post, view, request, redirect, route, static_file, template
import bottle
import json
import threading
import requests
import time
import sys

messages = set([])

@bottle.route('/static/<path:path>')
def server_static(path):
    return static_file(path, root='static')

@get('/chat')
@view('chat')
def chat():
    name = request.query.name
    return dict(msg=list(messages), name=name)

@route('/')<|fim▁hole|>

@post('/send')
def sendmsg():
    name = request.forms.getunicode('name')
    msg = request.forms.getunicode('msg')
    global messages
    if name != None and msg != None:
        messages.add((name, msg))
        redirect('chat?name=' + name)
    else:
        redirect('chat')

run(host='localhost', port=int(sys.argv[1]))<|fim▁end|>
def index():
    redirect('chat')
<|file_name|>test_settings.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import django

SECRET_KEY = 'psst'
SITE_ID = 1

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }<|fim▁hole|>

if django.VERSION >= (1, 8):
    TEMPLATES = [
        {
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'DIRS': [],
            'APP_DIRS': True,
            'OPTIONS': {
                'context_processors': [
                    'django.template.context_processors.debug',
                    'django.template.context_processors.request',
                    'django.contrib.auth.context_processors.auth',
                    'django.contrib.messages.context_processors.messages',
                    'django.core.context_processors.request',
                    'allauth.account.context_processors.account',
                    'allauth.socialaccount.context_processors.socialaccount',
                ],
            },
        },
    ]
else:
    TEMPLATE_CONTEXT_PROCESSORS = (
        "django.contrib.auth.context_processors.auth",
        "django.core.context_processors.debug",
        "django.core.context_processors.i18n",
        "django.core.context_processors.media",
        "django.core.context_processors.static",
        "django.core.context_processors.request",
        "django.contrib.messages.context_processors.messages",
        "allauth.account.context_processors.account",
        "allauth.socialaccount.context_processors.socialaccount",
    )

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.amazon',
    'allauth.socialaccount.providers.angellist',
    'allauth.socialaccount.providers.baidu',
    'allauth.socialaccount.providers.bitbucket',
    'allauth.socialaccount.providers.bitly',
    'allauth.socialaccount.providers.coinbase',
    'allauth.socialaccount.providers.douban',
    'allauth.socialaccount.providers.dropbox',
    'allauth.socialaccount.providers.dropbox_oauth2',
    'allauth.socialaccount.providers.evernote',
    'allauth.socialaccount.providers.feedly',
    'allauth.socialaccount.providers.facebook',
    'allauth.socialaccount.providers.flickr',
    'allauth.socialaccount.providers.foursquare',
    'allauth.socialaccount.providers.google',
    'allauth.socialaccount.providers.github',
    'allauth.socialaccount.providers.hubic',
    'allauth.socialaccount.providers.instagram',
    'allauth.socialaccount.providers.linkedin',
    'allauth.socialaccount.providers.linkedin_oauth2',
    'allauth.socialaccount.providers.mailru',
    'allauth.socialaccount.providers.windowslive',
    'allauth.socialaccount.providers.odnoklassniki',
    'allauth.socialaccount.providers.openid',
    'allauth.socialaccount.providers.orcid',
    'allauth.socialaccount.providers.paypal',
    'allauth.socialaccount.providers.persona',
    'allauth.socialaccount.providers.soundcloud',
    'allauth.socialaccount.providers.spotify',
    'allauth.socialaccount.providers.stackexchange',
    'allauth.socialaccount.providers.tumblr',
    'allauth.socialaccount.providers.twitch',
    'allauth.socialaccount.providers.twitter',
    'allauth.socialaccount.providers.vimeo',
    'allauth.socialaccount.providers.weibo',
    'allauth.socialaccount.providers.vk',
    'allauth.socialaccount.providers.xing',
)

AUTHENTICATION_BACKENDS = (
    "django.contrib.auth.backends.ModelBackend",
    "allauth.account.auth_backends.AuthenticationBackend",
)

STATIC_ROOT = '/tmp/'  # Dummy
STATIC_URL = '/static/'<|fim▁end|>
}
ROOT_URLCONF = 'allauth.urls'
<|file_name|>00_pass.py<|end_file_name|><|fim▁begin|># Tests:
# assign ::= expr store<|fim▁hole|><|fim▁end|>
pass
<|file_name|>environment-type-def.ts<|end_file_name|><|fim▁begin|>export interface Environment {
  readonly production: boolean;
  readonly geoLocation: {
    readonly timeoutMillis: number
    readonly firefoxWorkaroundTimeoutMillis: number
    readonly updateMillis: number
  };
  readonly gastroLocationsUrl: string;
  readonly shoppingLocationsUrl?: string;
  readonly reviewsBaseUrl: string;
  readonly homePage: {
    readonly url: string
    readonly title: string
  };
  readonly nativeAppUrl?: string;
  readonly reportNewLocationUrl?: string;
  readonly reportProblemEmail: string;
  readonly area: {
    /**
     * Two-letter country code in lowercase.
     *
     * E.g. "de"
     */
    readonly country: string
    /**<|fim▁hole|>
      readonly lat: number
      readonly lng: number
    }
    /**
     * The initial bounds of the map
     */
    readonly bounds: {
      readonly north: number
      readonly south: number
      readonly west: number
      readonly east: number
    }
    /**
     * The initial zoom of the map
     */
    readonly zoom: 12
  };
  readonly googleAnalyticsTrackingIds: {
    readonly website?: string
    readonly map?: string
  };
}<|fim▁end|>
     * The initial center of the map
     */
    readonly center: {
<|file_name|>keyword.rs<|end_file_name|><|fim▁begin|>use liquid_core::Expression;
use liquid_core::Result;
use liquid_core::Runtime;
use liquid_core::{
    Display_filter, Filter, FilterParameters, FilterReflection, FromFilterParameters, ParseFilter,
};
use liquid_core::{Value, ValueView};

<|fim▁hole|>struct TestKeywordFilterParameters {
    #[parameter(
        description = "Optional keyword argument.",
        arg_type = "str",
        mode = "keyword"
    )]
    optional: Option<Expression>,
    #[parameter(
        description = "Required keyword argument. Must be a boolean.",
        arg_type = "bool",
        mode = "keyword"
    )]
    required: Expression,
}

#[derive(Clone, ParseFilter, FilterReflection)]
#[filter(
    name = "kw",
    description = "Filter to test keyword arguments.",
    parameters(TestKeywordFilterParameters),
    parsed(TestKeywordFilter)
)]
pub struct TestKeywordFilterParser;

#[derive(Debug, FromFilterParameters, Display_filter)]
#[name = "kw"]
pub struct TestKeywordFilter {
    #[parameters]
    args: TestKeywordFilterParameters,
}

impl Filter for TestKeywordFilter {
    fn evaluate(&self, _input: &dyn ValueView, runtime: &dyn Runtime) -> Result<Value> {
        let args = self.args.evaluate(runtime)?;

        let required = args.required;

        let result = if let Some(optional) = args.optional {
            format!("<optional: {}; required: {}>", optional, required)
        } else {
            format!("<required: {}>", required)
        };

        Ok(Value::scalar(result))
    }
}<|fim▁end|>
#[derive(Debug, FilterParameters)]
<|file_name|>connection.py<|end_file_name|><|fim▁begin|>#
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
#
# Copyright (c) 2015 Juniper Networks, Inc.
# All rights reserved.
#
# Use is subject to license terms.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""
This module defines the Connection class.
"""
from __future__ import unicode_literals
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import object
import requests
import logging

class Connection(object):
    """
    Creates a connection to Space Platform mimicking a GUI login. This class
    is **not** thread-safe. It is up to the users of the class to ensure
    thread safety. The ``rest.Space`` class uses this class for supporting
    session-based connections to Junos Space. Thread-safety requirements are
    met by that class.
    """
    def __init__(self, homeurl, username=None, password=None, cert=None, our_ip=None):
        self._logger = logging.getLogger('root')
        self.homeurl = homeurl + '/mainui'
        self.authurl = homeurl + '/mainui/j_security_check'
        self.session = None
        if username is not None:
            if password is None:
                raise ValueError('password is mandatory along with username')
            if cert is not None:
                raise ValueError('You must provide only one of username+password or cert')
        else:
            if password is not None:<|fim▁hole|>
                raise ValueError('You must provide one of username+password or cert')

        self.username = username
        self.password = password
        self.our_ip = our_ip
        self.cert = cert
        self._logger.debug("Connection: Initiating login to %s", self.homeurl)
        self.login()

    def login(self):
        """ Login to Space """
        self.session = requests.Session()
        sess = self.session
        if self.our_ip is None:
            resp = sess.get(self.homeurl, cert=self.cert, verify=False)
            #self._logger.debug(resp.status_code)
            #self._logger.debug(resp.headers)
            #self._logger.debug(resp.text)
            # Extract the ipAddr and code variables embbed in the form validation code
            ip_addr_start_idx = resp.text.find("var ipAddr = ")
            if ip_addr_start_idx < 0:
                self.check_login_status()
                return

            ip_addr_end_idx = resp.text.find("\n", ip_addr_start_idx)
            ip_addr_line = resp.text[ip_addr_start_idx : ip_addr_end_idx]
            ip_addr_items = ip_addr_line.split("=", 2)
            ip_addr = ip_addr_items[1].strip("'; ").strip()

            #codeStartIdx = r.text.find("var code = ", ip_addr_end_idx);
            #codeEndIdx = r.text.find("\n", codeStartIdx);
            #codeLine = r.text[codeStartIdx : codeEndIdx]
            #codeItems = codeLine.split("=", 2);
            #code = codeItems[1].strip("'; ").strip();'''
            #form_username = self.username + '%' + code + '@' + ip_addr;
        else:
            resp = sess.get(self.homeurl, cert=self.cert, verify=False)
            ip_addr = self.our_ip

        form_username = self.username + '@' + ip_addr
        data = {
            "j_screen_username" : self.username,
            "j_username" : form_username,
            "j_password" : self.password
        }
        self._logger.debug(data)

        resp = sess.post(self.authurl, data=data, cert=self.cert, verify=False)
        #self._logger.debug(resp.status_code)
        #self._logger.debug(resp.headers)
        #self._logger.debug(resp.text)
        self.check_login_status()
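
    # Rough usage sketch (illustrative only; the host name and credentials
    # below are made-up examples, not values used by this module):
    #
    #   conn = Connection('https://space.example.net', username='super',
    #                     password='secret')
    #   session = conn.get_session()  # requests.Session carrying login state
    #   ...
    #   conn.logout()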
""" Checks if a login has been established """ return self.session is not None def check_login_status(self): """ Check login-status """ if not self.is_logged_in(): raise Exception("Not logged in") resp = self.session.get(self.homeurl, verify=False) ip_addr_start_idx = resp.text.find("var ipAddr = ") if ip_addr_start_idx >= 0: raise Exception("Not in a logged-in session.") def get_session(self): """ Return the HTTP session object """ if self.is_logged_in(): return self.session else: raise Exception("Not logged in") def logout(self): """ Logout from Space Server """ logout_url = self.homeurl + "/unsecured/logout.jsp" resp = self.session.get(logout_url, verify=False) #self._logger.debug(resp.status_code) #self._logger.debug(resp.headers) #self._logger.debug(resp.text) if resp.status_code == 200: self.session = None<|fim▁end|>
                raise ValueError('password is valid only along with username')
            if cert is None:
<|file_name|>transpose4x4-opt.py<|end_file_name|><|fim▁begin|># This file is part of Peach-Py package and is licensed under the Simplified BSD license.
# See license.rst for the full text of the license.

from peachpy.x86_64 import *
from peachpy import *

matrix = Argument(ptr(float_))

with Function("transpose4x4_opt", (matrix,)):
    reg_matrix = GeneralPurposeRegister64()
    LOAD.ARGUMENT(reg_matrix, matrix)

    xmm_rows = [XMMRegister() for _ in range(4)]
    for i, xmm_row in enumerate(xmm_rows):
        MOVUPS(xmm_row, [reg_matrix + i * XMMRegister.size])

    xmm_temps = [XMMRegister() for _ in range(2)]
    # xmm_temps[0] = ( m00, m01, m02, m03 )
    MOVAPS(xmm_temps[0], xmm_rows[0])
    # xmm_temps[1] = ( m20, m21, m22, m23 )
    MOVAPS(xmm_temps[1], xmm_rows[2])

    # xmm_rows[0] = ( m00, m10, m01, m11 )
    UNPCKLPS(xmm_rows[0], xmm_rows[1])
    # xmm_rows[2] = ( m20, m30, m21, m31 )
    UNPCKLPS(xmm_rows[2], xmm_rows[3])
    # xmm_rows[1] = ( m02, m12, m03, m13 )
    UNPCKHPS(xmm_temps[0], xmm_rows[1])
    xmm_rows[1] = xmm_temps[0]
    # xmm_rows[3] = ( m22, m32, m23, m33 )
    UNPCKHPS(xmm_temps[1], xmm_rows[3])
    xmm_rows[3] = xmm_temps[1]

    xmm_temps = [XMMRegister() for _ in range(2)]
    # xmm_temps[0] = ( m00, m10, m01, m11 )
    MOVAPS(xmm_temps[0], xmm_rows[0])
    # xmm_temps[1] = ( m02, m12, m03, m13 )
    MOVAPS(xmm_temps[1], xmm_rows[1])

    # xmm_rows[0] = ( m00, m10, m20, m30 )
    MOVLHPS(xmm_rows[0], xmm_rows[2])
    MOVUPS([reg_matrix], xmm_rows[0])
    # xmm_rows[2] = ( m01, m11, m21, m31 )
    MOVHLPS(xmm_rows[2], xmm_temps[0])
    MOVUPS([reg_matrix + 16], xmm_rows[2])
    # xmm_rows[1] = ( m02, m12, m22, m32 )
    MOVLHPS(xmm_rows[1], xmm_rows[3])
    MOVUPS([reg_matrix + 32], xmm_rows[1])<|fim▁hole|>

    RETURN()<|fim▁end|>
    # xmm_rows[3] = ( m03, m13, m23, m33 )
    MOVHLPS(xmm_rows[3], xmm_temps[1])
    MOVUPS([reg_matrix + 48], xmm_rows[3])
<|file_name|>network_cls.py<|end_file_name|><|fim▁begin|>import tensorflow as tf
from ocnn import *


# octree-based resnet55
def network_resnet(octree, flags, training=True, reuse=None):
  depth = flags.depth
  channels = [2048, 1024, 512, 256, 128, 64, 32, 16, 8]
  with tf.variable_scope("ocnn_resnet", reuse=reuse):
    data = octree_property(octree, property_name="feature", dtype=tf.float32,
                           depth=depth, channel=flags.channel)
    data = tf.reshape(data, [1, flags.channel, -1, 1])

    with tf.variable_scope("conv1"):
      data = octree_conv_bn_relu(data, octree, depth, channels[depth], training)

    for d in range(depth, 2, -1):
      for i in range(0, flags.resblock_num):
        with tf.variable_scope('resblock_%d_%d' % (d, i)):
          data = octree_resblock(data, octree, d, channels[d], 1, training)
      with tf.variable_scope('max_pool_%d' % d):
        data, _ = octree_max_pool(data, octree, d)

    with tf.variable_scope("global_average"):
      data = octree_full_voxel(data, depth=2)
      data = tf.reduce_mean(data, 2)
      if flags.dropout[0]:
        data = tf.layers.dropout(data, rate=0.5, training=training)

    with tf.variable_scope("fc2"):
      logit = dense(data, flags.nout, use_bias=True)

  return logit


# the ocnn in the paper
def network_ocnn(octree, flags, training=True, reuse=None):
  depth = flags.depth
  channels = [512, 256, 128, 64, 32, 16, 8, 4, 2]
  with tf.variable_scope("ocnn", reuse=reuse):
    data = octree_property(octree, property_name="feature", dtype=tf.float32,
                           depth=depth, channel=flags.channel)
    data = tf.reshape(data, [1, flags.channel, -1, 1])

    for d in range(depth, 2, -1):
      with tf.variable_scope('depth_%d' % d):
        data = octree_conv_bn_relu(data, octree, d, channels[d], training)
        data, _ = octree_max_pool(data, octree, d)

    with tf.variable_scope("full_voxel"):
      data = octree_full_voxel(data, depth=2)
      data = tf.layers.dropout(data, rate=0.5, training=training)

    with tf.variable_scope("fc1"):
      data = fc_bn_relu(data, channels[2], training=training)
      data = tf.layers.dropout(data, rate=0.5, training=training)
<|fim▁hole|>

  return logit


def cls_network(octree, flags, training, reuse=False):
  if flags.name.lower() == 'ocnn':
    return network_ocnn(octree, flags, training, reuse)
  elif flags.name.lower() == 'resnet':
    return network_resnet(octree, flags, training, reuse)
  else:
    print('Error, no network: ' + flags.name)<|fim▁end|>
    with tf.variable_scope("fc2"):
      logit = dense(data, flags.nout, use_bias=True)
<|file_name|>pull_request_file.py<|end_file_name|><|fim▁begin|># coding=utf-8
from __future__ import unicode_literals, print_function

from flask import request, jsonify, url_for
from flask_login import current_user
import bugsnag

from . import load
from webhookdb.tasks.pull_request_file import spawn_page_tasks_for_pull_request_files


@load.route('/repos/<owner>/<repo>/pulls/<int:number>/files', methods=["POST"])
def pull_request_files(owner, repo, number):
    """
    Queue tasks to load the pull request files (diffs) for a single pull
    request into WebhookDB.

    :statuscode 202: task successfully queued
    """
    bugsnag_ctx = {"owner": owner, "repo": repo, "number": number}
    bugsnag.configure_request(meta_data=bugsnag_ctx)
    children = bool(request.args.get("children", False))

    result = spawn_page_tasks_for_pull_request_files.delay(
        owner, repo, number, children=children,
        requestor_id=current_user.get_id(),
    )
    resp = jsonify({"message": "queued"})
    resp.status_code = 202<|fim▁hole|><|fim▁end|>
    resp.headers["Location"] = url_for("tasks.status", task_id=result.id)
    return resp
<|file_name|>HedgeHogFieldActorPropertiesPanel.java<|end_file_name|><|fim▁begin|>//
// Copyright (c) 2014 Limit Point Systems, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package tools.viewer.user;

import tools.viewer.common.*;
import tools.viewer.render.*;
import tools.common.gui.*;

import java.awt.*;
import java.awt.event.*;
import java.util.*;
import javax.swing.*;
import javax.swing.event.*;
import javax.swing.border.*;
import java.text.*;

import vtk.*;

/**
 * Implementation of <code>G3DFieldActorPropertiesPanel</code> for editing the
 * values of a <code>HedgeHogFieldActorDescriptor</code>.
 */
public class HedgeHogFieldActorPropertiesPanel
  extends G3DFieldActorPropertiesPanel
{
  // CONSTANTS FACET

  protected static final String[] VECTOR_MODES =
    { ViewerConstants.VECTOR_MAGNITUDE, ViewerConstants.VECTOR_NORMAL };

  // GUI FACET

  protected JPanel hedgeHogPanel;
  protected JSpinner scaleFactorSpinner;
  protected JComboBox vectorModeComboBox;

  // CONSTRUCTORS

  /**
   * Constructor
   */
  public HedgeHogFieldActorPropertiesPanel(G3DViewer xviewer,
                                           FieldActorDescriptor[] xdescriptors)
  {
    super(xviewer, xdescriptors);

    hedgeHogPanel = createHedgeHogPanel();<|fim▁hole|>

    initValues();
  }

  // CREATE FACET

  /**
   * Create hedge hog panel
   */
  protected JPanel createHedgeHogPanel()
  {
    JPanel result = new JPanel();
    result.setLayout(new BoxLayout(result, BoxLayout.PAGE_AXIS));
    result.setBorder(
      BorderFactory.createCompoundBorder(
        BorderFactory.createEmptyBorder(6, 12, 6, 12),
        BorderFactory.createTitledBorder("Hedge Hog:")));

    //=====

    result.add(Box.createVerticalGlue());

    JPanel panel = new JPanel();
    JLabel scaleFactorLabel = new JLabel("Scale Factor: ", JLabel.RIGHT);
    scaleFactorLabel.setAlignmentX(Component.CENTER_ALIGNMENT);
    SpinnerModel scaleFactorModel =
      new SpinnerNumberModel(1.0, 0.0, 10000000.0, 0.01);
    scaleFactorSpinner = new JSpinner(scaleFactorModel);

    panel.add(scaleFactorLabel);
    panel.add(scaleFactorSpinner);
    result.add(panel);

    result.add(Box.createVerticalGlue());

    //=====

    panel = new JPanel();
    JLabel vectorModeLabel = new JLabel("Vector Mode:", JLabel.RIGHT);
    vectorModeLabel.setAlignmentX(Component.CENTER_ALIGNMENT);
    vectorModeComboBox = new JComboBox(VECTOR_MODES);

    panel.add(vectorModeLabel);
    panel.add(vectorModeComboBox);
    result.add(panel);

    result.add(Box.createVerticalGlue());

    //=====

    return result;
  }

  // INITIALIZE FACET

  /**
   *
   */
  public void initValues()
  {
    super.initValues();

    // Use the first actor in the list to initialize the
    // user interface.

    HedgeHogFieldActorDescriptor actor =
      (HedgeHogFieldActorDescriptor) descriptors[0];

    initHedgeHogPanel(actor);
  }

  /**
   *
   */
  protected void initHedgeHogPanel(HedgeHogFieldActorDescriptor actor)
  {
    scaleFactorSpinner.setValue(actor.scaleFactor);
    vectorModeComboBox.setSelectedItem(actor.vectorMode);
  }
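
  // The panel edits every selected descriptor at once: initValues() seeds
  // the spinner and combo box from descriptors[0], and doApply() below
  // copies the edited values back into each descriptor in the array.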
  // APPLY FACET

  /**
   *
   */
  public void doApply()
  {
    // Set the wait state to true, it is restored by
    // UpdatePropertiesPanelEvent.

    setWaitState(true);

    synchronized (viewer.getScript())
    {
      synchronized (viewer.getScene())
      {
        // Apply the changes to the descriptors

        HedgeHogFieldActorDescriptor actor;
        for(int i=0; i<descriptors.length; i++)
        {
          actor = (HedgeHogFieldActorDescriptor) descriptors[i];
          applyHedgeHog(actor);
        }
      }
    }

    super.doApply(false);
  }

  /**
   *
   */
  public void applyHedgeHog(HedgeHogFieldActorDescriptor actor)
  {
    actor.scaleFactor =
      ((SpinnerNumberModel)scaleFactorSpinner.getModel()).getNumber().doubleValue();
    actor.vectorMode = (String) vectorModeComboBox.getSelectedItem();
  }
}<|fim▁end|>
    tabbedPane.addTab("Hedge Hog", hedgeHogPanel);
<|file_name|>AutoEnchanter.java<|end_file_name|><|fim▁begin|>package me.mrCookieSlime.Slimefun.Objects.SlimefunItem.machines;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import me.mrCookieSlime.CSCoreLibPlugin.general.Inventory.InvUtils;
import me.mrCookieSlime.CSCoreLibPlugin.general.Inventory.Item.CustomItem;
import me.mrCookieSlime.EmeraldEnchants.EmeraldEnchants;
import me.mrCookieSlime.EmeraldEnchants.ItemEnchantment;
import me.mrCookieSlime.Slimefun.Lists.RecipeType;
import me.mrCookieSlime.Slimefun.Objects.Category;
import me.mrCookieSlime.Slimefun.Objects.SlimefunItem.abstractItems.AContainer;
import me.mrCookieSlime.Slimefun.Objects.SlimefunItem.abstractItems.MachineHelper;
import me.mrCookieSlime.Slimefun.Objects.SlimefunItem.abstractItems.MachineRecipe;
import me.mrCookieSlime.Slimefun.api.BlockStorage;
import me.mrCookieSlime.Slimefun.api.Slimefun;
import me.mrCookieSlime.Slimefun.api.energy.ChargableBlock;

import org.bukkit.Material;
import org.bukkit.block.Block;
import org.bukkit.enchantments.Enchantment;
import org.bukkit.inventory.ItemStack;
import org.bukkit.inventory.meta.EnchantmentStorageMeta;
import org.bukkit.inventory.meta.ItemMeta;
import org.bukkit.material.MaterialData;

public class AutoEnchanter extends AContainer {

    public static int max_emerald_enchantments = 2;

    public AutoEnchanter(Category category, ItemStack item, String name, RecipeType recipeType, ItemStack[] recipe) {
        super(category, item, name, recipeType, recipe);
    }

    @Override
    public String getInventoryTitle() {
        return "&5Auto-Enchanter";
    }

    @Override
    public ItemStack getProgressBar() {
        return new ItemStack(Material.GOLD_CHESTPLATE);
    }

    @Override
    public void registerDefaultRecipes() {}

    @Override
    public int getEnergyConsumption() {
        return 9;
    }
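
    // tick() drives the machine one step at a time: while a recipe is
    // processing it burns charge and counts down progress (slot 22 only
    // holds a progress-bar item); otherwise it scans the input slots for an
    // enchanted book plus a target item and queues a MachineRecipe whose
    // duration (75 * amount) scales with the number of applicable
    // enchantments.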
    @SuppressWarnings("deprecation")
    @Override
    protected void tick(Block b) {
        if (isProcessing(b)) {
            int timeleft = progress.get(b);
            if (timeleft > 0) {
                ItemStack item = getProgressBar().clone();
                item.setDurability(MachineHelper.getDurability(item, timeleft, processing.get(b).getTicks()));
                ItemMeta im = item.getItemMeta();
                im.setDisplayName(" ");
                List<String> lore = new ArrayList<String>();
                lore.add(MachineHelper.getProgress(timeleft, processing.get(b).getTicks()));
                lore.add("");
                lore.add(MachineHelper.getTimeLeft(timeleft / 2));
                im.setLore(lore);
                item.setItemMeta(im);

                BlockStorage.getInventory(b).replaceExistingItem(22, item);

                if (ChargableBlock.isChargable(b)) {
                    if (ChargableBlock.getCharge(b) < getEnergyConsumption()) return;
                    ChargableBlock.addCharge(b, -getEnergyConsumption());
                    progress.put(b, timeleft - 1);
                }
                else progress.put(b, timeleft - 1);
            }
            else {
                BlockStorage.getInventory(b).replaceExistingItem(22, new CustomItem(new MaterialData(Material.STAINED_GLASS_PANE, (byte) 15), " "));
                pushItems(b, processing.get(b).getOutput());

                progress.remove(b);
                processing.remove(b);
            }
        }
        else {
            MachineRecipe r = null;

            slots:
            for (int slot: getInputSlots()) {
                ItemStack target = BlockStorage.getInventory(b).getItemInSlot(slot == getInputSlots()[0] ? getInputSlots()[1]: getInputSlots()[0]);
                ItemStack item = BlockStorage.getInventory(b).getItemInSlot(slot);

                if (item != null && item.getType() == Material.ENCHANTED_BOOK && target != null) {
                    Map<Enchantment, Integer> enchantments = new HashMap<Enchantment, Integer>();
                    Set<ItemEnchantment> enchantments2 = new HashSet<ItemEnchantment>();
                    int amount = 0;
                    int special_amount = 0;
                    EnchantmentStorageMeta meta = (EnchantmentStorageMeta) item.getItemMeta();
                    for (Map.Entry<Enchantment, Integer> e: meta.getStoredEnchants().entrySet()) {
                        if (e.getKey().canEnchantItem(target)) {
                            amount++;
                            enchantments.put(e.getKey(), e.getValue());
                        }
                    }
                    if (Slimefun.isEmeraldEnchantsInstalled()) {
                        for (ItemEnchantment enchantment: EmeraldEnchants.getInstance().getRegistry().getEnchantments(item)) {
                            if (EmeraldEnchants.getInstance().getRegistry().isApplicable(target, enchantment.getEnchantment()) && EmeraldEnchants.getInstance().getRegistry().getEnchantmentLevel(target, enchantment.getEnchantment().getName()) < enchantment.getLevel()) {
                                amount++;
                                special_amount++;
                                enchantments2.add(enchantment);
                            }
                        }
                        special_amount += EmeraldEnchants.getInstance().getRegistry().getEnchantments(target).size();
                    }
                    if (amount > 0 && special_amount <= max_emerald_enchantments) {
                        ItemStack newItem = target.clone();
                        for (Map.Entry<Enchantment, Integer> e: enchantments.entrySet()) {
                            newItem.addUnsafeEnchantment(e.getKey(), e.getValue());
                        }<|fim▁hole|>
<|file_name|>LoadedProcedureSet.java<|end_file_name|><|fim▁begin|>/* This file is part of VoltDB.
 * Copyright (C) 2008-2014 VoltDB Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
 */

package org.voltdb;

import java.util.HashMap;
import java.util.Map.Entry;
import java.util.Set;

import org.voltcore.logging.Level;
import org.voltcore.logging.VoltLogger;
import org.voltdb.SystemProcedureCatalog.Config;
import org.voltdb.catalog.CatalogMap;
import org.voltdb.catalog.Procedure;
import org.voltdb.compiler.Language;
import org.voltdb.groovy.GroovyScriptProcedureDelegate;
import org.voltdb.utils.LogKeys;

import com.google_voltpatches.common.collect.ImmutableMap;

public class LoadedProcedureSet {

    private static final VoltLogger hostLog = new VoltLogger("HOST");

    // user procedures.
    ImmutableMap<String, ProcedureRunner> procs = ImmutableMap.<String, ProcedureRunner>builder().build();

    // map of sysproc fragment ids to system procedures.
    final HashMap<Long, ProcedureRunner> m_registeredSysProcPlanFragments =
        new HashMap<Long, ProcedureRunner>();

    final ProcedureRunnerFactory m_runnerFactory;
    final long m_siteId;
    final int m_siteIndex;
    final SiteProcedureConnection m_site;

    public LoadedProcedureSet(SiteProcedureConnection site, ProcedureRunnerFactory runnerFactory, long siteId, int siteIndex) {
        m_runnerFactory = runnerFactory;
        m_siteId = siteId;
        m_siteIndex = siteIndex;
        m_site = site;
    }

    public ProcedureRunner getSysproc(long fragmentId) {
        synchronized (m_registeredSysProcPlanFragments) {
            return m_registeredSysProcPlanFragments.get(fragmentId);
        }
    }

    public void registerPlanFragment(final long pfId, final ProcedureRunner proc) {
        synchronized (m_registeredSysProcPlanFragments) {
            assert(m_registeredSysProcPlanFragments.containsKey(pfId) == false);
            m_registeredSysProcPlanFragments.put(pfId, proc);
        }
    }

    public void loadProcedures(
            CatalogContext catalogContext,
            BackendTarget backendTarget,
            CatalogSpecificPlanner csp)
    {
        m_registeredSysProcPlanFragments.clear();
        ImmutableMap.Builder<String, ProcedureRunner> builder =
            loadProceduresFromCatalog(catalogContext, backendTarget, csp);
        loadSystemProcedures(catalogContext, backendTarget, csp, builder);
        procs = builder.build();
    }

    private ImmutableMap.Builder<String, ProcedureRunner> loadProceduresFromCatalog(
            CatalogContext catalogContext,
            BackendTarget backendTarget,
            CatalogSpecificPlanner csp)
    {
        // load up all the stored procedures
        final CatalogMap<Procedure> catalogProcedures = catalogContext.database.getProcedures();
        ImmutableMap.Builder<String, ProcedureRunner> builder = ImmutableMap.<String, ProcedureRunner>builder();
        for (final Procedure proc : catalogProcedures) {

            // Sysprocs used to be in the catalog. Now they aren't. Ignore
            // sysprocs found in old catalog versions. (PRO-365)
            if (proc.getTypeName().startsWith("@")) {
                continue;
            }

            ProcedureRunner runner = null;
            VoltProcedure procedure = null;
            if (proc.getHasjava()) {

                final String className = proc.getClassname();
                Language lang;
                try {
                    lang = Language.valueOf(proc.getLanguage());
                } catch (IllegalArgumentException e) {
                    // default to java for earlier compiled catalogs
                    lang = Language.JAVA;
                }<|fim▁hole|>
                    procClass = catalogContext.classForProcedure(className);
                }
                catch (final ClassNotFoundException e) {
                    if (className.startsWith("org.voltdb.")) {
                        VoltDB.crashLocalVoltDB("VoltDB does not support procedures with package names " +
                                                "that are prefixed with \"org.voltdb\". Please use a different " +
                                                "package name and retry. Procedure name was " + className + ".",
                                                false, null);
                    }
                    else {
                        VoltDB.crashLocalVoltDB("VoltDB was unable to load a procedure (" +
                                                className + ") it expected to be in the " +
                                                "catalog jarfile and will now exit.",
                                                false, null);
                    }
                }
                try {
                    procedure = lang.accept(procedureInstantiator, procClass);
                }
                catch (final Exception e) {
                    hostLog.l7dlog(
                            Level.WARN,
                            LogKeys.host_ExecutionSite_GenericException.name(),
                            new Object[] { m_siteId, m_siteIndex }, e);
                }
            }
            else {
                procedure = new ProcedureRunner.StmtProcedure();
            }
            assert(procedure != null);
            runner = m_runnerFactory.create(procedure, proc, csp);
            builder.put(proc.getTypeName().intern(), runner);
        }
        return builder;
    }

    private static Language.CheckedExceptionVisitor<VoltProcedure, Class<?>, Exception> procedureInstantiator =
            new Language.CheckedExceptionVisitor<VoltProcedure, Class<?>, Exception>() {
                @Override
                public VoltProcedure visitJava(Class<?> p) throws Exception {
                    return (VoltProcedure)p.newInstance();
                }
                @Override
                public VoltProcedure visitGroovy(Class<?> p) throws Exception {
                    return new GroovyScriptProcedureDelegate(p);
                }
            };

    private void loadSystemProcedures(
            CatalogContext catalogContext,
            BackendTarget backendTarget,
            CatalogSpecificPlanner csp,
            ImmutableMap.Builder<String, ProcedureRunner> builder)
    {
        Set<Entry<String,Config>> entrySet = SystemProcedureCatalog.listing.entrySet();
        for (Entry<String, Config> entry : entrySet) {
            Config sysProc = entry.getValue();
            Procedure proc = sysProc.asCatalogProcedure();

            VoltSystemProcedure procedure = null;
            ProcedureRunner runner = null;

            final String className = sysProc.getClassname();
            Class<?> procClass = null;

            // this check is for sysprocs that don't have a procedure class
            if (className != null) {
                try {
                    procClass = catalogContext.classForProcedure(className);
                }
                catch (final ClassNotFoundException e) {
                    if (sysProc.commercial) {
                        continue;
                    }
                    hostLog.l7dlog(
                            Level.WARN,
                            LogKeys.host_ExecutionSite_GenericException.name(),
                            new Object[] { m_siteId, m_siteIndex }, e);
                    VoltDB.crashLocalVoltDB(e.getMessage(), true, e);
                }

                try {
                    procedure = (VoltSystemProcedure) procClass.newInstance();
                }
                catch (final InstantiationException e) {
                    hostLog.l7dlog(
                            Level.WARN,
                            LogKeys.host_ExecutionSite_GenericException.name(),
                            new Object[] { m_siteId, m_siteIndex }, e);
                }
                catch (final IllegalAccessException e) {
                    hostLog.l7dlog(
                            Level.WARN,
                            LogKeys.host_ExecutionSite_GenericException.name(),
                            new Object[] { m_siteId, m_siteIndex }, e);
                }

                runner = m_runnerFactory.create(procedure, proc, csp);
                procedure.initSysProc(m_site, this, proc, catalogContext.cluster);
                builder.put(entry.getKey().intern(), runner);
            }
        }
    }

    public ProcedureRunner getProcByName(String procName)
    {
        return procs.get(procName);
    }
}<|fim▁end|>
Class<?> procClass = null;
                try {
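loadProcedures() above follows a load-then-freeze pattern: one builder is populated with the user procedures, the system procedures are added to the same builder, and only then is an immutable snapshot published for readers. A rough Python analogue of that shape, with all names illustrative rather than the VoltDB API:

# Sketch of the load-then-freeze pattern (hypothetical inputs).
from types import MappingProxyType

def load_procedures(catalog_procs, system_procs, make_runner):
    builder = {}
    for proc in catalog_procs:
        if proc.name.startswith("@"):   # skip sysprocs left in old catalogs
            continue
        builder[proc.name] = make_runner(proc)
    for name, config in system_procs.items():
        builder[name] = make_runner(config)
    # Readers only ever see a frozen view, like ImmutableMap.Builder.build().
    return MappingProxyType(builder)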
<|file_name|>aws-autoscale-ec2-instance-modify.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
Purpose : Extract the next sequence number for an auto-scaled instance and set a new
          Name tag on the instance itself. The script runs from the new instance and
          takes its input from the command line instead of from a JSON file.
Future Plan : associate the instance with a role-based IAM profile.
Usage : python aws-autoscale-ec2-instance-modify.py -a <aws access_key> -s <aws secret key>
        -g <autoscale group used in the CloudFormation file> -r <region> -n <min_server_number>
        -c <customer> -t <uat/plab/prod> -p <appname> -d <domain name, e.g. example.net>
"""
__author__ = "kama maiti"
__copyright__ = "Copyright 2016, AWS autoscaled instance tag modification project"
__credits__ = ["kamal maiti"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "kamal maiti"
__email__ = "[email protected]"
__status__ = "production/Non-production"

import re
import argparse
import boto.ec2.autoscale
from boto.ec2 import EC2Connection
import shlex, subprocess

akey = ""
skey = ""
ag = ""
rg = ""
min_num = ""

def find_server_number(str):
    # Assuming the first match only, with three consecutive digits
    match = []
    match = re.findall(r'\d\d\d', str)
    if match:
        return match    # will return a list containing the server number
    else:
        return match    # will return a blank list

def main():
    arg_parser = argparse.ArgumentParser(description='Read autoscale instance')
    arg_parser.add_argument('-a', dest='akey', help='Provide AWS_ACCESS_KEY')
    arg_parser.add_argument('-s', dest='skey', help='Provide AWS_SECRET_ACCESS_KEY')
    arg_parser.add_argument('-g', dest='ag', help='Provide the user-provided autoscale group name')
    arg_parser.add_argument('-r', dest='rg', help='Provide region name')
    arg_parser.add_argument('-n', dest='min_num', help='Minimum server number')
    arg_parser.add_argument('-c', dest='customer', help='Name of the customer in short')
    arg_parser.add_argument('-t', dest='servertype', help='Type of the server, i.e. prod, uat or plab')
    arg_parser.add_argument('-p', dest='purpose', help='Purpose of the server')
    arg_parser.add_argument('-d', dest='domain', help='Domain name that will be appended to the server name')
    args = arg_parser.parse_args()
    # print(args)
    access_key = args.akey
    secret_key = args.skey
    region = args.rg
    group_name = str(args.ag)
    min_server_num = int(args.min_num)
    customer = str(args.customer)
    servertype = str(args.servertype)
    purpose = str(args.purpose)
    domain = str(args.domain)

    # Created two connection objects below: one for the autoscale API and another for EC2 instances.
    as_conn = boto.ec2.autoscale.connect_to_region(region, aws_access_key_id=access_key, aws_secret_access_key=secret_key)
    ec2_conn = boto.ec2.connect_to_region(region, aws_access_key_id=access_key, aws_secret_access_key=secret_key)

    try:
        groups = as_conn.get_all_groups()
        all_groups = [group.name for group in groups]
        for g in all_groups:
            if group_name in g:
                # Searching for the autoscaling group we are concerned with.
                # Note: all autoscaling group names should be unique.
                FOUND_GROUP = g
                # FOUND_GROUP saves the exact AG name. The exact AG name is not the same as
                # the user-provided name; we only check that group_name is a substring of g.
        FOUND_GROUP_WITH_DES = as_conn.get_all_groups(names=[FOUND_GROUP])[0]
        instance_ids = [i.instance_id for i in FOUND_GROUP_WITH_DES.instances]
        # reservations = ec2_conn.get_all_instances(instance_ids)
        instances = ec2_conn.get_only_instances(instance_ids)
        # instances = [i for r in reservations for i in r.instances]
        lNameTag = []
        # Collect the tags of all instances, pick out the Name tags and save them in a list.
        for i, j in enumerate(instances):
            a = instances[i].tags
            lNameTag.append(a['Name'])
        # Process each instance and collect its server number in one list.
        lServerNum = []
        if not lNameTag:
            # If the list is empty, this is the first instance, whose server number is min_server_num.
            next_number = min_server_num
        else:
            for server in lNameTag:    # iterating over each value of the "Name" tag
                if not find_server_number(server):    # if find_server_number returns an empty list
                    next_number = min_server_num
                else:
                    val = find_server_number(server)
                    # Got a value like [u'101']. The command below removes [], ' and u.
                    actual_num = str(val).strip('[]').strip('u').strip('\'')
                    lServerNum.append(int(actual_num))    # actual_num is a string; convert it to int
            if not lServerNum:    # check whether the list of server numbers is blank
                next_number = min_server_num
            else:
                maximum_number = max(lServerNum)    # used max() to find the largest number in the list
                next_number = maximum_number + 1

        # Save next_number in a file so that we can collect it and pass it to other commands.
        with open('/tmp/serverno', 'w') as fd:
            # Created a file and saved the number as a string; it is read back and used later.
            fd.write(str(next_number))
        with open('/tmp/serverno', 'r') as fd:
            num = fd.read()

        # Modify the tag of the current instance. Let's build the new tag first.
        delm = "-"    # delimiter that will be used to join multiple strings
        seq = (customer, servertype, purpose, num, domain)    # created a tuple
        new_tag = delm.join(seq)    # joined the tuple's strings
        with open('/tmp/nodename', 'w') as fd:
            fd.write(str(new_tag))

        # Extract the current instance ID using curl, i.e. curl http://169.254.169.254/latest/meta-data/instance-id<|fim▁hole|>
        cmd = 'curl http://169.254.169.254/latest/meta-data/instance-id'
        # shlex is a simple lexical analyser for splitting a large string into tokens
        args = shlex.split(cmd)
        # args will have a value like: ['curl', 'http://169.254.169.254/latest/meta-data/instance-id']
        output, error = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        # stdout and stderr are saved in variables; communicate() executes the command
        # o="i-fd96291f"    # used for testing
        cur_instance_reservation = ec2_conn.get_all_instances(instance_ids=output)
        cur_instance = cur_instance_reservation[0].instances[0]
        cur_instance.add_tag('Name', new_tag)
    finally:
        as_conn.close()
        ec2_conn.close()

if __name__ == '__main__':
    main()<|fim▁end|>
#
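The numbering logic above extracts three-digit runs from existing Name tags and takes max + 1, falling back to the configured minimum for the first instance. A condensed, self-contained sketch of that logic (not a drop-in replacement for the script; the function name is illustrative):

import re

def next_server_number(name_tags, min_server_num):
    # Collect every three-digit run found in the existing Name tags.
    numbers = [int(m) for tag in name_tags for m in re.findall(r'\d\d\d', tag)]
    # The first instance in the group starts at the configured minimum.
    return max(numbers) + 1 if numbers else min_server_num

# e.g. next_server_number(['acme-prod-web-101-example.net'], 100) -> 102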
<|file_name|>consts.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use back::abi; use llvm; use llvm::{ConstFCmp, ConstICmp, SetLinkage, SetUnnamedAddr}; use llvm::{InternalLinkage, ValueRef, Bool, True}; use middle::{check_const, const_eval, def}; use middle::const_eval::{const_int_checked_neg, const_uint_checked_neg}; use middle::const_eval::{const_int_checked_add, const_uint_checked_add}; use middle::const_eval::{const_int_checked_sub, const_uint_checked_sub}; use middle::const_eval::{const_int_checked_mul, const_uint_checked_mul}; use middle::const_eval::{const_int_checked_div, const_uint_checked_div}; use middle::const_eval::{const_int_checked_rem, const_uint_checked_rem}; use middle::const_eval::{const_int_checked_shl, const_uint_checked_shl}; use middle::const_eval::{const_int_checked_shr, const_uint_checked_shr}; use trans::{adt, closure, debuginfo, expr, inline, machine}; use trans::base::{self, push_ctxt}; use trans::common::*; use trans::declare; use trans::monomorphize; use trans::type_::Type; use trans::type_of; use middle::subst::Substs; use middle::ty::{self, Ty}; use util::ppaux::{Repr, ty_to_string}; use std::iter::repeat; use libc::c_uint; use syntax::{ast, ast_util}; use syntax::parse::token; use syntax::ptr::P; pub fn const_lit(cx: &CrateContext, e: &ast::Expr, lit: &ast::Lit) -> ValueRef { let _icx = push_ctxt("trans_lit"); debug!("const_lit: {:?}", lit); match lit.node { ast::LitByte(b) => C_integral(Type::uint_from_ty(cx, ast::TyU8), b as u64, false), ast::LitChar(i) => C_integral(Type::char(cx), i as u64, false), ast::LitInt(i, ast::SignedIntLit(t, _)) => { C_integral(Type::int_from_ty(cx, t), i, true) } ast::LitInt(u, ast::UnsignedIntLit(t)) => { C_integral(Type::uint_from_ty(cx, t), u, false) } ast::LitInt(i, ast::UnsuffixedIntLit(_)) => { let lit_int_ty = ty::node_id_to_type(cx.tcx(), e.id); match lit_int_ty.sty { ty::ty_int(t) => { C_integral(Type::int_from_ty(cx, t), i as u64, true) } ty::ty_uint(t) => { C_integral(Type::uint_from_ty(cx, t), i as u64, false) } _ => cx.sess().span_bug(lit.span, &format!("integer literal has type {} (expected int \ or usize)", ty_to_string(cx.tcx(), lit_int_ty))) } } ast::LitFloat(ref fs, t) => { C_floating(&fs, Type::float_from_ty(cx, t)) } ast::LitFloatUnsuffixed(ref fs) => { let lit_float_ty = ty::node_id_to_type(cx.tcx(), e.id); match lit_float_ty.sty { ty::ty_float(t) => { C_floating(&fs, Type::float_from_ty(cx, t)) } _ => { cx.sess().span_bug(lit.span, "floating point literal doesn't have the right type"); } } } ast::LitBool(b) => C_bool(cx, b), ast::LitStr(ref s, _) => C_str_slice(cx, (*s).clone()), ast::LitBinary(ref data) => { addr_of(cx, C_bytes(cx, &data[..]), "binary") } } } pub fn ptrcast(val: ValueRef, ty: Type) -> ValueRef { unsafe { llvm::LLVMConstPointerCast(val, ty.to_ref()) } } fn addr_of_mut(ccx: &CrateContext, cv: ValueRef, kind: &str) -> ValueRef { unsafe { // FIXME: this totally needs a better name generation scheme, perhaps a simple global // counter? Also most other uses of gensym in trans. 
let gsym = token::gensym("_"); let name = format!("{}{}", kind, gsym.usize()); let gv = declare::define_global(ccx, &name[..], val_ty(cv)).unwrap_or_else(||{ ccx.sess().bug(&format!("symbol `{}` is already defined", name)); }); llvm::LLVMSetInitializer(gv, cv); SetLinkage(gv, InternalLinkage); SetUnnamedAddr(gv, true); gv } } pub fn addr_of(ccx: &CrateContext, cv: ValueRef, kind: &str) -> ValueRef { match ccx.const_globals().borrow().get(&cv) { Some(&gv) => return gv, None => {} } let gv = addr_of_mut(ccx, cv, kind); unsafe { llvm::LLVMSetGlobalConstant(gv, True); } ccx.const_globals().borrow_mut().insert(cv, gv); gv } fn const_deref_ptr(cx: &CrateContext, v: ValueRef) -> ValueRef { let v = match cx.const_unsized().borrow().get(&v) { Some(&v) => v, None => v }; unsafe { llvm::LLVMGetInitializer(v) } } fn const_deref<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, v: ValueRef, ty: Ty<'tcx>) -> (ValueRef, Ty<'tcx>) { match ty::deref(ty, true) { Some(mt) => { if type_is_sized(cx.tcx(), mt.ty) { (const_deref_ptr(cx, v), mt.ty) } else { // Derefing a fat pointer does not change the representation, // just the type to the unsized contents. (v, mt.ty) } } None => { cx.sess().bug(&format!("unexpected dereferenceable type {}", ty_to_string(cx.tcx(), ty))) } } } pub fn get_const_expr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: ast::DefId, ref_expr: &ast::Expr) -> &'tcx ast::Expr { let def_id = inline::maybe_instantiate_inline(ccx, def_id); if def_id.krate != ast::LOCAL_CRATE { ccx.sess().span_bug(ref_expr.span, "cross crate constant could not be inlined"); } let item = ccx.tcx().map.expect_item(def_id.node); if let ast::ItemConst(_, ref expr) = item.node { &**expr } else { ccx.sess().span_bug(ref_expr.span, &format!("get_const_expr given non-constant item {}", item.repr(ccx.tcx()))); } } fn get_const_val(ccx: &CrateContext, def_id: ast::DefId, ref_expr: &ast::Expr) -> ValueRef { let expr = get_const_expr(ccx, def_id, ref_expr); let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty()); get_const_expr_as_global(ccx, expr, check_const::PURE_CONST, empty_substs) } pub fn get_const_expr_as_global<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, expr: &ast::Expr, qualif: check_const::ConstQualif, param_substs: &'tcx Substs<'tcx>) -> ValueRef { // Special-case constants to cache a common global for all uses. match expr.node { ast::ExprPath(..) => { let def = ccx.tcx().def_map.borrow().get(&expr.id).unwrap().full_def(); match def { def::DefConst(def_id) => { if !ccx.tcx().adjustments.borrow().contains_key(&expr.id) { return get_const_val(ccx, def_id, expr); } } _ => {} } } _ => {} } let key = (expr.id, param_substs); match ccx.const_values().borrow().get(&key) { Some(&val) => return val, None => {} } let val = if qualif.intersects(check_const::NON_STATIC_BORROWS) { // Avoid autorefs as they would create global instead of stack // references, even when only the latter are correct. 
let ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &ty::expr_ty(ccx.tcx(), expr)); const_expr_unadjusted(ccx, expr, ty, param_substs) } else { const_expr(ccx, expr, param_substs).0 }; // boolean SSA values are i1, but they have to be stored in i8 slots, // otherwise some LLVM optimization passes don't work as expected let val = unsafe { if llvm::LLVMTypeOf(val) == Type::i1(ccx).to_ref() { llvm::LLVMConstZExt(val, Type::i8(ccx).to_ref()) } else { val } }; let lvalue = addr_of(ccx, val, "const"); ccx.const_values().borrow_mut().insert(key, lvalue); lvalue } pub fn const_expr<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, e: &ast::Expr, param_substs: &'tcx Substs<'tcx>) -> (ValueRef, Ty<'tcx>) { let ety = monomorphize::apply_param_substs(cx.tcx(), param_substs, &ty::expr_ty(cx.tcx(), e)); let llconst = const_expr_unadjusted(cx, e, ety, param_substs); let mut llconst = llconst; let mut ety_adjusted = monomorphize::apply_param_substs(cx.tcx(), param_substs, &ty::expr_ty_adjusted(cx.tcx(), e)); let opt_adj = cx.tcx().adjustments.borrow().get(&e.id).cloned(); match opt_adj { Some(ty::AdjustReifyFnPointer(_def_id)) => { // FIXME(#19925) once fn item types are // zero-sized, we'll need to do something here } Some(ty::AdjustUnsafeFnPointer) => { // purely a type-level thing } Some(ty::AdjustDerefRef(adj)) => { let mut ty = ety; // Save the last autoderef in case we can avoid it. if adj.autoderefs > 0 { for _ in 0..adj.autoderefs-1 { let (dv, dt) = const_deref(cx, llconst, ty); llconst = dv; ty = dt; } } let second_autoref = match adj.autoref { None => { let (dv, dt) = const_deref(cx, llconst, ty); llconst = dv; // If we derefed a fat pointer then we will have an // open type here. So we need to update the type with // the one returned from const_deref. ety_adjusted = dt; None } Some(ty::AutoUnsafe(_, opt_autoref)) | Some(ty::AutoPtr(_, _, opt_autoref)) => { if adj.autoderefs == 0 { // Don't copy data to do a deref+ref // (i.e., skip the last auto-deref). llconst = addr_of(cx, llconst, "autoref"); } else { // Seeing as we are deref'ing here and take a reference // again to make the pointer part of the far pointer below, // we just skip the whole thing. We still need the type // though. This works even if we don't need to deref // because of byref semantics. Note that this is not just // an optimisation, it is necessary for mutable vectors to // work properly. 
ty = match ty::deref(ty, true) { Some(mt) => mt.ty, None => { cx.sess().bug(&format!("unexpected dereferenceable type {}", ty_to_string(cx.tcx(), ty))) } } } opt_autoref } Some(autoref) => { cx.sess().span_bug(e.span, &format!("unimplemented const first autoref {:?}", autoref)) } }; match second_autoref { None => {} Some(box ty::AutoUnsafe(_, None)) | Some(box ty::AutoPtr(_, _, None)) => { llconst = addr_of(cx, llconst, "autoref"); } Some(box ty::AutoUnsize(ref k)) => { let info = expr::unsized_info( cx, k, e.id, ty, param_substs, || const_get_elt(cx, llconst, &[abi::FAT_PTR_EXTRA as u32])); let unsized_ty = ty::unsize_ty(cx.tcx(), ty, k, e.span); let ptr_ty = type_of::in_memory_type_of(cx, unsized_ty).ptr_to(); let base = ptrcast(llconst, ptr_ty); let prev_const = cx.const_unsized().borrow_mut() .insert(base, llconst); assert!(prev_const.is_none() || prev_const == Some(llconst)); assert_eq!(abi::FAT_PTR_ADDR, 0); assert_eq!(abi::FAT_PTR_EXTRA, 1); llconst = C_struct(cx, &[base, info], false); } Some(autoref) => { cx.sess().span_bug(e.span, &format!("unimplemented const second autoref {:?}", autoref)) } } } None => {} }; let llty = type_of::sizing_type_of(cx, ety_adjusted); let csize = machine::llsize_of_alloc(cx, val_ty(llconst)); let tsize = machine::llsize_of_alloc(cx, llty); if csize != tsize { cx.sess().abort_if_errors(); unsafe { // FIXME these values could use some context llvm::LLVMDumpValue(llconst); llvm::LLVMDumpValue(C_undef(llty)); } cx.sess().bug(&format!("const {} of type {} has size {} instead of {}", e.repr(cx.tcx()), ty_to_string(cx.tcx(), ety_adjusted), csize, tsize)); } (llconst, ety_adjusted) } fn check_unary_expr_validity(cx: &CrateContext, e: &ast::Expr, t: Ty, te: ValueRef) { // The only kind of unary expression that we check for validity // here is `-expr`, to check if it "overflows" (e.g. `-i32::MIN`). if let ast::ExprUnary(ast::UnNeg, ref inner_e) = e.node { // An unfortunate special case: we parse e.g. -128 as a // negation of the literal 128, which means if we're expecting // a i8 (or if it was already suffixed, e.g. `-128_i8`), then // 128 will have already overflowed to -128, and so then the // constant evaluator thinks we're trying to negate -128. // // Catch this up front by looking for ExprLit directly, // and just accepting it. if let ast::ExprLit(_) = inner_e.node { return; } let result = match t.sty { ty::ty_int(int_type) => { let input = match const_to_opt_int(te) { Some(v) => v, None => return, }; const_int_checked_neg( input, e, Some(const_eval::IntTy::from(cx.tcx(), int_type))) } ty::ty_uint(uint_type) => { let input = match const_to_opt_uint(te) { Some(v) => v, None => return, }; const_uint_checked_neg( input, e, Some(const_eval::UintTy::from(cx.tcx(), uint_type))) } _ => return, }; // We do not actually care about a successful result. 
if let Err(err) = result { cx.tcx().sess.span_err(e.span, &err.description()); } } } fn check_binary_expr_validity(cx: &CrateContext, e: &ast::Expr, t: Ty, te1: ValueRef, te2: ValueRef) { let b = if let ast::ExprBinary(b, _, _) = e.node { b } else { return }; let result = match t.sty { ty::ty_int(int_type) => { let (lhs, rhs) = match (const_to_opt_int(te1), const_to_opt_int(te2)) { (Some(v1), Some(v2)) => (v1, v2), _ => return, }; let opt_ety = Some(const_eval::IntTy::from(cx.tcx(), int_type)); match b.node { ast::BiAdd => const_int_checked_add(lhs, rhs, e, opt_ety), ast::BiSub => const_int_checked_sub(lhs, rhs, e, opt_ety), ast::BiMul => const_int_checked_mul(lhs, rhs, e, opt_ety), ast::BiDiv => const_int_checked_div(lhs, rhs, e, opt_ety), ast::BiRem => const_int_checked_rem(lhs, rhs, e, opt_ety), ast::BiShl => const_int_checked_shl(lhs, rhs, e, opt_ety), ast::BiShr => const_int_checked_shr(lhs, rhs, e, opt_ety), _ => return, } } ty::ty_uint(uint_type) => { let (lhs, rhs) = match (const_to_opt_uint(te1), const_to_opt_uint(te2)) { (Some(v1), Some(v2)) => (v1, v2), _ => return, }; let opt_ety = Some(const_eval::UintTy::from(cx.tcx(), uint_type)); match b.node { ast::BiAdd => const_uint_checked_add(lhs, rhs, e, opt_ety), ast::BiSub => const_uint_checked_sub(lhs, rhs, e, opt_ety), ast::BiMul => const_uint_checked_mul(lhs, rhs, e, opt_ety), ast::BiDiv => const_uint_checked_div(lhs, rhs, e, opt_ety), ast::BiRem => const_uint_checked_rem(lhs, rhs, e, opt_ety), ast::BiShl => const_uint_checked_shl(lhs, rhs, e, opt_ety), ast::BiShr => const_uint_checked_shr(lhs, rhs, e, opt_ety), _ => return, } } _ => return, }; // We do not actually care about a successful result. if let Err(err) = result { cx.tcx().sess.span_err(e.span, &err.description()); } } fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, e: &ast::Expr, ety: Ty<'tcx>, param_substs: &'tcx Substs<'tcx>) -> ValueRef { debug!("const_expr_unadjusted(e={}, ety={}, param_substs={})", e.repr(cx.tcx()), ety.repr(cx.tcx()), param_substs.repr(cx.tcx())); let map_list = |exprs: &[P<ast::Expr>]| { exprs.iter().map(|e| const_expr(cx, &**e, param_substs).0) .fold(Vec::new(), |mut l, val| { l.push(val); l }) }; unsafe { let _icx = push_ctxt("const_expr"); match e.node { ast::ExprLit(ref lit) => { const_lit(cx, e, &**lit) } ast::ExprBinary(b, ref e1, ref e2) => { /* Neither type is bottom, and we expect them to be unified * already, so the following is safe. 
*/ let (te1, ty) = const_expr(cx, &**e1, param_substs); debug!("const_expr_unadjusted: te1={}, ty={}", cx.tn().val_to_string(te1), ty.repr(cx.tcx())); let is_simd = ty::type_is_simd(cx.tcx(), ty); let intype = if is_simd { ty::simd_type(cx.tcx(), ty) } else { ty }; let is_float = ty::type_is_fp(intype); let signed = ty::type_is_signed(intype); let (te2, _) = const_expr(cx, &**e2, param_substs); check_binary_expr_validity(cx, e, ty, te1, te2); match b.node { ast::BiAdd => { if is_float { llvm::LLVMConstFAdd(te1, te2) } else { llvm::LLVMConstAdd(te1, te2) } } ast::BiSub => { if is_float { llvm::LLVMConstFSub(te1, te2) } else { llvm::LLVMConstSub(te1, te2) } } ast::BiMul => { if is_float { llvm::LLVMConstFMul(te1, te2) } else { llvm::LLVMConstMul(te1, te2) } } ast::BiDiv => { if is_float { llvm::LLVMConstFDiv(te1, te2) } else if signed { llvm::LLVMConstSDiv(te1, te2) } else { llvm::LLVMConstUDiv(te1, te2) } } ast::BiRem => { if is_float { llvm::LLVMConstFRem(te1, te2) } else if signed { llvm::LLVMConstSRem(te1, te2) } else { llvm::LLVMConstURem(te1, te2) } } ast::BiAnd => llvm::LLVMConstAnd(te1, te2), ast::BiOr => llvm::LLVMConstOr(te1, te2), ast::BiBitXor => llvm::LLVMConstXor(te1, te2), ast::BiBitAnd => llvm::LLVMConstAnd(te1, te2), ast::BiBitOr => llvm::LLVMConstOr(te1, te2), ast::BiShl => { let te2 = base::cast_shift_const_rhs(b.node, te1, te2); llvm::LLVMConstShl(te1, te2) } ast::BiShr => { let te2 = base::cast_shift_const_rhs(b.node, te1, te2); if signed { llvm::LLVMConstAShr(te1, te2) } else { llvm::LLVMConstLShr(te1, te2) } } ast::BiEq | ast::BiNe | ast::BiLt | ast::BiLe | ast::BiGt | ast::BiGe => { if is_float { let cmp = base::bin_op_to_fcmp_predicate(cx, b.node); ConstFCmp(cmp, te1, te2) } else { let cmp = base::bin_op_to_icmp_predicate(cx, b.node, signed); let bool_val = ConstICmp(cmp, te1, te2); if is_simd { // LLVM outputs an `< size x i1 >`, so we need to perform // a sign extension to get the correctly sized type. 
llvm::LLVMConstIntCast(bool_val, val_ty(te1).to_ref(), True) } else { bool_val } } } } }, ast::ExprUnary(u, ref inner_e) => { let (te, ty) = const_expr(cx, &**inner_e, param_substs); check_unary_expr_validity(cx, e, ty, te); let is_float = ty::type_is_fp(ty); match u { ast::UnUniq | ast::UnDeref => { const_deref(cx, te, ty).0 } ast::UnNot => llvm::LLVMConstNot(te), ast::UnNeg => { if is_float { llvm::LLVMConstFNeg(te) } else { llvm::LLVMConstNeg(te) } } } } ast::ExprField(ref base, field) => { let (bv, bt) = const_expr(cx, &**base, param_substs); let brepr = adt::represent_type(cx, bt); expr::with_field_tys(cx.tcx(), bt, None, |discr, field_tys| { let ix = ty::field_idx_strict(cx.tcx(), field.node.name, field_tys); adt::const_get_field(cx, &*brepr, bv, discr, ix) }) } ast::ExprTupField(ref base, idx) => { let (bv, bt) = const_expr(cx, &**base, param_substs); let brepr = adt::represent_type(cx, bt); expr::with_field_tys(cx.tcx(), bt, None, |discr, _| { adt::const_get_field(cx, &*brepr, bv, discr, idx.node) }) } ast::ExprIndex(ref base, ref index) => { let (bv, bt) = const_expr(cx, &**base, param_substs); let iv = match const_eval::eval_const_expr_partial(cx.tcx(), &**index, None) { Ok(const_eval::const_int(i)) => i as u64, Ok(const_eval::const_uint(u)) => u, _ => cx.sess().span_bug(index.span, "index is not an integer-constant expression") }; let (arr, len) = match bt.sty { ty::ty_vec(_, Some(u)) => (bv, C_uint(cx, u)), ty::ty_vec(_, None) | ty::ty_str => { let e1 = const_get_elt(cx, bv, &[0]); (const_deref_ptr(cx, e1), const_get_elt(cx, bv, &[1])) } ty::ty_rptr(_, mt) => match mt.ty.sty { ty::ty_vec(_, Some(u)) => { (const_deref_ptr(cx, bv), C_uint(cx, u)) }, _ => cx.sess().span_bug(base.span, &format!("index-expr base must be a vector \ or string type, found {}", ty_to_string(cx.tcx(), bt))) }, _ => cx.sess().span_bug(base.span, &format!("index-expr base must be a vector \ or string type, found {}", ty_to_string(cx.tcx(), bt))) }; let len = llvm::LLVMConstIntGetZExtValue(len) as u64; let len = match bt.sty { ty::ty_uniq(ty) | ty::ty_rptr(_, ty::mt{ty, ..}) => match ty.sty { ty::ty_str => { assert!(len > 0); len - 1 } _ => len }, _ => len }; if iv >= len { // FIXME #3170: report this earlier on in the const-eval // pass. Reporting here is a bit late. 
cx.sess().span_err(e.span, "const index-expr is out of bounds"); C_undef(type_of::type_of(cx, bt).element_type()) } else { const_get_elt(cx, arr, &[iv as c_uint]) } } ast::ExprCast(ref base, _) => { let llty = type_of::type_of(cx, ety); let (v, basety) = const_expr(cx, &**base, param_substs); if expr::cast_is_noop(basety, ety) { return v; } match (expr::cast_type_kind(cx.tcx(), basety), expr::cast_type_kind(cx.tcx(), ety)) { (expr::cast_integral, expr::cast_integral) => { let s = ty::type_is_signed(basety) as Bool; llvm::LLVMConstIntCast(v, llty.to_ref(), s) } (expr::cast_integral, expr::cast_float) => { if ty::type_is_signed(basety) { llvm::LLVMConstSIToFP(v, llty.to_ref()) } else { llvm::LLVMConstUIToFP(v, llty.to_ref()) } } (expr::cast_float, expr::cast_float) => { llvm::LLVMConstFPCast(v, llty.to_ref()) } (expr::cast_float, expr::cast_integral) => { if ty::type_is_signed(ety) { llvm::LLVMConstFPToSI(v, llty.to_ref()) } else { llvm::LLVMConstFPToUI(v, llty.to_ref()) } } (expr::cast_enum, expr::cast_integral) => { let repr = adt::represent_type(cx, basety); let discr = adt::const_get_discrim(cx, &*repr, v); let iv = C_integral(cx.int_type(), discr, false); let ety_cast = expr::cast_type_kind(cx.tcx(), ety); match ety_cast { expr::cast_integral => { let s = ty::type_is_signed(ety) as Bool; llvm::LLVMConstIntCast(iv, llty.to_ref(), s) } _ => cx.sess().bug("enum cast destination is not \ integral") } } (expr::cast_pointer, expr::cast_pointer) => { ptrcast(v, llty) } (expr::cast_integral, expr::cast_pointer) => { llvm::LLVMConstIntToPtr(v, llty.to_ref()) } (expr::cast_pointer, expr::cast_integral) => { llvm::LLVMConstPtrToInt(v, llty.to_ref()) } _ => { cx.sess().impossible_case(e.span, "bad combination of types for cast") } } } ast::ExprAddrOf(ast::MutImmutable, ref sub) => { // If this is the address of some static, then we need to return // the actual address of the static itself (short circuit the rest // of const eval). let mut cur = sub; loop { match cur.node { ast::ExprParen(ref sub) => cur = sub, ast::ExprBlock(ref blk) => { if let Some(ref sub) = blk.expr { cur = sub; } else { break; } } _ => break, } } let opt_def = cx.tcx().def_map.borrow().get(&cur.id).map(|d| d.full_def()); if let Some(def::DefStatic(def_id, _)) = opt_def { get_static_val(cx, def_id, ety) } else { // If this isn't the address of a static, then keep going through // normal constant evaluation. 
let (v, _) = const_expr(cx, &**sub, param_substs); addr_of(cx, v, "ref") } } ast::ExprAddrOf(ast::MutMutable, ref sub) => { let (v, _) = const_expr(cx, &**sub, param_substs); addr_of_mut(cx, v, "ref_mut_slice") } ast::ExprTup(ref es) => { let repr = adt::represent_type(cx, ety); let vals = map_list(&es[..]); adt::trans_const(cx, &*repr, 0, &vals[..]) } ast::ExprStruct(_, ref fs, ref base_opt) => { let repr = adt::represent_type(cx, ety); let base_val = match *base_opt { Some(ref base) => Some(const_expr(cx, &**base, param_substs)), None => None }; expr::with_field_tys(cx.tcx(), ety, Some(e.id), |discr, field_tys| { let cs = field_tys.iter().enumerate() .map(|(ix, &field_ty)| { match fs.iter().find(|f| field_ty.name == f.ident.node.name) { Some(ref f) => const_expr(cx, &*f.expr, param_substs).0, None => { match base_val { Some((bv, _)) => { adt::const_get_field(cx, &*repr, bv, discr, ix) } None => { cx.sess().span_bug(e.span, "missing struct field") } } } } }).collect::<Vec<_>>(); if ty::type_is_simd(cx.tcx(), ety) { C_vector(&cs[..]) } else { adt::trans_const(cx, &*repr, discr, &cs[..]) } }) } ast::ExprVec(ref es) => { let unit_ty = ty::sequence_element_type(cx.tcx(), ety); let llunitty = type_of::type_of(cx, unit_ty); let vs = es.iter().map(|e| const_expr(cx, &**e, param_substs).0) .collect::<Vec<_>>(); // If the vector contains enums, an LLVM array won't work. if vs.iter().any(|vi| val_ty(*vi) != llunitty) { C_struct(cx, &vs[..], false) } else { C_array(llunitty, &vs[..]) } } ast::ExprRepeat(ref elem, ref count) => { let unit_ty = ty::sequence_element_type(cx.tcx(), ety); let llunitty = type_of::type_of(cx, unit_ty); let n = ty::eval_repeat_count(cx.tcx(), count); let unit_val = const_expr(cx, &**elem, param_substs).0; let vs: Vec<_> = repeat(unit_val).take(n).collect(); if val_ty(unit_val) != llunitty { C_struct(cx, &vs[..], false) } else { C_array(llunitty, &vs[..]) } } ast::ExprPath(..) => { let def = cx.tcx().def_map.borrow().get(&e.id).unwrap().full_def(); match def { def::DefFn(..) | def::DefMethod(..) => { expr::trans_def_fn_unadjusted(cx, e, def, param_substs).val } def::DefConst(def_id) => { const_deref_ptr(cx, get_const_val(cx, def_id, e)) } def::DefVariant(enum_did, variant_did, _) => { let vinfo = ty::enum_variant_with_id(cx.tcx(), enum_did, variant_did); if vinfo.args.len() > 0 { // N-ary variant. expr::trans_def_fn_unadjusted(cx, e, def, param_substs).val } else { // Nullary variant. let repr = adt::represent_type(cx, ety); adt::trans_const(cx, &*repr, vinfo.disr_val, &[]) } } def::DefStruct(_) => { if let ty::ty_bare_fn(..) = ety.sty { // Tuple struct. expr::trans_def_fn_unadjusted(cx, e, def, param_substs).val } else { // Unit struct. 
C_null(type_of::type_of(cx, ety)) } } _ => { cx.sess().span_bug(e.span, "expected a const, fn, struct, \ or variant def") } } } ast::ExprCall(ref callee, ref args) => { let opt_def = cx.tcx().def_map.borrow().get(&callee.id).map(|d| d.full_def()); let arg_vals = map_list(&args[..]); match opt_def { Some(def::DefStruct(_)) => { if ty::type_is_simd(cx.tcx(), ety) { C_vector(&arg_vals[..]) } else { let repr = adt::represent_type(cx, ety); adt::trans_const(cx, &*repr, 0, &arg_vals[..]) } } Some(def::DefVariant(enum_did, variant_did, _)) => { let repr = adt::represent_type(cx, ety); let vinfo = ty::enum_variant_with_id(cx.tcx(), enum_did, variant_did); adt::trans_const(cx, &*repr, vinfo.disr_val, &arg_vals[..]) } _ => cx.sess().span_bug(e.span, "expected a struct or variant def") } } ast::ExprParen(ref e) => const_expr(cx, &**e, param_substs).0, ast::ExprBlock(ref block) => { match block.expr { Some(ref expr) => const_expr(cx, &**expr, param_substs).0, None => C_nil(cx) } } ast::ExprClosure(_, ref decl, ref body) => { closure::trans_closure_expr(closure::Dest::Ignore(cx), &**decl, &**body, e.id, param_substs); C_null(type_of::type_of(cx, ety)) } _ => cx.sess().span_bug(e.span, "bad constant expression type in consts::const_expr") } } } <|fim▁hole|> // At this point, get_item_val has already translated the // constant's initializer to determine its LLVM type. let v = ccx.static_values().borrow().get(&id).unwrap().clone(); // boolean SSA values are i1, but they have to be stored in i8 slots, // otherwise some LLVM optimization passes don't work as expected let v = if llvm::LLVMTypeOf(v) == Type::i1(ccx).to_ref() { llvm::LLVMConstZExt(v, Type::i8(ccx).to_ref()) } else { v }; llvm::LLVMSetInitializer(g, v); // As an optimization, all shared statics which do not have interior // mutability are placed into read-only memory. if m != ast::MutMutable { let node_ty = ty::node_id_to_type(ccx.tcx(), id); let tcontents = ty::type_contents(ccx.tcx(), node_ty); if !tcontents.interior_unsafe() { llvm::LLVMSetGlobalConstant(g, True); } } debuginfo::create_global_var_metadata(ccx, id, g); g } } fn get_static_val<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, did: ast::DefId, ty: Ty<'tcx>) -> ValueRef { if ast_util::is_local(did) { return base::get_item_val(ccx, did.node) } base::trans_external_path(ccx, did, ty) }<|fim▁end|>
pub fn trans_static(ccx: &CrateContext, m: ast::Mutability, id: ast::NodeId) -> ValueRef {
    unsafe {
        let _icx = push_ctxt("trans_static");
        let g = base::get_item_val(ccx, id);
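The `addr_of` helper near the top of consts.rs reuses one read-only LLVM global per distinct constant value by consulting the `const_globals` cache before defining anything new. A minimal Python sketch of that memoization shape; `_define_global` is a stand-in for the LLVM calls (allocate, initialize, mark constant), not a real API:

_const_globals = {}
_next_id = [0]

def _define_global(value):
    # Stand-in for LLVM global creation (internal linkage, unnamed_addr,
    # marked constant); here it just mints a fresh handle.
    _next_id[0] += 1
    return "const%d" % _next_id[0]

def addr_of(value):
    # Return the cached global for this constant value, creating it once.
    try:
        return _const_globals[value]
    except KeyError:
        gv = _const_globals[value] = _define_global(value)
        return gv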
<|file_name|>qemu_vm.py<|end_file_name|><|fim▁begin|>""" Utility classes and functions to handle Virtual Machine creation using qemu. :copyright: 2008-2009 Red Hat Inc. """ import time import os import logging import fcntl import re import commands from autotest.client.shared import error from autotest.client import utils import utils_misc import virt_vm import test_setup import storage import qemu_monitor import aexpect import qemu_virtio_port import remote import data_dir import utils_net import qemu_devices class QemuSegFaultError(virt_vm.VMError): def __init__(self, crash_message): virt_vm.VMError.__init__(self, crash_message) self.crash_message = crash_message def __str__(self): return ("Qemu crashed: %s" % self.crash_message) class VMMigrateProtoUnsupportedError(virt_vm.VMMigrateProtoUnknownError): """ When QEMU tells us it doesn't know about a given migration protocol. This usually happens when we're testing older QEMU. It makes sense to skip the test in this situation. """ def __init__(self, protocol, output): self.protocol = protocol self.output = output def __str__(self): return ("QEMU reports it doesn't know migration protocol '%s'. " "QEMU output: %s" % (self.protocol, self.output)) class KVMInternalError(virt_vm.VMError): pass class ImageUnbootableError(virt_vm.VMError): def __init__(self, name): virt_vm.VMError.__init__(self, name) self.name = name def __str__(self): return ("VM '%s' can't bootup from image," " check your boot disk image file." % self.name) class VM(virt_vm.BaseVM): """ This class handles all basic VM operations. """ MIGRATION_PROTOS = ['rdma', 'x-rdma', 'tcp', 'unix', 'exec', 'fd'] # By default we inherit all timeouts from the base VM class except... CLOSE_SESSION_TIMEOUT = 30 # Because we've seen qemu taking longer than 5 seconds to initialize # itself completely, including creating the monitor sockets files # which are used on create(), this timeout is considerably larger # than the one on the base vm class CREATE_TIMEOUT = 20 def __init__(self, name, params, root_dir, address_cache, state=None): """ Initialize the object and set a few attributes. :param name: The name of the object :param params: A dict containing VM params (see method make_qemu_command for a full description) :param root_dir: Base directory for relative filenames :param address_cache: A dict that maps MAC addresses to IP addresses :param state: If provided, use this as self.__dict__ """ if state: self.__dict__ = state else: self.process = None self.serial_console = None self.redirs = {} self.spice_options = {} self.vnc_port = 5900 self.monitors = [] self.virtio_ports = [] # virtio_console / virtio_serialport self.pci_assignable = None self.uuid = None self.vcpu_threads = [] self.vhost_threads = [] self.devices = None self.name = name self.params = params self.root_dir = root_dir self.address_cache = address_cache self.index_in_use = {} # This usb_dev_dict member stores usb controller and device info, # It's dict, each key is an id of usb controller, # and key's value is a list, contains usb devices' ids which # attach to this controller. # A filled usb_dev_dict may look like: # { "usb1" : ["stg1", "stg2", "stg3", "stg4", "stg5", "stg6"], # "usb2" : ["stg7", "stg8"], # ... # } # This structure can used in usb hotplug/unplug test. 
self.usb_dev_dict = {} self.logs = {} self.logsessions = {} self.driver_type = 'qemu' self.params['driver_type_' + self.name] = self.driver_type # virtnet init depends on vm_type/driver_type being set w/in params super(VM, self).__init__(name, params) # un-overwrite instance attribute, virtnet db lookups depend on this if state: self.instance = state['instance'] self.qemu_command = '' self.start_time = 0.0 def verify_alive(self): """ Make sure the VM is alive and that the main monitor is responsive. :raise VMDeadError: If the VM is dead :raise: Various monitor exceptions if the monitor is unresponsive """ self.verify_disk_image_bootable() self.verify_userspace_crash() self.verify_kernel_crash() self.verify_illegal_instruction() self.verify_kvm_internal_error() try: virt_vm.BaseVM.verify_alive(self) if self.monitor: self.monitor.verify_responsive() except virt_vm.VMDeadError: raise virt_vm.VMDeadError(self.process.get_status(), self.process.get_output()) def is_alive(self): """ Return True if the VM is alive and its monitor is responsive. """ return not self.is_dead() and (not self.monitor or self.monitor.is_responsive()) def is_dead(self): """ Return True if the qemu process is dead. """ return not self.process or not self.process.is_alive() def is_paused(self): """ Return True if the qemu process is paused ('stop'ed) """ if self.is_dead(): return False try: self.verify_status("paused") return True except virt_vm.VMStatusError: return False def verify_status(self, status): """ Check VM status :param status: Optional VM status, 'running' or 'paused' :raise VMStatusError: If the VM status is not same as parameter """ if not self.monitor.verify_status(status): raise virt_vm.VMStatusError('Unexpected VM status: "%s"' % self.monitor.get_status()) def verify_userspace_crash(self): """ Verify if the userspace component (qemu) crashed. """ if "(core dumped)" in self.process.get_output(): for line in self.process.get_output().splitlines(): if "(core dumped)" in line: raise QemuSegFaultError(line) def verify_kvm_internal_error(self): """ Verify KVM internal error. """ if "KVM internal error." in self.process.get_output(): out = self.process.get_output() out = out[out.find("KVM internal error."):] raise KVMInternalError(out) def verify_disk_image_bootable(self): if self.params.get("image_verify_bootable") == "yes": pattern = self.params.get("image_unbootable_pattern") if not pattern: raise virt_vm.VMConfigMissingError(self.name, "image_unbootable_pattern") try: seabios_log = self.logsessions['seabios'].get_output() if re.search(pattern, seabios_log, re.S): logging.error("Can't boot guest from image.") # Set 'shutdown_command' to None to force autotest # shuts down guest with monitor. self.params["shutdown_command"] = None raise ImageUnbootableError(self.name) except KeyError: pass def clone(self, name=None, params=None, root_dir=None, address_cache=None, copy_state=False): """ Return a clone of the VM object with optionally modified parameters. The clone is initially not alive and needs to be started using create(). Any parameters not passed to this function are copied from the source VM. :param name: Optional new VM name :param params: Optional new VM creation parameters :param root_dir: Optional new base directory for relative filenames :param address_cache: A dict that maps MAC addresses to IP addresses :param copy_state: If True, copy the original VM's state to the clone. Mainly useful for make_qemu_command(). 
""" if name is None: name = self.name if params is None: params = self.params.copy() if root_dir is None: root_dir = self.root_dir if address_cache is None: address_cache = self.address_cache if copy_state: state = self.__dict__.copy() else: state = None return VM(name, params, root_dir, address_cache, state) def get_serial_console_filename(self, name=None): """ Return the serial console filename. :param name: The serial port name. """ if name: return "/tmp/serial-%s-%s" % (name, self.instance) return "/tmp/serial-%s" % self.instance def get_serial_console_filenames(self): """ Return a list of all serial console filenames (as specified in the VM's params). """ return [self.get_serial_console_filename(_) for _ in self.params.objects("isa_serials")] def make_create_command(self, name=None, params=None, root_dir=None): """ Generate a qemu command line. All parameters are optional. If a parameter is not supplied, the corresponding value stored in the<|fim▁hole|> :param name: The name of the object :param params: A dict containing VM params :param root_dir: Base directory for relative filenames :note: The params dict should contain: mem -- memory size in MBs cdrom -- ISO filename to use with the qemu -cdrom parameter extra_params -- a string to append to the qemu command shell_port -- port of the remote shell daemon on the guest (SSH, Telnet or the home-made Remote Shell Server) shell_client -- client program to use for connecting to the remote shell daemon on the guest (ssh, telnet or nc) x11_display -- if specified, the DISPLAY environment variable will be be set to this value for the qemu process (useful for SDL rendering) images -- a list of image object names, separated by spaces nics -- a list of NIC object names, separated by spaces For each image in images: drive_format -- string to pass as 'if' parameter for this image (e.g. ide, scsi) image_snapshot -- if yes, pass 'snapshot=on' to qemu for this image image_boot -- if yes, pass 'boot=on' to qemu for this image In addition, all parameters required by get_image_filename. For each NIC in nics: nic_model -- string to pass as 'model' parameter for this NIC (e.g. e1000) """ # Helper function for command line option wrappers def _add_option(option, value, option_type=None, first=False): """ Add option to qemu parameters. """ if first: fmt = " %s=%s" else: fmt = ",%s=%s" if option_type is bool: # Decode value for bool parameter (supports True, False, None) if value in ['yes', 'on', True]: return fmt % (option, "on") elif value in ['no', 'off', False]: return fmt % (option, "off") elif value and isinstance(value, bool): return fmt % (option, "on") elif value and isinstance(value, str): # "EMPTY_STRING" and "NULL_STRING" is used for testing illegal # foramt of option. # "EMPTY_STRING": set option as a empty string "". # "NO_EQUAL_STRING": set option as a option string only, # even without "=". # (In most case, qemu-kvm should recognize it as "<null>") if value == "NO_EQUAL_STRING": return ",%s" % option if value == "EMPTY_STRING": value = '""' return fmt % (option, str(value)) return "" # Wrappers for all supported qemu command line parameters. # This is meant to allow support for multiple qemu versions. # Each of these functions receives the output of 'qemu -help' # as a parameter, and should add the requested command line # option accordingly. 
def add_name(devices, name): return " -name '%s'" % name def process_sandbox(devices, action): if action == "add": if devices.has_option("sandbox"): return " -sandbox on " elif action == "rem": if devices.has_option("sandbox"): return " -sandbox off " def add_human_monitor(devices, monitor_name, filename): if not devices.has_option("chardev"): return " -monitor unix:'%s',server,nowait" % filename monitor_id = "hmp_id_%s" % monitor_name cmd = " -chardev socket" cmd += _add_option("id", monitor_id) cmd += _add_option("path", filename) cmd += _add_option("server", "NO_EQUAL_STRING") cmd += _add_option("nowait", "NO_EQUAL_STRING") cmd += " -mon chardev=%s" % monitor_id cmd += _add_option("mode", "readline") return cmd def add_qmp_monitor(devices, monitor_name, filename): if not devices.has_option("qmp"): logging.warn("Fallback to human monitor since qmp is" " unsupported") return add_human_monitor(devices, monitor_name, filename) if not devices.has_option("chardev"): return " -qmp unix:'%s',server,nowait" % filename monitor_id = "qmp_id_%s" % monitor_name cmd = " -chardev socket" cmd += _add_option("id", monitor_id) cmd += _add_option("path", filename) cmd += _add_option("server", "NO_EQUAL_STRING") cmd += _add_option("nowait", "NO_EQUAL_STRING") cmd += " -mon chardev=%s" % monitor_id cmd += _add_option("mode", "control") return cmd def add_serial(devices, name, filename): if not devices.has_option("chardev"): return " -serial unix:'%s',server,nowait" % filename serial_id = "serial_id_%s" % name cmd = " -chardev socket" cmd += _add_option("id", serial_id) cmd += _add_option("path", filename) cmd += _add_option("server", "NO_EQUAL_STRING") cmd += _add_option("nowait", "NO_EQUAL_STRING") cmd += " -device isa-serial" cmd += _add_option("chardev", serial_id) return cmd def add_virtio_port(devices, name, bus, filename, porttype, chardev, name_prefix=None, index=None, extra_params=""): """ Appends virtio_serialport or virtio_console device to cmdline. 
:param help: qemu -h output :param name: Name of the port :param bus: Which virtio-serial-pci device use :param filename: Path to chardev filename :param porttype: Type of the port (*serialport, console) :param chardev: Which chardev to use (*socket, spicevmc) :param name_prefix: Custom name prefix (port index is appended) :param index: Index of the current virtio_port :param extra_params: Space sepparated chardev params """ cmd = '' # host chardev if chardev == "spicevmc": # SPICE cmd += " -chardev spicevmc,id=dev%s,name=%s" % (name, name) else: # SOCKET cmd = (" -chardev socket,id=dev%s,path=%s,server,nowait" % (name, filename)) # virtport device if porttype in ("console", "virtio_console"): cmd += " -device virtconsole" else: cmd += " -device virtserialport" if name_prefix: # used by spiceagent (com.redhat.spice.*) port_name = "%s%d" % (name_prefix, index) else: port_name = name cmd += ",chardev=dev%s,name=%s,id=%s" % (name, port_name, name) cmd += _add_option("bus", bus) # Space sepparated chardev params _params = "" for parm in extra_params.split(): _params += ',' + parm cmd += _params return cmd def add_log_seabios(devices): if not devices.has_device("isa-debugcon"): return "" default_id = "seabioslog_id_%s" % self.instance filename = "/tmp/seabios-%s" % self.instance self.logs["seabios"] = filename cmd = " -chardev socket" cmd += _add_option("id", default_id) cmd += _add_option("path", filename) cmd += _add_option("server", "NO_EQUAL_STRING") cmd += _add_option("nowait", "NO_EQUAL_STRING") cmd += " -device isa-debugcon" cmd += _add_option("chardev", default_id) cmd += _add_option("iobase", "0x402") return cmd def add_log_anaconda(devices): chardev_id = "anacondalog_chardev_%s" % self.instance vioser_id = "anacondalog_vioser_%s" % self.instance filename = "/tmp/anaconda-%s" % self.instance self.logs["anaconda"] = filename dev = qemu_devices.QCustomDevice('chardev', backend='backend') dev.set_param('backend', 'socket') dev.set_param('id', chardev_id) dev.set_param("path", filename) dev.set_param("server", 'NO_EQUAL_STRING') dev.set_param("nowait", 'NO_EQUAL_STRING') devices.insert(dev) dev = QDevice('virtio-serial-pci', parent_bus={'type': 'pci'}) dev.set_param("id", vioser_id) devices.insert(dev) dev = QDevice('virtserialport') dev.set_param("bus", "%s.0" % vioser_id) dev.set_param("chardev", chardev_id) dev.set_param("name", "org.fedoraproject.anaconda.log.0") devices.insert(dev) def add_mem(devices, mem): return " -m %s" % mem def add_smp(devices): smp_str = " -smp %d" % self.cpuinfo.smp smp_pattern = "smp n\[,maxcpus=cpus\].*" if devices.has_option(smp_pattern): smp_str += ",maxcpus=%d" % self.cpuinfo.maxcpus smp_str += ",cores=%d" % self.cpuinfo.cores smp_str += ",threads=%d" % self.cpuinfo.threads smp_str += ",sockets=%d" % self.cpuinfo.sockets return smp_str def add_nic(devices, vlan, model=None, mac=None, device_id=None, netdev_id=None, nic_extra_params=None, pci_addr=None, bootindex=None, queues=1, vectors=None): if model == 'none': return if devices.has_option("device"): if not model: model = "rtl8139" elif model == "virtio": model = "virtio-net-pci" dev = QDevice(model) dev.set_param('mac', mac) # only pci domain=0,bus=0,function=0 is supported for now. # # libvirt gains the pci_slot, free_pci_addr here, # value by parsing the xml file, i.e. counting all the # pci devices and store the number. 
if model != 'spapr-vlan': dev.parent_bus = {'type': 'pci'} dev.set_param('addr', pci_addr) if nic_extra_params: nic_extra_params = (_.split('=', 1) for _ in nic_extra_params.split(',') if _) for key, val in nic_extra_params: dev.set_param(key, val) dev.set_param("bootindex", bootindex) else: dev = qemu_devices.QCustomDevice('net', backend='type') dev.set_param('type', 'nic') dev.set_param('model', model) dev.set_param('macaddr', mac, 'NEED_QUOTE') dev.set_param('id', device_id, 'NEED_QUOTE') if "virtio" in model: if int(queues) > 1: dev.set_param('mq', 'on') if vectors: dev.set_param('vectors', vectors) if devices.has_option("netdev"): dev.set_param('netdev', netdev_id) else: dev.set_param('vlan', vlan) devices.insert(dev) def add_net(devices, vlan, nettype, ifname=None, tftp=None, bootfile=None, hostfwd=[], netdev_id=None, netdev_extra_params=None, tapfds=None, script=None, downscript=None, vhost=None, queues=None, vhostfds=None): mode = None if nettype in ['bridge', 'network', 'macvtap']: mode = 'tap' elif nettype == 'user': mode = 'user' else: logging.warning("Unknown/unsupported nettype %s" % nettype) return '' if devices.has_option("netdev"): cmd = " -netdev %s,id=%s" % (mode, netdev_id) if vhost: cmd += ",%s" % vhost if vhostfds: if (int(queues) > 1 and 'vhostfds=' in devices.get_help_text()): cmd += ",vhostfds=%s" % vhostfds else: txt = "" if int(queues) > 1: txt = "qemu does not support vhost multiqueue;" txt += " falling back to a single queue." if 'vhostfd=' in devices.get_help_text(): cmd += ",vhostfd=%s" % vhostfds.split(":")[0] else: txt += " qemu does not support vhostfd." if txt: logging.warn(txt) if netdev_extra_params: cmd += "%s" % netdev_extra_params else: cmd = " -net %s,vlan=%d" % (mode, vlan) if mode == "tap" and tapfds: if (int(queues)) > 1 and ',fds=' in devices.get_help_text(): cmd += ",fds=%s" % tapfds else: cmd += ",fd=%s" % tapfds elif mode == "user": if tftp and "[,tftp=" in devices.get_help_text(): cmd += ",tftp='%s'" % tftp if bootfile and "[,bootfile=" in devices.get_help_text(): cmd += ",bootfile='%s'" % bootfile if "[,hostfwd=" in devices.get_help_text(): for host_port, guest_port in hostfwd: cmd += ",hostfwd=tcp::%s-:%s" % (host_port, guest_port) else: if ifname: cmd += ",ifname='%s'" % ifname if script: cmd += ",script='%s'" % script cmd += ",downscript='%s'" % (downscript or "no") return cmd def add_floppy(devices, filename, index): cmd_list = [" -fda '%s'", " -fdb '%s'"] return cmd_list[index] % filename def add_tftp(devices, filename): # If the new syntax is supported, don't add -tftp if "[,tftp=" in devices.get_help_text(): return "" else: return " -tftp '%s'" % filename def add_bootp(devices, filename): # If the new syntax is supported, don't add -bootp if "[,bootfile=" in devices.get_help_text(): return "" else: return " -bootp '%s'" % filename def add_tcp_redir(devices, host_port, guest_port): # If the new syntax is supported, don't add -redir if "[,hostfwd=" in devices.get_help_text(): return "" else: return " -redir tcp:%s::%s" % (host_port, guest_port) def add_vnc(devices, vnc_port, vnc_password='no', extra_params=None): vnc_cmd = " -vnc :%d" % (vnc_port - 5900) if vnc_password == "yes": vnc_cmd += ",password" if extra_params: vnc_cmd += ",%s" % extra_params return vnc_cmd def add_sdl(devices): if devices.has_option("sdl"): return " -sdl" else: return "" def add_nographic(devices): return " -nographic" def add_uuid(devices, uuid): return " -uuid '%s'" % uuid def add_pcidevice(devices, host, params, device_driver="pci-assign"): if device_driver == "pci-assign": if (devices.has_device("pci-assign") or devices.has_device("kvm-pci-assign")): dev = QDevice(device_driver, parent_bus={'type': 'pci'}) else: dev = qemu_devices.QCustomDevice('pcidevice', parent_bus={'type': 'pci'}) else: if devices.has_device(device_driver): dev = QDevice(device_driver, parent_bus={'type': 'pci'}) else: dev = qemu_devices.QCustomDevice('pcidevice', parent_bus={'type': 'pci'}) help_cmd = "%s -device pci-assign,\\? 2>&1" % qemu_binary pcidevice_help = utils.system_output(help_cmd) dev.set_param('host', host) dev.set_param('id', 'id_%s' % host.replace(":", ".")) fail_param = [] for param in params.get("pci-assign_params", "").split(): value = params.get(param) if value: if bool(re.search(param, pcidevice_help, re.M)): dev.set_param(param, value) else: fail_param.append(param) if fail_param: msg = ("parameters %s are not supported by device pci-assign." " It only supports the following parameters:\n %s" % (fail_param, pcidevice_help)) logging.warn(msg) devices.insert(dev) def add_spice_rhel5(devices, spice_params, port_range=(3100, 3199)): """ Processes spice parameters on a RHEL5 host. :param spice_params: string of space separated spice params :param port_range: tuple with port range, default: (3100, 3199) """ if devices.has_option("spice"): cmd = " -spice" else: return "" spice_help = "" if devices.has_option("spice-help"): spice_help = commands.getoutput("%s -device \\?" % qemu_binary) s_port = str(utils_misc.find_free_port(*port_range)) self.spice_options['spice_port'] = s_port cmd += " port=%s" % s_port for param in spice_params.split(): value = params.get(param) if value: if bool(re.search(param, spice_help, re.M)): cmd += ",%s=%s" % (param, value) else: msg = ("parameter %s is not supported in spice. It " "only supports the following parameters:\n %s" % (param, spice_help)) logging.warn(msg) else: cmd += ",%s" % param if devices.has_option("qxl"): qxl_dev_nr = params.get("qxl_dev_nr", 1) cmd += " -qxl %s" % qxl_dev_nr return cmd def add_spice(port_range=(3000, 3199), tls_port_range=(3200, 3399)): """ Processes spice parameters. :param port_range: tuple with port range, default: (3000, 3199) :param tls_port_range: tuple with tls port range, default: (3200, 3399) """ spice_opts = [] # will be used for ",".join() tmp = None def optget(opt): """a helper function""" return self.spice_options.get(opt) def set_yes_no_value(key, yes_value=None, no_value=None): """just a helper function""" tmp = optget(key) if tmp == "no" and no_value: spice_opts.append(no_value) elif tmp == "yes" and yes_value: spice_opts.append(yes_value) def set_value(opt_string, key, fallback=None): """just a helper function""" tmp = optget(key) if tmp: spice_opts.append(opt_string % tmp) elif fallback: spice_opts.append(fallback) s_port = str(utils_misc.find_free_port(*port_range)) if optget("spice_port") == "generate": if not self.is_alive(): self.spice_options['spice_port'] = s_port spice_opts.append("port=%s" % s_port) self.spice_port = s_port else: self.spice_options['spice_port'] = self.spice_port spice_opts.append("port=%s" % self.spice_port) else: set_value("port=%s", "spice_port") set_value("password=%s", "spice_password", "disable-ticketing") if optget("listening_addr") == "ipv4": host_ip = utils_net.get_host_ip_address(self.params) self.spice_options['listening_addr'] = "ipv4" spice_opts.append("addr=%s" % host_ip) #set_value("addr=%s", "listening_addr", ) elif optget("listening_addr") == "ipv6": host_ip = utils_net.get_host_ip_address(self.params) host_ip_ipv6 = utils_misc.convert_ipv4_to_ipv6(host_ip)
self.spice_options['listening_addr'] = "ipv6" spice_opts.append("addr=%s" % host_ip_ipv6) set_yes_no_value( "disable_copy_paste", yes_value="disable-copy-paste") set_value("addr=%s", "spice_addr") if optget("spice_ssl") == "yes": # SSL only part t_port = str(utils_misc.find_free_port(*tls_port_range)) if optget("spice_tls_port") == "generate": if not self.is_alive(): self.spice_options['spice_tls_port'] = t_port spice_opts.append("tls-port=%s" % t_port) self.spice_tls_port = t_port else: self.spice_options[ 'spice_tls_port'] = self.spice_tls_port spice_opts.append("tls-port=%s" % self.spice_tls_port) else: set_value("tls-port=%s", "spice_tls_port") prefix = optget("spice_x509_prefix") if ((prefix is None or not os.path.exists(prefix)) and (optget("spice_gen_x509") == "yes")): # Generating spice_x509_* is not always necessary; # regenerating them will make your existing VMs # no longer accessible via encrypted spice. c_subj = optget("spice_x509_cacert_subj") s_subj = optget("spice_x509_server_subj") # If CN is not specified, add IP of host if s_subj[-3:] == "CN=": s_subj += utils_net.get_host_ip_address(self.params) passwd = optget("spice_x509_key_password") secure = optget("spice_x509_secure") utils_misc.create_x509_dir(prefix, c_subj, s_subj, passwd, secure) tmp = optget("spice_x509_dir") if tmp == "yes": spice_opts.append("x509-dir=%s" % (prefix)) elif tmp == "no": cacert = optget("spice_x509_cacert_file") server_key = optget("spice_x509_key_file") server_cert = optget("spice_x509_cert_file") keyfile_str = ("x509-key-file=%s,x509-cacert-file=%s," "x509-cert-file=%s" % (os.path.join(prefix, server_key), os.path.join(prefix, cacert), os.path.join(prefix, server_cert))) spice_opts.append(keyfile_str) set_yes_no_value("spice_x509_secure", yes_value="x509-key-password=%s" % (optget("spice_x509_key_password"))) tmp = optget("spice_secure_channels") if tmp: for item in tmp.split(","): spice_opts.append("tls-channel=%s" % (item.strip())) # Less common options set_value("seamless-migration=%s", "spice_seamless_migration") set_value("image-compression=%s", "spice_image_compression") set_value("jpeg-wan-compression=%s", "spice_jpeg_wan_compression") set_value("zlib-glz-wan-compression=%s", "spice_zlib_glz_wan_compression") set_value("streaming-video=%s", "spice_streaming_video") set_value("agent-mouse=%s", "spice_agent_mouse") set_value("playback-compression=%s", "spice_playback_compression") set_yes_no_value("spice_ipv4", yes_value="ipv4") set_yes_no_value("spice_ipv6", yes_value="ipv6") return " -spice %s" % (",".join(spice_opts)) def add_qxl(qxl_nr, qxl_memory=None): """ Adds extra qxl devices and sets memory for -vga qxl and the extra qxls. :param qxl_nr: total number of qxl devices :param qxl_memory: sets memory of individual devices """ qxl_str = "" vram_help = "" if qxl_memory: vram_help = "vram_size=%d" % qxl_memory qxl_str += " -global qxl-vga.%s" % (vram_help) for index in range(1, qxl_nr): qxl_str += " -device qxl,id=video%d,%s"\ % (index, vram_help) return qxl_str def add_vga(vga): return " -vga %s" % vga def add_kernel(devices, filename): return " -kernel '%s'" % filename def add_initrd(devices, filename): return " -initrd '%s'" % filename def add_rtc(devices): # Pay attention that rtc-td-hack is for early qemu versions # if "rtc " in help: if devices.has_option("rtc"): cmd = " -rtc base=%s" % params.get("rtc_base", "utc") cmd += _add_option("clock", params.get("rtc_clock", "host")) cmd += _add_option("driftfix", params.get("rtc_drift", "none")) return cmd elif devices.has_option("rtc-td-hack"): return "
-rtc-td-hack" else: return "" def add_kernel_cmdline(devices, cmdline): return " -append '%s'" % cmdline def add_testdev(devices, filename=None): if devices.has_device("testdev"): return (" -chardev file,id=testlog,path=%s" " -device testdev,chardev=testlog" % filename) elif devices.has_device("pc-testdev"): return " -device pc-testdev" else: return "" def add_isa_debug_exit(devices, iobase=0xf4, iosize=0x04): if devices.has_device("isa-debug-exit"): return (" -device isa-debug-exit,iobase=%s,iosize=%s" % (iobase, iosize)) else: return "" def add_no_hpet(devices): if devices.has_option("no-hpet"): return " -no-hpet" else: return "" def add_cpu_flags(devices, cpu_model, flags=None, vendor_id=None, family=None): if devices.has_option('cpu'): cmd = " -cpu '%s'" % cpu_model if vendor_id: cmd += ",vendor=\"%s\"" % vendor_id if flags: if not flags.startswith(","): cmd += "," cmd += "%s" % flags if family is not None: cmd += ",family=%s" % family return cmd else: return "" def add_boot(devices, boot_order, boot_once, boot_menu): cmd = " -boot" pattern = "boot \[order=drives\]\[,once=drives\]\[,menu=on\|off\]" if devices.has_option("boot \[a\|c\|d\|n\]"): cmd += " %s" % boot_once elif devices.has_option(pattern): cmd += (" order=%s,once=%s,menu=%s" % (boot_order, boot_once, boot_menu)) else: cmd = "" return cmd def get_index(index): while self.index_in_use.get(str(index)): index += 1 return index def add_sga(devices): if not devices.has_option("device"): return "" return " -device sga" def add_watchdog(devices, device_type=None, action="reset"): watchdog_cmd = "" if devices.has_option("watchdog"): if device_type: watchdog_cmd += " -watchdog %s" % device_type watchdog_cmd += " -watchdog-action %s" % action return watchdog_cmd def add_option_rom(devices, opt_rom): if not devices.has_option("option-rom"): return "" return " -option-rom %s" % opt_rom def add_smartcard(devices, sc_chardev, sc_id): sc_cmd = " -device usb-ccid,id=ccid0" sc_cmd += " -chardev " + sc_chardev sc_cmd += ",id=" + sc_id + ",name=smartcard" sc_cmd += " -device ccid-card-passthru,chardev=" + sc_id return sc_cmd def add_numa_node(devices, mem=None, cpus=None, nodeid=None): """ This function used to add numa node to guest command line """ if not devices.has_option("numa"): return "" numa_cmd = " -numa node" if mem is not None: numa_cmd += ",mem=%s" % mem if cpus is not None: numa_cmd += ",cpus=%s" % cpus if nodeid is not None: numa_cmd += ",nodeid=%s" % nodeid return numa_cmd # End of command line option wrappers # If nothing changed and devices exists, return imediatelly if (name is None and params is None and root_dir is None and self.devices is not None): return self.devices if name is None: name = self.name if params is None: params = self.params if root_dir is None: root_dir = self.root_dir have_ahci = False have_virtio_scsi = False virtio_scsi_pcis = [] # init value by default. # PCI addr 0,1,2 are taken by PCI/ISA/IDE bridge and the GPU. self.pci_addr_list = [0, 1, 2] # Clone this VM using the new params vm = self.clone(name, params, root_dir, copy_state=True) # global counters ide_bus = 0 ide_unit = 0 vdisk = 0 scsi_disk = 0 global_image_bootindex = 0 if params.get("kernel"): global_image_bootindex = 1 qemu_binary = utils_misc.get_qemu_binary(params) self.qemu_binary = qemu_binary support_cpu_model = commands.getoutput("%s -cpu \\?" 
% qemu_binary) index_global = 0 # init the dict index_in_use for key in params.keys(): if 'drive_index' in key: self.index_in_use[params.get(key)] = True cmd = "" # Enable the use of glibc's malloc_perturb feature if params.get("malloc_perturb", "no") == "yes": cmd += "MALLOC_PERTURB_=1 " # Set the X11 display parameter if requested if params.get("x11_display"): cmd += "DISPLAY=%s " % params.get("x11_display") if params.get("qemu_audio_drv"): cmd += "QEMU_AUDIO_DRV=%s " % params.get("qemu_audio_drv") # Add command prefix for qemu-kvm. like taskset, valgrind and so on if params.get("qemu_command_prefix"): qemu_command_prefix = params.get("qemu_command_prefix") cmd += "%s " % qemu_command_prefix # Add numa memory cmd to pin guest memory to numa node if params.get("numa_node"): numa_node = int(params.get("numa_node")) if numa_node < 0: p = utils_misc.NumaNode(numa_node) n = int(utils_misc.get_node_count()) + numa_node cmd += "numactl -m %s " % n else: n = numa_node - 1 cmd += "numactl -m %s " % n # Start constructing devices representation devices = qemu_devices.DevContainer(qemu_binary, self.name, params.get('strict_mode'), params.get( 'workaround_qemu_qmp_crash'), params.get('allow_hotplugged_vm')) StrDev = qemu_devices.QStringDevice QDevice = qemu_devices.QDevice devices.insert(StrDev('PREFIX', cmdline=cmd)) # Add the qemu binary devices.insert(StrDev('qemu', cmdline=qemu_binary)) devices.insert(StrDev('-S', cmdline="-S")) # Add the VM's name devices.insert(StrDev('vmname', cmdline=add_name(devices, name))) if params.get("qemu_sandbox", "on") == "on": devices.insert(StrDev('sandbox', cmdline=process_sandbox(devices, "add"))) elif params.get("sandbox", "off") == "off": devices.insert(StrDev('qemu_sandbox', cmdline=process_sandbox(devices, "rem"))) devs = devices.machine_by_params(params) for dev in devs: devices.insert(dev) # no automagic devices please defaults = params.get("defaults", "no") if devices.has_option("nodefaults") and defaults != "yes": devices.insert(StrDev('nodefaults', cmdline=" -nodefaults")) vga = params.get("vga") if vga: if vga != 'none': devices.insert(StrDev('VGA-%s' % vga, {'addr': 2}, cmdline=add_vga(vga), parent_bus={'type': 'pci'})) else: devices.insert(StrDev('VGA-none', cmdline=add_vga(vga))) if vga == "qxl": qxl_dev_memory = int(params.get("qxl_dev_memory", 0)) qxl_dev_nr = int(params.get("qxl_dev_nr", 1)) devices.insert(StrDev('qxl', cmdline=add_qxl(qxl_dev_nr, qxl_dev_memory))) elif params.get('defaults', 'no') != 'no': # by default add cirrus devices.insert(StrDev('VGA-cirrus', {'addr': 2}, cmdline=add_vga(vga), parent_bus={'type': 'pci'})) # When old scsi fmt is used, new device with lowest pci_addr is created devices.hook_fill_scsi_hbas(params) # -soundhw addresses are always the lowest after scsi soundhw = params.get("soundcards") if soundhw: if not devices.has_option('device') or soundhw == "all": for sndcard in ('AC97', 'ES1370', 'intel-hda'): # Add all dummy PCI devices and the actuall command below devices.insert(StrDev("SND-%s" % sndcard, parent_bus={'type': 'pci'})) devices.insert(StrDev('SoundHW', cmdline="-soundhw %s" % soundhw)) else: # TODO: Use QDevices for this and set the addresses properly for sound_device in soundhw.split(","): if "hda" in sound_device: devices.insert(QDevice('intel-hda', parent_bus={'type': 'pci'})) devices.insert(QDevice('hda-duplex')) elif sound_device in ["es1370", "ac97"]: devices.insert(QDevice(sound_device.upper(), parent_bus={'type': 'pci'})) else: devices.insert(QDevice(sound_device, parent_bus={'type': 'pci'})) # 
Add monitors for monitor_name in params.objects("monitors"): monitor_params = params.object_params(monitor_name) monitor_filename = qemu_monitor.get_monitor_filename(vm, monitor_name) if monitor_params.get("monitor_type") == "qmp": cmd = add_qmp_monitor(devices, monitor_name, monitor_filename) devices.insert(StrDev('QMP-%s' % monitor_name, cmdline=cmd)) else: cmd = add_human_monitor(devices, monitor_name, monitor_filename) devices.insert(StrDev('HMP-%s' % monitor_name, cmdline=cmd)) # Add serial console redirection for serial in params.objects("isa_serials"): serial_filename = vm.get_serial_console_filename(serial) cmd = add_serial(devices, serial, serial_filename) devices.insert(StrDev('SER-%s' % serial, cmdline=cmd)) # Add virtio_serial ports no_virtio_serial_pcis = 0 no_virtio_ports = 0 virtio_port_spread = int(params.get('virtio_port_spread', 2)) for port_name in params.objects("virtio_ports"): port_params = params.object_params(port_name) bus = params.get('virtio_port_bus', False) if bus is not False: # Manually set bus bus = int(bus) elif not virtio_port_spread: # bus not specified, let qemu decide pass elif not no_virtio_ports % virtio_port_spread: # Add new vio-pci every n-th port. (Spread ports) bus = no_virtio_serial_pcis else: # Port not overriden, use last vio-pci bus = no_virtio_serial_pcis - 1 if bus < 0: # First bus bus = 0 # Add virtio_serial_pcis for i in range(no_virtio_serial_pcis, bus + 1): dev = QDevice('virtio-serial-pci', parent_bus={'type': 'pci'}) dev.set_param('id', 'virtio_serial_pci%d' % i) devices.insert(dev) no_virtio_serial_pcis += 1 if bus is not False: bus = "virtio_serial_pci%d.0" % bus # Add actual ports cmd = add_virtio_port(devices, port_name, bus, self.get_virtio_port_filename(port_name), port_params.get('virtio_port_type'), port_params.get('virtio_port_chardev'), port_params.get('virtio_port_name_prefix'), no_virtio_ports, port_params.get('virtio_port_params', '')) devices.insert(StrDev('VIO-%s' % port_name, cmdline=cmd)) no_virtio_ports += 1 # Add logging devices.insert(StrDev('isa-log', cmdline=add_log_seabios(devices))) if params.get("anaconda_log", "no") == "yes": add_log_anaconda(devices) # Add USB controllers usbs = params.objects("usbs") if not devices.has_option("device"): usbs = ("oldusb",) # Old qemu, add only one controller '-usb' for usb_name in usbs: usb_params = params.object_params(usb_name) for dev in devices.usbc_by_params(usb_name, usb_params): devices.insert(dev) # Add images (harddrives) for image_name in params.objects("images"): # FIXME: Use qemu_devices for handling indexes image_params = params.object_params(image_name) if image_params.get("boot_drive") == "no": continue if params.get("index_enable") == "yes": drive_index = image_params.get("drive_index") if drive_index: index = drive_index else: index_global = get_index(index_global) index = str(index_global) index_global += 1 else: index = None image_bootindex = None image_boot = image_params.get("image_boot") if not re.search("boot=on\|off", devices.get_help_text(), re.MULTILINE): if image_boot in ['yes', 'on', True]: image_bootindex = str(global_image_bootindex) global_image_bootindex += 1 image_boot = "unused" image_bootindex = image_params.get('bootindex', image_bootindex) else: if image_boot in ['yes', 'on', True]: if global_image_bootindex > 0: image_boot = False global_image_bootindex += 1 image_params = params.object_params(image_name) if image_params.get("boot_drive") == "no": continue devs = devices.images_define_by_params(image_name, image_params, 'disk', index, 
image_boot, image_bootindex) for _ in devs: devices.insert(_) # Networking redirs = [] for redir_name in params.objects("redirs"): redir_params = params.object_params(redir_name) guest_port = int(redir_params.get("guest_port")) host_port = vm.redirs.get(guest_port) redirs += [(host_port, guest_port)] iov = 0 for nic in vm.virtnet: nic_params = params.object_params(nic.nic_name) if nic_params.get('pci_assignable') == "no": script = nic_params.get("nic_script") downscript = nic_params.get("nic_downscript") vhost = nic_params.get("vhost") script_dir = data_dir.get_data_dir() if script: script = utils_misc.get_path(script_dir, script) if downscript: downscript = utils_misc.get_path(script_dir, downscript) # setup nic parameters as needed # add_netdev if netdev_id not set nic = vm.add_nic(**dict(nic)) # gather set values or None if unset vlan = int(nic.get('vlan')) netdev_id = nic.get('netdev_id') device_id = nic.get('device_id') mac = nic.get('mac') nic_model = nic.get("nic_model") nic_extra = nic.get("nic_extra_params") bootindex = nic_params.get("bootindex") netdev_extra = nic.get("netdev_extra_params") bootp = nic.get("bootp") if nic.get("tftp"): tftp = utils_misc.get_path(root_dir, nic.get("tftp")) else: tftp = None nettype = nic.get("nettype", "bridge") # don't force conversion add_nic()/add_net() optional parameter if nic.has_key('tapfds'): tapfds = nic.tapfds else: tapfds = None if nic.has_key('vhostfds'): vhostfds = nic.vhostfds else: vhostfds = None ifname = nic.get('ifname') queues = nic.get("queues", 1) # specify the number of MSI-X vectors that the card should have; # this option currently only affects virtio cards if nic_params.get("enable_msix_vectors") == "yes": if nic.has_key("vectors"): vectors = nic.vectors else: vectors = 2 * int(queues) + 1 else: vectors = None # Handle the '-net nic' part add_nic(devices, vlan, nic_model, mac, device_id, netdev_id, nic_extra, nic_params.get("nic_pci_addr"), bootindex, queues, vectors) # Handle the '-net tap' or '-net user' or '-netdev' part cmd = add_net(devices, vlan, nettype, ifname, tftp, bootp, redirs, netdev_id, netdev_extra, tapfds, script, downscript, vhost, queues, vhostfds) # TODO: Is every NIC a PCI device? devices.insert(StrDev("NET-%s" % nettype, cmdline=cmd)) else: device_driver = nic_params.get("device_driver", "pci-assign") pci_id = vm.pa_pci_ids[iov] add_pcidevice(devices, pci_id, params=nic_params, device_driver=device_driver) iov += 1 mem = params.get("mem") if mem: devices.insert(StrDev('mem', cmdline=add_mem(devices, mem))) smp = int(params.get("smp", 0)) vcpu_maxcpus = int(params.get("vcpu_maxcpus", 0)) vcpu_sockets = int(params.get("vcpu_sockets", 0)) vcpu_cores = int(params.get("vcpu_cores", 0)) vcpu_threads = int(params.get("vcpu_threads", 0)) # Force CPU threads to 2 when smp > 8. if smp > 8 and vcpu_threads <= 1: vcpu_threads = 2 # Some versions of windows don't support more than 2 sockets of cpu, # here is a workaround to make all windows use only 2 sockets. 
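# --- Illustrative sketch (editor's addition) ---
# The block below fills in whichever of smp/sockets/cores/threads were
# left unset (0). A condensed standalone version of the same derivation
# (it folds the branches together and skips the cores-unset special case,
# so treat it as an approximation):
def _demo_fill_topology(smp, sockets, cores, threads):
    cores = cores or 1
    threads = threads or 1
    if smp and not sockets:
        sockets = smp // (cores * threads) or 1
    sockets = sockets or 1
    if not smp:
        smp = cores * threads * sockets
    return smp, sockets, cores, threads

# _demo_fill_topology(smp=8, sockets=0, cores=2, threads=2)
# -> (8, 2, 2, 2)   i.e. sockets = 8 // (2 * 2)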
if (vcpu_sockets and vcpu_sockets > 2 and params.get("os_type") == 'windows'): vcpu_sockets = 2 if smp == 0 or vcpu_sockets == 0: vcpu_cores = vcpu_cores or 1 vcpu_threads = vcpu_threads or 1 if smp and vcpu_sockets == 0: vcpu_sockets = int(smp / (vcpu_cores * vcpu_threads)) or 1 else: vcpu_sockets = vcpu_sockets or 1 if smp == 0: smp = vcpu_cores * vcpu_threads * vcpu_sockets else: if vcpu_cores == 0: vcpu_threads = vcpu_threads or 1 vcpu_cores = int(smp / (vcpu_sockets * vcpu_threads)) or 1 else: vcpu_threads = int(smp / (vcpu_cores * vcpu_sockets)) or 1 self.cpuinfo.smp = smp self.cpuinfo.maxcpus = vcpu_maxcpus or smp self.cpuinfo.cores = vcpu_cores self.cpuinfo.threads = vcpu_threads self.cpuinfo.sockets = vcpu_sockets devices.insert(StrDev('smp', cmdline=add_smp(devices))) numa_total_cpus = 0 numa_total_mem = 0 for numa_node in params.objects("guest_numa_nodes"): numa_params = params.object_params(numa_node) numa_mem = numa_params.get("numa_mem") numa_cpus = numa_params.get("numa_cpus") numa_nodeid = numa_params.get("numa_nodeid") if numa_mem is not None: numa_total_mem += int(numa_mem) if numa_cpus is not None: numa_total_cpus += len(utils_misc.cpu_str_to_list(numa_cpus)) devices.insert(StrDev('numa', cmdline=add_numa_node(devices, numa_mem, numa_cpus, numa_nodeid))) if params.get("numa_consistency_check_cpu_mem", "no") == "yes": if (numa_total_cpus > int(smp) or numa_total_mem > int(mem) or len(params.objects("guest_numa_nodes")) > int(smp)): logging.debug("-numa needs %s vcpus and %s memory, which does " "not match the -smp and -mem settings. The vcpu " "number from -smp is %s, and the memory size " "from -mem is %s" % (numa_total_cpus, numa_total_mem, smp, mem)) raise virt_vm.VMDeviceError("The numa node cfg cannot fit" " the smp and memory cfg.") cpu_model = params.get("cpu_model") use_default_cpu_model = True if cpu_model: use_default_cpu_model = False for model in re.split(",", cpu_model): model = model.strip() if model not in support_cpu_model: continue cpu_model = model break else: cpu_model = model logging.error("Nonexistent CPU model %s will be passed " "to qemu (wrong config or negative test)", model) if use_default_cpu_model: cpu_model = params.get("default_cpu_model") if cpu_model: vendor = params.get("cpu_model_vendor") flags = params.get("cpu_model_flags") family = params.get("cpu_family") self.cpuinfo.model = cpu_model self.cpuinfo.vendor = vendor self.cpuinfo.flags = flags self.cpuinfo.family = family cmd = add_cpu_flags(devices, cpu_model, flags, vendor, family) devices.insert(StrDev('cpu', cmdline=cmd)) # Add cdroms for cdrom in params.objects("cdroms"): image_params = params.object_params(cdrom) # FIXME: Use qemu_devices for handling indexes if image_params.get("boot_drive") == "no": continue if params.get("index_enable") == "yes": drive_index = image_params.get("drive_index") if drive_index: index = drive_index else: index_global = get_index(index_global) index = str(index_global) index_global += 1 else: index = None image_bootindex = None image_boot = image_params.get("image_boot") if not re.search("boot=on\|off", devices.get_help_text(), re.MULTILINE): if image_boot in ['yes', 'on', True]: image_bootindex = str(global_image_bootindex) global_image_bootindex += 1 image_boot = "unused" image_bootindex = image_params.get( 'bootindex', image_bootindex) else: if image_boot in ['yes', 'on', True]: if global_image_bootindex > 0: image_boot = False global_image_bootindex += 1 iso = image_params.get("cdrom") if iso or image_params.get("cdrom_without_file") == "yes": devs = devices.cdroms_define_by_params(cdrom,
image_params, 'cdrom', index, image_boot, image_bootindex) for _ in devs: devices.insert(_) # We may want to add {floppy_otps} parameter for -fda, -fdb # {fat:floppy:}/path/. However vvfat is not usually recommended. for floppy_name in params.objects('floppies'): image_params = params.object_params(floppy_name) # TODO: Unify image, cdrom, floppy params image_params['drive_format'] = 'floppy' image_params[ 'image_readonly'] = image_params.get("floppy_readonly", "no") # Use the absolute patch with floppies (pure *.vfd) image_params['image_raw_device'] = 'yes' image_params['image_name'] = utils_misc.get_path( data_dir.get_data_dir(), image_params["floppy_name"]) image_params['image_format'] = None devs = devices.images_define_by_params(floppy_name, image_params, media='') for _ in devs: devices.insert(_) # Add usb devices for usb_dev in params.objects("usb_devices"): usb_dev_params = params.object_params(usb_dev) devices.insert(devices.usb_by_params(usb_dev, usb_dev_params)) tftp = params.get("tftp") if tftp: tftp = utils_misc.get_path(data_dir.get_data_dir(), tftp) devices.insert(StrDev('tftp', cmdline=add_tftp(devices, tftp))) bootp = params.get("bootp") if bootp: devices.insert(StrDev('bootp', cmdline=add_bootp(devices, bootp))) kernel = params.get("kernel") if kernel: kernel = utils_misc.get_path(data_dir.get_data_dir(), kernel) devices.insert(StrDev('kernel', cmdline=add_kernel(devices, kernel))) kernel_params = params.get("kernel_params") if kernel_params: cmd = add_kernel_cmdline(devices, kernel_params) devices.insert(StrDev('kernel-params', cmdline=cmd)) initrd = params.get("initrd") if initrd: initrd = utils_misc.get_path(data_dir.get_data_dir(), initrd) devices.insert(StrDev('initrd', cmdline=add_initrd(devices, initrd))) for host_port, guest_port in redirs: cmd = add_tcp_redir(devices, host_port, guest_port) devices.insert(StrDev('tcp-redir', cmdline=cmd)) cmd = "" if params.get("display") == "vnc": vnc_extra_params = params.get("vnc_extra_params") vnc_password = params.get("vnc_password", "no") cmd += add_vnc(devices, self.vnc_port, vnc_password, vnc_extra_params) elif params.get("display") == "sdl": cmd += add_sdl(devices) elif params.get("display") == "nographic": cmd += add_nographic(devices) elif params.get("display") == "spice": if params.get("rhel5_spice"): spice_params = params.get("spice_params") cmd += add_spice_rhel5(devices, spice_params) else: spice_keys = ( "spice_port", "spice_password", "spice_addr", "spice_ssl", "spice_tls_port", "spice_tls_ciphers", "spice_gen_x509", "spice_x509_dir", "spice_x509_prefix", "spice_x509_key_file", "spice_x509_cacert_file", "spice_x509_key_password", "spice_x509_secure", "spice_x509_cacert_subj", "spice_x509_server_subj", "spice_secure_channels", "spice_image_compression", "spice_jpeg_wan_compression", "spice_zlib_glz_wan_compression", "spice_streaming_video", "spice_agent_mouse", "spice_playback_compression", "spice_ipv4", "spice_ipv6", "spice_x509_cert_file", "disable_copy_paste", "spice_seamless_migration", "listening_addr" ) for skey in spice_keys: value = params.get(skey, None) if value: self.spice_options[skey] = value cmd += add_spice() if cmd: devices.insert(StrDev('display', cmdline=cmd)) if params.get("uuid") == "random": cmd = add_uuid(devices, vm.uuid) devices.insert(StrDev('uuid', cmdline=cmd)) elif params.get("uuid"): cmd = add_uuid(devices, params.get("uuid")) devices.insert(StrDev('uuid', cmdline=cmd)) if params.get("testdev") == "yes": cmd = add_testdev(devices, vm.get_testlog_filename()) 
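# --- Illustrative sketch (editor's addition) ---
# The redirs loop above relies on add_tcp_redir(), which emits the legacy
# "-redir" form only when the newer hostfwd syntax is absent from the
# qemu help text. The same capability probe, standalone (helper name is
# hypothetical):
def _demo_redir_args(qemu_help, redirs):
    if "[,hostfwd=" in qemu_help:
        # newer qemu: forwarding is folded into "-net user,hostfwd=..."
        return ""
    return "".join(" -redir tcp:%s::%s" % (h, g) for h, g in redirs)

# _demo_redir_args("-net user[,vlan=n]", [(5000, 22)])
# -> " -redir tcp:5000::22"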
devices.insert(StrDev('testdev', cmdline=cmd)) if params.get("isa_debugexit") == "yes": iobase = params.get("isa_debugexit_iobase") iosize = params.get("isa_debugexit_iosize") cmd = add_isa_debug_exit(devices, iobase, iosize) devices.insert(StrDev('isa_debugexit', cmdline=cmd)) if params.get("disable_hpet") == "yes": devices.insert(StrDev('nohpet', cmdline=add_no_hpet(devices))) devices.insert(StrDev('rtc', cmdline=add_rtc(devices))) if devices.has_option("boot"): boot_order = params.get("boot_order", "cdn") boot_once = params.get("boot_once", "c") boot_menu = params.get("boot_menu", "off") cmd = add_boot(devices, boot_order, boot_once, boot_menu) devices.insert(StrDev('bootmenu', cmdline=cmd)) p9_export_dir = params.get("9p_export_dir") if p9_export_dir: cmd = " -fsdev" p9_fs_driver = params.get("9p_fs_driver") if p9_fs_driver == "handle": cmd += " handle,id=local1,path=" + p9_export_dir elif p9_fs_driver == "proxy": cmd += " proxy,id=local1,socket=" else: p9_fs_driver = "local" cmd += " local,id=local1,path=" + p9_export_dir # security model is needed only for local fs driver if p9_fs_driver == "local": p9_security_model = params.get("9p_security_model") if not p9_security_model: p9_security_model = "none" cmd += ",security_model=" + p9_security_model elif p9_fs_driver == "proxy": p9_socket_name = params.get("9p_socket_name") if not p9_socket_name: raise virt_vm.VMImageMissingError("Socket name not " "defined") cmd += p9_socket_name p9_immediate_writeout = params.get("9p_immediate_writeout") if p9_immediate_writeout == "yes": cmd += ",writeout=immediate" p9_readonly = params.get("9p_readonly") if p9_readonly == "yes": cmd += ",readonly" devices.insert(StrDev('fsdev', cmdline=cmd)) dev = QDevice('virtio-9p-pci', parent_bus={'type': 'pci'}) dev.set_param('fsdev', 'local1') dev.set_param('mount_tag', 'autotest_tag') devices.insert(dev) extra_params = params.get("extra_params") if extra_params: devices.insert(StrDev('extra', cmdline=extra_params)) bios_path = params.get("bios_path") if bios_path: devices.insert(StrDev('bios', cmdline="-bios %s" % bios_path)) disable_kvm_option = "" if (devices.has_option("no-kvm")): disable_kvm_option = "-no-kvm" enable_kvm_option = "" if (devices.has_option("enable-kvm")): enable_kvm_option = "-enable-kvm" if (params.get("disable_kvm", "no") == "yes"): params["enable_kvm"] = "no" if (params.get("enable_kvm", "yes") == "no"): devices.insert(StrDev('nokvm', cmdline=disable_kvm_option)) logging.debug("qemu will run in TCG mode") else: devices.insert(StrDev('kvm', cmdline=enable_kvm_option)) logging.debug("qemu will run in KVM mode") self.no_shutdown = (devices.has_option("no-shutdown") and params.get("disable_shutdown", "no") == "yes") if self.no_shutdown: devices.insert(StrDev('noshutdown', cmdline="-no-shutdown")) user_runas = params.get("user_runas") if devices.has_option("runas") and user_runas: devices.insert(StrDev('runas', cmdline="-runas %s" % user_runas)) if params.get("enable_sga") == "yes": devices.insert(StrDev('sga', cmdline=add_sga(devices))) if params.get("smartcard", "no") == "yes": sc_chardev = params.get("smartcard_chardev") sc_id = params.get("smartcard_id") devices.insert(StrDev('smartcard', cmdline=add_smartcard(devices, sc_chardev, sc_id))) if params.get("enable_watchdog", "no") == "yes": cmd = add_watchdog(devices, params.get("watchdog_device_type", None), params.get("watchdog_action", "reset")) devices.insert(StrDev('watchdog', cmdline=cmd)) option_roms = params.get("option_roms") if option_roms: cmd = "" for opt_rom in 
option_roms.split(): cmd += add_option_rom(devices, opt_rom) if cmd: devices.insert(StrDev('ROM', cmdline=cmd)) return devices def _nic_tap_add_helper(self, nic): if nic.nettype == 'macvtap': logging.info("Adding macvtap ifname: %s", nic.ifname) utils_net.add_nic_macvtap(nic) else: nic.tapfds = utils_net.open_tap("/dev/net/tun", nic.ifname, queues=nic.queues, vnet_hdr=True) logging.debug("Adding VM %s NIC ifname %s to bridge %s", self.name, nic.ifname, nic.netdst) if nic.nettype == 'bridge': utils_net.add_to_bridge(nic.ifname, nic.netdst) utils_net.bring_up_ifname(nic.ifname) def _nic_tap_remove_helper(self, nic): try: if nic.nettype == 'macvtap': logging.info("Removing macvtap ifname %s", nic.ifname) tap = utils_net.Macvtap(nic.ifname) tap.delete() else: logging.debug("Removing VM %s NIC ifname %s from bridge %s", self.name, nic.ifname, nic.netdst) if nic.tapfds: for i in nic.tapfds.split(':'): os.close(int(i)) if nic.vhostfds: for i in nic.vhostfds.split(':'): os.close(int(i)) except TypeError: pass @error.context_aware def create(self, name=None, params=None, root_dir=None, timeout=CREATE_TIMEOUT, migration_mode=None, migration_exec_cmd=None, migration_fd=None, mac_source=None): """ Start the VM by running a qemu command. All parameters are optional. If name, params or root_dir are not supplied, the respective values stored as class attributes are used. :param name: The name of the object :param params: A dict containing VM params :param root_dir: Base directory for relative filenames :param migration_mode: If supplied, start VM for incoming migration using this protocol (either 'rdma', 'x-rdma', 'tcp', 'unix' or 'exec') :param migration_exec_cmd: Command to embed in '-incoming "exec: ..."' (e.g. 'gzip -c -d filename') if migration_mode is 'exec'; default is to listen on a random TCP port :param migration_fd: Open file descriptor from which the machine should migrate. :param mac_source: A VM object from which to copy MAC addresses. If not specified, new addresses will be generated.
:raise VMCreateError: If qemu terminates unexpectedly :raise VMKVMInitError: If KVM initialization fails :raise VMHugePageError: If hugepage initialization fails :raise VMImageMissingError: If a CD image is missing :raise VMHashMismatchError: If a CD image hash has doesn't match the expected hash :raise VMBadPATypeError: If an unsupported PCI assignment type is requested :raise VMPAError: If no PCI assignable devices could be assigned :raise TAPCreationError: If fail to create tap fd :raise BRAddIfError: If fail to add a tap to a bridge :raise TAPBringUpError: If fail to bring up a tap :raise PrivateBridgeError: If fail to bring the private bridge """ error.context("creating '%s'" % self.name) self.destroy(free_mac_addresses=False) if name is not None: self.name = name self.devices = None # Representation changed if params is not None: self.params = params self.devices = None # Representation changed if root_dir is not None: self.root_dir = root_dir self.devices = None # Representation changed name = self.name params = self.params root_dir = self.root_dir # Verify the md5sum of the ISO images for cdrom in params.objects("cdroms"): cdrom_params = params.object_params(cdrom) iso = cdrom_params.get("cdrom") if iso: iso = utils_misc.get_path(data_dir.get_data_dir(), iso) if not os.path.exists(iso): raise virt_vm.VMImageMissingError(iso) compare = False if cdrom_params.get("md5sum_1m"): logging.debug("Comparing expected MD5 sum with MD5 sum of " "first MB of ISO file...") actual_hash = utils.hash_file(iso, 1048576, method="md5") expected_hash = cdrom_params.get("md5sum_1m") compare = True elif cdrom_params.get("md5sum"): logging.debug("Comparing expected MD5 sum with MD5 sum of " "ISO file...") actual_hash = utils.hash_file(iso, method="md5") expected_hash = cdrom_params.get("md5sum") compare = True elif cdrom_params.get("sha1sum"): logging.debug("Comparing expected SHA1 sum with SHA1 sum " "of ISO file...") actual_hash = utils.hash_file(iso, method="sha1") expected_hash = cdrom_params.get("sha1sum") compare = True if compare: if actual_hash == expected_hash: logging.debug("Hashes match") else: raise virt_vm.VMHashMismatchError(actual_hash, expected_hash) # Make sure the following code is not executed by more than one thread # at the same time lockfile = open("/tmp/kvm-autotest-vm-create.lock", "w+") fcntl.lockf(lockfile, fcntl.LOCK_EX) try: # Handle port redirections redir_names = params.objects("redirs") host_ports = utils_misc.find_free_ports( 5000, 6000, len(redir_names)) self.redirs = {} for i in range(len(redir_names)): redir_params = params.object_params(redir_names[i]) guest_port = int(redir_params.get("guest_port")) self.redirs[guest_port] = host_ports[i] # Generate basic parameter values for all NICs and create TAP fd for nic in self.virtnet: nic_params = params.object_params(nic.nic_name) pa_type = nic_params.get("pci_assignable") if pa_type and pa_type != "no": device_driver = nic_params.get("device_driver", "pci-assign") if "mac" not in nic: self.virtnet.generate_mac_address(nic["nic_name"]) mac = nic["mac"] if self.pci_assignable is None: self.pci_assignable = test_setup.PciAssignable( driver=params.get("driver"), driver_option=params.get("driver_option"), host_set_flag=params.get("host_setup_flag"), kvm_params=params.get("kvm_default"), vf_filter_re=params.get("vf_filter_re"), pf_filter_re=params.get("pf_filter_re"), device_driver=device_driver) # Virtual Functions (VF) assignable devices if pa_type == "vf": self.pci_assignable.add_device(device_type=pa_type, mac=mac) # Physical 
NIC (PF) assignable devices elif pa_type == "pf": self.pci_assignable.add_device(device_type=pa_type, name=nic_params.get("device_name")) else: raise virt_vm.VMBadPATypeError(pa_type) else: # fill in key values, validate nettype # note: make_create_command() calls vm.add_nic (i.e. on a # copy) if nic_params.get('netdst') == 'private': nic.netdst = (test_setup. PrivateBridgeConfig(nic_params).brname) nic = self.add_nic(**dict(nic)) # implied add_netdev if mac_source: # Will raise exception if source doesn't # have cooresponding nic logging.debug("Copying mac for nic %s from VM %s" % (nic.nic_name, mac_source.name)) nic.mac = mac_source.get_mac_address(nic.nic_name) if nic.ifname in utils_net.get_net_if(): self.virtnet.generate_ifname(nic.nic_name) if nic.nettype in ['bridge', 'network', 'macvtap']: self._nic_tap_add_helper(nic) if ((nic_params.get("vhost") == 'vhost=on') and (nic_params.get("enable_vhostfd", "yes") == "yes")): vhostfds = [] for i in xrange(int(nic.queues)): vhostfds.append(str(os.open("/dev/vhost-net", os.O_RDWR))) nic.vhostfds = ':'.join(vhostfds) elif nic.nettype == 'user': logging.info("Assuming dependencies met for " "user mode nic %s, and ready to go" % nic.nic_name) self.virtnet.update_db() # Find available VNC port, if needed if params.get("display") == "vnc": self.vnc_port = utils_misc.find_free_port(5900, 6100) # Find random UUID if specified 'uuid = random' in config file if params.get("uuid") == "random": f = open("/proc/sys/kernel/random/uuid") self.uuid = f.read().strip() f.close() if self.pci_assignable is not None: self.pa_pci_ids = self.pci_assignable.request_devs() if self.pa_pci_ids: logging.debug("Successfully assigned devices: %s", self.pa_pci_ids) else: raise virt_vm.VMPAError(pa_type) # Make qemu command try: self.devices = self.make_create_command() logging.debug(self.devices.str_short()) logging.debug(self.devices.str_bus_short()) qemu_command = self.devices.cmdline() except error.TestNAError: # TestNAErrors should be kept as-is so we generate SKIP # results instead of bogus FAIL results raise except Exception: for nic in self.virtnet: self._nic_tap_remove_helper(nic) # TODO: log_last_traceback is being moved into autotest. # use autotest.client.shared.base_utils when it's completed. if 'log_last_traceback' in utils.__dict__: utils.log_last_traceback('Fail to create qemu command:') else: utils_misc.log_last_traceback('Fail to create qemu' 'command:') raise virt_vm.VMStartError(self.name, 'Error occurred while ' 'executing make_create_command(). 
' 'Check the log for traceback.') # Add migration parameters if required if migration_mode in ["tcp", "rdma", "x-rdma"]: self.migration_port = utils_misc.find_free_port(5200, 6000) qemu_command += (" -incoming " + migration_mode + ":0:%d" % self.migration_port) elif migration_mode == "unix": self.migration_file = "/tmp/migration-unix-%s" % self.instance qemu_command += " -incoming unix:%s" % self.migration_file elif migration_mode == "exec": if migration_exec_cmd is None: self.migration_port = utils_misc.find_free_port(5200, 6000) qemu_command += (' -incoming "exec:nc -l %s"' % self.migration_port) else: qemu_command += (' -incoming "exec:%s"' % migration_exec_cmd) elif migration_mode == "fd": qemu_command += ' -incoming "fd:%d"' % (migration_fd) p9_fs_driver = params.get("9p_fs_driver") if p9_fs_driver == "proxy": proxy_helper_name = params.get("9p_proxy_binary", "virtfs-proxy-helper") proxy_helper_cmd = utils_misc.get_path(root_dir, proxy_helper_name) if not proxy_helper_cmd: raise virt_vm.VMConfigMissingError(self.name, "9p_proxy_binary") p9_export_dir = params.get("9p_export_dir") if not p9_export_dir: raise virt_vm.VMConfigMissingError(self.name, "9p_export_dir") proxy_helper_cmd += " -p " + p9_export_dir proxy_helper_cmd += " -u 0 -g 0" p9_socket_name = params.get("9p_socket_name") proxy_helper_cmd += " -s " + p9_socket_name proxy_helper_cmd += " -n" logging.info("Running Proxy Helper:\n%s", proxy_helper_cmd) self.process = aexpect.run_bg(proxy_helper_cmd, None, logging.info, "[9p proxy helper]", auto_close=False) logging.info("Running qemu command (reformatted):\n%s", qemu_command.replace(" -", " \\\n -")) self.qemu_command = qemu_command self.process = aexpect.run_bg(qemu_command, None, logging.info, "[qemu output] ", auto_close=False) self.start_time = time.time() # test doesn't need to hold tapfd's open for nic in self.virtnet: if nic.has_key('tapfds'): # implies bridge/tap try: for i in nic.tapfds.split(':'): os.close(int(i)) # qemu process retains access via open file # remove this attribute from virtnet because # fd numbers are not always predictable and # vm instance must support cloning. del nic['tapfds'] # File descriptor is already closed except OSError: pass if nic.has_key('vhostfds'): try: for i in nic.vhostfds.split(':'): os.close(int(i)) del nic['vhostfds'] except OSError: pass # Make sure the process was started successfully if not self.process.is_alive(): status = self.process.get_status() output = self.process.get_output().strip() migration_in_course = migration_mode is not None unknown_protocol = "unknown migration protocol" in output if migration_in_course and unknown_protocol: e = VMMigrateProtoUnsupportedError(migration_mode, output) else: e = virt_vm.VMCreateError(qemu_command, status, output) self.destroy() raise e # Establish monitor connections self.monitors = [] for monitor_name in params.objects("monitors"): monitor_params = params.object_params(monitor_name) try: monitor = qemu_monitor.wait_for_create_monitor(self, monitor_name, monitor_params, timeout) except qemu_monitor.MonitorConnectError, detail: logging.error(detail) self.destroy() raise # Add this monitor to the list self.monitors += [monitor] # Create isa serial ports. 
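# --- Illustrative sketch (editor's addition) ---
# The migration_mode handling above maps each protocol onto an
# "-incoming" argument appended to the qemu command line. Condensed into
# one hypothetical helper:
def _demo_incoming_arg(mode, port=5200, instance="demo", exec_cmd=None):
    if mode in ("tcp", "rdma", "x-rdma"):
        return " -incoming %s:0:%d" % (mode, port)
    if mode == "unix":
        return " -incoming unix:/tmp/migration-unix-%s" % instance
    if mode == "exec":
        # without an explicit command, listen on a port via nc
        return ' -incoming "exec:%s"' % (exec_cmd or "nc -l %s" % port)
    return ""

# _demo_incoming_arg("tcp", 5201) -> " -incoming tcp:0:5201"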
self.serial_ports = [] for serial in params.objects("isa_serials"): self.serial_ports.append(serial) # Create virtio_ports (virtio_serialports and virtio_consoles) i = 0 self.virtio_ports = [] for port in params.objects("virtio_ports"): port_params = params.object_params(port) if port_params.get('virtio_port_chardev') == "spicevmc": filename = 'dev%s' % port else: filename = self.get_virtio_port_filename(port) port_name = port_params.get('virtio_port_name_prefix', None) if port_name: # If port_name_prefix was used port_name = port_name + str(i) else: # Implicit name - port port_name = port if port_params.get('virtio_port_type') in ("console", "virtio_console"): self.virtio_ports.append( qemu_virtio_port.VirtioConsole(port, port_name, filename)) else: self.virtio_ports.append( qemu_virtio_port.VirtioSerial(port, port_name, filename)) i += 1 # Get the output so far, to see if we have any problems with # KVM modules or with hugepage setup. output = self.process.get_output() if re.search("Could not initialize KVM", output, re.IGNORECASE): e = virt_vm.VMKVMInitError( qemu_command, self.process.get_output()) self.destroy() raise e if "alloc_mem_area" in output: e = virt_vm.VMHugePageError( qemu_command, self.process.get_output()) self.destroy() raise e logging.debug("VM appears to be alive with PID %s", self.get_pid()) vcpu_thread_pattern = self.params.get("vcpu_thread_pattern", r"thread_id.?[:|=]\s*(\d+)") self.vcpu_threads = self.get_vcpu_pids(vcpu_thread_pattern) vhost_thread_pattern = params.get("vhost_thread_pattern", r"\w+\s+(\d+)\s.*\[vhost-%s\]") self.vhost_threads = self.get_vhost_threads(vhost_thread_pattern) # Establish a session with the serial console # Let's consider the first serial port as serial console. # Note: requires a version of netcat that supports -U try: tmp_serial = self.serial_ports[0] except IndexError: raise virt_vm.VMConfigMissingError(name, "isa_serial") self.serial_console = aexpect.ShellSession( "nc -U %s" % self.get_serial_console_filename(tmp_serial), auto_close=False, output_func=utils_misc.log_line, output_params=("serial-%s-%s.log" % (tmp_serial, name),), prompt=self.params.get("shell_prompt", "[\#\$]")) del tmp_serial for key, value in self.logs.items(): outfile = "%s-%s.log" % (key, name) self.logsessions[key] = aexpect.Tail( "nc -U %s" % value, auto_close=False, output_func=utils_misc.log_line, output_params=(outfile,)) self.logsessions[key].set_log_file(outfile) if params.get("paused_after_start_vm") != "yes": # start guest if self.monitor.verify_status("paused"): try: self.monitor.cmd("cont") except qemu_monitor.QMPCmdError, e: if ((e.data['class'] == "MigrationExpected") and (migration_mode is not None)): logging.debug("Migration did not start yet...") else: raise e finally: fcntl.lockf(lockfile, fcntl.LOCK_UN) lockfile.close() def wait_for_status(self, status, timeout, first=0.0, step=1.0, text=None): """ Wait until the VM status changes to specified status :return: True in case the status has changed before timeout, otherwise return None. :param timeout: Timeout in seconds :param first: Time to sleep before first attempt :param steps: Time to sleep between attempts in seconds :param text: Text to print while waiting, for debug purposes """ return utils_misc.wait_for(lambda: self.monitor.verify_status(status), timeout, first, step, text) def wait_until_paused(self, timeout): """ Wait until the VM is paused. :return: True in case the VM is paused before timeout, otherwise return None. 
:param timeout: Timeout in seconds """ return self.wait_for_status("paused", timeout) def wait_until_dead(self, timeout, first=0.0, step=1.0): """ Wait until VM is dead. :return: True if VM is dead before timeout, otherwise returns None. :param timeout: Timeout in seconds :param first: Time to sleep before first attempt :param steps: Time to sleep between attempts in seconds """ return utils_misc.wait_for(self.is_dead, timeout, first, step) def wait_for_shutdown(self, timeout=60): """ Wait until guest shuts down. Helps until the VM is shut down by the guest. :return: True in case the VM was shut down, None otherwise. Note that the VM is not necessarily dead when this function returns True. If QEMU is running in -no-shutdown mode, the QEMU process may be still alive. """ if self.no_shutdown: return self.wait_until_paused(timeout) else: return self.wait_until_dead(timeout, 1, 1) def graceful_shutdown(self, timeout=60): """ Try to gracefully shut down the VM. :return: True if VM was successfully shut down, None otherwise. Note that the VM is not necessarily dead when this function returns True. If QEMU is running in -no-shutdown mode, the QEMU process may be still alive. """ if self.params.get("shutdown_command"): # Try to destroy with shell command logging.debug("Shutting down VM %s (shell)", self.name) try: if len(self.virtnet) > 0: session = self.login() else: session = self.serial_login() except (virt_vm.VMInterfaceIndexError), e: try: session = self.serial_login() except (remote.LoginError, virt_vm.VMError), e: logging.debug(e) except (remote.LoginError, virt_vm.VMError), e: logging.debug(e) else: try: # Send the shutdown command session.sendline(self.params.get("shutdown_command")) if self.wait_for_shutdown(timeout): return True finally: session.close() def _cleanup(self, free_mac_addresses): """ Do cleanup works .removes VM monitor files. .process close .serial_console close .logsessions close .delete tmp files .free_mac_addresses, if needed .delete macvtap, if needed :param free_mac_addresses: Whether to release the VM's NICs back to the address pool. """ self.monitors = [] if self.pci_assignable: self.pci_assignable.release_devs() self.pci_assignable = None if self.process: self.process.close() if self.serial_console: self.serial_console.close() if self.logsessions: for key in self.logsessions: self.logsessions[key].close() # Generate the tmp file which should be deleted. file_list = [self.get_testlog_filename()] file_list += qemu_monitor.get_monitor_filenames(self) file_list += self.get_virtio_port_filenames() file_list += self.get_serial_console_filenames() file_list += self.logs.values() for f in file_list: try: os.unlink(f) except OSError: pass if hasattr(self, "migration_file"): try: os.unlink(self.migration_file) except OSError: pass if free_mac_addresses: for nic_index in xrange(0, len(self.virtnet)): self.free_mac_address(nic_index) for nic in self.virtnet: if nic.nettype == 'macvtap': tap = utils_net.Macvtap(nic.ifname) tap.delete() def destroy(self, gracefully=True, free_mac_addresses=True): """ Destroy the VM. If gracefully is True, first attempt to shutdown the VM with a shell command. Then, attempt to destroy the VM via the monitor with a 'quit' command. If that fails, send SIGKILL to the qemu process. :param gracefully: If True, an attempt will be made to end the VM using a shell command before trying to end the qemu process with a 'quit' or a kill signal. :param free_mac_addresses: If True, the MAC addresses used by the VM will be freed. """ try: # Is it already dead? 
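# Editor's note (summary of the logic that follows): destroy() escalates
# in three stages -- 1) graceful_shutdown() via a shell command inside
# the guest, 2) a monitor "quit" command, 3) SIGKILL on the whole qemu
# process tree -- returning as soon as is_dead() confirms the VM is gone,
# with _cleanup() always running in the finally clause.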
if self.is_dead(): return logging.debug("Destroying VM %s (PID %s)", self.name, self.get_pid()) kill_timeout = int(self.params.get("kill_timeout", "60")) if gracefully: self.graceful_shutdown(kill_timeout) if self.is_dead(): logging.debug("VM %s down (shell)", self.name) return else: logging.debug("VM %s failed to go down (shell)", self.name) if self.monitor: # Try to finish process with a monitor command logging.debug("Ending VM %s process (monitor)", self.name) try: self.monitor.quit() except qemu_monitor.MonitorError, e: logging.warn(e) else: # Wait for the VM to be really dead if self.wait_until_dead(5, 0.5, 0.5): logging.debug("VM %s down (monitor)", self.name) return else: logging.debug("VM %s failed to go down (monitor)", self.name) # If the VM isn't dead yet... pid = self.process.get_pid() logging.debug("Ending VM %s process (killing PID %s)", self.name, pid) utils_misc.kill_process_tree(pid, 9) # Wait for the VM to be really dead if utils_misc.wait_for(self.is_dead, 5, 0.5, 0.5): logging.debug("VM %s down (process killed)", self.name) return # If all else fails, we've got a zombie... logging.error("VM %s (PID %s) is a zombie!", self.name, self.process.get_pid()) finally: self._cleanup(free_mac_addresses) @property def monitor(self): """ Return the main monitor object, selected by the parameter main_monitor. If main_monitor isn't defined, return the first monitor. If no monitors exist, or if main_monitor refers to a nonexistent monitor, return None. """ for m in self.monitors: if m.name == self.params.get("main_monitor"): return m if self.monitors and not self.params.get("main_monitor"): return self.monitors[0] return None def get_monitors_by_type(self, mon_type): """ Return list of monitors of mon_type type. :param mon_type: desired monitor type (qmp, human) """ return [_ for _ in self.monitors if _.protocol == mon_type] def get_peer(self, netid): """ Return the peer of netdev or network device. :param netid: id of netdev or device :return: id of the peer device, otherwise None """ o = self.monitor.info("network") network_info = o if isinstance(o, dict): network_info = o["return"] netdev_peer_re = self.params.get("netdev_peer_re") if not netdev_peer_re: default_netdev_peer_re = "\s{2,}(.*?): .*?\\\s(.*?):" logging.warning("Missing config netdev_peer_re for VM %s, " "using default %s", self.name, default_netdev_peer_re) netdev_peer_re = default_netdev_peer_re pairs = re.findall(netdev_peer_re, network_info, re.S) for nic, tap in pairs: if nic == netid: return tap if tap == netid: return nic return None def get_ifname(self, nic_index=0): """ Return the ifname of a bridge/tap device associated with a NIC. :param nic_index: Index of the NIC """ return self.virtnet[nic_index].ifname def get_pid(self): """ Return the VM's PID. If the VM is dead return None. :note: This works under the assumption that self.process.get_pid() returns the PID of the parent shell process. """ try: children = commands.getoutput("ps --ppid=%d -o pid=" % self.process.get_pid()).split() return int(children[0]) except (TypeError, IndexError, ValueError): return None def get_shell_pid(self): """ Return the PID of the parent shell process. :note: This works under the assumption that self.process.get_pid() returns the PID of the parent shell process. """ return self.process.get_pid() def get_vnc_port(self): """ Return self.vnc_port.
""" return self.vnc_port def get_vcpu_pids(self, vcpu_thread_pattern): """ Return the list of vcpu PIDs :return: the list of vcpu PIDs """ return [int(_) for _ in re.findall(vcpu_thread_pattern, str(self.monitor.info("cpus")))] def get_vhost_threads(self, vhost_thread_pattern): """ Return the list of vhost threads PIDs :param vhost_thread_pattern: a regex to match the vhost threads :type vhost_thread_pattern: string :return: a list of vhost threads PIDs :rtype: list of integer """ return [int(_) for _ in re.findall(vhost_thread_pattern % self.get_pid(), utils.system_output("ps aux"))] def get_shared_meminfo(self): """ Returns the VM's shared memory information. :return: Shared memory used by VM (MB) """ if self.is_dead(): logging.error("Could not get shared memory info from dead VM.") return None filename = "/proc/%d/statm" % self.get_pid() shm = int(open(filename).read().split()[2]) # statm stores informations in pages, translate it to MB return shm * 4.0 / 1024 def get_spice_var(self, spice_var): """ Returns string value of spice variable of choice or None :param spice_var - spice related variable 'spice_port', ... """ return self.spice_options.get(spice_var, None) @error.context_aware def hotplug_vcpu(self, cpu_id=None, plug_command=""): """ Hotplug a vcpu, if not assign the cpu_id, will use the minimum unused. the function will use the plug_command if you assigned it, else the function will use the command automatically generated based on the type of monitor :param cpu_id the cpu_id you want hotplug. """ vcpu_threads_count = len(self.vcpu_threads) plug_cpu_id = cpu_id if plug_cpu_id is None: plug_cpu_id = vcpu_threads_count if plug_command: vcpu_add_cmd = plug_command % plug_cpu_id else: if self.monitor.protocol == 'human': vcpu_add_cmd = "cpu_set %s online" % plug_cpu_id elif self.monitor.protocol == 'qmp': vcpu_add_cmd = "cpu-add id=%s" % plug_cpu_id try: self.monitor.verify_supported_cmd(vcpu_add_cmd.split()[0]) except qemu_monitor.MonitorNotSupportedCmdError: raise error.TestNAError("%s monitor not support cmd '%s'" % (self.monitor.protocol, vcpu_add_cmd)) try: cmd_output = self.monitor.send_args_cmd(vcpu_add_cmd) except qemu_monitor.QMPCmdError, e: return (False, str(e)) vcpu_thread_pattern = self.params.get("vcpu_thread_pattern", r"thread_id.?[:|=]\s*(\d+)") self.vcpu_threads = self.get_vcpu_pids(vcpu_thread_pattern) if len(self.vcpu_threads) == vcpu_threads_count + 1: return(True, plug_cpu_id) else: return(False, cmd_output) @error.context_aware def hotplug_nic(self, **params): """ Convenience method wrapper for add_nic() and add_netdev(). :return: dict-like object containing nic's details """ nic_name = self.add_nic(**params)["nic_name"] self.activate_netdev(nic_name) self.activate_nic(nic_name) return self.virtnet[nic_name] @error.context_aware def hotunplug_nic(self, nic_index_or_name): """ Convenience method wrapper for del/deactivate nic and netdev. """ # make sure we got a name nic_name = self.virtnet[nic_index_or_name].nic_name self.deactivate_nic(nic_name) self.deactivate_netdev(nic_name) self.del_nic(nic_name) @error.context_aware def add_netdev(self, **params): """ Hotplug a netdev device. :param **params: NIC info. dict. 
:return: netdev_id """ nic_name = params['nic_name'] nic = self.virtnet[nic_name] nic_index = self.virtnet.nic_name_index(nic_name) nic.set_if_none('netdev_id', utils_misc.generate_random_id()) nic.set_if_none('ifname', self.virtnet.generate_ifname(nic_index)) nic.set_if_none('nettype', 'bridge') if nic.nettype in ['bridge', 'macvtap']: # implies tap # destination is required, hard-code reasonable default if unset # nic.set_if_none('netdst', 'virbr0') # tapfd allocated/set in activate because requires system resources nic.set_if_none('queues', '1') ids = [] for i in range(int(nic.queues)): ids.append(utils_misc.generate_random_id()) nic.set_if_none('tapfd_ids', ids) elif nic.nettype == 'user': pass # nothing to do else: # unsupported nettype raise virt_vm.VMUnknownNetTypeError(self.name, nic_name, nic.nettype) return nic.netdev_id @error.context_aware def del_netdev(self, nic_index_or_name): """ Remove netdev info. from nic on VM, does not deactivate. :param: nic_index_or_name: name or index number for existing NIC """ nic = self.virtnet[nic_index_or_name] error.context("removing netdev info from nic %s from vm %s" % ( nic, self.name)) for propertea in ['netdev_id', 'ifname', 'queues', 'tapfds', 'tapfd_ids', 'vectors']: if nic.has_key(propertea): del nic[propertea] def add_nic(self, **params): """ Add new or setup existing NIC, optionally creating netdev if None :param **params: Parameters to set :param nic_name: Name for existing or new device :param nic_model: Model name to emulate :param netdev_id: Existing qemu net device ID name, None to create new :param mac: Optional MAC address, None to randomly generate. """ # returns existing or new nic object nic = super(VM, self).add_nic(**params) nic_index = self.virtnet.nic_name_index(nic.nic_name) nic.set_if_none('vlan', str(nic_index)) nic.set_if_none('device_id', utils_misc.generate_random_id()) nic.set_if_none('queues', '1') if not nic.has_key('netdev_id'): # virtnet items are lists that act like dicts nic.netdev_id = self.add_netdev(**dict(nic)) nic.set_if_none('nic_model', params['nic_model']) nic.set_if_none('queues', params.get('queues', '1')) if params.get("enable_msix_vectors") == "yes": nic.set_if_none('vectors', 2 * int(nic.queues) + 1) return nic @error.context_aware def activate_netdev(self, nic_index_or_name): """ Activate an inactive host-side networking device :raise:: IndexError if nic doesn't exist :raise:: VMUnknownNetTypeError: if nettype is unset/unsupported :raise:: IOError if TAP device node cannot be opened :raise:: VMAddNetDevError: if operation failed """ tapfds = [] nic = self.virtnet[nic_index_or_name] error.context("Activating netdev for %s based on %s" % (self.name, nic)) msg_sfx = ("nic %s on vm %s with attach_cmd " % (self.virtnet[nic_index_or_name], self.name)) attach_cmd = "netdev_add" if nic.nettype == 'bridge': # implies tap error.context("Opening tap device node for %s " % nic.ifname, logging.debug) python_tapfds = utils_net.open_tap("/dev/net/tun", nic.ifname, queues=nic.queues, vnet_hdr=False) for i in range(int(nic.queues)): error.context("Assigning tap %s to qemu by fd" % nic.tapfd_ids[i], logging.info) lsof_cmd = "lsof -a -p %s -Ff -- /dev/net/tun" % self.get_pid() openfd_list = utils.system_output(lsof_cmd).splitlines() self.monitor.getfd(int(python_tapfds.split(':')[i]), nic.tapfd_ids[i]) n_openfd_list = utils.system_output(lsof_cmd).splitlines() new_qemu_fd = list(set(n_openfd_list) - set(openfd_list)) if not new_qemu_fd: err_msg = "Can't get the tap fd in qemu process!" 
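# --- Illustrative sketch (editor's addition) ---
# The lsof diff above identifies which fd number qemu was handed by
# comparing its open files before and after the monitor getfd call.
# The same set-difference idea, standalone (hypothetical helper; the
# commands module is already used elsewhere in this file):
import commands

def _demo_new_fds(pid):
    lsof_cmd = "lsof -a -p %s -Ff -- /dev/net/tun" % pid
    before = set(commands.getoutput(lsof_cmd).splitlines())
    # ... hand a descriptor to the process here ...
    after = set(commands.getoutput(lsof_cmd).splitlines())
    # lsof -Ff prefixes each fd number with "f"; strip it off
    return [line.lstrip("f") for line in after - before]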
                    raise virt_vm.VMAddNetDevError(err_msg)
                tapfds.append(new_qemu_fd[0].lstrip("f"))
            nic.set_if_none("tapfds", ":".join(tapfds))

            if not self.devices:
                err_msg = "Can't add nic for VM which is not running."
                raise virt_vm.VMAddNetDevError(err_msg)
            if ((int(nic.queues)) > 1 and
                    ',fds=' in self.devices.get_help_text()):
                attach_cmd += " type=tap,id=%s,fds=%s" % (nic.device_id,
                                                          nic.tapfds)
            else:
                attach_cmd += " type=tap,id=%s,fd=%s" % (nic.device_id,
                                                         nic.tapfds)

            error.context("Raising interface for " + msg_sfx + attach_cmd,
                          logging.debug)
            utils_net.bring_up_ifname(nic.ifname)
            error.context("Raising bridge for " + msg_sfx + attach_cmd,
                          logging.debug)
            # assume this will puke if netdst unset
            if nic.netdst is not None:
                utils_net.add_to_bridge(nic.ifname, nic.netdst)
        elif nic.nettype == 'macvtap':
            pass
        elif nic.nettype == 'user':
            attach_cmd += " user,id=%s" % nic.device_id
        elif nic.nettype == 'none':
            attach_cmd += " none"
        else:  # unsupported nettype
            raise virt_vm.VMUnknownNetTypeError(self.name, nic_index_or_name,
                                                nic.nettype)
        if nic.has_key('netdev_extra_params'):
            attach_cmd += nic.netdev_extra_params
        error.context("Hotplugging " + msg_sfx + attach_cmd, logging.debug)

        if self.monitor.protocol == 'qmp':
            self.monitor.send_args_cmd(attach_cmd)
        else:
            self.monitor.send_args_cmd(attach_cmd, convert=False)

        network_info = self.monitor.info("network")
        if nic.device_id not in network_info:
            # Don't leave resources dangling
            self.deactivate_netdev(nic_index_or_name)
            raise virt_vm.VMAddNetDevError(("Failed to add netdev: %s for " %
                                            nic.device_id) + msg_sfx +
                                           attach_cmd)

    @error.context_aware
    def activate_nic(self, nic_index_or_name):
        """
        Activate a VM's inactive NIC device and verify state

        :param nic_index_or_name: name or index number for existing NIC
        """
        error.context("Retrieving info for NIC %s on VM %s" % (
                      nic_index_or_name, self.name))
        nic = self.virtnet[nic_index_or_name]
        device_add_cmd = "device_add"
        if nic.has_key('nic_model'):
            device_add_cmd += ' driver=%s' % nic.nic_model
        device_add_cmd += ",netdev=%s" % nic.device_id
        if nic.has_key('mac'):
            device_add_cmd += ",mac=%s" % nic.mac
        device_add_cmd += ",id=%s" % nic.nic_name
        if nic['nic_model'] == 'virtio-net-pci':
            if int(nic['queues']) > 1:
                device_add_cmd += ",mq=on"
            if nic.has_key('vectors'):
                device_add_cmd += ",vectors=%s" % nic.vectors
        device_add_cmd += nic.get('nic_extra_params', '')
        if nic.has_key('romfile'):
            device_add_cmd += ",romfile=%s" % nic.romfile
        error.context("Activating nic on VM %s with monitor command %s" % (
                      self.name, device_add_cmd))

        if self.monitor.protocol == 'qmp':
            self.monitor.send_args_cmd(device_add_cmd)
        else:
            self.monitor.send_args_cmd(device_add_cmd, convert=False)

        error.context("Verifying nic %s shows in qtree" % nic.nic_name)
        qtree = self.monitor.info("qtree")
        if nic.nic_name not in qtree:
            logging.error(qtree)
            raise virt_vm.VMAddNicError("Device %s was not plugged into qdev "
                                        "tree" % nic.nic_name)

    @error.context_aware
    def deactivate_nic(self, nic_index_or_name, wait=20):
        """
        Reverses what activate_nic did

        :param nic_index_or_name: name or index number for existing NIC
        :param wait: Time test will wait for the guest to unplug the device
        """
        nic = self.virtnet[nic_index_or_name]
        error.context("Removing nic %s from VM %s" %
                      (nic_index_or_name, self.name))
        nic_del_cmd = "device_del id=%s" % nic.nic_name

        if self.monitor.protocol == 'qmp':
            self.monitor.send_args_cmd(nic_del_cmd)
        else:
            self.monitor.send_args_cmd(nic_del_cmd, convert=True)

        if wait:
            logging.info("waiting for the guest to finish the unplug")
            if not utils_misc.wait_for(lambda: nic.nic_name not in
                                       self.monitor.info("qtree"),
                                       wait, 5, 1):
                raise virt_vm.VMDelNicError("Device is not unplugged by "
                                            "guest, please check whether the "
                                            "hotplug module was loaded in "
                                            "guest")

    @error.context_aware
    def deactivate_netdev(self, nic_index_or_name):
        """
        Reverses what activate_netdev() did

        :param nic_index_or_name: name or index number for existing NIC
        """
        # FIXME: Need to down interface & remove from bridge????
        netdev_id = self.virtnet[nic_index_or_name].device_id
        error.context("removing netdev id %s from vm %s" %
                      (netdev_id, self.name))
        nic_del_cmd = "netdev_del id=%s" % netdev_id

        if self.monitor.protocol == 'qmp':
            self.monitor.send_args_cmd(nic_del_cmd)
        else:
            self.monitor.send_args_cmd(nic_del_cmd, convert=True)

        network_info = self.monitor.info("network")
        if netdev_id in network_info:
            raise virt_vm.VMDelNetDevError("Failed to remove netdev %s" %
                                           netdev_id)

    @error.context_aware
    def del_nic(self, nic_index_or_name):
        """
        Undefine nic parameters, reverses what add_nic did.

        :param nic_index_or_name: name or index number for existing NIC
        """
        super(VM, self).del_nic(nic_index_or_name)

    @error.context_aware
    def send_fd(self, fd, fd_name="migfd"):
        """
        Send file descriptor over unix socket to VM.

        :param fd: File descriptor.
        :param fd_name: File descriptor identifier in VM.
        """
        error.context("Send fd %d like %s to VM %s" %
                      (fd, fd_name, self.name))

        logging.debug("Send file descriptor %s to source VM.", fd_name)
        if self.monitor.protocol == 'human':
            self.monitor.cmd("getfd %s" % (fd_name), fd=fd)
        elif self.monitor.protocol == 'qmp':
            self.monitor.cmd("getfd", args={'fdname': fd_name}, fd=fd)
        error.context()

    def mig_finished(self):
        ret = True
        if (self.params["display"] == "spice" and
                self.get_spice_var("spice_seamless_migration") == "on"):
            s = self.monitor.info("spice")
            if isinstance(s, str):
                ret = "migrated: true" in s
            else:
                ret = s.get("migrated") == "true"
        o = self.monitor.info("migrate")
        if isinstance(o, str):
            return ret and ("status: active" not in o)
        else:
            return ret and (o.get("status") != "active")

    def mig_succeeded(self):
        o = self.monitor.info("migrate")
        if isinstance(o, str):
            return "status: completed" in o
        else:
            return o.get("status") == "completed"

    def mig_failed(self):
        o = self.monitor.info("migrate")
        if isinstance(o, str):
            return "status: failed" in o
        else:
            return o.get("status") == "failed"

    def mig_cancelled(self):
        if self.mig_succeeded():
            raise virt_vm.VMMigrateCancelError(
                "Migration completed successfully")
        elif self.mig_failed():
            raise virt_vm.VMMigrateFailedError("Migration failed")
        o = self.monitor.info("migrate")
        if isinstance(o, str):
            return ("Migration status: cancelled" in o or
                    "Migration status: canceled" in o)
        else:
            return (o.get("status") == "cancelled" or
                    o.get("status") == "canceled")

    def wait_for_migration(self, timeout):
        if not utils_misc.wait_for(self.mig_finished, timeout, 2, 2,
                                   "Waiting for migration to complete"):
            raise virt_vm.VMMigrateTimeoutError("Timeout expired while waiting"
                                                " for migration to finish")

    @error.context_aware
    def migrate(self, timeout=virt_vm.BaseVM.MIGRATE_TIMEOUT, protocol="tcp",
                cancel_delay=None, offline=False, stable_check=False,
                clean=True, save_path="/tmp", dest_host="localhost",
                remote_port=None, not_wait_for_migration=False,
                fd_src=None, fd_dst=None, migration_exec_cmd_src=None,
                migration_exec_cmd_dst=None):
        """
        Migrate the VM.

        If the migration is local, the VM object's state is switched with
        that of the destination VM.
        Otherwise, the state is switched with that of a dead VM (returned by
        self.clone()).

        :param timeout: Time to wait for migration to complete.
        :param protocol: Migration protocol (as defined in MIGRATION_PROTOS)
        :param cancel_delay: If provided, specifies a time duration after
                which migration will be canceled.  Used for testing
                migrate_cancel.
        :param offline: If True, pause the source VM before migration.
        :param stable_check: If True, compare the VM's state after migration
                to its state before migration and raise an exception if they
                differ.
        :param clean: If True, delete the saved state files (relevant only if
                stable_check is also True).
        :param save_path: The path for state files.
        :param dest_host: Destination host (defaults to 'localhost').
        :param remote_port: Port to use for remote migration.
        :param not_wait_for_migration: If True, start the migration but do
                not wait until it finishes.
        :param fd_src: File descriptor for migration to which the source
                VM writes data.  The descriptor is closed during the
                migration.
        :param fd_dst: File descriptor for migration from which the
                destination VM reads data.
        :param migration_exec_cmd_src: Command to embed in '-incoming "exec: "'
                (e.g. 'exec:gzip -c > filename') if migration_mode is 'exec';
                default is to listen on a random TCP port
        :param migration_exec_cmd_dst: Command to embed in '-incoming "exec: "'
                (e.g. 'gzip -c -d filename') if migration_mode is 'exec';
                default is to listen on a random TCP port
        """
        if protocol not in self.MIGRATION_PROTOS:
            raise virt_vm.VMMigrateProtoUnknownError(protocol)

        error.base_context("migrating '%s'" % self.name)

        local = dest_host == "localhost"
        mig_fd_name = None

        if protocol == "fd":
            # Check if descriptors aren't None for local migration.
            if local and (fd_dst is None or fd_src is None):
                (fd_dst, fd_src) = os.pipe()

            mig_fd_name = "migfd_%d_%d" % (fd_src, time.time())
            self.send_fd(fd_src, mig_fd_name)
            os.close(fd_src)

        clone = self.clone()
        if (local and not (migration_exec_cmd_src and
                           "gzip" in migration_exec_cmd_src)):
            error.context("creating destination VM")
            if stable_check:
                # Pause the dest vm after creation
                extra_params = clone.params.get("extra_params", "") + " -S"
                clone.params["extra_params"] = extra_params
            clone.create(migration_mode=protocol, mac_source=self,
                         migration_fd=fd_dst,
                         migration_exec_cmd=migration_exec_cmd_dst)
            if fd_dst:
                os.close(fd_dst)
            error.context()

        try:
            if (self.params["display"] == "spice" and local and
                not (protocol == "exec" and
                     (migration_exec_cmd_src and
                      "gzip" in migration_exec_cmd_src))):
                host_ip = utils_net.get_host_ip_address(self.params)
                dest_port = clone.spice_options.get('spice_port', '')
                if self.params.get("spice_ssl") == "yes":
                    dest_tls_port = clone.spice_options.get("spice_tls_port",
                                                            "")
                    cert_s = clone.spice_options.get("spice_x509_server_subj",
                                                     "")
                    cert_subj = "%s" % cert_s[1:]
                    cert_subj += host_ip
                    cert_subj = "\"%s\"" % cert_subj
                else:
                    dest_tls_port = ""
                    cert_subj = ""
                logging.debug("Informing migration to spice client")
                commands = ["__com.redhat_spice_migrate_info",
                            "spice_migrate_info",
                            "client_migrate_info"]

                for command in commands:
                    try:
                        self.monitor.verify_supported_cmd(command)
                    except qemu_monitor.MonitorNotSupportedCmdError:
                        continue
                    # spice_migrate_info requires host_ip, dest_port
                    # client_migrate_info also requires protocol
                    cmdline = "%s hostname=%s" % (command, host_ip)
                    if command == "client_migrate_info":
                        cmdline += " ,protocol=%s" % self.params['display']
                    if dest_port:
                        cmdline += ",port=%s" % dest_port
                    if dest_tls_port:
                        cmdline += ",tls-port=%s" % dest_tls_port
                    if cert_subj:
                        cmdline += \
",cert-subject=%s" % cert_subj break self.monitor.send_args_cmd(cmdline) if protocol in ["tcp", "rdma", "x-rdma"]: if local: uri = protocol + ":localhost:%d" % clone.migration_port else: uri = protocol + ":%s:%d" % (dest_host, remote_port) elif protocol == "unix": uri = "unix:%s" % clone.migration_file elif protocol == "exec": if local: if not migration_exec_cmd_src: uri = '"exec:nc localhost %s"' % clone.migration_port else: uri = '"exec:%s"' % (migration_exec_cmd_src) else: uri = '"exec:%s"' % (migration_exec_cmd_src) elif protocol == "fd": uri = "fd:%s" % mig_fd_name if offline is True: self.monitor.cmd("stop") logging.info("Migrating to %s", uri) self.monitor.migrate(uri) if not_wait_for_migration: return clone if cancel_delay: time.sleep(cancel_delay) self.monitor.cmd("migrate_cancel") if not utils_misc.wait_for(self.mig_cancelled, 60, 2, 2, "Waiting for migration " "cancellation"): raise virt_vm.VMMigrateCancelError( "Cannot cancel migration") return self.wait_for_migration(timeout) if (local and (migration_exec_cmd_src and "gzip" in migration_exec_cmd_src)): error.context("creating destination VM") if stable_check: # Pause the dest vm after creation extra_params = clone.params.get("extra_params", "") + " -S" clone.params["extra_params"] = extra_params clone.create(migration_mode=protocol, mac_source=self, migration_fd=fd_dst, migration_exec_cmd=migration_exec_cmd_dst) self.verify_alive() # Report migration status if self.mig_succeeded(): logging.info("Migration completed successfully") elif self.mig_failed(): raise virt_vm.VMMigrateFailedError("Migration failed") else: raise virt_vm.VMMigrateFailedError("Migration ended with " "unknown status") # Switch self <-> clone temp = self.clone(copy_state=True) self.__dict__ = clone.__dict__ clone = temp # From now on, clone is the source VM that will soon be destroyed # and self is the destination VM that will remain alive. If this # is remote migration, self is a dead VM object. error.context("after migration") if local: time.sleep(1) self.verify_kernel_crash() self.verify_alive() if local and stable_check: try: save1 = os.path.join(save_path, "src-" + clone.instance) save2 = os.path.join(save_path, "dst-" + self.instance) clone.save_to_file(save1) self.save_to_file(save2) # Fail if we see deltas md5_save1 = utils.hash_file(save1) md5_save2 = utils.hash_file(save2) if md5_save1 != md5_save2: raise virt_vm.VMMigrateStateMismatchError() finally: if clean: if os.path.isfile(save1): os.remove(save1) if os.path.isfile(save2): os.remove(save2) finally: # If we're doing remote migration and it's completed successfully, # self points to a dead VM object if not not_wait_for_migration: if self.is_alive(): self.monitor.cmd("cont") clone.destroy(gracefully=False) @error.context_aware def reboot(self, session=None, method="shell", nic_index=0, timeout=virt_vm.BaseVM.REBOOT_TIMEOUT): """ Reboot the VM and wait for it to come back up by trying to log in until timeout expires. :param session: A shell session object or None. :param method: Reboot method. Can be "shell" (send a shell reboot command) or "system_reset" (send a system_reset monitor command). :param nic_index: Index of NIC to access in the VM, when logging in after rebooting. :param timeout: Time to wait for login to succeed (after rebooting). :return: A new shell session object. 
""" error.base_context("rebooting '%s'" % self.name, logging.info) error.context("before reboot") error.context() if method == "shell": session = session or self.login() session.sendline(self.params.get("reboot_command")) error.context("waiting for guest to go down", logging.info) if not utils_misc.wait_for( lambda: not session.is_responsive( timeout=self.CLOSE_SESSION_TIMEOUT), timeout / 2, 0, 1): raise virt_vm.VMRebootError("Guest refuses to go down") session.close() elif method == "system_reset": # Clear the event list of all QMP monitors qmp_monitors = [m for m in self.monitors if m.protocol == "qmp"] for m in qmp_monitors: m.clear_events() # Send a system_reset monitor command self.monitor.cmd("system_reset") # Look for RESET QMP events time.sleep(1) for m in qmp_monitors: if m.get_event("RESET"): logging.info("RESET QMP event received") else: raise virt_vm.VMRebootError("RESET QMP event not received " "after system_reset " "(monitor '%s')" % m.name) else: raise virt_vm.VMRebootError("Unknown reboot method: %s" % method) if self.params.get("mac_changeable") == "yes": utils_net.update_mac_ip_address(self, self.params) error.context("logging in after reboot", logging.info) return self.wait_for_login(nic_index, timeout=timeout) def send_key(self, keystr): """ Send a key event to the VM. :param keystr: A key event string (e.g. "ctrl-alt-delete") """ # For compatibility with versions of QEMU that do not recognize all # key names: replace keyname with the hex value from the dict, which # QEMU will definitely accept key_mapping = {"semicolon": "0x27", "comma": "0x33", "dot": "0x34", "slash": "0x35"} for key, value in key_mapping.items(): keystr = keystr.replace(key, value) self.monitor.sendkey(keystr) time.sleep(0.2) # should this really be expected from VMs of all hypervisor types? 
    def screendump(self, filename, debug=True):
        try:
            if self.monitor:
                self.monitor.screendump(filename=filename, debug=debug)
        except qemu_monitor.MonitorError, e:
            logging.warn(e)

    def save_to_file(self, path):
        """
        Override BaseVM save_to_file method
        """
        self.verify_status('paused')  # Throws exception if not
        # Set high speed 1TB/s
        self.monitor.migrate_set_speed(str(2 << 39))
        self.monitor.migrate_set_downtime(self.MIGRATE_TIMEOUT)
        logging.debug("Saving VM %s to %s" % (self.name, path))
        # Can only check status if background migration
        self.monitor.migrate("exec:cat>%s" % path, wait=False)
        utils_misc.wait_for(
            # no monitor.migrate-status method
            lambda: re.search("(status.*completed)",
                              str(self.monitor.info("migrate")), re.M),
            self.MIGRATE_TIMEOUT, 2, 2,
            "Waiting for save to %s to complete" % path)
        # Restore the speed and downtime to default values
        self.monitor.migrate_set_speed(str(32 << 20))
        self.monitor.migrate_set_downtime(0.03)
        # Base class defines VM must be off after a save
        self.monitor.cmd("system_reset")
        self.verify_status('paused')  # Throws exception if not

    def restore_from_file(self, path):
        """
        Override BaseVM restore_from_file method
        """
        self.verify_status('paused')  # Throws exception if not
        logging.debug("Restoring VM %s from %s" % (self.name, path))
        # Rely on create() in incoming migration mode to do the 'right thing'
        self.create(name=self.name, params=self.params, root_dir=self.root_dir,
                    timeout=self.MIGRATE_TIMEOUT, migration_mode="exec",
                    migration_exec_cmd="cat " + path, mac_source=self)
        self.verify_status('running')  # Throws exception if not

    def savevm(self, tag_name):
        """
        Override BaseVM savevm method
        """
        self.verify_status('paused')  # Throws exception if not
        logging.debug("Saving VM %s to %s" % (self.name, tag_name))
        self.monitor.send_args_cmd("savevm id=%s" % tag_name)
        self.monitor.cmd("system_reset")
        self.verify_status('paused')  # Throws exception if not

    def loadvm(self, tag_name):
        """
        Override BaseVM loadvm method
        """
        self.verify_status('paused')  # Throws exception if not
        logging.debug("Loading VM %s from %s" % (self.name, tag_name))
        self.monitor.send_args_cmd("loadvm id=%s" % tag_name)
        self.verify_status('paused')  # Throws exception if not

    def pause(self):
        """
        Pause the VM operation.
        """
        self.monitor.cmd("stop")

    def resume(self):
        """
        Resume the VM operation in case it's stopped.
        """
        self.monitor.cmd("cont")

    def set_link(self, netdev_name, up):
        """
        Set link up/down.

        :param netdev_name: Link name
        :param up: Bool value, True to set this link up, False to set it down
        """
        self.monitor.set_link(netdev_name, up)

    def get_block_old(self, blocks_info, p_dict={}):
        """
        Get specified block device from monitor's info block command.
        The block device is defined by parameters in p_dict.

        :param blocks_info: the results of monitor command 'info block'
        :param p_dict: Dictionary that contains parameters and their values
                       used to define the specified block device.
        :return: Matched block device name, None when no device matches.
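
        Example (hypothetical values, for illustration only):
            device = vm.get_block_old(str(vm.monitor.info("block")),
                                      {"file": "/tmp/disk.qcow2"})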
""" if isinstance(blocks_info, str): for block in blocks_info.splitlines(): match = True for key, value in p_dict.iteritems(): if value is True: check_str = "%s=1" % key elif value is False: check_str = "%s=0" % key else: check_str = "%s=%s" % (key, value) if check_str not in block: match = False break if match: return block.split(":")[0] else: for block in blocks_info: match = True for key, value in p_dict.iteritems(): if isinstance(value, bool): check_str = "u'%s': %s" % (key, value) else: check_str = "u'%s': u'%s'" % (key, value) if check_str not in str(block): match = False break if match: return block['device'] return None def process_info_block(self, blocks_info): """ process the info block, so that can deal with the new and old qemu formart. :param blocks_info: the output of qemu command 'info block' """ block_list = [] block_entry = [] for block in blocks_info.splitlines(): if block: block_entry.append(block.strip()) else: block_list.append(' '.join(block_entry)) block_entry = [] # don't forget the last one block_list.append(' '.join(block_entry)) return block_list def get_block(self, p_dict={}): """ Get specified block device from monitor's info block command. The block device is defined by parameter in p_dict. :param p_dict: Dictionary that contains parameters and its value used to define specified block device. :return: Matched block device name, None when not find any device. """ blocks_info = self.monitor.info("block") block = self.get_block_old(blocks_info, p_dict) if block: return block block_list = self.process_info_block(blocks_info) for block in block_list: for key, value in p_dict.iteritems(): # for new qemu we just deal with key = [removable, # file,backing_file], for other types key, we should # fixup later logging.info("block = %s" % block) if key == 'removable': if value is False: if not 'Removable device' in block: return block.split(":")[0] elif value is True: if 'Removable device' in block: return block.split(":")[0] # file in key means both file and backing_file if ('file' in key) and (value in block): return block.split(":")[0] return None def check_block_locked(self, value): """ Check whether specified block device is locked or not. Return True, if device is locked, else False. :param vm: VM object :param value: Parameter that can specify block device. Can be any possible identification of a device, Such as device name/image file name/... :return: True if device is locked, False if device is unlocked. """ assert value, "Device identification not specified" blocks_info = self.monitor.info("block") assert value in str(blocks_info), \ "Device %s not listed in monitor's output" % value if isinstance(blocks_info, str): lock_str = "locked=1" lock_str_new = "locked" no_lock_str = "not locked" for block in blocks_info.splitlines(): if (value in block) and (lock_str in block): return True # deal with new qemu block_list = self.process_info_block(blocks_info) for block_new in block_list: if (value in block_new) and ("Removable device" in block_new): if no_lock_str in block_new: return False elif lock_str_new in block_new: return True else: for block in blocks_info: if value in str(block): return block['locked'] return False def live_snapshot(self, base_file, snapshot_file, snapshot_format="qcow2"): """ Take a live disk snapshot. :param base_file: base file name :param snapshot_file: snapshot file name :param snapshot_format: snapshot file format :return: File name of disk snapshot. 
""" device = self.get_block({"file": base_file}) output = self.monitor.live_snapshot(device, snapshot_file, snapshot_format) logging.debug(output) device = self.get_block({"file": snapshot_file}) if device: current_file = device else: current_file = None return current_file def block_stream(self, device, speed, base=None, correct=True): """ start to stream block device, aka merge snapshot; :param device: device ID; :param speed: limited speed, default unit B/s; :param base: base file; :param correct: auto correct cmd, correct by default """ cmd = self.params.get("block_stream_cmd", "block-stream") return self.monitor.block_stream(device, speed, base, cmd, correct=correct) def block_mirror(self, device, target, speed, sync, format, mode="absolute-paths", correct=True): """ Mirror block device to target file; :param device: device ID :param target: destination image file name; :param speed: max limited speed, default unit is B/s; :param sync: what parts of the disk image should be copied to the destination; :param mode: new image open mode :param format: target image format :param correct: auto correct cmd, correct by default """ cmd = self.params.get("block_mirror_cmd", "drive-mirror") return self.monitor.block_mirror(device, target, speed, sync, format, mode, cmd, correct=correct) def block_reopen(self, device, new_image, format="qcow2", correct=True): """ Reopen a new image, no need to do this step in rhel7 host :param device: device ID :param new_image: new image filename :param format: new image format :param correct: auto correct cmd, correct by default """ cmd = self.params.get("block_reopen_cmd", "block-job-complete") return self.monitor.block_reopen(device, new_image, format, cmd, correct=correct) def cancel_block_job(self, device, correct=True): """ cancel active job on the image_file :param device: device ID :param correct: auto correct cmd, correct by default """ cmd = self.params.get("block_job_cancel_cmd", "block-job-cancel") return self.monitor.cancel_block_job(device, cmd, correct=correct) def set_job_speed(self, device, speed="0", correct=True): """ set max speed of block job; :param device: device ID :param speed: max speed of block job :param correct: auto correct cmd, correct by default """ cmd = self.params.get("set_block_job_speed", "block-job-set-speed") return self.monitor.set_block_job_speed(device, speed, cmd, correct=correct) def get_job_status(self, device): """ get block job info; :param device: device ID """ return self.monitor.query_block_job(device)<|fim▁end|>
class attributes is used.
<|file_name|>FriendlyTimeTest.java<|end_file_name|><|fim▁begin|><|fim▁hole|> import com.comandante.creeper.common.FriendlyTime; import org.junit.Test; public class FriendlyTimeTest { @Test public void testFriendlyParsing() throws Exception { FriendlyTime friendlyTime = new FriendlyTime(400); System.out.println("Friendly Long: " + friendlyTime.getFriendlyFormatted()); System.out.println("Friendly Short: " + friendlyTime.getFriendlyFormattedShort()); } }<|fim▁end|>
package com.comandante.creeper.command.commands;
<|file_name|>pattern-tyvar-2.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. extern mod extra; enum bar { t1((), Option<~[int]>), t2, } // n.b. my change changes this error message, but I think it's right -- tjc fn foo(t: bar) -> int { match t { t1(_, Some(x)) => { return x * 3; } _ => { fail!(); } } } //~ ERROR binary operation * cannot be applied to <|fim▁hole|><|fim▁end|>
fn main() { }
<|file_name|>xkcd_rgb.py<|end_file_name|><|fim▁begin|>xkcd_rgb = {'acid green': '#8ffe09', 'adobe': '#bd6c48', 'algae': '#54ac68', 'algae green': '#21c36f', 'almost black': '#070d0d', 'amber': '#feb308', 'amethyst': '#9b5fc0', 'apple': '#6ecb3c', 'apple green': '#76cd26', 'apricot': '#ffb16d', 'aqua': '#13eac9', 'aqua blue': '#02d8e9', 'aqua green': '#12e193', 'aqua marine': '#2ee8bb', 'aquamarine': '#04d8b2', 'army green': '#4b5d16', 'asparagus': '#77ab56', 'aubergine': '#3d0734', 'auburn': '#9a3001', 'avocado': '#90b134', 'avocado green': '#87a922', 'azul': '#1d5dec', 'azure': '#069af3', 'baby blue': '#a2cffe', 'baby green': '#8cff9e', 'baby pink': '#ffb7ce', 'baby poo': '#ab9004', 'baby poop': '#937c00', 'baby poop green': '#8f9805', 'baby puke green': '#b6c406', 'baby purple': '#ca9bf7', 'baby shit brown': '#ad900d', 'baby shit green': '#889717', 'banana': '#ffff7e', 'banana yellow': '#fafe4b', 'barbie pink': '#fe46a5', 'barf green': '#94ac02', 'barney': '#ac1db8', 'barney purple': '#a00498', 'battleship grey': '#6b7c85', 'beige': '#e6daa6', 'berry': '#990f4b', 'bile': '#b5c306', 'black': '#000000', 'bland': '#afa88b', 'blood': '#770001', 'blood orange': '#fe4b03', 'blood red': '#980002', 'blue': '#0343df', 'blue blue': '#2242c7', 'blue green': '#137e6d', 'blue grey': '#607c8e', 'blue purple': '#5729ce', 'blue violet': '#5d06e9', 'blue with a hint of purple': '#533cc6', 'blue/green': '#0f9b8e', 'blue/grey': '#758da3', 'blue/purple': '#5a06ef', 'blueberry': '#464196', 'bluegreen': '#017a79', 'bluegrey': '#85a3b2', 'bluey green': '#2bb179', 'bluey grey': '#89a0b0', 'bluey purple': '#6241c7', 'bluish': '#2976bb', 'bluish green': '#10a674', 'bluish grey': '#748b97', 'bluish purple': '#703be7', 'blurple': '#5539cc', 'blush': '#f29e8e', 'blush pink': '#fe828c', 'booger': '#9bb53c', 'booger green': '#96b403', 'bordeaux': '#7b002c', 'boring green': '#63b365', 'bottle green': '#044a05', 'brick': '#a03623', 'brick orange': '#c14a09', 'brick red': '#8f1402', 'bright aqua': '#0bf9ea', 'bright blue': '#0165fc', 'bright cyan': '#41fdfe', 'bright green': '#01ff07', 'bright lavender': '#c760ff', 'bright light blue': '#26f7fd', 'bright light green': '#2dfe54', 'bright lilac': '#c95efb', 'bright lime': '#87fd05', 'bright lime green': '#65fe08', 'bright magenta': '#ff08e8', 'bright olive': '#9cbb04', 'bright orange': '#ff5b00', 'bright pink': '#fe01b1', 'bright purple': '#be03fd', 'bright red': '#ff000d', 'bright sea green': '#05ffa6', 'bright sky blue': '#02ccfe', 'bright teal': '#01f9c6', 'bright turquoise': '#0ffef9', 'bright violet': '#ad0afd', 'bright yellow': '#fffd01', 'bright yellow green': '#9dff00', 'british racing green': '#05480d', 'bronze': '#a87900', 'brown': '#653700', 'brown green': '#706c11', 'brown grey': '#8d8468', 'brown orange': '#b96902', 'brown red': '#922b05', 'brown yellow': '#b29705', 'brownish': '#9c6d57', 'brownish green': '#6a6e09', 'brownish grey': '#86775f', 'brownish orange': '#cb7723', 'brownish pink': '#c27e79', 'brownish purple': '#76424e', 'brownish red': '#9e3623', 'brownish yellow': '#c9b003', 'browny green': '#6f6c0a', 'browny orange': '#ca6b02', 'bruise': '#7e4071', 'bubble gum pink': '#ff69af', 'bubblegum': '#ff6cb5', 'bubblegum pink': '#fe83cc', 'buff': '#fef69e', 'burgundy': '#610023', 'burnt orange': '#c04e01', 'burnt red': '#9f2305', 'burnt siena': '#b75203', 'burnt sienna': '#b04e0f', 'burnt umber': '#a0450e', 'burnt yellow': '#d5ab09', 'burple': '#6832e3', 'butter': '#ffff81', 'butter yellow': '#fffd74', 'butterscotch': '#fdb147', 'cadet blue': '#4e7496', 
'camel': '#c69f59', 'camo': '#7f8f4e', 'camo green': '#526525', 'camouflage green': '#4b6113', 'canary': '#fdff63', 'canary yellow': '#fffe40', 'candy pink': '#ff63e9', 'caramel': '#af6f09', 'carmine': '#9d0216', 'carnation': '#fd798f', 'carnation pink': '#ff7fa7', 'carolina blue': '#8ab8fe', 'celadon': '#befdb7', 'celery': '#c1fd95', 'cement': '#a5a391', 'cerise': '#de0c62', 'cerulean': '#0485d1', 'cerulean blue': '#056eee', 'charcoal': '#343837', 'charcoal grey': '#3c4142', 'chartreuse': '#c1f80a', 'cherry': '#cf0234', 'cherry red': '#f7022a', 'chestnut': '#742802', 'chocolate': '#3d1c02', 'chocolate brown': '#411900', 'cinnamon': '#ac4f06', 'claret': '#680018', 'clay': '#b66a50', 'clay brown': '#b2713d', 'clear blue': '#247afd', 'cloudy blue': '#acc2d9', 'cobalt': '#1e488f', 'cobalt blue': '#030aa7', 'cocoa': '#875f42', 'coffee': '#a6814c', 'cool blue': '#4984b8', 'cool green': '#33b864', 'cool grey': '#95a3a6', 'copper': '#b66325', 'coral': '#fc5a50', 'coral pink': '#ff6163', 'cornflower': '#6a79f7', 'cornflower blue': '#5170d7', 'cranberry': '#9e003a', 'cream': '#ffffc2', 'creme': '#ffffb6', 'crimson': '#8c000f', 'custard': '#fffd78', 'cyan': '#00ffff', 'dandelion': '#fedf08', 'dark': '#1b2431', 'dark aqua': '#05696b', 'dark aquamarine': '#017371', 'dark beige': '#ac9362', 'dark blue': '#00035b', 'dark blue green': '#005249', 'dark blue grey': '#1f3b4d', 'dark brown': '#341c02', 'dark coral': '#cf524e', 'dark cream': '#fff39a', 'dark cyan': '#0a888a', 'dark forest green': '#002d04', 'dark fuchsia': '#9d0759', 'dark gold': '#b59410', 'dark grass green': '#388004', 'dark green': '#033500', 'dark green blue': '#1f6357', 'dark grey': '#363737', 'dark grey blue': '#29465b', 'dark hot pink': '#d90166', 'dark indigo': '#1f0954', 'dark khaki': '#9b8f55', 'dark lavender': '#856798', 'dark lilac': '#9c6da5', 'dark lime': '#84b701', 'dark lime green': '#7ebd01', 'dark magenta': '#960056', 'dark maroon': '#3c0008', 'dark mauve': '#874c62', 'dark mint': '#48c072', 'dark mint green': '#20c073', 'dark mustard': '#a88905', 'dark navy': '#000435', 'dark navy blue': '#00022e', 'dark olive': '#373e02', 'dark olive green': '#3c4d03', 'dark orange': '#c65102', 'dark pastel green': '#56ae57', 'dark peach': '#de7e5d', 'dark periwinkle': '#665fd1', 'dark pink': '#cb416b', 'dark plum': '#3f012c', 'dark purple': '#35063e', 'dark red': '#840000', 'dark rose': '#b5485d', 'dark royal blue': '#02066f', 'dark sage': '#598556', 'dark salmon': '#c85a53', 'dark sand': '#a88f59', 'dark sea green': '#11875d', 'dark seafoam': '#1fb57a', 'dark seafoam green': '#3eaf76', 'dark sky blue': '#448ee4', 'dark slate blue': '#214761', 'dark tan': '#af884a', 'dark taupe': '#7f684e', 'dark teal': '#014d4e', 'dark turquoise': '#045c5a', 'dark violet': '#34013f', 'dark yellow': '#d5b60a', 'dark yellow green': '#728f02', 'darkblue': '#030764', 'darkgreen': '#054907', 'darkish blue': '#014182', 'darkish green': '#287c37', 'darkish pink': '#da467d', 'darkish purple': '#751973', 'darkish red': '#a90308', 'deep aqua': '#08787f', 'deep blue': '#040273', 'deep brown': '#410200', 'deep green': '#02590f', 'deep lavender': '#8d5eb7', 'deep lilac': '#966ebd', 'deep magenta': '#a0025c', 'deep orange': '#dc4d01', 'deep pink': '#cb0162', 'deep purple': '#36013f', 'deep red': '#9a0200', 'deep rose': '#c74767', 'deep sea blue': '#015482', 'deep sky blue': '#0d75f8', 'deep teal': '#00555a', 'deep turquoise': '#017374', 'deep violet': '#490648', 'denim': '#3b638c', 'denim blue': '#3b5b92', 'desert': '#ccad60', 'diarrhea': '#9f8303', 'dirt': '#8a6e45', 
'dirt brown': '#836539', 'dirty blue': '#3f829d', 'dirty green': '#667e2c', 'dirty orange': '#c87606', 'dirty pink': '#ca7b80', 'dirty purple': '#734a65', 'dirty yellow': '#cdc50a', 'dodger blue': '#3e82fc', 'drab': '#828344', 'drab green': '#749551', 'dried blood': '#4b0101', 'duck egg blue': '#c3fbf4', 'dull blue': '#49759c', 'dull brown': '#876e4b', 'dull green': '#74a662', 'dull orange': '#d8863b', 'dull pink': '#d5869d', 'dull purple': '#84597e', 'dull red': '#bb3f3f', 'dull teal': '#5f9e8f', 'dull yellow': '#eedc5b', 'dusk': '#4e5481', 'dusk blue': '#26538d', 'dusky blue': '#475f94', 'dusky pink': '#cc7a8b', 'dusky purple': '#895b7b', 'dusky rose': '#ba6873', 'dust': '#b2996e', 'dusty blue': '#5a86ad', 'dusty green': '#76a973', 'dusty lavender': '#ac86a8', 'dusty orange': '#f0833a', 'dusty pink': '#d58a94', 'dusty purple': '#825f87', 'dusty red': '#b9484e', 'dusty rose': '#c0737a', 'dusty teal': '#4c9085', 'earth': '#a2653e', 'easter green': '#8cfd7e', 'easter purple': '#c071fe', 'ecru': '#feffca', 'egg shell': '#fffcc4', 'eggplant': '#380835', 'eggplant purple': '#430541', 'eggshell': '#ffffd4', 'eggshell blue': '#c4fff7', 'electric blue': '#0652ff', 'electric green': '#21fc0d', 'electric lime': '#a8ff04', 'electric pink': '#ff0490', 'electric purple': '#aa23ff', 'emerald': '#01a049', 'emerald green': '#028f1e', 'evergreen': '#05472a', 'faded blue': '#658cbb', 'faded green': '#7bb274', 'faded orange': '#f0944d', 'faded pink': '#de9dac', 'faded purple': '#916e99', 'faded red': '#d3494e', 'faded yellow': '#feff7f', 'fawn': '#cfaf7b', 'fern': '#63a950', 'fern green': '#548d44', 'fire engine red': '#fe0002', 'flat blue': '#3c73a8', 'flat green': '#699d4c', 'fluorescent green': '#08ff08', 'fluro green': '#0aff02', 'foam green': '#90fda9', 'forest': '#0b5509', 'forest green': '#06470c', 'forrest green': '#154406', 'french blue': '#436bad', 'fresh green': '#69d84f', 'frog green': '#58bc08', 'fuchsia': '#ed0dd9', 'gold': '#dbb40c', 'golden': '#f5bf03', 'golden brown': '#b27a01', 'golden rod': '#f9bc08', 'golden yellow': '#fec615', 'goldenrod': '#fac205', 'grape': '#6c3461', 'grape purple': '#5d1451', 'grapefruit': '#fd5956', 'grass': '#5cac2d', 'grass green': '#3f9b0b', 'grassy green': '#419c03', 'green': '#15b01a', 'green apple': '#5edc1f', 'green blue': '#06b48b', 'green brown': '#544e03', 'green grey': '#77926f', 'green teal': '#0cb577', 'green yellow': '#c9ff27', 'green/blue': '#01c08d', 'green/yellow': '#b5ce08', 'greenblue': '#23c48b', 'greenish': '#40a368', 'greenish beige': '#c9d179', 'greenish blue': '#0b8b87', 'greenish brown': '#696112', 'greenish cyan': '#2afeb7', 'greenish grey': '#96ae8d', 'greenish tan': '#bccb7a', 'greenish teal': '#32bf84', 'greenish turquoise': '#00fbb0', 'greenish yellow': '#cdfd02', 'greeny blue': '#42b395', 'greeny brown': '#696006', 'greeny grey': '#7ea07a', 'greeny yellow': '#c6f808', 'grey': '#929591', 'grey blue': '#6b8ba4', 'grey brown': '#7f7053', 'grey green': '#789b73', 'grey pink': '#c3909b', 'grey purple': '#826d8c', 'grey teal': '#5e9b8a', 'grey/blue': '#647d8e', 'grey/green': '#86a17d', 'greyblue': '#77a1b5', 'greyish': '#a8a495', 'greyish blue': '#5e819d', 'greyish brown': '#7a6a4f', 'greyish green': '#82a67d', 'greyish pink': '#c88d94', 'greyish purple': '#887191', 'greyish teal': '#719f91', 'gross green': '#a0bf16', 'gunmetal': '#536267', 'hazel': '#8e7618', 'heather': '#a484ac', 'heliotrope': '#d94ff5', 'highlighter green': '#1bfc06', 'hospital green': '#9be5aa', 'hot green': '#25ff29', 'hot magenta': '#f504c9', 'hot pink': '#ff028d', 
'hot purple': '#cb00f5', 'hunter green': '#0b4008', 'ice': '#d6fffa', 'ice blue': '#d7fffe', 'icky green': '#8fae22', 'indian red': '#850e04', 'indigo': '#380282', 'indigo blue': '#3a18b1', 'iris': '#6258c4', 'irish green': '#019529', 'ivory': '#ffffcb', 'jade': '#1fa774', 'jade green': '#2baf6a', 'jungle green': '#048243', 'kelley green': '#009337', 'kelly green': '#02ab2e', 'kermit green': '#5cb200', 'key lime': '#aeff6e', 'khaki': '#aaa662', 'khaki green': '#728639', 'kiwi': '#9cef43', 'kiwi green': '#8ee53f', 'lavender': '#c79fef', 'lavender blue': '#8b88f8', 'lavender pink': '#dd85d7', 'lawn green': '#4da409', 'leaf': '#71aa34', 'leaf green': '#5ca904', 'leafy green': '#51b73b', 'leather': '#ac7434', 'lemon': '#fdff52', 'lemon green': '#adf802', 'lemon lime': '#bffe28', 'lemon yellow': '#fdff38', 'lichen': '#8fb67b', 'light aqua': '#8cffdb', 'light aquamarine': '#7bfdc7', 'light beige': '#fffeb6', 'light blue': '#95d0fc', 'light blue green': '#7efbb3', 'light blue grey': '#b7c9e2', 'light bluish green': '#76fda8', 'light bright green': '#53fe5c', 'light brown': '#ad8150', 'light burgundy': '#a8415b', 'light cyan': '#acfffc', 'light eggplant': '#894585', 'light forest green': '#4f9153', 'light gold': '#fddc5c', 'light grass green': '#9af764', 'light green': '#96f97b', 'light green blue': '#56fca2', 'light greenish blue': '#63f7b4', 'light grey': '#d8dcd6', 'light grey blue': '#9dbcd4', 'light grey green': '#b7e1a1', 'light indigo': '#6d5acf', 'light khaki': '#e6f2a2', 'light lavendar': '#efc0fe', 'light lavender': '#dfc5fe', 'light light blue': '#cafffb', 'light light green': '#c8ffb0', 'light lilac': '#edc8ff', 'light lime': '#aefd6c', 'light lime green': '#b9ff66', 'light magenta': '#fa5ff7', 'light maroon': '#a24857', 'light mauve': '#c292a1', 'light mint': '#b6ffbb', 'light mint green': '#a6fbb2', 'light moss green': '#a6c875', 'light mustard': '#f7d560', 'light navy': '#155084', 'light navy blue': '#2e5a88', 'light neon green': '#4efd54', 'light olive': '#acbf69', 'light olive green': '#a4be5c', 'light orange': '#fdaa48', 'light pastel green': '#b2fba5', 'light pea green': '#c4fe82', 'light peach': '#ffd8b1', 'light periwinkle': '#c1c6fc', 'light pink': '#ffd1df', 'light plum': '#9d5783', 'light purple': '#bf77f6', 'light red': '#ff474c', 'light rose': '#ffc5cb', 'light royal blue': '#3a2efe', 'light sage': '#bcecac', 'light salmon': '#fea993', 'light sea green': '#98f6b0', 'light seafoam': '#a0febf', 'light seafoam green': '#a7ffb5', 'light sky blue': '#c6fcff', 'light tan': '#fbeeac', 'light teal': '#90e4c1', 'light turquoise': '#7ef4cc', 'light urple': '#b36ff6', 'light violet': '#d6b4fc', 'light yellow': '#fffe7a', 'light yellow green': '#ccfd7f', 'light yellowish green': '#c2ff89', 'lightblue': '#7bc8f6', 'lighter green': '#75fd63', 'lighter purple': '#a55af4', 'lightgreen': '#76ff7b', 'lightish blue': '#3d7afd', 'lightish green': '#61e160', 'lightish purple': '#a552e6', 'lightish red': '#fe2f4a', 'lilac': '#cea2fd', 'liliac': '#c48efd', 'lime': '#aaff32', 'lime green': '#89fe05', 'lime yellow': '#d0fe1d', 'lipstick': '#d5174e', 'lipstick red': '#c0022f', 'macaroni and cheese': '#efb435', 'magenta': '#c20078', 'mahogany': '#4a0100', 'maize': '#f4d054', 'mango': '#ffa62b', 'manilla': '#fffa86', 'marigold': '#fcc006', 'marine': '#042e60', 'marine blue': '#01386a', 'maroon': '#650021', 'mauve': '#ae7181', 'medium blue': '#2c6fbb', 'medium brown': '#7f5112', 'medium green': '#39ad48', 'medium grey': '#7d7f7c', 'medium pink': '#f36196', 'medium purple': '#9e43a2', 'melon': 
'#ff7855', 'merlot': '#730039', 'metallic blue': '#4f738e', 'mid blue': '#276ab3', 'mid green': '#50a747', 'midnight': '#03012d', 'midnight blue': '#020035', 'midnight purple': '#280137', 'military green': '#667c3e', 'milk chocolate': '#7f4e1e', 'mint': '#9ffeb0', 'mint green': '#8fff9f', 'minty green': '#0bf77d', 'mocha': '#9d7651', 'moss': '#769958', 'moss green': '#658b38', 'mossy green': '#638b27', 'mud': '#735c12', 'mud brown': '#60460f', 'mud green': '#606602', 'muddy brown': '#886806', 'muddy green': '#657432', 'muddy yellow': '#bfac05', 'mulberry': '#920a4e', 'murky green': '#6c7a0e', 'mushroom': '#ba9e88', 'mustard': '#ceb301', 'mustard brown': '#ac7e04', 'mustard green': '#a8b504', 'mustard yellow': '#d2bd0a', 'muted blue': '#3b719f', 'muted green': '#5fa052', 'muted pink': '#d1768f', 'muted purple': '#805b87', 'nasty green': '#70b23f', 'navy': '#01153e', 'navy blue': '#001146', 'navy green': '#35530a', 'neon blue': '#04d9ff', 'neon green': '#0cff0c', 'neon pink': '#fe019a', 'neon purple': '#bc13fe', 'neon red': '#ff073a', 'neon yellow': '#cfff04', 'nice blue': '#107ab0', 'night blue': '#040348', 'ocean': '#017b92', 'ocean blue': '#03719c', 'ocean green': '#3d9973', 'ocher': '#bf9b0c', 'ochre': '#bf9005', 'ocre': '#c69c04', 'off blue': '#5684ae', 'off green': '#6ba353', 'off white': '#ffffe4', 'off yellow': '#f1f33f', 'old pink': '#c77986', 'old rose': '#c87f89', 'olive': '#6e750e', 'olive brown': '#645403', 'olive drab': '#6f7632', 'olive green': '#677a04', 'olive yellow': '#c2b709', 'orange': '#f97306', 'orange brown': '#be6400', 'orange pink': '#ff6f52', 'orange red': '#fd411e', 'orange yellow': '#ffad01', 'orangeish': '#fd8d49', 'orangered': '#fe420f', 'orangey brown': '#b16002', 'orangey red': '#fa4224', 'orangey yellow': '#fdb915', 'orangish': '#fc824a', 'orangish brown': '#b25f03', 'orangish red': '#f43605', 'orchid': '#c875c4', 'pale': '#fff9d0', 'pale aqua': '#b8ffeb', 'pale blue': '#d0fefe', 'pale brown': '#b1916e', 'pale cyan': '#b7fffa', 'pale gold': '#fdde6c', 'pale green': '#c7fdb5', 'pale grey': '#fdfdfe', 'pale lavender': '#eecffe', 'pale light green': '#b1fc99', 'pale lilac': '#e4cbff', 'pale lime': '#befd73', 'pale lime green': '#b1ff65', 'pale magenta': '#d767ad', 'pale mauve': '#fed0fc', 'pale olive': '#b9cc81', 'pale olive green': '#b1d27b', 'pale orange': '#ffa756', 'pale peach': '#ffe5ad', 'pale pink': '#ffcfdc', 'pale purple': '#b790d4', 'pale red': '#d9544d', 'pale rose': '#fdc1c5', 'pale salmon': '#ffb19a', 'pale sky blue': '#bdf6fe', 'pale teal': '#82cbb2', 'pale turquoise': '#a5fbd5', 'pale violet': '#ceaefa', 'pale yellow': '#ffff84', 'parchment': '#fefcaf', 'pastel blue': '#a2bffe',<|fim▁hole|> 'pastel green': '#b0ff9d', 'pastel orange': '#ff964f', 'pastel pink': '#ffbacd', 'pastel purple': '#caa0ff', 'pastel red': '#db5856', 'pastel yellow': '#fffe71', 'pea': '#a4bf20', 'pea green': '#8eab12', 'pea soup': '#929901', 'pea soup green': '#94a617', 'peach': '#ffb07c', 'peachy pink': '#ff9a8a', 'peacock blue': '#016795', 'pear': '#cbf85f', 'periwinkle': '#8e82fe', 'periwinkle blue': '#8f99fb', 'perrywinkle': '#8f8ce7', 'petrol': '#005f6a', 'pig pink': '#e78ea5', 'pine': '#2b5d34', 'pine green': '#0a481e', 'pink': '#ff81c0', 'pink purple': '#db4bda', 'pink red': '#f5054f', 'pink/purple': '#ef1de7', 'pinkish': '#d46a7e', 'pinkish brown': '#b17261', 'pinkish grey': '#c8aca9', 'pinkish orange': '#ff724c', 'pinkish purple': '#d648d7', 'pinkish red': '#f10c45', 'pinkish tan': '#d99b82', 'pinky': '#fc86aa', 'pinky purple': '#c94cbe', 'pinky red': '#fc2647', 
'piss yellow': '#ddd618', 'pistachio': '#c0fa8b', 'plum': '#580f41', 'plum purple': '#4e0550', 'poison green': '#40fd14', 'poo': '#8f7303', 'poo brown': '#885f01', 'poop': '#7f5e00', 'poop brown': '#7a5901', 'poop green': '#6f7c00', 'powder blue': '#b1d1fc', 'powder pink': '#ffb2d0', 'primary blue': '#0804f9', 'prussian blue': '#004577', 'puce': '#a57e52', 'puke': '#a5a502', 'puke brown': '#947706', 'puke green': '#9aae07', 'puke yellow': '#c2be0e', 'pumpkin': '#e17701', 'pumpkin orange': '#fb7d07', 'pure blue': '#0203e2', 'purple': '#7e1e9c', 'purple blue': '#632de9', 'purple brown': '#673a3f', 'purple grey': '#866f85', 'purple pink': '#e03fd8', 'purple red': '#990147', 'purple/blue': '#5d21d0', 'purple/pink': '#d725de', 'purpleish': '#98568d', 'purpleish blue': '#6140ef', 'purpleish pink': '#df4ec8', 'purpley': '#8756e4', 'purpley blue': '#5f34e7', 'purpley grey': '#947e94', 'purpley pink': '#c83cb9', 'purplish': '#94568c', 'purplish blue': '#601ef9', 'purplish brown': '#6b4247', 'purplish grey': '#7a687f', 'purplish pink': '#ce5dae', 'purplish red': '#b0054b', 'purply': '#983fb2', 'purply blue': '#661aee', 'purply pink': '#f075e6', 'putty': '#beae8a', 'racing green': '#014600', 'radioactive green': '#2cfa1f', 'raspberry': '#b00149', 'raw sienna': '#9a6200', 'raw umber': '#a75e09', 'really light blue': '#d4ffff', 'red': '#e50000', 'red brown': '#8b2e16', 'red orange': '#fd3c06', 'red pink': '#fa2a55', 'red purple': '#820747', 'red violet': '#9e0168', 'red wine': '#8c0034', 'reddish': '#c44240', 'reddish brown': '#7f2b0a', 'reddish grey': '#997570', 'reddish orange': '#f8481c', 'reddish pink': '#fe2c54', 'reddish purple': '#910951', 'reddy brown': '#6e1005', 'rich blue': '#021bf9', 'rich purple': '#720058', 'robin egg blue': '#8af1fe', "robin's egg": '#6dedfd', "robin's egg blue": '#98eff9', 'rosa': '#fe86a4', 'rose': '#cf6275', 'rose pink': '#f7879a', 'rose red': '#be013c', 'rosy pink': '#f6688e', 'rouge': '#ab1239', 'royal': '#0c1793', 'royal blue': '#0504aa', 'royal purple': '#4b006e', 'ruby': '#ca0147', 'russet': '#a13905', 'rust': '#a83c09', 'rust brown': '#8b3103', 'rust orange': '#c45508', 'rust red': '#aa2704', 'rusty orange': '#cd5909', 'rusty red': '#af2f0d', 'saffron': '#feb209', 'sage': '#87ae73', 'sage green': '#88b378', 'salmon': '#ff796c', 'salmon pink': '#fe7b7c', 'sand': '#e2ca76', 'sand brown': '#cba560', 'sand yellow': '#fce166', 'sandstone': '#c9ae74', 'sandy': '#f1da7a', 'sandy brown': '#c4a661', 'sandy yellow': '#fdee73', 'sap green': '#5c8b15', 'sapphire': '#2138ab', 'scarlet': '#be0119', 'sea': '#3c9992', 'sea blue': '#047495', 'sea green': '#53fca1', 'seafoam': '#80f9ad', 'seafoam blue': '#78d1b6', 'seafoam green': '#7af9ab', 'seaweed': '#18d17b', 'seaweed green': '#35ad6b', 'sepia': '#985e2b', 'shamrock': '#01b44c', 'shamrock green': '#02c14d', 'shit': '#7f5f00', 'shit brown': '#7b5804', 'shit green': '#758000', 'shocking pink': '#fe02a2', 'sick green': '#9db92c', 'sickly green': '#94b21c', 'sickly yellow': '#d0e429', 'sienna': '#a9561e', 'silver': '#c5c9c7', 'sky': '#82cafc', 'sky blue': '#75bbfd', 'slate': '#516572', 'slate blue': '#5b7c99', 'slate green': '#658d6d', 'slate grey': '#59656d', 'slime green': '#99cc04', 'snot': '#acbb0d', 'snot green': '#9dc100', 'soft blue': '#6488ea', 'soft green': '#6fc276', 'soft pink': '#fdb0c0', 'soft purple': '#a66fb5', 'spearmint': '#1ef876', 'spring green': '#a9f971', 'spruce': '#0a5f38', 'squash': '#f2ab15', 'steel': '#738595', 'steel blue': '#5a7d9a', 'steel grey': '#6f828a', 'stone': '#ada587', 'stormy blue': 
'#507b9c', 'straw': '#fcf679', 'strawberry': '#fb2943', 'strong blue': '#0c06f7', 'strong pink': '#ff0789', 'sun yellow': '#ffdf22', 'sunflower': '#ffc512', 'sunflower yellow': '#ffda03', 'sunny yellow': '#fff917', 'sunshine yellow': '#fffd37', 'swamp': '#698339', 'swamp green': '#748500', 'tan': '#d1b26f', 'tan brown': '#ab7e4c', 'tan green': '#a9be70', 'tangerine': '#ff9408', 'taupe': '#b9a281', 'tea': '#65ab7c', 'tea green': '#bdf8a3', 'teal': '#029386', 'teal blue': '#01889f', 'teal green': '#25a36f', 'tealish': '#24bca8', 'tealish green': '#0cdc73', 'terra cotta': '#c9643b', 'terracota': '#cb6843', 'terracotta': '#ca6641', 'tiffany blue': '#7bf2da', 'tomato': '#ef4026', 'tomato red': '#ec2d01', 'topaz': '#13bbaf', 'toupe': '#c7ac7d', 'toxic green': '#61de2a', 'tree green': '#2a7e19', 'true blue': '#010fcc', 'true green': '#089404', 'turquoise': '#06c2ac', 'turquoise blue': '#06b1c4', 'turquoise green': '#04f489', 'turtle green': '#75b84f', 'twilight': '#4e518b', 'twilight blue': '#0a437a', 'ugly blue': '#31668a', 'ugly brown': '#7d7103', 'ugly green': '#7a9703', 'ugly pink': '#cd7584', 'ugly purple': '#a442a0', 'ugly yellow': '#d0c101', 'ultramarine': '#2000b1', 'ultramarine blue': '#1805db', 'umber': '#b26400', 'velvet': '#750851', 'vermillion': '#f4320c', 'very dark blue': '#000133', 'very dark brown': '#1d0200', 'very dark green': '#062e03', 'very dark purple': '#2a0134', 'very light blue': '#d5ffff', 'very light brown': '#d3b683', 'very light green': '#d1ffbd', 'very light pink': '#fff4f2', 'very light purple': '#f6cefc', 'very pale blue': '#d6fffe', 'very pale green': '#cffdbc', 'vibrant blue': '#0339f8', 'vibrant green': '#0add08', 'vibrant purple': '#ad03de', 'violet': '#9a0eea', 'violet blue': '#510ac9', 'violet pink': '#fb5ffc', 'violet red': '#a50055', 'viridian': '#1e9167', 'vivid blue': '#152eff', 'vivid green': '#2fef10', 'vivid purple': '#9900fa', 'vomit': '#a2a415', 'vomit green': '#89a203', 'vomit yellow': '#c7c10c', 'warm blue': '#4b57db', 'warm brown': '#964e02', 'warm grey': '#978a84', 'warm pink': '#fb5581', 'warm purple': '#952e8f', 'washed out green': '#bcf5a6', 'water blue': '#0e87cc', 'watermelon': '#fd4659', 'weird green': '#3ae57f', 'wheat': '#fbdd7e', 'white': '#ffffff', 'windows blue': '#3778bf', 'wine': '#80013f', 'wine red': '#7b0323', 'wintergreen': '#20f986', 'wisteria': '#a87dc2', 'yellow': '#ffff14', 'yellow brown': '#b79400', 'yellow green': '#c0fb2d', 'yellow ochre': '#cb9d06', 'yellow orange': '#fcb001', 'yellow tan': '#ffe36e', 'yellow/green': '#c8fd3d', 'yellowgreen': '#bbf90f', 'yellowish': '#faee66', 'yellowish brown': '#9b7a01', 'yellowish green': '#b0dd16', 'yellowish orange': '#ffab0f', 'yellowish tan': '#fcfc81', 'yellowy brown': '#ae8b0c', 'yellowy green': '#bff128'}<|fim▁end|>
<|file_name|>ChromeEval.js<|end_file_name|><|fim▁begin|>/** * @license * Copyright 2014 Google Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at *<|fim▁hole|> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Asynchronous eval workaround for lack of eval in Chrome Apps. Do not add // workaround in Cordova Chrome Apps. if ( ! (window.cordova && window.chrome) ) { TemplateUtil.compile = function() { return function() { return this.name_ + " wasn't required. Models must be arequired()'ed for Templates to be compiled in Packaged Apps."; }; }; var __EVAL_CALLBACKS__ = {}; var aeval = (function() { var nextID = 0; var future = afuture(); if ( ! document.body ) window.addEventListener('load', future.set); else future.set(); return function(src) { return aseq( future.get, function(ret) { var id = 'c' + (nextID++); var newjs = ['__EVAL_CALLBACKS__["' + id + '"](' + src + ');']; var blob = new Blob(newjs, {type: 'text/javascript'}); var url = window.URL.createObjectURL(blob); // TODO: best values? // url.defer = ?; // url.async = ?; __EVAL_CALLBACKS__[id] = function(data) { delete __EVAL_CALLBACKS__[id]; ret && ret.call(this, data); }; var script = document.createElement('script'); script.src = url; script.onload = function() { this.remove(); window.URL.revokeObjectURL(url); // document.body.removeChild(this); }; document.body.appendChild(script); }); }; })(); var TEMPLATE_FUNCTIONS = []; var aevalTemplate = function(t, model) { var doEval_ = function(t) { // Parse result: [isSimple, maybeCode]: [true, null] or [false, codeString]. var parseResult = TemplateCompiler.parseString(t.template); // Simple case, just a string literal if ( parseResult[0] ) return aconstant(ConstantTemplate(t.language === 'css' ? X.foam.grammars.CSS3.create().parser.parseString(t.template).toString() : t.template)); var code = TemplateUtil.HEADER + parseResult[1] + TemplateUtil.FOOTERS[t.language]; var args = ['opt_out']; if ( t.args ) { for ( var i = 0 ; i < t.args.length ; i++ ) { args.push(t.args[i].name); } } return aeval('function(' + args.join(',') + '){' + code + '}'); }; var doEval = function(t) { try { return doEval_(t); } catch (err) { console.log('Template Error: ', err); console.log(code); return aconstant(function() {return 'TemplateError: Check console.';}); } }; var i = TEMPLATE_FUNCTIONS.length; TEMPLATE_FUNCTIONS[i] = ''; return aseq( t.futureTemplate, function(ret, t) { doEval(t)(ret); }, function(ret, f) { TEMPLATE_FUNCTIONS[i] = f; ret(f); }); }; }<|fim▁end|>
* http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS,
<|file_name|>printvals.py<|end_file_name|><|fim▁begin|>import time
from usb_vendor import PIC_USB
import traceback

# Product IDs: Master PIC is 0x0004, Rocket PIC is 0x0005, Barge PIC is 0x0006
comms = PIC_USB(0x0005)

def main():
    # rebind the module-level handle, so the reconnect in the except block
    # is actually seen by the helper functions below
    global comms
    print("START")
    loop_time = .2  # How often to run the main loop, in seconds
    while True:
        start_time = time.clock()
        # print(chr(27) + "[2J")
        # quad_info()
        try:
            # debug_uart_buffers()
            # debug_uart_status()
            rocket_info()
            endstops()
            # debug_oc_status()
        except Exception, e:
            print "Error occurred. {}".format(e)
            traceback.print_exc()
            print "Retrying..."
            comms = PIC_USB(0x0005)
        while (time.clock() - start_time) < loop_time:
            pass

def rocket_info():
    info = comms.get_rocket_info()
    print "Rocket Tilt {} | Rocket Speed {} | Throttle {} | Motor Speed {} | Motor Thrust {} | Stepper Speed {} | Tilt Angle {} | Tilt Direction {} | Rocket State {}".format(
        info["tilt"],
        info["speed"],
        info["throttle"],
        info["motor_speed"],
        info["motor_thrust"],
        info["stepper_speed"],
        info["tilt_ang"],
        info["tilt_dir"],
        info["rocket_state"],
    )

def debug_uart_buffers():
    info = comms.debug_uart_buffers()
    rx = info["rx"]
    tx = info["tx"]
    print "TX_head {} | TX_tail {} | TX_count {} || RX_head {} | RX_tail {} | RX_count {}".format(
        tx["head"],
        tx["tail"],
        tx["count"],
        rx["head"],
        rx["tail"],
        rx["count"],
    )

def debug_uart_status():
    info = comms.debug_uart_status()
    uart1 = info["uart1"]
    uart2 = info["uart2"]
    print "[UART1] URXDA: {} | OERR {} | FERR {} || PERR {} | RIDLE {} | ADDEN {}".format(
        uart1["URXDA"],
        uart1["OERR"],
        uart1["FERR"],
        uart1["PERR"],
        uart1["RIDLE"],
        uart1["ADDEN"]
    )
    print "[UART2] URXDA: {} | OERR {} | FERR {} || PERR {} | RIDLE {} | ADDEN {}".format(
        uart2["URXDA"],
        uart2["OERR"],
        uart2["FERR"],
        uart2["PERR"],<|fim▁hole|>
uart2["RIDLE"],
<|file_name|>reachable-loop-head.cpp<|end_file_name|><|fim▁begin|>//======================================================================= // Copyright 2001 Jeremy G. Siek, Andrew Lumsdaine, Lie-Quan Lee, // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) //======================================================================= #include <boost/config.hpp> #include <iostream> #include <fstream> #include <boost/graph/adjacency_list.hpp> #include <boost/graph/depth_first_search.hpp> #include <boost/graph/graphviz.hpp> #include <boost/graph/copy.hpp> int main(int argc, char *argv[]) { if (argc < 3) { std::cerr << "usage: reachable-loop-head.exe <in-file> <out-file>" << std::endl; return -1; } using namespace boost; GraphvizDigraph g; read_graphviz(argv[1], g); graph_traits < GraphvizDigraph >::vertex_descriptor loop_head = 1; typedef color_traits < default_color_type > Color; std::vector < default_color_type > reachable_from_head(num_vertices(g), Color::white()); default_color_type c; depth_first_visit(g, loop_head, default_dfs_visitor(), make_iterator_property_map(reachable_from_head.begin(), get(vertex_index, g), c)); property_map<GraphvizDigraph, vertex_attribute_t>::type vattr_map = get(vertex_attribute, g); graph_traits < GraphvizDigraph >::vertex_iterator i, i_end; for (boost::tie(i, i_end) = vertices(g); i != i_end; ++i) if (reachable_from_head[*i] != Color::white()) { vattr_map[*i]["color"] = "gray"; vattr_map[*i]["style"] = "filled"; } std::ofstream loops_out(argv[2]); #if defined(BOOST_MSVC) && BOOST_MSVC <= 1300 // VC++ has trouble with the get_property() functions loops_out << "digraph G {\n" << "size=\"3,3\"\n" << "ratio=\"fill\"\n" << "shape=\"box\"\n"; graph_traits<GraphvizDigraph>::vertex_iterator vi, vi_end; for (boost::tie(vi, vi_end) = vertices(g); vi != vi_end; ++vi) { loops_out << *vi << "["; for (std::map<std::string,std::string>::iterator ai = vattr_map[*vi].begin(); ai != vattr_map[*vi].end(); ++ai) { loops_out << ai->first << "=" << ai->second; if (next(ai) != vattr_map[*vi].end()) loops_out << ", "; } loops_out<< "]"; } property_map<GraphvizDigraph, edge_attribute_t>::type eattr_map = get(edge_attribute, g); graph_traits<GraphvizDigraph>::edge_iterator ei, ei_end; for (boost::tie(ei, ei_end) = edges(g); ei != ei_end; ++ei) { loops_out << source(*ei, g) << " -> " << target(*ei, g) << "["; <|fim▁hole|> for (std::map<std::string,std::string>::iterator eai = attr_map.begin(); eai != attr_map.end(); ++eai) { loops_out << eai->first << "=" << eai->second; if (next(eai) != attr_map.end()) loops_out << ", "; } loops_out<< "]"; } loops_out << "}\n"; #else get_property(g, graph_graph_attribute)["size"] = "3,3"; get_property(g, graph_graph_attribute)["ratio"] = "fill"; get_property(g, graph_vertex_attribute)["shape"] = "box"; write_graphviz(loops_out, g, make_vertex_attributes_writer(g), make_edge_attributes_writer(g), make_graph_attributes_writer(g)); #endif return EXIT_SUCCESS; }<|fim▁end|>
std::map<std::string,std::string>& attr_map = eattr_map[*ei];
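The C++ row computes which vertices are reachable from loop_head with depth_first_visit and then marks every non-white vertex gray. The same reachability check, sketched in Python with an explicit stack in place of Boost's visitor machinery:

```python
def reachable_from(graph, start):
    """Return the set of vertices reachable from `start`.

    `graph` maps each vertex to an iterable of successors, mirroring
    what depth_first_visit() computes in the row above.
    """
    seen = {start}
    stack = [start]
    while stack:
        v = stack.pop()
        for w in graph.get(v, ()):
            if w not in seen:
                seen.add(w)
                stack.append(w)
    return seen

# Vertices reachable from the loop head get highlighted, as in the C++ row.
g = {0: [1], 1: [2, 3], 2: [1], 3: [], 4: [0]}
assert reachable_from(g, 1) == {1, 2, 3}
```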
<|file_name|>ExpressionEnrichmentServiceImplTest.java<|end_file_name|><|fim▁begin|>package org.cbioportal.service.impl; import java.math.BigDecimal; import java.util.*; import org.cbioportal.model.*; import org.cbioportal.model.meta.GenericAssayMeta; import org.cbioportal.persistence.MolecularDataRepository; import org.cbioportal.service.GeneService; import org.cbioportal.service.GenericAssayService; import org.cbioportal.service.MolecularProfileService; import org.cbioportal.service.SampleService; import org.cbioportal.service.exception.MolecularProfileNotFoundException; import org.cbioportal.service.util.ExpressionEnrichmentUtil; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class ExpressionEnrichmentServiceImplTest extends BaseServiceImplTest { @InjectMocks private ExpressionEnrichmentServiceImpl enrichmentServiceImpl; @Mock private SampleService sampleService; @Mock private MolecularProfileService molecularProfileService; @Mock private MolecularDataRepository molecularDataRepository; @Mock private GeneService geneService; @Spy @InjectMocks private ExpressionEnrichmentUtil expressionEnrichmentUtil; @Mock private GenericAssayService genericAssayService; CancerStudy cancerStudy = new CancerStudy(); MolecularProfile geneMolecularProfile = new MolecularProfile(); MolecularProfileSamples molecularProfileSamples = new MolecularProfileSamples(); List<Sample> samples = new ArrayList<>(); Map<String, List<MolecularProfileCaseIdentifier>> molecularProfileCaseSets = new HashMap<>(); Map<String, List<MolecularProfileCaseIdentifier>> molecularProfilePatientLevelCaseSets = new HashMap<>(); // patient level only data public static final String SAMPLE_ID5 = "sample_id5"; @Before public void setup() throws MolecularProfileNotFoundException { cancerStudy.setReferenceGenome(ReferenceGenome.HOMO_SAPIENS_DEFAULT_GENOME_NAME); cancerStudy.setCancerStudyIdentifier(STUDY_ID); geneMolecularProfile.setCancerStudyIdentifier(STUDY_ID); geneMolecularProfile.setStableId(MOLECULAR_PROFILE_ID); geneMolecularProfile.setCancerStudy(cancerStudy); molecularProfileSamples.setMolecularProfileId(MOLECULAR_PROFILE_ID); molecularProfileSamples.setCommaSeparatedSampleIds("1,2,3,4"); Sample sample1 = new Sample(); sample1.setStableId(SAMPLE_ID1); sample1.setInternalId(1); sample1.setCancerStudyIdentifier(STUDY_ID); sample1.setPatientId(1); samples.add(sample1); Sample sample2 = new Sample(); sample2.setStableId(SAMPLE_ID2); sample2.setInternalId(2); sample2.setCancerStudyIdentifier(STUDY_ID); sample2.setPatientId(2); samples.add(sample2); Sample sample3 = new Sample(); sample3.setStableId(SAMPLE_ID3); sample3.setInternalId(3); sample3.setCancerStudyIdentifier(STUDY_ID); sample3.setPatientId(3); samples.add(sample3); Sample sample4 = new Sample(); sample4.setStableId(SAMPLE_ID4); sample4.setInternalId(4); sample4.setCancerStudyIdentifier(STUDY_ID); sample4.setPatientId(4); samples.add(sample4); List<MolecularProfileCaseIdentifier> alteredSampleIdentifieres = new ArrayList<>(); List<MolecularProfileCaseIdentifier> unalteredSampleIdentifieres = new ArrayList<>(); List<MolecularProfileCaseIdentifier> unalteredPatientLevelSampleIdentifieres = new ArrayList<>(); MolecularProfileCaseIdentifier caseIdentifier1 = new MolecularProfileCaseIdentifier(); 
caseIdentifier1.setMolecularProfileId(MOLECULAR_PROFILE_ID); caseIdentifier1.setCaseId(SAMPLE_ID1); alteredSampleIdentifieres.add(caseIdentifier1); MolecularProfileCaseIdentifier caseIdentifier2 = new MolecularProfileCaseIdentifier(); caseIdentifier2.setMolecularProfileId(MOLECULAR_PROFILE_ID); caseIdentifier2.setCaseId(SAMPLE_ID2); alteredSampleIdentifieres.add(caseIdentifier2); MolecularProfileCaseIdentifier caseIdentifier3 = new MolecularProfileCaseIdentifier(); caseIdentifier3.setMolecularProfileId(MOLECULAR_PROFILE_ID); caseIdentifier3.setCaseId(SAMPLE_ID3); unalteredSampleIdentifieres.add(caseIdentifier3); unalteredPatientLevelSampleIdentifieres.add(caseIdentifier3); MolecularProfileCaseIdentifier caseIdentifier4 = new MolecularProfileCaseIdentifier(); caseIdentifier4.setMolecularProfileId(MOLECULAR_PROFILE_ID); caseIdentifier4.setCaseId(SAMPLE_ID4); unalteredSampleIdentifieres.add(caseIdentifier4); unalteredPatientLevelSampleIdentifieres.add(caseIdentifier4); // patient level only data MolecularProfileCaseIdentifier caseIdentifier5 = new MolecularProfileCaseIdentifier(); caseIdentifier5.setMolecularProfileId(MOLECULAR_PROFILE_ID); caseIdentifier5.setCaseId(SAMPLE_ID5); unalteredPatientLevelSampleIdentifieres.add(caseIdentifier5); molecularProfileCaseSets.put("altered samples", alteredSampleIdentifieres); molecularProfileCaseSets.put("unaltered samples", unalteredSampleIdentifieres); molecularProfilePatientLevelCaseSets.put("altered samples", alteredSampleIdentifieres); molecularProfilePatientLevelCaseSets.put("unaltered samples", unalteredPatientLevelSampleIdentifieres); Mockito.when(molecularProfileService.getMolecularProfile(MOLECULAR_PROFILE_ID)) .thenReturn(geneMolecularProfile); Mockito.when(molecularDataRepository.getCommaSeparatedSampleIdsOfMolecularProfile(MOLECULAR_PROFILE_ID)) .thenReturn(molecularProfileSamples); Mockito.when(sampleService.fetchSamples(Arrays.asList(STUDY_ID, STUDY_ID, STUDY_ID, STUDY_ID), Arrays.asList(SAMPLE_ID3, SAMPLE_ID4, SAMPLE_ID1, SAMPLE_ID2), "ID")).thenReturn(samples); } @Test public void getGenomicEnrichments() throws Exception { geneMolecularProfile.setMolecularAlterationType(MolecularProfile.MolecularAlterationType.MRNA_EXPRESSION); List<GeneMolecularAlteration> molecularDataList = new ArrayList<GeneMolecularAlteration>(); GeneMolecularAlteration geneMolecularAlteration1 = new GeneMolecularAlteration(); geneMolecularAlteration1.setEntrezGeneId(ENTREZ_GENE_ID_2); geneMolecularAlteration1.setValues("2,3,2.1,3"); molecularDataList.add(geneMolecularAlteration1); GeneMolecularAlteration geneMolecularAlteration2 = new GeneMolecularAlteration(); geneMolecularAlteration2.setEntrezGeneId(ENTREZ_GENE_ID_3); geneMolecularAlteration2.setValues("1.1,5,2.3,3"); molecularDataList.add(geneMolecularAlteration2); Mockito.when(molecularDataRepository.getGeneMolecularAlterationsIterableFast(MOLECULAR_PROFILE_ID)) .thenReturn(molecularDataList); List<Gene> expectedGeneList = new ArrayList<>(); Gene gene1 = new Gene(); gene1.setEntrezGeneId(ENTREZ_GENE_ID_2); gene1.setHugoGeneSymbol(HUGO_GENE_SYMBOL_2); expectedGeneList.add(gene1); Gene gene2 = new Gene(); gene2.setEntrezGeneId(ENTREZ_GENE_ID_3); gene2.setHugoGeneSymbol(HUGO_GENE_SYMBOL_3); expectedGeneList.add(gene2); Mockito.when(geneService.fetchGenes(Arrays.asList("2", "3"), "ENTREZ_GENE_ID", "SUMMARY")) .thenReturn(expectedGeneList); List<GenomicEnrichment> result = enrichmentServiceImpl.getGenomicEnrichments(MOLECULAR_PROFILE_ID, molecularProfileCaseSets, EnrichmentType.SAMPLE); Assert.assertEquals(2, 
result.size()); GenomicEnrichment expressionEnrichment = result.get(0); Assert.assertEquals(ENTREZ_GENE_ID_2, expressionEnrichment.getEntrezGeneId()); Assert.assertEquals(HUGO_GENE_SYMBOL_2, expressionEnrichment.getHugoGeneSymbol()); Assert.assertEquals(null, expressionEnrichment.getCytoband()); Assert.assertEquals(2, expressionEnrichment.getGroupsStatistics().size()); GroupStatistics unalteredGroupStats = expressionEnrichment.getGroupsStatistics().get(0); Assert.assertEquals("unaltered samples", unalteredGroupStats.getName()); Assert.assertEquals(new BigDecimal("2.55"), unalteredGroupStats.getMeanExpression()); Assert.assertEquals(new BigDecimal("0.6363961030678927"), unalteredGroupStats.getStandardDeviation()); GroupStatistics alteredGroupStats = expressionEnrichment.getGroupsStatistics().get(1); Assert.assertEquals("altered samples", alteredGroupStats.getName()); Assert.assertEquals(new BigDecimal("2.5"), alteredGroupStats.getMeanExpression()); Assert.assertEquals(new BigDecimal("0.7071067811865476"), alteredGroupStats.getStandardDeviation()); Assert.assertEquals(new BigDecimal("0.9475795430163914"), expressionEnrichment.getpValue()); expressionEnrichment = result.get(1); Assert.assertEquals(ENTREZ_GENE_ID_3, expressionEnrichment.getEntrezGeneId()); Assert.assertEquals(HUGO_GENE_SYMBOL_3, expressionEnrichment.getHugoGeneSymbol()); Assert.assertEquals(null, expressionEnrichment.getCytoband()); Assert.assertEquals(2, expressionEnrichment.getGroupsStatistics().size()); unalteredGroupStats = expressionEnrichment.getGroupsStatistics().get(0); Assert.assertEquals("unaltered samples", unalteredGroupStats.getName()); Assert.assertEquals(new BigDecimal("2.65"), unalteredGroupStats.getMeanExpression()); Assert.assertEquals(new BigDecimal("0.4949747468305834"), unalteredGroupStats.getStandardDeviation()); alteredGroupStats = expressionEnrichment.getGroupsStatistics().get(1); Assert.assertEquals("altered samples", alteredGroupStats.getName()); Assert.assertEquals(new BigDecimal("3.05"), alteredGroupStats.getMeanExpression()); Assert.assertEquals(new BigDecimal("2.7577164466275352"), alteredGroupStats.getStandardDeviation()); Assert.assertEquals(new BigDecimal("0.8716148250471419"), expressionEnrichment.getpValue()); } @Test public void getGenericAssayEnrichments() throws Exception { geneMolecularProfile.setMolecularAlterationType(MolecularProfile.MolecularAlterationType.GENERIC_ASSAY); List<GenericAssayMolecularAlteration> molecularDataList = new ArrayList<GenericAssayMolecularAlteration>(); GenericAssayMolecularAlteration genericAssayMolecularAlteration1 = new GenericAssayMolecularAlteration(); genericAssayMolecularAlteration1.setGenericAssayStableId(HUGO_GENE_SYMBOL_1); genericAssayMolecularAlteration1.setValues("2,3,2.1,3"); molecularDataList.add(genericAssayMolecularAlteration1); GenericAssayMolecularAlteration genericAssayMolecularAlteration2 = new GenericAssayMolecularAlteration(); genericAssayMolecularAlteration2.setGenericAssayStableId(HUGO_GENE_SYMBOL_2); genericAssayMolecularAlteration2.setValues("1.1,5,2.3,3"); molecularDataList.add(genericAssayMolecularAlteration2); Mockito.when(molecularDataRepository.getGenericAssayMolecularAlterationsIterable(MOLECULAR_PROFILE_ID, null, "SUMMARY")).thenReturn(molecularDataList); Mockito.when(genericAssayService.getGenericAssayMetaByStableIdsAndMolecularIds( Arrays.asList(HUGO_GENE_SYMBOL_1, HUGO_GENE_SYMBOL_2), Arrays.asList(MOLECULAR_PROFILE_ID, MOLECULAR_PROFILE_ID), "SUMMARY")) .thenReturn(Arrays.asList(new GenericAssayMeta(HUGO_GENE_SYMBOL_1), 
new GenericAssayMeta(HUGO_GENE_SYMBOL_2))); List<GenericAssayEnrichment> result = enrichmentServiceImpl.getGenericAssayEnrichments(MOLECULAR_PROFILE_ID, molecularProfileCaseSets, EnrichmentType.SAMPLE); Assert.assertEquals(2, result.size()); GenericAssayEnrichment genericAssayEnrichment = result.get(0); Assert.assertEquals(HUGO_GENE_SYMBOL_1, genericAssayEnrichment.getStableId()); Assert.assertEquals(2, genericAssayEnrichment.getGroupsStatistics().size()); GroupStatistics unalteredGroupStats = genericAssayEnrichment.getGroupsStatistics().get(0); Assert.assertEquals("unaltered samples", unalteredGroupStats.getName()); Assert.assertEquals(new BigDecimal("2.55"), unalteredGroupStats.getMeanExpression()); Assert.assertEquals(new BigDecimal("0.6363961030678927"), unalteredGroupStats.getStandardDeviation()); GroupStatistics alteredGroupStats = genericAssayEnrichment.getGroupsStatistics().get(1); Assert.assertEquals("altered samples", alteredGroupStats.getName()); Assert.assertEquals(new BigDecimal("2.5"), alteredGroupStats.getMeanExpression()); Assert.assertEquals(new BigDecimal("0.7071067811865476"), alteredGroupStats.getStandardDeviation()); Assert.assertEquals(new BigDecimal("0.9475795430163914"), genericAssayEnrichment.getpValue());<|fim▁hole|> Assert.assertEquals(2, genericAssayEnrichment.getGroupsStatistics().size()); unalteredGroupStats = genericAssayEnrichment.getGroupsStatistics().get(0); Assert.assertEquals("unaltered samples", unalteredGroupStats.getName()); Assert.assertEquals(new BigDecimal("2.65"), unalteredGroupStats.getMeanExpression()); Assert.assertEquals(new BigDecimal("0.4949747468305834"), unalteredGroupStats.getStandardDeviation()); alteredGroupStats = genericAssayEnrichment.getGroupsStatistics().get(1); Assert.assertEquals("altered samples", alteredGroupStats.getName()); Assert.assertEquals(new BigDecimal("3.05"), alteredGroupStats.getMeanExpression()); Assert.assertEquals(new BigDecimal("2.7577164466275352"), alteredGroupStats.getStandardDeviation()); Assert.assertEquals(new BigDecimal("0.8716148250471419"), genericAssayEnrichment.getpValue()); } @Test public void getGenericAssayPatientLevelEnrichments() throws Exception { geneMolecularProfile.setMolecularAlterationType(MolecularProfile.MolecularAlterationType.GENERIC_ASSAY); geneMolecularProfile.setPatientLevel(true); List<GenericAssayMolecularAlteration> molecularDataList = new ArrayList<GenericAssayMolecularAlteration>(); GenericAssayMolecularAlteration genericAssayMolecularAlteration1 = new GenericAssayMolecularAlteration(); genericAssayMolecularAlteration1.setGenericAssayStableId(HUGO_GENE_SYMBOL_1); genericAssayMolecularAlteration1.setValues("2,3,2.1,3,3,3"); molecularDataList.add(genericAssayMolecularAlteration1); GenericAssayMolecularAlteration genericAssayMolecularAlteration2 = new GenericAssayMolecularAlteration(); genericAssayMolecularAlteration2.setGenericAssayStableId(HUGO_GENE_SYMBOL_2); genericAssayMolecularAlteration2.setValues("1.1,5,2.3,3,3"); molecularDataList.add(genericAssayMolecularAlteration2); Mockito.when(molecularDataRepository.getGenericAssayMolecularAlterationsIterable(MOLECULAR_PROFILE_ID, null, "SUMMARY")).thenReturn(molecularDataList); Mockito.when(genericAssayService.getGenericAssayMetaByStableIdsAndMolecularIds( Arrays.asList(HUGO_GENE_SYMBOL_1, HUGO_GENE_SYMBOL_2), Arrays.asList(MOLECULAR_PROFILE_ID, MOLECULAR_PROFILE_ID), "SUMMARY")) .thenReturn(Arrays.asList(new GenericAssayMeta(HUGO_GENE_SYMBOL_1), new GenericAssayMeta(HUGO_GENE_SYMBOL_2))); // add 5th sample which is the second sample 
of patient 4 Sample sample5 = new Sample(); sample5.setStableId(SAMPLE_ID5); sample5.setInternalId(5); sample5.setCancerStudyIdentifier(STUDY_ID); sample5.setPatientId(4); samples.add(sample5); Mockito.when(sampleService.fetchSamples(Arrays.asList(STUDY_ID, STUDY_ID, STUDY_ID, STUDY_ID, STUDY_ID), Arrays.asList(SAMPLE_ID3, SAMPLE_ID4, SAMPLE_ID5, SAMPLE_ID1, SAMPLE_ID2), "ID")).thenReturn(samples); List<GenericAssayEnrichment> result = enrichmentServiceImpl.getGenericAssayEnrichments(MOLECULAR_PROFILE_ID, molecularProfilePatientLevelCaseSets, EnrichmentType.SAMPLE); Assert.assertEquals(2, result.size()); GenericAssayEnrichment genericAssayEnrichment = result.get(0); Assert.assertEquals(HUGO_GENE_SYMBOL_1, genericAssayEnrichment.getStableId()); Assert.assertEquals(2, genericAssayEnrichment.getGroupsStatistics().size()); GroupStatistics unalteredGroupStats = genericAssayEnrichment.getGroupsStatistics().get(0); Assert.assertEquals("unaltered samples", unalteredGroupStats.getName()); Assert.assertEquals(new BigDecimal("2.55"), unalteredGroupStats.getMeanExpression()); Assert.assertEquals(new BigDecimal("0.6363961030678927"), unalteredGroupStats.getStandardDeviation()); GroupStatistics alteredGroupStats = genericAssayEnrichment.getGroupsStatistics().get(1); Assert.assertEquals("altered samples", alteredGroupStats.getName()); Assert.assertEquals(new BigDecimal("2.5"), alteredGroupStats.getMeanExpression()); Assert.assertEquals(new BigDecimal("0.7071067811865476"), alteredGroupStats.getStandardDeviation()); Assert.assertEquals(new BigDecimal("0.9475795430163914"), genericAssayEnrichment.getpValue()); genericAssayEnrichment = result.get(1); Assert.assertEquals(HUGO_GENE_SYMBOL_2, genericAssayEnrichment.getStableId()); Assert.assertEquals(2, genericAssayEnrichment.getGroupsStatistics().size()); unalteredGroupStats = genericAssayEnrichment.getGroupsStatistics().get(0); Assert.assertEquals("unaltered samples", unalteredGroupStats.getName()); Assert.assertEquals(new BigDecimal("2.65"), unalteredGroupStats.getMeanExpression()); Assert.assertEquals(new BigDecimal("0.4949747468305834"), unalteredGroupStats.getStandardDeviation()); alteredGroupStats = genericAssayEnrichment.getGroupsStatistics().get(1); Assert.assertEquals("altered samples", alteredGroupStats.getName()); Assert.assertEquals(new BigDecimal("3.05"), alteredGroupStats.getMeanExpression()); Assert.assertEquals(new BigDecimal("2.7577164466275352"), alteredGroupStats.getStandardDeviation()); Assert.assertEquals(new BigDecimal("0.8716148250471419"), genericAssayEnrichment.getpValue()); } }<|fim▁end|>
        genericAssayEnrichment = result.get(1);
        Assert.assertEquals(HUGO_GENE_SYMBOL_2, genericAssayEnrichment.getStableId());
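The constants asserted in this test are just the per-group mean and sample standard deviation of the profile values: the values string "2,3,2.1,3" maps to samples 1-4 in order, so the altered group (SAMPLE_ID1, SAMPLE_ID2) holds [2, 3] and the unaltered group (SAMPLE_ID3, SAMPLE_ID4) holds [2.1, 3]. A few lines of Python reproduce the expected numbers:

```python
import math

def mean(xs):
    return sum(xs) / len(xs)

def sample_std(xs):
    # Standard deviation with Bessel's correction (n - 1),
    # which is what the asserted constants correspond to.
    m = mean(xs)
    return math.sqrt(sum((x - m) ** 2 for x in xs) / (len(xs) - 1))

unaltered = [2.1, 3.0]  # SAMPLE_ID3, SAMPLE_ID4
altered = [2.0, 3.0]    # SAMPLE_ID1, SAMPLE_ID2

print(mean(unaltered), sample_std(unaltered))  # ~2.55, ~0.6363961030678927
print(mean(altered), sample_std(altered))      # ~2.5,  ~0.7071067811865476
```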
<|file_name|>release.spec.ts<|end_file_name|><|fim▁begin|>import {
  HttpClientTestingModule,
  HttpTestingController
} from '@angular/common/http/testing';
import { TestBed } from '@angular/core/testing';<|fim▁hole|>
import { ReleaseProvider } from './release';

describe('Release Provider', () => {
  let releaseProvider: ReleaseProvider;
  let logger: Logger;
  let httpMock: HttpTestingController;
  const currentAppVersion = 'v1.1.1';
  const latestAppVersion = 'v2.2.2';
  let loggerSpy;

  class AppProviderMock {
    public info;
    constructor() {
      this.info = { version: currentAppVersion };
    }
  }

  beforeEach(() => {
    TestBed.configureTestingModule({
      imports: [HttpClientTestingModule],
      providers: [
        ReleaseProvider,
        Logger,
        { provide: AppProvider, useClass: AppProviderMock }
      ]
    });
    releaseProvider = TestBed.get(ReleaseProvider);
    logger = TestBed.get(Logger);
    loggerSpy = spyOn(logger, 'error');
    httpMock = TestBed.get(HttpTestingController);
  });

  afterEach(() => {
    httpMock.verify();
  });

  it('should get successfully the current app version', () => {
    const appVersion = releaseProvider.getCurrentAppVersion();
    expect(appVersion).toBeDefined();
    expect(appVersion).toEqual(currentAppVersion);
  });

  it('should get successfully the latest app version', () => {
    releaseProvider.getLatestAppVersion().then((data: { version: string }) => {
      const version = data.version;
      expect(version).toBeDefined();
      expect(version).toEqual(latestAppVersion);
    });
    const bwsReq = httpMock.expectOne(
      'https://bws.bitpay.com/bws/api/latest-version'
    );
    expect(bwsReq.request.method).toEqual('GET');
    bwsReq.flush({ version: latestAppVersion });
  });

  it('should check unsuccessfully the latest app version format', () => {
    const result = releaseProvider.newReleaseAvailable('V..3.3.3');
    expect(result).toBeFalsy;
    expect(loggerSpy).toHaveBeenCalledWith(
      'Cannot verify the format of version tag. latestVersion V..3.3.3 - currentVersion v1.1.1'
    );
  });

  it('should check unsuccessfully the current app version format', () => {
    spyOn(releaseProvider, 'getCurrentAppVersion').and.returnValue('V..1.1.1');
    const result = releaseProvider.newReleaseAvailable(latestAppVersion);
    expect(result).toBeFalsy;
    expect(loggerSpy).toHaveBeenCalledWith(
      'Cannot verify the format of version tag. latestVersion v2.2.2 - currentVersion V..1.1.1'
    );
  });

  it('should be a new version available', () => {
    const result = releaseProvider.newReleaseAvailable(latestAppVersion);
    expect(result).toBeTruthy;
  });

  it('should be a new major version available', () => {
    const majorAppVersion = '2.0.0';
    const result = releaseProvider.newReleaseAvailable(majorAppVersion);
    expect(result).toBeTruthy;
  });

  it('should be a new minor version available', () => {
    const minorAppVersion = '1.2.0';
    const result = releaseProvider.newReleaseAvailable(minorAppVersion);
    expect(result).toBeFalsy;
  });

  it('should be a new patch version available', () => {
    const patchAppVersion = '1.1.2';
    const result = releaseProvider.newReleaseAvailable(patchAppVersion);
    expect(result).toBeFalsy;
  });
});<|fim▁end|>
import { AppProvider } from '../../providers/app/app';
import { Logger } from '../../providers/logger/logger';
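The expectations above imply a newReleaseAvailable that validates the vX.Y.Z tag format, logs and returns false on a malformed tag, and reports a new release only for a higher major version; minor and patch bumps are expected falsy. (Note the spec writes toBeTruthy/toBeFalsy without calling them, so those particular checks never actually run.) A sketch of the implied comparison, in Python rather than the provider's TypeScript:

```python
import re

TAG = re.compile(r"^v?(\d+)\.(\d+)\.(\d+)$")

def parse(tag):
    """Return (major, minor, patch), or None for a malformed tag like 'V..3.3.3'."""
    m = TAG.match(tag)
    return tuple(int(g) for g in m.groups()) if m else None

def new_release_available(latest, current):
    lv, cv = parse(latest), parse(current)
    if lv is None or cv is None:
        print("Cannot verify the format of version tag. "
              "latestVersion {} - currentVersion {}".format(latest, current))
        return False
    return lv[0] > cv[0]  # only a major bump counts, per the spec's expectations

assert new_release_available("v2.2.2", "v1.1.1") is True
assert new_release_available("2.0.0", "v1.1.1") is True
assert new_release_available("1.2.0", "v1.1.1") is False   # minor
assert new_release_available("1.1.2", "v1.1.1") is False   # patch
assert new_release_available("V..3.3.3", "v1.1.1") is False  # malformed tag
```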
<|file_name|>ALPHA3.py<|end_file_name|><|fim▁begin|># Copyright (c) 2003-2010, Berend-Jan "SkyLined" Wever <[email protected]> # Project homepage: http://code.google.com/p/alpha3/ # All rights reserved. See COPYRIGHT.txt for details. import charsets, encode, io import x86, x64, test import os, re, sys #_______________________________________________________________________________________________________________________ # # ,sSSs,,s, ,sSSSs, : ALPHA3 - Alphanumeric shellcode encoder. # dS" Y$P" YS" ,SY : Version 1.0 alpha # iS' dY ssS" : Copyright (C) 2003-2009 by SkyLined. # YS, dSb SP, ;SP : <[email protected]> # `"YSS'"S' "YSSSY" : http://skypher.com/wiki/index.php/ALPHA3 #_______________________________________________________________________________________________________________________ # _settings = { "architecture": None, "character encoding": None, "case": None } _default_settings = { "architecture": "x86", "character encoding": "ascii", "case": "mixedcase" } _valid_settings = { "case": charsets.valid_character_casings, "character encoding": charsets.valid_character_encodings, "architecture": ["x86", "x64"] } _arguments = { "base address": None } _switches = { "input": None, "output": None } _flags = { "verbose": 0, "help": 0, "test": 0, "int3": 0 } encoders = []; import print_functions; from print_functions import * def ParseCommandLine(): global _settings, _arguments, _switches, _flags; # Parse settings, arguments, switches and flags from the command line: if len(sys.argv) == 1: _flags["help"] = 1; else: for i in range(1, len(sys.argv)): arg = sys.argv[i]; if arg[:2] == "--": end_switch_name = arg.find("="); if end_switch_name != -1: switch_name = arg[2:end_switch_name]; switch_value = arg[end_switch_name + 1:]; for valid_switch_name in _switches: if switch_name == valid_switch_name: _switches[switch_name] = switch_value; break; else: print >>sys.stderr, "Unknown switch '%s'!" % arg[2:]; return False; else: flag_name = arg[2:] for valid_flag_name in _flags: if flag_name == valid_flag_name: _flags[flag_name] += 1; break else: print >>sys.stderr, "Unknown flag '%s'!" % valid_flag_name; return False; else: for setting_name in _valid_settings: if arg in _valid_settings[setting_name]: _settings[setting_name] = arg; break; else: for argument_name in _arguments: if _arguments[argument_name] == None: _arguments[argument_name] = arg; break; else: print >>sys.stderr, "Unknown _arguments: %s." 
% repr(arg); return False; return True; def PrintLogo(): PrintInfo([ (None, "____________________________________________________________________________"), (None, """ ,sSSs,,s, ,sSSSs, ALPHA3 - Alphanumeric shellcode encoder."""), (None, """ dS" Y$P" YS" ,SY Version 1.0 alpha"""), (None, """ iS' dY ssS" Copyright (C) 2003-2009 by SkyLined."""), (None, """ YS, dSb SP, ;SP <[email protected]>"""), (None, """ `"YSS'"S' "YSSSY" http://skypher.com/wiki/index.php/ALPHA3"""), (None, "____________________________________________________________________________"), ]); def PrintHelp(): PrintInfo([ (None, "[Usage]"), (" ", "ALPHA3.py [ encoder settings | I/O settings | flags ]"), (None, ""), (None, "[Encoder setting]"), (" architecture ", "Which processor architecture to target (x86, x64)."), (" character encoding ", "Which character encoding to use (ascii, cp437, latin-1, utf-16)."), (" casing ", "Which character casing to use (uppercase, mixedcase, lowercase)."), (" base address ", "How to determine the base address in the decoder code (each encoder has its own set of " "valid values)."), (None, ""), (None, "[I/O Setting]"), (" --input=\"file\"", "Path to a file that contains the shellcode to be encoded (Optional, default is to read " "input from stdin)."), (" --output=\"file\"", "Path to a file that will receive the encoded shellcode (Optional, default is to write " "output to stdout)."), (None, ""), (None, "[Flags]"), (" --verbose", "Display verbose information while executing. Use this flag twice to output progress " "during encoding."), (" --help", "Display this message and quit."), (" --test", "Run all available tests for all encoders. (Useful while developing/testing new " "encoders)."), (" --int3", "Trigger a breakpoint before executing the result of a test. 
(Use in combination with " "--test)."), (None, ""), (None, "[Notes]"), (" ", "You can provide encoder settings in combination with the --help and --test switches to filter which " "encoders you get help information for and which get tested, respectively.") ]); def Main(): # Print header if _flags["help"]: # Print the main help body before displaying encoder specific help: PrintLogo(); PrintWrappedLine(); PrintHelp(); PrintWrappedLine(); encoding = False; elif not _flags["test"]: if _flags["verbose"]: PrintLogo(); encoding = True; else: if _flags["verbose"]: PrintLogo(); PrintWrappedLine(); # We're testing our encoders encoding = False; # Print the _settings provided by the user and if we're encoding shellcode, set and print the default _settings # for anything not provided: if _flags["verbose"]: for name in _settings: if _settings[name] is not None: PrintInfo([(name, _settings[name])]); elif encoding: _settings[name] = _default_settings[name]; PrintInfo([(name, _settings[name] + " (default)")]); for name in _arguments: if _arguments[name] is not None: PrintInfo([(name, _arguments[name])]); # If the user wants to encode shellcode, it needs to be read from stdin or a file: if encoding: if _switches["input"] is not None: shellcode = io.ReadFile(_switches["input"]); else: shellcode = sys.stdin.read(); # Scan all encoders to see which match the given _settings/_arguments and take action: results = []; errors = False; help_results = {}; at_least_one_encoder_found = False; for encoder_settings in encoders: for name in _settings: if not name in encoder_settings: raise AssertionError("One of the encoders is missing the '%s' setting: %s" % (name, encoder_settings["name"])); if _settings[name] != None and _settings[name] != encoder_settings[name]: # This _settings is specified but does not match this encoders _settings: skip the encoder. 
break; else: # All _settings match # Check "base address" argument: if (_arguments["base address"] is None or re.match(encoder_settings["base address"], _arguments["base address"], re.IGNORECASE)): at_least_one_encoder_found = True; if _flags["test"]: problems = test.TestEncoder(encoder_settings, _arguments["base address"], _flags["int3"] > 0); if problems is not None: # None => No test was found for the given base address at_least_one_encoder_found = True; results.extend(problems); errors = True; elif _flags["help"]: encoder_settings_string = "%s %s %s" % (encoder_settings["architecture"], encoder_settings["character encoding"], encoder_settings["case"]); if encoder_settings_string not in help_results: help_results[encoder_settings_string] = []; help_results[encoder_settings_string].append(( encoder_settings["name"], " ".join(encoder_settings["base address samples"]))); else: encoder_function = encoder_settings["function"]; if "function args" in encoder_settings: encoder_function_args = encoder_settings["function args"]; else: encoder_function_args = {}; if _switches["output"] is not None: io.WriteFile(_settings["output file"], result); else: encoded_shellcode = encoder_function(_arguments["base address"], shellcode, *encoder_function_args); results += test.CheckEncodedShellcode(encoded_shellcode, encoder_settings); sys.stdout.write(encoded_shellcode); if _flags["help"]: if not help_results: PrintWrappedLine("No encoder found that can encode using the given settings and arguments."); errors = True; else: PrintWrappedLine("Valid base address examples for each encoder, ordered by encoder settings, are:"); help_results_encoder_settings = help_results.keys(); help_results_encoder_settings.sort(); for encoder_settings_string in help_results_encoder_settings: PrintWrappedLine(""); PrintWrappedLine("[%s]" % encoder_settings_string); for encoder_name, valid_base_address_samples in help_results[encoder_settings_string]: PrintInfo([(' ' + encoder_name, valid_base_address_samples)]); else: if not at_least_one_encoder_found: results.append("No encoder exists for the given settings."); errors = True; if results: PrintWrappedLine(""); PrintWrappedLine("The following problems were found:"); for result in results: PrintWrappedLine(result); return not errors; def toInt(s): if s[:2] == "0x": return int(s[2:], 16); return int(s); <|fim▁hole|> encoders.extend(x64.encoders); success = ParseCommandLine(); if success: print_functions.g_output_verbosity_level = _flags["verbose"]; success = Main(); exit_code = {True:0, False:1}[success]; exit(exit_code);<|fim▁end|>
if __name__ == "__main__": encoders.extend(x86.encoders);
<|file_name|>ReplaceNoCase.java<|end_file_name|><|fim▁begin|>/**
 *
 * Copyright (c) 2014, the Railo Company Ltd. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library.  If not, see <http://www.gnu.org/licenses/>.
 *
 **/
/**
 * Implements the CFML Function replacenocase
 */
package lucee.runtime.functions.string;

import lucee.commons.lang.StringUtil;
import lucee.runtime.PageContext;
import lucee.runtime.exp.FunctionException;
import lucee.runtime.exp.PageException;
import lucee.runtime.ext.function.BIF;<|fim▁hole|>

public final class ReplaceNoCase extends BIF {

    private static final long serialVersionUID = 4991516019845001690L;

    public static String call(PageContext pc, String str, String sub1, String sub2) throws FunctionException {
        return _call(pc, str, sub1, sub2, true);
    }

    public static String call(PageContext pc, String str, String sub1, String sub2, String scope) throws FunctionException {
        return _call(pc, str, sub1, sub2, !scope.equalsIgnoreCase("all"));
    }

    public static String call(PageContext pc, String input, Object find, String repl, String scope) throws PageException {
        return _call(pc, input, find, repl, !scope.equalsIgnoreCase("all"));
    }

    public static String call(PageContext pc, String input, Object find, String repl) throws PageException {
        return _call(pc, input, find, repl, true);
    }

    private static String _call(PageContext pc, String str, String sub1, String sub2, boolean onlyFirst) throws FunctionException {
        if (StringUtil.isEmpty(sub1))
            throw new FunctionException(pc, "ReplaceNoCase", 2, "sub1", "The string length must be greater than 0");
        return StringUtil.replace(str, sub1, sub2, onlyFirst, true);
    }

    private static String _call(PageContext pc, String input, Object find, String repl, boolean onlyFirst) throws PageException {
        if (!Decision.isSimpleValue(find))
            throw new FunctionException(pc, "ReplaceNoCase", 2, "sub1", "When passing three parameters or more, the second parameter must be a String.");
        return _call(pc, input, Caster.toString(find), repl, onlyFirst);
    }

    public static String call(PageContext pc, String input, Object struct) throws PageException {
        if (!Decision.isStruct(struct))
            throw new FunctionException(pc, "ReplaceNoCase", 2, "sub1", "When passing only two parameters, the second parameter must be a Struct.");
        return StringUtil.replaceStruct(input, Caster.toStruct(struct), true);
    }

    @Override
    public Object invoke(PageContext pc, Object[] args) throws PageException {
        if (args.length == 2) return call(pc, Caster.toString(args[0]), args[1]);
        if (args.length == 3) return call(pc, Caster.toString(args[0]), args[1], Caster.toString(args[2]));
        if (args.length == 4) return call(pc, Caster.toString(args[0]), args[1], Caster.toString(args[2]), Caster.toString(args[3]));
        throw new FunctionException(pc, "Replace", 2, 4, args.length);
    }
}<|fim▁end|>
import lucee.runtime.op.Caster;
import lucee.runtime.op.Decision;
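ReplaceNoCase forwards to StringUtil.replace(str, sub1, sub2, onlyFirst, true), where onlyFirst is derived from the scope argument: anything other than "all" means replace only the first occurrence. A sketch of the same case-insensitive replace semantics in Python, built on re.sub:

```python
import re

def replace_no_case(text, find, repl, scope="one"):
    """Case-insensitive literal replace; scope 'all' replaces every
    occurrence, anything else replaces only the first one."""
    if not find:
        raise ValueError("The string length must be greater than 0")
    count = 0 if scope.lower() == "all" else 1  # re.sub: count=0 means unlimited
    # The lambda keeps backslashes in repl from being read as group references.
    return re.sub(re.escape(find), lambda _: repl, text,
                  count=count, flags=re.IGNORECASE)

assert replace_no_case("Hello WORLD, hello world", "hello", "hi") == \
    "hi WORLD, hello world"
assert replace_no_case("Hello WORLD, hello world", "hello", "hi", "all") == \
    "hi WORLD, hi world"
```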
<|file_name|>adt.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! * # Representation of Algebraic Data Types * * This module determines how to represent enums, structs, and tuples * based on their monomorphized types; it is responsible both for * choosing a representation and translating basic operations on * values of those types. (Note: exporting the representations for * debuggers is handled in debuginfo.rs, not here.) * * Note that the interface treats everything as a general case of an * enum, so structs/tuples/etc. have one pseudo-variant with * discriminant 0; i.e., as if they were a univariant enum. * * Having everything in one place will enable improvements to data * structure representation; possibilities include: * * - User-specified alignment (e.g., cacheline-aligning parts of * concurrently accessed data structures); LLVM can't represent this * directly, so we'd have to insert padding fields in any structure * that might contain one and adjust GEP indices accordingly. See * issue #4578. * * - Store nested enums' discriminants in the same word. Rather, if * some variants start with enums, and those enums representations * have unused alignment padding between discriminant and body, the * outer enum's discriminant can be stored there and those variants * can start at offset 0. Kind of fancy, and might need work to * make copies of the inner enum type cooperate, but it could help * with `Option` or `Result` wrapped around another enum. * * - Tagged pointers would be neat, but given that any type can be * used unboxed and any field can have pointers (including mutable) * taken to it, implementing them for Rust seems difficult. */ #![allow(unsigned_negate)] use std::collections::Map; use std::num::Int; use std::rc::Rc; use llvm::{ValueRef, True, IntEQ, IntNE}; use middle::subst; use middle::subst::Subst; use middle::trans::_match; use middle::trans::build::*; use middle::trans::cleanup; use middle::trans::cleanup::CleanupMethods; use middle::trans::common::*; use middle::trans::datum; use middle::trans::machine; use middle::trans::type_::Type; use middle::trans::type_of; use middle::ty; use middle::ty::Disr; use syntax::abi::{X86, X86_64, Arm, Mips, Mipsel}; use syntax::ast; use syntax::attr; use syntax::attr::IntType; use util::ppaux::ty_to_string; type Hint = attr::ReprAttr; /// Representations. #[deriving(Eq, PartialEq, Show)] pub enum Repr { /// C-like enums; basically an int. CEnum(IntType, Disr, Disr), // discriminant range (signedness based on the IntType) /** * Single-case variants, and structs/tuples/records. * * Structs with destructors need a dynamic destroyedness flag to * avoid running the destructor too many times; this is included * in the `Struct` if present. */ Univariant(Struct, bool), /** * General-case enums: for each case there is a struct, and they * all start with a field for the discriminant. * * Types with destructors need a dynamic destroyedness flag to * avoid running the destructor too many times; the last argument * indicates whether such a flag is present. 
*/ General(IntType, Vec<Struct>, bool), /** * Two cases distinguished by a nullable pointer: the case with discriminant * `nndiscr` must have single field which is known to be nonnull due to its type. * The other case is known to be zero sized. Hence we represent the enum * as simply a nullable pointer: if not null it indicates the `nndiscr` variant, * otherwise it indicates the other case. */ RawNullablePointer { pub nndiscr: Disr, pub nnty: ty::t, pub nullfields: Vec<ty::t> }, /** * Two cases distinguished by a nullable pointer: the case with discriminant * `nndiscr` is represented by the struct `nonnull`, where the `ptrfield`th * field is known to be nonnull due to its type; if that field is null, then * it represents the other case, which is inhabited by at most one value * (and all other fields are undefined/unused). * * For example, `std::option::Option` instantiated at a safe pointer type * is represented such that `None` is a null pointer and `Some` is the * identity function. */ StructWrappedNullablePointer { pub nonnull: Struct, pub nndiscr: Disr, pub ptrfield: PointerField, pub nullfields: Vec<ty::t>, } } /// For structs, and struct-like parts of anything fancier. #[deriving(Eq, PartialEq, Show)] pub struct Struct { // If the struct is DST, then the size and alignment do not take into // account the unsized fields of the struct. pub size: u64, pub align: u32, pub sized: bool, pub packed: bool, pub fields: Vec<ty::t> } /** * Convenience for `represent_type`. There should probably be more or * these, for places in trans where the `ty::t` isn't directly * available. */ pub fn represent_node(bcx: Block, node: ast::NodeId) -> Rc<Repr> { represent_type(bcx.ccx(), node_id_type(bcx, node)) } /// Decides how to represent a given type. pub fn represent_type(cx: &CrateContext, t: ty::t) -> Rc<Repr> { debug!("Representing: {}", ty_to_string(cx.tcx(), t)); match cx.adt_reprs().borrow().find(&t) { Some(repr) => return repr.clone(), None => {} } let repr = Rc::new(represent_type_uncached(cx, t)); debug!("Represented as: {}", repr) cx.adt_reprs().borrow_mut().insert(t, repr.clone()); repr } fn represent_type_uncached(cx: &CrateContext, t: ty::t) -> Repr { match ty::get(t).sty { ty::ty_tup(ref elems) => { return Univariant(mk_struct(cx, elems.as_slice(), false, t), false) } ty::ty_struct(def_id, ref substs) => { let fields = ty::lookup_struct_fields(cx.tcx(), def_id); let mut ftys = fields.iter().map(|field| { ty::lookup_field_type(cx.tcx(), def_id, field.id, substs) }).collect::<Vec<_>>(); let packed = ty::lookup_packed(cx.tcx(), def_id); let dtor = ty::ty_dtor(cx.tcx(), def_id).has_drop_flag(); if dtor { ftys.push(ty::mk_bool()); } return Univariant(mk_struct(cx, ftys.as_slice(), packed, t), dtor) } ty::ty_unboxed_closure(def_id, _) => { let upvars = ty::unboxed_closure_upvars(cx.tcx(), def_id); let upvar_types = upvars.iter().map(|u| u.ty).collect::<Vec<_>>(); return Univariant(mk_struct(cx, upvar_types.as_slice(), false, t), false) } ty::ty_enum(def_id, ref substs) => { let cases = get_cases(cx.tcx(), def_id, substs); let hint = *ty::lookup_repr_hints(cx.tcx(), def_id).as_slice().get(0) .unwrap_or(&attr::ReprAny); let dtor = ty::ty_dtor(cx.tcx(), def_id).has_drop_flag(); if cases.len() == 0 { // Uninhabitable; represent as unit // (Typechecking will reject discriminant-sizing attrs.) 
assert_eq!(hint, attr::ReprAny); let ftys = if dtor { vec!(ty::mk_bool()) } else { vec!() }; return Univariant(mk_struct(cx, ftys.as_slice(), false, t), dtor); } if !dtor && cases.iter().all(|c| c.tys.len() == 0) { // All bodies empty -> intlike let discrs: Vec<u64> = cases.iter().map(|c| c.discr).collect(); let bounds = IntBounds { ulo: *discrs.iter().min().unwrap(), uhi: *discrs.iter().max().unwrap(), slo: discrs.iter().map(|n| *n as i64).min().unwrap(), shi: discrs.iter().map(|n| *n as i64).max().unwrap() }; return mk_cenum(cx, hint, &bounds); } // Since there's at least one // non-empty body, explicit discriminants should have // been rejected by a checker before this point. if !cases.iter().enumerate().all(|(i,c)| c.discr == (i as Disr)) { cx.sess().bug(format!("non-C-like enum {} with specified \ discriminants", ty::item_path_str(cx.tcx(), def_id)).as_slice()); } if cases.len() == 1 { // Equivalent to a struct/tuple/newtype. // (Typechecking will reject discriminant-sizing attrs.) assert_eq!(hint, attr::ReprAny); let mut ftys = cases.get(0).tys.clone(); if dtor { ftys.push(ty::mk_bool()); } return Univariant(mk_struct(cx, ftys.as_slice(), false, t), dtor); } if !dtor && cases.len() == 2 && hint == attr::ReprAny { // Nullable pointer optimization let mut discr = 0; while discr < 2 { if cases.get(1 - discr).is_zerolen(cx, t) { let st = mk_struct(cx, cases.get(discr).tys.as_slice(), false, t); match cases.get(discr).find_ptr() { Some(ThinPointer(_)) if st.fields.len() == 1 => { return RawNullablePointer { nndiscr: discr as Disr, nnty: *st.fields.get(0), nullfields: cases.get(1 - discr).tys.clone() }; } Some(ptrfield) => { return StructWrappedNullablePointer { nndiscr: discr as Disr, nonnull: st, ptrfield: ptrfield, nullfields: cases.get(1 - discr).tys.clone() }; } None => { } } } discr += 1; } } // The general case. assert!((cases.len() - 1) as i64 >= 0); let bounds = IntBounds { ulo: 0, uhi: (cases.len() - 1) as u64, slo: 0, shi: (cases.len() - 1) as i64 }; let ity = range_to_inttype(cx, hint, &bounds); let fields : Vec<_> = cases.iter().map(|c| { let mut ftys = vec!(ty_of_inttype(ity)).append(c.tys.as_slice()); if dtor { ftys.push(ty::mk_bool()); } mk_struct(cx, ftys.as_slice(), false, t) }).collect(); ensure_enum_fits_in_address_space(cx, ity, fields.as_slice(), t); General(ity, fields, dtor) } _ => cx.sess().bug(format!("adt::represent_type called on non-ADT type: {}", ty_to_string(cx.tcx(), t)).as_slice()) } } // this should probably all be in ty struct Case { discr: Disr, tys: Vec<ty::t> } #[deriving(Eq, PartialEq, Show)] pub enum PointerField { ThinPointer(uint), FatPointer(uint, uint) } impl Case { fn is_zerolen(&self, cx: &CrateContext, scapegoat: ty::t) -> bool { mk_struct(cx, self.tys.as_slice(), false, scapegoat).size == 0 } fn find_ptr(&self) -> Option<PointerField> { use back::abi::{fn_field_code, slice_elt_base, trt_field_box}; for (i, &ty) in self.tys.iter().enumerate() { match ty::get(ty).sty { // &T/&mut T could either be a thin or fat pointer depending on T ty::ty_rptr(_, ty::mt { ty, .. }) => match ty::get(ty).sty { // &[T] and &str are a pointer and length pair ty::ty_vec(_, None) | ty::ty_str => return Some(FatPointer(i, slice_elt_base)), // &Trait/&mut Trait are a pair of pointers: the actual object and a vtable ty::ty_trait(..) 
=> return Some(FatPointer(i, trt_field_box)), // Any other &T/&mut T is just a pointer _ => return Some(ThinPointer(i)) }, // Box<T> could either be a thin or fat pointer depending on T ty::ty_uniq(t) => match ty::get(t).sty { ty::ty_vec(_, None) => return Some(FatPointer(i, slice_elt_base)), // Box<Trait> is a pair of pointers: the actual object and a vtable ty::ty_trait(..) => return Some(FatPointer(i, trt_field_box)), // Any other Box<T> is just a pointer _ => return Some(ThinPointer(i)) }, // Functions are just pointers ty::ty_bare_fn(..) => return Some(ThinPointer(i)), // Closures are a pair of pointers: the code and environment ty::ty_closure(..) => return Some(FatPointer(i, fn_field_code)), // Anything else is not a pointer _ => continue } } None } } fn get_cases(tcx: &ty::ctxt, def_id: ast::DefId, substs: &subst::Substs) -> Vec<Case> { ty::enum_variants(tcx, def_id).iter().map(|vi| { let arg_tys = vi.args.iter().map(|&raw_ty| { raw_ty.subst(tcx, substs) }).collect(); Case { discr: vi.disr_val, tys: arg_tys } }).collect() } fn mk_struct(cx: &CrateContext, tys: &[ty::t], packed: bool, scapegoat: ty::t) -> Struct { let sized = tys.iter().all(|&ty| ty::type_is_sized(cx.tcx(), ty)); let lltys : Vec<Type> = if sized { tys.iter() .map(|&ty| type_of::sizing_type_of(cx, ty)).collect() } else { tys.iter().filter(|&ty| ty::type_is_sized(cx.tcx(), *ty)) .map(|&ty| type_of::sizing_type_of(cx, ty)).collect() }; ensure_struct_fits_in_address_space(cx, lltys.as_slice(), packed, scapegoat); let llty_rec = Type::struct_(cx, lltys.as_slice(), packed); Struct { size: machine::llsize_of_alloc(cx, llty_rec), align: machine::llalign_of_min(cx, llty_rec), sized: sized, packed: packed, fields: Vec::from_slice(tys), } } #[deriving(Show)] struct IntBounds { slo: i64, shi: i64, ulo: u64, uhi: u64 } fn mk_cenum(cx: &CrateContext, hint: Hint, bounds: &IntBounds) -> Repr { let it = range_to_inttype(cx, hint, bounds); match it { attr::SignedInt(_) => CEnum(it, bounds.slo as Disr, bounds.shi as Disr), attr::UnsignedInt(_) => CEnum(it, bounds.ulo, bounds.uhi) } } fn range_to_inttype(cx: &CrateContext, hint: Hint, bounds: &IntBounds) -> IntType { debug!("range_to_inttype: {} {}", hint, bounds); // Lists of sizes to try. u64 is always allowed as a fallback. #[allow(non_uppercase_statics)] static choose_shortest: &'static[IntType] = &[ attr::UnsignedInt(ast::TyU8), attr::SignedInt(ast::TyI8), attr::UnsignedInt(ast::TyU16), attr::SignedInt(ast::TyI16), attr::UnsignedInt(ast::TyU32), attr::SignedInt(ast::TyI32)]; #[allow(non_uppercase_statics)] static at_least_32: &'static[IntType] = &[ attr::UnsignedInt(ast::TyU32), attr::SignedInt(ast::TyI32)]; let attempts; match hint { attr::ReprInt(span, ity) => { if !bounds_usable(cx, ity, bounds) { cx.sess().span_bug(span, "representation hint insufficient for discriminant range") } return ity; } attr::ReprExtern => { attempts = match cx.sess().targ_cfg.arch { X86 | X86_64 => at_least_32, // WARNING: the ARM EABI has two variants; the one corresponding to `at_least_32` // appears to be used on Linux and NetBSD, but some systems may use the variant // corresponding to `choose_shortest`. However, we don't run on those yet...? 
Arm => at_least_32, Mips => at_least_32, Mipsel => at_least_32, } } attr::ReprAny => { attempts = choose_shortest; }, attr::ReprPacked => { cx.tcx().sess.bug("range_to_inttype: found ReprPacked on an enum"); } } for &ity in attempts.iter() { if bounds_usable(cx, ity, bounds) { return ity; } } return attr::UnsignedInt(ast::TyU64); } pub fn ll_inttype(cx: &CrateContext, ity: IntType) -> Type { match ity { attr::SignedInt(t) => Type::int_from_ty(cx, t), attr::UnsignedInt(t) => Type::uint_from_ty(cx, t) } } fn bounds_usable(cx: &CrateContext, ity: IntType, bounds: &IntBounds) -> bool { debug!("bounds_usable: {} {}", ity, bounds); match ity { attr::SignedInt(_) => { let lllo = C_integral(ll_inttype(cx, ity), bounds.slo as u64, true); let llhi = C_integral(ll_inttype(cx, ity), bounds.shi as u64, true); bounds.slo == const_to_int(lllo) as i64 && bounds.shi == const_to_int(llhi) as i64 } attr::UnsignedInt(_) => { let lllo = C_integral(ll_inttype(cx, ity), bounds.ulo, false); let llhi = C_integral(ll_inttype(cx, ity), bounds.uhi, false); bounds.ulo == const_to_uint(lllo) as u64 && bounds.uhi == const_to_uint(llhi) as u64 } } } pub fn ty_of_inttype(ity: IntType) -> ty::t { match ity { attr::SignedInt(t) => ty::mk_mach_int(t), attr::UnsignedInt(t) => ty::mk_mach_uint(t) } } // LLVM doesn't like types that don't fit in the address space fn ensure_struct_fits_in_address_space(ccx: &CrateContext, fields: &[Type], packed: bool, scapegoat: ty::t) { let mut offset = 0; for &llty in fields.iter() { // Invariant: offset < ccx.max_obj_size() <= 1<<61 if !packed { let type_align = machine::llalign_of_min(ccx, llty); offset = roundup(offset, type_align); } // type_align is a power-of-2, so still offset < ccx.max_obj_size() // llsize_of_alloc(ccx, llty) is also less than ccx.max_obj_size() // so the sum is less than 1<<62 (and therefore can't overflow). offset += machine::llsize_of_alloc(ccx, llty); if offset >= ccx.max_obj_size() { ccx.report_overbig_object(scapegoat); } } } fn union_size_and_align(sts: &[Struct]) -> (machine::llsize, machine::llalign) { let size = sts.iter().map(|st| st.size).max().unwrap(); let most_aligned = sts.iter().max_by(|st| st.align).unwrap(); (size, most_aligned.align) } fn ensure_enum_fits_in_address_space(ccx: &CrateContext, discr: IntType, fields: &[Struct], scapegoat: ty::t) { let discr_size = machine::llsize_of_alloc(ccx, ll_inttype(ccx, discr)); let (field_size, field_align) = union_size_and_align(fields); // field_align < 1<<32, discr_size <= 8, field_size < MAX_OBJ_SIZE <= 1<<61 // so the sum is less than 1<<62 (and can't overflow). let total_size = roundup(discr_size, field_align) + field_size; if total_size >= ccx.max_obj_size() { ccx.report_overbig_object(scapegoat); } } /** * LLVM-level types are a little complicated. * * C-like enums need to be actual ints, not wrapped in a struct, * because that changes the ABI on some platforms (see issue #10308). * * For nominal types, in some cases, we need to use LLVM named structs * and fill in the actual contents in a second pass to prevent * unbounded recursion; see also the comments in `trans::type_of`. */ pub fn type_of(cx: &CrateContext, r: &Repr) -> Type { generic_type_of(cx, r, None, false, false) } // Pass dst=true if the type you are passing is a DST. Yes, we could figure // this out, but if you call this on an unsized type without realising it, you // are going to get the wrong type (it will not include the unsized parts of it). 
pub fn sizing_type_of(cx: &CrateContext, r: &Repr, dst: bool) -> Type { generic_type_of(cx, r, None, true, dst) } pub fn incomplete_type_of(cx: &CrateContext, r: &Repr, name: &str) -> Type { generic_type_of(cx, r, Some(name), false, false) } pub fn finish_type_of(cx: &CrateContext, r: &Repr, llty: &mut Type) { match *r { CEnum(..) | General(..) | RawNullablePointer { .. } => { } Univariant(ref st, _) | StructWrappedNullablePointer { nonnull: ref st, .. } => llty.set_struct_body(struct_llfields(cx, st, false, false).as_slice(), st.packed) } } fn generic_type_of(cx: &CrateContext, r: &Repr, name: Option<&str>, sizing: bool, dst: bool) -> Type { match *r { CEnum(ity, _, _) => ll_inttype(cx, ity), RawNullablePointer { nnty, .. } => type_of::sizing_type_of(cx, nnty), Univariant(ref st, _) | StructWrappedNullablePointer { nonnull: ref st, .. } => { match name { None => { Type::struct_(cx, struct_llfields(cx, st, sizing, dst).as_slice(), st.packed) } Some(name) => { assert_eq!(sizing, false); Type::named_struct(cx, name) } } } General(ity, ref sts, _) => { // We need a representation that has: // * The alignment of the most-aligned field // * The size of the largest variant (rounded up to that alignment) // * No alignment padding anywhere any variant has actual data // (currently matters only for enums small enough to be immediate) // * The discriminant in an obvious place. // // So we start with the discriminant, pad it up to the alignment with // more of its own type, then use alignment-sized ints to get the rest // of the size. // // FIXME #10604: this breaks when vector types are present. let (size, align) = union_size_and_align(sts.as_slice()); let align_s = align as u64; let discr_ty = ll_inttype(cx, ity); let discr_size = machine::llsize_of_alloc(cx, discr_ty); let align_units = (size + align_s - 1) / align_s - 1; let pad_ty = match align_s { 1 => Type::array(&Type::i8(cx), align_units), 2 => Type::array(&Type::i16(cx), align_units), 4 => Type::array(&Type::i32(cx), align_units), 8 if machine::llalign_of_min(cx, Type::i64(cx)) == 8 => Type::array(&Type::i64(cx), align_units), a if a.count_ones() == 1 => Type::array(&Type::vector(&Type::i32(cx), a / 4), align_units), _ => fail!("unsupported enum alignment: {}", align) }; assert_eq!(machine::llalign_of_min(cx, pad_ty), align); assert_eq!(align_s % discr_size, 0); let fields = vec!(discr_ty, Type::array(&discr_ty, align_s / discr_size - 1), pad_ty); match name { None => Type::struct_(cx, fields.as_slice(), false), Some(name) => { let mut llty = Type::named_struct(cx, name); llty.set_struct_body(fields.as_slice(), false); llty } } } } } fn struct_llfields(cx: &CrateContext, st: &Struct, sizing: bool, dst: bool) -> Vec<Type> { if sizing { st.fields.iter().filter(|&ty| !dst || ty::type_is_sized(cx.tcx(), *ty)) .map(|&ty| type_of::sizing_type_of(cx, ty)).collect() } else { st.fields.iter().map(|&ty| type_of::type_of(cx, ty)).collect() } } /** * Obtain a representation of the discriminant sufficient to translate * destructuring; this may or may not involve the actual discriminant. * * This should ideally be less tightly tied to `_match`. */ pub fn trans_switch(bcx: Block, r: &Repr, scrutinee: ValueRef) -> (_match::BranchKind, Option<ValueRef>) {<|fim▁hole|> CEnum(..) | General(..) | RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => { (_match::Switch, Some(trans_get_discr(bcx, r, scrutinee, None))) } Univariant(..) => { (_match::Single, None) } } } /// Obtain the actual discriminant of a value. 
pub fn trans_get_discr(bcx: Block, r: &Repr, scrutinee: ValueRef, cast_to: Option<Type>) -> ValueRef { let signed; let val; match *r { CEnum(ity, min, max) => { val = load_discr(bcx, ity, scrutinee, min, max); signed = ity.is_signed(); } General(ity, ref cases, _) => { let ptr = GEPi(bcx, scrutinee, [0, 0]); val = load_discr(bcx, ity, ptr, 0, (cases.len() - 1) as Disr); signed = ity.is_signed(); } Univariant(..) => { val = C_u8(bcx.ccx(), 0); signed = false; } RawNullablePointer { nndiscr, nnty, .. } => { let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty); val = ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty)); signed = false; } StructWrappedNullablePointer { nndiscr, ptrfield, .. } => { val = struct_wrapped_nullable_bitdiscr(bcx, nndiscr, ptrfield, scrutinee); signed = false; } } match cast_to { None => val, Some(llty) => if signed { SExt(bcx, val, llty) } else { ZExt(bcx, val, llty) } } } fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: Disr, ptrfield: PointerField, scrutinee: ValueRef) -> ValueRef { let llptrptr = match ptrfield { ThinPointer(field) => GEPi(bcx, scrutinee, [0, field]), FatPointer(field, pair) => GEPi(bcx, scrutinee, [0, field, pair]) }; let llptr = Load(bcx, llptrptr); let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; ICmp(bcx, cmp, llptr, C_null(val_ty(llptr))) } /// Helper for cases where the discriminant is simply loaded. fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr) -> ValueRef { let llty = ll_inttype(bcx.ccx(), ity); assert_eq!(val_ty(ptr), llty.ptr_to()); let bits = machine::llbitsize_of_real(bcx.ccx(), llty); assert!(bits <= 64); let bits = bits as uint; let mask = (-1u64 >> (64 - bits)) as Disr; if (max + 1) & mask == min & mask { // i.e., if the range is everything. The lo==hi case would be // rejected by the LLVM verifier (it would mean either an // empty set, which is impossible, or the entire range of the // type, which is pointless). Load(bcx, ptr) } else { // llvm::ConstantRange can deal with ranges that wrap around, // so an overflow on (max + 1) is fine. LoadRangeAssert(bcx, ptr, min, (max+1), /* signed: */ True) } } /** * Yield information about how to dispatch a case of the * discriminant-like value returned by `trans_switch`. * * This should ideally be less tightly tied to `_match`. */ pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr, discr: Disr) -> _match::OptResult<'blk, 'tcx> { match *r { CEnum(ity, _, _) => { _match::SingleResult(Result::new(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr as u64, true))) } General(ity, _, _) => { _match::SingleResult(Result::new(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr as u64, true))) } Univariant(..) => { bcx.ccx().sess().bug("no cases for univariants or structs") } RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => { assert!(discr == 0 || discr == 1); _match::SingleResult(Result::new(bcx, C_bool(bcx.ccx(), discr != 0))) } } } /** * Set the discriminant for a new value of the given case of the given * representation. 
*/ pub fn trans_set_discr(bcx: Block, r: &Repr, val: ValueRef, discr: Disr) { match *r { CEnum(ity, min, max) => { assert_discr_in_range(ity, min, max, discr); Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr as u64, true), val) } General(ity, ref cases, dtor) => { if dtor { let ptr = trans_field_ptr(bcx, r, val, discr, cases.get(discr as uint).fields.len() - 2); Store(bcx, C_u8(bcx.ccx(), 1), ptr); } Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr as u64, true), GEPi(bcx, val, [0, 0])) } Univariant(ref st, dtor) => { assert_eq!(discr, 0); if dtor { Store(bcx, C_u8(bcx.ccx(), 1), GEPi(bcx, val, [0, st.fields.len() - 1])); } } RawNullablePointer { nndiscr, nnty, ..} => { if discr != nndiscr { let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty); Store(bcx, C_null(llptrty), val) } } StructWrappedNullablePointer { nonnull: ref nonnull, nndiscr, ptrfield, .. } => { if discr != nndiscr { let (llptrptr, llptrty) = match ptrfield { ThinPointer(field) => (GEPi(bcx, val, [0, field]), type_of::type_of(bcx.ccx(), *nonnull.fields.get(field))), FatPointer(field, pair) => { let v = GEPi(bcx, val, [0, field, pair]); (v, val_ty(v).element_type()) } }; Store(bcx, C_null(llptrty), llptrptr) } } } } fn assert_discr_in_range(ity: IntType, min: Disr, max: Disr, discr: Disr) { match ity { attr::UnsignedInt(_) => assert!(min <= discr && discr <= max), attr::SignedInt(_) => assert!(min as i64 <= discr as i64 && discr as i64 <= max as i64) } } /** * The number of fields in a given case; for use when obtaining this * information from the type or definition is less convenient. */ pub fn num_args(r: &Repr, discr: Disr) -> uint { match *r { CEnum(..) => 0, Univariant(ref st, dtor) => { assert_eq!(discr, 0); st.fields.len() - (if dtor { 1 } else { 0 }) } General(_, ref cases, dtor) => { cases.get(discr as uint).fields.len() - 1 - (if dtor { 1 } else { 0 }) } RawNullablePointer { nndiscr, ref nullfields, .. } => { if discr == nndiscr { 1 } else { nullfields.len() } } StructWrappedNullablePointer { nonnull: ref nonnull, nndiscr, nullfields: ref nullfields, .. } => { if discr == nndiscr { nonnull.fields.len() } else { nullfields.len() } } } } /// Access a field, at a point when the value's case is known. pub fn trans_field_ptr(bcx: Block, r: &Repr, val: ValueRef, discr: Disr, ix: uint) -> ValueRef { // Note: if this ever needs to generate conditionals (e.g., if we // decide to do some kind of cdr-coding-like non-unique repr // someday), it will need to return a possibly-new bcx as well. match *r { CEnum(..) => { bcx.ccx().sess().bug("element access in C-like enum") } Univariant(ref st, _dtor) => { assert_eq!(discr, 0); struct_field_ptr(bcx, st, val, ix, false) } General(_, ref cases, _) => { struct_field_ptr(bcx, cases.get(discr as uint), val, ix + 1, true) } RawNullablePointer { nndiscr, ref nullfields, .. } | StructWrappedNullablePointer { nndiscr, ref nullfields, .. } if discr != nndiscr => { // The unit-like case might have a nonzero number of unit-like fields. // (e.d., Result of Either with (), as one side.) let ty = type_of::type_of(bcx.ccx(), *nullfields.get(ix)); assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0); // The contents of memory at this pointer can't matter, but use // the value that's "reasonable" in case of pointer comparison. PointerCast(bcx, val, ty.ptr_to()) } RawNullablePointer { nndiscr, nnty, .. 
} => { assert_eq!(ix, 0); assert_eq!(discr, nndiscr); let ty = type_of::type_of(bcx.ccx(), nnty); PointerCast(bcx, val, ty.ptr_to()) } StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { assert_eq!(discr, nndiscr); struct_field_ptr(bcx, nonnull, val, ix, false) } } } pub fn struct_field_ptr(bcx: Block, st: &Struct, val: ValueRef, ix: uint, needs_cast: bool) -> ValueRef { let val = if needs_cast { let ccx = bcx.ccx(); let fields = st.fields.iter().map(|&ty| type_of::type_of(ccx, ty)).collect::<Vec<_>>(); let real_ty = Type::struct_(ccx, fields.as_slice(), st.packed); PointerCast(bcx, val, real_ty.ptr_to()) } else { val }; GEPi(bcx, val, [0, ix]) } pub fn fold_variants<'blk, 'tcx>( bcx: Block<'blk, 'tcx>, r: &Repr, value: ValueRef, f: |Block<'blk, 'tcx>, &Struct, ValueRef| -> Block<'blk, 'tcx>) -> Block<'blk, 'tcx> { let fcx = bcx.fcx; match *r { Univariant(ref st, _) => { f(bcx, st, value) } General(ity, ref cases, _) => { let ccx = bcx.ccx(); let unr_cx = fcx.new_temp_block("enum-variant-iter-unr"); Unreachable(unr_cx); let discr_val = trans_get_discr(bcx, r, value, None); let llswitch = Switch(bcx, discr_val, unr_cx.llbb, cases.len()); let bcx_next = fcx.new_temp_block("enum-variant-iter-next"); for (discr, case) in cases.iter().enumerate() { let mut variant_cx = fcx.new_temp_block( format!("enum-variant-iter-{}", discr.to_string()).as_slice() ); let rhs_val = C_integral(ll_inttype(ccx, ity), discr as u64, true); AddCase(llswitch, rhs_val, variant_cx.llbb); let fields = case.fields.iter().map(|&ty| type_of::type_of(bcx.ccx(), ty)).collect::<Vec<_>>(); let real_ty = Type::struct_(ccx, fields.as_slice(), case.packed); let variant_value = PointerCast(variant_cx, value, real_ty.ptr_to()); variant_cx = f(variant_cx, case, variant_value); Br(variant_cx, bcx_next.llbb); } bcx_next } _ => unreachable!() } } /// Access the struct drop flag, if present. pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, r: &Repr, val: ValueRef) -> datum::DatumBlock<'blk, 'tcx, datum::Expr> { let ptr_ty = ty::mk_imm_ptr(bcx.tcx(), ty::mk_bool()); match *r { Univariant(ref st, true) => { let flag_ptr = GEPi(bcx, val, [0, st.fields.len() - 1]); datum::immediate_rvalue_bcx(bcx, flag_ptr, ptr_ty).to_expr_datumblock() } General(_, _, true) => { let fcx = bcx.fcx; let custom_cleanup_scope = fcx.push_custom_cleanup_scope(); let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum( bcx, ty::mk_bool(), "drop_flag", false, cleanup::CustomScope(custom_cleanup_scope), (), |_, bcx, _| bcx )); bcx = fold_variants(bcx, r, val, |variant_cx, st, value| { let ptr = struct_field_ptr(variant_cx, st, value, (st.fields.len() - 1), false); datum::Datum::new(ptr, ptr_ty, datum::Rvalue::new(datum::ByRef)) .store_to(variant_cx, scratch.val) }); let expr_datum = scratch.to_expr_datum(); fcx.pop_custom_cleanup_scope(custom_cleanup_scope); datum::DatumBlock::new(bcx, expr_datum) } _ => bcx.ccx().sess().bug("tried to get drop flag of non-droppable type") } } /** * Construct a constant value, suitable for initializing a * GlobalVariable, given a case and constant values for its fields. * Note that this may have a different LLVM type (and different * alignment!) from the representation's `type_of`, so it needs a * pointer cast before use. * * The LLVM type system does not directly support unions, and only * pointers can be bitcast, so a constant (and, by extension, the * GlobalVariable initialized by it) will have a type that can vary * depending on which case of an enum it is. 
* * To understand the alignment situation, consider `enum E { V64(u64), * V32(u32, u32) }` on Windows. The type has 8-byte alignment to * accommodate the u64, but `V32(x, y)` would have LLVM type `{i32, * i32, i32}`, which is 4-byte aligned. * * Currently the returned value has the same size as the type, but * this could be changed in the future to avoid allocating unnecessary * space after values of shorter-than-maximum cases. */ pub fn trans_const(ccx: &CrateContext, r: &Repr, discr: Disr, vals: &[ValueRef]) -> ValueRef { match *r { CEnum(ity, min, max) => { assert_eq!(vals.len(), 0); assert_discr_in_range(ity, min, max, discr); C_integral(ll_inttype(ccx, ity), discr as u64, true) } General(ity, ref cases, _) => { let case = cases.get(discr as uint); let max_sz = cases.iter().map(|x| x.size).max().unwrap(); let lldiscr = C_integral(ll_inttype(ccx, ity), discr as u64, true); let contents = build_const_struct(ccx, case, (vec!(lldiscr)).append(vals).as_slice()); C_struct(ccx, contents.append([padding(ccx, max_sz - case.size)]).as_slice(), false) } Univariant(ref st, _dro) => { assert!(discr == 0); let contents = build_const_struct(ccx, st, vals); C_struct(ccx, contents.as_slice(), st.packed) } RawNullablePointer { nndiscr, nnty, .. } => { if discr == nndiscr { assert_eq!(vals.len(), 1); vals[0] } else { C_null(type_of::sizing_type_of(ccx, nnty)) } } StructWrappedNullablePointer { nonnull: ref nonnull, nndiscr, .. } => { if discr == nndiscr { C_struct(ccx, build_const_struct(ccx, nonnull, vals).as_slice(), false) } else { let vals = nonnull.fields.iter().map(|&ty| { // Always use null even if it's not the `ptrfield`th // field; see #8506. C_null(type_of::sizing_type_of(ccx, ty)) }).collect::<Vec<ValueRef>>(); C_struct(ccx, build_const_struct(ccx, nonnull, vals.as_slice()).as_slice(), false) } } } } /** * Compute struct field offsets relative to struct begin. */ fn compute_struct_field_offsets(ccx: &CrateContext, st: &Struct) -> Vec<u64> { let mut offsets = vec!(); let mut offset = 0; for &ty in st.fields.iter() { let llty = type_of::sizing_type_of(ccx, ty); if !st.packed { let type_align = type_of::align_of(ccx, ty); offset = roundup(offset, type_align); } offsets.push(offset); offset += machine::llsize_of_alloc(ccx, llty); } assert_eq!(st.fields.len(), offsets.len()); offsets } /** * Building structs is a little complicated, because we might need to * insert padding if a field's value is less aligned than its type. * * Continuing the example from `trans_const`, a value of type `(u32, * E)` should have the `E` at offset 8, but if that field's * initializer is 4-byte aligned then simply translating the tuple as * a two-element struct will locate it at offset 4, and accesses to it * will read the wrong memory. 
*/ fn build_const_struct(ccx: &CrateContext, st: &Struct, vals: &[ValueRef]) -> Vec<ValueRef> { assert_eq!(vals.len(), st.fields.len()); let target_offsets = compute_struct_field_offsets(ccx, st); // offset of current value let mut offset = 0; let mut cfields = Vec::new(); for (&val, &target_offset) in vals.iter().zip(target_offsets.iter()) { if !st.packed { let val_align = machine::llalign_of_min(ccx, val_ty(val)); offset = roundup(offset, val_align); } if offset != target_offset { cfields.push(padding(ccx, target_offset - offset)); offset = target_offset; } assert!(!is_undef(val)); cfields.push(val); offset += machine::llsize_of_alloc(ccx, val_ty(val)); } assert!(st.sized && offset <= st.size); if offset != st.size { cfields.push(padding(ccx, st.size - offset)); } cfields } fn padding(ccx: &CrateContext, size: u64) -> ValueRef { C_undef(Type::array(&Type::i8(ccx), size)) } // FIXME this utility routine should be somewhere more general #[inline] fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a } /// Get the discriminant of a constant value. (Not currently used.) pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef) -> Disr { match *r { CEnum(ity, _, _) => { match ity { attr::SignedInt(..) => const_to_int(val) as Disr, attr::UnsignedInt(..) => const_to_uint(val) as Disr } } General(ity, _, _) => { match ity { attr::SignedInt(..) => const_to_int(const_get_elt(ccx, val, [0])) as Disr, attr::UnsignedInt(..) => const_to_uint(const_get_elt(ccx, val, [0])) as Disr } } Univariant(..) => 0, RawNullablePointer { nndiscr, .. } => { if is_null(val) { /* subtraction as uint is ok because nndiscr is either 0 or 1 */ (1 - nndiscr) as Disr } else { nndiscr } } StructWrappedNullablePointer { nndiscr, ptrfield, .. } => { let (idx, sub_idx) = match ptrfield { ThinPointer(field) => (field, None), FatPointer(field, pair) => (field, Some(pair)) }; if is_null(const_struct_field(ccx, val, idx, sub_idx)) { /* subtraction as uint is ok because nndiscr is either 0 or 1 */ (1 - nndiscr) as Disr } else { nndiscr } } } } /** * Extract a field of a constant value, as appropriate for its * representation. * * (Not to be confused with `common::const_get_elt`, which operates on * raw LLVM-level structs and arrays.) */ pub fn const_get_field(ccx: &CrateContext, r: &Repr, val: ValueRef, _discr: Disr, ix: uint) -> ValueRef { match *r { CEnum(..) => ccx.sess().bug("element access in C-like enum const"), Univariant(..) => const_struct_field(ccx, val, ix, None), General(..) => const_struct_field(ccx, val, ix + 1, None), RawNullablePointer { .. } => { assert_eq!(ix, 0); val } StructWrappedNullablePointer{ .. } => const_struct_field(ccx, val, ix, None) } } /// Extract field of struct-like const, skipping our alignment padding. fn const_struct_field(ccx: &CrateContext, val: ValueRef, ix: uint, sub_idx: Option<uint>) -> ValueRef { // Get the ix-th non-undef element of the struct. let mut real_ix = 0; // actual position in the struct let mut ix = ix; // logical index relative to real_ix let mut field; loop { loop { field = match sub_idx { Some(si) => const_get_elt(ccx, val, [real_ix, si as u32]), None => const_get_elt(ccx, val, [real_ix]) }; if !is_undef(field) { break; } real_ix = real_ix + 1; } if ix == 0 { return field; } ix = ix - 1; real_ix = real_ix + 1; } }<|fim▁end|>
match *r {
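The roundup and compute_struct_field_offsets routines in the Rust sample above reduce to simple alignment arithmetic: each field's offset is rounded up to that field's alignment before the field is placed, and the gap becomes padding. A standalone Python sketch of the same calculation (the (size, align) pairs below are made-up example values, not taken from the compiler):

def roundup(x, a):
    # round x up to the next multiple of the alignment a
    return (x + a - 1) // a * a

fields = [(8, 8), (4, 4), (1, 1)]  # hypothetical u64, u32, u8 fields as (size, align)
offset, offsets = 0, []
for size, align in fields:
    offset = roundup(offset, align)  # insert padding so the field starts aligned
    offsets.append(offset)
    offset += size
print(offsets)  # [0, 8, 12]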
<|file_name|>zlobpcg.cpp<|end_file_name|><|fim▁begin|>/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @author Stan Tomov @author Hartwig Anzt @precisions normal z -> s d c */ #include "magmasparse_internal.h" #define PRECISION_z #define COMPLEX #define RTOLERANCE lapackf77_dlamch( "E" ) #define ATOLERANCE lapackf77_dlamch( "E" ) /** Purpose ------- Solves an eigenvalue problem A * X = evalues X where A is a complex sparse matrix stored in the GPU memory. X and B are complex vectors stored on the GPU memory. This is a GPU implementation of the LOBPCG method. This method allocates all required memory space inside the routine. Also, the memory is not allocated as one big chunk, but separately for the different blocks. This allows using texture also for large matrices. Arguments --------- @param[in] A magma_z_matrix input matrix A @param[in,out] solver_par magma_z_solver_par* solver parameters @param[in,out] precond_par magma_z_precond_par* preconditioner parameters @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zheev ********************************************************************/ extern "C" magma_int_t magma_zlobpcg( magma_z_matrix A, magma_z_solver_par *solver_par, magma_z_preconditioner *precond_par, magma_queue_t queue ) { magma_int_t info = 0; #define residualNorms(i,iter) ( residualNorms + (i) + (iter)*n ) #define SWAP(x, y) { pointer = x; x = y; y = pointer; } #define hresidualNorms(i,iter) (hresidualNorms + (i) + (iter)*n ) #define gramA( m, n) (gramA + (m) + (n)*ldgram) #define gramB( m, n) (gramB + (m) + (n)*ldgram) #define gevectors(m, n) (gevectors + (m) + (n)*ldgram) #define h_gramB( m, n) (h_gramB + (m) + (n)*ldgram) #define magma_z_bspmv_tuned(m, n, alpha, A, X, beta, AX, queue) { \ magma_z_matrix x={Magma_CSR}, ax={Magma_CSR}; \ x.memory_location = Magma_DEV; x.num_rows = m; x.num_cols = n; x.major = MagmaColMajor; x.nnz = m*n; x.dval = X; x.storage_type = Magma_DENSE; \ ax.memory_location= Magma_DEV; ax.num_rows = m; ax.num_cols = n; ax.major = MagmaColMajor; ax.nnz = m*n; ax.dval = AX; ax.storage_type = Magma_DENSE; \ CHECK( magma_z_spmv(alpha, A, x, beta, ax, queue )); \ } //************************************************************** // %Memory allocation for the eigenvectors, eigenvalues, and workspace solver_par->solver = Magma_LOBPCG; magma_int_t m = A.num_rows; magma_int_t n = (solver_par->num_eigenvalues); magmaDoubleComplex *blockX = solver_par->eigenvectors; double *evalues = solver_par->eigenvalues; solver_par->numiter = 0; solver_par->spmv_count = 0; magmaDoubleComplex *dwork=NULL, *hwork=NULL; magmaDoubleComplex *blockP=NULL, *blockAP=NULL, *blockR=NULL, *blockAR=NULL, *blockAX=NULL, *blockW=NULL; magmaDoubleComplex *gramA=NULL, *gramB=NULL, *gramM=NULL; magmaDoubleComplex *gevectors=NULL, *h_gramB=NULL; dwork = NULL; hwork = NULL; blockP = NULL; blockR = NULL;<|fim▁hole|> blockAX = NULL; blockW = NULL; gramA = NULL; gramB = NULL; gramM = NULL; gevectors = NULL; h_gramB = NULL; magmaDoubleComplex *pointer, *origX = blockX; double *eval_gpu=NULL; magma_int_t iterationNumber, cBlockSize, restart = 1, iter; //Chronometry real_Double_t tempo1, tempo2; magma_int_t lwork = max( 2*n+n*magma_get_dsytrd_nb(n), 1 + 6*3*n + 2* 3*n* 3*n); magma_int_t *iwork={0}, liwork = 15*n+9; magma_int_t gramDim, ldgram = 3*n, ikind = 3; magmaDoubleComplex *hW={0}; // === Set solver parameters === double residualTolerance = solver_par->rtol; magma_int_t maxIterations =
solver_par->maxiter; double tmp; double r0=0; // set in 1st iteration // === Set some constants & defaults === magmaDoubleComplex c_zero = MAGMA_Z_ZERO; magmaDoubleComplex c_one = MAGMA_Z_ONE; magmaDoubleComplex c_neg_one = MAGMA_Z_NEG_ONE; double *residualNorms={0}, *condestGhistory={0}, condestG={0}; double *gevalues={0}; magma_int_t *activeMask={0}; double *hresidualNorms={0}; #ifdef COMPLEX double *rwork={0}; magma_int_t lrwork = 1 + 5*(3*n) + 2*(3*n)*(3*n); CHECK( magma_dmalloc_cpu(&rwork, lrwork)); #endif CHECK( magma_zmalloc_pinned( &hwork , lwork )); CHECK( magma_zmalloc( &blockAX , m*n )); CHECK( magma_zmalloc( &blockAR , m*n )); CHECK( magma_zmalloc( &blockAP , m*n )); CHECK( magma_zmalloc( &blockR , m*n )); CHECK( magma_zmalloc( &blockP , m*n )); CHECK( magma_zmalloc( &blockW , m*n )); CHECK( magma_zmalloc( &dwork , m*n )); CHECK( magma_dmalloc( &eval_gpu , 3*n )); //**********************************************************+ // === Check some parameters for possible quick exit === solver_par->info = MAGMA_SUCCESS; if (m < 2) info = MAGMA_DIVERGENCE; else if (n > m) info = MAGMA_SLOW_CONVERGENCE; if (solver_par->info != 0) { magma_xerbla( __func__, -(info) ); goto cleanup; } solver_par->info = info; // local info variable; // === Allocate GPU memory for the residual norms' history === CHECK( magma_dmalloc(&residualNorms, (maxIterations+1) * n)); CHECK( magma_malloc( (void **)&activeMask, (n+1) * sizeof(magma_int_t) )); // === Allocate CPU work space === CHECK( magma_dmalloc_cpu(&condestGhistory, maxIterations+1)); CHECK( magma_dmalloc_cpu(&gevalues, 3 * n)); CHECK( magma_malloc_cpu((void **)&iwork, liwork * sizeof(magma_int_t))); CHECK( magma_zmalloc_pinned(&hW, n*n)); CHECK( magma_zmalloc_pinned(&gevectors, 9*n*n)); CHECK( magma_zmalloc_pinned(&h_gramB , 9*n*n)); // === Allocate GPU workspace === CHECK( magma_zmalloc(&gramM, n * n)); CHECK( magma_zmalloc(&gramA, 9 * n * n)); CHECK( magma_zmalloc(&gramB, 9 * n * n)); // === Set activemask to one === for(magma_int_t k =0; k<n; k++){ iwork[k]=1; } magma_setmatrix(n, 1, sizeof(magma_int_t), iwork, n , activeMask, n, queue); #if defined(PRECISION_s) ikind = 3; #endif // === Make the initial vectors orthonormal === magma_zgegqr_gpu(ikind, m, n, blockX, m, dwork, hwork, &info ); //magma_zorthomgs( m, n, blockX, queue ); magma_z_bspmv_tuned(m, n, c_one, A, blockX, c_zero, blockAX, queue ); solver_par->spmv_count++; // === Compute the Gram matrix = (X, AX) & its eigenstates === magma_zgemm( MagmaConjTrans, MagmaNoTrans, n, n, m, c_one, blockX, m, blockAX, m, c_zero, gramM, n, queue ); magma_zheevd_gpu( MagmaVec, MagmaUpper, n, gramM, n, evalues, hW, n, hwork, lwork, #ifdef COMPLEX rwork, lrwork, #endif iwork, liwork, &info ); // === Update X = X * evectors === magma_zgemm( MagmaNoTrans, MagmaNoTrans, m, n, n, c_one, blockX, m, gramM, n, c_zero, blockW, m, queue ); SWAP(blockW, blockX); // === Update AX = AX * evectors === magma_zgemm( MagmaNoTrans, MagmaNoTrans, m, n, n, c_one, blockAX, m, gramM, n, c_zero, blockW, m, queue ); SWAP(blockW, blockAX); condestGhistory[1] = 7.82; tempo1 = magma_sync_wtime( queue ); // === Main LOBPCG loop ============================================================ for(iterationNumber = 1; iterationNumber < maxIterations; iterationNumber++) { // === compute the residuals (R = Ax - x evalues ) magmablas_zlacpy( MagmaFull, m, n, blockAX, m, blockR, m, queue ); /* for(magma_int_t i=0; i<n; i++) { magma_zaxpy( m, MAGMA_Z_MAKE(-evalues[i],0), blockX+i*m, 1, blockR+i*m, 1, queue ); } */ magma_dsetmatrix( 3*n, 1, 
evalues, 3*n, eval_gpu, 3*n, queue ); CHECK( magma_zlobpcg_res( m, n, eval_gpu, blockX, blockR, eval_gpu, queue )); magmablas_dznrm2_cols( m, n, blockR, m, residualNorms(0, iterationNumber), queue ); // === remove the residuals corresponding to already converged evectors CHECK( magma_zcompact(m, n, blockR, m, residualNorms(0, iterationNumber), residualTolerance, activeMask, &cBlockSize, queue )); if (cBlockSize == 0) break; // === apply a preconditioner P to the active residuals: R_new = P R_old // === for now set P to be identity (no preconditioner => nothing to be done ) //magmablas_zlacpy( MagmaFull, m, cBlockSize, blockR, m, blockW, m, queue ); //SWAP(blockW, blockR); // preconditioner magma_z_matrix bWv={Magma_CSR}, bRv={Magma_CSR}; bWv.memory_location = Magma_DEV; bWv.num_rows = m; bWv.num_cols = cBlockSize; bWv.major = MagmaColMajor; bWv.nnz = m*cBlockSize; bWv.dval = blockW; bRv.memory_location = Magma_DEV; bRv.num_rows = m; bRv.num_cols = cBlockSize; bRv.major = MagmaColMajor; bRv.nnz = m*cBlockSize; bRv.dval = blockR; CHECK( magma_z_applyprecond_left( MagmaNoTrans, A, bRv, &bWv, precond_par, queue )); CHECK( magma_z_applyprecond_right( MagmaNoTrans, A, bWv, &bRv, precond_par, queue )); // === make the preconditioned residuals orthogonal to X if( precond_par->solver != Magma_NONE){ magma_zgemm( MagmaConjTrans, MagmaNoTrans, n, cBlockSize, m, c_one, blockX, m, blockR, m, c_zero, gramB(0,0), ldgram, queue ); magma_zgemm( MagmaNoTrans, MagmaNoTrans, m, cBlockSize, n, c_neg_one, blockX, m, gramB(0,0), ldgram, c_one, blockR, m, queue ); } // === make the active preconditioned residuals orthonormal magma_zgegqr_gpu(ikind, m, cBlockSize, blockR, m, dwork, hwork, &info ); #if defined(PRECISION_s) // re-orthogonalization SWAP(blockX, dwork); magma_zgegqr_gpu(ikind, m, cBlockSize, blockR, m, dwork, hwork, &info ); #endif //magma_zorthomgs( m, cBlockSize, blockR, queue ); // === compute AR magma_z_bspmv_tuned(m, cBlockSize, c_one, A, blockR, c_zero, blockAR, queue ); solver_par->spmv_count++; if (!restart) { // === compact P & AP as well CHECK( magma_zcompactActive(m, n, blockP, m, activeMask, queue )); CHECK( magma_zcompactActive(m, n, blockAP, m, activeMask, queue )); /* // === make P orthogonal to X ? magma_zgemm( MagmaConjTrans, MagmaNoTrans, n, cBlockSize, m, c_one, blockX, m, blockP, m, c_zero, gramB(0,0), ldgram, queue ); magma_zgemm( MagmaNoTrans, MagmaNoTrans, m, cBlockSize, n, c_neg_one, blockX, m, gramB(0,0), ldgram, c_one, blockP, m, queue ); // === make P orthogonal to R ?
magma_zgemm( MagmaConjTrans, MagmaNoTrans, cBlockSize, cBlockSize, m, c_one, blockR, m, blockP, m, c_zero, gramB(0,0), ldgram, queue ); magma_zgemm( MagmaNoTrans, MagmaNoTrans, m, cBlockSize, cBlockSize, c_neg_one, blockR, m, gramB(0,0), ldgram, c_one, blockP, m, queue ); */ // === Make P orthonormal & properly change AP (without multiplication by A) magma_zgegqr_gpu(ikind, m, cBlockSize, blockP, m, dwork, hwork, &info ); #if defined(PRECISION_s) // re-orthogonalization SWAP(blockX, dwork); magma_zgegqr_gpu(ikind, m, cBlockSize, blockP, m, dwork, hwork, &info ); #endif //magma_zorthomgs( m, cBlockSize, blockP, queue ); //magma_z_bspmv_tuned(m, cBlockSize, c_one, A, blockP, c_zero, blockAP, queue ); magma_zsetmatrix( cBlockSize, cBlockSize, hwork, cBlockSize, dwork, cBlockSize, queue ); // replacement according to Stan #if defined(PRECISION_s) || defined(PRECISION_d) magmablas_ztrsm( MagmaRight, MagmaUpper, MagmaNoTrans, MagmaNonUnit, m, cBlockSize, c_one, dwork, cBlockSize, blockAP, m, queue ); #else magma_ztrsm( MagmaRight, MagmaUpper, MagmaNoTrans, MagmaNonUnit, m, cBlockSize, c_one, dwork, cBlockSize, blockAP, m, queue ); #endif } iter = max( 1, iterationNumber - 10 - int(log(1.*cBlockSize)) ); double condestGmean = 0.; for(magma_int_t i = 0; i<iterationNumber-iter+1; i++){ condestGmean += condestGhistory[i]; } condestGmean = condestGmean / (iterationNumber-iter+1); if (restart) gramDim = n+cBlockSize; else gramDim = n+2*cBlockSize; /* --- The Rayleigh-Ritz method for [X R P] ----------------------- [ X R P ]' [AX AR AP] y = evalues [ X R P ]' [ X R P ], i.e., GramA GramB / X'AX X'AR X'AP \ / X'X X'R X'P \ | R'AX R'AR R'AP | y = evalues | R'X R'R R'P | \ P'AX P'AR P'AP / \ P'X P'R P'P / ----------------------------------------------------------------- */ // === assemble GramB; first, set it to I magmablas_zlaset( MagmaFull, ldgram, ldgram, c_zero, c_one, gramB, ldgram, queue ); // identity if (!restart) { magma_zgemm( MagmaConjTrans, MagmaNoTrans, cBlockSize, n, m, c_one, blockP, m, blockX, m, c_zero, gramB(n+cBlockSize,0), ldgram, queue ); magma_zgemm( MagmaConjTrans, MagmaNoTrans, cBlockSize, cBlockSize, m, c_one, blockP, m, blockR, m, c_zero, gramB(n+cBlockSize,n), ldgram, queue ); } magma_zgemm( MagmaConjTrans, MagmaNoTrans, cBlockSize, n, m, c_one, blockR, m, blockX, m, c_zero, gramB(n,0), ldgram, queue ); // === get GramB from the GPU to the CPU and compute its eigenvalues only magma_zgetmatrix( gramDim, gramDim, gramB, ldgram, h_gramB, ldgram, queue ); lapackf77_zheev("N", "L", &gramDim, h_gramB, &ldgram, gevalues, hwork, &lwork, #ifdef COMPLEX rwork, #endif &info); // === check stability criteria if we need to restart condestG = log10( gevalues[gramDim-1]/gevalues[0] ) + 1.; if ((condestG/condestGmean>2 && condestG>2) || condestG>8) { // Steepest descent restart for stability restart=1; printf("restart at step #%d\n", int(iterationNumber)); } // === assemble GramA; first, set it to I magmablas_zlaset( MagmaFull, ldgram, ldgram, c_zero, c_one, gramA, ldgram, queue ); // identity magma_zgemm( MagmaConjTrans, MagmaNoTrans, cBlockSize, n, m, c_one, blockR, m, blockAX, m, c_zero, gramA(n,0), ldgram, queue ); magma_zgemm( MagmaConjTrans, MagmaNoTrans, cBlockSize, cBlockSize, m, c_one, blockR, m, blockAR, m, c_zero, gramA(n,n), ldgram, queue ); if (!restart) { magma_zgemm( MagmaConjTrans, MagmaNoTrans, cBlockSize, n, m, c_one, blockP, m, blockAX, m, c_zero, gramA(n+cBlockSize,0), ldgram, queue ); magma_zgemm( MagmaConjTrans, MagmaNoTrans, cBlockSize, cBlockSize, m, c_one, blockP, m,
blockAR, m, c_zero, gramA(n+cBlockSize,n), ldgram, queue ); magma_zgemm( MagmaConjTrans, MagmaNoTrans, cBlockSize, cBlockSize, m, c_one, blockP, m, blockAP, m, c_zero, gramA(n+cBlockSize,n+cBlockSize), ldgram, queue ); } /* // === Compute X' AX or just use the eigenvalues below ? magma_zgemm( MagmaConjTrans, MagmaNoTrans, n, n, m, c_one, blockX, m, blockAX, m, c_zero, gramA(0,0), ldgram, queue ); */ if (restart==0) { magma_zgetmatrix( gramDim, gramDim, gramA, ldgram, gevectors, ldgram, queue ); } else { gramDim = n+cBlockSize; magma_zgetmatrix( gramDim, gramDim, gramA, ldgram, gevectors, ldgram, queue ); } for(magma_int_t k=0; k<n; k++) *gevectors(k,k) = MAGMA_Z_MAKE(evalues[k], 0); // === the previous eigensolver destroyed what is in h_gramB => must copy it again magma_zgetmatrix( gramDim, gramDim, gramB, ldgram, h_gramB, ldgram, queue ); magma_int_t itype = 1; lapackf77_zhegvd(&itype, "V", "L", &gramDim, gevectors, &ldgram, h_gramB, &ldgram, gevalues, hwork, &lwork, #ifdef COMPLEX rwork, &lrwork, #endif iwork, &liwork, &info); for(magma_int_t k =0; k<n; k++) evalues[k] = gevalues[k]; // === copy back the result to gramA on the GPU and use it for the updates magma_zsetmatrix( gramDim, gramDim, gevectors, ldgram, gramA, ldgram, queue ); if (restart == 0) { // === contribution from P to the new X (in new search direction P) magma_zgemm( MagmaNoTrans, MagmaNoTrans, m, n, cBlockSize, c_one, blockP, m, gramA(n+cBlockSize,0), ldgram, c_zero, dwork, m, queue ); SWAP(dwork, blockP); // === contribution from R to the new X (in new search direction P) magma_zgemm( MagmaNoTrans, MagmaNoTrans, m, n, cBlockSize, c_one, blockR, m, gramA(n,0), ldgram, c_one, blockP, m, queue ); // === corresponding contribution from AP to the new AX (in AP) magma_zgemm( MagmaNoTrans, MagmaNoTrans, m, n, cBlockSize, c_one, blockAP, m, gramA(n+cBlockSize,0), ldgram, c_zero, dwork, m, queue ); SWAP(dwork, blockAP); // === corresponding contribution from AR to the new AX (in AP) magma_zgemm( MagmaNoTrans, MagmaNoTrans, m, n, cBlockSize, c_one, blockAR, m, gramA(n,0), ldgram, c_one, blockAP, m, queue ); } else { // === contribution from R (only) to the new X magma_zgemm( MagmaNoTrans, MagmaNoTrans, m, n, cBlockSize, c_one, blockR, m, gramA(n,0), ldgram, c_zero, blockP, m, queue ); // === corresponding contribution from AR (only) to the new AX magma_zgemm( MagmaNoTrans, MagmaNoTrans,m, n, cBlockSize, c_one, blockAR, m, gramA(n,0), ldgram, c_zero, blockAP, m, queue ); } // === contribution from old X to the new X + the new search direction P magma_zgemm( MagmaNoTrans, MagmaNoTrans, m, n, n, c_one, blockX, m, gramA, ldgram, c_zero, dwork, m, queue ); SWAP(dwork, blockX); //magma_zaxpy( m*n, c_one, blockP, 1, blockX, 1, queue ); CHECK( magma_zlobpcg_maxpy( m, n, blockP, blockX, queue )); // === corresponding contribution from old AX to new AX + AP magma_zgemm( MagmaNoTrans, MagmaNoTrans, m, n, n, c_one, blockAX, m, gramA, ldgram, c_zero, dwork, m, queue ); SWAP(dwork, blockAX); //magma_zaxpy( m*n, c_one, blockAP, 1, blockAX, 1, queue ); CHECK( magma_zlobpcg_maxpy( m, n, blockAP, blockAX, queue )); condestGhistory[iterationNumber+1]=condestG; magma_dgetmatrix( 1, 1, residualNorms(0, iterationNumber), 1, &tmp, 1, queue ); if ( iterationNumber == 1 ) { solver_par->init_res = tmp; r0 = tmp * solver_par->rtol; if ( r0 < ATOLERANCE ) r0 = ATOLERANCE; } solver_par->final_res = tmp; if ( tmp < r0 ) { break; } if (cBlockSize == 0) { break; } if ( solver_par->verbose!=0 ) { if ( iterationNumber%solver_par->verbose == 0 ) { // double res; 
// magma_zgetmatrix( 1, 1, // (magmaDoubleComplex*)residualNorms(0, iterationNumber), 1, // (magmaDoubleComplex*)&res, 1, queue ); // // printf("Iteration %4d, CBS %4d, Residual: %10.7f\n", // iterationNumber, cBlockSize, res); printf("%4d-%2d ", int(iterationNumber), int(cBlockSize)); magma_dprint_gpu(1, n, residualNorms(0, iterationNumber), 1); } } restart = 0; } // === end for iterationNumber = 1,maxIterations ======================= // fill solver info tempo2 = magma_sync_wtime( queue ); solver_par->runtime = (real_Double_t) tempo2-tempo1; solver_par->numiter = iterationNumber; if ( solver_par->numiter < solver_par->maxiter) { info = MAGMA_SUCCESS; } else if ( solver_par->init_res > solver_par->final_res ) info = MAGMA_SLOW_CONVERGENCE; else info = MAGMA_DIVERGENCE; // ============================================================================= // === postprocessing; // ============================================================================= // === compute the real AX and corresponding eigenvalues magma_z_bspmv_tuned(m, n, c_one, A, blockX, c_zero, blockAX, queue ); magma_zgemm( MagmaConjTrans, MagmaNoTrans, n, n, m, c_one, blockX, m, blockAX, m, c_zero, gramM, n, queue ); magma_zheevd_gpu( MagmaVec, MagmaUpper, n, gramM, n, gevalues, dwork, n, hwork, lwork, #ifdef COMPLEX rwork, lrwork, #endif iwork, liwork, &info ); for(magma_int_t k =0; k<n; k++) evalues[k] = gevalues[k]; // === update X = X * evectors SWAP(blockX, dwork); magma_zgemm( MagmaNoTrans, MagmaNoTrans, m, n, n, c_one, dwork, m, gramM, n, c_zero, blockX, m, queue ); // === update AX = AX * evectors to compute the final residual SWAP(blockAX, dwork); magma_zgemm( MagmaNoTrans, MagmaNoTrans, m, n, n, c_one, dwork, m, gramM, n, c_zero, blockAX, m, queue ); // === compute R = AX - evalues X magmablas_zlacpy( MagmaFull, m, n, blockAX, m, blockR, m, queue ); for(magma_int_t i=0; i<n; i++) magma_zaxpy( m, MAGMA_Z_MAKE(-evalues[i], 0), blockX+i*m, 1, blockR+i*m, 1, queue ); // === residualNorms[iterationNumber] = || R || magmablas_dznrm2_cols( m, n, blockR, m, residualNorms(0, iterationNumber), queue ); // === restore blockX if needed if (blockX != origX) magmablas_zlacpy( MagmaFull, m, n, blockX, m, origX, m, queue ); printf("Eigenvalues:\n"); for(magma_int_t i =0; i<n; i++) printf("%e ", evalues[i]); printf("\n\n"); printf("Final residuals:\n"); magma_dprint_gpu(1, n, residualNorms(0, iterationNumber), 1); printf("\n\n"); //=== Print residual history in a file for plotting ==== CHECK( magma_dmalloc_cpu(&hresidualNorms, (iterationNumber+1) * n)); magma_dgetmatrix( n, iterationNumber, residualNorms, n, hresidualNorms, n, queue ); solver_par->iter_res = *hresidualNorms(0, iterationNumber-1); printf("Residuals are stored in file residualNorms\n"); printf("Plot the residuals using: myplot \n"); FILE *residuals_file; residuals_file = fopen("residualNorms", "w"); for(magma_int_t i =1; i<iterationNumber; i++) { for(magma_int_t j = 0; j<n; j++) fprintf(residuals_file, "%f ", *hresidualNorms(j,i)); fprintf(residuals_file, "\n"); } fclose(residuals_file); cleanup: magma_free_cpu(hresidualNorms); // === free work space magma_free( residualNorms ); magma_free_cpu( condestGhistory ); magma_free_cpu( gevalues ); magma_free_cpu( iwork ); magma_free_pinned( hW ); magma_free_pinned( gevectors ); magma_free_pinned( h_gramB ); magma_free( gramM ); magma_free( gramA ); magma_free( gramB ); magma_free( activeMask ); if (blockX != (solver_par->eigenvectors)) magma_free( blockX ); if (blockAX != (solver_par->eigenvectors)) magma_free(
blockAX ); if (blockAR != (solver_par->eigenvectors)) magma_free( blockAR ); if (blockAP != (solver_par->eigenvectors)) magma_free( blockAP ); if (blockR != (solver_par->eigenvectors)) magma_free( blockR ); if (blockP != (solver_par->eigenvectors)) magma_free( blockP ); if (blockW != (solver_par->eigenvectors)) magma_free( blockW ); if (dwork != (solver_par->eigenvectors)) magma_free( dwork ); magma_free( eval_gpu ); magma_free_pinned( hwork ); #ifdef COMPLEX magma_free_cpu( rwork ); rwork = NULL; #endif return info; }<|fim▁end|>
blockAP = NULL; blockAR = NULL;
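Each iteration of magma_zlobpcg above amounts to a Rayleigh-Ritz projection of A onto the block subspace spanned by [X R P]: the current iterates, the preconditioned residuals, and the search directions. A minimal dense NumPy sketch of that idea, with preconditioning, the active-column masking, and the restart logic stripped out (an illustration assuming a symmetric positive definite A, not the MAGMA implementation):

import numpy as np

rng = np.random.default_rng(0)
m, n = 200, 4
A = rng.standard_normal((m, m))
A = A @ A.T + m * np.eye(m)                    # SPD test matrix
X, _ = np.linalg.qr(rng.standard_normal((m, n)))
P = np.zeros((m, 0))                           # no search directions yet

for _ in range(100):
    AX = A @ X
    theta, V = np.linalg.eigh(X.T @ AX)        # Rayleigh-Ritz on span(X)
    X, AX = X @ V, AX @ V
    R = AX - X * theta                         # block residual R = AX - X*Theta
    if np.linalg.norm(R) < 1e-8 * np.linalg.norm(AX):
        break
    S, _ = np.linalg.qr(np.hstack([X, R, P]))  # orthonormal basis of [X R P], so GramB = I
    _, Y = np.linalg.eigh(S.T @ (A @ S))       # projected eigenproblem (GramA)
    X_new = S @ Y[:, :n]                       # Ritz vectors of the n smallest eigenvalues
    P = X_new - X @ (X.T @ X_new)              # new search directions (implicit P update)
    X = X_new

print(theta)                                   # approximate smallest eigenvalues of A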
<|file_name|>tailer.go<|end_file_name|><|fim▁begin|>package tailer<|fim▁hole|> "github.com/docker/infrakit/pkg/discovery" "github.com/docker/infrakit/pkg/launch/inproc" logutil "github.com/docker/infrakit/pkg/log" "github.com/docker/infrakit/pkg/plugin" "github.com/docker/infrakit/pkg/plugin/event/tailer" metadata_plugin "github.com/docker/infrakit/pkg/plugin/metadata" "github.com/docker/infrakit/pkg/run" "github.com/docker/infrakit/pkg/run/local" "github.com/docker/infrakit/pkg/types" ) const ( // Kind is the canonical name of the plugin for starting up, etc. Kind = "tailer" // EnvPath is the environment variable to set to tail when no additional configs are used. EnvPath = "INFRAKIT_TAILER_PATH" ) var ( log = logutil.New("module", "run/v0/tailer") ) func init() { inproc.Register(Kind, Run, DefaultOptions) } // DefaultOptions return an Options with default values filled in. var DefaultOptions = tailer.Options{ tailer.Rule{ Path: local.Getenv(EnvPath, filepath.Join(local.Getenv("PWD", ""), "test.log")), MustExist: false, }, } // Run runs the plugin, blocking the current thread. Error is returned immediately // if the plugin cannot be started. func Run(plugins func() discovery.Plugins, name plugin.Name, config *types.Any) (transport plugin.Transport, impls map[run.PluginCode]interface{}, onStop func(), err error) { options := DefaultOptions err = config.Decode(&options) if err != nil { return } if len(options) == 0 { options = DefaultOptions } var events *tailer.Tailer events, err = tailer.NewPlugin(options) if err != nil { return } transport.Name = name impls = map[run.PluginCode]interface{}{ run.Metadata: metadata_plugin.NewPluginFromData(events.Data()), run.Event: events, } onStop = func() { events.Stop() } return }<|fim▁end|>
import ( "path/filepath"
<|file_name|>JsQuanLyNgayPhep.js<|end_file_name|><|fim▁begin|>var getColor = function (value, p, record) { return "<span style='color:blue;'>" + value + "</span>"; } var getDayNumber = function (value, p, record) { if (value == "" || value == 0) { return "<span style='float:right;'>0</span>"; } return "<b style='float:right;color:blue;'>" + value + "</b>"; } //Get the number of leave days granted each month var getUsedDayPerMonth = function (value, p, record) { if (value == "" || value == 0) { return ""; } return "<b style='float:right;color:red;'>" + value + "</b>"; } //Get the number of leave days granted each year var getUsedDayPerYear = function (value, p, record) { if (value == "" || value == 0) { return "<span style='float:right;'>0</span>"; } return "<b style='float:right;color:red;'>" + value + "</b>"; } //Get the total number of leave days granted var getTotalDaysPerYear = function (value, p, record) { if (value == "" || value == 0) { return "<span style='float:right;'>0</span>"; } return "<b style='float:right;color:blue;'>" + value + "</b>"; } var enterKeyPressHandler = function (f, e) { if (e.getKey() == e.ENTER) { PagingToolbar1.pageIndex = 0; PagingToolbar1.doLoad(); Store1.reload(); } } //Clear the filter conditions var clearFilter = function () { txtMaCB.reset(); txtHoTen.reset(); cbPhongBan.reset(); cbTo.reset(); txtTongSoNgayPhep.reset(); <|fim▁hole|>
txtSoNgayDaSuDung.reset();
<|file_name|>test_core_resolver.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # # Copyright 2018-2021 Polyaxon, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import tempfile from polyaxon import settings from polyaxon.auxiliaries import ( get_default_init_container, get_default_sidecar_container, ) from polyaxon.connections.kinds import V1ConnectionKind from polyaxon.connections.schemas import V1BucketConnection, V1K8sResourceSchema from polyaxon.exceptions import PolyaxonCompilerError from polyaxon.managers.agent import AgentConfigManager from polyaxon.polyaxonfile.specs import kinds from polyaxon.polyflow import V1CompiledOperation, V1RunKind from polyaxon.polypod.compiler.resolver import BaseResolver from polyaxon.schemas.cli.agent_config import AgentConfig from polyaxon.schemas.types import V1ConnectionType, V1K8sResourceType from polyaxon.utils.test_utils import BaseTestCase @pytest.mark.polypod_mark class TestResolver(BaseTestCase): def setUp(self): super().setUp() self.compiled_operation = V1CompiledOperation.read( { "version": 1.1, "kind": kinds.COMPILED_OPERATION, "plugins": { "auth": False, "shm": False, "collectLogs": False, "collectArtifacts": False, "collectResources": False, }, "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } ) def test_core_resolver_instance(self): resolver = BaseResolver( run=None, compiled_operation=self.compiled_operation, owner_name="user", project_name="p1", project_uuid=None, run_name="j1", run_uuid=None, run_path="test", params=None, ) assert resolver.project_uuid == resolver.project_name assert resolver.run_uuid == resolver.run_name resolver = BaseResolver( run=None, compiled_operation=self.compiled_operation, owner_name="user", project_name="p1", run_name="j1", run_path="test", project_uuid="some_uuid", run_uuid="some_uuid", params=None, ) assert resolver.project_uuid != resolver.project_name assert resolver.run_uuid != resolver.run_name def test_resolve_connections_with_no_config(self): settings.AGENT_CONFIG = None resolver = BaseResolver( run=None, compiled_operation=self.compiled_operation, owner_name="user", project_name="p1", project_uuid=None, run_name="j1", run_uuid=None, run_path="test", params=None, ) with self.assertRaises(PolyaxonCompilerError): resolver.resolve_connections() def test_resolve_without_compiled_operation(self): with self.assertRaises(PolyaxonCompilerError): BaseResolver( run=None, compiled_operation=None, owner_name="user", project_name="p1", project_uuid=None, run_name="j1", run_uuid=None, run_path="test", params=None, ) def test_resolve_connections_with_invalid_config(self): fpath = tempfile.mkdtemp() AgentConfigManager.CONFIG_PATH = fpath secret1 = V1K8sResourceType( name="secret1", schema=V1K8sResourceSchema(name="secret1"), is_requested=True, ) secret2 = V1K8sResourceType( name="secret2", schema=V1K8sResourceSchema(name="secret2"), is_requested=True, ) connection1 = V1ConnectionType( name="test_s3", kind=V1ConnectionKind.S3, schema=V1BucketConnection(bucket="s3//:foo"), secret=secret1.schema, 
) connection2 = V1ConnectionType( name="test_gcs", kind=V1ConnectionKind.GCS, schema=V1BucketConnection(bucket="gcs//:foo"), secret=secret1.schema, ) connection3 = V1ConnectionType( name="test_wasb", kind=V1ConnectionKind.WASB, schema=V1BucketConnection(bucket="wasbs//:foo"), secret=secret2.schema, ) settings.AGENT_CONFIG = AgentConfig( namespace="foo", artifacts_store=connection1, connections=[connection2, connection3], ) resolver = BaseResolver( run=None, compiled_operation=self.compiled_operation, owner_name="user", project_name="p1", project_uuid=None, run_name="j1", run_uuid=None, run_path="test", params=None, ) resolver.resolve_connections() assert resolver.namespace == "foo" assert resolver.connection_by_names == {connection1.name: connection1} assert resolver.artifacts_store == connection1 assert [s.schema for s in resolver.secrets] == [secret1.schema, secret2.schema] assert resolver.polyaxon_sidecar == get_default_sidecar_container() assert resolver.polyaxon_init == get_default_init_container() # Add run spec to resolve connections compiled_operation = V1CompiledOperation.read( { "version": 1.1, "kind": kinds.COMPILED_OPERATION, "plugins": { "auth": False, "shm": False,<|fim▁hole|> "collectResources": False, }, "run": { "kind": V1RunKind.JOB, "container": {"image": "test"}, "connections": {connection3.name}, }, } ) resolver = BaseResolver( run=None, compiled_operation=compiled_operation, owner_name="user", project_name="p1", project_uuid=None, run_name="j1", run_uuid=None, run_path="test", params=None, ) resolver.resolve_connections() assert resolver.namespace == "foo" assert resolver.connection_by_names == { connection1.name: connection1, connection3.name: connection3, } assert [s.schema for s in resolver.secrets] == [secret1.schema, secret2.schema] assert resolver.artifacts_store == connection1 assert resolver.polyaxon_sidecar == get_default_sidecar_container() assert resolver.polyaxon_init == get_default_init_container() # Add run spec to resolve connections compiled_operation = V1CompiledOperation.read( { "version": 1.1, "kind": kinds.COMPILED_OPERATION, "plugins": { "auth": False, "shm": False, "collectLogs": False, "collectArtifacts": False, "collectResources": False, }, "run": { "kind": V1RunKind.JOB, "container": {"image": "test"}, "connections": { connection1.name, connection2.name, connection3.name, }, }, } ) resolver = BaseResolver( run=None, compiled_operation=compiled_operation, owner_name="user", project_name="p1", project_uuid=None, run_name="j1", run_uuid=None, run_path="test", params=None, ) resolver.resolve_connections() assert resolver.namespace == "foo" assert resolver.connection_by_names == { connection3.name: connection3, connection2.name: connection2, connection1.name: connection1, } assert [s.schema for s in resolver.secrets] == [secret1.schema, secret2.schema] assert resolver.artifacts_store == connection1 assert resolver.polyaxon_sidecar == get_default_sidecar_container() assert resolver.polyaxon_init == get_default_init_container()<|fim▁end|>
"collectLogs": False, "collectArtifacts": False,
<|file_name|>graphbench_unix.go<|end_file_name|><|fim▁begin|>// +build linux freebsd package graphtest import ( "bytes" "io" "io/ioutil" "path/filepath" "testing" "github.com/docker/docker/pkg/stringid" ) // DriverBenchExists benchmarks calls to exist func DriverBenchExists(b *testing.B, drivername string, driveroptions ...string) { driver := GetDriver(b, drivername, driveroptions...) defer PutDriver(b) base := stringid.GenerateRandomID() if err := driver.Create(base, "", nil); err != nil { b.Fatal(err) } b.ResetTimer() for i := 0; i < b.N; i++ { if !driver.Exists(base) { b.Fatal("Newly created image doesn't exist") } } } // DriverBenchGetEmpty benchmarks calls to get on an empty layer func DriverBenchGetEmpty(b *testing.B, drivername string, driveroptions ...string) { driver := GetDriver(b, drivername, driveroptions...) defer PutDriver(b) base := stringid.GenerateRandomID() if err := driver.Create(base, "", nil); err != nil { b.Fatal(err) } b.ResetTimer() for i := 0; i < b.N; i++ { _, err := driver.Get(base, "") b.StopTimer() if err != nil { b.Fatalf("Error getting mount: %s", err) } if err := driver.Put(base); err != nil { b.Fatalf("Error putting mount: %s", err) } b.StartTimer() } } // DriverBenchDiffBase benchmarks calls to diff on a root layer func DriverBenchDiffBase(b *testing.B, drivername string, driveroptions ...string) { driver := GetDriver(b, drivername, driveroptions...) defer PutDriver(b) base := stringid.GenerateRandomID() if err := driver.Create(base, "", nil); err != nil { b.Fatal(err) } if err := addFiles(driver, base, 3); err != nil { b.Fatal(err) } b.ResetTimer() for i := 0; i < b.N; i++ { arch, err := driver.Diff(base, "") if err != nil { b.Fatal(err) } _, err = io.Copy(ioutil.Discard, arch) if err != nil { b.Fatalf("Error copying archive: %s", err) } arch.Close() } } // DriverBenchDiffN benchmarks calls to diff on two layers with // a provided number of files on the lower and upper layers. func DriverBenchDiffN(b *testing.B, bottom, top int, drivername string, driveroptions ...string) { driver := GetDriver(b, drivername, driveroptions...) defer PutDriver(b) base := stringid.GenerateRandomID() upper := stringid.GenerateRandomID() if err := driver.Create(base, "", nil); err != nil { b.Fatal(err) } if err := addManyFiles(driver, base, bottom, 3); err != nil { b.Fatal(err) } if err := driver.Create(upper, base, nil); err != nil { b.Fatal(err) } if err := addManyFiles(driver, upper, top, 6); err != nil { b.Fatal(err) } b.ResetTimer() for i := 0; i < b.N; i++ { arch, err := driver.Diff(upper, "") if err != nil { b.Fatal(err) }<|fim▁hole|> } arch.Close() } } // DriverBenchDiffApplyN benchmarks calls to diff and apply together func DriverBenchDiffApplyN(b *testing.B, fileCount int, drivername string, driveroptions ...string) { driver := GetDriver(b, drivername, driveroptions...) 
defer PutDriver(b) base := stringid.GenerateRandomID() upper := stringid.GenerateRandomID() if err := driver.Create(base, "", nil); err != nil { b.Fatal(err) } if err := addManyFiles(driver, base, fileCount, 3); err != nil { b.Fatal(err) } if err := driver.Create(upper, base, nil); err != nil { b.Fatal(err) } if err := addManyFiles(driver, upper, fileCount, 6); err != nil { b.Fatal(err) } diffSize, err := driver.DiffSize(upper, "") if err != nil { b.Fatal(err) } b.ResetTimer() b.StopTimer() for i := 0; i < b.N; i++ { diff := stringid.GenerateRandomID() if err := driver.Create(diff, base, nil); err != nil { b.Fatal(err) } if err := checkManyFiles(driver, diff, fileCount, 3); err != nil { b.Fatal(err) } b.StartTimer() arch, err := driver.Diff(upper, "") if err != nil { b.Fatal(err) } applyDiffSize, err := driver.ApplyDiff(diff, "", arch) if err != nil { b.Fatal(err) } b.StopTimer() arch.Close() if applyDiffSize != diffSize { // TODO: enforce this //b.Fatalf("Apply diff size different, got %d, expected %s", applyDiffSize, diffSize) } if err := checkManyFiles(driver, diff, fileCount, 6); err != nil { b.Fatal(err) } } } // DriverBenchDeepLayerDiff benchmarks calls to diff on top of a given number of layers. func DriverBenchDeepLayerDiff(b *testing.B, layerCount int, drivername string, driveroptions ...string) { driver := GetDriver(b, drivername, driveroptions...) defer PutDriver(b) base := stringid.GenerateRandomID() if err := driver.Create(base, "", nil); err != nil { b.Fatal(err) } if err := addFiles(driver, base, 50); err != nil { b.Fatal(err) } topLayer, err := addManyLayers(driver, base, layerCount) if err != nil { b.Fatal(err) } b.ResetTimer() for i := 0; i < b.N; i++ { arch, err := driver.Diff(topLayer, "") if err != nil { b.Fatal(err) } _, err = io.Copy(ioutil.Discard, arch) if err != nil { b.Fatalf("Error copying archive: %s", err) } arch.Close() } } // DriverBenchDeepLayerRead benchmarks calls to read a file under a given number of layers. func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, driveroptions ...string) { driver := GetDriver(b, drivername, driveroptions...) defer PutDriver(b) base := stringid.GenerateRandomID() if err := driver.Create(base, "", nil); err != nil { b.Fatal(err) } content := []byte("test content") if err := addFile(driver, base, "testfile.txt", content); err != nil { b.Fatal(err) } topLayer, err := addManyLayers(driver, base, layerCount) if err != nil { b.Fatal(err) } root, err := driver.Get(topLayer, "") if err != nil { b.Fatal(err) } defer driver.Put(topLayer) b.ResetTimer() for i := 0; i < b.N; i++ { // Read content c, err := ioutil.ReadFile(filepath.Join(root, "testfile.txt")) if err != nil { b.Fatal(err) } b.StopTimer() if bytes.Compare(c, content) != 0 { b.Fatalf("Wrong content in file %v, expected %v", c, content) } b.StartTimer() } }<|fim▁end|>
_, err = io.Copy(ioutil.Discard, arch) if err != nil { b.Fatalf("Error copying archive: %s", err)
<|file_name|>fn-spec.js<|end_file_name|><|fim▁begin|>/* * Copyright 2014,2015 Open Networking Laboratory * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ONOS GUI -- Util -- General Purpose Functions - Unit Tests */ describe('factory: fw/util/fn.js', function() { var $window, fs, someFunction = function () {}, someArray = [1, 2, 3], someObject = { foo: 'bar'}, someNumber = 42, someString = 'xyyzy',<|fim▁hole|> stringArray = ['foo', 'bar']; beforeEach(module('onosUtil')); beforeEach(inject(function (_$window_, FnService) { $window = _$window_; fs = FnService; $window.innerWidth = 400; $window.innerHeight = 200; })); // === Tests for isF() it('isF(): null for undefined', function () { expect(fs.isF(undefined)).toBeNull(); }); it('isF(): null for null', function () { expect(fs.isF(null)).toBeNull(); }); it('isF(): the reference for function', function () { expect(fs.isF(someFunction)).toBe(someFunction); }); it('isF(): null for string', function () { expect(fs.isF(someString)).toBeNull(); }); it('isF(): null for number', function () { expect(fs.isF(someNumber)).toBeNull(); }); it('isF(): null for Date', function () { expect(fs.isF(someDate)).toBeNull(); }); it('isF(): null for array', function () { expect(fs.isF(someArray)).toBeNull(); }); it('isF(): null for object', function () { expect(fs.isF(someObject)).toBeNull(); }); // === Tests for isA() it('isA(): null for undefined', function () { expect(fs.isA(undefined)).toBeNull(); }); it('isA(): null for null', function () { expect(fs.isA(null)).toBeNull(); }); it('isA(): null for function', function () { expect(fs.isA(someFunction)).toBeNull(); }); it('isA(): null for string', function () { expect(fs.isA(someString)).toBeNull(); }); it('isA(): null for number', function () { expect(fs.isA(someNumber)).toBeNull(); }); it('isA(): null for Date', function () { expect(fs.isA(someDate)).toBeNull(); }); it('isA(): the reference for array', function () { expect(fs.isA(someArray)).toBe(someArray); }); it('isA(): null for object', function () { expect(fs.isA(someObject)).toBeNull(); }); // === Tests for isS() it('isS(): null for undefined', function () { expect(fs.isS(undefined)).toBeNull(); }); it('isS(): null for null', function () { expect(fs.isS(null)).toBeNull(); }); it('isS(): null for function', function () { expect(fs.isS(someFunction)).toBeNull(); }); it('isS(): the reference for string', function () { expect(fs.isS(someString)).toBe(someString); }); it('isS(): null for number', function () { expect(fs.isS(someNumber)).toBeNull(); }); it('isS(): null for Date', function () { expect(fs.isS(someDate)).toBeNull(); }); it('isS(): null for array', function () { expect(fs.isS(someArray)).toBeNull(); }); it('isS(): null for object', function () { expect(fs.isS(someObject)).toBeNull(); }); // === Tests for isO() it('isO(): null for undefined', function () { expect(fs.isO(undefined)).toBeNull(); }); it('isO(): null for null', function () { expect(fs.isO(null)).toBeNull(); }); it('isO(): null for function', function () { expect(fs.isO(someFunction)).toBeNull(); 
}); it('isO(): null for string', function () { expect(fs.isO(someString)).toBeNull(); }); it('isO(): null for number', function () { expect(fs.isO(someNumber)).toBeNull(); }); it('isO(): null for Date', function () { expect(fs.isO(someDate)).toBeNull(); }); it('isO(): null for array', function () { expect(fs.isO(someArray)).toBeNull(); }); it('isO(): the reference for object', function () { expect(fs.isO(someObject)).toBe(someObject); }); // === Tests for contains() it('contains(): false for improper args', function () { expect(fs.contains()).toBeFalsy(); }); it('contains(): false for non-array', function () { expect(fs.contains(null, 1)).toBeFalsy(); }); it('contains(): true for contained item', function () { expect(fs.contains(someArray, 1)).toBeTruthy(); expect(fs.contains(stringArray, 'bar')).toBeTruthy(); }); it('contains(): false for non-contained item', function () { expect(fs.contains(someArray, 109)).toBeFalsy(); expect(fs.contains(stringArray, 'zonko')).toBeFalsy(); }); // === Tests for areFunctions() it('areFunctions(): false for non-array', function () { expect(fs.areFunctions({}, 'not-an-array')).toBeFalsy(); }); it('areFunctions(): true for empty-array', function () { expect(fs.areFunctions({}, [])).toBeTruthy(); }); it('areFunctions(): true for some api', function () { expect(fs.areFunctions({ a: function () {}, b: function () {} }, ['b', 'a'])).toBeTruthy(); }); it('areFunctions(): false for some other api', function () { expect(fs.areFunctions({ a: function () {}, b: 'not-a-function' }, ['b', 'a'])).toBeFalsy(); }); it('areFunctions(): extraneous stuff NOT ignored', function () { expect(fs.areFunctions({ a: function () {}, b: function () {}, c: 1, d: 'foo' }, ['a', 'b'])).toBeFalsy(); }); it('areFunctions(): extraneous stuff ignored (alternate fn)', function () { expect(fs.areFunctionsNonStrict({ a: function () {}, b: function () {}, c: 1, d: 'foo' }, ['a', 'b'])).toBeTruthy(); }); // == use the now-tested areFunctions() on our own api: it('should define api functions', function () { expect(fs.areFunctions(fs, [ 'isF', 'isA', 'isS', 'isO', 'contains', 'areFunctions', 'areFunctionsNonStrict', 'windowSize', 'find', 'inArray', 'removeFromArray', 'cap' ])).toBeTruthy(); }); // === Tests for windowSize() it('windowSize(): noargs', function () { var dim = fs.windowSize(); expect(dim.width).toEqual(400); expect(dim.height).toEqual(200); }); it('windowSize(): adjust height', function () { var dim = fs.windowSize(50); expect(dim.width).toEqual(400); expect(dim.height).toEqual(150); }); it('windowSize(): adjust width', function () { var dim = fs.windowSize(0, 50); expect(dim.width).toEqual(350); expect(dim.height).toEqual(200); }); it('windowSize(): adjust width and height', function () { var dim = fs.windowSize(101, 201); expect(dim.width).toEqual(199); expect(dim.height).toEqual(99); }); // === Tests for find() var dataset = [ { id: 'foo', name: 'Furby'}, { id: 'bar', name: 'Barbi'}, { id: 'baz', name: 'Basil'}, { id: 'goo', name: 'Gabby'}, { id: 'zoo', name: 'Zevvv'} ]; it('should not find ooo', function () { expect(fs.find('ooo', dataset)).toEqual(-1); }); it('should find foo', function () { expect(fs.find('foo', dataset)).toEqual(0); }); it('should find zoo', function () { expect(fs.find('zoo', dataset)).toEqual(4); }); it('should not find Simon', function () { expect(fs.find('Simon', dataset, 'name')).toEqual(-1); }); it('should find Furby', function () { expect(fs.find('Furby', dataset, 'name')).toEqual(0); }); it('should find Zevvv', function () { expect(fs.find('Zevvv', 
dataset, 'name')).toEqual(4); }); // === Tests for inArray() var objRef = { x:1, y:2 }, array = [1, 3.14, 'hey', objRef, 'there', true], array2 = ['b', 'a', 'd', 'a', 's', 's']; it('should return -1 on non-arrays', function () { expect(fs.inArray(1, {x:1})).toEqual(-1); }); it('should not find HOO', function () { expect(fs.inArray('HOO', array)).toEqual(-1); }); it('should find 1', function () { expect(fs.inArray(1, array)).toEqual(0); }); it('should find pi', function () { expect(fs.inArray(3.14, array)).toEqual(1); }); it('should find hey', function () { expect(fs.inArray('hey', array)).toEqual(2); }); it('should find the object', function () { expect(fs.inArray(objRef, array)).toEqual(3); }); it('should find there', function () { expect(fs.inArray('there', array)).toEqual(4); }); it('should find true', function () { expect(fs.inArray(true, array)).toEqual(5); }); it('should find the first occurrence A', function () { expect(fs.inArray('a', array2)).toEqual(1); }); it('should find the first occurrence S', function () { expect(fs.inArray('s', array2)).toEqual(4); }); it('should not find X', function () { expect(fs.inArray('x', array2)).toEqual(-1); }); // === Tests for removeFromArray() it('should ignore non-arrays', function () { expect(fs.removeFromArray(1, {x:1})).toBe(false); }); it('should keep the array the same, for non-match', function () { var array = [1, 2, 3]; expect(fs.removeFromArray(4, array)).toBe(false); expect(array).toEqual([1, 2, 3]); }); it('should remove a value', function () { var array = [1, 2, 3]; expect(fs.removeFromArray(2, array)).toBe(true); expect(array).toEqual([1, 3]); }); it('should remove the first occurrence', function () { var array = ['x', 'y', 'z', 'z', 'y']; expect(fs.removeFromArray('y', array)).toBe(true); expect(array).toEqual(['x', 'z', 'z', 'y']); expect(fs.removeFromArray('x', array)).toBe(true); expect(array).toEqual(['z', 'z', 'y']); }); // === Tests for cap() it('should ignore non-alpha', function () { expect(fs.cap('123')).toEqual('123'); }); it('should capitalize first char', function () { expect(fs.cap('Foo')).toEqual('Foo'); expect(fs.cap('foo')).toEqual('Foo'); expect(fs.cap('foo bar')).toEqual('Foo bar'); }); });<|fim▁end|>
someDate = new Date(),
<|file_name|>metric_configuration.py<|end_file_name|><|fim▁begin|>import json from typing import Tuple from great_expectations.core.id_dict import IDDict class MetricConfiguration: def __init__( self, metric_name: str, metric_domain_kwargs: dict, metric_value_kwargs: dict = None, metric_dependencies: dict = None, ): self._metric_name = metric_name if not isinstance(metric_domain_kwargs, IDDict): metric_domain_kwargs = IDDict(metric_domain_kwargs) self._metric_domain_kwargs = metric_domain_kwargs if not isinstance(metric_value_kwargs, IDDict): if metric_value_kwargs is None: metric_value_kwargs = {} metric_value_kwargs = IDDict(metric_value_kwargs) self._metric_value_kwargs = metric_value_kwargs if metric_dependencies is None: metric_dependencies = {} self._metric_dependencies = metric_dependencies def __repr__(self): return json.dumps(self.to_json_dict(), indent=2) def __str__(self): return self.__repr__() @property def metric_name(self): return self._metric_name @property def metric_domain_kwargs(self): return self._metric_domain_kwargs @property def metric_value_kwargs(self): return self._metric_value_kwargs @property def metric_domain_kwargs_id(self): return self.metric_domain_kwargs.to_id() @property def metric_value_kwargs_id(self): return self.metric_value_kwargs.to_id() @property def metric_dependencies(self): return self._metric_dependencies @metric_dependencies.setter def metric_dependencies(self, metric_dependencies): self._metric_dependencies = metric_dependencies @property def id(self) -> Tuple[str, str, str]: return ( self.metric_name, self.metric_domain_kwargs_id, self.metric_value_kwargs_id, ) def to_json_dict(self) -> dict:<|fim▁hole|> "metric_domain_kwargs": self.metric_domain_kwargs, "metric_domain_kwargs_id": self.metric_domain_kwargs_id, "metric_value_kwargs": self.metric_value_kwargs, "metric_value_kwargs_id": self.metric_value_kwargs_id, "id": self.id, } return json_dict<|fim▁end|>
json_dict: dict = { "metric_name": self.metric_name,
<|file_name|>boot.ts<|end_file_name|><|fim▁begin|>import {bootstrap} from '/node_modules/angular2/platform/browser' import {AppComponent} from './app.component' bootstrap(AppComponent); /* Copyright 2016 Google Inc. All Rights Reserved.<|fim▁hole|>can be found in the LICENSE file at http://angular.io/license */<|fim▁end|>
Use of this source code is governed by an MIT-style license that
<|file_name|>server.js<|end_file_name|><|fim▁begin|>var http = require("http"); var server = http.createServer(function(request, response) { response.writeHead(200, { "Grip-Hold": "response", "Grip-Channel": "mychannel", <|fim▁hole|> "Grip-Timeout": "60"}); response.write('<b>Hello {{name}}</b>!'); response.end(); }); server.listen(8080); console.log("Server is listening");<|fim▁end|>
<|file_name|>spree_awesome_blog.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
//= require admin/spree_backend
<|file_name|>LinearInterpolatorTest.java<|end_file_name|><|fim▁begin|>/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.commons.math.analysis.interpolation; import org.apache.commons.math.MathException; import org.apache.commons.math.exception.NonMonotonousSequenceException; import org.apache.commons.math.exception.DimensionMismatchException; import org.apache.commons.math.exception.NumberIsTooSmallException; import org.apache.commons.math.TestUtils; import org.apache.commons.math.analysis.UnivariateRealFunction; import org.apache.commons.math.analysis.polynomials.PolynomialFunction; import org.apache.commons.math.analysis.polynomials.PolynomialSplineFunction; import org.junit.Assert; import org.junit.Test; /** * Test the LinearInterpolator. */ public class LinearInterpolatorTest { /** error tolerance for spline interpolator value at knot points */ protected double knotTolerance = 1E-12; /** error tolerance for interpolating polynomial coefficients */ protected double coefficientTolerance = 1E-6; /** error tolerance for interpolated values */ protected double interpolationTolerance = 1E-12; @Test public void testInterpolateLinearDegenerateTwoSegment() throws Exception { double x[] = { 0.0, 0.5, 1.0 }; double y[] = { 0.0, 0.5, 1.0 }; UnivariateRealInterpolator i = new LinearInterpolator(); UnivariateRealFunction f = i.interpolate(x, y); verifyInterpolation(f, x, y); // Verify coefficients using analytical values PolynomialFunction polynomials[] = ((PolynomialSplineFunction) f).getPolynomials(); double target[] = {y[0], 1d}; TestUtils.assertEquals(polynomials[0].getCoefficients(), target, coefficientTolerance); target = new double[]{y[1], 1d}; TestUtils.assertEquals(polynomials[1].getCoefficients(), target, coefficientTolerance); // Check interpolation Assert.assertEquals(0.0,f.value(0.0), interpolationTolerance); Assert.assertEquals(0.4,f.value(0.4), interpolationTolerance); Assert.assertEquals(1.0,f.value(1.0), interpolationTolerance); } @Test public void testInterpolateLinearDegenerateThreeSegment() throws Exception { double x[] = { 0.0, 0.5, 1.0, 1.5 }; double y[] = { 0.0, 0.5, 1.0, 1.5 }; UnivariateRealInterpolator i = new LinearInterpolator(); UnivariateRealFunction f = i.interpolate(x, y); verifyInterpolation(f, x, y); // Verify coefficients using analytical values PolynomialFunction polynomials[] = ((PolynomialSplineFunction) f).getPolynomials();<|fim▁hole|> TestUtils.assertEquals(polynomials[0].getCoefficients(), target, coefficientTolerance); target = new double[]{y[1], 1d}; TestUtils.assertEquals(polynomials[1].getCoefficients(), target, coefficientTolerance); target = new double[]{y[2], 1d}; TestUtils.assertEquals(polynomials[2].getCoefficients(), target, coefficientTolerance); // Check interpolation Assert.assertEquals(0,f.value(0), 
interpolationTolerance); Assert.assertEquals(1.4,f.value(1.4), interpolationTolerance); Assert.assertEquals(1.5,f.value(1.5), interpolationTolerance); } @Test public void testInterpolateLinear() throws Exception { double x[] = { 0.0, 0.5, 1.0 }; double y[] = { 0.0, 0.5, 0.0 }; UnivariateRealInterpolator i = new LinearInterpolator(); UnivariateRealFunction f = i.interpolate(x, y); verifyInterpolation(f, x, y); // Verify coefficients using analytical values PolynomialFunction polynomials[] = ((PolynomialSplineFunction) f).getPolynomials(); double target[] = {y[0], 1d}; TestUtils.assertEquals(polynomials[0].getCoefficients(), target, coefficientTolerance); target = new double[]{y[1], -1d}; TestUtils.assertEquals(polynomials[1].getCoefficients(), target, coefficientTolerance); } @Test public void testIllegalArguments() throws MathException { // Data set arrays of different size. UnivariateRealInterpolator i = new LinearInterpolator(); try { double xval[] = { 0.0, 1.0 }; double yval[] = { 0.0, 1.0, 2.0 }; i.interpolate(xval, yval); Assert.fail("Failed to detect data set array with different sizes."); } catch (DimensionMismatchException iae) { // Expected. } // X values not sorted. try { double xval[] = { 0.0, 1.0, 0.5 }; double yval[] = { 0.0, 1.0, 2.0 }; i.interpolate(xval, yval); Assert.fail("Failed to detect unsorted arguments."); } catch (NonMonotonousSequenceException iae) { // Expected. } // Not enough data to interpolate. try { double xval[] = { 0.0 }; double yval[] = { 0.0 }; i.interpolate(xval, yval); Assert.fail("Failed to detect a data set that is too small."); } catch (NumberIsTooSmallException iae) { // Expected. } } /** * verifies that f(x[i]) = y[i] for i = 0..n-1 where n is common length. */ protected void verifyInterpolation(UnivariateRealFunction f, double x[], double y[]) throws Exception{ for (int i = 0; i < x.length; i++) { Assert.assertEquals(f.value(x[i]), y[i], knotTolerance); } } }<|fim▁end|>
double target[] = {y[0], 1d};
<|file_name|>mdl_file.cpp<|end_file_name|><|fim▁begin|>#include "mdl_file.hpp" MDLFile::MDLFile(std::string file_path): File(std::move(file_path)) {this->update();} bool MDLFile::exists_tag(const std::string& tag_name) const { return this->parsed_tags.find(tag_name) != this->parsed_tags.end(); } bool MDLFile::exists_sequence(const std::string& sequence_name) const { return this->parsed_sequences.find(sequence_name) != this->parsed_sequences.end(); } void MDLFile::add_tag(std::string tag_name, std::string data) { this->write(tag_name + ": " + data, false); this->parsed_tags[tag_name] = data; } void MDLFile::add_sequence(std::string sequence_name, std::vector<std::string> data) { this->write(sequence_name + ": %[", false);<|fim▁hole|> { std::string suffix = (i == data.size() - 1) ? "]%" : ""; this->write("- " + data.at(i) + suffix, false); } else this->write("]%", false); this->parsed_sequences[sequence_name] = data; } void MDLFile::delete_tag(std::string tag_name) { if(exists_tag(tag_name)) { std::vector<std::string> lines = this->get_lines(); for(std::size_t i = 0; i < lines.size(); i++) { std::string s = lines.at(i); if(mdl::util::find_tag_name(s) == tag_name) { this->write_line("", i); i++; } } this->parsed_tags.erase(tag_name); } } void MDLFile::delete_sequence(std::string sequence_name) { if(exists_sequence(sequence_name)) { std::vector<std::string> lines = this->get_lines(); for(std::size_t i = 0; i < lines.size(); i++) { std::string s = lines.at(i); if(mdl::util::find_tag_name(s) == sequence_name) { std::size_t sequence_size = mdl::util::find_sequence_values(lines, i).size(); for(std::size_t j = 0; j <= sequence_size; j++) { this->write_line("", i + j); } } } this->parsed_sequences.erase(sequence_name); } } void MDLFile::edit_tag(std::string tag_name, std::string data) { this->delete_tag(tag_name); this->add_tag(tag_name, std::move(data)); } void MDLFile::edit_sequence(std::string sequence_name, std::vector<std::string> data) { this->delete_sequence(sequence_name); this->add_sequence(sequence_name, std::move(data)); } std::string MDLFile::get_tag(const std::string& tag_name) const { if(this->exists_tag(tag_name)) return this->get_parsed_tags().at(tag_name); return mdl::default_string; } std::vector<std::string> MDLFile::get_sequence(const std::string& sequence_name) const { if(this->exists_sequence(sequence_name)) return this->get_parsed_sequences().at(sequence_name); return {}; } const std::map<std::string, std::string>& MDLFile::get_parsed_tags() const { return this->parsed_tags; } const std::map<std::string, std::vector<std::string>>& MDLFile::get_parsed_sequences() const { return this->parsed_sequences; } void MDLFile::update() { this->parsed_tags.clear(); this->parsed_sequences.clear(); std::vector<std::string> lines = this->get_lines(); for(std::size_t i = 0; i < lines.size(); i++) { std::string line = lines.at(i); if(mdl::syntax::is_comment(line)) continue; if(mdl::syntax::is_tag(line)) this->parsed_tags[mdl::util::find_tag_name(line)] = mdl::util::find_tag_value(line); if(mdl::syntax::is_sequence(line)) this->parsed_sequences[mdl::util::find_sequence_name(line)] = mdl::util::find_sequence_values(lines, i); } } namespace mdl { std::vector<std::string> read_lines(const std::string& filename) { return File(filename).get_lines(); } std::string read(const std::string& filename) { return File(filename).get_data(); } namespace syntax { bool is_comment(const std::string& line) { return line.c_str()[0] == '#'; } bool is_tag(const std::string& line) { return line.find(": ") != 
std::string::npos && !mdl::syntax::is_sequence(line); } bool is_sequence(const std::string& line) { return line.find(": ") != std::string::npos && mdl::util::ends_with(line, "%["); } bool is_end_of_sequence(const std::string& line) { return mdl::util::ends_with(line, "]%"); } } namespace util { std::vector<std::string> split_string(const std::string& string, const std::string& delimiter) { std::vector<std::string> v; // Start of an element. std::size_t element_start = 0; // We start searching from the end of the previous element, which // initially is the start of the string. std::size_t element_end = 0; // Find the first non-delim, i.e. the start of an element, after the end of the previous element. while((element_start = string.find_first_not_of(delimiter, element_end)) != std::string::npos) { // Find the first delem, i.e. the end of the element (or if this fails it is the end of the string). element_end = string.find_first_of(delimiter, element_start); // Add it. v.emplace_back(string, element_start, element_end == std::string::npos ? std::string::npos : element_end - element_start); } // When there are no more non-spaces, we are done. return v; } bool ends_with(const std::string& string, const std::string& suffix) { if(string.length() >= suffix.length()) return (0 == string.compare(string.length() - suffix.length(), suffix.length(), suffix)); else return false; } bool begins_with(const std::string& string, const std::string& prefix) { return string.compare(0, prefix.length(), prefix) == 0; } std::string find_tag_name(const std::string& line) { std::string r; std::vector<std::string> sp = mdl::util::split_string(line, ":"); constexpr std::size_t minimum_split_quantity = 2; if(sp.size() < minimum_split_quantity) return mdl::default_string; return sp.at(0); } std::string find_tag_value(const std::string& line) { std::string r; std::vector<std::string> sp = mdl::util::split_string(line, ":"); constexpr std::size_t minimum_split_quantity = 2; if(sp.size() < minimum_split_quantity) return mdl::default_string; for(std::size_t i = 1; i < sp.size(); i++) { sp.at(i).erase(0, 1); r += sp.at(i); } return r; } std::string find_sequence_name(const std::string& line) { // Identical to finding tag name return find_tag_name(line); } std::vector<std::string> find_sequence_values(const std::vector<std::string>& lines, std::size_t index) { bool end = false; std::vector<std::string> r; if(!mdl::syntax::is_sequence(lines.at(index))) return r; while(++index < lines.size() && !end) { std::string cur = lines.at(index); if(mdl::util::begins_with(cur, "- ")) { cur.erase(0, 2); if(mdl::syntax::is_end_of_sequence(cur)) { cur.erase(cur.length() - 2, 2); end = true; } r.push_back(cur); } } return r; } } }<|fim▁end|>
if(!data.empty()) for(std::size_t i = 0; i < data.size(); i++)
<|file_name|>parm.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Parameterized string expansion use self::Param::*; use self::States::*; use std::iter::repeat; #[derive(Clone, Copy, PartialEq)] enum States { Nothing, Percent, SetVar, GetVar, PushParam, CharConstant, CharClose, IntConstant(i32), FormatPattern(Flags, FormatState), SeekIfElse(usize), SeekIfElsePercent(usize), SeekIfEnd(usize), SeekIfEndPercent(usize), } #[derive(Copy, PartialEq, Clone)] enum FormatState { Flags, Width, Precision, } /// Types of parameters a capability can use #[allow(missing_docs)] #[derive(Clone)] pub enum Param { Words(String), Number(i32), } /// Container for static and dynamic variable arrays pub struct Variables { /// Static variables A-Z sta: [Param; 26], /// Dynamic variables a-z dyn: [Param; 26], } impl Variables { /// Return a new zero-initialized Variables pub fn new() -> Variables { Variables { sta: [Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0)], dyn: [Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0)], } } } /// Expand a parameterized capability /// /// # Arguments /// * `cap` - string to expand /// * `params` - vector of params for %p1 etc /// * `vars` - Variables struct for %Pa etc /// /// To be compatible with ncurses, `vars` should be the same between calls to `expand` for /// multiple capabilities for the same terminal. pub fn expand(cap: &[u8], params: &[Param], vars: &mut Variables) -> Result<Vec<u8>, String> { let mut state = Nothing; // expanded cap will only rarely be larger than the cap itself let mut output = Vec::with_capacity(cap.len()); let mut stack: Vec<Param> = Vec::new(); // Copy parameters into a local vector for mutability let mut mparams = [Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0), Number(0)]; for (dst, src) in mparams.iter_mut().zip(params.iter()) { *dst = (*src).clone(); } for &c in cap.iter() { let cur = c as char; let mut old_state = state; match state { Nothing => { if cur == '%' { state = Percent; } else { output.push(c); } } Percent => { match cur { '%' => { output.push(c); state = Nothing } 'c' => { match stack.pop() { // if c is 0, use 0200 (128) for ncurses compatibility Some(Number(0)) => output.push(128u8), // Don't check bounds. ncurses just casts and truncates. 
Some(Number(c)) => output.push(c as u8), Some(_) => return Err("a non-char was used with %c".to_string()), None => return Err("stack is empty".to_string()), } } 'p' => state = PushParam, 'P' => state = SetVar, 'g' => state = GetVar, '\'' => state = CharConstant, '{' => state = IntConstant(0), 'l' => { match stack.pop() { Some(Words(s)) => stack.push(Number(s.len() as i32)), Some(_) => return Err("a non-str was used with %l".to_string()), None => return Err("stack is empty".to_string()), } } '+' | '-' | '/' | '*' | '^' | '&' | '|' | 'm' => { match (stack.pop(), stack.pop()) { (Some(Number(y)), Some(Number(x))) => { stack.push(Number(match cur { '+' => x + y, '-' => x - y, '*' => x * y, '/' => x / y, '|' => x | y, '&' => x & y, '^' => x ^ y, 'm' => x % y, _ => unreachable!("All cases handled"), })) } (Some(_), Some(_)) => { return Err(format!("non-numbers on stack with {}", cur)) } _ => return Err("stack is empty".to_string()), } } '=' | '>' | '<' | 'A' | 'O' => { match (stack.pop(), stack.pop()) { (Some(Number(y)), Some(Number(x))) => { stack.push(Number(if match cur { '=' => x == y, '<' => x < y, '>' => x > y, 'A' => x > 0 && y > 0, 'O' => x > 0 || y > 0, _ => unreachable!(), } { 1 } else { 0 })) } (Some(_), Some(_)) => { return Err(format!("non-numbers on stack with {}", cur)) } _ => return Err("stack is empty".to_string()), } } '!' | '~' => { match stack.pop() { Some(Number(x)) => { stack.push(Number(match cur { '!' if x > 0 => 0, '!' => 1, '~' => !x, _ => unreachable!(), })) } Some(_) => return Err(format!("non-numbers on stack with {}", cur)), None => return Err("stack is empty".to_string()), } } 'i' => { match (&mparams[0], &mparams[1]) { (&Number(x), &Number(y)) => { mparams[0] = Number(x + 1); mparams[1] = Number(y + 1); } _ => { return Err("first two params not numbers with %i".to_string()) } } } // printf-style support for %doxXs 'd' | 'o' | 'x' | 'X' | 's' => { if let Some(arg) = stack.pop() { let flags = Flags::new(); let res = format(arg, FormatOp::from_char(cur), flags)?; output.extend(res.iter().cloned()); } else { return Err("stack is empty".to_string()); } } ':' | '#' | ' ' | '.' | '0'..='9' => { let mut flags = Flags::new(); let mut fstate = FormatState::Flags; match cur { ':' => (), '#' => flags.alternate = true, ' ' => flags.space = true, '.' => fstate = FormatState::Precision, '0'..='9' => { flags.width = cur as usize - '0' as usize; fstate = FormatState::Width; } _ => unreachable!(), } state = FormatPattern(flags, fstate); } // conditionals '?' 
=> (), 't' => { match stack.pop() { Some(Number(0)) => state = SeekIfElse(0), Some(Number(_)) => (), Some(_) => { return Err("non-number on stack with conditional".to_string()) } None => return Err("stack is empty".to_string()), } } 'e' => state = SeekIfEnd(0), ';' => (), _ => return Err(format!("unrecognized format option {}", cur)), } } PushParam => { // params are 1-indexed stack.push(mparams[match cur.to_digit(10) { Some(d) => d as usize - 1, None => return Err("bad param number".to_string()), }] .clone()); } SetVar => { if cur >= 'A' && cur <= 'Z' { if let Some(arg) = stack.pop() { let idx = (cur as u8) - b'A'; vars.sta[idx as usize] = arg; } else { return Err("stack is empty".to_string()); } } else if cur >= 'a' && cur <= 'z' { if let Some(arg) = stack.pop() { let idx = (cur as u8) - b'a'; vars.dyn[idx as usize] = arg; } else { return Err("stack is empty".to_string()); } } else { return Err("bad variable name in %P".to_string()); } } GetVar => { if cur >= 'A' && cur <= 'Z' { let idx = (cur as u8) - b'A'; stack.push(vars.sta[idx as usize].clone()); } else if cur >= 'a' && cur <= 'z' { let idx = (cur as u8) - b'a'; stack.push(vars.dyn[idx as usize].clone()); } else { return Err("bad variable name in %g".to_string()); } } CharConstant => { stack.push(Number(c as i32)); state = CharClose; } CharClose => { if cur != '\'' { return Err("malformed character constant".to_string()); } } IntConstant(i) => { if cur == '}' { stack.push(Number(i)); state = Nothing; } else if let Some(digit) = cur.to_digit(10) { match i.checked_mul(10).and_then(|i_ten| i_ten.checked_add(digit as i32)) { Some(i) => { state = IntConstant(i); old_state = Nothing; } None => return Err("int constant too large".to_string()), } } else { return Err("bad int constant".to_string()); } } FormatPattern(ref mut flags, ref mut fstate) => { old_state = Nothing; match (*fstate, cur) { (_, 'd') | (_, 'o') | (_, 'x') | (_, 'X') | (_, 's') => { if let Some(arg) = stack.pop() { let res = format(arg, FormatOp::from_char(cur), *flags)?; output.extend(res.iter().cloned()); // will cause state to go to Nothing old_state = FormatPattern(*flags, *fstate); } else { return Err("stack is empty".to_string()); } } (FormatState::Flags, '#') => { flags.alternate = true; } (FormatState::Flags, '-') => { flags.left = true; } (FormatState::Flags, '+') => { flags.sign = true; } (FormatState::Flags, ' ') => { flags.space = true; } (FormatState::Flags, '0'..='9') => { flags.width = cur as usize - '0' as usize; *fstate = FormatState::Width; } (FormatState::Flags, '.') => { *fstate = FormatState::Precision; } (FormatState::Width, '0'..='9') => { let old = flags.width; flags.width = flags.width * 10 + (cur as usize - '0' as usize); if flags.width < old { return Err("format width overflow".to_string()); } } (FormatState::Width, '.') => { *fstate = FormatState::Precision; } (FormatState::Precision, '0'..='9') => { let old = flags.precision; flags.precision = flags.precision * 10 + (cur as usize - '0' as usize); if flags.precision < old { return Err("format precision overflow".to_string()); } } _ => return Err("invalid format specifier".to_string()), } } SeekIfElse(level) => { if cur == '%' { state = SeekIfElsePercent(level); } old_state = Nothing; } SeekIfElsePercent(level) => { if cur == ';' { if level == 0 { state = Nothing; } else { state = SeekIfElse(level - 1); } } else if cur == 'e' && level == 0 { state = Nothing; } else if cur == '?' 
{ state = SeekIfElse(level + 1); } else { state = SeekIfElse(level); } } SeekIfEnd(level) => { if cur == '%' { state = SeekIfEndPercent(level); } old_state = Nothing; } SeekIfEndPercent(level) => { if cur == ';' { if level == 0 { state = Nothing; } else { state = SeekIfEnd(level - 1); } } else if cur == '?' { state = SeekIfEnd(level + 1);<|fim▁hole|> } } } if state == old_state { state = Nothing; } } Ok(output) } #[derive(Copy, PartialEq, Clone)] struct Flags { width: usize, precision: usize, alternate: bool, left: bool, sign: bool, space: bool, } impl Flags { fn new() -> Flags { Flags { width: 0, precision: 0, alternate: false, left: false, sign: false, space: false, } } } #[derive(Copy, Clone)] enum FormatOp { Digit, Octal, LowerHex, UpperHex, String, } impl FormatOp { fn from_char(c: char) -> FormatOp { match c { 'd' => FormatOp::Digit, 'o' => FormatOp::Octal, 'x' => FormatOp::LowerHex, 'X' => FormatOp::UpperHex, 's' => FormatOp::String, _ => panic!("bad FormatOp char"), } } fn to_char(self) -> char { match self { FormatOp::Digit => 'd', FormatOp::Octal => 'o', FormatOp::LowerHex => 'x', FormatOp::UpperHex => 'X', FormatOp::String => 's', } } } fn format(val: Param, op: FormatOp, flags: Flags) -> Result<Vec<u8>, String> { let mut s = match val { Number(d) => { match op { FormatOp::Digit => { if flags.sign { format!("{:+01$}", d, flags.precision) } else if d < 0 { // C doesn't take sign into account in precision calculation. format!("{:01$}", d, flags.precision + 1) } else if flags.space { format!(" {:01$}", d, flags.precision) } else { format!("{:01$}", d, flags.precision) } } FormatOp::Octal => { if flags.alternate { // Leading octal zero counts against precision. format!("0{:01$o}", d, flags.precision.saturating_sub(1)) } else { format!("{:01$o}", d, flags.precision) } } FormatOp::LowerHex => { if flags.alternate && d != 0 { format!("0x{:01$x}", d, flags.precision) } else { format!("{:01$x}", d, flags.precision) } } FormatOp::UpperHex => { if flags.alternate && d != 0 { format!("0X{:01$X}", d, flags.precision) } else { format!("{:01$X}", d, flags.precision) } } FormatOp::String => return Err("non-number on stack with %s".to_string()), } .into_bytes() } Words(s) => { match op { FormatOp::String => { let mut s = s.into_bytes(); if flags.precision > 0 && flags.precision < s.len() { s.truncate(flags.precision); } s } _ => return Err(format!("non-string on stack with %{}", op.to_char())), } } }; if flags.width > s.len() { let n = flags.width - s.len(); if flags.left { s.extend(repeat(b' ').take(n)); } else { let mut s_ = Vec::with_capacity(flags.width); s_.extend(repeat(b' ').take(n)); s_.extend(s.into_iter()); s = s_; } } Ok(s) } #[cfg(test)] mod test { use super::{expand, Variables}; use super::Param::{self, Words, Number}; use std::result::Result::Ok; #[test] fn test_basic_setabf() { let s = b"\\E[48;5;%p1%dm"; assert_eq!(expand(s, &[Number(1)], &mut Variables::new()).unwrap(), "\\E[48;5;1m".bytes().collect::<Vec<_>>()); } #[test] fn test_multiple_int_constants() { assert_eq!(expand(b"%{1}%{2}%d%d", &[], &mut Variables::new()).unwrap(), "21".bytes().collect::<Vec<_>>()); } #[test] fn test_op_i() { let mut vars = Variables::new(); assert_eq!(expand(b"%p1%d%p2%d%p3%d%i%p1%d%p2%d%p3%d", &[Number(1), Number(2), Number(3)], &mut vars), Ok("123233".bytes().collect::<Vec<_>>())); assert_eq!(expand(b"%p1%d%p2%d%i%p1%d%p2%d", &[], &mut vars), Ok("0011".bytes().collect::<Vec<_>>())); } #[test] fn test_param_stack_failure_conditions() { let mut varstruct = Variables::new(); let vars = &mut 
varstruct; fn get_res(fmt: &str, cap: &str, params: &[Param], vars: &mut Variables) -> Result<Vec<u8>, String> { let mut u8v: Vec<_> = fmt.bytes().collect(); u8v.extend(cap.as_bytes().iter().map(|&b| b)); expand(&u8v, params, vars) } let caps = ["%d", "%c", "%s", "%Pa", "%l", "%!", "%~"]; for &cap in caps.iter() { let res = get_res("", cap, &[], vars); assert!(res.is_err(), "Op {} succeeded incorrectly with 0 stack entries", cap); let p = if cap == "%s" || cap == "%l" { Words("foo".to_string()) } else { Number(97) }; let res = get_res("%p1", cap, &[p], vars); assert!(res.is_ok(), "Op {} failed with 1 stack entry: {}", cap, res.unwrap_err()); } let caps = ["%+", "%-", "%*", "%/", "%m", "%&", "%|", "%A", "%O"]; for &cap in caps.iter() { let res = expand(cap.as_bytes(), &[], vars); assert!(res.is_err(), "Binop {} succeeded incorrectly with 0 stack entries", cap); let res = get_res("%{1}", cap, &[], vars); assert!(res.is_err(), "Binop {} succeeded incorrectly with 1 stack entry", cap); let res = get_res("%{1}%{2}", cap, &[], vars); assert!(res.is_ok(), "Binop {} failed with 2 stack entries: {}", cap, res.unwrap_err()); } } #[test] fn test_push_bad_param() { assert!(expand(b"%pa", &[], &mut Variables::new()).is_err()); } #[test] fn test_comparison_ops() { let v = [('<', [1u8, 0u8, 0u8]), ('=', [0u8, 1u8, 0u8]), ('>', [0u8, 0u8, 1u8])]; for &(op, bs) in v.iter() { let s = format!("%{{1}}%{{2}}%{}%d", op); let res = expand(s.as_bytes(), &[], &mut Variables::new()); assert!(res.is_ok(), res.unwrap_err()); assert_eq!(res.unwrap(), vec![b'0' + bs[0]]); let s = format!("%{{1}}%{{1}}%{}%d", op); let res = expand(s.as_bytes(), &[], &mut Variables::new()); assert!(res.is_ok(), res.unwrap_err()); assert_eq!(res.unwrap(), vec![b'0' + bs[1]]); let s = format!("%{{2}}%{{1}}%{}%d", op); let res = expand(s.as_bytes(), &[], &mut Variables::new()); assert!(res.is_ok(), res.unwrap_err()); assert_eq!(res.unwrap(), vec![b'0' + bs[2]]); } } #[test] fn test_conditionals() { let mut vars = Variables::new(); let s = b"\\E[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m"; let res = expand(s, &[Number(1)], &mut vars); assert!(res.is_ok(), res.unwrap_err()); assert_eq!(res.unwrap(), "\\E[31m".bytes().collect::<Vec<_>>()); let res = expand(s, &[Number(8)], &mut vars); assert!(res.is_ok(), res.unwrap_err()); assert_eq!(res.unwrap(), "\\E[90m".bytes().collect::<Vec<_>>()); let res = expand(s, &[Number(42)], &mut vars); assert!(res.is_ok(), res.unwrap_err()); assert_eq!(res.unwrap(), "\\E[38;5;42m".bytes().collect::<Vec<_>>()); } #[test] fn test_format() { let mut varstruct = Variables::new(); let vars = &mut varstruct; assert_eq!(expand(b"%p1%s%p2%2s%p3%2s%p4%.2s", &[Words("foo".to_string()), Words("foo".to_string()), Words("f".to_string()), Words("foo".to_string())], vars), Ok("foofoo ffo".bytes().collect::<Vec<_>>())); assert_eq!(expand(b"%p1%:-4.2s", &[Words("foo".to_string())], vars), Ok("fo ".bytes().collect::<Vec<_>>())); assert_eq!(expand(b"%p1%d%p1%.3d%p1%5d%p1%:+d", &[Number(1)], vars), Ok("1001 1+1".bytes().collect::<Vec<_>>())); assert_eq!(expand(b"%p1%o%p1%#o%p2%6.4x%p2%#6.4X", &[Number(15), Number(27)], vars), Ok("17017 001b0X001B".bytes().collect::<Vec<_>>())); } }<|fim▁end|>
} else { state = SeekIfEnd(level);
<|file_name|>sketch2.ts<|end_file_name|><|fim▁begin|>//#!tsc && NODE_PATH=dist/src node dist/sketch2.js import { Point, Rect, TextHjustify, TextVjustify, Transform, DECIDEG2RAD, TextAngle, } from "./src/kicad_common"; import { StrokeFont } from "./src/kicad_strokefont"; import { Plotter, CanvasPlotter } from "./src/kicad_plotter"; import * as fs from "fs"; { const font = StrokeFont.instance; const width = 2000, height = 2000; const Canvas = require('canvas'); const canvas = Canvas.createCanvas ? Canvas.createCanvas(width, height) : new Canvas(width, height); const ctx = canvas.getContext('2d'); ctx.strokeStyle = "#666666"; ctx.lineWidth = 1; ctx.beginPath(); ctx.moveTo(canvas.width / 2, 0); ctx.lineTo(canvas.width / 2, canvas.height); ctx.stroke(); ctx.beginPath();<|fim▁hole|> ctx.moveTo(0, canvas.height / 2); ctx.lineTo(canvas.width, canvas.height / 2); ctx.stroke(); ctx.lineCap = "round"; ctx.lineJoin = 'round'; // ctx.translate(canvas.width / 2, canvas.height / 2); const plotter = new CanvasPlotter(ctx); const text = 'jeyjmcNV'; const size = 100; const lineWidth = 20; const bold = false; const italic = false; const pos = { x: canvas.width / 2, y: canvas.height / 2 }; const vjustify = TextVjustify.CENTER; { const boundingbox = font.computeStringBoundaryLimits(text, size, lineWidth, italic); ctx.save(); ctx.translate(pos.x, pos.y); ctx.translate(0, size / 2); ctx.fillStyle = "rgba(255, 0, 0, 0.3)"; ctx.fillRect(0, 0, boundingbox.width, -boundingbox.height); ctx.fillStyle = "rgba(0, 0, 255, 0.3)"; ctx.fillRect(0, 0, boundingbox.width, boundingbox.topLimit); ctx.fillRect(0, 0, boundingbox.width, boundingbox.bottomLimit); { const n = text.charCodeAt(0) - ' '.charCodeAt(0); const glyph = font.glyphs[n]; console.log(JSON.stringify(glyph)); ctx.fillStyle = "rgba(0, 255, 0, 0.3)"; ctx.fillRect( glyph.boundingBox.pos1.x * size, glyph.boundingBox.pos1.y * size, glyph.boundingBox.width * size, glyph.boundingBox.height * size ); ctx.restore(); } } font.drawText(plotter, pos, text, size, lineWidth, TextAngle.HORIZ, TextHjustify.LEFT, vjustify, italic, bold); const out = fs.createWriteStream('text.png'), stream = canvas.pngStream(); stream.on('data', function (chunk: any) { out.write(chunk); }); stream.on('end', function(){ console.log('saved png'); }); }<|fim▁end|>
<|file_name|>output_folder.py<|end_file_name|><|fim▁begin|># coding=utf-8 import typing from collections import MutableMapping from emft.core.logging import make_logger from emft.core.path import Path from emft.core.singleton import Singleton LOGGER = make_logger(__name__) # noinspection PyAbstractClass class OutputFolder(Path): pass class OutputFolders(MutableMapping, metaclass=Singleton): ACTIVE_OUTPUT_FOLDER = None ACTIVE_OUTPUT_FOLDER_NAME = None def __init__(self, init_dict: dict = None): self._data = init_dict or dict() def __getitem__(self, key) -> OutputFolder: return self._data.__getitem__(key) def __iter__(self) -> typing.Iterator[str]: return self._data.__iter__() def values(self) -> typing.List[OutputFolder]: return list(self._data.values()) @property def data(self) -> dict: return self._data def __len__(self) -> int: return self._data.__len__() def __delitem__(self, key): return self._data.__delitem__(key)<|fim▁hole|> def __setitem__(self, key, value: OutputFolder): return self._data.__setitem__(key, value)<|fim▁end|>
<|file_name|>assign-to-method.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. struct cat { priv meows : uint, how_hungry : int, } impl cat { pub fn speak(&self) { self.meows += 1u; } } fn cat(in_x : uint, in_y : int) -> cat {<|fim▁hole|> meows: in_x, how_hungry: in_y } } fn main() { let nyan : cat = cat(52u, 99); nyan.speak = || info!("meow"); //~ ERROR attempted to take value of method }<|fim▁end|>
cat {
<|file_name|>HeaderButton.tsx<|end_file_name|><|fim▁begin|>/* Copyright 2015, 2016 OpenMarket Ltd Copyright 2017 Vector Creations Ltd Copyright 2017 New Vector Ltd Copyright 2018 New Vector Ltd Copyright 2019 The Matrix.org Foundation C.I.C. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ import React from 'react'; import classNames from 'classnames'; import Analytics from '../../../Analytics'; import AccessibleTooltipButton from "../elements/AccessibleTooltipButton"; import { replaceableComponent } from "../../../utils/replaceableComponent"; import { ButtonEvent } from "../elements/AccessibleButton"; interface IProps { // Whether this button is highlighted isHighlighted: boolean; // click handler onClick: (ev: ButtonEvent) => void; // The parameters to track the click event analytics: Parameters<typeof Analytics.trackEvent>; // Button name name: string; // Button title title: string; } // TODO: replace this, the composer buttons and the right panel buttons with a unified representation @replaceableComponent("views.right_panel.HeaderButton") export default class HeaderButton extends React.Component<IProps> { private onClick = (ev: ButtonEvent) => { Analytics.trackEvent(...this.props.analytics); this.props.onClick(ev); }; public render() { // eslint-disable-next-line @typescript-eslint/no-unused-vars const { isHighlighted, onClick, analytics, name, title, ...props } = this.props; const classes = classNames({<|fim▁hole|> mx_RightPanel_headerButton: true, mx_RightPanel_headerButton_highlight: isHighlighted, [`mx_RightPanel_${name}`]: true, }); return <AccessibleTooltipButton {...props} aria-selected={isHighlighted} role="tab" title={title} className={classes} onClick={this.onClick} />; } }<|fim▁end|>
<|file_name|>dicco_numbers.py<|end_file_name|><|fim▁begin|>def freq_month(obj): if obj is None or obj == []: return months = {1: 'jan', 2: 'feb', 3: 'mar', 4: 'apr', 5: 'may', 6: 'jun', 7: 'jul', 8: 'aug', 9: 'sep', 10: 'oct', 11: 'nov', 12: 'dec', } frequencies = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] # for i in range(0, len(obj)):<|fim▁hole|> for i in obj: frequencies[ i-1 ] += 1 print "The following month(s) have a birthday celebration" for i in range(0, len(frequencies)): if frequencies[i] > 0: print str(months[i+1]) + " has " + str(frequencies[i]) return frequencies in_array = [3,6,2,7,7,7,] print freq_month(in_array) print freq_month([])<|fim▁end|>
# frequencies[ obj[i] -1] += 1
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>mod dyngen; mod error; mod helpers; mod impl_debug; mod impl_partialeq; pub mod struct_layout; #[cfg(test)] #[allow(warnings)] pub(crate) mod bitfield_unit; #[cfg(all(test, target_endian = "little"))] mod bitfield_unit_tests; use self::dyngen::DynamicItems; use self::helpers::attributes; use self::struct_layout::StructLayoutTracker; use super::BindgenOptions; use crate::ir::analysis::{HasVtable, Sizedness}; use crate::ir::annotations::FieldAccessorKind; use crate::ir::comment; use crate::ir::comp::{ Bitfield, BitfieldUnit, CompInfo, CompKind, Field, FieldData, FieldMethods, Method, MethodKind, }; use crate::ir::context::{BindgenContext, ItemId}; use crate::ir::derive::{ CanDerive, CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq, CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd, }; use crate::ir::dot; use crate::ir::enum_ty::{Enum, EnumVariant, EnumVariantValue}; use crate::ir::function::{Abi, Function, FunctionKind, FunctionSig, Linkage}; use crate::ir::int::IntKind; use crate::ir::item::{IsOpaque, Item, ItemCanonicalName, ItemCanonicalPath}; use crate::ir::item_kind::ItemKind; use crate::ir::layout::Layout; use crate::ir::module::Module; use crate::ir::objc::{ObjCInterface, ObjCMethod}; use crate::ir::template::{ AsTemplateParam, TemplateInstantiation, TemplateParameters, }; use crate::ir::ty::{Type, TypeKind}; use crate::ir::var::Var; use proc_macro2::{self, Ident, Span}; use quote::TokenStreamExt; use crate::{Entry, HashMap, HashSet}; use std::borrow::Cow; use std::cell::Cell; use std::collections::VecDeque; use std::fmt::Write; use std::iter; use std::ops; use std::str::FromStr; // Name of type defined in constified enum module pub static CONSTIFIED_ENUM_MODULE_REPR_NAME: &str = "Type"; fn top_level_path( ctx: &BindgenContext, item: &Item, ) -> Vec<proc_macro2::TokenStream> { let mut path = vec![quote! { self }]; if ctx.options().enable_cxx_namespaces { for _ in 0..item.codegen_depth(ctx) { path.push(quote! { super }); } } path } fn root_import( ctx: &BindgenContext, module: &Item, ) -> proc_macro2::TokenStream { assert!(ctx.options().enable_cxx_namespaces, "Somebody messed it up"); assert!(module.is_module()); let mut path = top_level_path(ctx, module); let root = ctx.root_module().canonical_name(ctx); let root_ident = ctx.rust_ident(&root); path.push(quote! { #root_ident }); let mut tokens = quote! {}; tokens.append_separated(path, quote!(::)); quote! { #[allow(unused_imports)] use #tokens ; } } bitflags! { struct DerivableTraits: u16 { const DEBUG = 1 << 0; const DEFAULT = 1 << 1; const COPY = 1 << 2; const CLONE = 1 << 3; const HASH = 1 << 4; const PARTIAL_ORD = 1 << 5; const ORD = 1 << 6; const PARTIAL_EQ = 1 << 7; const EQ = 1 << 8; } } fn derives_of_item(item: &Item, ctx: &BindgenContext) -> DerivableTraits { let mut derivable_traits = DerivableTraits::empty(); if item.can_derive_debug(ctx) && !item.annotations().disallow_debug() { derivable_traits |= DerivableTraits::DEBUG; } if item.can_derive_default(ctx) && !item.annotations().disallow_default() { derivable_traits |= DerivableTraits::DEFAULT; } let all_template_params = item.all_template_params(ctx); if item.can_derive_copy(ctx) && !item.annotations().disallow_copy() { derivable_traits |= DerivableTraits::COPY; if ctx.options().rust_features().builtin_clone_impls || !all_template_params.is_empty() { // FIXME: This requires extra logic if you have a big array in a // templated struct. 
The reason for this is that the magic: // fn clone(&self) -> Self { *self } // doesn't work for templates. // // It's not hard to fix though. derivable_traits |= DerivableTraits::CLONE; } } if item.can_derive_hash(ctx) { derivable_traits |= DerivableTraits::HASH; } if item.can_derive_partialord(ctx) { derivable_traits |= DerivableTraits::PARTIAL_ORD; } if item.can_derive_ord(ctx) { derivable_traits |= DerivableTraits::ORD; } if item.can_derive_partialeq(ctx) { derivable_traits |= DerivableTraits::PARTIAL_EQ; } if item.can_derive_eq(ctx) { derivable_traits |= DerivableTraits::EQ; } derivable_traits } impl From<DerivableTraits> for Vec<&'static str> { fn from(derivable_traits: DerivableTraits) -> Vec<&'static str> { [ (DerivableTraits::DEBUG, "Debug"), (DerivableTraits::DEFAULT, "Default"), (DerivableTraits::COPY, "Copy"), (DerivableTraits::CLONE, "Clone"), (DerivableTraits::HASH, "Hash"), (DerivableTraits::PARTIAL_ORD, "PartialOrd"), (DerivableTraits::ORD, "Ord"), (DerivableTraits::PARTIAL_EQ, "PartialEq"), (DerivableTraits::EQ, "Eq"), ] .iter() .filter_map(|&(flag, derive)| { Some(derive).filter(|_| derivable_traits.contains(flag)) }) .collect() } } struct CodegenResult<'a> { items: Vec<proc_macro2::TokenStream>, dynamic_items: DynamicItems, /// A monotonic counter used to add stable unique id's to stuff that doesn't /// need to be referenced by anything. codegen_id: &'a Cell<usize>, /// Whether a bindgen union has been generated at least once. saw_bindgen_union: bool, /// Whether an incomplete array has been generated at least once. saw_incomplete_array: bool, /// Whether Objective C types have been seen at least once. saw_objc: bool, /// Whether Apple block types have been seen at least once. saw_block: bool, /// Whether a bitfield allocation unit has been seen at least once. saw_bitfield_unit: bool, items_seen: HashSet<ItemId>, /// The set of generated function/var names, needed because in C/C++ is /// legal to do something like: /// /// ```c++ /// extern "C" { /// void foo(); /// extern int bar; /// } /// /// extern "C" { /// void foo(); /// extern int bar; /// } /// ``` /// /// Being these two different declarations. functions_seen: HashSet<String>, vars_seen: HashSet<String>, /// Used for making bindings to overloaded functions. Maps from a canonical /// function name to the number of overloads we have already codegen'd for /// that name. This lets us give each overload a unique suffix. 
overload_counters: HashMap<String, u32>, } impl<'a> CodegenResult<'a> { fn new(codegen_id: &'a Cell<usize>) -> Self { CodegenResult { items: vec![], dynamic_items: DynamicItems::new(), saw_bindgen_union: false, saw_incomplete_array: false, saw_objc: false, saw_block: false, saw_bitfield_unit: false, codegen_id, items_seen: Default::default(), functions_seen: Default::default(), vars_seen: Default::default(), overload_counters: Default::default(), } } fn dynamic_items(&mut self) -> &mut DynamicItems { &mut self.dynamic_items } fn saw_bindgen_union(&mut self) { self.saw_bindgen_union = true; } fn saw_incomplete_array(&mut self) { self.saw_incomplete_array = true; } fn saw_objc(&mut self) { self.saw_objc = true; } fn saw_block(&mut self) { self.saw_block = true; } fn saw_bitfield_unit(&mut self) { self.saw_bitfield_unit = true; } fn seen<Id: Into<ItemId>>(&self, item: Id) -> bool { self.items_seen.contains(&item.into()) } fn set_seen<Id: Into<ItemId>>(&mut self, item: Id) { self.items_seen.insert(item.into()); } fn seen_function(&self, name: &str) -> bool { self.functions_seen.contains(name) } fn saw_function(&mut self, name: &str) { self.functions_seen.insert(name.into()); } /// Get the overload number for the given function name. Increments the /// counter internally so the next time we ask for the overload for this /// name, we get the incremented value, and so on. fn overload_number(&mut self, name: &str) -> u32 { let counter = self.overload_counters.entry(name.into()).or_insert(0); let number = *counter; *counter += 1; number } fn seen_var(&self, name: &str) -> bool { self.vars_seen.contains(name) } fn saw_var(&mut self, name: &str) { self.vars_seen.insert(name.into()); } fn inner<F>(&mut self, cb: F) -> Vec<proc_macro2::TokenStream> where F: FnOnce(&mut Self), { let mut new = Self::new(self.codegen_id); cb(&mut new); self.saw_incomplete_array |= new.saw_incomplete_array; self.saw_objc |= new.saw_objc; self.saw_block |= new.saw_block; self.saw_bitfield_unit |= new.saw_bitfield_unit; self.saw_bindgen_union |= new.saw_bindgen_union; new.items } } impl<'a> ops::Deref for CodegenResult<'a> { type Target = Vec<proc_macro2::TokenStream>; fn deref(&self) -> &Self::Target { &self.items } } impl<'a> ops::DerefMut for CodegenResult<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.items } } /// A trait to convert a rust type into a pointer, optionally const, to the same /// type. trait ToPtr { fn to_ptr(self, is_const: bool) -> proc_macro2::TokenStream; } impl ToPtr for proc_macro2::TokenStream { fn to_ptr(self, is_const: bool) -> proc_macro2::TokenStream { if is_const { quote! { *const #self } } else { quote! { *mut #self } } } } /// An extension trait for `proc_macro2::TokenStream` that lets us append any implicit /// template parameters that exist for some type, if necessary. trait AppendImplicitTemplateParams { fn append_implicit_template_params( &mut self, ctx: &BindgenContext, item: &Item, ); } impl AppendImplicitTemplateParams for proc_macro2::TokenStream { fn append_implicit_template_params( &mut self, ctx: &BindgenContext, item: &Item, ) { let item = item.id().into_resolver().through_type_refs().resolve(ctx); match *item.expect_type().kind() { TypeKind::UnresolvedTypeRef(..) => { unreachable!("already resolved unresolved type refs") } TypeKind::ResolvedTypeRef(..) => { unreachable!("we resolved item through type refs") } // None of these types ever have implicit template parameters. TypeKind::Void | TypeKind::NullPtr | TypeKind::Pointer(..) | TypeKind::Reference(..) 
| TypeKind::Int(..) | TypeKind::Float(..) | TypeKind::Complex(..) | TypeKind::Array(..) | TypeKind::TypeParam | TypeKind::Opaque | TypeKind::Function(..) | TypeKind::Enum(..) | TypeKind::ObjCId | TypeKind::ObjCSel | TypeKind::TemplateInstantiation(..) => return, _ => {} } let params: Vec<_> = item .used_template_params(ctx) .iter() .map(|p| { p.try_to_rust_ty(ctx, &()) .expect("template params cannot fail to be a rust type") }) .collect(); if !params.is_empty() { self.append_all(quote! { < #( #params ),* > }); } } } trait CodeGenerator { /// Extra information from the caller. type Extra; /// Extra information returned to the caller. type Return; fn codegen<'a>( &self, ctx: &BindgenContext, result: &mut CodegenResult<'a>, extra: &Self::Extra, ) -> Self::Return; } impl Item { fn process_before_codegen( &self, ctx: &BindgenContext, result: &mut CodegenResult, ) -> bool { if !self.is_enabled_for_codegen(ctx) { return false; } if self.is_blocklisted(ctx) || result.seen(self.id()) { debug!( "<Item as CodeGenerator>::process_before_codegen: Ignoring hidden or seen: \ self = {:?}", self ); return false; } if !ctx.codegen_items().contains(&self.id()) { // TODO(emilio, #453): Figure out what to do when this happens // legitimately, we could track the opaque stuff and disable the // assertion there I guess. warn!("Found non-allowlisted item in code generation: {:?}", self); } result.set_seen(self.id()); true } } impl CodeGenerator for Item { type Extra = (); type Return = (); fn codegen<'a>( &self, ctx: &BindgenContext, result: &mut CodegenResult<'a>, _extra: &(), ) { debug!("<Item as CodeGenerator>::codegen: self = {:?}", self); if !self.process_before_codegen(ctx, result) { return; } match *self.kind() { ItemKind::Module(ref module) => { module.codegen(ctx, result, self); } ItemKind::Function(ref fun) => { fun.codegen(ctx, result, self); } ItemKind::Var(ref var) => { var.codegen(ctx, result, self); } ItemKind::Type(ref ty) => { ty.codegen(ctx, result, self); } } } } impl CodeGenerator for Module { type Extra = Item; type Return = (); fn codegen<'a>( &self, ctx: &BindgenContext, result: &mut CodegenResult<'a>, item: &Item, ) { debug!("<Module as CodeGenerator>::codegen: item = {:?}", item); let codegen_self = |result: &mut CodegenResult, found_any: &mut bool| { for child in self.children() { if ctx.codegen_items().contains(child) { *found_any = true; ctx.resolve_item(*child).codegen(ctx, result, &()); } } if item.id() == ctx.root_module() { if result.saw_block { utils::prepend_block_header(ctx, &mut *result); } if result.saw_bindgen_union { utils::prepend_union_types(ctx, &mut *result); } if result.saw_incomplete_array { utils::prepend_incomplete_array_types(ctx, &mut *result); } if ctx.need_bindgen_complex_type() { utils::prepend_complex_type(&mut *result); } if result.saw_objc { utils::prepend_objc_header(ctx, &mut *result); } if result.saw_bitfield_unit { utils::prepend_bitfield_unit_type(ctx, &mut *result); } } }; if !ctx.options().enable_cxx_namespaces || (self.is_inline() && !ctx.options().conservative_inline_namespaces) { codegen_self(result, &mut false); return; } let mut found_any = false; let inner_items = result.inner(|result| { result.push(root_import(ctx, item)); let path = item.namespace_aware_canonical_path(ctx).join("::"); if let Some(raw_lines) = ctx.options().module_lines.get(&path) { for raw_line in raw_lines { found_any = true; result.push( proc_macro2::TokenStream::from_str(raw_line).unwrap(), ); } } codegen_self(result, &mut found_any); }); // Don't bother creating an empty 
module. if !found_any { return; } let name = item.canonical_name(ctx); let ident = ctx.rust_ident(name); result.push(if item.id() == ctx.root_module() { quote! { #[allow(non_snake_case, non_camel_case_types, non_upper_case_globals)] pub mod #ident { #( #inner_items )* } } } else { quote! { pub mod #ident { #( #inner_items )* } } }); } } impl CodeGenerator for Var { type Extra = Item; type Return = (); fn codegen<'a>( &self, ctx: &BindgenContext, result: &mut CodegenResult<'a>, item: &Item, ) { use crate::ir::var::VarType; debug!("<Var as CodeGenerator>::codegen: item = {:?}", item); debug_assert!(item.is_enabled_for_codegen(ctx)); let canonical_name = item.canonical_name(ctx); if result.seen_var(&canonical_name) { return; } result.saw_var(&canonical_name); let canonical_ident = ctx.rust_ident(&canonical_name); // We can't generate bindings to static variables of templates. The // number of actual variables for a single declaration are open ended // and we don't know what instantiations do or don't exist. if !item.all_template_params(ctx).is_empty() { return; } let mut attrs = vec![]; if let Some(comment) = item.comment(ctx) { attrs.push(attributes::doc(comment)); } let ty = self.ty().to_rust_ty_or_opaque(ctx, &()); if let Some(val) = self.val() { match *val { VarType::Bool(val) => { result.push(quote! { #(#attrs)* pub const #canonical_ident : #ty = #val ; }); } VarType::Int(val) => { let int_kind = self .ty() .into_resolver() .through_type_aliases() .through_type_refs() .resolve(ctx) .expect_type() .as_integer() .unwrap(); let val = if int_kind.is_signed() { helpers::ast_ty::int_expr(val) } else { helpers::ast_ty::uint_expr(val as _) }; result.push(quote! { #(#attrs)* pub const #canonical_ident : #ty = #val ; }); } VarType::String(ref bytes) => { // Account the trailing zero. // // TODO: Here we ignore the type we just made up, probably // we should refactor how the variable type and ty id work. let len = bytes.len() + 1; let ty = quote! { [u8; #len] }; match String::from_utf8(bytes.clone()) { Ok(string) => { let cstr = helpers::ast_ty::cstr_expr(string); if ctx .options() .rust_features .static_lifetime_elision { result.push(quote! { #(#attrs)* pub const #canonical_ident : &#ty = #cstr ; }); } else { result.push(quote! { #(#attrs)* pub const #canonical_ident : &'static #ty = #cstr ; }); } } Err(..) => { let bytes = helpers::ast_ty::byte_array_expr(bytes); result.push(quote! { #(#attrs)* pub const #canonical_ident : #ty = #bytes ; }); } } } VarType::Float(f) => { if let Ok(expr) = helpers::ast_ty::float_expr(ctx, f) { result.push(quote! { #(#attrs)* pub const #canonical_ident : #ty = #expr ; }); } } VarType::Char(c) => { result.push(quote! { #(#attrs)* pub const #canonical_ident : #ty = #c ; }); } } } else { // If necessary, apply a `#[link_name]` attribute let link_name = self.mangled_name().unwrap_or_else(|| self.name()); if !utils::names_will_be_identical_after_mangling( &canonical_name, link_name, None, ) { attrs.push(attributes::link_name(link_name)); } let maybe_mut = if self.is_const() { quote! {} } else { quote! 
{ mut } }; let tokens = quote!( extern "C" { #(#attrs)* pub static #maybe_mut #canonical_ident: #ty; } ); result.push(tokens); } } } impl CodeGenerator for Type { type Extra = Item; type Return = (); fn codegen<'a>( &self, ctx: &BindgenContext, result: &mut CodegenResult<'a>, item: &Item, ) { debug!("<Type as CodeGenerator>::codegen: item = {:?}", item); debug_assert!(item.is_enabled_for_codegen(ctx)); match *self.kind() { TypeKind::Void | TypeKind::NullPtr | TypeKind::Int(..) | TypeKind::Float(..) | TypeKind::Complex(..) | TypeKind::Array(..) | TypeKind::Vector(..) | TypeKind::Pointer(..) | TypeKind::Reference(..) | TypeKind::Function(..) | TypeKind::ResolvedTypeRef(..) | TypeKind::Opaque | TypeKind::TypeParam => { // These items don't need code generation, they only need to be // converted to rust types in fields, arguments, and such. // NOTE(emilio): If you add to this list, make sure to also add // it to BindgenContext::compute_allowlisted_and_codegen_items. } TypeKind::TemplateInstantiation(ref inst) => { inst.codegen(ctx, result, item) } TypeKind::BlockPointer(inner) => { if !ctx.options().generate_block { return; } let inner_item = inner.into_resolver().through_type_refs().resolve(ctx); let name = item.canonical_name(ctx); let inner_rust_type = { if let TypeKind::Function(fnsig) = inner_item.kind().expect_type().kind() { utils::fnsig_block(ctx, fnsig) } else { panic!("invalid block typedef: {:?}", inner_item) } }; let rust_name = ctx.rust_ident(&name); let mut tokens = if let Some(comment) = item.comment(ctx) { attributes::doc(comment) } else { quote! {} }; tokens.append_all(quote! { pub type #rust_name = #inner_rust_type ; }); result.push(tokens); result.saw_block(); } TypeKind::Comp(ref ci) => ci.codegen(ctx, result, item), TypeKind::TemplateAlias(inner, _) | TypeKind::Alias(inner) => { let inner_item = inner.into_resolver().through_type_refs().resolve(ctx); let name = item.canonical_name(ctx); let path = item.canonical_path(ctx); { let through_type_aliases = inner .into_resolver() .through_type_refs() .through_type_aliases() .resolve(ctx); // Try to catch the common pattern: // // typedef struct foo { ... } foo; // // here, and also other more complex cases like #946. if through_type_aliases.canonical_path(ctx) == path { return; } } // If this is a known named type, disallow generating anything // for it too. let spelling = self.name().expect("Unnamed alias?"); if utils::type_from_named(ctx, spelling).is_some() { return; } let mut outer_params = item.used_template_params(ctx); let is_opaque = item.is_opaque(ctx, &()); let inner_rust_type = if is_opaque { outer_params = vec![]; self.to_opaque(ctx, item) } else { // Its possible that we have better layout information than // the inner type does, so fall back to an opaque blob based // on our layout if converting the inner item fails. let mut inner_ty = inner_item .try_to_rust_ty_or_opaque(ctx, &()) .unwrap_or_else(|_| self.to_opaque(ctx, item)); inner_ty.append_implicit_template_params(ctx, inner_item); inner_ty }; { // FIXME(emilio): This is a workaround to avoid generating // incorrect type aliases because of types that we haven't // been able to resolve (because, eg, they depend on a // template parameter). // // It's kind of a shame not generating them even when they // could be referenced, but we already do the same for items // with invalid template parameters, and at least this way // they can be replaced, instead of generating plain invalid // code. 
let inner_canon_type = inner_item.expect_type().canonical_type(ctx); if inner_canon_type.is_invalid_type_param() { warn!( "Item contained invalid named type, skipping: \ {:?}, {:?}", item, inner_item ); return; } } let rust_name = ctx.rust_ident(&name); let mut tokens = if let Some(comment) = item.comment(ctx) { attributes::doc(comment) } else { quote! {} }; let alias_style = if ctx.options().type_alias.matches(&name) { AliasVariation::TypeAlias } else if ctx.options().new_type_alias.matches(&name) { AliasVariation::NewType } else if ctx.options().new_type_alias_deref.matches(&name) { AliasVariation::NewTypeDeref } else { ctx.options().default_alias_style }; // We prefer using `pub use` over `pub type` because of: // https://github.com/rust-lang/rust/issues/26264 // These are the only characters allowed in simple // paths, eg `good::dogs::Bront`. if inner_rust_type.to_string().chars().all(|c| matches!(c, 'A'..='Z' | 'a'..='z' | '0'..='9' | ':' | '_' | ' ')) && outer_params.is_empty() && !is_opaque && alias_style == AliasVariation::TypeAlias && inner_item.expect_type().canonical_type(ctx).is_enum() { tokens.append_all(quote! { pub use }); let path = top_level_path(ctx, item); tokens.append_separated(path, quote!(::)); tokens.append_all(quote! { :: #inner_rust_type as #rust_name ; }); result.push(tokens); return; } tokens.append_all(match alias_style { AliasVariation::TypeAlias => quote! { pub type #rust_name }, AliasVariation::NewType | AliasVariation::NewTypeDeref => { assert!( ctx.options().rust_features().repr_transparent, "repr_transparent feature is required to use {:?}", alias_style ); let mut attributes = vec![attributes::repr("transparent")]; let derivable_traits = derives_of_item(item, ctx); if !derivable_traits.is_empty() { let derives: Vec<_> = derivable_traits.into(); attributes.push(attributes::derives(&derives)) } quote! { #( #attributes )* pub struct #rust_name } } }); let params: Vec<_> = outer_params .into_iter() .filter_map(|p| p.as_template_param(ctx, &())) .collect(); if params .iter() .any(|p| ctx.resolve_type(*p).is_invalid_type_param()) { warn!( "Item contained invalid template \ parameter: {:?}", item ); return; } let params: Vec<_> = params .iter() .map(|p| { p.try_to_rust_ty(ctx, &()).expect( "type parameters can always convert to rust ty OK", ) }) .collect(); if !params.is_empty() { tokens.append_all(quote! { < #( #params ),* > }); } tokens.append_all(match alias_style { AliasVariation::TypeAlias => quote! { = #inner_rust_type ; }, AliasVariation::NewType | AliasVariation::NewTypeDeref => { quote! { (pub #inner_rust_type) ; } } }); if alias_style == AliasVariation::NewTypeDeref { let prefix = ctx.trait_prefix(); tokens.append_all(quote! { impl ::#prefix::ops::Deref for #rust_name { type Target = #inner_rust_type; #[inline] fn deref(&self) -> &Self::Target { &self.0 } } impl ::#prefix::ops::DerefMut for #rust_name { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } }); } result.push(tokens); } TypeKind::Enum(ref ei) => ei.codegen(ctx, result, item), TypeKind::ObjCId | TypeKind::ObjCSel => { result.saw_objc(); } TypeKind::ObjCInterface(ref interface) => { interface.codegen(ctx, result, item) } ref u @ TypeKind::UnresolvedTypeRef(..) => { unreachable!("Should have been resolved after parsing {:?}!", u) } } } } struct Vtable<'a> { item_id: ItemId, /// A reference to the originating compound object. 
    #[allow(dead_code)]
    comp_info: &'a CompInfo,
}

impl<'a> Vtable<'a> {
    fn new(item_id: ItemId, comp_info: &'a CompInfo) -> Self {
        Vtable { item_id, comp_info }
    }
}

impl<'a> CodeGenerator for Vtable<'a> {
    type Extra = Item;
    type Return = ();

    fn codegen<'b>(
        &self,
        ctx: &BindgenContext,
        result: &mut CodegenResult<'b>,
        item: &Item,
    ) {
        assert_eq!(item.id(), self.item_id);
        debug_assert!(item.is_enabled_for_codegen(ctx));
        let name = ctx.rust_ident(&self.canonical_name(ctx));

        // For now, we will only generate vtables for classes that:
        // - do not inherit from others (compilers merge VTable from
        //   primary parent class).
        // - do not contain a virtual destructor (requires ordering;
        //   platforms generate different vtables).
        if ctx.options().vtable_generation &&
            self.comp_info.base_members().is_empty() &&
            self.comp_info.destructor().is_none()
        {
            let class_ident = ctx.rust_ident(self.item_id.canonical_name(ctx));

            let methods = self
                .comp_info
                .methods()
                .iter()
                .filter_map(|m| {
                    if !m.is_virtual() {
                        return None;
                    }

                    let function_item = ctx.resolve_item(m.signature());
                    let function = function_item.expect_function();
                    let signature_item = ctx.resolve_item(function.signature());
                    let signature = match signature_item.expect_type().kind() {
                        TypeKind::Function(ref sig) => sig,
                        _ => panic!("Function signature type mismatch"),
                    };

                    // FIXME: Is there a canonical name without the class prepended?
                    let function_name = function_item.canonical_name(ctx);

                    // FIXME: Need to account for overloading with times_seen
                    // (separately from regular function path).
                    let function_name = ctx.rust_ident(function_name);
                    let mut args = utils::fnsig_arguments(ctx, signature);
                    let ret = utils::fnsig_return_ty(ctx, signature);

                    args[0] = if m.is_const() {
                        quote! { this: *const #class_ident }
                    } else {
                        quote! { this: *mut #class_ident }
                    };

                    Some(quote! {
                        pub #function_name : unsafe extern "C" fn( #( #args ),* ) #ret
                    })
                })
                .collect::<Vec<_>>();

            result.push(quote! {
                #[repr(C)]
                pub struct #name {
                    #( #methods ),*
                }
            })
        } else {
            // For the cases we don't support, simply generate an empty struct.
            let void = helpers::ast_ty::c_void(ctx);

            result.push(quote! {
                #[repr(C)]
                pub struct #name ( #void );
            });
        }
    }
}

impl<'a> ItemCanonicalName for Vtable<'a> {
    fn canonical_name(&self, ctx: &BindgenContext) -> String {
        format!("{}__bindgen_vtable", self.item_id.canonical_name(ctx))
    }
}

impl<'a> TryToRustTy for Vtable<'a> {
    type Extra = ();

    fn try_to_rust_ty(
        &self,
        ctx: &BindgenContext,
        _: &(),
    ) -> error::Result<proc_macro2::TokenStream> {
        let name = ctx.rust_ident(self.canonical_name(ctx));
        Ok(quote! {
            #name
        })
    }
}

impl CodeGenerator for TemplateInstantiation {
    type Extra = Item;
    type Return = ();

    fn codegen<'a>(
        &self,
        ctx: &BindgenContext,
        result: &mut CodegenResult<'a>,
        item: &Item,
    ) {
        debug_assert!(item.is_enabled_for_codegen(ctx));

        // Although uses of instantiations don't need code generation, and are
        // just converted to rust types in fields, vars, etc, we take this
        // opportunity to generate tests for their layout here. If the
        // instantiation is opaque, then it's presumably because we don't
        // properly understand it (maybe because of specializations), and so we
        // shouldn't emit layout tests either.
        if !ctx.options().layout_tests || self.is_opaque(ctx, item) {
            return;
        }

        // If there are any unbound type parameters, then we can't generate a
        // layout test because we aren't dealing with a concrete type with a
        // concrete size and alignment.
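        // For intuition, a successful run of the codegen below emits a test
        // roughly like the following (the type name is hypothetical, and the
        // real function name also mangles the template arguments):
        //
        //     #[test]
        //     fn __bindgen_test_layout_Foo_instantiation() {
        //         assert_eq!(::std::mem::size_of::<Foo>(), 8usize, ...);
        //         assert_eq!(::std::mem::align_of::<Foo>(), 4usize, ...);
        //     }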
if ctx.uses_any_template_parameters(item.id()) { return; } let layout = item.kind().expect_type().layout(ctx); if let Some(layout) = layout { let size = layout.size; let align = layout.align; let name = item.full_disambiguated_name(ctx); let mut fn_name = format!("__bindgen_test_layout_{}_instantiation", name); let times_seen = result.overload_number(&fn_name); if times_seen > 0 { write!(&mut fn_name, "_{}", times_seen).unwrap(); } let fn_name = ctx.rust_ident_raw(fn_name); let prefix = ctx.trait_prefix(); let ident = item.to_rust_ty_or_opaque(ctx, &()); let size_of_expr = quote! { ::#prefix::mem::size_of::<#ident>() }; let align_of_expr = quote! { ::#prefix::mem::align_of::<#ident>() }; let item = quote! { #[test] fn #fn_name() { assert_eq!(#size_of_expr, #size, concat!("Size of template specialization: ", stringify!(#ident))); assert_eq!(#align_of_expr, #align, concat!("Alignment of template specialization: ", stringify!(#ident))); } }; result.push(item); } } } /// Trait for implementing the code generation of a struct or union field. trait FieldCodegen<'a> { type Extra; fn codegen<F, M>( &self, ctx: &BindgenContext, fields_should_be_private: bool, codegen_depth: usize, accessor_kind: FieldAccessorKind, parent: &CompInfo, result: &mut CodegenResult, struct_layout: &mut StructLayoutTracker, fields: &mut F, methods: &mut M, extra: Self::Extra, ) where F: Extend<proc_macro2::TokenStream>, M: Extend<proc_macro2::TokenStream>; } impl<'a> FieldCodegen<'a> for Field { type Extra = (); fn codegen<F, M>( &self, ctx: &BindgenContext, fields_should_be_private: bool, codegen_depth: usize, accessor_kind: FieldAccessorKind, parent: &CompInfo, result: &mut CodegenResult, struct_layout: &mut StructLayoutTracker, fields: &mut F, methods: &mut M, _: (), ) where F: Extend<proc_macro2::TokenStream>, M: Extend<proc_macro2::TokenStream>, { match *self { Field::DataMember(ref data) => { data.codegen( ctx, fields_should_be_private, codegen_depth, accessor_kind, parent, result, struct_layout, fields, methods, (), ); } Field::Bitfields(ref unit) => { unit.codegen( ctx, fields_should_be_private, codegen_depth, accessor_kind, parent, result, struct_layout, fields, methods, (), ); } } } } impl<'a> FieldCodegen<'a> for FieldData { type Extra = (); fn codegen<F, M>( &self, ctx: &BindgenContext, fields_should_be_private: bool, codegen_depth: usize, accessor_kind: FieldAccessorKind, parent: &CompInfo, result: &mut CodegenResult, struct_layout: &mut StructLayoutTracker, fields: &mut F, methods: &mut M, _: (), ) where F: Extend<proc_macro2::TokenStream>, M: Extend<proc_macro2::TokenStream>, { // Bitfields are handled by `FieldCodegen` implementations for // `BitfieldUnit` and `Bitfield`. assert!(self.bitfield_width().is_none()); let field_item = self.ty().into_resolver().through_type_refs().resolve(ctx); let field_ty = field_item.expect_type(); let mut ty = self.ty().to_rust_ty_or_opaque(ctx, &()); ty.append_implicit_template_params(ctx, field_item); // NB: If supported, we use proper `union` types. let ty = if parent.is_union() && !struct_layout.is_rust_union() { result.saw_bindgen_union(); if ctx.options().enable_cxx_namespaces { quote! { root::__BindgenUnionField<#ty> } } else { quote! { __BindgenUnionField<#ty> } } } else if let Some(item) = field_ty.is_incomplete_array(ctx) { result.saw_incomplete_array(); let inner = item.to_rust_ty_or_opaque(ctx, &()); if ctx.options().enable_cxx_namespaces { quote! { root::__IncompleteArrayField<#inner> } } else { quote! 
{ __IncompleteArrayField<#inner> } } } else { ty }; let mut field = quote! {}; if ctx.options().generate_comments { if let Some(raw_comment) = self.comment() { let comment = comment::preprocess(raw_comment, codegen_depth + 1); field = attributes::doc(comment); } } let field_name = self .name() .map(|name| ctx.rust_mangle(name).into_owned()) .expect("Each field should have a name in codegen!"); let field_ident = ctx.rust_ident_raw(field_name.as_str()); if let Some(padding_field) = struct_layout.saw_field(&field_name, field_ty, self.offset()) { fields.extend(Some(padding_field)); } let is_private = (!self.is_public() && ctx.options().respect_cxx_access_specs) || self.annotations() .private_fields() .unwrap_or(fields_should_be_private); let accessor_kind = self.annotations().accessor_kind().unwrap_or(accessor_kind); if is_private { field.append_all(quote! { #field_ident : #ty , }); } else { field.append_all(quote! { pub #field_ident : #ty , }); } fields.extend(Some(field)); // TODO: Factor the following code out, please! if accessor_kind == FieldAccessorKind::None { return; } let getter_name = ctx.rust_ident_raw(format!("get_{}", field_name)); let mutable_getter_name = ctx.rust_ident_raw(format!("get_{}_mut", field_name)); let field_name = ctx.rust_ident_raw(field_name); methods.extend(Some(match accessor_kind { FieldAccessorKind::None => unreachable!(), FieldAccessorKind::Regular => { quote! { #[inline] pub fn #getter_name(&self) -> & #ty { &self.#field_name } #[inline] pub fn #mutable_getter_name(&mut self) -> &mut #ty { &mut self.#field_name } } } FieldAccessorKind::Unsafe => { quote! { #[inline] pub unsafe fn #getter_name(&self) -> & #ty { &self.#field_name } #[inline] pub unsafe fn #mutable_getter_name(&mut self) -> &mut #ty { &mut self.#field_name } } } FieldAccessorKind::Immutable => { quote! { #[inline] pub fn #getter_name(&self) -> & #ty { &self.#field_name } } } })); } } impl BitfieldUnit { /// Get the constructor name for this bitfield unit. fn ctor_name(&self) -> proc_macro2::TokenStream { let ctor_name = Ident::new( &format!("new_bitfield_{}", self.nth()), Span::call_site(), ); quote! { #ctor_name } } } impl Bitfield { /// Extend an under construction bitfield unit constructor with this /// bitfield. This sets the relevant bits on the `__bindgen_bitfield_unit` /// variable that's being constructed. fn extend_ctor_impl( &self, ctx: &BindgenContext, param_name: proc_macro2::TokenStream, mut ctor_impl: proc_macro2::TokenStream, ) -> proc_macro2::TokenStream { let bitfield_ty = ctx.resolve_type(self.ty()); let bitfield_ty_layout = bitfield_ty .layout(ctx) .expect("Bitfield without layout? Gah!"); let bitfield_int_ty = helpers::integer_type(ctx, bitfield_ty_layout) .expect( "Should already have verified that the bitfield is \ representable as an int", ); let offset = self.offset_into_unit(); let width = self.width() as u8; let prefix = ctx.trait_prefix(); ctor_impl.append_all(quote! { __bindgen_bitfield_unit.set( #offset, #width, { let #param_name: #bitfield_int_ty = unsafe { ::#prefix::mem::transmute(#param_name) }; #param_name as u64 } ); }); ctor_impl } } fn access_specifier( ctx: &BindgenContext, is_pub: bool, ) -> proc_macro2::TokenStream { if is_pub || !ctx.options().respect_cxx_access_specs { quote! { pub } } else { quote! 
{} } } impl<'a> FieldCodegen<'a> for BitfieldUnit { type Extra = (); fn codegen<F, M>( &self, ctx: &BindgenContext, fields_should_be_private: bool, codegen_depth: usize, accessor_kind: FieldAccessorKind, parent: &CompInfo, result: &mut CodegenResult, struct_layout: &mut StructLayoutTracker, fields: &mut F, methods: &mut M, _: (), ) where F: Extend<proc_macro2::TokenStream>, M: Extend<proc_macro2::TokenStream>, { use crate::ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT; result.saw_bitfield_unit(); let layout = self.layout(); let unit_field_ty = helpers::bitfield_unit(ctx, layout); let field_ty = { if parent.is_union() && !struct_layout.is_rust_union() { result.saw_bindgen_union(); if ctx.options().enable_cxx_namespaces { quote! { root::__BindgenUnionField<#unit_field_ty> } } else { quote! { __BindgenUnionField<#unit_field_ty> } } } else { unit_field_ty.clone() } }; { let align_field_name = format!("_bitfield_align_{}", self.nth()); let align_field_ident = ctx.rust_ident(&align_field_name); let align_ty = match self.layout().align { n if n >= 8 => quote! { u64 }, 4 => quote! { u32 }, 2 => quote! { u16 }, _ => quote! { u8 }, }; let align_field = quote! { pub #align_field_ident: [#align_ty; 0], }; fields.extend(Some(align_field)); } let unit_field_name = format!("_bitfield_{}", self.nth()); let unit_field_ident = ctx.rust_ident(&unit_field_name); let ctor_name = self.ctor_name(); let mut ctor_params = vec![]; let mut ctor_impl = quote! {}; // We cannot generate any constructor if the underlying storage can't // implement AsRef<[u8]> / AsMut<[u8]> / etc, or can't derive Default. // // We don't check `larger_arrays` here because Default does still have // the 32 items limitation. let mut generate_ctor = layout.size <= RUST_DERIVE_IN_ARRAY_LIMIT; let mut access_spec = !fields_should_be_private; for bf in self.bitfields() { // Codegen not allowed for anonymous bitfields if bf.name().is_none() { continue; } if layout.size > RUST_DERIVE_IN_ARRAY_LIMIT && !ctx.options().rust_features().larger_arrays { continue; } access_spec &= bf.is_public(); let mut bitfield_representable_as_int = true; bf.codegen( ctx, fields_should_be_private, codegen_depth, accessor_kind, parent, result, struct_layout, fields, methods, (&unit_field_name, &mut bitfield_representable_as_int), ); // Generating a constructor requires the bitfield to be representable as an integer. if !bitfield_representable_as_int { generate_ctor = false; continue; } let param_name = bitfield_getter_name(ctx, bf); let bitfield_ty_item = ctx.resolve_item(bf.ty()); let bitfield_ty = bitfield_ty_item.expect_type(); let bitfield_ty = bitfield_ty.to_rust_ty_or_opaque(ctx, bitfield_ty_item); ctor_params.push(quote! { #param_name : #bitfield_ty }); ctor_impl = bf.extend_ctor_impl(ctx, param_name, ctor_impl); } let access_spec = access_specifier(ctx, access_spec); let field = quote! { #access_spec #unit_field_ident : #field_ty , }; fields.extend(Some(field)); if generate_ctor { methods.extend(Some(quote! { #[inline] #access_spec fn #ctor_name ( #( #ctor_params ),* ) -> #unit_field_ty { let mut __bindgen_bitfield_unit: #unit_field_ty = Default::default(); #ctor_impl __bindgen_bitfield_unit } })); } struct_layout.saw_bitfield_unit(layout); } } fn bitfield_getter_name( ctx: &BindgenContext, bitfield: &Bitfield, ) -> proc_macro2::TokenStream { let name = bitfield.getter_name(); let name = ctx.rust_ident_raw(name); quote! 
{ #name } } fn bitfield_setter_name( ctx: &BindgenContext, bitfield: &Bitfield, ) -> proc_macro2::TokenStream { let setter = bitfield.setter_name(); let setter = ctx.rust_ident_raw(setter); quote! { #setter } } impl<'a> FieldCodegen<'a> for Bitfield { type Extra = (&'a str, &'a mut bool); fn codegen<F, M>( &self, ctx: &BindgenContext, fields_should_be_private: bool, _codegen_depth: usize, _accessor_kind: FieldAccessorKind, parent: &CompInfo, _result: &mut CodegenResult, struct_layout: &mut StructLayoutTracker, _fields: &mut F, methods: &mut M, (unit_field_name, bitfield_representable_as_int): (&'a str, &mut bool), ) where F: Extend<proc_macro2::TokenStream>, M: Extend<proc_macro2::TokenStream>, { let prefix = ctx.trait_prefix(); let getter_name = bitfield_getter_name(ctx, self); let setter_name = bitfield_setter_name(ctx, self); let unit_field_ident = Ident::new(unit_field_name, Span::call_site()); let bitfield_ty_item = ctx.resolve_item(self.ty()); let bitfield_ty = bitfield_ty_item.expect_type(); let bitfield_ty_layout = bitfield_ty .layout(ctx) .expect("Bitfield without layout? Gah!"); let bitfield_int_ty = match helpers::integer_type(ctx, bitfield_ty_layout) { Some(int_ty) => { *bitfield_representable_as_int = true; int_ty } None => { *bitfield_representable_as_int = false; return; } }; let bitfield_ty = bitfield_ty.to_rust_ty_or_opaque(ctx, bitfield_ty_item); let offset = self.offset_into_unit(); let width = self.width() as u8; let access_spec = access_specifier( ctx, self.is_public() && !fields_should_be_private, ); if parent.is_union() && !struct_layout.is_rust_union() { methods.extend(Some(quote! { #[inline] #access_spec fn #getter_name(&self) -> #bitfield_ty { unsafe { ::#prefix::mem::transmute( self.#unit_field_ident.as_ref().get(#offset, #width) as #bitfield_int_ty ) } } #[inline] #access_spec fn #setter_name(&mut self, val: #bitfield_ty) { unsafe { let val: #bitfield_int_ty = ::#prefix::mem::transmute(val); self.#unit_field_ident.as_mut().set( #offset, #width, val as u64 ) } } })); } else { methods.extend(Some(quote! { #[inline] #access_spec fn #getter_name(&self) -> #bitfield_ty { unsafe { ::#prefix::mem::transmute( self.#unit_field_ident.get(#offset, #width) as #bitfield_int_ty ) } } #[inline] #access_spec fn #setter_name(&mut self, val: #bitfield_ty) { unsafe { let val: #bitfield_int_ty = ::#prefix::mem::transmute(val); self.#unit_field_ident.set( #offset, #width, val as u64 ) } } })); } } } impl CodeGenerator for CompInfo { type Extra = Item; type Return = (); fn codegen<'a>( &self, ctx: &BindgenContext, result: &mut CodegenResult<'a>, item: &Item, ) { debug!("<CompInfo as CodeGenerator>::codegen: item = {:?}", item); debug_assert!(item.is_enabled_for_codegen(ctx)); // Don't output classes with template parameters that aren't types, and // also don't output template specializations, neither total or partial. if self.has_non_type_template_params() { return; } let ty = item.expect_type(); let layout = ty.layout(ctx); let mut packed = self.is_packed(ctx, layout.as_ref()); let canonical_name = item.canonical_name(ctx); let canonical_ident = ctx.rust_ident(&canonical_name); // Generate the vtable from the method list if appropriate. // // TODO: I don't know how this could play with virtual methods that are // not in the list of methods found by us, we'll see. Also, could the // order of the vtable pointers vary? 
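// As an illustration (class name hypothetical): for a class `Foo` carrying a
// vtable pointer, the code below emits a companion vtable struct plus a
// leading field in `Foo` itself, roughly:
//
//     #[repr(C)]
//     pub struct Foo__bindgen_vtable(::std::os::raw::c_void);
//
//     pub struct Foo {
//         pub vtable_: *const Foo__bindgen_vtable,
//         ...
//     }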
// // FIXME: Once we generate proper vtables, we need to codegen the // vtable, but *not* generate a field for it in the case that // HasVtable::has_vtable_ptr is false but HasVtable::has_vtable is true. // // Also, we need to generate the vtable in such a way it "inherits" from // the parent too. let is_opaque = item.is_opaque(ctx, &()); let mut fields = vec![]; let mut struct_layout = StructLayoutTracker::new(ctx, self, ty, &canonical_name); if !is_opaque { if item.has_vtable_ptr(ctx) { let vtable = Vtable::new(item.id(), self); vtable.codegen(ctx, result, item); let vtable_type = vtable .try_to_rust_ty(ctx, &()) .expect("vtable to Rust type conversion is infallible") .to_ptr(true); fields.push(quote! { pub vtable_: #vtable_type , }); struct_layout.saw_vtable(); } for base in self.base_members() { if !base.requires_storage(ctx) { continue; } let inner_item = ctx.resolve_item(base.ty); let mut inner = inner_item.to_rust_ty_or_opaque(ctx, &()); inner.append_implicit_template_params(ctx, inner_item); let field_name = ctx.rust_ident(&base.field_name); struct_layout.saw_base(inner_item.expect_type()); let access_spec = access_specifier(ctx, base.is_public()); fields.push(quote! { #access_spec #field_name: #inner, }); } } let mut methods = vec![]; if !is_opaque { let codegen_depth = item.codegen_depth(ctx); let fields_should_be_private = item.annotations().private_fields().unwrap_or(false); let struct_accessor_kind = item .annotations() .accessor_kind() .unwrap_or(FieldAccessorKind::None); for field in self.fields() { field.codegen( ctx, fields_should_be_private, codegen_depth, struct_accessor_kind, self, result, &mut struct_layout, &mut fields, &mut methods, (), ); } // Check whether an explicit padding field is needed // at the end. if let Some(comp_layout) = layout { fields.extend( struct_layout .add_tail_padding(&canonical_name, comp_layout), ); } } if is_opaque { // Opaque item should not have generated methods, fields. debug_assert!(fields.is_empty()); debug_assert!(methods.is_empty()); } let is_union = self.kind() == CompKind::Union; let layout = item.kind().expect_type().layout(ctx); let zero_sized = item.is_zero_sized(ctx); let forward_decl = self.is_forward_declaration(); let mut explicit_align = None; // C++ requires every struct to be addressable, so what C++ compilers do // is making the struct 1-byte sized. // // This is apparently not the case for C, see: // https://github.com/rust-lang/rust-bindgen/issues/551 // // Just get the layout, and assume C++ if not. // // NOTE: This check is conveniently here to avoid the dummy fields we // may add for unused template parameters. if !forward_decl && zero_sized { let has_address = if is_opaque { // Generate the address field if it's an opaque type and // couldn't determine the layout of the blob. layout.is_none() } else { layout.map_or(true, |l| l.size != 0) }; if has_address { let layout = Layout::new(1, 1); let ty = helpers::blob(ctx, Layout::new(1, 1)); struct_layout.saw_field_with_layout( "_address", layout, /* offset = */ Some(0), ); fields.push(quote! { pub _address: #ty, }); } } if is_opaque { match layout { Some(l) => { explicit_align = Some(l.align); let ty = helpers::blob(ctx, l); fields.push(quote! { pub _bindgen_opaque_blob: #ty , }); } None => { warn!("Opaque type without layout! 
Expect dragons!"); } } } else if !is_union && !zero_sized { if let Some(padding_field) = layout.and_then(|layout| struct_layout.pad_struct(layout)) { fields.push(padding_field); } if let Some(layout) = layout { if struct_layout.requires_explicit_align(layout) { if layout.align == 1 { packed = true; } else { explicit_align = Some(layout.align); if !ctx.options().rust_features.repr_align { let ty = helpers::blob( ctx, Layout::new(0, layout.align), ); fields.push(quote! { pub __bindgen_align: #ty , }); } } } } } else if is_union && !forward_decl { // TODO(emilio): It'd be nice to unify this with the struct path // above somehow. let layout = layout.expect("Unable to get layout information?"); if struct_layout.requires_explicit_align(layout) { explicit_align = Some(layout.align); } if !struct_layout.is_rust_union() { let ty = helpers::blob(ctx, layout); fields.push(quote! { pub bindgen_union_field: #ty , }) } } if forward_decl { fields.push(quote! { _unused: [u8; 0], }); } let mut generic_param_names = vec![]; for (idx, ty) in item.used_template_params(ctx).iter().enumerate() { let param = ctx.resolve_type(*ty); let name = param.name().unwrap(); let ident = ctx.rust_ident(name); generic_param_names.push(ident.clone()); let prefix = ctx.trait_prefix(); let field_name = ctx.rust_ident(format!("_phantom_{}", idx)); fields.push(quote! { pub #field_name : ::#prefix::marker::PhantomData< ::#prefix::cell::UnsafeCell<#ident> > , }); } let generics = if !generic_param_names.is_empty() { let generic_param_names = generic_param_names.clone(); quote! { < #( #generic_param_names ),* > } } else { quote! {} }; let mut attributes = vec![]; let mut needs_clone_impl = false; let mut needs_default_impl = false; let mut needs_debug_impl = false; let mut needs_partialeq_impl = false; if let Some(comment) = item.comment(ctx) { attributes.push(attributes::doc(comment)); } if packed && !is_opaque { let n = layout.map_or(1, |l| l.align); assert!(ctx.options().rust_features().repr_packed_n || n == 1); let packed_repr = if n == 1 { "packed".to_string() } else { format!("packed({})", n) }; attributes.push(attributes::repr_list(&["C", &packed_repr])); } else { attributes.push(attributes::repr("C")); } if ctx.options().rust_features().repr_align { if let Some(explicit) = explicit_align { // Ensure that the struct has the correct alignment even in // presence of alignas. let explicit = helpers::ast_ty::int_expr(explicit as i64); attributes.push(quote! 
{ #[repr(align(#explicit))] });
            }
        }

        let derivable_traits = derives_of_item(item, ctx);
        if !derivable_traits.contains(DerivableTraits::DEBUG) {
            needs_debug_impl = ctx.options().derive_debug &&
                ctx.options().impl_debug &&
                !ctx.no_debug_by_name(item) &&
                !item.annotations().disallow_debug();
        }

        if !derivable_traits.contains(DerivableTraits::DEFAULT) {
            needs_default_impl = ctx.options().derive_default &&
                !self.is_forward_declaration() &&
                !ctx.no_default_by_name(item) &&
                !item.annotations().disallow_default();
        }

        let all_template_params = item.all_template_params(ctx);

        if derivable_traits.contains(DerivableTraits::COPY) &&
            !derivable_traits.contains(DerivableTraits::CLONE)
        {
            needs_clone_impl = true;
        }

        if !derivable_traits.contains(DerivableTraits::PARTIAL_EQ) {
            needs_partialeq_impl = ctx.options().derive_partialeq &&
                ctx.options().impl_partialeq &&
                ctx.lookup_can_derive_partialeq_or_partialord(item.id()) ==
                    CanDerive::Manually;
        }

        let mut derives: Vec<_> = derivable_traits.into();
        derives.extend(item.annotations().derives().iter().map(String::as_str));

        // The custom derives callback may return a list of derive attributes;
        // add them to the end of the list.
        let custom_derives;
        if let Some(cb) = &ctx.options().parse_callbacks {
            custom_derives = cb.add_derives(&canonical_name);
            // In most cases this will be a no-op, since custom_derives will be empty.
            derives.extend(custom_derives.iter().map(|s| s.as_str()));
        };

        if !derives.is_empty() {
            attributes.push(attributes::derives(&derives))
        }

        if item.annotations().must_use_type() || ctx.must_use_type_by_name(item)
        {
            attributes.push(attributes::must_use());
        }

        let mut tokens = if is_union && struct_layout.is_rust_union() {
            quote! {
                #( #attributes )*
                pub union #canonical_ident
            }
        } else {
            quote! {
                #( #attributes )*
                pub struct #canonical_ident
            }
        };

        tokens.append_all(quote! {
            #generics {
                #( #fields )*
            }
        });
        result.push(tokens);

        // Generate the inner types and all that stuff.
        //
        // TODO: In the future we might want to be smart, and use nested
        // modules, and whatnot.
        for ty in self.inner_types() {
            let child_item = ctx.resolve_item(*ty);
            // assert_eq!(child_item.parent_id(), item.id());
            child_item.codegen(ctx, result, &());
        }

        // NOTE: Some unexposed attributes (like alignment attributes) may
        // affect layout, so we're bad and pray to the gods to avoid sending
        // all the tests to shit when parsing things like max_align_t.
        if self.found_unknown_attr() {
            warn!(
                "Type {} has an unknown attribute that may affect layout",
                canonical_ident
            );
        }

        if all_template_params.is_empty() {
            if !is_opaque {
                for var in self.inner_vars() {
                    ctx.resolve_item(*var).codegen(ctx, result, &());
                }
            }

            if ctx.options().layout_tests && !self.is_forward_declaration() {
                if let Some(layout) = layout {
                    let fn_name =
                        format!("bindgen_test_layout_{}", canonical_ident);
                    let fn_name = ctx.rust_ident_raw(fn_name);
                    let prefix = ctx.trait_prefix();
                    let size_of_expr = quote! {
                        ::#prefix::mem::size_of::<#canonical_ident>()
                    };
                    let align_of_expr = quote! {
                        ::#prefix::mem::align_of::<#canonical_ident>()
                    };
                    let size = layout.size;
                    let align = layout.align;
                    let check_struct_align = if align >
                        ctx.target_pointer_size() &&
                        !ctx.options().rust_features().repr_align
                    {
                        None
                    } else {
                        Some(quote!
{ assert_eq!(#align_of_expr, #align, concat!("Alignment of ", stringify!(#canonical_ident))); }) }; // FIXME when [issue #465](https://github.com/rust-lang/rust-bindgen/issues/465) ready let too_many_base_vtables = self .base_members() .iter() .filter(|base| base.ty.has_vtable(ctx))<|fim▁hole|> 1; let should_skip_field_offset_checks = is_opaque || too_many_base_vtables; let check_field_offset = if should_skip_field_offset_checks { vec![] } else { let asserts = self.fields() .iter() .filter_map(|field| match *field { Field::DataMember(ref f) if f.name().is_some() => Some(f), _ => None, }) .flat_map(|field| { let name = field.name().unwrap(); field.offset().map(|offset| { let field_offset = offset / 8; let field_name = ctx.rust_ident(name); quote! { assert_eq!( unsafe { &(*(::#prefix::ptr::null::<#canonical_ident>())).#field_name as *const _ as usize }, #field_offset, concat!("Offset of field: ", stringify!(#canonical_ident), "::", stringify!(#field_name)) ); } }) }) .collect::<Vec<proc_macro2::TokenStream>>(); asserts }; let item = quote! { #[test] fn #fn_name() { assert_eq!(#size_of_expr, #size, concat!("Size of: ", stringify!(#canonical_ident))); #check_struct_align #( #check_field_offset )* } }; result.push(item); } } let mut method_names = Default::default(); if ctx.options().codegen_config.methods() { for method in self.methods() { assert!(method.kind() != MethodKind::Constructor); method.codegen_method( ctx, &mut methods, &mut method_names, result, self, ); } } if ctx.options().codegen_config.constructors() { for sig in self.constructors() { Method::new( MethodKind::Constructor, *sig, /* const */ false, ) .codegen_method( ctx, &mut methods, &mut method_names, result, self, ); } } if ctx.options().codegen_config.destructors() { if let Some((kind, destructor)) = self.destructor() { debug_assert!(kind.is_destructor()); Method::new(kind, destructor, false).codegen_method( ctx, &mut methods, &mut method_names, result, self, ); } } } // NB: We can't use to_rust_ty here since for opaque types this tries to // use the specialization knowledge to generate a blob field. let ty_for_impl = quote! { #canonical_ident #generics }; if needs_clone_impl { result.push(quote! { impl #generics Clone for #ty_for_impl { fn clone(&self) -> Self { *self } } }); } if needs_default_impl { let prefix = ctx.trait_prefix(); let body = if ctx.options().rust_features().maybe_uninit { quote! { let mut s = ::#prefix::mem::MaybeUninit::<Self>::uninit(); unsafe { ::#prefix::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } else { quote! { unsafe { let mut s: Self = ::#prefix::mem::uninitialized(); ::#prefix::ptr::write_bytes(&mut s, 0, 1); s } } }; // Note we use `ptr::write_bytes()` instead of `mem::zeroed()` because the latter does // not necessarily ensure padding bytes are zeroed. Some C libraries are sensitive to // non-zero padding bytes, especially when forwards/backwards compatability is // involved. result.push(quote! { impl #generics Default for #ty_for_impl { fn default() -> Self { #body } } }); } if needs_debug_impl { let impl_ = impl_debug::gen_debug_impl( ctx, self.fields(), item, self.kind(), ); let prefix = ctx.trait_prefix(); result.push(quote! { impl #generics ::#prefix::fmt::Debug for #ty_for_impl { #impl_ } }); } if needs_partialeq_impl { if let Some(impl_) = impl_partialeq::gen_partialeq_impl( ctx, self, item, &ty_for_impl, ) { let partialeq_bounds = if !generic_param_names.is_empty() { let bounds = generic_param_names.iter().map(|t| { quote! { #t: PartialEq } }); quote! 
{ where #( #bounds ),* } } else { quote! {} }; let prefix = ctx.trait_prefix(); result.push(quote! { impl #generics ::#prefix::cmp::PartialEq for #ty_for_impl #partialeq_bounds { #impl_ } }); } } if !methods.is_empty() { result.push(quote! { impl #generics #ty_for_impl { #( #methods )* } }); } } } trait MethodCodegen { fn codegen_method<'a>( &self, ctx: &BindgenContext, methods: &mut Vec<proc_macro2::TokenStream>, method_names: &mut HashMap<String, usize>, result: &mut CodegenResult<'a>, parent: &CompInfo, ); } impl MethodCodegen for Method { fn codegen_method<'a>( &self, ctx: &BindgenContext, methods: &mut Vec<proc_macro2::TokenStream>, method_names: &mut HashMap<String, usize>, result: &mut CodegenResult<'a>, _parent: &CompInfo, ) { assert!({ let cc = &ctx.options().codegen_config; match self.kind() { MethodKind::Constructor => cc.constructors(), MethodKind::Destructor => cc.destructors(), MethodKind::VirtualDestructor { .. } => cc.destructors(), MethodKind::Static | MethodKind::Normal | MethodKind::Virtual { .. } => cc.methods(), } }); // TODO(emilio): We could generate final stuff at least. if self.is_virtual() { return; // FIXME } // First of all, output the actual function. let function_item = ctx.resolve_item(self.signature()); if !function_item.process_before_codegen(ctx, result) { return; } let function = function_item.expect_function(); let times_seen = function.codegen(ctx, result, function_item); let times_seen = match times_seen { Some(seen) => seen, None => return, }; let signature_item = ctx.resolve_item(function.signature()); let mut name = match self.kind() { MethodKind::Constructor => "new".into(), MethodKind::Destructor => "destruct".into(), _ => function.name().to_owned(), }; let signature = match *signature_item.expect_type().kind() { TypeKind::Function(ref sig) => sig, _ => panic!("How in the world?"), }; if let (Abi::ThisCall, false) = (signature.abi(), ctx.options().rust_features().thiscall_abi) { return; } // Do not generate variadic methods, since rust does not allow // implementing them, and we don't do a good job at it anyway. if signature.is_variadic() { return; } let count = { let count = method_names.entry(name.clone()).or_insert(0); *count += 1; *count - 1 }; if count != 0 { name.push_str(&count.to_string()); } let mut function_name = function_item.canonical_name(ctx); if times_seen > 0 { write!(&mut function_name, "{}", times_seen).unwrap(); } let function_name = ctx.rust_ident(function_name); let mut args = utils::fnsig_arguments(ctx, signature); let mut ret = utils::fnsig_return_ty(ctx, signature); if !self.is_static() && !self.is_constructor() { args[0] = if self.is_const() { quote! { &self } } else { quote! { &mut self } }; } // If it's a constructor, we always return `Self`, and we inject the // "this" parameter, so there's no need to ask the user for it. // // Note that constructors in Clang are represented as functions with // return-type = void. if self.is_constructor() { args.remove(0); ret = quote! { -> Self }; } let mut exprs = helpers::ast_ty::arguments_from_signature(signature, ctx); let mut stmts = vec![]; // If it's a constructor, we need to insert an extra parameter with a // variable called `__bindgen_tmp` we're going to create. if self.is_constructor() { let prefix = ctx.trait_prefix(); let tmp_variable_decl = if ctx .options() .rust_features() .maybe_uninit { exprs[0] = quote! { __bindgen_tmp.as_mut_ptr() }; quote! { let mut __bindgen_tmp = ::#prefix::mem::MaybeUninit::uninit() } } else { exprs[0] = quote! { &mut __bindgen_tmp }; quote! 
{ let mut __bindgen_tmp = ::#prefix::mem::uninitialized() } }; stmts.push(tmp_variable_decl); } else if !self.is_static() { assert!(!exprs.is_empty()); exprs[0] = quote! { self }; }; let call = quote! { #function_name (#( #exprs ),* ) }; stmts.push(call); if self.is_constructor() { stmts.push(if ctx.options().rust_features().maybe_uninit { quote! { __bindgen_tmp.assume_init() } } else { quote! { __bindgen_tmp } }) } let block = quote! { #( #stmts );* }; let mut attrs = vec![attributes::inline()]; if signature.must_use() && ctx.options().rust_features().must_use_function { attrs.push(attributes::must_use()); } let name = ctx.rust_ident(&name); methods.push(quote! { #(#attrs)* pub unsafe fn #name ( #( #args ),* ) #ret { #block } }); } } /// A helper type that represents different enum variations. #[derive(Copy, Clone, PartialEq, Debug)] pub enum EnumVariation { /// The code for this enum will use a Rust enum. Note that creating this in unsafe code /// (including FFI) with an invalid value will invoke undefined behaviour, whether or not /// its marked as non_exhaustive. Rust { /// Indicates whether the generated struct should be `#[non_exhaustive]` non_exhaustive: bool, }, /// The code for this enum will use a newtype NewType { /// Indicates whether the newtype will have bitwise operators is_bitfield: bool, }, /// The code for this enum will use consts Consts, /// The code for this enum will use a module containing consts ModuleConsts, } impl EnumVariation { fn is_rust(&self) -> bool { matches!(*self, EnumVariation::Rust { .. }) } /// Both the `Const` and `ModuleConsts` variants will cause this to return /// true. fn is_const(&self) -> bool { matches!(*self, EnumVariation::Consts | EnumVariation::ModuleConsts) } } impl Default for EnumVariation { fn default() -> EnumVariation { EnumVariation::Consts } } impl std::str::FromStr for EnumVariation { type Err = std::io::Error; /// Create a `EnumVariation` from a string. fn from_str(s: &str) -> Result<Self, Self::Err> { match s { "rust" => Ok(EnumVariation::Rust { non_exhaustive: false, }), "rust_non_exhaustive" => Ok(EnumVariation::Rust { non_exhaustive: true, }), "bitfield" => Ok(EnumVariation::NewType { is_bitfield: true }), "consts" => Ok(EnumVariation::Consts), "moduleconsts" => Ok(EnumVariation::ModuleConsts), "newtype" => Ok(EnumVariation::NewType { is_bitfield: false }), _ => Err(std::io::Error::new( std::io::ErrorKind::InvalidInput, concat!( "Got an invalid EnumVariation. Accepted values ", "are 'rust', 'rust_non_exhaustive', 'bitfield', 'consts',", "'moduleconsts', and 'newtype'." ), )), } } } /// A helper type to construct different enum variations. enum EnumBuilder<'a> { Rust { codegen_depth: usize, attrs: Vec<proc_macro2::TokenStream>, ident: Ident, tokens: proc_macro2::TokenStream, emitted_any_variants: bool, }, NewType { codegen_depth: usize, canonical_name: &'a str, tokens: proc_macro2::TokenStream, is_bitfield: bool, }, Consts { repr: proc_macro2::TokenStream, variants: Vec<proc_macro2::TokenStream>, codegen_depth: usize, }, ModuleConsts { codegen_depth: usize, module_name: &'a str, module_items: Vec<proc_macro2::TokenStream>, }, } impl<'a> EnumBuilder<'a> { /// Returns the depth of the code generation for a variant of this enum. fn codegen_depth(&self) -> usize { match *self { EnumBuilder::Rust { codegen_depth, .. } | EnumBuilder::NewType { codegen_depth, .. } | EnumBuilder::ModuleConsts { codegen_depth, .. } | EnumBuilder::Consts { codegen_depth, .. } => codegen_depth, } } /// Returns true if the builder is for a rustified enum. 
fn is_rust_enum(&self) -> bool { matches!(*self, EnumBuilder::Rust { .. }) } /// Create a new enum given an item builder, a canonical name, a name for /// the representation, and which variation it should be generated as. fn new( name: &'a str, mut attrs: Vec<proc_macro2::TokenStream>, repr: proc_macro2::TokenStream, enum_variation: EnumVariation, enum_codegen_depth: usize, ) -> Self { let ident = Ident::new(name, Span::call_site()); match enum_variation { EnumVariation::NewType { is_bitfield } => EnumBuilder::NewType { codegen_depth: enum_codegen_depth, canonical_name: name, tokens: quote! { #( #attrs )* pub struct #ident (pub #repr); }, is_bitfield, }, EnumVariation::Rust { .. } => { // `repr` is guaranteed to be Rustified in Enum::codegen attrs.insert(0, quote! { #[repr( #repr )] }); let tokens = quote!(); EnumBuilder::Rust { codegen_depth: enum_codegen_depth + 1, attrs, ident, tokens, emitted_any_variants: false, } } EnumVariation::Consts => { let mut variants = Vec::new(); variants.push(quote! { #( #attrs )* pub type #ident = #repr; }); EnumBuilder::Consts { repr, variants, codegen_depth: enum_codegen_depth, } } EnumVariation::ModuleConsts => { let ident = Ident::new( CONSTIFIED_ENUM_MODULE_REPR_NAME, Span::call_site(), ); let type_definition = quote! { #( #attrs )* pub type #ident = #repr; }; EnumBuilder::ModuleConsts { codegen_depth: enum_codegen_depth + 1, module_name: name, module_items: vec![type_definition], } } } } /// Add a variant to this enum. fn with_variant<'b>( self, ctx: &BindgenContext, variant: &EnumVariant, mangling_prefix: Option<&str>, rust_ty: proc_macro2::TokenStream, result: &mut CodegenResult<'b>, is_ty_named: bool, ) -> Self { let variant_name = ctx.rust_mangle(variant.name()); let is_rust_enum = self.is_rust_enum(); let expr = match variant.val() { EnumVariantValue::Boolean(v) if is_rust_enum => { helpers::ast_ty::uint_expr(v as u64) } EnumVariantValue::Boolean(v) => quote!(#v), EnumVariantValue::Signed(v) => helpers::ast_ty::int_expr(v), EnumVariantValue::Unsigned(v) => helpers::ast_ty::uint_expr(v), }; let mut doc = quote! {}; if ctx.options().generate_comments { if let Some(raw_comment) = variant.comment() { let comment = comment::preprocess(raw_comment, self.codegen_depth()); doc = attributes::doc(comment); } } match self { EnumBuilder::Rust { attrs, ident, tokens, emitted_any_variants: _, codegen_depth, } => { let name = ctx.rust_ident(variant_name); EnumBuilder::Rust { attrs, ident, codegen_depth, tokens: quote! { #tokens #doc #name = #expr, }, emitted_any_variants: true, } } EnumBuilder::NewType { canonical_name, .. } => { if ctx.options().rust_features().associated_const && is_ty_named { let enum_ident = ctx.rust_ident(canonical_name); let variant_ident = ctx.rust_ident(variant_name); result.push(quote! { impl #enum_ident { #doc pub const #variant_ident : #rust_ty = #rust_ty ( #expr ); } }); } else { let ident = ctx.rust_ident(match mangling_prefix { Some(prefix) => { Cow::Owned(format!("{}_{}", prefix, variant_name)) } None => variant_name, }); result.push(quote! { #doc pub const #ident : #rust_ty = #rust_ty ( #expr ); }); } self } EnumBuilder::Consts { ref repr, .. } => { let constant_name = match mangling_prefix { Some(prefix) => { Cow::Owned(format!("{}_{}", prefix, variant_name)) } None => variant_name, }; let ty = if is_ty_named { &rust_ty } else { repr }; let ident = ctx.rust_ident(constant_name); result.push(quote! 
{ #doc pub const #ident : #ty = #expr ; }); self } EnumBuilder::ModuleConsts { codegen_depth, module_name, mut module_items, } => { let name = ctx.rust_ident(variant_name); let ty = ctx.rust_ident(CONSTIFIED_ENUM_MODULE_REPR_NAME); module_items.push(quote! { #doc pub const #name : #ty = #expr ; }); EnumBuilder::ModuleConsts { module_name, module_items, codegen_depth, } } } } fn build<'b>( self, ctx: &BindgenContext, rust_ty: proc_macro2::TokenStream, result: &mut CodegenResult<'b>, ) -> proc_macro2::TokenStream { match self { EnumBuilder::Rust { attrs, ident, tokens, emitted_any_variants, .. } => { let variants = if !emitted_any_variants { quote!(__bindgen_cannot_repr_c_on_empty_enum = 0) } else { tokens }; quote! { #( #attrs )* pub enum #ident { #variants } } } EnumBuilder::NewType { canonical_name, tokens, is_bitfield, .. } => { if !is_bitfield { return tokens; } let rust_ty_name = ctx.rust_ident_raw(canonical_name); let prefix = ctx.trait_prefix(); result.push(quote! { impl ::#prefix::ops::BitOr<#rust_ty> for #rust_ty { type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { #rust_ty_name(self.0 | other.0) } } }); result.push(quote! { impl ::#prefix::ops::BitOrAssign for #rust_ty { #[inline] fn bitor_assign(&mut self, rhs: #rust_ty) { self.0 |= rhs.0; } } }); result.push(quote! { impl ::#prefix::ops::BitAnd<#rust_ty> for #rust_ty { type Output = Self; #[inline] fn bitand(self, other: Self) -> Self { #rust_ty_name(self.0 & other.0) } } }); result.push(quote! { impl ::#prefix::ops::BitAndAssign for #rust_ty { #[inline] fn bitand_assign(&mut self, rhs: #rust_ty) { self.0 &= rhs.0; } } }); tokens } EnumBuilder::Consts { variants, .. } => quote! { #( #variants )* }, EnumBuilder::ModuleConsts { module_items, module_name, .. } => { let ident = ctx.rust_ident(module_name); quote! { pub mod #ident { #( #module_items )* } } } } } } impl CodeGenerator for Enum { type Extra = Item; type Return = (); fn codegen<'a>( &self, ctx: &BindgenContext, result: &mut CodegenResult<'a>, item: &Item, ) { debug!("<Enum as CodeGenerator>::codegen: item = {:?}", item); debug_assert!(item.is_enabled_for_codegen(ctx)); let name = item.canonical_name(ctx); let ident = ctx.rust_ident(&name); let enum_ty = item.expect_type(); let layout = enum_ty.layout(ctx); let variation = self.computed_enum_variation(ctx, item); let repr_translated; let repr = match self.repr().map(|repr| ctx.resolve_type(repr)) { Some(repr) if !ctx.options().translate_enum_integer_types && !variation.is_rust() => { repr } repr => { // An enum's integer type is translated to a native Rust // integer type in 3 cases: // * the enum is Rustified and we need a translated type for // the repr attribute // * the representation couldn't be determined from the C source // * it was explicitly requested as a bindgen option let kind = match repr { Some(repr) => match *repr.canonical_type(ctx).kind() { TypeKind::Int(int_kind) => int_kind, _ => panic!("Unexpected type as enum repr"), }, None => { warn!( "Guessing type of enum! Forward declarations of enums \ shouldn't be legal!" 
); IntKind::Int } }; let signed = kind.is_signed(); let size = layout .map(|l| l.size) .or_else(|| kind.known_size()) .unwrap_or(0); let translated = match (signed, size) { (true, 1) => IntKind::I8, (false, 1) => IntKind::U8, (true, 2) => IntKind::I16, (false, 2) => IntKind::U16, (true, 4) => IntKind::I32, (false, 4) => IntKind::U32, (true, 8) => IntKind::I64, (false, 8) => IntKind::U64, _ => { warn!( "invalid enum decl: signed: {}, size: {}", signed, size ); IntKind::I32 } }; repr_translated = Type::new(None, None, TypeKind::Int(translated), false); &repr_translated } }; let mut attrs = vec![]; // TODO(emilio): Delegate this to the builders? match variation { EnumVariation::Rust { non_exhaustive } => { if non_exhaustive && ctx.options().rust_features().non_exhaustive { attrs.push(attributes::non_exhaustive()); } else if non_exhaustive && !ctx.options().rust_features().non_exhaustive { panic!("The rust target you're using doesn't seem to support non_exhaustive enums"); } } EnumVariation::NewType { .. } => { if ctx.options().rust_features.repr_transparent { attrs.push(attributes::repr("transparent")); } else { attrs.push(attributes::repr("C")); } } _ => {} }; if let Some(comment) = item.comment(ctx) { attrs.push(attributes::doc(comment)); } if item.annotations().must_use_type() || ctx.must_use_type_by_name(item) { attrs.push(attributes::must_use()); } if !variation.is_const() { let mut derives = derives_of_item(item, ctx); // For backwards compat, enums always derive // Clone/Eq/PartialEq/Hash, even if we don't generate those by // default. derives.insert( DerivableTraits::CLONE | DerivableTraits::HASH | DerivableTraits::PARTIAL_EQ | DerivableTraits::EQ, ); let mut derives: Vec<_> = derives.into(); for derive in item.annotations().derives().iter() { if !derives.contains(&derive.as_str()) { derives.push(derive); } } // The custom derives callback may return a list of derive attributes; // add them to the end of the list. let custom_derives; if let Some(cb) = &ctx.options().parse_callbacks { custom_derives = cb.add_derives(&name); // In most cases this will be a no-op, since custom_derives will be empty. derives.extend(custom_derives.iter().map(|s| s.as_str())); }; attrs.push(attributes::derives(&derives)); } fn add_constant<'a>( ctx: &BindgenContext, enum_: &Type, // Only to avoid recomputing every time. enum_canonical_name: &Ident, // May be the same as "variant" if it's because the // enum is unnamed and we still haven't seen the // value. variant_name: &Ident, referenced_name: &Ident, enum_rust_ty: proc_macro2::TokenStream, result: &mut CodegenResult<'a>, ) { let constant_name = if enum_.name().is_some() { if ctx.options().prepend_enum_name { format!("{}_{}", enum_canonical_name, variant_name) } else { format!("{}", variant_name) } } else { format!("{}", variant_name) }; let constant_name = ctx.rust_ident(constant_name); result.push(quote! { pub const #constant_name : #enum_rust_ty = #enum_canonical_name :: #referenced_name ; }); } let repr = repr.to_rust_ty_or_opaque(ctx, item); let mut builder = EnumBuilder::new( &name, attrs, repr, variation, item.codegen_depth(ctx), ); // A map where we keep a value -> variant relation. let mut seen_values = HashMap::<_, Ident>::default(); let enum_rust_ty = item.to_rust_ty_or_opaque(ctx, &()); let is_toplevel = item.is_toplevel(ctx); // Used to mangle the constants we generate in the unnamed-enum case. 
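        // For illustration (names hypothetical): with `prepend_enum_name`
        // enabled, `add_constant` above emits aliases of the form
        //
        //     pub const Foo_Bar: Foo = Foo::Bar;
        //
        // so constified or duplicate variants remain reachable as plain
        // constants alongside the enum itself.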
let parent_canonical_name = if is_toplevel { None } else { Some(item.parent_id().canonical_name(ctx)) }; let constant_mangling_prefix = if ctx.options().prepend_enum_name { if enum_ty.name().is_none() { parent_canonical_name.as_deref() } else { Some(&*name) } } else { None }; // NB: We defer the creation of constified variants, in case we find // another variant with the same value (which is the common thing to // do). let mut constified_variants = VecDeque::new(); let mut iter = self.variants().iter().peekable(); while let Some(variant) = iter.next().or_else(|| constified_variants.pop_front()) { if variant.hidden() { continue; } if variant.force_constification() && iter.peek().is_some() { constified_variants.push_back(variant); continue; } match seen_values.entry(variant.val()) { Entry::Occupied(ref entry) => { if variation.is_rust() { let variant_name = ctx.rust_mangle(variant.name()); let mangled_name = if is_toplevel || enum_ty.name().is_some() { variant_name } else { let parent_name = parent_canonical_name.as_ref().unwrap(); Cow::Owned(format!( "{}_{}", parent_name, variant_name )) }; let existing_variant_name = entry.get(); // Use associated constants for named enums. if enum_ty.name().is_some() && ctx.options().rust_features().associated_const { let enum_canonical_name = &ident; let variant_name = ctx.rust_ident_raw(&*mangled_name); result.push(quote! { impl #enum_rust_ty { pub const #variant_name : #enum_rust_ty = #enum_canonical_name :: #existing_variant_name ; } }); } else { add_constant( ctx, enum_ty, &ident, &Ident::new(&*mangled_name, Span::call_site()), existing_variant_name, enum_rust_ty.clone(), result, ); } } else { builder = builder.with_variant( ctx, variant, constant_mangling_prefix, enum_rust_ty.clone(), result, enum_ty.name().is_some(), ); } } Entry::Vacant(entry) => { builder = builder.with_variant( ctx, variant, constant_mangling_prefix, enum_rust_ty.clone(), result, enum_ty.name().is_some(), ); let variant_name = ctx.rust_ident(variant.name()); // If it's an unnamed enum, or constification is enforced, // we also generate a constant so it can be properly // accessed. if (variation.is_rust() && enum_ty.name().is_none()) || variant.force_constification() { let mangled_name = if is_toplevel { variant_name.clone() } else { let parent_name = parent_canonical_name.as_ref().unwrap(); Ident::new( &format!("{}_{}", parent_name, variant_name), Span::call_site(), ) }; add_constant( ctx, enum_ty, &ident, &mangled_name, &variant_name, enum_rust_ty.clone(), result, ); } entry.insert(variant_name); } } } let item = builder.build(ctx, enum_rust_ty, result); result.push(item); } } /// Enum for the default type of macro constants. #[derive(Copy, Clone, PartialEq, Debug)] pub enum MacroTypeVariation { /// Use i32 or i64 Signed, /// Use u32 or u64 Unsigned, } impl MacroTypeVariation { /// Convert a `MacroTypeVariation` to its str representation. pub fn as_str(&self) -> &str { match self { MacroTypeVariation::Signed => "signed", MacroTypeVariation::Unsigned => "unsigned", } } } impl Default for MacroTypeVariation { fn default() -> MacroTypeVariation { MacroTypeVariation::Unsigned } } impl std::str::FromStr for MacroTypeVariation { type Err = std::io::Error; /// Create a `MacroTypeVariation` from a string. fn from_str(s: &str) -> Result<Self, Self::Err> { match s { "signed" => Ok(MacroTypeVariation::Signed), "unsigned" => Ok(MacroTypeVariation::Unsigned), _ => Err(std::io::Error::new( std::io::ErrorKind::InvalidInput, concat!( "Got an invalid MacroTypeVariation. 
Accepted values ", "are 'signed' and 'unsigned'" ), )), } } } /// Enum for how aliases should be translated. #[derive(Copy, Clone, PartialEq, Debug)] pub enum AliasVariation { /// Convert to regular Rust alias TypeAlias, /// Create a new type by wrapping the old type in a struct and using #[repr(transparent)] NewType, /// Same as NewStruct but also impl Deref to be able to use the methods of the wrapped type NewTypeDeref, } impl AliasVariation { /// Convert an `AliasVariation` to its str representation. pub fn as_str(&self) -> &str { match self { AliasVariation::TypeAlias => "type_alias", AliasVariation::NewType => "new_type", AliasVariation::NewTypeDeref => "new_type_deref", } } } impl Default for AliasVariation { fn default() -> AliasVariation { AliasVariation::TypeAlias } } impl std::str::FromStr for AliasVariation { type Err = std::io::Error; /// Create an `AliasVariation` from a string. fn from_str(s: &str) -> Result<Self, Self::Err> { match s { "type_alias" => Ok(AliasVariation::TypeAlias), "new_type" => Ok(AliasVariation::NewType), "new_type_deref" => Ok(AliasVariation::NewTypeDeref), _ => Err(std::io::Error::new( std::io::ErrorKind::InvalidInput, concat!( "Got an invalid AliasVariation. Accepted values ", "are 'type_alias', 'new_type', and 'new_type_deref'" ), )), } } } /// Fallible conversion to an opaque blob. /// /// Implementors of this trait should provide the `try_get_layout` method to /// fallibly get this thing's layout, which the provided `try_to_opaque` trait /// method will use to convert the `Layout` into an opaque blob Rust type. trait TryToOpaque { type Extra; /// Get the layout for this thing, if one is available. fn try_get_layout( &self, ctx: &BindgenContext, extra: &Self::Extra, ) -> error::Result<Layout>; /// Do not override this provided trait method. fn try_to_opaque( &self, ctx: &BindgenContext, extra: &Self::Extra, ) -> error::Result<proc_macro2::TokenStream> { self.try_get_layout(ctx, extra) .map(|layout| helpers::blob(ctx, layout)) } } /// Infallible conversion of an IR thing to an opaque blob. /// /// The resulting layout is best effort, and is unfortunately not guaranteed to /// be correct. When all else fails, we fall back to a single byte layout as a /// last resort, because C++ does not permit zero-sized types. See the note in /// the `ToRustTyOrOpaque` doc comment about fallible versus infallible traits /// and when each is appropriate. /// /// Don't implement this directly. Instead implement `TryToOpaque`, and then /// leverage the blanket impl for this trait. trait ToOpaque: TryToOpaque { fn get_layout(&self, ctx: &BindgenContext, extra: &Self::Extra) -> Layout { self.try_get_layout(ctx, extra) .unwrap_or_else(|_| Layout::for_size(ctx, 1)) } fn to_opaque( &self, ctx: &BindgenContext, extra: &Self::Extra, ) -> proc_macro2::TokenStream { let layout = self.get_layout(ctx, extra); helpers::blob(ctx, layout) } } impl<T> ToOpaque for T where T: TryToOpaque {} /// Fallible conversion from an IR thing to an *equivalent* Rust type. /// /// If the C/C++ construct represented by the IR thing cannot (currently) be /// represented in Rust (for example, instantiations of templates with /// const-value generic parameters) then the impl should return an `Err`. It /// should *not* attempt to return an opaque blob with the correct size and /// alignment. That is the responsibility of the `TryToOpaque` trait. 
trait TryToRustTy { type Extra; fn try_to_rust_ty( &self, ctx: &BindgenContext, extra: &Self::Extra, ) -> error::Result<proc_macro2::TokenStream>; } /// Fallible conversion to a Rust type or an opaque blob with the correct size /// and alignment. /// /// Don't implement this directly. Instead implement `TryToRustTy` and /// `TryToOpaque`, and then leverage the blanket impl for this trait below. trait TryToRustTyOrOpaque: TryToRustTy + TryToOpaque { type Extra; fn try_to_rust_ty_or_opaque( &self, ctx: &BindgenContext, extra: &<Self as TryToRustTyOrOpaque>::Extra, ) -> error::Result<proc_macro2::TokenStream>; } impl<E, T> TryToRustTyOrOpaque for T where T: TryToRustTy<Extra = E> + TryToOpaque<Extra = E>, { type Extra = E; fn try_to_rust_ty_or_opaque( &self, ctx: &BindgenContext, extra: &E, ) -> error::Result<proc_macro2::TokenStream> { self.try_to_rust_ty(ctx, extra).or_else(|_| { if let Ok(layout) = self.try_get_layout(ctx, extra) { Ok(helpers::blob(ctx, layout)) } else { Err(error::Error::NoLayoutForOpaqueBlob) } }) } } /// Infallible conversion to a Rust type, or an opaque blob with a best effort /// of correct size and alignment. /// /// Don't implement this directly. Instead implement `TryToRustTy` and /// `TryToOpaque`, and then leverage the blanket impl for this trait below. /// /// ### Fallible vs. Infallible Conversions to Rust Types /// /// When should one use this infallible `ToRustTyOrOpaque` trait versus the /// fallible `TryTo{RustTy, Opaque, RustTyOrOpaque}` triats? All fallible trait /// implementations that need to convert another thing into a Rust type or /// opaque blob in a nested manner should also use fallible trait methods and /// propagate failure up the stack. Only infallible functions and methods like /// CodeGenerator implementations should use the infallible /// `ToRustTyOrOpaque`. The further out we push error recovery, the more likely /// we are to get a usable `Layout` even if we can't generate an equivalent Rust /// type for a C++ construct. 
trait ToRustTyOrOpaque: TryToRustTy + ToOpaque { type Extra; fn to_rust_ty_or_opaque( &self, ctx: &BindgenContext, extra: &<Self as ToRustTyOrOpaque>::Extra, ) -> proc_macro2::TokenStream; } impl<E, T> ToRustTyOrOpaque for T where T: TryToRustTy<Extra = E> + ToOpaque<Extra = E>, { type Extra = E; fn to_rust_ty_or_opaque( &self, ctx: &BindgenContext, extra: &E, ) -> proc_macro2::TokenStream { self.try_to_rust_ty(ctx, extra) .unwrap_or_else(|_| self.to_opaque(ctx, extra)) } } impl<T> TryToOpaque for T where T: Copy + Into<ItemId>, { type Extra = (); fn try_get_layout( &self, ctx: &BindgenContext, _: &(), ) -> error::Result<Layout> { ctx.resolve_item((*self).into()).try_get_layout(ctx, &()) } } impl<T> TryToRustTy for T where T: Copy + Into<ItemId>, { type Extra = (); fn try_to_rust_ty( &self, ctx: &BindgenContext, _: &(), ) -> error::Result<proc_macro2::TokenStream> { ctx.resolve_item((*self).into()).try_to_rust_ty(ctx, &()) } } impl TryToOpaque for Item { type Extra = (); fn try_get_layout( &self, ctx: &BindgenContext, _: &(), ) -> error::Result<Layout> { self.kind().expect_type().try_get_layout(ctx, self) } } impl TryToRustTy for Item { type Extra = (); fn try_to_rust_ty( &self, ctx: &BindgenContext, _: &(), ) -> error::Result<proc_macro2::TokenStream> { self.kind().expect_type().try_to_rust_ty(ctx, self) } } impl TryToOpaque for Type { type Extra = Item; fn try_get_layout( &self, ctx: &BindgenContext, _: &Item, ) -> error::Result<Layout> { self.layout(ctx).ok_or(error::Error::NoLayoutForOpaqueBlob) } } impl TryToRustTy for Type { type Extra = Item; fn try_to_rust_ty( &self, ctx: &BindgenContext, item: &Item, ) -> error::Result<proc_macro2::TokenStream> { use self::helpers::ast_ty::*; match *self.kind() { TypeKind::Void => Ok(c_void(ctx)), // TODO: we should do something smart with nullptr, or maybe *const // c_void is enough? TypeKind::NullPtr => Ok(c_void(ctx).to_ptr(true)), TypeKind::Int(ik) => { match ik { IntKind::Bool => Ok(quote! { bool }), IntKind::Char { .. } => Ok(raw_type(ctx, "c_char")), IntKind::SChar => Ok(raw_type(ctx, "c_schar")), IntKind::UChar => Ok(raw_type(ctx, "c_uchar")), IntKind::Short => Ok(raw_type(ctx, "c_short")), IntKind::UShort => Ok(raw_type(ctx, "c_ushort")), IntKind::Int => Ok(raw_type(ctx, "c_int")), IntKind::UInt => Ok(raw_type(ctx, "c_uint")), IntKind::Long => Ok(raw_type(ctx, "c_long")), IntKind::ULong => Ok(raw_type(ctx, "c_ulong")), IntKind::LongLong => Ok(raw_type(ctx, "c_longlong")), IntKind::ULongLong => Ok(raw_type(ctx, "c_ulonglong")), IntKind::WChar => { let layout = self .layout(ctx) .expect("Couldn't compute wchar_t's layout?"); let ty = Layout::known_type_for_size(ctx, layout.size) .expect("Non-representable wchar_t?"); let ident = ctx.rust_ident_raw(ty); Ok(quote! { #ident }) } IntKind::I8 => Ok(quote! { i8 }), IntKind::U8 => Ok(quote! { u8 }), IntKind::I16 => Ok(quote! { i16 }), IntKind::U16 => Ok(quote! { u16 }), IntKind::I32 => Ok(quote! { i32 }), IntKind::U32 => Ok(quote! { u32 }), IntKind::I64 => Ok(quote! { i64 }), IntKind::U64 => Ok(quote! { u64 }), IntKind::Custom { name, .. } => { Ok(proc_macro2::TokenStream::from_str(name).unwrap()) } IntKind::U128 => { Ok(if ctx.options().rust_features.i128_and_u128 { quote! { u128 } } else { // Best effort thing, but wrong alignment // unfortunately. quote! { [u64; 2] } }) } IntKind::I128 => { Ok(if ctx.options().rust_features.i128_and_u128 { quote! { i128 } } else { quote! 
{ [u64; 2] } }) } } } TypeKind::Float(fk) => { Ok(float_kind_rust_type(ctx, fk, self.layout(ctx))) } TypeKind::Complex(fk) => { let float_path = float_kind_rust_type(ctx, fk, self.layout(ctx)); ctx.generated_bindgen_complex(); Ok(if ctx.options().enable_cxx_namespaces { quote! { root::__BindgenComplex<#float_path> } } else { quote! { __BindgenComplex<#float_path> } }) } TypeKind::Function(ref fs) => { // We can't rely on the sizeof(Option<NonZero<_>>) == // sizeof(NonZero<_>) optimization with opaque blobs (because // they aren't NonZero), so don't *ever* use an or_opaque // variant here. let ty = fs.try_to_rust_ty(ctx, &())?; let prefix = ctx.trait_prefix(); Ok(quote! { ::#prefix::option::Option<#ty> }) } TypeKind::Array(item, len) | TypeKind::Vector(item, len) => { let ty = item.try_to_rust_ty(ctx, &())?; Ok(quote! { [ #ty ; #len ] }) } TypeKind::Enum(..) => { let path = item.namespace_aware_canonical_path(ctx); let path = proc_macro2::TokenStream::from_str(&path.join("::")) .unwrap(); Ok(quote!(#path)) } TypeKind::TemplateInstantiation(ref inst) => { inst.try_to_rust_ty(ctx, item) } TypeKind::ResolvedTypeRef(inner) => inner.try_to_rust_ty(ctx, &()), TypeKind::TemplateAlias(..) | TypeKind::Alias(..) | TypeKind::BlockPointer(..) => { if self.is_block_pointer() && !ctx.options().generate_block { let void = c_void(ctx); return Ok(void.to_ptr(/* is_const = */ false)); } if item.is_opaque(ctx, &()) && item.used_template_params(ctx) .into_iter() .any(|param| param.is_template_param(ctx, &())) { self.try_to_opaque(ctx, item) } else if let Some(ty) = self .name() .and_then(|name| utils::type_from_named(ctx, name)) { Ok(ty) } else { utils::build_path(item, ctx) } } TypeKind::Comp(ref info) => { let template_params = item.all_template_params(ctx); if info.has_non_type_template_params() || (item.is_opaque(ctx, &()) && !template_params.is_empty()) { return self.try_to_opaque(ctx, item); } utils::build_path(item, ctx) } TypeKind::Opaque => self.try_to_opaque(ctx, item), TypeKind::Pointer(inner) | TypeKind::Reference(inner) => { let is_const = ctx.resolve_type(inner).is_const(); let inner = inner.into_resolver().through_type_refs().resolve(ctx); let inner_ty = inner.expect_type(); let is_objc_pointer = matches!(inner_ty.kind(), TypeKind::ObjCInterface(..)); // Regardless if we can properly represent the inner type, we // should always generate a proper pointer here, so use // infallible conversion of the inner type. let mut ty = inner.to_rust_ty_or_opaque(ctx, &()); ty.append_implicit_template_params(ctx, inner); // Avoid the first function pointer level, since it's already // represented in Rust. if inner_ty.canonical_type(ctx).is_function() || is_objc_pointer { Ok(ty) } else { Ok(ty.to_ptr(is_const)) } } TypeKind::TypeParam => { let name = item.canonical_name(ctx); let ident = ctx.rust_ident(&name); Ok(quote! { #ident }) } TypeKind::ObjCSel => Ok(quote! { objc::runtime::Sel }), TypeKind::ObjCId => Ok(quote! { id }), TypeKind::ObjCInterface(ref interface) => { let name = ctx.rust_ident(interface.name()); Ok(quote! { #name }) } ref u @ TypeKind::UnresolvedTypeRef(..) 
=> { unreachable!("Should have been resolved after parsing {:?}!", u) } } } } impl TryToOpaque for TemplateInstantiation { type Extra = Item; fn try_get_layout( &self, ctx: &BindgenContext, item: &Item, ) -> error::Result<Layout> { item.expect_type() .layout(ctx) .ok_or(error::Error::NoLayoutForOpaqueBlob) } } impl TryToRustTy for TemplateInstantiation { type Extra = Item; fn try_to_rust_ty( &self, ctx: &BindgenContext, item: &Item, ) -> error::Result<proc_macro2::TokenStream> { if self.is_opaque(ctx, item) { return Err(error::Error::InstantiationOfOpaqueType); } let def = self .template_definition() .into_resolver() .through_type_refs() .resolve(ctx); let mut ty = quote! {}; let def_path = def.namespace_aware_canonical_path(ctx); ty.append_separated( def_path.into_iter().map(|p| ctx.rust_ident(p)), quote!(::), ); let def_params = def.self_template_params(ctx); if def_params.is_empty() { // This can happen if we generated an opaque type for a partial // template specialization, and we've hit an instantiation of // that partial specialization. extra_assert!(def.is_opaque(ctx, &())); return Err(error::Error::InstantiationOfOpaqueType); } // TODO: If the definition type is a template class/struct // definition's member template definition, it could rely on // generic template parameters from its outer template // class/struct. When we emit bindings for it, it could require // *more* type arguments than we have here, and we will need to // reconstruct them somehow. We don't have any means of doing // that reconstruction at this time. let template_args = self .template_arguments() .iter() .zip(def_params.iter()) // Only pass type arguments for the type parameters that // the def uses. .filter(|&(_, param)| ctx.uses_template_parameter(def.id(), *param)) .map(|(arg, _)| { let arg = arg.into_resolver().through_type_refs().resolve(ctx); let mut ty = arg.try_to_rust_ty(ctx, &())?; ty.append_implicit_template_params(ctx, arg); Ok(ty) }) .collect::<error::Result<Vec<_>>>()?; if template_args.is_empty() { return Ok(ty); } Ok(quote! { #ty < #( #template_args ),* > }) } } impl TryToRustTy for FunctionSig { type Extra = (); fn try_to_rust_ty( &self, ctx: &BindgenContext, _: &(), ) -> error::Result<proc_macro2::TokenStream> { // TODO: we might want to consider ignoring the reference return value. let ret = utils::fnsig_return_ty(ctx, self); let arguments = utils::fnsig_arguments(ctx, self); let abi = self.abi(); match abi { Abi::ThisCall if !ctx.options().rust_features().thiscall_abi => { warn!("Skipping function with thiscall ABI that isn't supported by the configured Rust target"); Ok(proc_macro2::TokenStream::new()) } _ => Ok(quote! { unsafe extern #abi fn ( #( #arguments ),* ) #ret }), } } } impl CodeGenerator for Function { type Extra = Item; /// If we've actually generated the symbol, the number of times we've seen /// it. type Return = Option<u32>; fn codegen<'a>( &self, ctx: &BindgenContext, result: &mut CodegenResult<'a>, item: &Item, ) -> Self::Return { debug!("<Function as CodeGenerator>::codegen: item = {:?}", item); debug_assert!(item.is_enabled_for_codegen(ctx)); // We can't currently do anything with Internal functions so just // avoid generating anything for them. match self.linkage() { Linkage::Internal => return None, Linkage::External => {} } // Pure virtual methods have no actual symbol, so we can't generate // something meaningful for them. 
match self.kind() { FunctionKind::Method(ref method_kind) if method_kind.is_pure_virtual() => { return None; } _ => {} } // Similar to static member variables in a class template, we can't // generate bindings to template functions, because the set of // instantiations is open ended and we have no way of knowing which // monomorphizations actually exist. if !item.all_template_params(ctx).is_empty() { return None; } let name = self.name(); let mut canonical_name = item.canonical_name(ctx); let mangled_name = self.mangled_name(); { let seen_symbol_name = mangled_name.unwrap_or(&canonical_name); // TODO: Maybe warn here if there's a type/argument mismatch, or // something? if result.seen_function(seen_symbol_name) { return None; } result.saw_function(seen_symbol_name); } let signature_item = ctx.resolve_item(self.signature()); let signature = signature_item.kind().expect_type().canonical_type(ctx); let signature = match *signature.kind() { TypeKind::Function(ref sig) => sig, _ => panic!("Signature kind is not a Function: {:?}", signature), }; let args = utils::fnsig_arguments(ctx, signature); let ret = utils::fnsig_return_ty(ctx, signature); let mut attributes = vec![]; if signature.must_use() && ctx.options().rust_features().must_use_function { attributes.push(attributes::must_use()); } if let Some(comment) = item.comment(ctx) { attributes.push(attributes::doc(comment)); } let abi = match signature.abi() { Abi::ThisCall if !ctx.options().rust_features().thiscall_abi => { warn!("Skipping function with thiscall ABI that isn't supported by the configured Rust target"); return None; } Abi::Win64 if signature.is_variadic() => { warn!("Skipping variadic function with Win64 ABI that isn't supported"); return None; } Abi::Unknown(unknown_abi) => { panic!( "Invalid or unknown abi {:?} for function {:?} ({:?})", unknown_abi, canonical_name, self ); } abi => abi, }; // Handle overloaded functions by giving each overload its own unique // suffix. let times_seen = result.overload_number(&canonical_name); if times_seen > 0 { write!(&mut canonical_name, "{}", times_seen).unwrap(); } let link_name = mangled_name.unwrap_or(name); if !utils::names_will_be_identical_after_mangling( &canonical_name, link_name, Some(abi), ) { attributes.push(attributes::link_name(link_name)); } // Unfortunately this can't piggyback on the `attributes` list because // the #[link(wasm_import_module)] needs to happen before the `extern // "C"` block. It doesn't get picked up properly otherwise let wasm_link_attribute = ctx.options().wasm_import_module_name.as_ref().map(|name| { quote! { #[link(wasm_import_module = #name)] } }); let ident = ctx.rust_ident(canonical_name); let tokens = quote! { #wasm_link_attribute extern #abi { #(#attributes)* pub fn #ident ( #( #args ),* ) #ret; } }; // If we're doing dynamic binding generation, add to the dynamic items. if ctx.options().dynamic_library_name.is_some() && self.kind() == FunctionKind::Function { let args_identifiers = utils::fnsig_argument_identifiers(ctx, signature); let return_item = ctx.resolve_item(signature.return_type()); let ret_ty = match *return_item.kind().expect_type().kind() { TypeKind::Void => quote! 
{()}, _ => return_item.to_rust_ty_or_opaque(ctx, &()), }; result.dynamic_items().push( ident, abi, signature.is_variadic(), ctx.options().dynamic_link_require_all, args, args_identifiers, ret, ret_ty, ); } else { result.push(tokens); } Some(times_seen) } } fn objc_method_codegen( ctx: &BindgenContext, method: &ObjCMethod, class_name: Option<&str>, prefix: &str, ) -> proc_macro2::TokenStream { let signature = method.signature(); let fn_args = utils::fnsig_arguments(ctx, signature); let fn_ret = utils::fnsig_return_ty(ctx, signature); let sig = if method.is_class_method() { let fn_args = fn_args.clone(); quote! { ( #( #fn_args ),* ) #fn_ret } } else { let fn_args = fn_args.clone(); let args = iter::once(quote! { &self }).chain(fn_args.into_iter()); quote! { ( #( #args ),* ) #fn_ret } }; let methods_and_args = method.format_method_call(&fn_args); let body = if method.is_class_method() { let class_name = ctx.rust_ident( class_name .expect("Generating a class method without class name?") .to_owned(), ); quote! { msg_send!(class!(#class_name), #methods_and_args) } } else { quote! { msg_send!(*self, #methods_and_args) } }; let method_name = ctx.rust_ident(format!("{}{}", prefix, method.rust_name())); quote! { unsafe fn #method_name #sig where <Self as std::ops::Deref>::Target: objc::Message + Sized { #body } } } impl CodeGenerator for ObjCInterface { type Extra = Item; type Return = (); fn codegen<'a>( &self, ctx: &BindgenContext, result: &mut CodegenResult<'a>, item: &Item, ) { debug_assert!(item.is_enabled_for_codegen(ctx)); let mut impl_items = vec![]; for method in self.methods() { let impl_item = objc_method_codegen(ctx, method, None, ""); impl_items.push(impl_item); } for class_method in self.class_methods() { let ambiguity = self .methods() .iter() .map(|m| m.rust_name()) .any(|x| x == class_method.rust_name()); let prefix = if ambiguity { "class_" } else { "" }; let impl_item = objc_method_codegen( ctx, class_method, Some(self.name()), prefix, ); impl_items.push(impl_item); } let trait_name = ctx.rust_ident(self.rust_name()); let trait_constraints = quote! { Sized + std::ops::Deref }; let trait_block = if self.is_template() { let template_names: Vec<Ident> = self .template_names .iter() .map(|g| ctx.rust_ident(g)) .collect(); quote! { pub trait #trait_name <#(#template_names),*> : #trait_constraints { #( #impl_items )* } } } else { quote! { pub trait #trait_name : #trait_constraints { #( #impl_items )* } } }; let class_name = ctx.rust_ident(self.name()); if !self.is_category() && !self.is_protocol() { let struct_block = quote! { #[repr(transparent)] #[derive(Clone)] pub struct #class_name(pub id); impl std::ops::Deref for #class_name { type Target = objc::runtime::Object; fn deref(&self) -> &Self::Target { unsafe { &*self.0 } } } unsafe impl objc::Message for #class_name { } impl #class_name { pub fn alloc() -> Self { Self(unsafe { msg_send!(objc::class!(#class_name), alloc) }) } } }; result.push(struct_block); let mut protocol_set: HashSet<ItemId> = Default::default(); for protocol_id in self.conforms_to.iter() { protocol_set.insert(*protocol_id); let protocol_name = ctx.rust_ident( ctx.resolve_type(protocol_id.expect_type_id(ctx)) .name() .unwrap(), ); let impl_trait = quote!
{ impl #protocol_name for #class_name { } }; result.push(impl_trait); } let mut parent_class = self.parent_class; while let Some(parent_id) = parent_class { let parent = parent_id .expect_type_id(ctx) .into_resolver() .through_type_refs() .resolve(ctx) .expect_type() .kind(); let parent = match parent { TypeKind::ObjCInterface(ref parent) => parent, _ => break, }; parent_class = parent.parent_class; let parent_name = ctx.rust_ident(parent.rust_name()); let impl_trait = if parent.is_template() { let template_names: Vec<Ident> = parent .template_names .iter() .map(|g| ctx.rust_ident(g)) .collect(); quote! { impl <#(#template_names :'static),*> #parent_name <#(#template_names),*> for #class_name { } } } else { quote! { impl #parent_name for #class_name { } } }; result.push(impl_trait); for protocol_id in parent.conforms_to.iter() { if protocol_set.insert(*protocol_id) { let protocol_name = ctx.rust_ident( ctx.resolve_type(protocol_id.expect_type_id(ctx)) .name() .unwrap(), ); let impl_trait = quote! { impl #protocol_name for #class_name { } }; result.push(impl_trait); } } if !parent.is_template() { let parent_struct_name = parent.name(); let child_struct_name = self.name(); let parent_struct = ctx.rust_ident(parent_struct_name); let from_block = quote! { impl From<#class_name> for #parent_struct { fn from(child: #class_name) -> #parent_struct { #parent_struct(child.0) } } }; result.push(from_block); let error_msg = format!( "This {} cannot be downcasted to {}", parent_struct_name, child_struct_name ); let try_into_block = quote! { impl std::convert::TryFrom<#parent_struct> for #class_name { type Error = &'static str; fn try_from(parent: #parent_struct) -> Result<#class_name, Self::Error> { let is_kind_of : bool = unsafe { msg_send!(parent, isKindOfClass:class!(#class_name))}; if is_kind_of { Ok(#class_name(parent.0)) } else { Err(#error_msg) } } } }; result.push(try_into_block); } } } if !self.is_protocol() { let impl_block = if self.is_template() { let template_names: Vec<Ident> = self .template_names .iter() .map(|g| ctx.rust_ident(g)) .collect(); quote! { impl <#(#template_names :'static),*> #trait_name <#(#template_names),*> for #class_name { } } } else { quote! 
{ impl #trait_name for #class_name { } } }; result.push(impl_block); } result.push(trait_block); result.saw_objc(); } } pub(crate) fn codegen( context: BindgenContext, ) -> (Vec<proc_macro2::TokenStream>, BindgenOptions) { context.gen(|context| { let _t = context.timer("codegen"); let counter = Cell::new(0); let mut result = CodegenResult::new(&counter); debug!("codegen: {:?}", context.options()); if context.options().emit_ir { let codegen_items = context.codegen_items(); for (id, item) in context.items() { if codegen_items.contains(&id) { println!("ir: {:?} = {:#?}", id, item); } } } if let Some(path) = context.options().emit_ir_graphviz.as_ref() { match dot::write_dot_file(context, path) { Ok(()) => info!( "Your dot file was generated successfully into: {}", path ), Err(e) => warn!("{}", e), } } if let Some(spec) = context.options().depfile.as_ref() { match spec.write(context.deps()) { Ok(()) => info!( "Your depfile was generated successfully into: {}", spec.depfile_path.display() ), Err(e) => warn!("{}", e), } } context.resolve_item(context.root_module()).codegen( context, &mut result, &(), ); if let Some(ref lib_name) = context.options().dynamic_library_name { let lib_ident = context.rust_ident(lib_name); let dynamic_items_tokens = result.dynamic_items().get_tokens(lib_ident); result.push(dynamic_items_tokens); } result.items }) } pub mod utils { use super::{error, ToRustTyOrOpaque}; use crate::ir::context::BindgenContext; use crate::ir::function::{Abi, FunctionSig}; use crate::ir::item::{Item, ItemCanonicalPath}; use crate::ir::ty::TypeKind; use proc_macro2; use std::borrow::Cow; use std::mem; use std::str::FromStr; pub fn prepend_bitfield_unit_type( ctx: &BindgenContext, result: &mut Vec<proc_macro2::TokenStream>, ) { let bitfield_unit_src = include_str!("./bitfield_unit.rs"); let bitfield_unit_src = if ctx.options().rust_features().min_const_fn { Cow::Borrowed(bitfield_unit_src) } else { Cow::Owned(bitfield_unit_src.replace("const fn ", "fn ")) }; let bitfield_unit_type = proc_macro2::TokenStream::from_str(&bitfield_unit_src).unwrap(); let bitfield_unit_type = quote!(#bitfield_unit_type); let items = vec![bitfield_unit_type]; let old_items = mem::replace(result, items); result.extend(old_items); } pub fn prepend_objc_header( ctx: &BindgenContext, result: &mut Vec<proc_macro2::TokenStream>, ) { let use_objc = if ctx.options().objc_extern_crate { quote! { #[macro_use] extern crate objc; } } else { quote! { use objc; } }; let id_type = quote! { #[allow(non_camel_case_types)] pub type id = *mut objc::runtime::Object; }; let items = vec![use_objc, id_type]; let old_items = mem::replace(result, items); result.extend(old_items.into_iter()); } pub fn prepend_block_header( ctx: &BindgenContext, result: &mut Vec<proc_macro2::TokenStream>, ) { let use_block = if ctx.options().block_extern_crate { quote! { extern crate block; } } else { quote! { use block; } }; let items = vec![use_block]; let old_items = mem::replace(result, items); result.extend(old_items.into_iter()); } pub fn prepend_union_types( ctx: &BindgenContext, result: &mut Vec<proc_macro2::TokenStream>, ) { let prefix = ctx.trait_prefix(); // If the target supports `const fn`, declare eligible functions // as `const fn` else just `fn`. let const_fn = if ctx.options().rust_features().min_const_fn { quote! { const fn } } else { quote! { fn } }; // TODO(emilio): The fmt::Debug impl could be way nicer with // std::intrinsics::type_name, but... let union_field_decl = quote! 
{ #[repr(C)] pub struct __BindgenUnionField<T>(::#prefix::marker::PhantomData<T>); }; let union_field_impl = quote! { impl<T> __BindgenUnionField<T> { #[inline] pub #const_fn new() -> Self { __BindgenUnionField(::#prefix::marker::PhantomData) } #[inline] pub unsafe fn as_ref(&self) -> &T { ::#prefix::mem::transmute(self) } #[inline] pub unsafe fn as_mut(&mut self) -> &mut T { ::#prefix::mem::transmute(self) } } }; let union_field_default_impl = quote! { impl<T> ::#prefix::default::Default for __BindgenUnionField<T> { #[inline] fn default() -> Self { Self::new() } } }; let union_field_clone_impl = quote! { impl<T> ::#prefix::clone::Clone for __BindgenUnionField<T> { #[inline] fn clone(&self) -> Self { Self::new() } } }; let union_field_copy_impl = quote! { impl<T> ::#prefix::marker::Copy for __BindgenUnionField<T> {} }; let union_field_debug_impl = quote! { impl<T> ::#prefix::fmt::Debug for __BindgenUnionField<T> { fn fmt(&self, fmt: &mut ::#prefix::fmt::Formatter<'_>) -> ::#prefix::fmt::Result { fmt.write_str("__BindgenUnionField") } } }; // The actual memory of the field will be hashed, so that's why this // field doesn't do anything with the hash. let union_field_hash_impl = quote! { impl<T> ::#prefix::hash::Hash for __BindgenUnionField<T> { fn hash<H: ::#prefix::hash::Hasher>(&self, _state: &mut H) { } } }; let union_field_partialeq_impl = quote! { impl<T> ::#prefix::cmp::PartialEq for __BindgenUnionField<T> { fn eq(&self, _other: &__BindgenUnionField<T>) -> bool { true } } }; let union_field_eq_impl = quote! { impl<T> ::#prefix::cmp::Eq for __BindgenUnionField<T> { } }; let items = vec![ union_field_decl, union_field_impl, union_field_default_impl, union_field_clone_impl, union_field_copy_impl, union_field_debug_impl, union_field_hash_impl, union_field_partialeq_impl, union_field_eq_impl, ]; let old_items = mem::replace(result, items); result.extend(old_items.into_iter()); } pub fn prepend_incomplete_array_types( ctx: &BindgenContext, result: &mut Vec<proc_macro2::TokenStream>, ) { let prefix = ctx.trait_prefix(); // If the target supports `const fn`, declare eligible functions // as `const fn` else just `fn`. let const_fn = if ctx.options().rust_features().min_const_fn { quote! { const fn } } else { quote! { fn } }; let incomplete_array_decl = quote! { #[repr(C)] #[derive(Default)] pub struct __IncompleteArrayField<T>( ::#prefix::marker::PhantomData<T>, [T; 0]); }; let incomplete_array_impl = quote! { impl<T> __IncompleteArrayField<T> { #[inline] pub #const_fn new() -> Self { __IncompleteArrayField(::#prefix::marker::PhantomData, []) } #[inline] pub fn as_ptr(&self) -> *const T { self as *const _ as *const T } #[inline] pub fn as_mut_ptr(&mut self) -> *mut T { self as *mut _ as *mut T } #[inline] pub unsafe fn as_slice(&self, len: usize) -> &[T] { ::#prefix::slice::from_raw_parts(self.as_ptr(), len) } #[inline] pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { ::#prefix::slice::from_raw_parts_mut(self.as_mut_ptr(), len) } } }; let incomplete_array_debug_impl = quote! { impl<T> ::#prefix::fmt::Debug for __IncompleteArrayField<T> { fn fmt(&self, fmt: &mut ::#prefix::fmt::Formatter<'_>) -> ::#prefix::fmt::Result { fmt.write_str("__IncompleteArrayField") } } }; let items = vec![ incomplete_array_decl, incomplete_array_impl, incomplete_array_debug_impl, ]; let old_items = mem::replace(result, items); result.extend(old_items.into_iter()); } pub fn prepend_complex_type(result: &mut Vec<proc_macro2::TokenStream>) { let complex_type = quote!
{ #[derive(PartialEq, Copy, Clone, Hash, Debug, Default)] #[repr(C)] pub struct __BindgenComplex<T> { pub re: T, pub im: T } }; let items = vec![complex_type]; let old_items = mem::replace(result, items); result.extend(old_items.into_iter()); } pub fn build_path( item: &Item, ctx: &BindgenContext, ) -> error::Result<proc_macro2::TokenStream> { let path = item.namespace_aware_canonical_path(ctx); let tokens = proc_macro2::TokenStream::from_str(&path.join("::")).unwrap(); Ok(tokens) } fn primitive_ty( ctx: &BindgenContext, name: &str, ) -> proc_macro2::TokenStream { let ident = ctx.rust_ident_raw(name); quote! { #ident } } pub fn type_from_named( ctx: &BindgenContext, name: &str, ) -> Option<proc_macro2::TokenStream> { // FIXME: We could use the inner item to check this is really a // primitive type but, who the heck overrides these anyway? Some(match name { "int8_t" => primitive_ty(ctx, "i8"), "uint8_t" => primitive_ty(ctx, "u8"), "int16_t" => primitive_ty(ctx, "i16"), "uint16_t" => primitive_ty(ctx, "u16"), "int32_t" => primitive_ty(ctx, "i32"), "uint32_t" => primitive_ty(ctx, "u32"), "int64_t" => primitive_ty(ctx, "i64"), "uint64_t" => primitive_ty(ctx, "u64"), "size_t" if ctx.options().size_t_is_usize => { primitive_ty(ctx, "usize") } "uintptr_t" => primitive_ty(ctx, "usize"), "ssize_t" if ctx.options().size_t_is_usize => { primitive_ty(ctx, "isize") } "intptr_t" | "ptrdiff_t" => primitive_ty(ctx, "isize"), _ => return None, }) } pub fn fnsig_return_ty( ctx: &BindgenContext, sig: &FunctionSig, ) -> proc_macro2::TokenStream { let return_item = ctx.resolve_item(sig.return_type()); if let TypeKind::Void = *return_item.kind().expect_type().kind() { quote! {} } else { let ret_ty = return_item.to_rust_ty_or_opaque(ctx, &()); quote! { -> #ret_ty } } } pub fn fnsig_arguments( ctx: &BindgenContext, sig: &FunctionSig, ) -> Vec<proc_macro2::TokenStream> { use super::ToPtr; let mut unnamed_arguments = 0; let mut args = sig .argument_types() .iter() .map(|&(ref name, ty)| { let arg_item = ctx.resolve_item(ty); let arg_ty = arg_item.kind().expect_type(); // From the C90 standard[1]: // // A declaration of a parameter as "array of type" shall be // adjusted to "qualified pointer to type", where the type // qualifiers (if any) are those specified within the [ and ] of // the array type derivation. // // [1]: http://c0x.coding-guidelines.com/6.7.5.3.html let arg_ty = match *arg_ty.canonical_type(ctx).kind() { TypeKind::Array(t, _) => { let stream = if ctx.options().array_pointers_in_arguments { arg_ty.to_rust_ty_or_opaque(ctx, arg_item) } else { t.to_rust_ty_or_opaque(ctx, &()) }; stream.to_ptr(ctx.resolve_type(t).is_const()) } TypeKind::Pointer(inner) => { let inner = ctx.resolve_item(inner); let inner_ty = inner.expect_type(); if let TypeKind::ObjCInterface(ref interface) = *inner_ty.canonical_type(ctx).kind() { let name = ctx.rust_ident(interface.name()); quote! { #name } } else { arg_item.to_rust_ty_or_opaque(ctx, &()) } } _ => arg_item.to_rust_ty_or_opaque(ctx, &()), }; let arg_name = match *name { Some(ref name) => ctx.rust_mangle(name).into_owned(), None => { unnamed_arguments += 1; format!("arg{}", unnamed_arguments) } }; assert!(!arg_name.is_empty()); let arg_name = ctx.rust_ident(arg_name); quote! { #arg_name : #arg_ty } }) .collect::<Vec<_>>(); if sig.is_variadic() { args.push(quote! { ... 
}) } args } pub fn fnsig_argument_identifiers( ctx: &BindgenContext, sig: &FunctionSig, ) -> Vec<proc_macro2::TokenStream> { let mut unnamed_arguments = 0; let args = sig .argument_types() .iter() .map(|&(ref name, _ty)| { let arg_name = match *name { Some(ref name) => ctx.rust_mangle(name).into_owned(), None => { unnamed_arguments += 1; format!("arg{}", unnamed_arguments) } }; assert!(!arg_name.is_empty()); let arg_name = ctx.rust_ident(arg_name); quote! { #arg_name } }) .collect::<Vec<_>>(); args } pub fn fnsig_block( ctx: &BindgenContext, sig: &FunctionSig, ) -> proc_macro2::TokenStream { let args = sig.argument_types().iter().map(|&(_, ty)| { let arg_item = ctx.resolve_item(ty); arg_item.to_rust_ty_or_opaque(ctx, &()) }); let return_item = ctx.resolve_item(sig.return_type()); let ret_ty = if let TypeKind::Void = *return_item.kind().expect_type().kind() { quote! { () } } else { return_item.to_rust_ty_or_opaque(ctx, &()) }; quote! { *const ::block::Block<(#(#args,)*), #ret_ty> } } // Returns true if `canonical_name` will end up as `mangled_name` at the // machine code level, i.e. after LLVM has applied any target specific // mangling. pub fn names_will_be_identical_after_mangling( canonical_name: &str, mangled_name: &str, call_conv: Option<Abi>, ) -> bool { // If the mangled name and the canonical name are the same then no // mangling can have happened between the two versions. if canonical_name == mangled_name { return true; } // Working with &[u8] makes indexing simpler than with &str let canonical_name = canonical_name.as_bytes(); let mangled_name = mangled_name.as_bytes(); let (mangling_prefix, expect_suffix) = match call_conv { Some(Abi::C) | // None is the case for global variables None => { (b'_', false) } Some(Abi::Stdcall) => (b'_', true), Some(Abi::Fastcall) => (b'@', true), // This is something we don't recognize, stay on the safe side // by emitting the `#[link_name]` attribute Some(_) => return false, }; // Check that the mangled name is long enough to at least contain the // canonical name plus the expected prefix. if mangled_name.len() < canonical_name.len() + 1 { return false; } // Return if the mangled name does not start with the prefix expected // for the given calling convention. if mangled_name[0] != mangling_prefix { return false; } // Check that the mangled name contains the canonical name after the // prefix if &mangled_name[1..canonical_name.len() + 1] != canonical_name { return false; } // If the given calling convention also prescribes a suffix, check that // it exists too if expect_suffix { let suffix = &mangled_name[canonical_name.len() + 1..]; // The shortest suffix is "@0" if suffix.len() < 2 { return false; } // Check that the suffix starts with '@' and is all ASCII decimals // after that. if suffix[0] != b'@' || !suffix[1..].iter().all(u8::is_ascii_digit) { return false; } } else if mangled_name.len() != canonical_name.len() + 1 { // If we don't expect a prefix but there is one, we need the // #[link_name] attribute return false; } true } }<|fim▁end|>
.count() >
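The bindgen excerpt above pairs each fallible conversion trait (`TryToOpaque`, `TryToRustTy`) with an infallible counterpart supplied through a blanket impl, so that error recovery happens only at the outermost layer. The following is a minimal, self-contained Rust sketch of that pattern; all names (`TryRender`, `Render`, `NoLayout`) are hypothetical stand-ins, not bindgen's actual API.

// Hypothetical error type standing in for error::Error::NoLayoutForOpaqueBlob.
#[derive(Debug)]
struct NoLayout;

// Fallible trait: implementors report failure instead of guessing,
// mirroring `TryToOpaque` / `TryToRustTy`.
trait TryRender {
    fn try_render(&self) -> Result<String, NoLayout>;
}

// Infallible counterpart: recovers from failure with a best-effort
// fallback, the way `ToOpaque` falls back to a single-byte layout.
trait Render: TryRender {
    fn render(&self) -> String {
        self.try_render().unwrap_or_else(|_| "<opaque>".to_string())
    }
}

// Blanket impl: implementing only the fallible trait yields the
// infallible one for free, like `impl<T> ToOpaque for T where T: TryToOpaque {}`.
impl<T: TryRender> Render for T {}

struct Known;
struct Unknown;

impl TryRender for Known {
    fn try_render(&self) -> Result<String, NoLayout> {
        Ok("i32".to_string())
    }
}

impl TryRender for Unknown {
    fn try_render(&self) -> Result<String, NoLayout> {
        Err(NoLayout) // nested code propagates the error upward
    }
}

fn main() {
    assert_eq!(Known.render(), "i32");
    // Error recovery happens only here, at the outermost (codegen-like) layer.
    assert_eq!(Unknown.render(), "<opaque>");
}

Keeping the `unwrap_or_else` at the outermost caller is what lets intermediate conversions stay honest about failure while the final code generator still always produces something usable.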
<|file_name|>surface_drawing.cc<|end_file_name|><|fim▁begin|>#include "deps.h" // Not implemented: // SDL_ConvertSurface - need PixelFormat implementation // SDL_CreateRGBSurfaceFrom - need to think about memory management? clone? keep reference? // SDL_CreateRGBSurfaceWithFormatFrom - ditto // SDL_LoadBMP_RW // SDL_SaveBMP_RW // SDL_SetSurfacePalette - need palette implementation namespace sdl2_bindings { using namespace v8; METHOD(BlitScaled) { BEGIN(); UNWRAP(src, Surface, args[0]); SDL_Rect srcRect; SDL_Rect *srcRectPtr = nullptr; if (!args[1]->IsNull()) { extractRect(isolate, args[1]->ToObject(), &srcRect); srcRectPtr = &srcRect; } UNWRAP(dst, Surface, args[2]); SDL_Rect dstRect; SDL_Rect *dstRectPtr = nullptr; if (!args[3]->IsNull()) { extractRect(isolate, args[3]->ToObject(), &dstRect); dstRectPtr = &dstRect; } if (SDL_BlitScaled(src->surface_, srcRectPtr, dst->surface_, dstRectPtr) != 0) { THROW_SDL_ERROR(); } } // HELPER METHOD(BlitScaledCmp) { BEGIN(); UNWRAP(src, Surface, args[0]); INTARG(sx, 1); INTARG(sy, 2); INTARG(sw, 3); INTARG(sh, 4); UNWRAP(dst, Surface, args[5]); INTARG(dx, 6); INTARG(dy, 7); INTARG(dw, 8); INTARG(dh, 9); SDL_Rect srcRect = { .x = sx, .y = sy, .w = sw, .h = sh }; SDL_Rect dstRect = { .x = dx, .y = dy, .w = dw, .h = dh }; if (SDL_BlitScaled(src->surface_, &srcRect, dst->surface_, &dstRect) != 0) { THROW_SDL_ERROR(); } } METHOD(BlitSurface) { BEGIN(); UNWRAP(src, Surface, args[0]); SDL_Rect srcRect; SDL_Rect *srcRectPtr = nullptr; if (!args[1]->IsNull()) { extractRect(isolate, args[1]->ToObject(), &srcRect); srcRectPtr = &srcRect; } UNWRAP(dst, Surface, args[2]); SDL_Rect dstRect; SDL_Rect *dstRectPtr = nullptr; if (!args[3]->IsNull()) { extractRect(isolate, args[3]->ToObject(), &dstRect); dstRectPtr = &dstRect; } if (SDL_BlitSurface(src->surface_, srcRectPtr, dst->surface_, dstRectPtr) != 0) { THROW_SDL_ERROR(); } } // HELPER METHOD(BlitSurfaceAtPoint) { BEGIN(); UNWRAP(src, Surface, args[0]); UNWRAP(dst, Surface, args[1]); INTARG(x, 2); INTARG(y, 3); SDL_Rect dstRect; dstRect.x = x; dstRect.y = y; dstRect.w = src->surface_->w; dstRect.h = src->surface_->h; if (SDL_BlitSurface(src->surface_, nullptr, dst->surface_, &dstRect) != 0) { THROW_SDL_ERROR(); } } METHOD(ConvertPixels) { BEGIN(); INTARG(width, 0); INTARG(height, 1); UINT32ARG(srcFormat, 2); Local<ArrayBufferView> srcBuffer = Local<ArrayBufferView>::Cast(args[3]); void *src = srcBuffer->Buffer()->GetContents().Data(); INTARG(srcPitch, 4); UINT32ARG(dstFormat, 5); Local<ArrayBufferView> dstBuffer = Local<ArrayBufferView>::Cast(args[6]); void *dst = dstBuffer->Buffer()->GetContents().Data(); INTARG(dstPitch, 7); if (SDL_ConvertPixels(width, height, srcFormat, src, srcPitch, dstFormat, dst, dstPitch) < 0) { THROW_SDL_ERROR(); } } METHOD(ConvertSurfaceFormat) { BEGIN(); UNWRAP(surface, Surface, args[0]); UINT32ARG(format, 1); SDL_Surface *newSurface = SDL_ConvertSurfaceFormat(surface->surface_, format, 0); if (newSurface == nullptr) { THROW_SDL_ERROR(); } else { RETURN(Surface::NewInstance(isolate, newSurface, true)); } } METHOD(CreateRGBSurface) { BEGIN(); INTARG(width, 0); INTARG(height, 1); INTARG(depth, 2); UINT32ARG(rmask, 3); UINT32ARG(gmask, 4); UINT32ARG(bmask, 5); UINT32ARG(amask, 6); auto surface = SDL_CreateRGBSurface(0, width, height, depth, rmask, gmask, bmask, amask); if (surface == nullptr) { THROW_SDL_ERROR(); } else { RETURN(Surface::NewInstance(isolate, surface, true)); } } METHOD(CreateRGBSurfaceWithFormat) { BEGIN(); INTARG(width, 0); INTARG(height, 1); INTARG(depth, 2); 
UINT32ARG(format, 3); auto surface = SDL_CreateRGBSurfaceWithFormat(0, width, height, depth, format); if (surface == nullptr) { THROW_SDL_ERROR(); } else { RETURN(Surface::NewInstance(isolate, surface, true)); } } // SDL_CreateRGBSurfaceWithFormat METHOD(FillRect) { BEGIN(); UNWRAP(surface, Surface, args[0]); SDL_Rect rect; SDL_Rect *rectPtr = nullptr; if (!args[1]->IsNull()) { extractRect(isolate, args[1]->ToObject(), &rect); rectPtr = &rect; } UINT32ARG(color, 2); if (SDL_FillRect(surface->surface_, rectPtr, color) != 0) { THROW_SDL_ERROR(); } } METHOD(FillRects) { BEGIN(); UNWRAP(surface, Surface, args[0]); ARRAYARG(rects, 1); UINT32ARG(color, 2); int count = rects->Length(); SDL_Rect sdlRects[count]; for (int ix = 0; ix < count; ++ix) { extractRect(isolate, rects->Get(ix)->ToObject(), &sdlRects[ix]); } if (SDL_FillRects(surface->surface_, sdlRects, count, color) != 0) { THROW_SDL_ERROR(); } } METHOD(GetClipRect) { BEGIN(); UNWRAP(surface, Surface, args[0]); SDL_Rect out; SDL_GetClipRect(surface->surface_, &out); auto rect = MK_OBJECT(); populateRect(isolate, rect, &out); RETURN(rect); } METHOD(GetColorKey) { BEGIN(); UNWRAP(surface, Surface, args[0]); uint32_t colorKey; if (SDL_GetColorKey(surface->surface_, &colorKey) < 0) { THROW_SDL_ERROR(); } else { RETURN(MK_NUMBER(colorKey)); } } METHOD(GetSurfaceAlphaMod) { BEGIN(); UNWRAP(surface, Surface, args[0]); uint8_t alphaMod; if (SDL_GetSurfaceAlphaMod(surface->surface_, &alphaMod) < 0) { THROW_SDL_ERROR(); } else { RETURN(MK_NUMBER(alphaMod)); } } METHOD(GetSurfaceBlendMode) { BEGIN(); UNWRAP(surface, Surface, args[0]); SDL_BlendMode blendMode; if (SDL_GetSurfaceBlendMode(surface->surface_, &blendMode) < 0) { THROW_SDL_ERROR(); } else { RETURN(MK_NUMBER(blendMode)); } } METHOD(GetSurfaceColorMod) { BEGIN(); UNWRAP(surface, Surface, args[0]); uint8_t r, g, b; if (SDL_GetSurfaceColorMod(surface->surface_, &r, &g, &b) < 0) { THROW_SDL_ERROR(); } else { auto out = MK_OBJECT(); GET_CONTEXT(); SET_KEY(out, SYM(r), MK_NUMBER(r)); SET_KEY(out, SYM(g), MK_NUMBER(g)); SET_KEY(out, SYM(b), MK_NUMBER(b)); RETURN(out); } } METHOD(LoadBMP) { BEGIN();<|fim▁hole|> auto surface = SDL_LoadBMP(*file); if (surface == nullptr) { THROW_SDL_ERROR(); } else { RETURN(Surface::NewInstance(isolate, surface, true)); } } METHOD(LockSurface) { BEGIN(); UNWRAP(surface, Surface, args[0]); if (SDL_LockSurface(surface->surface_) < 0) { THROW_SDL_ERROR(); } } METHOD(LowerBlit) { BEGIN(); UNWRAP(src, Surface, args[0]); SDL_Rect srcRect; SDL_Rect *srcRectPtr = nullptr; if (!args[1]->IsNull()) { extractRect(isolate, args[1]->ToObject(), &srcRect); srcRectPtr = &srcRect; } UNWRAP(dst, Surface, args[2]); SDL_Rect dstRect; SDL_Rect *dstRectPtr = nullptr; if (!args[3]->IsNull()) { extractRect(isolate, args[3]->ToObject(), &dstRect); dstRectPtr = &dstRect; } if (SDL_LowerBlit(src->surface_, srcRectPtr, dst->surface_, dstRectPtr) != 0) { THROW_SDL_ERROR(); } } // HELPER METHOD(LowerBlitAtPoint) { BEGIN(); UNWRAP(src, Surface, args[0]); UNWRAP(dst, Surface, args[1]); INTARG(x, 2); INTARG(y, 3); SDL_Rect dstRect; dstRect.x = x; dstRect.y = y; dstRect.w = src->surface_->w; dstRect.h = src->surface_->h; if (SDL_LowerBlit(src->surface_, nullptr, dst->surface_, &dstRect) != 0) { THROW_SDL_ERROR(); } } METHOD(LowerBlitScaled) { BEGIN(); UNWRAP(src, Surface, args[0]); SDL_Rect srcRect; SDL_Rect *srcRectPtr = nullptr; if (!args[1]->IsNull()) { extractRect(isolate, args[1]->ToObject(), &srcRect); srcRectPtr = &srcRect; } UNWRAP(dst, Surface, args[2]); SDL_Rect dstRect; SDL_Rect 
*dstRectPtr = nullptr; if (!args[3]->IsNull()) { extractRect(isolate, args[3]->ToObject(), &dstRect); dstRectPtr = &dstRect; } if (SDL_LowerBlitScaled(src->surface_, srcRectPtr, dst->surface_, dstRectPtr) != 0) { THROW_SDL_ERROR(); } } // HELPER METHOD(LowerBlitScaledCmp) { BEGIN(); UNWRAP(src, Surface, args[0]); INTARG(sx, 1); INTARG(sy, 2); INTARG(sw, 3); INTARG(sh, 4); UNWRAP(dst, Surface, args[5]); INTARG(dx, 6); INTARG(dy, 7); INTARG(dw, 8); INTARG(dh, 9); SDL_Rect srcRect = { .x = sx, .y = sy, .w = sw, .h = sh }; SDL_Rect dstRect = { .x = dx, .y = dy, .w = dw, .h = dh }; if (SDL_LowerBlitScaled(src->surface_, &srcRect, dst->surface_, &dstRect) != 0) { THROW_SDL_ERROR(); } } METHOD(MustLock) { BEGIN(); UNWRAP(surface, Surface, args[0]); RETURN(MK_BOOL(SDL_MUSTLOCK(surface->surface_))); } METHOD(SaveBMP) { BEGIN(); UNWRAP(surface, Surface, args[0]); STRINGARG(file, 1); if (SDL_SaveBMP(surface->surface_, *file) < 0) { THROW_SDL_ERROR(); } } METHOD(SetClipRect) { BEGIN(); UNWRAP(surface, Surface, args[0]); OBJECTARG(rect, 1); SDL_Rect sdlRect; extractRect(isolate, rect, &sdlRect); RETURN(MK_BOOL(SDL_SetClipRect(surface->surface_, &sdlRect))); } METHOD(SetColorKey) { BEGIN(); UNWRAP(surface, Surface, args[0]); BOOLARG(enable, 1); UINT32ARG(color, 2); if (SDL_SetColorKey(surface->surface_, enable ? SDL_TRUE : SDL_FALSE, color) < 0) { THROW_SDL_ERROR(); } } METHOD(SetSurfaceAlphaMod) { BEGIN(); UNWRAP(surface, Surface, args[0]); UINT32ARG(alphaMod, 1); if (SDL_SetSurfaceAlphaMod(surface->surface_, alphaMod) < 0) { THROW_SDL_ERROR(); } } METHOD(SetSurfaceBlendMode) { BEGIN(); UNWRAP(surface, Surface, args[0]); INTARG(blendMode, 1); if (SDL_SetSurfaceBlendMode(surface->surface_, (SDL_BlendMode)blendMode) < 0) { THROW_SDL_ERROR(); } } METHOD(SetSurfaceColorMod) { BEGIN(); UNWRAP(surface, Surface, args[0]); INTARG(r, 1); INTARG(g, 2); INTARG(b, 3); if (SDL_SetSurfaceColorMod(surface->surface_, r, g, b) < 0) { THROW_SDL_ERROR(); } } METHOD(SetSurfaceRLE) { BEGIN(); UNWRAP(surface, Surface, args[0]); INTARG(flag, 1); if (SDL_SetSurfaceRLE(surface->surface_, flag) < 0) { THROW_SDL_ERROR(); } } METHOD(UnlockSurface) { BEGIN(); UNWRAP(surface, Surface, args[0]); SDL_UnlockSurface(surface->surface_); } void InitSurfaceDrawingFunctions(Local<Object> exports) { NODE_SET_METHOD(exports, "blitScaled", BlitScaled); NODE_SET_METHOD(exports, "blitScaledCmp", BlitScaledCmp); NODE_SET_METHOD(exports, "blitSurface", BlitSurface); NODE_SET_METHOD(exports, "blitSurfaceAtPoint", BlitSurfaceAtPoint); NODE_SET_METHOD(exports, "convertPixels", ConvertPixels); NODE_SET_METHOD(exports, "convertSurfaceFormat", ConvertSurfaceFormat); NODE_SET_METHOD(exports, "createRGBSurface", CreateRGBSurface); NODE_SET_METHOD(exports, "createRGBSurfaceWithFormat", CreateRGBSurfaceWithFormat); NODE_SET_METHOD(exports, "fillRect", FillRect); NODE_SET_METHOD(exports, "fillRects", FillRects); NODE_SET_METHOD(exports, "getClipRect", GetClipRect); NODE_SET_METHOD(exports, "getColorKey", GetColorKey); NODE_SET_METHOD(exports, "getSurfaceAlphaMod", GetSurfaceAlphaMod); NODE_SET_METHOD(exports, "getSurfaceBlendMode", GetSurfaceBlendMode); NODE_SET_METHOD(exports, "getSurfaceColorMod", GetSurfaceColorMod); NODE_SET_METHOD(exports, "loadBMP", LoadBMP); NODE_SET_METHOD(exports, "lockSurface", LockSurface); NODE_SET_METHOD(exports, "lowerBlit", LowerBlit); NODE_SET_METHOD(exports, "lowerBlitAtPoint", LowerBlitAtPoint); NODE_SET_METHOD(exports, "lowerBlitScaled", LowerBlitScaled); NODE_SET_METHOD(exports, "lowerBlitScaledCmp", LowerBlitScaledCmp);
NODE_SET_METHOD(exports, "mustLock", MustLock); NODE_SET_METHOD(exports, "saveBMP", SaveBMP); NODE_SET_METHOD(exports, "setClipRect", SetClipRect); NODE_SET_METHOD(exports, "setColorKey", SetColorKey); NODE_SET_METHOD(exports, "setSurfaceAlphaMod", SetSurfaceAlphaMod); NODE_SET_METHOD(exports, "setSurfaceBlendMode", SetSurfaceBlendMode); NODE_SET_METHOD(exports, "setSurfaceColorMod", SetSurfaceColorMod); NODE_SET_METHOD(exports, "setSurfaceRLE", SetSurfaceRLE); NODE_SET_METHOD(exports, "unlockSurface", UnlockSurface); } }<|fim▁end|>
STRINGARG(file, 0);
<|file_name|>sorting.ts<|end_file_name|><|fim▁begin|>/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ module VZ.Sorting { /** * Compares tag names asciinumerically broken into components. * * <p>This is the comparison function used for sorting most string values in * TensorBoard. Unlike the standard asciibetical comparator, this function * knows that 'a10b' > 'a2b'. Fixed point and engineering notation are * supported. This function also splits the input by slash and underscore to * perform array comparison. Therefore it knows that 'a/a' < 'a+/a' even * though '+' < '/' in the ASCII table. */ export function compareTagNames(a, b: string): number { let ai = 0;<|fim▁hole|> if (isDigit(a[ai]) && isDigit(b[bi])) { const ais = ai; const bis = bi; ai = consumeNumber(a, ai + 1); bi = consumeNumber(b, bi + 1); const an = parseFloat(a.slice(ais, ai)); const bn = parseFloat(b.slice(bis, bi)); if (an < bn) return -1; if (an > bn) return 1; continue; } if (isBreak(a[ai])) { if (!isBreak(b[bi])) return -1; } else if (isBreak(b[bi])) { return 1; } else if (a[ai] < b[bi]) { return -1; } else if (a[ai] > b[bi]) { return 1; } ai++; bi++; } } function consumeNumber(s: string, i: number): number { enum State { NATURAL, REAL, EXPONENT_SIGN, EXPONENT } let state = State.NATURAL; for (; i < s.length; i++) { if (state === State.NATURAL) { if (s[i] === '.') { state = State.REAL; } else if (s[i] === 'e' || s[i] === 'E') { state = State.EXPONENT_SIGN; } else if (!isDigit(s[i])) { break; } } else if (state === State.REAL) { if (s[i] === 'e' || s[i] === 'E') { state = State.EXPONENT_SIGN; } else if (!isDigit(s[i])) { break; } } else if (state === State.EXPONENT_SIGN) { if (isDigit(s[i]) || s[i] === '+' || s[i] === '-') { state = State.EXPONENT; } else { break; } } else if (state === State.EXPONENT) { if (!isDigit(s[i])) break; } } return i; } function isDigit(c: string): boolean { return '0' <= c && c <= '9'; } function isBreak(c: string): boolean { // TODO(jart): Remove underscore when people stop using it like a slash. return c === '/' || c === '_' || isDigit(c); } }<|fim▁end|>
let bi = 0; while (true) { if (ai === a.length) return bi === b.length ? 0 : -1; if (bi === b.length) return 1;
<|file_name|>bitcoin_nl.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="nl" version="2.0"> <context> <name>AboutDialog</name> <message> <location filename="../forms/aboutdialog.ui" line="+14"/> <source>About Freshcoin Core</source> <translation>Over Freshcoin Core</translation> </message> <message> <location line="+39"/> <source>&lt;b&gt;Freshcoin Core&lt;/b&gt; version</source> <translation>&lt;b&gt; Freshcoin Core&lt;/b&gt; versie</translation> </message> <message> <location line="+57"/> <source> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source> <translation> Dit is experimentele software. Gedistribueerd onder de MIT/X11 software licentie, zie het bijgevoegde bestand COPYING of http://www.opensource.org/licenses/mit-license.php. Dit product bevat software ontwikkeld door het OpenSSL Project voor gebruik in de OpenSSL Toolkit (http://www.openssl.org/) en cryptografische software gemaakt door Eric Young ([email protected]) en UPnP software geschreven door Thomas Bernard.</translation> </message> <message> <location filename="../utilitydialog.cpp" line="+29"/> <source>Copyright</source> <translation>Auteursrecht</translation> </message> <message> <location line="+0"/> <source>The Freshcoin Core developers</source> <translation>De Freshcoin Core ontwikkelaars</translation> </message> </context> <context> <name>AddressBookPage</name> <message> <location filename="../forms/addressbookpage.ui" line="+30"/> <source>Double-click to edit address or label</source> <translation>Dubbelklik om het adres of label te wijzigen</translation> </message> <message> <location line="+27"/> <source>Create a new address</source> <translation>Maak een nieuw adres aan</translation> </message> <message> <location line="+3"/> <source>&amp;New</source> <translation>&amp;Nieuw</translation> </message> <message> <location line="+11"/> <source>Copy the currently selected address to the system clipboard</source> <translation>Kopiëer het geselecteerde adres naar het klembord</translation> </message> <message> <location line="+3"/> <source>&amp;Copy</source> <translation>&amp;Kopiëer</translation> </message> <message> <location line="+52"/> <source>C&amp;lose</source> <translation>S&amp;luiten</translation> </message> <message> <location filename="../addressbookpage.cpp" line="+74"/> <source>&amp;Copy Address</source> <translation>&amp;Kopiëer Adres</translation> </message> <message> <location filename="../forms/addressbookpage.ui" line="-41"/> <source>Delete the currently selected address from the list</source> <translation>Verwijder het geselecteerde adres uit de lijst</translation> </message> <message> <location line="+27"/> <source>Export the data in the current tab to a file</source> <translation>Exporteer de gegevens in de huidige tab naar een bestand</translation> </message> <message> <location line="+3"/> <source>&amp;Export</source> <translation>&amp;Exporteer</translation> </message> <message> <location line="-27"/> <source>&amp;Delete</source> <translation>&amp;Verwijder</translation> </message> <message> <location filename="../addressbookpage.cpp" line="-30"/> <source>Choose the address to send coins to</source> 
<translation>Kies het adres om munten naar te verzenden</translation> </message> <message> <location line="+1"/> <source>Choose the address to receive coins with</source> <translation>Kies het adres om munten voor te ontvangen</translation> </message> <message> <location line="+5"/> <source>C&amp;hoose</source> <translation>K&amp;iezen</translation> </message> <message> <location line="+6"/> <source>Sending addresses</source> <translation>Verzendadressen</translation> </message> <message> <location line="+1"/> <source>Receiving addresses</source> <translation>Ontvangst Adressen</translation> </message> <message> <location line="+7"/> <source>These are your Freshcoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source> <translation>Dit zijn uw opgeslagen adressen om betalingen naar te verzenden. Controleer altijd het bedrag en het ontvangende adres voordat u uw freshcoins verzendt.</translation> </message> <message> <location line="+4"/> <source>These are your Freshcoin addresses for receiving payments. It is recommended to use a new receiving address for each transaction.</source> <translation>Dit zijn uw Freshcoin adressen om betalingen te ontvangen. We raden u aan om een nieuw ontvangstadres voor iedere transactie te gebruiken.</translation> </message> <message> <location line="+7"/> <source>Copy &amp;Label</source> <translation>Kopiëer &amp;Label</translation> </message> <message> <location line="+1"/> <source>&amp;Edit</source> <translation>&amp;Bewerken</translation> </message> <message> <location line="+194"/> <source>Export Address List</source> <translation>Exporteer adreslijst</translation> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>Kommagescheiden bestand (*.csv)</translation> </message> <message> <location line="+13"/> <source>Exporting Failed</source> <translation>Export mislukt</translation> </message> <message> <location line="+1"/> <source>There was an error trying to save the address list to %1.</source> <translation>Er is een fout opgetreden tijdens de opslag naar %1</translation> </message> </context> <context> <name>AddressTableModel</name> <message> <location filename="../addresstablemodel.cpp" line="+168"/> <source>Label</source> <translation>Label</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Adres</translation> </message> <message> <location line="+36"/> <source>(no label)</source> <translation>(geen label)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <location filename="../forms/askpassphrasedialog.ui" line="+26"/> <source>Passphrase Dialog</source> <translation>Wachtwoord instellen</translation> </message> <message> <location line="+21"/> <source>Enter passphrase</source> <translation>Voer wachtwoord in</translation> </message> <message> <location line="+14"/> <source>New passphrase</source> <translation>Nieuw wachtwoord</translation> </message> <message> <location line="+14"/> <source>Repeat new passphrase</source> <translation>Herhaal nieuw wachtwoord</translation> </message> <message> <location filename="../askpassphrasedialog.cpp" line="+40"/> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;10 or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation>Vul een nieuw wachtwoord in voor uw portemonnee. 
&lt;br/&gt; Gebruik een wachtwoord van &lt;b&gt;10 of meer lukrake karakters&lt;/b&gt;, of &lt;b&gt; acht of meer woorden&lt;/b&gt; . </translation> </message> <message> <location line="+1"/> <source>Encrypt wallet</source> <translation>Versleutel portemonnee</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>Deze operatie vereist uw portemonneewachtwoord om de portemonnee te openen.</translation> </message> <message> <location line="+5"/> <source>Unlock wallet</source> <translation>Open portemonnee</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>Deze operatie vereist uw portemonneewachtwoord om de portemonnee te ontsleutelen</translation> </message> <message> <location line="+5"/> <source>Decrypt wallet</source> <translation>Ontsleutel portemonnee</translation> </message> <message> <location line="+3"/> <source>Change passphrase</source> <translation>Wijzig wachtwoord</translation> </message> <message> <location line="+1"/> <source>Enter the old and new passphrase to the wallet.</source> <translation>Vul uw oude en nieuwe portemonneewachtwoord in.</translation> </message> <message> <location line="+46"/> <source>Confirm wallet encryption</source> <translation>Bevestig versleuteling van de portemonnee</translation> </message> <message> <location line="+1"/> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR FRSHCOINS&lt;/b&gt;!</source> <translation>Waarschuwing: Als u uw portemonnee versleutelt en uw wachtwoord vergeet, zult u &lt;b&gt;AL UW FRSHCOINS KWIJT RAKEN&lt;/b&gt;!</translation> </message> <message> <location line="+0"/> <source>Are you sure you wish to encrypt your wallet?</source> <translation>Weet u zeker dat u uw portemonnee wilt versleutelen?</translation> </message> <message> <location line="+15"/> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation>BELANGRIJK: Elke eerder gemaakte backup van uw portemonneebestand dient u te vervangen door het nieuw gegenereerde, versleutelde portemonneebestand. Om veiligheidsredenen zullen eerdere backups van het niet-versleutelde portemonneebestand onbruikbaar worden zodra u uw nieuwe, versleutelde, portemonnee begint te gebruiken.</translation> </message> <message> <location line="+100"/> <location line="+24"/> <source>Warning: The Caps Lock key is on!</source> <translation>Waarschuwing: De Caps-Lock toets staat aan!</translation> </message> <message> <location line="-130"/> <location line="+58"/> <source>Wallet encrypted</source> <translation>Portemonnee versleuteld</translation> </message> <message> <location line="-56"/> <source>Freshcoin Core will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your Freshcoins from being stolen by malware infecting your computer.</source> <translation>Freshcoin zal nu afsluiten om het versleutelingsproces te voltooien. 
Onthoud dat het versleutelen van uw portemonnee u niet volledig kan beschermen: Malware kan uw computer infecteren en uw freshcoins stelen.</translation> </message> <message> <location line="+13"/> <location line="+7"/> <location line="+42"/> <location line="+6"/> <source>Wallet encryption failed</source> <translation>Portemonneeversleuteling mislukt</translation> </message> <message> <location line="-54"/> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation>Portemonneeversleuteling mislukt door een interne fout. Uw portemonnee is niet versleuteld.</translation> </message> <message> <location line="+7"/> <location line="+48"/> <source>The supplied passphrases do not match.</source> <translation>De opgegeven wachtwoorden komen niet overeen</translation> </message> <message> <location line="-37"/> <source>Wallet unlock failed</source> <translation>Portemonnee openen mislukt</translation> </message> <message> <location line="+1"/> <location line="+11"/> <location line="+19"/> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>Het opgegeven wachtwoord voor de portemonnee-ontsleuteling is niet correct.</translation> </message> <message> <location line="-20"/> <source>Wallet decryption failed</source> <translation>Portemonnee-ontsleuteling mislukt</translation> </message> <message> <location line="+14"/> <source>Wallet passphrase was successfully changed.</source> <translation>Portemonneewachtwoord is met succes gewijzigd.</translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <location filename="../bitcoingui.cpp" line="+295"/> <source>Sign &amp;message...</source> <translation>&amp;Onderteken bericht...</translation> </message> <message> <location line="+335"/> <source>Synchronizing with network...</source> <translation>Synchroniseren met netwerk...</translation> </message> <message><|fim▁hole|> </message> <message> <location line="-137"/> <source>Node</source> <translation>Node</translation> </message> <message> <location line="+138"/> <source>Show general overview of wallet</source> <translation>Toon algemeen overzicht van de portemonnee</translation> </message> <message> <location line="+20"/> <source>&amp;Transactions</source> <translation>&amp;Transacties</translation> </message> <message> <location line="+1"/> <source>Browse transaction history</source> <translation>Blader door transactiegeschiedenis</translation> </message> <message> <location line="+17"/> <source>E&amp;xit</source> <translation>&amp;Afsluiten</translation> </message> <message> <location line="+1"/> <source>Quit application</source> <translation>Programma afsluiten</translation> </message> <message> <location line="+7"/> <source>Show information about Freshcoin Core</source> <translation>Laat informatie zien over Freshcoin</translation> </message> <message> <location line="+3"/> <location line="+2"/> <source>About &amp;Qt</source> <translation>Over &amp;Qt</translation> </message> <message> <location line="+2"/> <source>Show information about Qt</source> <translation>Toon informatie over Qt</translation> </message> <message> <location line="+2"/> <source>&amp;Options...</source> <translation>O&amp;pties...</translation> </message> <message> <location line="+9"/> <source>&amp;Encrypt Wallet...</source> <translation>&amp;Versleutel Portemonnee...</translation> </message> <message> <location line="+3"/> <source>&amp;Backup Wallet...</source> <translation>&amp;Backup Portemonnee...</translation> 
</message> <message> <location line="+2"/> <source>&amp;Change Passphrase...</source> <translation>&amp;Wijzig Wachtwoord...</translation> </message> <message> <location line="+10"/> <source>&amp;sending addresses...</source> <translation>&amp;Adresboek...</translation> </message> <message> <location line="+2"/> <source>&amp;receiving addresses...</source> <translation>&amp;Mijn adressen...</translation> </message> <message> <location line="+3"/> <source>Open &amp;URI...</source> <translation>Open &amp;URI...</translation> </message> <message> <location line="+325"/> <source>Importing blocks from disk...</source> <translation>Blokken aan het importeren vanaf harde schijf...</translation> </message> <message> <location line="+3"/> <source>Reindexing blocks on disk...</source> <translation>Blokken aan het herindexeren op harde schijf...</translation> </message> <message> <location line="-405"/> <source>Send coins to a Freshcoin address</source> <translation>Verzend munten naar een Freshcoin adres</translation> </message> <message> <location line="+49"/> <source>Modify configuration options for Freshcoin Core</source> <translation>Wijzig instellingen van Freshcoin</translation> </message> <message> <location line="+12"/> <source>Backup wallet to another location</source> <translation>Backup portemonnee naar een andere locatie</translation> </message> <message> <location line="+2"/> <source>Change the passphrase used for wallet encryption</source> <translation>Wijzig het wachtwoord voor uw portemonneeversleuteling</translation> </message> <message> <location line="+6"/> <source>&amp;Debug window</source> <translation>&amp;Debugscherm</translation> </message> <message> <location line="+1"/> <source>Open debugging and diagnostic console</source> <translation>Open debugging en diagnostische console</translation> </message> <message> <location line="-4"/> <source>&amp;Verify message...</source> <translation>&amp;Verifieer bericht...</translation> </message> <message> <location line="+430"/> <source>Freshcoin</source> <translation>Freshcoin</translation> </message> <message> <location line="-643"/> <source>Wallet</source> <translation>Portemonnee</translation> </message> <message> <location line="+146"/> <source>&amp;Send</source> <translation>&amp;Verzenden</translation> </message> <message> <location line="+7"/> <source>&amp;Receive</source> <translation>&amp;Ontvangen</translation> </message> <message> <location line="+46"/> <location line="+2"/> <source>&amp;Show / Hide</source> <translation>&amp;Toon / Verberg</translation> </message> <message> <location line="+1"/> <source>Show or hide the main Window</source> <translation>Toon of verberg het hoofdvenster</translation> </message> <message> <location line="+3"/> <source>Encrypt the private keys that belong to your wallet</source> <translation>Versleutel de geheime sleutels die bij uw portemonnee horen</translation> </message> <message> <location line="+7"/> <source>Sign messages with your Freshcoin addresses to prove you own them</source> <translation>Onderteken berichten met uw Freshcoin adressen om te bewijzen dat u deze adressen bezit</translation> </message> <message> <location line="+2"/> <source>Verify messages to ensure they were signed with specified Freshcoin addresses</source> <translation>Verifieer handtekeningen om zeker te zijn dat de berichten zijn ondertekend met de gespecificeerde Freshcoin adressen</translation> </message> <message> <location line="+48"/> <source>&amp;File</source> <translation>&amp;Bestand</translation> </message> 
<message> <location line="+14"/> <source>&amp;Settings</source> <translation>&amp;Instellingen</translation> </message> <message> <location line="+9"/> <source>&amp;Help</source> <translation>&amp;Hulp</translation> </message> <message> <location line="+15"/> <source>Tabs toolbar</source> <translation>Tab-werkbalk</translation> </message> <message> <location line="-284"/> <location line="+376"/> <source>[testnet]</source> <translation>[testnetwerk]</translation> </message> <message> <location line="-401"/> <source>Freshcoin Core</source> <translation>Freshcoin Core</translation> </message> <message> <location line="+163"/> <source>Request payments (generates QR codes and freshcoin: URIs)</source> <translation>Vraag betaling aan (genereert QR codes en "freshcoin:" URIs)</translation> </message> <message> <location line="+29"/> <location line="+2"/> <source>&amp;About Freshcoin Core</source> <translation>&amp;Over Freshcoin Core</translation> </message> <message> <location line="+35"/> <source>Show the list of used sending addresses and labels</source> <translation>Toon de lijst met gebruikte verzend adressen en labels</translation> </message> <message> <location line="+2"/> <source>Show the list of used receiving addresses and labels</source> <translation>Toon de lijst met gebruikte ontvangst adressen en labels</translation> </message> <message> <location line="+3"/> <source>Open a freshcoin: URI or payment request</source> <translation>Open een freshcoin: URI of betalingsverzoek</translation> </message> <message> <location line="+2"/> <source>&amp;Command-line options</source> <translation>&amp;Commandoregel-opties</translation> </message> <message> <location line="+1"/> <source>Show the Freshcoin Core help message to get a list with possible Freshcoin Core command-line options</source> <translation>Toon het help-bericht voor een lijst met geldige commandoregel-opties</translation> </message> <message> <location line="+159"/> <location line="+5"/> <source>Freshcoin client</source> <translation>Freshcoin client</translation> </message> <message numerus="yes"> <location line="+142"/> <source>%n active connection(s) to Freshcoin network</source> <translation><numerusform>%n actieve connectie naar het Freshcoin netwerk</numerusform><numerusform>%n actieve connecties naar het Freshcoin netwerk</numerusform></translation> </message> <message> <location line="+22"/> <source>No block source available...</source> <translation>Geen bron van blokken beschikbaar...</translation> </message> <message> <location line="+12"/> <source>Processed %1 of %2 (estimated) blocks of transaction history.</source> <translation>%1 van %2 (geschat) blokken van de transactiehistorie verwerkt.</translation> </message> <message> <location line="+4"/> <source>Processed %1 blocks of transaction history.</source> <translation>%1 blokken van transactiehistorie verwerkt.</translation> </message> <message numerus="yes"> <location line="+23"/> <source>%n hour(s)</source> <translation><numerusform>%n uur</numerusform><numerusform>%n uur</numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s)</source> <translation><numerusform>%n dag</numerusform><numerusform>%n dagen</numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n week(s)</source> <translation><numerusform>%n week</numerusform><numerusform>%n weken</numerusform></translation> </message> <message> <location line="+4"/> <source>%1 behind</source> <translation>%1 achter</translation> 
</message> <message> <location line="+21"/> <source>Last received block was generated %1 ago.</source> <translation>Laatst ontvangen blok was %1 geleden gegenereerd.</translation> </message> <message> <location line="+2"/> <source>Transactions after this will not yet be visible.</source> <translation>Transacties na dit moment zullen nu nog niet zichtbaar zijn.</translation> </message> <message> <location line="+27"/> <source>Error</source> <translation>Fout</translation> </message> <message> <location line="+3"/> <source>Warning</source> <translation>Waarschuwing</translation> </message> <message> <location line="+3"/> <source>Information</source> <translation>Informatie</translation> </message> <message> <location line="-85"/> <source>Up to date</source> <translation>Bijgewerkt</translation> </message> <message> <location line="+34"/> <source>Catching up...</source> <translation>Aan het bijwerken...</translation> </message> <message> <location line="+130"/> <source>Sent transaction</source> <translation>Verzonden transactie</translation> </message> <message> <location line="+0"/> <source>Incoming transaction</source> <translation>Binnenkomende transactie</translation> </message> <message> <location line="+1"/> <source>Date: %1 Amount: %2 Type: %3 Address: %4 </source> <translation>Datum: %1 Bedrag: %2 Type: %3 Adres: %4 </translation> </message> <message> <location line="+69"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>Portemonnee is &lt;b&gt;versleuteld&lt;/b&gt; en momenteel &lt;b&gt;geopend&lt;/b&gt;</translation> </message> <message> <location line="+8"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>Portemonnee is &lt;b&gt;versleuteld&lt;/b&gt; en momenteel &lt;b&gt;gesloten&lt;/b&gt;</translation> </message> <message> <location filename="../bitcoin.cpp" line="+438"/> <source>A fatal error occurred. Freshcoin Core can no longer continue safely and will quit.</source> <translation>Er is een fatale fout opgetreden. 
Freshcoin kan niet meer veilig doorgaan en zal nu afgesloten worden.</translation> </message> </context> <context> <name>ClientModel</name> <message> <location filename="../clientmodel.cpp" line="+119"/> <source>Network Alert</source> <translation>Netwerkwaarschuwing</translation> </message> </context> <context> <name>CoinControlDialog</name> <message> <location filename="../forms/coincontroldialog.ui" line="+14"/> <source>Coin Control Address Selection</source> <translation>Geavanceerde muntopties - adresselectie</translation> </message> <message> <location line="+34"/> <source>Quantity:</source> <translation>Kwantiteit:</translation> </message> <message> <location line="+29"/> <source>Bytes:</source> <translation>Bytes:</translation> </message> <message> <location line="+45"/> <source>Amount:</source> <translation>Bedrag:</translation> </message> <message> <location line="+29"/> <source>Priority:</source> <translation>Prioriteit:</translation> </message> <message> <location line="+45"/> <source>Fee:</source> <translation>Kosten:</translation> </message> <message> <location line="+32"/> <source>Low Output:</source> <translation>Lage uitvoer:</translation> </message> <message> <location line="+48"/> <source>After Fee:</source> <translation>Na kosten:</translation> </message> <message> <location line="+32"/> <source>Change:</source> <translation>Restant:</translation> </message> <message> <location line="+63"/> <source>(un)select all</source> <translation>(de)selecteer alles</translation> </message> <message> <location line="+13"/> <source>Tree mode</source> <translation>Hiërarchische modus</translation> </message> <message> <location line="+16"/> <source>List mode</source> <translation>Lijstmodus</translation> </message> <message> <location line="+52"/> <source>Amount</source> <translation>Bedrag</translation> </message> <message> <location line="+10"/> <source>Address</source> <translation>Adres</translation> </message> <message> <location line="+5"/> <source>Date</source> <translation>Datum</translation> </message> <message> <location line="+5"/> <source>Confirmations</source> <translation>Bevestigingen</translation> </message> <message> <location line="+3"/> <source>Confirmed</source> <translation>Bevestigd</translation> </message> <message> <location line="+5"/> <source>Priority</source> <translation>Prioriteit</translation> </message> <message> <location filename="../coincontroldialog.cpp" line="+42"/> <source>Copy address</source> <translation>Kopieer adres</translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation>Kopieer label</translation> </message> <message> <location line="+1"/> <location line="+26"/> <source>Copy amount</source> <translation>Kopieer bedrag</translation> </message> <message> <location line="-25"/> <source>Copy transaction ID</source> <translation>Kopieer transactie-ID</translation> </message> <message> <location line="+1"/> <source>Lock unspent</source> <translation>Blokkeer ongebruikte</translation> </message> <message> <location line="+1"/> <source>Unlock unspent</source> <translation>Deblokkeer ongebruikte</translation> </message> <message> <location line="+22"/> <source>Copy quantity</source> <translation>Kopieer aantal</translation> </message> <message> <location line="+2"/> <source>Copy fee</source> <translation>Kopieer kosten</translation> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation>Kopieer na kosten</translation> </message> <message> <location line="+1"/> <source>Copy 
bytes</source> <translation>Kopieer bytes</translation> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation>Kopieer prioriteit</translation> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation>Kopieer lage uitvoer</translation> </message> <message> <location line="+1"/> <source>Copy change</source> <translation>Kopieer restant</translation> </message> <message> <location line="+323"/> <source>highest</source> <translation>hoogste</translation> </message> <message> <location line="+1"/> <source>higher</source> <translation>hoger</translation> </message> <message> <location line="+1"/> <source>high</source> <translation>hoog</translation> </message> <message> <location line="+1"/> <source>medium-high</source> <translation>gemiddeld-hoog</translation> </message> <message> <location line="+1"/> <source>medium</source> <translation>gemiddeld</translation> </message> <message> <location line="+4"/> <source>low-medium</source> <translation>laag-gemiddeld</translation> </message> <message> <location line="+1"/> <source>low</source> <translation>laag</translation> </message> <message> <location line="+1"/> <source>lower</source> <translation>lager</translation> </message> <message> <location line="+1"/> <source>lowest</source> <translation>laagste</translation> </message> <message> <location line="+11"/> <source>(%1 locked)</source> <translation>(%1 geblokkeerd)</translation> </message> <message> <location line="+31"/> <source>none</source> <translation>geen</translation> </message> <message> <location line="+140"/> <source>Dust</source> <translation>Stof</translation> </message> <message> <location line="+0"/> <source>yes</source> <translation>ja</translation> </message> <message> <location line="+0"/> <source>no</source> <translation>nee</translation> </message> <message> <location line="+10"/> <source>This label turns red, if the transaction size is greater than 1000 bytes.</source> <translation>Dit label wordt rood als de transactiegrootte meer dan 1000 bytes is.</translation> </message> <message> <location line="+1"/> <location line="+5"/> <source>This means a fee of at least %1 per kB is required.</source> <translation>Dit betekent dat een vergoeding van minimaal %1 per kB nodig is.</translation> </message> <message> <location line="-4"/> <source>Can vary +/- 1 byte per input.</source> <translation>Kan +/- 1 byte per invoer variëren.</translation> </message> <message> <location line="+2"/> <source>Transactions with higher priority are more likely to get included into a block.</source> <translation>Transacties met een hogere prioriteit zullen eerder in een blok verwerkt worden.</translation> </message> <message> <location line="+1"/> <source>This label turns red, if the priority is smaller than &quot;medium&quot;.</source> <translation>Dit label wordt rood als de prioriteit lager is dan &quot;gemiddeld&quot;.</translation> </message> <message> <location line="+3"/> <source>This label turns red, if any recipient receives an amount smaller than %1.</source> <translation>Dit label wordt rood wanneer enig ontvanger een bedrag van minder dan %1 ontvangt.</translation> </message> <message> <location line="+1"/> <location line="+4"/> <source>This means a fee of at least %1 is required.</source> <translation>Dit betekent dat een minimale vergoeding van %1 nodig is.</translation> </message> <message> <location line="-3"/> <source>Amounts below 0.546 times the minimum relay fee are shown as dust.</source> <translation>Bedragen kleiner 
dan 0.546 keer de minimale doorstuurvergoeding (kosten) worden als stof aangemerkt.</translation> </message> <message> <location line="+2"/> <source>This label turns red, if the change is smaller than %1.</source> <translation>Dit label wordt rood als het restant kleiner is dan %1.</translation> </message> <message> <location line="+43"/> <location line="+66"/> <source>(no label)</source> <translation>(geen label)</translation> </message> <message> <location line="-9"/> <source>change from %1 (%2)</source> <translation>restant van %1 (%2)</translation> </message> <message> <location line="+1"/> <source>(change)</source> <translation>(wisselgeld)</translation> </message> </context> <context> <name>EditAddressDialog</name> <message> <location filename="../forms/editaddressdialog.ui" line="+14"/> <source>Edit Address</source> <translation>Bewerk Adres</translation> </message> <message> <location line="+11"/> <source>&amp;Label</source> <translation>&amp;Label</translation> </message> <message> <location line="+10"/> <source>The label associated with this address list entry</source> <translation>Het label dat bij dit adres hoort</translation> </message> <message> <location line="+17"/> <source>The address associated with this address list entry. This can only be modified for sending addresses.</source> <translation>Het adres dat bij dit item hoort. Dit kan alleen bewerkt worden voor verzendadressen.</translation> </message> <message> <location line="-10"/> <source>&amp;Address</source> <translation>&amp;Adres</translation> </message> <message> <location filename="../editaddressdialog.cpp" line="+28"/> <source>New receiving address</source> <translation>Nieuw ontvangstadres</translation> </message> <message> <location line="+4"/> <source>New sending address</source> <translation>Nieuw adres om naar te verzenden</translation> </message> <message> <location line="+3"/> <source>Edit receiving address</source> <translation>Bewerk ontvangstadres</translation> </message> <message> <location line="+4"/> <source>Edit sending address</source> <translation>Bewerk adres om naar te verzenden</translation> </message> <message> <location line="+76"/> <source>The entered address &quot;%1&quot; is already in the address book.</source> <translation>Het opgegeven adres &quot;%1&quot; bestaat al in uw adresboek.</translation> </message> <message> <location line="-5"/> <source>The entered address &quot;%1&quot; is not a valid Freshcoin address.</source> <translation>Het opgegeven adres &quot;%1&quot; is een ongeldig Freshcoin adres.</translation> </message> <message> <location line="+10"/> <source>Could not unlock wallet.</source> <translation>Kon de portemonnee niet openen.</translation> </message> <message> <location line="+5"/> <source>New key generation failed.</source> <translation>Genereren nieuwe sleutel mislukt.</translation> </message> </context> <context> <name>FreespaceChecker</name> <message> <location filename="../intro.cpp" line="+65"/> <source>A new data directory will be created.</source> <translation>Een nieuwe gegevensmap wordt aangemaakt.</translation> </message> <message> <location line="+22"/> <source>name</source> <translation>naam</translation> </message> <message> <location line="+2"/> <source>Directory already exists. Add %1 if you intend to create a new directory here.</source> <translation>Map bestaat al. 
Voeg %1 toe als u van plan bent hier een nieuwe map aan te maken.</translation> </message> <message> <location line="+3"/> <source>Path already exists, and is not a directory.</source> <translation>Locatie bestaat al, en is geen map.</translation> </message> <message> <location line="+7"/> <source>Cannot create data directory here.</source> <translation>Kan hier geen gegevensmap aanmaken.</translation> </message> </context> <context> <name>HelpMessageDialog</name> <message> <location filename="../forms/helpmessagedialog.ui" line="+19"/> <source>Freshcoin Core - Command-line options</source> <translation>Freshcoin Core - Commandoregel-opties</translation> </message> <message> <location filename="../utilitydialog.cpp" line="+38"/> <source>Freshcoin Core</source> <translation>Freshcoin Core</translation> </message> <message> <location line="+0"/> <source>version</source> <translation>versie</translation> </message> <message> <location line="+2"/> <source>Usage:</source> <translation>Gebruik:</translation> </message> <message> <location line="+1"/> <source>command-line options</source> <translation>commandoregel-opties</translation> </message> <message> <location line="+4"/> <source>UI options</source> <translation>gebruikersinterfaceopties</translation> </message> <message> <location line="+1"/> <source>Set language, for example &quot;de_DE&quot; (default: system locale)</source> <translation>Stel taal in, bijvoorbeeld &quot;de_DE&quot; (standaard: systeeminstellingen)</translation> </message> <message> <location line="+1"/> <source>Start minimized</source> <translation>Geminimaliseerd starten</translation> </message> <message> <location line="+1"/> <source>Show splash screen on startup (default: 1)</source> <translation>Laat laadscherm zien bij het opstarten (standaard: 1)</translation> </message> <message> <location line="+1"/> <source>Choose data directory on startup (default: 0)</source> <translation>Kies de gegevensmap tijdens het opstarten (standaard: 0)</translation> </message> </context> <context> <name>Intro</name> <message> <location filename="../forms/intro.ui" line="+14"/> <source>Welcome</source> <translation>Welkom</translation> </message> <message> <location line="+9"/> <source>Welcome to Freshcoin Core.</source> <translation>Welkom bij Freshcoin Core.</translation> </message> <message> <location line="+26"/> <source>As this is the first time the program is launched, you can choose where Freshcoin Core will store its data.</source> <translation>Omdat dit de eerste keer is dat het programma gestart is, kunt u nu kiezen waar Freshcoin Core de data moet opslaan.</translation> </message> <message> <location line="+10"/> <source>Freshcoin Core will download and store a copy of the Freshcoin block chain. At least %1GB of data will be stored in this directory, and it will grow over time. The wallet will also be stored in this directory.</source> <translation>Freshcoin Core zal een kopie van de Freshcoin blokketen downloaden en opslaan. Ten minste %1 GB aan data wordt opgeslagen in deze map en deze zal in de loop van de tijd groeien. 
De portemonnee wordt ook in deze map opgeslagen.</translation> </message> <message> <location line="+10"/> <source>Use the default data directory</source> <translation>Gebruik de standaard gegevensmap</translation> </message> <message> <location line="+7"/> <source>Use a custom data directory:</source> <translation>Gebruik een aangepaste gegevensmap:</translation> </message> <message> <location filename="../intro.cpp" line="+85"/> <source>Freshcoin</source> <translation>Freshcoin</translation> </message> <message> <location line="+1"/> <source>Error: Specified data directory &quot;%1&quot; can not be created.</source> <translation>Fout: Opgegeven gegevensmap &quot;%1&quot; kan niet aangemaakt worden.</translation> </message> <message> <location line="+19"/> <source>Error</source> <translation>Fout</translation> </message> <message> <location line="+9"/> <source>GB of free space available</source> <translation>GB aan vrije opslagruimte beschikbaar</translation> </message> <message> <location line="+3"/> <source>(of %1GB needed)</source> <translation>(van %1GB benodigd)</translation> </message> </context> <context> <name>OpenURIDialog</name> <message> <location filename="../forms/openuridialog.ui" line="+14"/> <source>Open URI</source> <translation>Open URI</translation> </message> <message> <location line="+6"/> <source>Open payment request from URI or file</source> <translation>Open betalingsverzoek via URI of bestand</translation> </message> <message> <location line="+9"/> <source>URI:</source> <translation>URI:</translation> </message> <message> <location line="+11"/> <source>Select payment request file</source> <translation>Selecteer betalingsverzoekbestand</translation> </message> <message> <location filename="../openuridialog.cpp" line="+47"/> <source>Select payment request file to open</source> <translation>Selecteer te openen betalingsverzoekbestand</translation> </message> </context> <context> <name>OptionsDialog</name> <message> <location filename="../forms/optionsdialog.ui" line="+14"/> <source>Options</source> <translation>Opties</translation> </message> <message> <location line="+13"/> <source>&amp;Main</source> <translation>&amp;Algemeen</translation> </message> <message> <location line="+6"/> <source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source> <translation>Optionele transactiekosten per kB. Transactiekosten helpen ervoor te zorgen dat uw transacties snel verwerkt worden. De meeste transacties zijn 1 kB.</translation> </message> <message> <location line="+15"/> <source>Pay transaction &amp;fee</source> <translation>Betaal &amp;transactiekosten</translation> </message> <message> <location line="+1"/> <source>If you disable the spending of unconfirmed change, the change from a transaction cannot be used until that transaction has at least one confirmation. This also affects how your balance is computed.</source> <translation>Wanneer u de optie om onbevestigd wisselgeld uit te kunnen geven uitschakelt, kan wisselgeld niet opnieuw worden uitgegeven totdat de transactie ten minste één bevestiging heeft. 
Dit beïnvloedt ook hoe uw beschikbare saldo wordt berekend.</translation> </message> <message> <location line="+1"/> <source>&amp;Spend unconfirmed change (experts only)</source> <translation>&amp;Geef onbevestigd wisselgeld uit (alleen voor experts)</translation> </message> <message> <location line="+31"/> <source>Automatically start Freshcoin after logging in to the system.</source> <translation>Start Freshcoin automatisch na inloggen in het systeem.</translation> </message> <message> <location line="+2"/> <source>&amp;Start Freshcoin on system login</source> <translation>Start &amp;Freshcoin bij het inloggen in het systeem</translation> </message> <message> <location line="+9"/> <source>Size of &amp;database cache</source> <translation>Grootte van de &amp;databasecache</translation> </message> <message> <location line="+13"/> <source>Set database cache size in megabytes (default: 25)</source> <translation>Stel de databasecachegrootte in megabytes in (standaard: 25)</translation> </message> <message> <location line="+13"/> <source>MB</source> <translation>MB</translation> </message> <message> <location line="+27"/> <source>Number of script &amp;verification threads</source> <translation>Aantal subprocessen voor script&amp;verificatie (threads)</translation> </message> <message> <location line="+13"/> <source>Set the number of script verification threads (up to 16, 0 = auto, &lt;0 = leave that many cores free, default: 0)</source> <translation>Stel het aantal subprocessen voor scriptverificatie in (max 16, 0 = auto, &lt;0 = laat zoveel cores vrij, standaard: 0)</translation> </message> <message> <location line="+58"/> <source>Connect to the Freshcoin network through a SOCKS proxy.</source> <translation>Verbind met het Freshcoin netwerk via een SOCKS proxy.</translation> </message> <message> <location line="+3"/> <source>&amp;Connect through SOCKS proxy (default proxy):</source> <translation>&amp;Verbind via SOCKS proxy (standaard proxy):</translation> </message> <message> <location line="+34"/> <source>IP address of the proxy (e.g. IPv4: 127.0.0.1 / IPv6: ::1)</source> <translation>IP-adres van de proxy (bijv. IPv4: 127.0.0.1 / IPv6: ::1)</translation> </message> <message> <location line="+224"/> <source>Active command-line options that override above options:</source> <translation>Actieve commandoregel-opties die bovenstaande opties overschrijven:</translation> </message> <message> <location line="+43"/> <source>Reset all client options to default.</source> <translation>Reset alle clientopties naar de standaardinstellingen.</translation> </message> <message> <location line="+3"/> <source>&amp;Reset Options</source> <translation>&amp;Reset Opties</translation> </message> <message> <location line="-323"/> <source>&amp;Network</source> <translation>&amp;Netwerk</translation> </message> <message> <location line="+6"/> <source>Automatically open the Freshcoin client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation>Open de Freshcoin-poort automatisch op de router. 
Dit werkt alleen als de router UPnP ondersteunt en deze optie aan staat.</translation> </message> <message> <location line="+3"/> <source>Map port using &amp;UPnP</source> <translation>Poortmapping via &amp;UPnP</translation> </message> <message> <location line="+19"/> <source>Proxy &amp;IP:</source> <translation>Proxy &amp;IP:</translation> </message> <message> <location line="+32"/> <source>&amp;Port:</source> <translation>&amp;Poort:</translation> </message> <message> <location line="+25"/> <source>Port of the proxy (e.g. 9050)</source> <translation>Poort van de proxy (bijv. 9050)</translation> </message> <message> <location line="+7"/> <source>SOCKS &amp;Version:</source> <translation>SOCKS-&amp;Versie:</translation> </message> <message> <location line="+13"/> <source>SOCKS version of the proxy (e.g. 5)</source> <translation>SOCKS-versie van de proxy (bijv. 5)</translation> </message> <message> <location line="+36"/> <source>&amp;Window</source> <translation>&amp;Scherm</translation> </message> <message> <location line="+6"/> <source>Show only a tray icon after minimizing the window.</source> <translation>Laat alleen een systeemvak-icoon zien wanneer het venster geminimaliseerd is.</translation> </message> <message> <location line="+3"/> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation>&amp;Minimaliseer naar het systeemvak in plaats van de taakbalk</translation> </message> <message> <location line="+7"/> <source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source> <translation>Minimaliseer het venster in plaats van de applicatie af te sluiten als het venster gesloten wordt. Wanneer deze optie aan staat, kan de applicatie alleen worden afgesloten door Afsluiten te kiezen in het menu.</translation> </message> <message> <location line="+3"/> <source>M&amp;inimize on close</source> <translation>Minimaliseer bij sluiten van het &amp;venster</translation> </message> <message> <location line="+21"/> <source>&amp;Display</source> <translation>&amp;Interface</translation> </message> <message> <location line="+8"/> <source>User Interface &amp;language:</source> <translation>Taal &amp;Gebruikersinterface:</translation> </message> <message> <location line="+13"/> <source>The user interface language can be set here. This setting will take effect after restarting Freshcoin Core.</source> <translation>De taal van de gebruikersinterface kan hier ingesteld worden. 
Deze instelling zal pas van kracht worden nadat Freshcoin herstart wordt.</translation> </message> <message> <location line="+11"/> <source>&amp;Unit to show amounts in:</source> <translation>&amp;Eenheid om bedragen in te tonen:</translation> </message> <message> <location line="+13"/> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation>Kies de standaard onderverdelingseenheid om weer te geven in uw programma, en voor het verzenden van munten.</translation> </message> <message> <location line="+9"/> <source>Whether to show Freshcoin addresses in the transaction list or not.</source> <translation>Of Freshcoinadressen getoond worden in de transactielijst.</translation> </message> <message> <location line="+3"/> <source>&amp;Display addresses in transaction list</source> <translation>Toon a&amp;dressen in de transactielijst</translation> </message> <message> <location line="+7"/> <source>Whether to show coin control features or not.</source> <translation>Geavanceerde muntfuncties weergeven of niet.</translation> </message> <message> <location line="+3"/> <source>Display coin &amp;control features (experts only)</source> <translation>Toon geavan&amp;ceerde muntfuncties (alleen voor experts)</translation> </message> <message> <location line="+136"/> <source>&amp;OK</source> <translation>&amp;OK</translation> </message> <message> <location line="+7"/> <source>&amp;Cancel</source> <translation>Ann&amp;uleren</translation> </message> <message> <location filename="../optionsdialog.cpp" line="+67"/> <source>default</source> <translation>standaard</translation> </message> <message> <location line="+57"/> <source>none</source> <translation>geen</translation> </message> <message> <location line="+75"/> <source>Confirm options reset</source> <translation>Bevestig het resetten van de opties</translation> </message> <message> <location line="+1"/> <location line="+29"/> <source>Client restart required to activate changes.</source> <translation>Herstart van de client is vereist om veranderingen door te voeren.</translation> </message> <message> <location line="-29"/> <source>Client will be shutdown, do you want to proceed?</source> <translation>De client zal worden afgesloten, wilt u doorgaan?</translation> </message> <message> <location line="+33"/> <source>This change would require a client restart.</source> <translation>Deze wijziging vereist een herstart van de client.</translation> </message> <message> <location line="+34"/> <source>The supplied proxy address is invalid.</source> <translation>Het opgegeven proxyadres is ongeldig.</translation> </message> </context> <context> <name>OverviewPage</name> <message> <location filename="../forms/overviewpage.ui" line="+14"/> <source>Form</source> <translation>Formulier</translation> </message> <message> <location line="+50"/> <location line="+231"/> <source>The displayed information may be out of date. Your wallet automatically synchronizes with the Freshcoin network after a connection is established, but this process has not completed yet.</source> <translation>De weergegeven informatie kan verouderd zijn. 
Uw portemonnee synchroniseert automatisch met het Freshcoin netwerk nadat een verbinding is gemaakt, maar dit proces is nog niet voltooid.</translation> </message> <message> <location line="-155"/> <source>Unconfirmed:</source> <translation>Onbevestigd:</translation> </message> <message> <location line="-83"/> <source>Wallet</source> <translation>Portemonnee</translation> </message> <message> <location line="+51"/> <source>Available:</source> <translation>Beschikbaar:</translation> </message> <message> <location line="+16"/> <source>Pending:</source> <translation>In afwachting:</translation> </message> <message> <location line="+51"/> <source>Confirmed:</source> <translation>Bevestigd:</translation> </message> <message> <location line="+16"/> <source>Your current spendable balance</source> <translation>Uw beschikbare saldo</translation> </message> <message> <location line="+32"/> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the spendable balance</source> <translation>De som van de transacties die nog bevestigd moeten worden, en nog niet meetellen in uw beschikbare saldo</translation> </message> <message> <location line="+16"/> <source>Immature:</source> <translation>Immatuur:</translation> </message> <message> <location line="+13"/> <source>Mined balance that has not yet matured</source> <translation>Gedolven saldo dat nog niet tot wasdom is gekomen</translation> </message> <message> <location line="+16"/> <source>Total:</source> <translation>Totaal:</translation> </message> <message> <location line="+16"/> <source>Your current total balance</source> <translation>Uw totale saldo</translation> </message> <message> <location line="+71"/> <source>&lt;b&gt;Recent transactions&lt;/b&gt;</source> <translation>&lt;b&gt;Recente transacties&lt;/b&gt;</translation> </message> <message> <location filename="../overviewpage.cpp" line="+120"/> <location line="+1"/> <source>out of sync</source> <translation>niet gesynchroniseerd</translation> </message> </context> <context> <name>PaymentServer</name> <message> <location filename="../paymentserver.cpp" line="+403"/> <location line="+13"/> <source>URI handling</source> <translation>URI-behandeling</translation> </message> <message> <location line="+1"/> <source>URI can not be parsed! This can be caused by an invalid Freshcoin address or malformed URI parameters.</source> <translation>URI kan niet worden geïnterpreteerd. Dit kan komen door een ongeldig Freshcoinadres of misvormde URI-parameters.</translation> </message> <message> <location line="+96"/> <source>Requested payment amount of %1 is too small (considered dust).</source> <translation>Het gevraagde betalingsbedrag van %1 is te laag (beschouwd als stof).</translation> </message> <message> <location line="-221"/> <location line="+212"/> <location line="+13"/> <location line="+95"/> <location line="+18"/> <location line="+16"/> <source>Payment request error</source> <translation>Fout bij betalingsverzoek</translation> </message> <message> <location line="-353"/> <source>Cannot start freshcoin: click-to-pay handler</source> <translation>Kan freshcoin niet starten: click-to-pay handler</translation> </message> <message> <location line="+58"/> <source>Net manager warning</source> <translation>Netwerkmanager-waarschuwing</translation> </message> <message> <location line="+1"/> <source>Your active proxy doesn&apos;t support SOCKS5, which is required for payment requests via proxy.</source> <translation>Uw actieve proxy ondersteunt geen SOCKS5. 
Dit is benodigd om betalingsverzoeken via een proxy uit te voeren.</translation> </message> <message> <location line="+52"/> <source>Payment request fetch URL is invalid: %1</source> <translation>URL van betalingsverzoek is ongeldig: %1</translation> </message> <message> <location line="+27"/> <source>Payment request file handling</source> <translation>Afhandeling van betalingsverzoekbestanden</translation> </message> <message> <location line="+1"/> <source>Payment request file can not be read or processed! This can be caused by an invalid payment request file.</source> <translation>Betalingsverzoekbestand kan niet worden gelezen of verwerkt! Dit kan worden veroorzaakt door een ongeldig betalingsverzoekbestand.</translation> </message> <message> <location line="+73"/> <source>Unverified payment requests to custom payment scripts are unsupported.</source> <translation>Niet-geverifieerde betalingsverzoeken naar aangepaste betalingsscripts worden niet ondersteund.</translation> </message> <message> <location line="+59"/> <source>Refund from %1</source> <translation>Restitutie van %1</translation> </message> <message> <location line="+43"/> <source>Error communicating with %1: %2</source> <translation>Fout bij communiceren met %1: %2</translation> </message> <message> <location line="+24"/> <source>Payment request can not be parsed or processed!</source> <translation>Betalingsverzoek kan niet worden geïnterpreteerd of verwerkt!</translation> </message> <message> <location line="+11"/> <source>Bad response from server %1</source> <translation>Ongeldig antwoord van server %1</translation> </message> <message> <location line="+33"/> <source>Payment acknowledged</source> <translation>Betaling bevestigd</translation> </message> <message> <location line="-11"/> <source>Network request error</source> <translation>Netwerkfout bij verzoek</translation> </message> </context> <context> <name>QObject</name> <message> <location filename="../bitcoin.cpp" line="+71"/> <location line="+11"/> <source>Freshcoin</source> <translation>Freshcoin</translation> </message> <message> <location line="+1"/> <source>Error: Specified data directory &quot;%1&quot; does not exist.</source> <translation>Fout: Opgegeven gegevensmap &quot;%1&quot; bestaat niet.</translation> </message> <message> <location line="-12"/> <source>Error: Invalid combination of -regtest and -testnet.</source> <translation>Fout: Ongeldige combinatie van -regtest en -testnet.</translation> </message> <message> <location filename="../guiutil.cpp" line="+82"/> <source>Enter a Freshcoin address (e.g. DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</source> <translation>Vul een Freshcoinadres in (bijv. 
DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</translation> </message> </context> <context> <name>QRImageWidget</name> <message> <location filename="../receiverequestdialog.cpp" line="+36"/> <source>&amp;Save Image...</source> <translation>&amp;Afbeelding opslaan...</translation> </message> <message> <location line="+3"/> <source>&amp;Copy Image</source> <translation>&amp;Afbeelding kopiëren</translation> </message> <message> <location line="+28"/> <source>Save QR Code</source> <translation>Sla QR-code op</translation> </message> <message> <location line="+0"/> <source>PNG Image (*.png)</source> <translation>PNG-afbeelding (*.png)</translation> </message> </context> <context> <name>RPCConsole</name> <message> <location filename="../forms/rpcconsole.ui" line="+46"/> <source>Client name</source> <translation>Clientnaam</translation> </message> <message> <location line="+10"/> <location line="+23"/> <location line="+26"/> <location line="+23"/> <location line="+23"/> <location line="+36"/> <location line="+23"/> <location line="+36"/> <location line="+23"/> <location line="+23"/> <location filename="../rpcconsole.cpp" line="+359"/> <source>N/A</source> <translation>N.v.t.</translation> </message> <message> <location line="-223"/> <source>Client version</source> <translation>Clientversie</translation> </message> <message> <location line="-45"/> <source>&amp;Information</source> <translation>&amp;Informatie</translation> </message> <message> <location line="-10"/> <source>Debug window</source> <translation>Debugvenster</translation> </message> <message> <location line="+25"/> <source>General</source> <translation>Algemeen</translation> </message> <message> <location line="+53"/> <source>Using OpenSSL version</source> <translation>Gebruikte OpenSSL-versie</translation> </message> <message> <location line="+49"/> <source>Startup time</source> <translation>Opstarttijd</translation> </message> <message> <location line="+29"/> <source>Network</source> <translation>Netwerk</translation> </message> <message> <location line="+7"/> <source>Name</source> <translation>Naam</translation> </message> <message> <location line="+23"/> <source>Number of connections</source> <translation>Aantal connecties</translation> </message> <message> <location line="+29"/> <source>Block chain</source> <translation>Blokketen</translation> </message> <message> <location line="+7"/> <source>Current number of blocks</source> <translation>Huidig aantal blokken</translation> </message> <message> <location line="+23"/> <source>Estimated total blocks</source> <translation>Geschat totaal aantal blokken</translation> </message> <message> <location line="+23"/> <source>Last block time</source> <translation>Tijd laatste blok</translation> </message> <message> <location line="+52"/> <source>&amp;Open</source> <translation>&amp;Open</translation> </message> <message> <location line="+24"/> <source>&amp;Console</source> <translation>&amp;Console</translation> </message> <message> <location line="+72"/> <source>&amp;Network Traffic</source> <translation>&amp;Netwerkverkeer</translation> </message> <message> <location line="+52"/> <source>&amp;Clear</source> <translation>&amp;Wissen</translation> </message> <message> <location line="+13"/> <source>Totals</source> <translation>Totalen</translation> </message> <message> <location line="+64"/> <source>In:</source> <translation>In:</translation> </message> <message> <location line="+80"/> <source>Out:</source> <translation>Uit:</translation> </message> <message> <location line="-521"/> <source>Build 
date</source> <translation>Bouwdatum</translation> </message> <message> <location line="+206"/> <source>Debug log file</source> <translation>Debug-logbestand</translation> </message> <message> <location line="+7"/> <source>Open the Freshcoin Core debug log file from the current data directory. This can take a few seconds for large log files.</source> <translation>Open het Freshcoin Core debug-logbestand van de huidige datamap. Dit kan een aantal seconden duren voor grote logbestanden.</translation> </message> <message> <location line="+76"/> <source>Clear console</source> <translation>Maak console leeg</translation> </message> <message> <location filename="../rpcconsole.cpp" line="-30"/> <source>Welcome to the Freshcoin Core RPC console.</source> <translation>Welkom bij de Freshcoin RPC-console.</translation> </message> <message> <location line="+1"/> <source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source> <translation>Gebruik de pijltjestoetsen om door de geschiedenis te navigeren, en &lt;b&gt;Ctrl-L&lt;/b&gt; om het scherm leeg te maken.</translation> </message> <message> <location line="+1"/> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation>Typ &lt;b&gt;help&lt;/b&gt; voor een overzicht van de beschikbare commando&apos;s.</translation> </message> <message> <location line="+122"/> <source>%1 B</source> <translation>%1 B</translation> </message> <message> <location line="+2"/> <source>%1 KB</source> <translation>%1 KB</translation> </message> <message> <location line="+2"/> <source>%1 MB</source> <translation>%1 MB</translation> </message> <message> <location line="+2"/> <source>%1 GB</source> <translation>%1 GB</translation> </message> <message> <location line="+7"/> <source>%1 m</source> <translation>%1 m</translation> </message> <message> <location line="+5"/> <source>%1 h</source> <translation>%1 u</translation> </message> <message> <location line="+2"/> <source>%1 h %2 m</source> <translation>%1 u %2 m</translation> </message> </context> <context> <name>ReceiveCoinsDialog</name> <message> <location filename="../forms/receivecoinsdialog.ui" line="+107"/> <source>&amp;Amount:</source> <translation>&amp;Bedrag:</translation> </message> <message> <location line="-16"/> <source>&amp;Label:</source> <translation>&amp;Label:</translation> </message> <message> <location line="-37"/> <source>&amp;Message:</source> <translation>&amp;Bericht:</translation> </message> <message> <location line="-20"/> <source>Reuse one of the previously used receiving addresses. Reusing addresses has security and privacy issues. Do not use this unless re-generating a payment request made before.</source> <translation>Gebruik een van de eerder gebruikte ontvangstadressen opnieuw. Het opnieuw gebruiken van adressen heeft beveiligings- en privacyrisico's. Gebruik dit niet, tenzij u een eerder gemaakt betalingsverzoek opnieuw genereert.</translation> </message> <message> <location line="+3"/> <source>R&amp;euse an existing receiving address (not recommended)</source> <translation>H&amp;ergebruik een bestaand ontvangstadres (niet aanbevolen)</translation> </message> <message> <location line="+14"/> <location line="+23"/> <source>An optional message to attach to the payment request, which will be displayed when the request is opened. 
Note: The message will not be sent with the payment over the Freshcoin network.</source> <translation>Een optioneel bericht om aan het betalingsverzoek toe te voegen, dat weergegeven zal worden bij het openen van het verzoek. Let op: het bericht zal niet met de betaling over het Freshcoin netwerk verzonden worden.</translation> </message> <message> <location line="-7"/> <location line="+21"/> <source>An optional label to associate with the new receiving address.</source> <translation>Een optioneel label om te associëren met het nieuwe ontvangstadres.</translation> </message> <message> <location line="-7"/> <source>Use this form to request payments. All fields are &lt;b&gt;optional&lt;/b&gt;.</source> <translation>Gebruik dit formulier om betalingsverzoeken te doen. Alle velden zijn &lt;b&gt;optioneel&lt;/b&gt;.</translation> </message> <message> <location line="+23"/> <location line="+22"/> <source>An optional amount to request. Leave this empty or zero to not request a specific amount.</source> <translation>Een optioneel bedrag. Laat dit veld leeg of op nul om geen specifiek bedrag te verzoeken.</translation> </message> <message> <location line="+32"/> <source>Clear all fields of the form.</source> <translation>Wis alle velden van het formulier.</translation> </message> <message> <location line="+3"/> <source>Clear</source> <translation>Wissen</translation> </message> <message> <location line="+78"/> <source>Requested payments history</source> <translation>Geschiedenis van de betalingsverzoeken</translation> </message> <message> <location line="-98"/> <source>&amp;Request payment</source> <translation>&amp;Betalingsverzoek</translation> </message> <message> <location line="+120"/> <source>Show the selected request (does the same as double clicking an entry)</source> <translation>Toon het geselecteerde verzoek (doet hetzelfde als dubbelklikken)</translation> </message> <message> <location line="+3"/> <source>Show</source> <translation>Toon</translation> </message> <message> <location line="+11"/> <source>Remove the selected entries from the list</source> <translation>Verwijder de geselecteerde items uit de lijst</translation> </message> <message> <location line="+3"/> <source>Remove</source> <translation>Verwijder</translation> </message> <message> <location filename="../receivecoinsdialog.cpp" line="+38"/> <source>Copy label</source> <translation>Kopieer label</translation> </message> <message> <location line="+1"/> <source>Copy message</source> <translation>Kopieer bericht</translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation>Kopieer bedrag</translation> </message> </context> <context> <name>ReceiveRequestDialog</name> <message> <location filename="../forms/receiverequestdialog.ui" line="+29"/> <source>QR Code</source> <translation>QR-code</translation> </message> <message> <location line="+46"/> <source>Copy &amp;URI</source> <translation>Kopieer &amp;URI</translation> </message> <message> <location line="+7"/> <source>Copy &amp;Address</source> <translation>Kopieer &amp;adres</translation> </message> <message> <location line="+7"/> <source>&amp;Save Image...</source> <translation>&amp;Afbeelding opslaan...</translation> </message> <message> <location filename="../receiverequestdialog.cpp" line="+56"/> <source>Request payment to %1</source> <translation>Betalingsverzoek aan %1</translation> </message> <message> <location line="+6"/> <source>Payment information</source> <translation>Betalingsinformatie</translation> </message> <message> <location line="+1"/> 
<source>URI</source> <translation>URI</translation> </message> <message> <location line="+2"/> <source>Address</source> <translation>Adres</translation> </message> <message> <location line="+2"/> <source>Amount</source> <translation>Bedrag</translation> </message> <message> <location line="+2"/> <source>Label</source> <translation>Label</translation> </message> <message> <location line="+2"/> <source>Message</source> <translation>Bericht</translation> </message> <message> <location line="+10"/> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation>Resulterende URI te lang, probeer de tekst korter te maken voor het label/bericht.</translation> </message> <message> <location line="+5"/> <source>Error encoding URI into QR Code.</source> <translation>Fout bij het coderen van de URI naar een QR-code.</translation> </message> </context> <context> <name>RecentRequestsTableModel</name> <message> <location filename="../recentrequeststablemodel.cpp" line="+24"/> <source>Date</source> <translation>Datum</translation> </message> <message> <location line="+0"/> <source>Label</source> <translation>Label</translation> </message> <message> <location line="+0"/> <source>Message</source> <translation>Bericht</translation> </message> <message> <location line="+0"/> <source>Amount</source> <translation>Bedrag</translation> </message> <message> <location line="+38"/> <source>(no label)</source> <translation>(geen label)</translation> </message> <message> <location line="+9"/> <source>(no message)</source> <translation>(geen bericht)</translation> </message> <message> <location line="+8"/> <source>(no amount)</source> <translation>(geen bedrag)</translation> </message> </context> <context> <name>SendCoinsDialog</name> <message> <location filename="../forms/sendcoinsdialog.ui" line="+14"/> <location filename="../sendcoinsdialog.cpp" line="+380"/> <location line="+80"/> <source>Send Coins</source> <translation>Verzend munten</translation> </message> <message> <location line="+76"/> <source>Coin Control Features</source> <translation>Geavanceerde muntopties</translation> </message> <message> <location line="+20"/> <source>Inputs...</source> <translation>Muntbron...</translation> </message> <message> <location line="+7"/> <source>automatically selected</source> <translation>automatisch geselecteerd</translation> </message> <message> <location line="+19"/> <source>Insufficient funds!</source> <translation>Onvoldoende saldo!</translation> </message> <message> <location line="+89"/> <source>Quantity:</source> <translation>Kwantiteit:</translation> </message> <message> <location line="+35"/> <source>Bytes:</source> <translation>Bytes:</translation> </message> <message> <location line="+48"/> <source>Amount:</source> <translation>Bedrag:</translation> </message> <message> <location line="+32"/> <source>Priority:</source> <translation>Prioriteit:</translation> </message> <message> <location line="+48"/> <source>Fee:</source> <translation>Kosten:</translation> </message> <message> <location line="+32"/> <source>Low Output:</source> <translation>Lage uitvoer:</translation> </message> <message> <location line="+48"/> <source>After Fee:</source> <translation>Na kosten:</translation> </message> <message> <location line="+32"/> <source>Change:</source> <translation>Wisselgeld:</translation> </message> <message> <location line="+44"/> <source>If this is activated, but the change address is empty or invalid, change will be sent to a newly generated address.</source> <translation>Als dit is 
geactiveerd, maar het wisselgeld-adres is leeg of ongeldig, dan wordt het wisselgeld verzonden naar een nieuw gegenereerd adres.</translation> </message> <message> <location line="+3"/> <source>Custom change address</source> <translation>Specifiek wisselgeld-adres</translation> </message> <message> <location line="+164"/> <source>Send to multiple recipients at once</source> <translation>Verzend aan meerdere ontvangers ineens</translation> </message> <message> <location line="+3"/> <source>Add &amp;Recipient</source> <translation>Voeg &amp;Ontvanger Toe</translation> </message> <message> <location line="-23"/> <source>Clear all fields of the form.</source> <translation>Wis alle velden van het formulier.</translation> </message> <message> <location line="+3"/> <source>Clear &amp;All</source> <translation>Verwijder &amp;Alles</translation> </message> <message> <location line="+52"/> <source>Balance:</source> <translation>Saldo:</translation> </message> <message> <location line="-78"/> <source>Confirm the send action</source> <translation>Bevestig de verzendopdracht</translation> </message> <message> <location line="+3"/> <source>S&amp;end</source> <translation>&amp;Verzenden</translation> </message> <message> <location filename="../sendcoinsdialog.cpp" line="-229"/> <source>Confirm send coins</source> <translation>Bevestig het verzenden van munten</translation> </message> <message> <location line="-74"/> <location line="+5"/> <location line="+5"/> <location line="+4"/> <source>%1 to %2</source> <translation>%1 aan %2</translation> </message> <message> <location line="-121"/> <source>Copy quantity</source> <translation>Kopieer aantal</translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation>Kopieer bedrag</translation> </message> <message> <location line="+1"/> <source>Copy fee</source> <translation>Kopieer kosten</translation> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation>Kopieer bedrag na kosten</translation> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation>Kopieer bytes</translation> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation>Kopieer prioriteit</translation> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation>Kopieer lage uitvoer</translation> </message> <message> <location line="+1"/> <source>Copy change</source> <translation>Kopieer restant</translation> </message> <message> <location line="+170"/> <source>Total Amount %1 (= %2)</source> <translation>Totaal bedrag %1 (= %2)</translation> </message> <message> <location line="+2"/> <source>or</source> <translation>of</translation> </message> <message> <location line="+203"/> <source>The recipient address is not valid, please recheck.</source> <translation>Het ontvangstadres is niet geldig, controleer uw invoer.</translation> </message> <message> <location line="+3"/> <source>The amount to pay must be larger than 0.</source> <translation>Het ingevoerde bedrag moet groter zijn dan 0.</translation> </message> <message> <location line="+3"/> <source>The amount exceeds your balance.</source> <translation>Bedrag is hoger dan uw huidige saldo.</translation> </message> <message> <location line="+3"/> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation>Het totaal overschrijdt uw huidige saldo wanneer de %1 transactiekosten worden meegerekend.</translation> </message> <message> <location line="+3"/> 
<source>Duplicate address found, can only send to each address once per send operation.</source> <translation>Dubbel adres gevonden, u kunt slechts eenmaal per transactie naar een bepaald adres verzenden.</translation> </message> <message> <location line="+3"/> <source>Transaction creation failed!</source> <translation>Aanmaken van transactie is mislukt!</translation> </message> <message> <location line="+4"/> <source>The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation>De transactie was afgewezen! Dit kan gebeuren als u eerder uitgegeven munten opnieuw wilt verzenden, zoals wanneer u een kopie van uw wallet.dat heeft gebruikt en in de kopie deze munten zijn gemarkeerd als uitgegeven, maar in de huidige nog niet.</translation> </message> <message> <location line="+113"/> <source>Warning: Invalid Freshcoin address</source> <translation>Waarschuwing: Ongeldig Freshcoinadres</translation> </message> <message> <location line="+20"/> <source>(no label)</source> <translation>(geen label)</translation> </message> <message> <location line="-11"/> <source>Warning: Unknown change address</source> <translation>Waarschuwing: Onbekend wisselgeld-adres</translation> </message> <message> <location line="-367"/> <source>Are you sure you want to send?</source> <translation>Weet u zeker dat u wilt verzenden?</translation> </message> <message> <location line="+9"/> <source>added as transaction fee</source> <translation>toegevoegd als transactiekosten</translation> </message> <message> <location line="+171"/> <source>Payment request expired</source> <translation>Betalingsverzoek verlopen</translation> </message> <message> <location line="+8"/> <source>Invalid payment address %1</source> <translation>Ongeldig betalingsadres %1</translation> </message> </context> <context> <name>SendCoinsEntry</name> <message> <location filename="../forms/sendcoinsentry.ui" line="+131"/> <location line="+521"/> <location line="+536"/> <source>A&amp;mount:</source> <translation>Bedra&amp;g:</translation> </message> <message> <location line="-1152"/> <source>Pay &amp;To:</source> <translation>Betaal &amp;Aan:</translation> </message> <message> <location line="+18"/> <source>The address to send the payment to (e.g. DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</source> <translation>Het adres waaraan u wilt betalen (bijv. 
DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</translation> </message> <message> <location filename="../sendcoinsentry.cpp" line="+30"/> <source>Enter a label for this address to add it to your address book</source> <translation>Vul een label in voor dit adres om het op te slaan in uw adresboek</translation> </message> <message> <location filename="../forms/sendcoinsentry.ui" line="+57"/> <source>&amp;Label:</source> <translation>&amp;Label:</translation> </message> <message> <location line="-50"/> <source>Choose previously used address</source> <translation>Kies een opgeslagen adres</translation> </message> <message> <location line="-40"/> <source>This is a normal payment.</source> <translation>Dit is een normale betaling.</translation> </message> <message> <location line="+50"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="+7"/> <source>Paste address from clipboard</source> <translation>Plak adres vanuit klembord</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+7"/> <location line="+524"/> <location line="+536"/> <source>Remove this entry</source> <translation>Verwijder deze invoer</translation> </message> <message> <location line="-1008"/> <source>Message:</source> <translation>Bericht:</translation> </message> <message> <location line="+968"/> <source>This is a verified payment request.</source> <translation>Dit is een geverifieerd betalingsverzoek.</translation> </message> <message> <location line="-991"/> <source>Enter a label for this address to add it to the list of used addresses</source> <translation>Vul een label voor dit adres in om het in het adresboek op te slaan</translation> </message> <message> <location line="+33"/> <source>A message that was attached to the freshcoin: URI which will be stored with the transaction for your reference. Note: This message will not be sent over the Freshcoin network.</source> <translation>Het aan het betalingsverzoek bijgevoegde bericht zal bij de transactie worden opgeslagen. 
Let op: dit bericht zal niet over het Freshcoin netwerk verzonden worden.</translation> </message> <message> <location line="+426"/> <source>This is an unverified payment request.</source> <translation>Dit is een ongeverifieerd betalingsverzoek.</translation> </message> <message> <location line="+18"/> <location line="+532"/> <source>Pay To:</source> <translation>Betaal Aan:</translation> </message> <message> <location line="-498"/> <location line="+536"/> <source>Memo:</source> <translation>Memo:</translation> </message> </context> <context> <name>ShutdownWindow</name> <message> <location filename="../utilitydialog.cpp" line="+48"/> <source>Freshcoin Core is shutting down...</source> <translation>Freshcoin Core is aan het afsluiten...</translation> </message> <message> <location line="+1"/> <source>Do not shut down the computer until this window disappears.</source> <translation>Sluit de computer niet af totdat dit venster verdwenen is.</translation> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <location filename="../forms/signverifymessagedialog.ui" line="+14"/> <source>Signatures - Sign / Verify a Message</source> <translation>Handtekeningen - Onderteken een bericht / Verifiëer een handtekening</translation> </message> <message> <location line="+10"/> <source>&amp;Sign Message</source> <translation>O&amp;nderteken Bericht</translation> </message> <message> <location line="+6"/> <source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation>U kunt berichten ondertekenen met een van uw adressen om te bewijzen dat u dit adres bezit. Pas op dat u geen onduidelijke berichten ondertekent, want phishing aanvallen zouden u kunnen misleiden om zo uw identiteit te stelen. Onderteken alleen berichten waarmee u het volledig eens bent.</translation> </message> <message> <location line="+18"/> <source>The address to sign the message with (e.g. DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</source> <translation>Het adres om het bericht mee te ondertekenen (bijv. 
DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM).</translation> </message> <message> <location line="+7"/> <location line="+210"/> <source>Choose previously used address</source> <translation>Kies een eerder gebruikt adres</translation> </message> <message> <location line="-200"/> <location line="+210"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="-200"/> <source>Paste address from clipboard</source> <translation>Plak adres vanuit klembord</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+12"/> <source>Enter the message you want to sign here</source> <translation>Typ hier het bericht dat u wilt ondertekenen</translation> </message> <message> <location line="+7"/> <source>Signature</source> <translation>Handtekening</translation> </message> <message> <location line="+27"/> <source>Copy the current signature to the system clipboard</source> <translation>Kopieer de huidige handtekening naar het systeemklembord</translation> </message> <message> <location line="+21"/> <source>Sign the message to prove you own this Freshcoin address</source> <translation>Onderteken een bericht om te bewijzen dat u een bepaald Freshcoinadres bezit</translation> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation>Onderteken &amp;Bericht</translation> </message> <message> <location line="+14"/> <source>Reset all sign message fields</source> <translation>Verwijder alles in de invulvelden</translation> </message> <message> <location line="+3"/> <location line="+143"/> <source>Clear &amp;All</source> <translation>Verwijder &amp;Alles</translation> </message> <message> <location line="-84"/> <source>&amp;Verify Message</source> <translation>&amp;Verifiëer Bericht</translation> </message> <message> <location line="+6"/> <source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source> <translation>Voer het ondertekenende adres, bericht en handtekening hieronder in (let erop dat u nieuwe regels, spaties en tabs juist overneemt) om de handtekening te verifiëren. Let erop dat u niet meer uit het bericht interpreteert dan er daadwerkelijk staat, om te voorkomen dat u wordt misleid in een man-in-the-middle-aanval.</translation> </message> <message> <location line="+21"/> <source>The address the message was signed with (e.g. DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</source> <translation>Het adres waarmee het bericht was ondertekend (bijv. DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM).</translation> </message> <message> <location line="+37"/> <source>Verify the message to ensure it was signed with the specified Freshcoin address</source> <translation>Controleer een bericht om te verifiëren dat het gespecificeerde Freshcoinadres het bericht heeft ondertekend.</translation> </message> <message> <location line="+3"/> <source>Verify &amp;Message</source> <translation>Verifiëer &amp;Bericht</translation> </message> <message> <location line="+14"/> <source>Reset all verify message fields</source> <translation>Verwijder alles in de invulvelden</translation> </message> <message> <location filename="../signverifymessagedialog.cpp" line="+30"/> <source>Enter a Freshcoin address (e.g. 
DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</source> <translation>Vul een Freshcoinadres in (bijv. DJ7zB7c5BsB9UJLy1rKQtY7c6CQfGiaRLM)</translation> </message> <message> <location line="-1"/> <source>Click &quot;Sign Message&quot; to generate signature</source> <translation>Klik &quot;Onderteken Bericht&quot; om de handtekening te genereren</translation> </message> <message> <location line="+84"/> <location line="+80"/> <source>The entered address is invalid.</source> <translation>Het opgegeven adres is ongeldig.</translation> </message> <message> <location line="-80"/> <location line="+8"/> <location line="+72"/> <location line="+8"/> <source>Please check the address and try again.</source> <translation>Controleer s.v.p. het adres en probeer het opnieuw.</translation> </message> <message> <location line="-80"/> <location line="+80"/> <source>The entered address does not refer to a key.</source> <translation>Het opgegeven adres verwijst niet naar een sleutel.</translation> </message> <message> <location line="-72"/> <source>Wallet unlock was cancelled.</source> <translation>Portemonnee-ontsleuteling is geannuleerd</translation> </message> <message> <location line="+8"/> <source>Private key for the entered address is not available.</source> <translation>Geheime sleutel voor het ingevoerde adres is niet beschikbaar.</translation> </message> <message> <location line="+12"/> <source>Message signing failed.</source> <translation>Ondertekenen van het bericht is mislukt.</translation> </message> <message> <location line="+5"/> <source>Message signed.</source> <translation>Bericht ondertekend.</translation> </message> <message> <location line="+58"/> <source>The signature could not be decoded.</source> <translation>De handtekening kon niet worden gedecodeerd.</translation> </message> <message> <location line="+0"/> <location line="+13"/> <source>Please check the signature and try again.</source> <translation>Controleer s.v.p. 
de handtekening en probeer het opnieuw.</translation> </message> <message> <location line="+0"/> <source>The signature did not match the message digest.</source> <translation>De handtekening hoort niet bij het bericht.</translation> </message> <message> <location line="+7"/> <source>Message verification failed.</source> <translation>Berichtverificatie mislukt.</translation> </message> <message> <location line="+5"/> <source>Message verified.</source> <translation>Bericht correct geverifiëerd.</translation> </message> </context> <context> <name>SplashScreen</name> <message> <location filename="../splashscreen.cpp" line="+28"/> <source>Freshcoin Core</source> <translation>Freshcoin Core</translation> </message> <message> <location line="+2"/> <source>The Freshcoin Core developers</source> <translation>De Freshcoin Core ontwikkelaars</translation> </message> <message> <location line="+1"/> <source>[testnet]</source> <translation>[testnetwerk]</translation> </message> </context> <context> <name>TrafficGraphWidget</name> <message> <location filename="../trafficgraphwidget.cpp" line="+79"/> <source>KB/s</source> <translation>KB/s</translation> </message> </context> <context> <name>TransactionDesc</name> <message> <location filename="../transactiondesc.cpp" line="+28"/> <source>Open until %1</source> <translation>Openen totdat %1</translation> </message> <message> <location line="+6"/> <source>%1/offline</source> <translation>%1/offline</translation> </message> <message> <location line="+2"/> <source>%1/unconfirmed</source> <translation>%1/onbevestigd</translation> </message> <message> <location line="+2"/> <source>%1 confirmations</source> <translation>%1 bevestigingen</translation> </message> <message> <location line="+18"/> <source>Status</source> <translation>Status</translation> </message> <message numerus="yes"> <location line="+7"/> <source>, broadcast through %n node(s)</source> <translation><numerusform>, verzonden naar %n node</numerusform><numerusform>, verzonden naar %n nodes</numerusform></translation> </message> <message> <location line="+4"/> <source>Date</source> <translation>Datum</translation> </message> <message> <location line="+7"/> <source>Source</source> <translation>Bron</translation> </message> <message> <location line="+0"/> <source>Generated</source> <translation>Gegenereerd</translation> </message> <message> <location line="+5"/> <location line="+17"/> <source>From</source> <translation>Van</translation> </message> <message> <location line="+1"/> <location line="+22"/> <location line="+58"/> <source>To</source> <translation>Aan</translation> </message> <message> <location line="-77"/> <location line="+2"/> <source>own address</source> <translation>eigen adres</translation> </message> <message> <location line="-2"/> <source>label</source> <translation>label</translation> </message> <message> <location line="+37"/> <location line="+12"/> <location line="+45"/> <location line="+17"/> <location line="+53"/> <source>Credit</source> <translation>Credit</translation> </message> <message numerus="yes"> <location line="-125"/> <source>matures in %n more block(s)</source> <translation><numerusform>komt tot wasdom na %n nieuw blok</numerusform><numerusform>komt tot wasdom na %n nieuwe blokken</numerusform></translation> </message> <message> <location line="+2"/> <source>not accepted</source> <translation>niet geaccepteerd</translation> </message> <message> <location line="+44"/> <location line="+8"/> <location line="+15"/> <location line="+53"/> <source>Debit</source> 
<translation>Debet</translation> </message> <message> <location line="-62"/> <source>Transaction fee</source> <translation>Transactiekosten</translation> </message> <message> <location line="+16"/> <source>Net amount</source> <translation>Netto bedrag</translation> </message> <message> <location line="+6"/> <location line="+9"/> <source>Message</source> <translation>Bericht</translation> </message> <message> <location line="-7"/> <source>Comment</source> <translation>Opmerking</translation> </message> <message> <location line="+2"/> <source>Transaction ID</source> <translation>Transactie-ID</translation> </message> <message> <location line="+18"/> <source>Merchant</source> <translation>Handelaar</translation> </message> <message> <location line="+7"/> <source>Generated coins must mature %1 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to &quot;not accepted&quot; and it won&apos;t be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation>Gegenereerde munten moeten %1 blokken rijpen voordat ze kunnen worden besteed. Toen dit blok gegenereerd werd, werd het uitgezonden naar het netwerk om aan de blokketen toegevoegd te worden. Als het niet lukt om het aan de keten toe te voegen, zal de status veranderen naar &quot;niet geaccepteerd&quot; en zal het niet besteedbaar zijn. Dit kan soms gebeuren als een ander knooppunt een blok genereert binnen een paar seconden na die van jou.</translation> </message> <message> <location line="+8"/> <source>Debug information</source> <translation>Debug-informatie</translation> </message> <message> <location line="+8"/> <source>Transaction</source> <translation>Transactie</translation> </message> <message> <location line="+3"/> <source>Inputs</source> <translation>Inputs</translation> </message> <message> <location line="+23"/> <source>Amount</source> <translation>Bedrag</translation> </message> <message> <location line="+1"/> <source>true</source> <translation>waar</translation> </message> <message> <location line="+0"/> <source>false</source> <translation>onwaar</translation> </message> <message> <location line="-232"/> <source>, has not been successfully broadcast yet</source> <translation>, is nog niet met succes verzonden</translation> </message> <message numerus="yes"> <location line="-35"/> <source>Open for %n more block(s)</source> <translation><numerusform>Open voor nog %n blok</numerusform><numerusform>Open voor nog %n blokken</numerusform></translation> </message> <message> <location line="+70"/> <source>unknown</source> <translation>onbekend</translation> </message> </context> <context> <name>TransactionDescDialog</name> <message> <location filename="../forms/transactiondescdialog.ui" line="+14"/> <source>Transaction details</source> <translation>Transactiedetails</translation> </message> <message> <location line="+6"/> <source>This pane shows a detailed description of the transaction</source> <translation>Dit venster laat een uitgebreide beschrijving van de transactie zien</translation> </message> </context> <context> <name>TransactionTableModel</name> <message> <location filename="../transactiontablemodel.cpp" line="+234"/> <source>Date</source> <translation>Datum</translation> </message> <message> <location line="+0"/> <source>Type</source> <translation>Type</translation> </message> <message> <location line="+0"/> 
<source>Address</source> <translation>Adres</translation> </message> <message> <location line="+0"/> <source>Amount</source> <translation>Bedrag</translation> </message> <message> <location line="+59"/> <source>Immature (%1 confirmations, will be available after %2)</source> <translation>Onrijp (%1 bevestigingen, zal na %2 beschikbaar worden)</translation> </message> <message numerus="yes"> <location line="+16"/> <source>Open for %n more block(s)</source> <translation><numerusform>Open voor nog %n blok</numerusform><numerusform>Open voor nog %n blokken</numerusform></translation> </message> <message> <location line="+3"/> <source>Open until %1</source> <translation>Open tot %1</translation> </message> <message> <location line="+3"/> <source>Offline (%1 confirmations)</source> <translation>Niet verbonden (%1 bevestigingen)</translation> </message> <message> <location line="+3"/> <source>Unconfirmed (%1 of %2 confirmations)</source> <translation>Onbevestigd (%1 van %2 bevestigd)</translation> </message> <message> <location line="-22"/> <location line="+25"/> <source>Confirmed (%1 confirmations)</source> <translation>Bevestigd (%1 bevestigingen)</translation> </message> <message> <location line="-22"/> <source>This block was not received by any other nodes and will probably not be accepted!</source> <translation>Dit blok is niet ontvangen door andere nodes en zal waarschijnlijk niet worden geaccepteerd!</translation> </message> <message> <location line="+3"/> <source>Generated but not accepted</source> <translation>Gegenereerd maar niet geaccepteerd</translation> </message> <message> <location line="+62"/> <source>Received with</source> <translation>Ontvangen met</translation> </message> <message> <location line="+2"/> <source>Received from</source> <translation>Ontvangen van</translation> </message> <message> <location line="+3"/> <source>Sent to</source> <translation>Verzonden aan</translation> </message> <message> <location line="+2"/> <source>Payment to yourself</source> <translation>Betaling aan uzelf</translation> </message> <message> <location line="+2"/> <source>Mined</source> <translation>Gedolven</translation> </message> <message> <location line="+38"/> <source>(n/a)</source> <translation>(nvt)</translation> </message> <message> <location line="+199"/> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation>Transactiestatus. 
Houd de muiscursor boven dit veld om het aantal bevestigingen te tonen.</translation> </message> <message> <location line="+2"/> <source>Date and time that the transaction was received.</source> <translation>Datum en tijd waarop deze transactie is ontvangen.</translation> </message> <message> <location line="+2"/> <source>Type of transaction.</source> <translation>Type transactie.</translation> </message> <message> <location line="+2"/> <source>Destination address of transaction.</source> <translation>Ontvangend adres van transactie.</translation> </message> <message> <location line="+2"/> <source>Amount removed from or added to balance.</source> <translation>Bedrag verwijderd van of toegevoegd aan saldo</translation> </message> </context> <context> <name>TransactionView</name> <message> <location filename="../transactionview.cpp" line="+57"/> <location line="+16"/> <source>All</source> <translation>Alles</translation> </message> <message> <location line="-15"/> <source>Today</source> <translation>Vandaag</translation> </message> <message> <location line="+1"/> <source>This week</source> <translation>Deze week</translation> </message> <message> <location line="+1"/> <source>This month</source> <translation>Deze maand</translation> </message> <message> <location line="+1"/> <source>Last month</source> <translation>Vorige maand</translation> </message> <message> <location line="+1"/> <source>This year</source> <translation>Dit jaar</translation> </message> <message> <location line="+1"/> <source>Range...</source> <translation>Bereik...</translation> </message> <message> <location line="+11"/> <source>Received with</source> <translation>Ontvangen met</translation> </message> <message> <location line="+2"/> <source>Sent to</source> <translation>Verzonden aan</translation> </message> <message> <location line="+2"/> <source>To yourself</source> <translation>Aan uzelf</translation> </message> <message> <location line="+1"/> <source>Mined</source> <translation>Gedolven</translation> </message> <message> <location line="+1"/> <source>Other</source> <translation>Anders</translation> </message> <message> <location line="+6"/> <source>Enter address or label to search</source> <translation>Vul adres of label in om te zoeken</translation> </message> <message> <location line="+6"/> <source>Min amount</source> <translation>Min. 
bedrag</translation> </message> <message> <location line="+34"/> <source>Copy address</source> <translation>Kopieer adres</translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation>Kopieer label</translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation>Kopieer bedrag</translation> </message> <message> <location line="+1"/> <source>Copy transaction ID</source> <translation>Kopieer transactie-ID</translation> </message> <message> <location line="+1"/> <source>Edit label</source> <translation>Bewerk label</translation> </message> <message> <location line="+1"/> <source>Show transaction details</source> <translation>Toon transactiedetails</translation> </message> <message> <location line="+142"/> <source>Export Transaction History</source> <translation>Exporteer Transactieverleden</translation> </message> <message> <location line="+19"/> <source>Exporting Failed</source> <translation>Export Mislukt</translation> </message> <message> <location line="+0"/> <source>There was an error trying to save the transaction history to %1.</source> <translation>Er is een fout opgetreden bij het opslaan van het transactieverleden naar %1.</translation> </message> <message> <location line="+4"/> <source>Exporting Successful</source> <translation>Export Succesvol</translation> </message> <message> <location line="+0"/> <source>The transaction history was successfully saved to %1.</source> <translation>Het transactieverleden is succesvol opgeslagen in %1.</translation> </message> <message> <location line="-22"/> <source>Comma separated file (*.csv)</source> <translation>Kommagescheiden bestand (*.csv)</translation> </message> <message> <location line="+9"/> <source>Confirmed</source> <translation>Bevestigd</translation> </message> <message> <location line="+1"/> <source>Date</source> <translation>Datum</translation> </message> <message> <location line="+1"/> <source>Type</source> <translation>Type</translation> </message> <message> <location line="+1"/> <source>Label</source> <translation>Label</translation> </message> <message> <location line="+1"/> <source>Address</source> <translation>Adres</translation> </message> <message> <location line="+1"/> <source>Amount</source> <translation>Bedrag</translation> </message> <message> <location line="+1"/> <source>ID</source> <translation>ID</translation> </message> <message> <location line="+107"/> <source>Range:</source> <translation>Bereik:</translation> </message> <message> <location line="+8"/> <source>to</source> <translation>tot</translation> </message> </context> <context> <name>WalletFrame</name> <message> <location filename="../walletframe.cpp" line="+26"/> <source>No wallet has been loaded.</source> <translation>Er is geen portemonnee geladen.</translation> </message> </context> <context> <name>WalletModel</name> <message> <location filename="../walletmodel.cpp" line="+245"/> <source>Send Coins</source> <translation>Verzend munten</translation> </message> </context> <context> <name>WalletView</name> <message> <location filename="../walletview.cpp" line="+43"/> <source>&amp;Export</source> <translation>&amp;Exporteer</translation> </message> <message> <location line="+1"/> <source>Export the data in the current tab to a file</source> <translation>Exporteer de data in de huidige tab naar een bestand</translation> </message> <message> <location line="+181"/> <source>Backup Wallet</source> <translation>Portemonnee backuppen</translation> </message> <message> <location line="+1"/> 
<source>Wallet Data (*.dat)</source> <translation>Portemonnee-data (*.dat)</translation> </message> <message> <location line="+6"/> <source>Backup Failed</source> <translation>Backup Mislukt</translation> </message> <message> <location line="+0"/> <source>There was an error trying to save the wallet data to %1.</source> <translation>Er is een fout opgetreden bij het wegschrijven van de portemonnee-data naar %1.</translation> </message> <message> <location line="+4"/> <source>The wallet data was successfully saved to %1.</source> <translation>De portemonneedata is succesvol opgeslagen in %1.</translation> </message> <message> <location line="+0"/> <source>Backup Successful</source> <translation>Backup Succesvol</translation> </message> </context> <context> <name>freshcoin-core</name> <message> <location filename="../bitcoinstrings.cpp" line="+221"/> <source>Usage:</source> <translation>Gebruik:</translation> </message> <message> <location line="-54"/> <source>List commands</source> <translation>Lijst van commando&apos;s</translation> </message> <message> <location line="-14"/> <source>Get help for a command</source> <translation>Toon hulp voor een commando</translation> </message> <message> <location line="+26"/> <source>Options:</source> <translation>Opties:</translation> </message> <message> <location line="+22"/> <source>Specify configuration file (default: freshcoin.conf)</source> <translation>Specificeer configuratiebestand (standaard: freshcoin.conf) </translation> </message> <message> <location line="+3"/> <source>Specify pid file (default: freshcoind.pid)</source> <translation>Specificeer pid-bestand (standaard: freshcoind.pid) </translation> </message> <message> <location line="-1"/> <source>Specify data directory</source> <translation>Stel datamap in</translation> </message> <message> <location line="-9"/> <source>Set database cache size in megabytes (default: 25)</source> <translation>Stel database-cachegrootte in in megabytes (standaard: 25)</translation> </message> <message> <location line="-26"/> <source>Listen for connections on &lt;port&gt; (default: 22556 or testnet: 44556)</source> <translation>Luister voor verbindingen op &lt;poort&gt; (standaard: 22556 of testnet: 44556)</translation> </message> <message> <location line="+5"/> <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source> <translation>Onderhoud maximaal &lt;n&gt; verbindingen naar peers (standaard: 125)</translation> </message> <message> <location line="-51"/> <source>Connect to a node to retrieve peer addresses, and disconnect</source> <translation>Verbind naar een node om netwerkadressen van anderen op te halen, en verbreek vervolgens de verbinding</translation> </message> <message> <location line="+84"/> <source>Specify your own public address</source> <translation>Specificeer uw eigen publieke netwerkadres</translation> </message> <message> <location line="+5"/> <source>Threshold for disconnecting misbehaving peers (default: 100)</source> <translation>Drempel om verbinding te verbreken naar zich misdragende peers (standaard: 100)</translation> </message> <message> <location line="-148"/> <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source> <translation>Aantal seconden dat zich misdragende peers niet opnieuw mogen verbinden (standaard: 86400)</translation> </message> <message> <location line="-36"/> <source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source> <translation>Er is een fout opgetreden tijdens het 
instellen van de inkomende RPC-poort %u op IPv4: %s</translation> </message> <message> <location line="+34"/> <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 22555 or testnet: 44555)</source> <translation>Wacht op JSON-RPC-connecties op poort &lt;port&gt; (standaard: 22555 of testnet: 44555)</translation> </message> <message> <location line="+45"/> <source>Accept command line and JSON-RPC commands</source> <translation>Aanvaard commandoregel- en JSON-RPC-commando&apos;s</translation> </message> <message> <location line="+80"/> <source>Run in the background as a daemon and accept commands</source> <translation>Draai in de achtergrond als daemon en aanvaard commando&apos;s</translation> </message> <message> <location line="+39"/> <source>Use the test network</source> <translation>Gebruik het testnetwerk</translation> </message> <message> <location line="-118"/> <source>Accept connections from outside (default: 1 if no -proxy or -connect)</source> <translation>Accepteer verbindingen van buitenaf (standaard: 1 als geen -proxy of -connect is opgegeven)</translation> </message> <message> <location line="-95"/> <source>%s, you must set a rpcpassword in the configuration file: %s It is recommended you use the following random password: rpcuser=freshcoinrpc rpcpassword=%s (you do not need to remember this password) The username and password MUST NOT be the same. If the file does not exist, create it with owner-readable-only file permissions. It is also recommended to set alertnotify so you are notified of problems; for example: alertnotify=echo %%s | mail -s &quot;Freshcoin Alert&quot; [email protected] </source> <translation>%s, u moet een RPC-wachtwoord instellen in het configuratiebestand: %s U wordt aangeraden het volgende willekeurige wachtwoord te gebruiken: rpcuser=freshcoinrpc rpcpassword=%s (u hoeft dit wachtwoord niet te onthouden) De gebruikersnaam en wachtwoord mogen niet hetzelfde zijn. Als het bestand niet bestaat, maak het dan aan met leesrechten voor enkel de eigenaar. Het is ook aan te bevelen &quot;alertnotify&quot; in te stellen zodat u op de hoogte gesteld wordt van problemen; bijvoorbeeld: alertnotify=echo %%s | mail -s &quot;Freshcoin Alert&quot; [email protected]</translation> </message> <message> <location line="+12"/> <source>Acceptable ciphers (default: TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!3DES:@STRENGTH)</source> <translation>Aanvaardbare versleutelingsmethoden (standaard: TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!3DES:@STRENGTH)</translation> </message> <message> <location line="+5"/> <source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source> <translation>Er is een fout opgetreden tijdens het instellen van de inkomende RPC-poort %u op IPv6, terugval naar IPv4: %s</translation> </message> <message> <location line="+3"/> <source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source> <translation>Bind aan opgegeven adres en luister er altijd op. Gebruik [host]:port notatie voor IPv6</translation> </message> <message> <location line="+3"/> <source>Cannot obtain a lock on data directory %s. Freshcoin Core is probably already running.</source> <translation>Kan geen lock op de datamap %s verkrijgen. Freshcoin Core draait vermoedelijk reeds.</translation> </message> <message> <location line="+3"/> <source>Enter regression test mode, which uses a special chain in which blocks can be solved instantly. 
This is intended for regression testing tools and app development.</source> <translation>Schakel regressietest-modus in, die een speciale blokketen gebruikt waarin blokken onmiddellijk opgelost kunnen worden. Dit is bedoeld voor regressietestsoftware en app-ontwikkeling.</translation> </message> <message> <location line="+4"/> <source>Enter regression test mode, which uses a special chain in which blocks can be solved instantly.</source> <translation>Schakel regressietest-modus in, die een speciale blokketen gebruikt waarin blokken onmiddellijk opgelost kunnen worden.</translation> </message> <message> <location line="+3"/> <source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation>Fout: De transactie was afgewezen! Dit kan gebeuren als sommige munten in uw portemonnee al eerder uitgegeven zijn, zoals wanneer u een kopie van uw wallet.dat heeft gebruikt en in de kopie deze munten zijn uitgegeven, maar in deze portemonnee die munten nog niet als zodanig zijn gemarkeerd.</translation> </message> <message> <location line="+4"/> <source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source> <translation>Fout: Deze transactie vereist transactiekosten van tenminste %s, vanwege het bedrag, de complexiteit, of het gebruik van onlangs ontvangen munten!</translation> </message> <message> <location line="+6"/> <source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source> <translation>Voer opdracht uit zodra een portemonneetransactie verandert (%s in cmd wordt vervangen door TxID)</translation> </message> <message> <location line="+18"/> <source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source> <translation>Dit is een pre-release testversie - gebruik op eigen risico! Gebruik deze niet voor het delven van munten of handelsdoeleinden</translation> </message> <message> <location line="+5"/> <source>Use separate SOCKS5 proxy to reach peers via Tor hidden services (default: -proxy)</source> <translation>Gebruik een aparte SOCKS5 proxy om &apos;Tor hidden services&apos; te bereiken (standaard: hetzelfde als -proxy)</translation> </message> <message> <location line="+3"/> <source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source> <translation>Waarschuwing: -paytxfee is zeer hoog ingesteld. Dit zijn de transactiekosten die u betaalt bij het verzenden van een transactie.</translation> </message> <message> <location line="+3"/> <source>Warning: Please check that your computer&apos;s date and time are correct! If your clock is wrong Freshcoin Core will not work properly.</source> <translation>Waarschuwing: Controleer dat de datum en tijd op uw computer correct zijn ingesteld. Als uw klok fout staat zal Freshcoin Core niet correct werken.</translation> </message> <message> <location line="+3"/> <source>Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.</source> <translation>Waarschuwing: Het lijkt erop dat het netwerk geen consensus kan vinden! Sommige delvers lijken problemen te ondervinden.</translation> </message> <message> <location line="+3"/> <source>Warning: We do not appear to fully agree with our peers! 
You may need to upgrade, or other nodes may need to upgrade.</source> <translation>Waarschuwing: Het lijkt erop dat we geen consensus kunnen vinden met verbonden nodes! Mogelijk dient u te upgraden, of andere nodes moeten wellicht upgraden.</translation> </message> <message> <location line="+3"/> <source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation>Waarschuwing: Fout bij het lezen van wallet.dat! Alle sleutels zijn in goede orde uitgelezen, maar transactiedata of adresboeklemma&apos;s zouden kunnen ontbreken of fouten bevatten.</translation> </message> <message> <location line="+3"/> <source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation>Waarschuwing: wallet.dat is corrupt, data is veiliggesteld! Originele wallet.dat is opgeslagen als wallet.{tijdstip}.bak in %s; als uw balans of transacties incorrect zijn dient u een backup terug te zetten.</translation> </message> <message> <location line="+9"/> <source>&lt;category&gt; can be:</source> <translation>&lt;category&gt; kan zijn:</translation> </message> <message> <location line="+6"/> <source>Attempt to recover private keys from a corrupt wallet.dat</source> <translation>Poog de geheime sleutels uit een corrupt wallet.dat bestand terug te halen</translation> </message> <message> <location line="+1"/> <source>Freshcoin Core Daemon</source> <translation>Freshcoin Core Daemon</translation> </message> <message> <location line="+1"/> <source>Freshcoin Core RPC client version</source> <translation>Freshcoin Core RPC-clientversie</translation> </message> <message> <location line="+1"/> <source>Block creation options:</source> <translation>Blokcreatie-opties:</translation> </message> <message> <location line="+5"/> <source>Connect only to the specified node(s)</source> <translation>Verbind alleen naar de gespecificeerde node(s)</translation> </message> <message> <location line="+1"/> <source>Connect through SOCKS proxy</source> <translation>Verbind via een SOCKS-proxy</translation> </message> <message> <location line="+1"/> <source>Connect to JSON-RPC on &lt;port&gt; (default: 22555 or testnet: 44555)</source> <translation>Verbinden met JSON-RPC op &lt;poort&gt; (standaard: 22555 of testnet: 44555)</translation> </message> <message> <location line="+2"/> <source>Corrupted block database detected</source> <translation>Corrupte blokkendatabase gedetecteerd</translation> </message> <message> <location line="+1"/> <source>Discover own IP address (default: 1 when listening and no -externalip)</source> <translation>Ontdek eigen IP-adres (standaard: 1 als er wordt geluisterd en geen -externalip is opgegeven)</translation> </message> <message> <location line="+1"/> <source>Do not load the wallet and disable wallet RPC calls</source> <translation>Laad geen portemonnee en schakel RPC aanvragen uit</translation> </message> <message> <location line="+1"/> <source>Do you want to rebuild the block database now?</source> <translation>Wilt u de blokkendatabase nu herbouwen?</translation> </message> <message> <location line="+2"/> <source>Error initializing block database</source> <translation>Fout bij initialisatie blokkendatabase</translation> </message> <message> <location line="+1"/> <source>Error initializing wallet database environment %s!</source> <translation>Probleem met initialiseren van de 
database-omgeving %s!</translation> </message> <message> <location line="+1"/> <source>Error loading block database</source> <translation>Fout bij het laden van blokkendatabase</translation> </message> <message> <location line="+4"/> <source>Error opening block database</source> <translation>Fout bij openen blokkendatabase</translation> </message> <message> <location line="+2"/> <source>Error: Disk space is low!</source> <translation>Fout: Weinig vrije diskruimte!</translation> </message> <message> <location line="+1"/> <source>Error: Wallet locked, unable to create transaction!</source> <translation>Fout: Portemonnee vergrendeld, aanmaak transactie niet mogelijk!</translation> </message> <message> <location line="+1"/> <source>Error: system error: </source> <translation>Fout: Systeemfout:</translation> </message> <message> <location line="+1"/> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation>Mislukt om op welke poort dan ook te luisteren. Gebruik -listen=0 als u dit wilt.</translation> </message> <message> <location line="+1"/> <source>Failed to read block info</source> <translation>Lezen van blokinformatie mislukt</translation> </message> <message> <location line="+1"/> <source>Failed to read block</source> <translation>Lezen van blok mislukt</translation> </message> <message> <location line="+1"/> <source>Failed to sync block index</source> <translation>Synchroniseren van blokindex mislukt</translation> </message> <message> <location line="+1"/> <source>Failed to write block index</source> <translation>Schrijven van blokindex mislukt</translation> </message> <message> <location line="+1"/> <source>Failed to write block info</source> <translation>Schrijven van blokinformatie mislukt</translation> </message> <message> <location line="+1"/> <source>Failed to write block</source> <translation>Schrijven van blok mislukt</translation> </message> <message> <location line="+1"/> <source>Failed to write file info</source> <translation>Schrijven van bestandsinformatie mislukt</translation> </message> <message> <location line="+1"/> <source>Failed to write to coin database</source> <translation>Schrijven naar coindatabase mislukt</translation> </message> <message> <location line="+1"/> <source>Failed to write transaction index</source> <translation>Schrijven van transactieindex mislukt</translation> </message> <message> <location line="+1"/> <source>Failed to write undo data</source> <translation>Schrijven van undo-data mislukt</translation> </message> <message> <location line="+1"/> <source>Fee per kB to add to transactions you send</source> <translation>Transactiekosten per kB om toe te voegen aan transacties die u verzendt</translation> </message> <message> <location line="+1"/> <source>Find peers using DNS lookup (default: 1 unless -connect)</source> <translation>Vind andere nodes d.m.v. 
DNS-naslag (standaard: 1 tenzij -connect)</translation> </message> <message> <location line="+1"/> <source>Generate coins (default: 0)</source> <translation>Genereer munten (standaard: 0)</translation> </message> <message> <location line="+2"/> <source>How many blocks to check at startup (default: 288, 0 = all)</source> <translation>Aantal te checken blokken bij het opstarten (standaard: 288, 0 = allemaal)</translation> </message> <message> <location line="+1"/> <source>How thorough the block verification is (0-4, default: 3)</source> <translation>Hoe grondig de blokverificatie is (0-4, standaard: 3)</translation> </message> <message> <location line="+1"/> <source>If &lt;category&gt; is not supplied, output all debugging information.</source> <translation>Als er geen &lt;category&gt; is opgegeven, laat dan alle debugging informatie zien.</translation> </message> <message> <location line="+2"/> <source>Incorrect or no genesis block found. Wrong datadir for network?</source> <translation>Incorrect of geen genesis-blok gevonden. Verkeerde datamap voor het netwerk?</translation> </message> <message> <location line="+3"/> <source>Invalid -onion address: &apos;%s&apos;</source> <translation>Ongeldig -onion adres: &apos;%s&apos;</translation> </message> <message> <location line="+15"/> <source>Not enough file descriptors available.</source> <translation>Niet genoeg file descriptors beschikbaar.</translation> </message> <message> <location line="+5"/> <source>Prepend debug output with timestamp (default: 1)</source> <translation>Voorzie debug-uitvoer van een tijdstempel (standaard: 1)</translation> </message> <message> <location line="+1"/> <source>RPC client options:</source> <translation>RPC client opties:</translation> </message> <message> <location line="+1"/> <source>Rebuild block chain index from current blk000??.dat files</source> <translation>Blokketen-index opnieuw opbouwen met behulp van huidige blk000??.dat-bestanden</translation> </message> <message> <location line="+5"/> <source>Select SOCKS version for -proxy (4 or 5, default: 5)</source> <translation>Selecteer de versie van de SOCKS-proxy om te gebruiken (4 of 5, standaard is 5)</translation> </message> <message> <location line="+1"/> <source>Send command to Freshcoin Core server</source> <translation>Stuur commando naar Freshcoin Core server</translation> </message> <message> <location line="+7"/> <source>Set maximum block size in bytes (default: %d)</source> <translation>Stel maximum blokgrootte in in bytes (standaard: %d)</translation> </message> <message> <location line="+2"/> <source>Set the number of threads to service RPC calls (default: 4)</source> <translation>Stel het aantal threads in om RPC-aanvragen mee te bedienen (standaard: 4)</translation> </message> <message> <location line="+7"/> <source>Specify wallet file (within data directory)</source> <translation>Specificeer het portemonneebestand (binnen de gegevensmap)</translation> </message> <message> <location line="+2"/> <source>Start Freshcoin Core server</source> <translation>Start Freshcoin Core server</translation> </message> <message> <location line="+3"/> <source>This is intended for regression testing tools and app development.</source> <translation>Dit is bedoeld voor regressietesthulpmiddelen en applicatieontwikkeling.</translation> </message> <message> <location line="+10"/> <source>Usage (deprecated, use freshcoin-cli):</source> <translation>Gebruik (verouderd, gebruik freshcoin-cli):</translation> </message> <message> <location line="+7"/> <source>Verifying 
blocks...</source> <translation>Blokken aan het controleren...</translation> </message> <message> <location line="+1"/> <source>Verifying wallet...</source> <translation>Portemonnee aan het controleren...</translation> </message> <message> <location line="+1"/> <source>Wait for RPC server to start</source> <translation>Wacht tot de RPC-server gestart is</translation> </message> <message> <location line="+1"/> <source>Wallet %s resides outside data directory %s</source> <translation>Portemonnee %s bevindt zich buiten de gegevensmap %s</translation> </message> <message> <location line="+2"/> <source>Wallet options:</source> <translation>Portemonnee opties:</translation> </message> <message> <location line="+2"/> <source>Warning: Deprecated argument -debugnet ignored, use -debug=net</source> <translation>Waarschuwing: Verouderd argument -debugnet genegeerd, gebruik -debug=net</translation> </message> <message> <location line="+2"/> <source>You need to rebuild the database using -reindex to change -txindex</source> <translation>Om -txindex te kunnen veranderen dient u de database opnieuw te bouwen met gebruik van -reindex.</translation> </message> <message> <location line="-79"/> <source>Imports blocks from external blk000??.dat file</source> <translation>Importeert blokken van extern blk000??.dat bestand</translation> </message> <message> <location line="-105"/> <source>Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)</source> <translation>Voer commando uit zodra een waarschuwing is ontvangen of wanneer we een erg lange fork detecteren (%s in commando wordt vervangen door bericht)</translation> </message> <message> <location line="+14"/> <source>Output debugging information (default: 0, supplying &lt;category&gt; is optional)</source> <translation>Toon debugginginformatie (standaard: 0, het opgeven van &lt;category&gt; is optioneel)</translation> </message> <message> <location line="+2"/> <source>Set maximum size of high-priority/low-fee transactions in bytes (default: %d)</source> <translation>Stel maximumgrootte in bytes in voor hoge-prioriteits-/lage-transactiekosten-transacties (standaard: %d)</translation> </message> <message> <location line="+2"/> <source>Set the number of script verification threads (up to 16, 0 = auto, &lt;0 = leave that many cores free, default: 0)</source> <translation>Stel het aantal threads voor scriptverificatie in (max 16, 0 = auto, &lt;0 = laat zoveel cores vrij, standaard: 0)</translation> </message> <message> <location line="+89"/> <source>Information</source> <translation>Informatie</translation> </message> <message> <location line="+4"/> <source>Invalid amount for -minrelaytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>Ongeldig bedrag voor -minrelaytxfee=&lt;bedrag&gt;: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Invalid amount for -mintxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>Ongeldig bedrag voor -mintxfee=&lt;bedrag&gt;: &apos;%s&apos;</translation> </message> <message> <location line="+8"/> <source>Maintain a full transaction index (default: 0)</source> <translation>Onderhoud een volledige transactieindex (standaard: 0)</translation> </message> <message> <location line="+2"/> <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source> <translation>Maximum per-connectie ontvangstbuffer, &lt;n&gt;*1000 bytes (standaard: 5000)</translation> </message> <message> <location line="+1"/> 
<source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source> <translation>Maximum per-connectie zendbuffer, &lt;n&gt;*1000 bytes (standaard: 1000)</translation> </message> <message> <location line="+2"/> <source>Only accept block chain matching built-in checkpoints (default: 1)</source> <translation>Accepteer alleen blokketen die overeenkomt met de ingebouwde checkpoints (standaard: 1)</translation> </message> <message> <location line="+1"/> <source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source> <translation>Verbind alleen naar nodes in netwerk &lt;net&gt; (IPv4, IPv6 of Tor)</translation> </message> <message> <location line="+9"/> <source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source> <translation>SSL-opties: (zie de Bitcoin Wiki voor SSL-instructies)</translation> </message> <message> <location line="+4"/> <source>Send trace/debug info to console instead of debug.log file</source> <translation>Stuur trace/debug-info naar de console in plaats van het debug.log bestand</translation> </message> <message> <location line="+6"/> <source>Set minimum block size in bytes (default: 0)</source> <translation>Stel minimum blokgrootte in in bytes (standaard: 0)</translation> </message> <message> <location line="+2"/> <source>Shrink debug.log file on client startup (default: 1 when no -debug)</source> <translation>Verklein debug.log-bestand bij het opstarten van de client (standaard: 1 als geen -debug)</translation> </message> <message> <location line="+1"/> <source>Signing transaction failed</source> <translation>Ondertekenen van transactie mislukt</translation> </message> <message> <location line="+2"/> <source>Specify connection timeout in milliseconds (default: 5000)</source> <translation>Specificeer de timeout-tijd in milliseconden (standaard: 5000)</translation> </message> <message> <location line="+6"/> <source>System error: </source> <translation>Systeemfout:</translation> </message> <message> <location line="+5"/> <source>Transaction amount too small</source> <translation>Transactiebedrag te klein</translation> </message> <message> <location line="+1"/> <source>Transaction amounts must be positive</source> <translation>Transactiebedragen moeten positief zijn</translation> </message> <message> <location line="+1"/> <source>Transaction too large</source> <translation>Transactie te groot</translation> </message> <message> <location line="+8"/> <source>Use UPnP to map the listening port (default: 0)</source> <translation>Gebruik UPnP om de luisterende poort te mappen (standaard: 0)</translation> </message> <message> <location line="+1"/> <source>Use UPnP to map the listening port (default: 1 when listening)</source> <translation>Gebruik UPnP om de luisterende poort te mappen (standaard: 1 als er wordt geluisterd)</translation> </message> <message> <location line="+2"/> <source>Username for JSON-RPC connections</source> <translation>Gebruikersnaam voor JSON-RPC-verbindingen</translation> </message> <message> <location line="+7"/> <source>Warning</source> <translation>Waarschuwing</translation> </message> <message> <location line="+2"/> <source>Warning: This version is obsolete, upgrade required!</source> <translation>Waarschuwing: Deze versie is verouderd, een upgrade is vereist!</translation> </message> <message> <location line="+2"/> <source>version</source> <translation>versie</translation> </message> <message> <location line="+1"/> <source>wallet.dat corrupt, salvage failed</source> <translation>wallet.dat corrupt, 
veiligstellen mislukt</translation> </message> <message> <location line="-58"/> <source>Password for JSON-RPC connections</source> <translation>Wachtwoord voor JSON-RPC-verbindingen</translation> </message> <message> <location line="-70"/> <source>Allow JSON-RPC connections from specified IP address</source> <translation>Sta JSON-RPC verbindingen van opgegeven IP-adres toe</translation> </message> <message> <location line="+80"/> <source>Send commands to node running on &lt;ip&gt; (default: 127.0.0.1)</source> <translation>Verstuur commando&apos;s naar node die op &lt;ip&gt; draait (standaard: 127.0.0.1)</translation> </message> <message> <location line="-132"/> <source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source> <translation>Voer commando uit zodra het beste blok verandert (%s in cmd wordt vervangen door blockhash)</translation> </message> <message> <location line="+161"/> <source>Upgrade wallet to latest format</source> <translation>Vernieuw portemonnee naar nieuwste formaat</translation> </message> <message> <location line="-24"/> <source>Set key pool size to &lt;n&gt; (default: 100)</source> <translation>Stel sleutelpoelgrootte in op &lt;n&gt; (standaard: 100)</translation> </message> <message> <location line="-11"/> <source>Rescan the block chain for missing wallet transactions</source> <translation>Doorzoek de blokketen op ontbrekende portemonnee-transacties</translation> </message> <message> <location line="+38"/> <source>Use OpenSSL (https) for JSON-RPC connections</source> <translation>Gebruik OpenSSL (https) voor JSON-RPC-verbindingen</translation> </message> <message> <location line="-30"/> <source>Server certificate file (default: server.cert)</source> <translation>Certificaat-bestand voor server (standaard: server.cert)</translation> </message> <message> <location line="+1"/> <source>Server private key (default: server.pem)</source> <translation>Geheime sleutel voor server (standaard: server.pem)</translation> </message> <message> <location line="+16"/> <source>This help message</source> <translation>Dit helpbericht</translation> </message> <message> <location line="+7"/> <source>Unable to bind to %s on this computer (bind returned error %d, %s)</source> <translation>Niet in staat om aan %s te binden op deze computer (bind gaf error %d, %s)</translation> </message> <message> <location line="-107"/> <source>Allow DNS lookups for -addnode, -seednode and -connect</source> <translation>Sta DNS-naslag toe voor -addnode, -seednode en -connect</translation> </message> <message> <location line="+60"/> <source>Loading addresses...</source> <translation>Adressen aan het laden...</translation> </message> <message> <location line="-37"/> <source>Error loading wallet.dat: Wallet corrupted</source> <translation>Fout bij laden wallet.dat: Portemonnee corrupt</translation> </message> <message> <location line="+1"/> <source>Error loading wallet.dat: Wallet requires newer version of Freshcoin Core</source> <translation>Fout bij laden wallet.dat: Portemonnee vereist een nieuwere versie van Freshcoin Core</translation> </message> <message> <location line="+98"/> <source>Wallet needed to be rewritten: restart Freshcoin Core to complete</source> <translation>Portemonnee moest herschreven worden: herstart Freshcoin Core om te voltooien</translation> </message> <message> <location line="-100"/> <source>Error loading wallet.dat</source> <translation>Fout bij laden wallet.dat</translation> </message> <message> <location line="+31"/> <source>Invalid -proxy address: 
&apos;%s&apos;</source> <translation>Ongeldig -proxy adres: &apos;%s&apos;</translation> </message> <message> <location line="+56"/> <source>Unknown network specified in -onlynet: &apos;%s&apos;</source> <translation>Onbekend netwerk gespecificeerd in -onlynet: &apos;%s&apos;</translation> </message> <message> <location line="-1"/> <source>Unknown -socks proxy version requested: %i</source> <translation>Onbekende -socks proxyversie aangegeven: %i</translation> </message> <message> <location line="-101"/> <source>Cannot resolve -bind address: &apos;%s&apos;</source> <translation>Kan -bind adres niet herleiden: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Cannot resolve -externalip address: &apos;%s&apos;</source> <translation>Kan -externlip adres niet herleiden: &apos;%s&apos;</translation> </message> <message> <location line="+48"/> <source>Invalid amount for -paytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>Ongeldig bedrag voor -paytxfee=&lt;bedrag&gt;: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Invalid amount</source> <translation>Ongeldig bedrag</translation> </message> <message> <location line="-6"/> <source>Insufficient funds</source> <translation>Ontoereikend saldo</translation> </message> <message> <location line="+10"/> <source>Loading block index...</source> <translation>Blokindex aan het laden...</translation> </message> <message> <location line="-62"/> <source>Add a node to connect to and attempt to keep the connection open</source> <translation>Voeg een node om naar te verbinden toe en probeer de verbinding open te houden</translation> </message> <message> <location line="-32"/> <source>Unable to bind to %s on this computer. Freshcoin Core is probably already running.</source> <translation>Niet in staat om aan %s te binden op deze computer. Freshcoin draait vermoedelijk reeds.</translation> </message> <message> <location line="+95"/> <source>Loading wallet...</source> <translation>Portemonnee aan het laden...</translation> </message> <message> <location line="-56"/> <source>Cannot downgrade wallet</source> <translation>Kan portemonnee niet downgraden</translation> </message> <message> <location line="+3"/> <source>Cannot write default address</source> <translation>Kan standaardadres niet schrijven</translation> </message> <message> <location line="+67"/> <source>Rescanning...</source> <translation>Blokketen aan het doorzoeken...</translation> </message> <message> <location line="-58"/> <source>Done loading</source> <translation>Klaar met laden</translation> </message> <message> <location line="+85"/> <source>To use the %s option</source> <translation>Om de %s optie te gebruiken</translation> </message> <message> <location line="-77"/> <source>Error</source> <translation>Fout</translation> </message> <message> <location line="-35"/> <source>You must set rpcpassword=&lt;password&gt; in the configuration file: %s If the file does not exist, create it with owner-readable-only file permissions.</source> <translation>U dient rpcpassword=&lt;wachtwoord&gt; in te stellen in het configuratiebestand: %s Als het bestand niet bestaat, maak het dan aan, met een alleen-lezen permissie.</translation> </message> </context> </TS><|fim▁end|>
<location line="-407"/> <source>&amp;Overview</source> <translation>&amp;Overzicht</translation>
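A note on how these records are laid out: each prompt carries a file prefix, a `<|fim▁hole|>` marker where a span of code was removed, and the file suffix, closed by `<|fim▁end|>`; the text after that marker is the completion that belongs in the hole. Below is a minimal sketch of splicing a record back into a whole file. The token strings are copied from the records themselves, while the helper name and the (prompt, completion) pairing are assumptions for illustration.

```python
# Sketch: rebuild the original source file from one fill-in-the-middle row.
# Token strings are taken verbatim from the records in this dump; the helper
# name and the (prompt, completion) structure are illustrative assumptions.
FIM_BEGIN = "<|fim▁begin|>"
FIM_HOLE = "<|fim▁hole|>"
FIM_END = "<|fim▁end|>"

def reassemble(prompt: str, completion: str) -> str:
    body = prompt.split(FIM_BEGIN, 1)[-1]         # drop the file-name header
    body = body.rsplit(FIM_END, 1)[0]             # drop the terminator token
    prefix, _, suffix = body.partition(FIM_HOLE)  # split around the hole
    return prefix + completion + suffix
```

Applied to the config.py record below, for instance, the spliced output would put the HOST lines back between the database path and the PORT setting.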
<|file_name|>harwell_boeing.py<|end_file_name|><|fim▁begin|># This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.io` namespace for importing the functions # included below. import warnings from . import _harwell_boeing __all__ = [ # noqa: F822 'MalformedHeader', 'hb_read', 'hb_write', 'HBInfo', 'HBFile', 'HBMatrixType', 'FortranFormatParser', 'IntFormat', 'ExpFormat', 'BadFortranFormat' ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.io.harwell_boeing is deprecated and has no attribute " f"{name}. Try looking in scipy.io instead.") warnings.warn(f"Please use `{name}` from the `scipy.io` namespace, " "the `scipy.io.harwell_boeing` namespace is deprecated.",<|fim▁hole|><|fim▁end|>
category=DeprecationWarning, stacklevel=2) return getattr(_harwell_boeing, name)
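The harwell_boeing record above leans on PEP 562 (Python 3.7+): a module-level `__getattr__`/`__dir__` pair keeps the old import path alive while warning on use. Here is a self-contained sketch of the same shim pattern; the forwarding target (the math module) and the exposed name are stand-ins, not part of the record.

```python
# legacy_mod.py - standalone sketch of the PEP 562 deprecation shim above.
# The forwarding target (math) and the exposed name are invented stand-ins.
import math as _impl
import warnings

__all__ = ['sqrt']            # names the old namespace still forwards

def __dir__():
    return __all__

def __getattr__(name):        # invoked only when normal lookup fails (PEP 562)
    if name not in __all__:
        raise AttributeError(
            f"module 'legacy_mod' has no attribute {name}")
    warnings.warn(
        f"`legacy_mod.{name}` is deprecated; import it from its new home.",
        category=DeprecationWarning, stacklevel=2)
    return getattr(_impl, name)
```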
<|file_name|>config.py<|end_file_name|><|fim▁begin|># config.py # Configuration for tasks application. # Author: Julien Pecqueur ([email protected]) # License: GPL NAME = 'tasks.py' VERSION = 0.1 # Path to tasks database DB = './tasks.db' <|fim▁hole|>PORT = '8081' # Debug mode DEBUG = False # Auto-reload service in case of file change RELOADER = True<|fim▁end|>
# Interface to listen HOST = '0.0.0.0'
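The config.py record reads like settings for a Bottle-style service: HOST, PORT, DEBUG and RELOADER mirror `bottle.run` keywords, though the framework is an inference rather than something the record states. A hedged sketch of a consumer follows; note that PORT is stored as a string, so the sketch casts it explicitly.

```python
# Hypothetical consumer of the config module above; that the app is a
# Bottle service is an assumption inferred from the setting names.
from bottle import Bottle, run

import config

app = Bottle()

@app.route('/ping')
def ping():
    # Bottle serializes a returned dict to JSON automatically.
    return {'name': config.NAME, 'version': config.VERSION}

if __name__ == '__main__':
    run(app, host=config.HOST, port=int(config.PORT),  # PORT is a string
        debug=config.DEBUG, reloader=config.RELOADER)
```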
<|file_name|>floatScalar.C<|end_file_name|><|fim▁begin|>/*---------------------------------------------------------------------------*\ ========= | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox \\ / O peration | \\ / A nd | Copyright held by original author \\/ M anipulation | ------------------------------------------------------------------------------- License This file is part of OpenFOAM. OpenFOAM is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. OpenFOAM is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with OpenFOAM; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA \*---------------------------------------------------------------------------*/ #include "floatScalar.H" #include "IOstreams.H" #include <sstream> // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // #define Scalar floatScalar #define ScalarVSMALL floatScalarVSMALL #define readScalar readFloatScalar #include "Scalar.C" #undef Scalar<|fim▁hole|><|fim▁end|>
#undef ScalarVSMALL #undef readScalar // ************************************************************************* //
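The floatScalar.C record uses OpenFOAM's macro-include idiom: define Scalar and readScalar, include the generic Scalar.C body so it compiles against those names, then #undef them, giving one generic source file instantiated per concrete type. A rough Python analogue of that "one generic body, many specializations" idea is sketched below; the helper and the VSMALL value are illustrative only, not OpenFOAM definitions.

```python
# Rough analogue of the macro-include specialization above: one generic
# body, instantiated once per concrete scalar type. Names and the VSMALL
# constant are illustrative assumptions.
def make_scalar_namespace(scalar_type, vsmall):
    def read_scalar(text):
        return scalar_type(text)        # plays the role of readScalar
    def stabilised(x):
        return scalar_type(x) + vsmall  # guard against divide-by-zero
    return {'readScalar': read_scalar, 'stabilised': stabilised}

float_scalar = make_scalar_namespace(float, 1e-37)
print(float_scalar['readScalar']('2.5'), float_scalar['stabilised'](0.0))
```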
<|file_name|>test01_validity_test_SH-PSV_input_S.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 04:00:41 2016

@author: irnakat
"""

# test IOfile
import IOfile
from TFCalculator import TFCalculator as TFC
import TFDisplayTools

# validity test for SH PSV case using S wave as an input

# filename
fname = 'sampleinput_linear_elastic_1layer_halfspace.dat'
fname2 = 'sampleinput_psv_s_linear_elastic_1layer_halfspace.dat'

# input file reading
datash = IOfile.parsing_input_file(fname)
datapsvs = IOfile.parsing_input_file(fname2)

# kramer
print 'TF calculation using kramer approach'
theclass1 = TFC(datash)
theclass1.tf_kramer286_sh()
# check/verify kramer calculation
print 'calculation has been finished!'

# knopoff sh complete
print 'TF calculation using complete knopoff sh approach'
theclass3 = TFC(datash)
theclass3.tf_knopoff_sh_adv()
print 'calculation has been finished!'

# knopoff psv-s
print 'TF calculation using complete knopoff psv-s approach'
<|fim▁hole|>
# kennet sh
print 'TF calculation using kennet sh method'
theclass5 = TFC(datash)
theclass5.tf_kennet_sh()
print 'calculation has been finished!'

TFDisplayTools.TFPlot(theclass1,theclass3,theclass5,theclass4, \
    label=['Kramer SH','Knopoff SH','Kennet SH','Knopoff PSV'])
TFDisplayTools.PhasePlot(theclass1,theclass3,theclass5,theclass4, \
    label=['Kramer SH','Knopoff SH','Kennet SH','Knopoff PSV'])<|fim▁end|>
theclass4 = TFC(datapsvs) theclass4.tf_knopoff_psv_adv() print theclass4.tf[1][19] print 'calculation has been finished!'
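The SH/PSV record compares transfer functions from several methods via TFPlot and PhasePlot; a transfer function is a complex-valued array, and those two views are just its modulus and argument. A minimal NumPy sketch with invented sample values:

```python
import numpy as np

# Toy complex transfer function at three frequencies (values invented).
tf = np.array([1.0 + 0.0j, 0.8 + 0.6j, 0.2 - 0.9j])

amplitude = np.abs(tf)           # what an amplitude plot (TFPlot) shows
phase = np.angle(tf, deg=True)   # what a phase plot (PhasePlot) shows

print(amplitude)  # [1.     1.     0.9220]
print(phase)      # [  0.    36.87 -77.47]
```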
<|file_name|>diag_diff.py<|end_file_name|><|fim▁begin|>import ast<|fim▁hole|>s1=s2=0 for i in range(0, n): a = list(ast.literal_eval(','.join(input().split()))) if(len(a) == n): s1+=a[i] s2+=a[n-1-i] print(abs(s1-s2))<|fim▁end|>
n = int(input()) a=[]
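The diag_diff record reads an n-by-n matrix and prints the absolute difference of its two diagonal sums; the `ast.literal_eval(','.join(...))` detour does parse the integers, but `map(int, ...)` is the conventional route. An equivalent, slightly plainer version:

```python
# Same diagonal-difference algorithm, with conventional integer parsing.
n = int(input())
primary = secondary = 0
for i in range(n):
    row = list(map(int, input().split()))
    primary += row[i]            # main-diagonal element of row i
    secondary += row[n - 1 - i]  # anti-diagonal element of row i
print(abs(primary - secondary))
```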
<|file_name|>AACTransport.d.ts<|end_file_name|><|fim▁begin|>/// <reference types="node" /> import RTSPClient from "../RTSPClient"; import { RTPPacket } from "../util"; import * as transform from "sdp-transform"; import { Writable } from "stream"; interface Details { codec: string; mediaSource: transform.MediaDescription; rtpChannel: number; rtcpChannel: number; }<|fim▁hole|> client: RTSPClient; stream: Writable; ObjectType: number; FrequencyIndex: number; ChannelConfiguration: number; constructor(client: RTSPClient, stream: Writable, details: Details); processRTPPacket(packet: RTPPacket): void; } export {};<|fim▁end|>
export default class AACTransport {
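The transport above carries ObjectType, FrequencyIndex and ChannelConfiguration, which are the three leading fields of an MPEG-4 AudioSpecificConfig, the piece of out-of-band data an AAC decoder needs to interpret the RTP payload. They pack into two bytes as 5+4+4 bits; a sketch follows, with the sample values chosen for illustration.

```python
# Pack an MPEG-4 AudioSpecificConfig from the three fields stored by the
# transport above: 5-bit object type, 4-bit sampling-frequency index,
# 4-bit channel configuration; the trailing 3 bits stay zero.
def audio_specific_config(object_type: int, freq_index: int, channels: int) -> bytes:
    word = (object_type << 11) | (freq_index << 7) | (channels << 3)
    return word.to_bytes(2, 'big')

# AAC-LC (2) at 48 kHz (index 3), stereo (2) -> the familiar b'\x11\x90'.
print(audio_specific_config(2, 3, 2).hex())  # '1190'
```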
<|file_name|>scatter_nd_ops_test.py<|end_file_name|><|fim▁begin|># Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ops.tf.scatter_nd.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import numpy as np from tensorflow.python.client import session from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test GRADIENT_TESTS_DTYPES = (dtypes.float16, dtypes.float32, dtypes.float64) def _AsType(v, vtype): return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v) def _FlatInnerDims(tensor, ndims=2): shape = list(tensor.shape) return tensor.reshape([ functools.reduce(lambda x, y: x * y, shape[:-ndims + 1], 1) ] + shape[-ndims + 1:]) def _FlatOuterDims(tensor, ndims=2): shape = list(tensor.shape) return tensor.reshape(shape[:ndims - 1] + [ functools.reduce(lambda x, y: x * y, shape[ndims - 1:], 1) ]) def _NumpyScatterNd(ref, indices, updates, op): ixdim = indices.shape[-1] num_updates = indices.size // ixdim total_nd = len(ref.shape) slice_size = 1 for i in range(ixdim, total_nd): slice_size *= ref.shape[i] flat_indices = _FlatInnerDims(indices) flat_updates = updates.reshape((num_updates, slice_size)) output_flat = _FlatOuterDims(ref, ixdim + 1) for ix_updates, ix_output in enumerate(flat_indices): ix_output = tuple(ix_output) output_flat[ix_output] = op(output_flat[ix_output], flat_updates[ix_updates]) return output_flat.reshape(ref.shape) def _NumpyUpdate(ref, indices, updates): return _NumpyScatterNd(ref, indices, updates, lambda p, u: u) def _NumpyAdd(ref, indices, updates): return _NumpyScatterNd(ref, indices, updates, lambda p, u: p + u) def _NumpySub(ref, indices, updates): return _NumpyScatterNd(ref, indices, updates, lambda p, u: p - u) def _NumpyMul(ref, indices, updates): return _NumpyScatterNd(ref, indices, updates, lambda p, u: p * u) def _NumpyDiv(ref, indices, updates): return _NumpyScatterNd(ref, indices, updates, lambda p, u: p / u) class StatefulScatterNdTest(test.TestCase): def _VariableRankTest(self, np_scatter, tf_scatter, vtype, itype, repeat_indices=False): np.random.seed(8) ref_shapes = [(3, 6), (3, 6), (3, 6, 9), (3, 6, 9), (3, 6, 9), (3, 6, 9)] indices_shapes = [(2,), (2, 2), (2,), (2, 2), (2, 3), (2, 3, 3)] with self.cached_session(use_gpu=True): for ref_shape, indices_shape in zip(ref_shapes, indices_shapes): num_updates = indices_shape[0] ixdim = indices_shape[-1] indexable_area_shape 
= () for i in range(ixdim): indexable_area_shape += (ref_shape[i],) all_indices = [ list(coord) for coord, _ in np.ndenumerate( np.empty(indexable_area_shape, vtype)) ] np.random.shuffle(all_indices) indices = np.array(all_indices[:num_updates]) if num_updates > 1 and repeat_indices: indices = indices[:num_updates // 2] for _ in range(num_updates - num_updates // 2): indices = np.append( indices, [indices[np.random.randint(num_updates // 2)]], axis=0) np.random.shuffle(indices) indices = _AsType(indices[:num_updates], itype) updates_shape = (num_updates,) for i in range(ixdim, len(ref_shape)): updates_shape += (ref_shape[i],) updates = _AsType(np.random.randn(*(updates_shape)), vtype) ref = _AsType(np.random.randn(*(ref_shape)), vtype) # Scatter via numpy new = ref.copy() np_scatter(new, indices, updates) # Scatter via tensorflow ref_var = variables.VariableV1(ref) ref_var.initializer.run() tf_scatter(ref_var, indices, updates).eval() # Compare self.assertAllClose(new, self.evaluate(ref_var)) def _VariableRankTests(self, np_scatter, tf_scatter): for vtype in (np.int32, np.float16, np.float32, np.float64, np.complex64, np.complex128): for itype in (np.int32, np.int64): self._VariableRankTest(np_scatter, tf_scatter, vtype, itype) def testSimple(self): indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32) updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32) ref = variables.Variable([0, 0, 0, 0, 0, 0, 0, 0], dtype=dtypes.float32) expected = np.array([0, 11, 0, 10, 9, 0, 0, 12]) scatter = state_ops.scatter_nd_update(ref, indices, updates) init = variables.global_variables_initializer() with self.session(use_gpu=True) as sess: sess.run(init) result = sess.run(scatter) self.assertAllClose(result, expected) def testSimpleResource(self): indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32) updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32) ref = resource_variable_ops.ResourceVariable( [0, 0, 0, 0, 0, 0, 0, 0], dtype=dtypes.float32) expected = np.array([0, 11, 0, 10, 9, 0, 0, 12]) scatter = state_ops.scatter_nd_update(ref, indices, updates) init = variables.global_variables_initializer() with self.session(use_gpu=True) as sess: sess.run(init) sess.run(scatter) self.assertAllClose(ref.eval(), expected) def testSimple2(self): indices = constant_op.constant([[1, 0], [1, 1]], dtype=dtypes.int32) updates = constant_op.constant([11., 12.], dtype=dtypes.float32) ref = variables.Variable( [[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32) expected = np.array([[0., 0.], [11., 12.], [0., 0.]]) scatter = state_ops.scatter_nd_update(ref, indices, updates) init = variables.global_variables_initializer() with self.session(use_gpu=True) as sess: sess.run(init) result = sess.run(scatter) self.assertAllClose(result, expected) def testSimple3(self): indices = constant_op.constant([[1]], dtype=dtypes.int32) updates = constant_op.constant([[11., 12.]], dtype=dtypes.float32) ref = variables.Variable( [[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32) expected = np.array([[0., 0.], [11., 12.], [0., 0.]]) scatter = state_ops.scatter_nd_update(ref, indices, updates) init = variables.global_variables_initializer() with self.session(use_gpu=True) as sess: sess.run(init) result = sess.run(scatter) self.assertAllClose(result, expected) def testVariableRankUpdate(self): self._VariableRankTests(_NumpyUpdate, state_ops.scatter_nd_update) def testVariableRankAdd(self): self._VariableRankTests(_NumpyAdd, state_ops.scatter_nd_add) def 
testVariableRankSub(self): self._VariableRankTests(_NumpySub, state_ops.scatter_nd_sub) # TODO(ebrevdo): Re-enable when we need ScatterNdMul. # def testVariableRankMul(self): # self._VariableRankTests(_NumpyMul, state_ops.scatter_nd_mul) # TODO(ebrevdo): Re-enable when we need ScatterNdDiv. # def testVariableRankDiv(self): # self._VariableRankTests(_NumpyDiv, state_ops.scatter_nd_div) def _ScatterRepeatIndicesTest(self, np_scatter, tf_scatter): for vtype in (np.int32, np.float16, np.float32, np.float64): for itype in (np.int32, np.int64): self._VariableRankTest( np_scatter, tf_scatter, vtype, itype, repeat_indices=True) def testScatterRepeatIndices(self): """This tests scatter_add using indices that repeat.""" self._ScatterRepeatIndicesTest(_NumpyAdd, state_ops.scatter_nd_add) self._ScatterRepeatIndicesTest(_NumpySub, state_ops.scatter_nd_sub) # TODO(ebrevdo): Re-enable when we need ScatterNdMul and ScatterNdDiv. # self._ScatterRepeatIndicesTest(_NumpyMul, state_ops.scatter_nd_mul) # self._ScatterRepeatIndicesTest(_NumpyDiv, state_ops.scatter_nd_div) # TODO(simister): Re-enable once binary size increase due to # extra templating is back under control and this op is re-enabled # def testBooleanScatterUpdate(self): # with self.session(use_gpu=False) as session: # var = tf.Variable([True, False]) # update0 = tf.scatter_nd_update(var, [[1]], [True]) # update1 = tf.scatter_nd_update( # var, tf.constant( # [[0]], dtype=tf.int64), [False]) # var.initializer.run() # session.run([update0, update1]) # self.assertAllEqual([False, True], self.evaluate(var)) def testScatterOutOfRangeCpu(self): # TODO(simister): Re-enable once binary size increase due to # scatter_nd ops is under control. # tf.scatter_nd_mul, tf.scatter_nd_div, for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub, state_ops.scatter_nd_update): params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32) updates = np.array([-3, -4, -5]).astype(np.float32) with self.cached_session(use_gpu=False): ref = variables.VariableV1(params) ref.initializer.run() # Indices all in range, no problem. indices = np.array([[2], [0], [5]]) op(ref, indices, updates).eval() # Test some out of range errors. 
indices = np.array([[-1], [0], [5]]) with self.assertRaisesOpError( r"indices\[0\] = \[-1\] does not index into shape \[6\]"): op(ref, indices, updates).eval() indices = np.array([[2], [0], [6]]) with self.assertRaisesOpError( r"indices\[2\] = \[6\] does not index into shape \[6\]"): op(ref, indices, updates).eval() def testRank3ValidShape(self): indices = array_ops.zeros([2, 2, 2], dtypes.int32) updates = array_ops.zeros([2, 2, 2], dtypes.int32) shape = np.array([2, 2, 2]) ref = variables.Variable(array_ops.zeros(shape, dtypes.int32)) self.assertAllEqual( state_ops.scatter_nd_update(ref, indices, updates).get_shape().as_list(), shape) def testResVarInvalidOutputShape(self): res = variables.Variable( initial_value=lambda: array_ops.zeros(shape=[], dtype=dtypes.float32), dtype=dtypes.float32) with self.cached_session(): res.initializer.run() with self.assertRaisesOpError("Output must be at least 1-D"): state_ops.scatter_nd_update(res, [[0]], [0.22]).eval() def testExtraIndicesDimensions(self): indices = array_ops.zeros([1, 1, 2], dtypes.int32) updates = array_ops.zeros([1, 1], dtypes.int32) shape = np.array([2, 2]) ref = variables.Variable(array_ops.zeros(shape, dtypes.int32)) scatter_update = state_ops.scatter_nd_update(ref, indices, updates) self.assertAllEqual(scatter_update.get_shape().as_list(), shape) expected_result = np.zeros([2, 2], dtype=np.int32) with self.cached_session(): ref.initializer.run() self.assertAllEqual(expected_result, self.evaluate(scatter_update)) def testRank3InvalidShape1(self): indices = array_ops.zeros([3, 2, 2], dtypes.int32) updates = array_ops.zeros([2, 2, 2], dtypes.int32) shape = np.array([2, 2, 2]) ref = variables.Variable(array_ops.zeros(shape, dtypes.int32)) with self.assertRaisesWithPredicateMatch( ValueError, "The outer \\d+ dimensions of indices\\.shape="): state_ops.scatter_nd_update(ref, indices, updates) def testRank3InvalidShape2(self): indices = array_ops.zeros([2, 2, 1], dtypes.int32) updates = array_ops.zeros([2, 2], dtypes.int32) shape = np.array([2, 2, 2]) ref = variables.Variable(array_ops.zeros(shape, dtypes.int32)) with self.assertRaisesWithPredicateMatch( ValueError, "The inner \\d+ dimensions of input\\.shape="): state_ops.scatter_nd_update(ref, indices, updates) def testConcurrentUpdates(self): num_updates = 10000 update_values = np.random.rand(num_updates) ref = variables.Variable(np.zeros([2, 2]), dtype=dtypes.float64) indices = constant_op.constant([[0, 1]] * num_updates, dtype=dtypes.int32) updates = constant_op.constant(update_values, dtype=dtypes.float64) expected_result = np.zeros([2, 2], dtype=np.float64) expected_result[0, 1] = np.sum(update_values) scatter = state_ops.scatter_nd_add(ref, indices, updates) init = variables.global_variables_initializer() with session.Session() as sess: sess.run(init) result = sess.run(scatter) assert np.allclose(result, expected_result) # TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU. def _disabledTestScatterOutOfRangeGpu(self): if not test.IsBuiltWithCuda(): return # TODO(simister): Re-enable once binary size increase due to # scatter_nd ops is under control. # tf.scatter_nd_mul, tf.scatter_nd_div, for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub, state_ops.scatter_nd_update): params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32) updates = np.array([-3, -4, -5]).astype(np.float32) # With GPU, the code ignores indices that are out of range. # We don't test the implementation; just test there's no failures. 
with self.cached_session(force_gpu=True): ref = variables.Variable(params) ref.initializer.run() # Indices all in range, no problem. indices = np.array([2, 0, 5]) op(ref, indices, updates).eval() # Indices out of range should not fail. indices = np.array([-1, 0, 5]) op(ref, indices, updates).eval() indices = np.array([2, 0, 6]) op(ref, indices, updates).eval() class ScatterNdTest(test.TestCase): non_aliasing_add_test = False def scatter_nd(self, indices, updates, shape, input_=None): del input_ # input_ is not used in scatter_nd return array_ops.scatter_nd(indices, updates, shape) @test_util.run_in_graph_and_eager_modes def testBool(self): indices = constant_op.constant( [[4], [3], [1], [7]], dtype=dtypes.int32) updates = constant_op.constant( [False, True, False, True], dtype=dtypes.bool) expected = np.array( [False, False, False, True, False, False, False, True]) scatter = self.scatter_nd(indices, updates, shape=(8,)) result = self.evaluate(scatter) self.assertAllEqual(expected, result) # Same indice is updated twice by same value. indices = constant_op.constant( [[4], [3], [3], [7]], dtype=dtypes.int32) updates = constant_op.constant( [False, True, True, True], dtype=dtypes.bool)<|fim▁hole|> False, False, False, True, False, False, False, True]) scatter = self.scatter_nd(indices, updates, shape=(8,)) result = self.evaluate(scatter) self.assertAllEqual(expected, result) @test_util.run_in_graph_and_eager_modes def testInvalidShape(self): # TODO(apassos) figure out how to unify these errors with self.assertRaises(errors.InvalidArgumentError if context.executing_eagerly() else ValueError): array_ops.scatter_nd(indices=[0], # this should be indices=[[0]] updates=[0.0], shape=[1]) def testString(self): indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32) updates = constant_op.constant(["four", "three", "one", "seven"], dtype=dtypes.string) expected = np.array([b"", b"one", b"", b"three", b"four", b"", b"", b"seven"]) scatter = self.scatter_nd(indices, updates, shape=(8,)) with self.cached_session() as sess: result = sess.run(scatter) self.assertAllEqual(expected, result) # Same indice is updated twice by same value. indices = constant_op.constant([[4], [3], [3], [7]], dtype=dtypes.int32) updates = constant_op.constant(["a", "b", "b", "c"], dtype=dtypes.string) expected = np.array([b"", b"", b"", b"bb", b"a", b"", b"", b"c"]) scatter = self.scatter_nd(indices, updates, shape=(8,)) with self.cached_session() as sess: result = sess.run(scatter) self.assertAllEqual(expected, result) # Same indice is updated twice by different value. 
indices = constant_op.constant([[4], [3], [3], [7]], dtype=dtypes.int32) updates = constant_op.constant(["a", "b", "c", "d"], dtype=dtypes.string) expected = [np.array([b"", b"", b"", b"bc", b"a", b"", b"", b"d"]), np.array([b"", b"", b"", b"cb", b"a", b"", b"", b"d"])] scatter = self.scatter_nd(indices, updates, shape=(8,)) with self.cached_session() as sess: result = sess.run(scatter) self.assertTrue(np.array_equal(result, expected[0]) or np.array_equal(result, expected[1])) def testRank3ValidShape(self): indices = array_ops.zeros([2, 2, 2], dtypes.int32) updates = array_ops.zeros([2, 2, 2], dtypes.int32) shape = np.array([2, 2, 2]) self.assertAllEqual( self.scatter_nd(indices, updates, shape).get_shape().as_list(), shape) def testExtraIndicesDimensions(self): indices = array_ops.zeros([1, 1, 2], dtypes.int32) updates = array_ops.zeros([1, 1], dtypes.int32) shape = np.array([2, 2]) scatter = self.scatter_nd(indices, updates, shape) self.assertAllEqual(scatter.get_shape().as_list(), shape) expected_result = np.zeros([2, 2], dtype=np.int32) with self.cached_session(): self.assertAllEqual(expected_result, self.evaluate(scatter)) def testUndefinedIndicesShape(self): indices = array_ops.placeholder(dtypes.int32, shape=None) updates = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2]) shape = constant_op.constant([2, 2, 2], dtypes.int32) self.scatter_nd(indices, updates, shape) def testUndefinedUpdatesShape(self): indices = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2]) updates = array_ops.placeholder(dtypes.int32, shape=None) shape = constant_op.constant([2, 2, 2], dtypes.int32) self.scatter_nd(indices, updates, shape) def testUndefinedOutputShape(self): indices = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2]) updates = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2]) shape = array_ops.placeholder(dtypes.int32, shape=[None]) self.scatter_nd(indices, updates, shape) def testEmptyOutputShape1(self): indices = array_ops.zeros([2, 2, 2], dtypes.int32) updates = array_ops.zeros([2, 2, 2], dtypes.int32) shape = constant_op.constant([0, 3, 2], dtypes.int32) with self.assertRaisesWithPredicateMatch( ValueError, "Indices and updates specified for empty output shape"): self.scatter_nd(indices, updates, shape) def testEmptyOutputShape2(self): indices = array_ops.placeholder(dtypes.int32, shape=None) updates = array_ops.placeholder(dtypes.int32, shape=None) shape = constant_op.constant([0, 3, 2], dtypes.int32) with self.cached_session(): with self.assertRaisesOpError( "Indices and updates specified for empty output"): self.scatter_nd(indices, updates, shape).eval(feed_dict={ indices: np.zeros([2, 2, 2], dtype=np.int32), updates: np.zeros([2, 2, 2], dtype=np.int32) }) def testEmptyOutputShape3(self): indices = array_ops.zeros([0], dtypes.int32) updates = array_ops.zeros([0], dtypes.int32) shape = constant_op.constant([0], dtypes.int32) scatter = self.scatter_nd(indices, updates, shape) with self.cached_session(): self.assertEqual(scatter.eval().size, 0) def testRank3InvalidShape1(self): indices = array_ops.zeros([3, 2, 2], dtypes.int32) updates = array_ops.zeros([2, 2, 2], dtypes.int32) shape = np.array([2, 2, 2]) with self.assertRaisesWithPredicateMatch( ValueError, "The outer \\d+ dimensions of indices\\.shape="): self.scatter_nd(indices, updates, shape) def testRank3InvalidShape2(self): indices = array_ops.zeros([2, 2, 1], dtypes.int32) updates = array_ops.zeros([2, 2], dtypes.int32) shape = np.array([2, 2, 2]) with self.assertRaisesWithPredicateMatch( ValueError, "The inner \\d+ 
dimensions of (input|output)\\.shape="): self.scatter_nd(indices, updates, shape) def testGradientsRank2ElementUpdate(self): for dtype in GRADIENT_TESTS_DTYPES: indices = constant_op.constant([[0, 0], [1, 1]], dtype=dtypes.int32) updates = constant_op.constant([1, 4], dtype=dtype) shape = constant_op.constant([2, 2], dtype=dtypes.int32) input_ = array_ops.zeros(shape, dtype=dtype) outputs = self.scatter_nd(indices, updates, shape, input_) grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtype) updates_grad, input_grad = gradients_impl.gradients( [outputs], [updates, input_], [grad_vals]) expected_updates_grad = np.array([1, 4], dtype=dtype.as_numpy_dtype()) expected_input_grad = np.array([[1, 2], [3, 4]], dtype=dtype.as_numpy_dtype()) with self.cached_session(): self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad)) if self.non_aliasing_add_test: self.assertAllEqual(expected_input_grad, self.evaluate(input_grad)) def testGradientsRank2SliceUpdate(self): for dtype in GRADIENT_TESTS_DTYPES: indices = constant_op.constant([[1], [0]], dtype=dtypes.int32) updates = constant_op.constant([[3, 4], [1, 2]], dtype=dtype) shape = constant_op.constant([2, 2], dtype=dtypes.int32) input_ = array_ops.zeros(shape, dtype=dtype) outputs = self.scatter_nd(indices, updates, shape, input_) grad_vals = constant_op.constant([[3, 4], [1, 2]], dtype=dtype) updates_grad, input_grad = gradients_impl.gradients( [outputs], [updates, input_], [grad_vals]) expected_updates_grad = np.array([[1, 2], [3, 4]], dtype=dtype.as_numpy_dtype()) expected_input_grad = np.array([[3, 4], [1, 2]], dtype=dtype.as_numpy_dtype()) with self.cached_session(): self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad)) if self.non_aliasing_add_test: self.assertAllEqual(expected_input_grad, self.evaluate(input_grad)) def testGradientsRank3SliceUpdate(self): for dtype in GRADIENT_TESTS_DTYPES: indices = constant_op.constant([[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=dtypes.int32) updates = constant_op.constant([[[5, 7], [2, 4]], [[1, 3], [6, 8]]], dtype=dtype) shape = constant_op.constant([2, 2, 2], dtype=dtypes.int32) input_ = array_ops.zeros(shape, dtype=dtype) outputs = self.scatter_nd(indices, updates, shape, input_) grad_vals = constant_op.constant([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtype) updates_grad, input_grad = gradients_impl.gradients( [outputs], [updates, input_], [grad_vals]) expected_updates_grad = np.array([[[3, 4], [5, 6]], [[1, 2], [7, 8]]], dtype=dtype.as_numpy_dtype()) expected_input_grad = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtype.as_numpy_dtype()) with self.cached_session(): self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad)) if self.non_aliasing_add_test: self.assertAllEqual(expected_input_grad, self.evaluate(input_grad)) def testGradientsRank7SliceUpdate(self): for dtype in GRADIENT_TESTS_DTYPES: indices = constant_op.constant( [[[[[[[0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0]]]], [[[[0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1]]]]]]], dtype=dtypes.int32) updates = constant_op.constant( [[[[[[[5, 6], [2, 4]]]], [[[[1, 3], [6, 8]]]]]]], dtype=dtype) shape = constant_op.constant([1, 1, 2, 1, 1, 2, 2], dtype=dtypes.int32) input_ = array_ops.zeros(shape, dtype=dtype) outputs = self.scatter_nd(indices, updates, shape, input_) grad_vals = constant_op.constant( [[[[[[[1, 2], [3, 4]]]], [[[[5, 6], [7, 8]]]]]]], dtype=dtype) updates_grad, input_grad = gradients_impl.gradients( [outputs], [updates, input_], [grad_vals]) expected_updates_grad = np.array( 
[[[[[[[3, 4], [5, 6]]]], [[[[1, 2], [7, 8]]]]]]], dtype=dtype.as_numpy_dtype()) expected_input_grad = np.array( [[[[[[[1, 2], [3, 4]]]], [[[[5, 6], [7, 8]]]]]]], dtype=dtype.as_numpy_dtype()) with self.cached_session(): self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad)) if self.non_aliasing_add_test: self.assertAllEqual(expected_input_grad, self.evaluate(input_grad)) def testScatterNdRepatedIndicesAdd(self): indices = array_ops.zeros([100000, 1], dtypes.int32) values = np.random.randn(100000) shape = [1] with self.cached_session(): val = self.scatter_nd(indices, values, shape).eval() self.assertAllClose([np.sum(values)], val) def testSmokeScatterNdBatch2DSliceDim2(self): with self.cached_session(): indices = array_ops.zeros([3, 5, 2], dtype=dtypes.int32) values = array_ops.zeros([3, 5, 7]) shape = [4, 6, 7] self.scatter_nd(indices, values, shape).eval() def testSmokeScatterNdBatch1DSliceDim2(self): with self.cached_session(): indices = array_ops.zeros([0, 2], dtype=dtypes.int32) values = array_ops.zeros([0, 7]) shape = [4, 6, 7] self.scatter_nd(indices, values, shape).eval() def testSmokeScatterNdBatch1DSliceDim3ShapeRank7(self): with self.cached_session(): indices = array_ops.zeros([1, 3], dtype=dtypes.int32) values = array_ops.zeros([1, 6, 7, 8, 9]) shape = [3, 4, 5, 6, 7, 8, 9] self.scatter_nd(indices, values, shape).eval() def testSmokeScatterNdBatch2DSliceDim3ShapeRank7(self): with self.cached_session(): indices = array_ops.zeros([1, 2, 3], dtype=dtypes.int32) values = array_ops.zeros([1, 2, 6, 7, 8, 9]) shape = [3, 4, 5, 6, 7, 8, 9] self.scatter_nd(indices, values, shape).eval() class ScatterNdNonAliasingAddTest(ScatterNdTest): non_aliasing_add_test = True def scatter_nd(self, indices, updates, shape, input_=None): input_ = (input_ if input_ is not None else array_ops.zeros( shape, dtype=updates.dtype)) return array_ops.scatter_nd_non_aliasing_add(input_, indices, updates) def testString(self): # Not supported yet. pass if __name__ == "__main__": test.main()<|fim▁end|>
expected = np.array([
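One thread running through the scatter tests above: the stateless scatter_nd accumulates updates at duplicate indices, which is why the string case expects b"bb" for a repeated value and accepts either b"bc" or b"cb" when the order is unspecified. NumPy's unbuffered `np.add.at` reproduces that reference semantics, much like the `_NumpyScatterNd` helper in the record:

```python
import numpy as np

# Reference semantics for a 1-D scatter with a duplicated index: the
# stateless op accumulates, so index 3 receives 10 + 11.
out = np.zeros(8, dtype=np.int64)
indices = np.array([4, 3, 3, 7])
updates = np.array([9, 10, 11, 12])

np.add.at(out, indices, updates)  # unbuffered: duplicates add up
print(out)  # [ 0  0  0 21  9  0  0 12]
```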
<|file_name|>mit_benchmark.py<|end_file_name|><|fim▁begin|>import itertools import os import os.path as osp import chainer import numpy as np import scipy.misc from sklearn.model_selection import train_test_split from base import APC2016DatasetBase def ids_from_scene_dir(scene_dir, empty_scene_dir): for i_frame in itertools.count(): empty_file = osp.join( empty_scene_dir, 'frame-{:06}.color.png'.format(i_frame)) rgb_file = osp.join( scene_dir, 'frame-{:06}.color.png'.format(i_frame)) segm_file = osp.join( scene_dir, 'segm/frame-{:06}.segm.png'.format(i_frame)) if not (osp.exists(rgb_file) and osp.exists(segm_file)): break data_id = (empty_file, rgb_file, segm_file) yield data_id def bin_id_from_scene_dir(scene_dir): caminfo = open(osp.join(scene_dir, 'cam.info.txt')).read() loc = caminfo.splitlines()[0].split(': ')[-1] if loc == 'shelf': bin_id = caminfo.splitlines()[1][-1] else: bin_id = 'tote' return bin_id class APC2016mit_benchmarkDataset(APC2016DatasetBase): def __init__(self, data_type): assert data_type in ('train', 'val') self.dataset_dir = chainer.dataset.get_dataset_directory( 'apc2016/benchmark') data_ids = self._get_ids() ids_train, ids_val = train_test_split( data_ids, test_size=0.25, random_state=1234) if data_type == 'train': self._ids = ids_train else: self._ids = ids_val def __len__(self): return len(self._ids) def _get_ids_from_loc_dir(self, env, loc_dir): assert env in ('office', 'warehouse') loc = osp.basename(loc_dir) data_ids = [] for scene_dir in os.listdir(loc_dir): scene_dir = osp.join(loc_dir, scene_dir) bin_id = bin_id_from_scene_dir(scene_dir) empty_dir = osp.join( self.dataset_dir, env, 'empty', loc, 'scene-{}'.format(bin_id)) data_ids += list(ids_from_scene_dir(scene_dir, empty_dir)) return data_ids def _get_ids(self):<|fim▁hole|> loc_dir = osp.join(contain_dir, loc) data_ids += self._get_ids_from_loc_dir('office', loc_dir) # warehouse contain_dir = osp.join(self.dataset_dir, 'warehouse') for sub in ['practice', 'competition']: sub_contain_dir = osp.join(contain_dir, sub) for loc in ['shelf', 'tote']: loc_dir = osp.join(sub_contain_dir, loc) data_ids += self._get_ids_from_loc_dir('warehouse', loc_dir) return data_ids def _load_from_id(self, data_id): empty_file, rgb_file, segm_file = data_id img = scipy.misc.imread(rgb_file, mode='RGB') img_empty = scipy.misc.imread(empty_file, mode='RGB') # Label value is multiplied by 9: # ex) 0: 0/6=0 (background), 54: 54/6=9 (dasani_bottle_water) lbl = scipy.misc.imread(segm_file, mode='L') / 6 lbl = lbl.astype(np.int32) img_empty[lbl > 0] = img[lbl > 0] return img_empty, lbl def get_example(self, i): data_id = self._ids[i] img, lbl = self._load_from_id(data_id) datum = self.img_to_datum(img) return datum, lbl if __name__ == '__main__': import matplotlib.pyplot as plt import six dataset_train = APC2016mit_benchmarkDataset('train') dataset_val = APC2016mit_benchmarkDataset('val') print('train: %d, val: %d' % (len(dataset_train), len(dataset_val))) for i in six.moves.range(len(dataset_val)): viz = dataset_val.visualize_example(i) plt.imshow(viz) plt.show()<|fim▁end|>
data_ids = [] # office contain_dir = osp.join(self.dataset_dir, 'office/test') for loc in ['shelf', 'tote']:
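The benchmark dataset above builds one id list and splits it with a fixed random_state, which is what keeps its train and val views disjoint and stable across runs. A tiny demonstration with stand-in ids:

```python
from sklearn.model_selection import train_test_split

ids = ['scene-%03d' % i for i in range(8)]  # toy stand-ins for data ids
train, val = train_test_split(ids, test_size=0.25, random_state=1234)

# The fixed seed always produces the same 6/2 partition, so two dataset
# objects built this way never leak examples between train and val.
print(sorted(train), sorted(val))
```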
<|file_name|>units.py<|end_file_name|><|fim▁begin|>I = complex(0,1) ha2ev = 27.211396132 ev2cm1 = 8065.5440044136285 bohr2ang = 0.52917720859 atomic_mass = [ None, 1.00794, 4.002602, 6.941, 9.012182, 10.811, 12.0107, 14.0067, 15.9994, 18.9984032, 20.1797, 22.98976928, 24.305,26.9815386, 28.0855, 30.973762, 32.065, 35.453, 39.948, 39.0983, 40.078, 44.955912, 47.867, 50.9415, 51.9961, 54.938045, 55.845, 58.933195, 58.6934, 63.546, 65.38, 69.723, 72.64, 74.9216, 78.96, 79.904, 83.798, 85.4678, 87.62, 88.90585, 91.224, 92.90638, 95.96, None, 101.07, 102.9055, 106.42, 107.8682, 112.411, 114.818, 118.71, 121.76, 127.6, 126.90447, 131.293, 132.9054519, 137.327, 138.90547, 140.116, 140.90765, 144.242, None, 150.36, 151.964, 157.25, 158.92535, 162.5, 164.93032, 167.259, 168.93421, 173.054, 174.9668, 178.49, 180.94788, 183.84, 186.207, 190.23, 192.217, 195.084, 196.966569, 200.59, 204.3833, 207.2, 208.9804, None, None, None, None, None, None, 232.03806, 231.03588, 238.02891, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None] chemical_symbols = ['X', 'H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se',<|fim▁hole|> 'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi', 'Po', 'At', 'Rn', 'Fr', 'Ra', 'Ac', 'Th', 'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf', 'Es', 'Fm', 'Md', 'No', 'Lr']<|fim▁end|>
<|file_name|>schedule.py<|end_file_name|><|fim▁begin|>from npoapi.npoapi import NpoApi class Schedule(NpoApi): def get(self, guideDay=None, channel=None, sort="asc", offset=0, limit=240, properties=None, accept=None): params = { 'guideDay': guideDay, "sort": sort, "max": limit,<|fim▁hole|> } if channel: return self.request("/api/schedule/channel/" + channel, params=params, accept=accept) else: return self.request("/api/schedule", params=params) def search(self, form="{}", sort="asc", offset=0, limit=240, profile=None, properties=None, accept=None): return self.request("/api/schedule/", data=form, accept=accept, params={ "profile": profile, "sort": sort, "offset": offset, "max": limit, "properties": properties} )<|fim▁end|>
"offset": offset, "properties": properties
<|file_name|>aquifer.py<|end_file_name|><|fim▁begin|>import numpy as np import matplotlib.pyplot as plt import inspect # Used for storing the input class AquiferData: def __init__(self, model, kaq, z, Haq, Hll, c, Saq, Sll, poraq, porll, ltype, topboundary, phreatictop, kzoverkh=None, model3d=False): '''kzoverkh and model3d only need to be specified when model is model3d''' self.model = model self.kaq = np.atleast_1d(kaq).astype('d') self.z = np.atleast_1d(z).astype('d') self.naq = len(self.kaq) self.nlayers = len(self.z) - 1 self.Haq = np.atleast_1d(Haq).astype('d') self.Hll = np.atleast_1d(Hll).astype('d') self.T = self.kaq * self.Haq self.Tcol = self.T.reshape(self.naq, 1) self.c = np.atleast_1d(c).astype('d') self.c[self.c > 1e100] = 1e100 self.Saq = np.atleast_1d(Saq).astype('d') self.Sll = np.atleast_1d(Sll).astype('d') self.Sll[self.Sll < 1e-20] = 1e-20 # Cannot be zero self.poraq = np.atleast_1d(poraq).astype('d') self.porll = np.atleast_1d(porll).astype('d') self.ltype = np.atleast_1d(ltype) self.zaqtop = self.z[:-1][self.ltype == 'a'] self.zaqbot = self.z[1:][self.ltype == 'a'] self.layernumber = np.zeros(self.nlayers, dtype='int') self.layernumber[self.ltype == 'a'] = np.arange(self.naq) self.layernumber[self.ltype == 'l'] = np.arange(self.nlayers - self.naq) if self.ltype[0] == 'a': self.layernumber[self.ltype == 'l'] += 1 # first leaky layer below first aquifer layer self.topboundary = topboundary[:3] self.phreatictop = phreatictop self.kzoverkh = kzoverkh if self.kzoverkh is not None: self.kzoverkh = np.atleast_1d(self.kzoverkh).astype('d') if len(self.kzoverkh) == 1: self.kzoverkh = self.kzoverkh * np.ones(self.naq) self.model3d = model3d if self.model3d: assert self.kzoverkh is not None, \ "model3d specified without kzoverkh" #self.D = self.T / self.Saq self.area = 1e200 # Smaller than default of ml.aq so that inhom is found def __repr__(self): return 'Inhom T: ' + str(self.T) def initialize(self): ''' eigval[naq, npval]: Array with eigenvalues lab[naq, npval]: Array with lambda values lab2[naq, nint, npint]: Array with lambda values reorganized per interval eigvec[naq, naq, npval]: Array with eigenvector matrices coef[naq ,naq, npval]: Array with coefficients; coef[ilayers, :, np] are the coefficients if the element is in ilayers belonging to Laplace parameter number np ''' # Recompute T for when kaq is changed self.T = self.kaq * self.Haq self.Tcol = self.T.reshape(self.naq, 1) # Compute Saq and Sll self.Scoefaq = self.Saq * self.Haq self.Scoefll = self.Sll * self.Hll if (self.topboundary == 'con') and self.phreatictop: self.Scoefaq[0] = self.Scoefaq[0] / self.Haq[0] elif (self.topboundary == 'lea') and self.phreatictop: self.Scoefll[0] = self.Scoefll[0] / self.Hll[0] self.D = self.T / self.Scoefaq # Compute c if model3d for when kaq is changed if self.model3d: self.c[1:] = \ 0.5 * self.Haq[:-1] / (self.kzoverkh[:-1] * self.kaq[:-1]) + \ 0.5 * self.Haq[1:] / (self.kzoverkh[1:] * self.kaq[1:]) # self.eigval = np.zeros((self.naq, self.model.npval), 'D') self.lab = np.zeros((self.naq, self.model.npval), 'D') self.eigvec = np.zeros((self.naq, self.naq, self.model.npval), 'D') self.coef = np.zeros((self.naq, self.naq, self.model.npval), 'D') b = np.diag(np.ones(self.naq)) for i in range(self.model.npval): w, v = self.compute_lab_eigvec(self.model.p[i]) # Eigenvectors are columns of v self.eigval[:, i] = w; self.eigvec[:, :, i] = v self.coef[:, :, i] = np.linalg.solve(v, b).T self.lab = 1.0 / np.sqrt(self.eigval) self.lab2 = self.lab.copy() self.lab2.shape = (self.naq, 
self.model.nint, self.model.npint) self.lababs = np.abs(self.lab2[:, :, 0]) # used to check distances self.eigvec2 = self.eigvec.copy() self.eigvec2.shape = (self.naq, self.naq, self.model.nint, self.model.npint) def compute_lab_eigvec(self, p, returnA = False, B = None): sqrtpSc = np.sqrt( p * self.Scoefll * self.c ) a, b = np.zeros_like(sqrtpSc), np.zeros_like(sqrtpSc) small = np.abs(sqrtpSc) < 200 a[small] = sqrtpSc[small] / np.tanh(sqrtpSc[small]) b[small] = sqrtpSc[small] / np.sinh(sqrtpSc[small]) a[~small] = sqrtpSc[~small] / ((1.0 - np.exp(-2.0*sqrtpSc[~small])) / (1.0 + np.exp(-2.0*sqrtpSc[~small]))) b[~small] = sqrtpSc[~small] * 2.0 * np.exp(-sqrtpSc[~small]) / \ (1.0 - np.exp(-2.0*sqrtpSc[~small])) if (self.topboundary[:3] == 'sem') or (self.topboundary[:3] == 'lea'): dzero = sqrtpSc[0] * np.tanh(sqrtpSc[0]) d0 = p / self.D if B is not None: d0 = d0 * B # B is vector of load efficiency paramters d0[:-1] += a[1:] / (self.c[1:] * self.T[:-1]) d0[1:] += a[1:] / (self.c[1:] * self.T[1:]) if self.topboundary[:3] == 'lea': d0[0] += dzero / ( self.c[0] * self.T[0] ) elif self.topboundary[:3] == 'sem': d0[0] += a[0] / ( self.c[0] * self.T[0] ) dm1 = -b[1:] / (self.c[1:] * self.T[:-1]) dp1 = -b[1:] / (self.c[1:] * self.T[1:]) A = np.diag(dm1,-1) + np.diag(d0,0) + np.diag(dp1,1) if returnA: return A w, v = np.linalg.eig(A) # sorting moved here index = np.argsort(abs(w))[::-1] w = w[index] v = v[:, index] return w, v def head_to_potential(self, h, layers): return h * self.Tcol[layers] def potential_to_head(self, pot, layers):<|fim▁hole|> print('Must overload AquiferData.isInside method') return True def inWhichLayer(self, z): '''Returns -9999 if above top of system, +9999 if below bottom of system, negative for in leaky layer. leaky layer -n is on top of aquifer n''' if z > self.zt[0]: return -9999 for i in range(self.naq-1): if z >= self.zb[i]: return i if z > self.zt[i+1]: return -i-1 if z >= self.zb[self.naq-1]: return self.naq - 1 return +9999 def findlayer(self, z): ''' Returns layer-number, layer-type and model-layer-number''' if z > self.z[0]: modellayer = -1 ltype = 'above' layernumber = None elif z < self.z[-1]: modellayer = len(self.layernumber) ltype = 'below' layernumber = None else: modellayer = np.argwhere((z <= self.z[:-1]) & (z >= self.z[1:]))[0, 0] layernumber = self.layernumber[modellayer] ltype = self.ltype[modellayer] return layernumber, ltype, modellayer class Aquifer(AquiferData): def __init__(self, model, kaq, z, Haq, Hll, c, Saq, Sll, poraq, porll, ltype, topboundary, phreatictop, kzoverkh=None, model3d=False): AquiferData.__init__(self, model, kaq, z, Haq, Hll, c, Saq, Sll, poraq, porll, ltype, topboundary, phreatictop, kzoverkh, model3d) self.inhomlist = [] self.area = 1e300 # Needed to find smallest inhomogeneity def __repr__(self): return 'Background Aquifer T: ' + str(self.T) def initialize(self): AquiferData.initialize(self) for inhom in self.inhomlist: inhom.initialize() def find_aquifer_data(self, x, y): rv = self for aq in self.inhomlist: if aq.isInside(x, y): if aq.area < rv.area: rv = aq return rv<|fim▁end|>
return pot / self.Tcol[layers] def isInside(self,x,y):
<|file_name|>dst-deref.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Test that a custom deref with a fat pointer return type does not ICE use std::ops::Deref; pub struct Arr { ptr: Box<[usize]> } impl Deref for Arr { type Target = [usize]; fn deref(&self) -> &[usize] { &*self.ptr<|fim▁hole|>} pub fn foo(arr: &Arr) { assert_eq!(arr.len(), 3); let x: &[usize] = &**arr; assert_eq!(x[0], 1); assert_eq!(x[1], 2); assert_eq!(x[2], 3); } fn main() { // FIXME (#22405): Replace `Box::new` with `box` here when/if possible. let a = Arr { ptr: Box::new([1, 2, 3]) }; foo(&a); }<|fim▁end|>
}
<|file_name|>cve_role.py<|end_file_name|><|fim▁begin|>""" An interpreted text role to link docs to CVE issues. To use: :cve:`XXXXX`<|fim▁hole|>from docutils.parsers.rst import roles def cve_role(name, rawtext, text, lineno, inliner, options=None, content=None): if options is None: options = {} url_pattern = inliner.document.settings.env.app.config.cve_url if url_pattern is None: msg = inliner.reporter.warning("cve not configured: please configure cve_url in conf.py") prb = inliner.problematic(rawtext, rawtext, msg) return [prb], [msg] url = url_pattern % text roles.set_classes(options) node = nodes.reference(rawtext, utils.unescape('CVE-%s' % text), refuri=url, **options) return [node], [] def setup(app): app.add_config_value('cve_url', None, 'env') app.add_role('cve', cve_role) return {'parallel_read_safe': True}<|fim▁end|>
""" from docutils import nodes, utils
<|file_name|>lexical-scope-in-match.rs<|end_file_name|><|fim▁begin|>// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-android: FIXME(#10381) // min-lldb-version: 310 // compile-flags:-g // === GDB TESTS =================================================================================== // gdb-command:run // gdb-command:print shadowed // gdb-check:$1 = 231 // gdb-command:print not_shadowed // gdb-check:$2 = 232 // gdb-command:continue // gdb-command:print shadowed // gdb-check:$3 = 233 // gdb-command:print not_shadowed // gdb-check:$4 = 232 // gdb-command:print local_to_arm // gdb-check:$5 = 234 // gdb-command:continue // gdb-command:print shadowed // gdb-check:$6 = 236 // gdb-command:print not_shadowed // gdb-check:$7 = 232 // gdb-command:continue <|fim▁hole|>// gdb-check:$8 = 237 // gdb-command:print not_shadowed // gdb-check:$9 = 232 // gdb-command:print local_to_arm // gdb-check:$10 = 238 // gdb-command:continue // gdb-command:print shadowed // gdb-check:$11 = 239 // gdb-command:print not_shadowed // gdb-check:$12 = 232 // gdb-command:continue // gdb-command:print shadowed // gdb-check:$13 = 241 // gdb-command:print not_shadowed // gdb-check:$14 = 232 // gdb-command:continue // gdb-command:print shadowed // gdb-check:$15 = 243 // gdb-command:print *local_to_arm // gdb-check:$16 = 244 // gdb-command:continue // gdb-command:print shadowed // gdb-check:$17 = 231 // gdb-command:print not_shadowed // gdb-check:$18 = 232 // gdb-command:continue // === LLDB TESTS ================================================================================== // lldb-command:run // lldb-command:print shadowed // lldb-check:[...]$0 = 231 // lldb-command:print not_shadowed // lldb-check:[...]$1 = 232 // lldb-command:continue // lldb-command:print shadowed // lldb-check:[...]$2 = 233 // lldb-command:print not_shadowed // lldb-check:[...]$3 = 232 // lldb-command:print local_to_arm // lldb-check:[...]$4 = 234 // lldb-command:continue // lldb-command:print shadowed // lldb-check:[...]$5 = 236 // lldb-command:print not_shadowed // lldb-check:[...]$6 = 232 // lldb-command:continue // lldb-command:print shadowed // lldb-check:[...]$7 = 237 // lldb-command:print not_shadowed // lldb-check:[...]$8 = 232 // lldb-command:print local_to_arm // lldb-check:[...]$9 = 238 // lldb-command:continue // lldb-command:print shadowed // lldb-check:[...]$10 = 239 // lldb-command:print not_shadowed // lldb-check:[...]$11 = 232 // lldb-command:continue // lldb-command:print shadowed // lldb-check:[...]$12 = 241 // lldb-command:print not_shadowed // lldb-check:[...]$13 = 232 // lldb-command:continue // lldb-command:print shadowed // lldb-check:[...]$14 = 243 // lldb-command:print *local_to_arm // lldb-check:[...]$15 = 244 // lldb-command:continue // lldb-command:print shadowed // lldb-check:[...]$16 = 231 // lldb-command:print not_shadowed // lldb-check:[...]$17 = 232 // lldb-command:continue #![omit_gdb_pretty_printer_section] struct Struct { x: int, y: int } fn main() { let shadowed = 231i; let not_shadowed = 232i; zzz(); // #break sentinel(); match (233i, 234i) { (shadowed, local_to_arm) => { zzz(); // #break 
sentinel(); } } match (235i, 236i) { // with literal (235, shadowed) => { zzz(); // #break sentinel(); } _ => {} } match (Struct { x: 237, y: 238 }) { Struct { x: shadowed, y: local_to_arm } => { zzz(); // #break sentinel(); } } match (Struct { x: 239, y: 240 }) { // ignored field Struct { x: shadowed, .. } => { zzz(); // #break sentinel(); } } match (Struct { x: 241, y: 242 }) { // with literal Struct { x: shadowed, y: 242 } => { zzz(); // #break sentinel(); } _ => {} } match (243i, 244i) { (shadowed, ref local_to_arm) => { zzz(); // #break sentinel(); } } zzz(); // #break sentinel(); } fn zzz() {()} fn sentinel() {()}<|fim▁end|>
// gdb-command:print shadowed
<|file_name|>loadInitialState.js<|end_file_name|><|fim▁begin|>import _ from 'lodash' // eslint-disable-line export default function loadInitialState(req) { const user = req.user const state = { auth: {}, }<|fim▁hole|> } if (req.session.accessToken) { state.auth.accessToken = req.session.accessToken.token } } if (req.csrfToken) { state.auth.csrf = req.csrfToken() } // Immutable.fromJS has a bug with objects flagged as anonymous in node 6 // https://github.com/facebook/immutable-js/issues/1001 return JSON.parse(JSON.stringify(state)) // callback(null, state) }<|fim▁end|>
if (user) { state.auth = { user: {id: user.id},
<|file_name|>localVariable.py<|end_file_name|><|fim▁begin|>def test_local_variable(): x = 1<|fim▁hole|><|fim▁end|>
x = 2
<|file_name|>LightingMode.java<|end_file_name|><|fim▁begin|>/* * _____ _ _ _____ _ * | __ \| | | | / ____| | | * | |__) | | ___ | |_| (___ __ _ _ _ __ _ _ __ ___ __| | * | ___/| |/ _ \| __|\___ \ / _` | | | |/ _` | '__/ _ \/ _` | * | | | | (_) | |_ ____) | (_| | |_| | (_| | | | __/ (_| | * |_| |_|\___/ \__|_____/ \__, |\__,_|\__,_|_| \___|\__,_| * | | * |_| * PlotSquared plot management system for Minecraft * Copyright (C) 2021 IntellectualSites * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. *<|fim▁hole|> * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package com.plotsquared.core.queue; import java.util.HashMap; import java.util.Map; public enum LightingMode { NONE(0), PLACEMENT(1), REPLACEMENT(2), ALL(3); private static final Map<Integer, LightingMode> map = new HashMap<>(); static { for (LightingMode mode : LightingMode.values()) { map.put(mode.mode, mode); } } private final int mode; LightingMode(int mode) { this.mode = mode; } public static LightingMode valueOf(int mode) { return map.get(mode); } public int getMode() { return mode; } }<|fim▁end|>
<|file_name|>user.py<|end_file_name|><|fim▁begin|>class User(object): def __init__(self, username=None, password=None, email=None): self.username = username self.password = password self.email = email @classmethod def admin(cls): return cls(username="admin", password="admin")<|fim▁hole|> def random_data(cls): from random import randint return cls(username="user" + str(randint(0, 1000)), password="pass" + str(randint(0, 1000)))<|fim▁end|>
#random values for username and password @classmethod
<|file_name|>step_success.go<|end_file_name|><|fim▁begin|>package proxmox import ( "context" "github.com/hashicorp/packer-plugin-sdk/multistep" )<|fim▁hole|>type stepSuccess struct{} func (s *stepSuccess) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { // We need to ensure stepStartVM.Cleanup doesn't delete the template (no // difference between VMs and templates when deleting) state.Put("success", true) return multistep.ActionContinue } func (s *stepSuccess) Cleanup(state multistep.StateBag) {}<|fim▁end|>
// stepSuccess runs after the full build has succeeded. // // It sets the success state, which ensures cleanup does not remove the finished template
<|file_name|>Range.java<|end_file_name|><|fim▁begin|>/* * To change this template, choose Tools | Templates * and open the template in the editor. */ package VerilogCompiler.SyntacticTree; import VerilogCompiler.SemanticCheck.ErrorHandler; import VerilogCompiler.SemanticCheck.ExpressionType; import VerilogCompiler.SyntacticTree.Expressions.Expression; /** * * @author Néstor A. Bermúdez < [email protected] > */ public class Range extends VNode { Expression minValue; Expression maxValue; public Range(Expression minValue, Expression maxValue, int line, int column) { super(line, column); this.minValue = minValue; this.maxValue = maxValue; } public Expression getMinValue() { return minValue; } public void setMinValue(Expression minValue) { this.minValue = minValue; } public Expression getMaxValue() { return maxValue; } public void setMaxValue(Expression maxValue) { this.maxValue = maxValue; } @Override public String toString() { return String.format("[%s:%s]", this.minValue, this.maxValue); }<|fim▁hole|> @Override public ExpressionType validateSemantics() { ExpressionType minReturnType = minValue.validateSemantics(); ExpressionType maxReturnType = maxValue.validateSemantics(); if (minReturnType != ExpressionType.INTEGER || maxReturnType != ExpressionType.INTEGER) { ErrorHandler.getInstance().handleError(line, column, "range min and max value must be integer"); } return null; } @Override public VNode getCopy() { return new Range((Expression)minValue.getCopy(), (Expression)maxValue.getCopy(), line, column); } }<|fim▁end|>
<|file_name|>run_tests.py<|end_file_name|><|fim▁begin|>import os import nose import django NAME = os.path.basename(os.path.dirname(__file__)) ROOT = os.path.abspath(os.path.dirname(__file__)) os.environ['DJANGO_SETTINGS_MODULE'] = 'fake_settings' os.environ['PYTHONPATH'] = os.pathsep.join([ROOT, os.path.join(ROOT, 'examples')]) if __name__ == '__main__': if hasattr(django, 'setup'): # Django's app registry was added in 1.7. We need to call `setup` to<|fim▁hole|> django.setup() nose.main()<|fim▁end|>
# initiate it.
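The version guard in the run_tests.py row generalizes beyond this test runner; a minimal sketch of the same Django-1.7+ pattern, assuming only that django is importable:

import django

if hasattr(django, 'setup'):
    # Django 1.7 added the app registry; setup() populates it before tests run.
    # On older versions the attribute is absent and nothing needs to happen.
    django.setup()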
<|file_name|>workspace_event_handler_unittest.cc<|end_file_name|><|fim▁begin|>// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ash/wm/workspace/workspace_event_handler.h" #include "ash/screen_util.h" #include "ash/shell.h" #include "ash/test/ash_test_base.h" #include "ash/wm/window_state.h" #include "ash/wm/window_util.h" #include "ash/wm/wm_event.h" #include "ash/wm/workspace_controller.h" #include "ash/wm/workspace_controller_test_helper.h" #include "ui/aura/client/aura_constants.h" #include "ui/aura/client/window_move_client.h" #include "ui/aura/test/event_generator.h" #include "ui/aura/test/test_window_delegate.h" #include "ui/aura/window.h" #include "ui/aura/window_tree_host.h" #include "ui/base/hit_test.h" #include "ui/events/event_processor.h" #include "ui/gfx/screen.h" #include "ui/wm/core/window_util.h" #if defined(OS_WIN) #include "base/win/windows_version.h" #endif namespace ash { namespace internal { class WorkspaceEventHandlerTest : public test::AshTestBase { public: WorkspaceEventHandlerTest() {} virtual ~WorkspaceEventHandlerTest() {} protected: aura::Window* CreateTestWindow(aura::WindowDelegate* delegate, const gfx::Rect& bounds) { aura::Window* window = new aura::Window(delegate); window->SetType(ui::wm::WINDOW_TYPE_NORMAL); window->Init(aura::WINDOW_LAYER_TEXTURED); ParentWindowInPrimaryRootWindow(window); window->SetBounds(bounds); window->Show(); return window; } private: DISALLOW_COPY_AND_ASSIGN(WorkspaceEventHandlerTest); }; // Keeps track of the properties changed of a particular window. class WindowPropertyObserver : public aura::WindowObserver { public: explicit WindowPropertyObserver(aura::Window* window) : window_(window) { window->AddObserver(this); } virtual ~WindowPropertyObserver() { window_->RemoveObserver(this); } bool DidPropertyChange(const void* property) const { return std::find(properties_changed_.begin(), properties_changed_.end(), property) != properties_changed_.end(); } private: virtual void OnWindowPropertyChanged(aura::Window* window, const void* key, intptr_t old) OVERRIDE { properties_changed_.push_back(key); } aura::Window* window_; std::vector<const void*> properties_changed_; DISALLOW_COPY_AND_ASSIGN(WindowPropertyObserver); }; TEST_F(WorkspaceEventHandlerTest, DoubleClickSingleAxisResizeEdge) { // Double clicking the vertical resize edge of a window should maximize it // vertically. gfx::Rect restored_bounds(10, 10, 50, 50); aura::test::TestWindowDelegate wd; scoped_ptr<aura::Window> window(CreateTestWindow(&wd, restored_bounds)); wm::ActivateWindow(window.get()); gfx::Rect work_area = Shell::GetScreen()->GetDisplayNearestWindow( window.get()).work_area(); aura::test::EventGenerator generator(Shell::GetPrimaryRootWindow(), window.get()); // Double-click the top resize edge. wd.set_window_component(HTTOP); // On X a double click actually generates a drag between each press/release. // Explicitly trigger this path since we had bugs in dealing with it // correctly. 
generator.PressLeftButton(); generator.ReleaseLeftButton(); generator.set_flags(ui::EF_IS_DOUBLE_CLICK); generator.PressLeftButton(); generator.MoveMouseTo(generator.current_location(), 1); generator.ReleaseLeftButton(); gfx::Rect bounds_in_screen = window->GetBoundsInScreen(); EXPECT_EQ(restored_bounds.x(), bounds_in_screen.x()); EXPECT_EQ(restored_bounds.width(), bounds_in_screen.width()); EXPECT_EQ(work_area.y(), bounds_in_screen.y()); EXPECT_EQ(work_area.height(), bounds_in_screen.height()); wm::WindowState* window_state = wm::GetWindowState(window.get()); // Single-axis maximization is not considered real maximization. EXPECT_FALSE(window_state->IsMaximized()); // Restore. generator.DoubleClickLeftButton(); bounds_in_screen = window->GetBoundsInScreen(); EXPECT_EQ(restored_bounds.ToString(), bounds_in_screen.ToString()); // Note that it should not even be restored at this point, it should have // also cleared the restore rectangle. EXPECT_FALSE(window_state->HasRestoreBounds()); // Double clicking the left resize edge should maximize horizontally. wd.set_window_component(HTLEFT); generator.DoubleClickLeftButton(); bounds_in_screen = window->GetBoundsInScreen(); EXPECT_EQ(restored_bounds.y(), bounds_in_screen.y()); EXPECT_EQ(restored_bounds.height(), bounds_in_screen.height()); EXPECT_EQ(work_area.x(), bounds_in_screen.x()); EXPECT_EQ(work_area.width(), bounds_in_screen.width()); // Single-axis maximization is not considered real maximization. EXPECT_FALSE(window_state->IsMaximized()); // Restore. generator.DoubleClickLeftButton(); EXPECT_EQ(restored_bounds.ToString(), window->GetBoundsInScreen().ToString()); #if defined(OS_WIN) // Multi display test does not run on Win8 bot. crbug.com/247427. if (base::win::GetVersion() >= base::win::VERSION_WIN8) return; #endif // Verify the double clicking the resize edge works on 2nd display too. UpdateDisplay("200x200,400x300"); gfx::Rect work_area2 = ScreenUtil::GetSecondaryDisplay().work_area(); restored_bounds.SetRect(220,20, 50, 50); window->SetBoundsInScreen(restored_bounds, ScreenUtil::GetSecondaryDisplay()); aura::Window* second_root = Shell::GetAllRootWindows()[1]; EXPECT_EQ(second_root, window->GetRootWindow()); aura::test::EventGenerator generator2(second_root, window.get()); // Y-axis maximization. wd.set_window_component(HTTOP); generator2.PressLeftButton(); generator2.ReleaseLeftButton(); generator2.set_flags(ui::EF_IS_DOUBLE_CLICK); generator2.PressLeftButton(); generator2.MoveMouseTo(generator.current_location(), 1); generator2.ReleaseLeftButton(); generator.DoubleClickLeftButton(); bounds_in_screen = window->GetBoundsInScreen(); EXPECT_EQ(restored_bounds.x(), bounds_in_screen.x()); EXPECT_EQ(restored_bounds.width(), bounds_in_screen.width()); EXPECT_EQ(work_area2.y(), bounds_in_screen.y()); EXPECT_EQ(work_area2.height(), bounds_in_screen.height()); EXPECT_FALSE(window_state->IsMaximized()); // Restore. generator2.DoubleClickLeftButton(); EXPECT_EQ(restored_bounds.ToString(), window->GetBoundsInScreen().ToString()); // X-axis maximization. wd.set_window_component(HTLEFT); generator2.DoubleClickLeftButton(); bounds_in_screen = window->GetBoundsInScreen();<|fim▁hole|> EXPECT_EQ(work_area2.width(), bounds_in_screen.width()); EXPECT_FALSE(window_state->IsMaximized()); // Restore. generator2.DoubleClickLeftButton(); EXPECT_EQ(restored_bounds.ToString(), window->GetBoundsInScreen().ToString()); } // Tests the behavior when double clicking the border of a side snapped window. 
TEST_F(WorkspaceEventHandlerTest, DoubleClickSingleAxisWhenSideSnapped) { gfx::Rect restored_bounds(10, 10, 50, 50); aura::test::TestWindowDelegate wd; scoped_ptr<aura::Window> window(CreateTestWindow(&wd, restored_bounds)); gfx::Rect work_area_in_screen = Shell::GetScreen()->GetDisplayNearestWindow( window.get()).work_area(); wm::WindowState* window_state = wm::GetWindowState(window.get()); const wm::WMEvent snap_event(wm::WM_EVENT_SNAP_LEFT); window_state->OnWMEvent(&snap_event); gfx::Rect snapped_bounds_in_screen = window->GetBoundsInScreen(); EXPECT_EQ(work_area_in_screen.x(), snapped_bounds_in_screen.x()); EXPECT_EQ(work_area_in_screen.y(), snapped_bounds_in_screen.y()); EXPECT_GT(work_area_in_screen.width(), snapped_bounds_in_screen.width()); EXPECT_EQ(work_area_in_screen.height(), snapped_bounds_in_screen.height()); // Double clicking the top border should not do anything for side snapped // windows. (They already take up the entire workspace height and reverting // to the restored bounds would be weird). aura::test::EventGenerator generator(Shell::GetPrimaryRootWindow(), window.get()); wd.set_window_component(HTTOP); generator.DoubleClickLeftButton(); EXPECT_EQ(wm::WINDOW_STATE_TYPE_LEFT_SNAPPED, window_state->GetStateType()); EXPECT_EQ(snapped_bounds_in_screen.ToString(), window->GetBoundsInScreen().ToString()); // Double clicking the right border should exit the side snapped state and // make the window take up the entire work area. wd.set_window_component(HTRIGHT); generator.DoubleClickLeftButton(); EXPECT_TRUE(window_state->IsNormalStateType()); EXPECT_EQ(work_area_in_screen.ToString(), window->GetBoundsInScreen().ToString()); } TEST_F(WorkspaceEventHandlerTest, DoubleClickSingleAxisDoesntResizeVerticalEdgeIfConstrained) { gfx::Rect restored_bounds(10, 10, 50, 50); aura::test::TestWindowDelegate wd; scoped_ptr<aura::Window> window(CreateTestWindow(&wd, restored_bounds)); wm::ActivateWindow(window.get()); gfx::Rect work_area = Shell::GetScreen()->GetDisplayNearestWindow( window.get()).work_area(); wd.set_maximum_size(gfx::Size(0, 100)); aura::test::EventGenerator generator(Shell::GetPrimaryRootWindow(), window.get()); // Double-click the top resize edge. wd.set_window_component(HTTOP); generator.DoubleClickLeftButton(); // The size of the window should be unchanged. EXPECT_EQ(restored_bounds.y(), window->bounds().y()); EXPECT_EQ(restored_bounds.height(), window->bounds().height()); } TEST_F(WorkspaceEventHandlerTest, DoubleClickSingleAxisDoesntResizeHorizontalEdgeIfConstrained) { gfx::Rect restored_bounds(10, 10, 50, 50); aura::test::TestWindowDelegate wd; scoped_ptr<aura::Window> window(CreateTestWindow(&wd, restored_bounds)); wm::ActivateWindow(window.get()); gfx::Rect work_area = Shell::GetScreen()->GetDisplayNearestWindow( window.get()).work_area(); wd.set_maximum_size(gfx::Size(100, 0)); aura::test::EventGenerator generator(Shell::GetPrimaryRootWindow(), window.get()); // Double-click the top resize edge. wd.set_window_component(HTRIGHT); generator.DoubleClickLeftButton(); // The size of the window should be unchanged. 
EXPECT_EQ(restored_bounds.x(), window->bounds().x()); EXPECT_EQ(restored_bounds.width(), window->bounds().width()); } TEST_F(WorkspaceEventHandlerTest, DoubleClickOrTapWithModalChildDoesntMaximize) { aura::test::TestWindowDelegate wd1; aura::test::TestWindowDelegate wd2; scoped_ptr<aura::Window> window( CreateTestWindow(&wd1, gfx::Rect(10, 20, 30, 40))); scoped_ptr<aura::Window> child( CreateTestWindow(&wd2, gfx::Rect(0, 0, 1, 1))); window->SetProperty(aura::client::kCanMaximizeKey, true); wd1.set_window_component(HTCAPTION); child->SetProperty(aura::client::kModalKey, ui::MODAL_TYPE_WINDOW); views::corewm::AddTransientChild(window.get(), child.get()); wm::WindowState* window_state = wm::GetWindowState(window.get()); EXPECT_FALSE(window_state->IsMaximized()); aura::Window* root = Shell::GetPrimaryRootWindow(); aura::test::EventGenerator generator(root, window.get()); generator.DoubleClickLeftButton(); EXPECT_EQ("10,20 30x40", window->bounds().ToString()); EXPECT_FALSE(window_state->IsMaximized()); generator.GestureTapAt(gfx::Point(25, 25)); generator.GestureTapAt(gfx::Point(25, 25)); RunAllPendingInMessageLoop(); EXPECT_EQ("10,20 30x40", window->bounds().ToString()); EXPECT_FALSE(window_state->IsMaximized()); } // Test the behavior as a result of double clicking the window header. TEST_F(WorkspaceEventHandlerTest, DoubleClickCaptionTogglesMaximize) { aura::test::TestWindowDelegate wd; scoped_ptr<aura::Window> window( CreateTestWindow(&wd, gfx::Rect(1, 2, 30, 40))); window->SetProperty(aura::client::kCanMaximizeKey, true); wm::WindowState* window_state = wm::GetWindowState(window.get()); gfx::Rect restore_bounds = window->bounds(); gfx::Rect work_area_in_parent = ScreenUtil::GetDisplayWorkAreaBoundsInParent( window.get()); EXPECT_FALSE(window_state->IsMaximized()); // 1) Double clicking a normal window should maximize. wd.set_window_component(HTCAPTION); aura::Window* root = Shell::GetPrimaryRootWindow(); aura::test::EventGenerator generator(root, window.get()); generator.DoubleClickLeftButton(); EXPECT_NE(restore_bounds.ToString(), window->bounds().ToString()); EXPECT_TRUE(window_state->IsMaximized()); generator.DoubleClickLeftButton(); EXPECT_TRUE(window_state->IsNormalStateType()); EXPECT_EQ(restore_bounds.ToString(), window->bounds().ToString()); // 2) Double clicking a horizontally maximized window should maximize. wd.set_window_component(HTLEFT); generator.DoubleClickLeftButton(); EXPECT_TRUE(window_state->IsNormalStateType()); EXPECT_EQ(work_area_in_parent.x(), window->bounds().x()); EXPECT_EQ(restore_bounds.y(), window->bounds().y()); EXPECT_EQ(work_area_in_parent.width(), window->bounds().width()); EXPECT_EQ(restore_bounds.height(), window->bounds().height()); wd.set_window_component(HTCAPTION); generator.DoubleClickLeftButton(); EXPECT_TRUE(window_state->IsMaximized()); generator.DoubleClickLeftButton(); EXPECT_TRUE(window_state->IsNormalStateType()); EXPECT_EQ(restore_bounds.ToString(), window->bounds().ToString()); // 3) Double clicking a snapped window should maximize. 
const wm::WMEvent snap_event(wm::WM_EVENT_SNAP_LEFT); window_state->OnWMEvent(&snap_event); EXPECT_TRUE(window_state->IsSnapped()); generator.MoveMouseTo(window->GetBoundsInRootWindow().CenterPoint()); generator.DoubleClickLeftButton(); EXPECT_TRUE(window_state->IsMaximized()); generator.DoubleClickLeftButton(); EXPECT_TRUE(window_state->IsNormalStateType()); EXPECT_EQ(restore_bounds.ToString(), window->bounds().ToString()); } // Test that double clicking the middle button on the window header does not // toggle the maximized state. TEST_F(WorkspaceEventHandlerTest, DoubleClickMiddleButtonDoesNotToggleMaximize) { aura::test::TestWindowDelegate wd; scoped_ptr<aura::Window> window( CreateTestWindow(&wd, gfx::Rect(1, 2, 30, 40))); window->SetProperty(aura::client::kCanMaximizeKey, true); wd.set_window_component(HTCAPTION); aura::Window* root = Shell::GetPrimaryRootWindow(); aura::test::EventGenerator generator(root, window.get()); WindowPropertyObserver observer(window.get()); ui::MouseEvent press(ui::ET_MOUSE_PRESSED, generator.current_location(), generator.current_location(), ui::EF_MIDDLE_MOUSE_BUTTON | ui::EF_IS_DOUBLE_CLICK, ui::EF_MIDDLE_MOUSE_BUTTON); ui::EventProcessor* dispatcher = root->GetHost()->event_processor(); ui::EventDispatchDetails details = dispatcher->OnEventFromSource(&press); ASSERT_FALSE(details.dispatcher_destroyed); ui::MouseEvent release(ui::ET_MOUSE_RELEASED, generator.current_location(), generator.current_location(), ui::EF_IS_DOUBLE_CLICK, ui::EF_MIDDLE_MOUSE_BUTTON); details = dispatcher->OnEventFromSource(&release); ASSERT_FALSE(details.dispatcher_destroyed); EXPECT_FALSE(wm::GetWindowState(window.get())->IsMaximized()); EXPECT_EQ("1,2 30x40", window->bounds().ToString()); EXPECT_FALSE(observer.DidPropertyChange(aura::client::kShowStateKey)); } TEST_F(WorkspaceEventHandlerTest, DoubleTapCaptionTogglesMaximize) { aura::test::TestWindowDelegate wd; gfx::Rect bounds(10, 20, 30, 40); scoped_ptr<aura::Window> window(CreateTestWindow(&wd, bounds)); window->SetProperty(aura::client::kCanMaximizeKey, true); wd.set_window_component(HTCAPTION); wm::WindowState* window_state = wm::GetWindowState(window.get()); EXPECT_FALSE(window_state->IsMaximized()); aura::test::EventGenerator generator(Shell::GetPrimaryRootWindow(), window.get()); generator.GestureTapAt(gfx::Point(25, 25)); generator.GestureTapAt(gfx::Point(25, 25)); RunAllPendingInMessageLoop(); EXPECT_NE(bounds.ToString(), window->bounds().ToString()); EXPECT_TRUE(window_state->IsMaximized()); generator.GestureTapAt(gfx::Point(5, 5)); generator.GestureTapAt(gfx::Point(10, 10)); EXPECT_FALSE(window_state->IsMaximized()); EXPECT_EQ(bounds.ToString(), window->bounds().ToString()); } // Verifies deleting the window while dragging doesn't crash. TEST_F(WorkspaceEventHandlerTest, DeleteWhenDragging) { // Create a large window in the background. This is necessary so that when we // delete |window| WorkspaceEventHandler is still the active event handler. 
aura::test::TestWindowDelegate wd2; scoped_ptr<aura::Window> window2( CreateTestWindow(&wd2, gfx::Rect(0, 0, 500, 500))); aura::test::TestWindowDelegate wd; const gfx::Rect bounds(10, 20, 30, 40); scoped_ptr<aura::Window> window(CreateTestWindow(&wd, bounds)); wd.set_window_component(HTCAPTION); aura::test::EventGenerator generator(window->GetRootWindow()); generator.MoveMouseToCenterOf(window.get()); generator.PressLeftButton(); generator.MoveMouseTo(generator.current_location() + gfx::Vector2d(50, 50)); DCHECK_NE(bounds.origin().ToString(), window->bounds().origin().ToString()); window.reset(); generator.MoveMouseTo(generator.current_location() + gfx::Vector2d(50, 50)); } // Verifies deleting the window while in a run loop doesn't crash. TEST_F(WorkspaceEventHandlerTest, DeleteWhileInRunLoop) { aura::test::TestWindowDelegate wd; const gfx::Rect bounds(10, 20, 30, 40); scoped_ptr<aura::Window> window(CreateTestWindow(&wd, bounds)); wd.set_window_component(HTCAPTION); ASSERT_TRUE(aura::client::GetWindowMoveClient(window->GetRootWindow())); base::MessageLoop::current()->DeleteSoon(FROM_HERE, window.get()); aura::client::GetWindowMoveClient(window->GetRootWindow()) ->RunMoveLoop(window.release(), gfx::Vector2d(), aura::client::WINDOW_MOVE_SOURCE_MOUSE); } } // namespace internal } // namespace ash<|fim▁end|>
EXPECT_EQ(restored_bounds.y(), bounds_in_screen.y()); EXPECT_EQ(restored_bounds.height(), bounds_in_screen.height()); EXPECT_EQ(work_area2.x(), bounds_in_screen.x());
<|file_name|>GuiManager.cpp<|end_file_name|><|fim▁begin|>#include "GuiManager.h" #include "Shader.h" #include "TextureData.h" #include "Component.h" #if defined(GLES2) #include <GLES2/gl2.h> #elif defined(GLES3) #include <GLES3/gl3.h> #else #include <GL/glew.h> #endif #include <glm/gtx/transform.hpp> TextureData *m_textureData; Shader *m_shader; static int g_AttribLocationPosition = 0, g_AttribLocationUV = 0, g_AttribLocationColor = 0; static unsigned int g_VboHandle = 0, g_VaoHandle = 0, g_ElementsHandle = 0; void GuiManager::addInputCharactersUTF8(const char *text) { ImGuiIO &io = ImGui::GetIO(); io.AddInputCharactersUTF8(text); } void GuiManager::setKeyEvent(int key, bool keydown) { ImGuiIO &io = ImGui::GetIO(); io.KeysDown[key] = keydown; } void GuiManager::renderDrawLists(ImDrawData *draw_data) { // Avoid rendering when minimized, scale coordinates for retina displays (screen coordinates != framebuffer coordinates) ImGuiIO &io = ImGui::GetIO(); int fb_width = (int)(io.DisplaySize.x * io.DisplayFramebufferScale.x); int fb_height = (int)(io.DisplaySize.y * io.DisplayFramebufferScale.y); if (fb_width == 0 || fb_height == 0) return; draw_data->ScaleClipRects(io.DisplayFramebufferScale); // Backup GL state GLint last_program; glGetIntegerv(GL_CURRENT_PROGRAM, &last_program); GLint last_texture; glGetIntegerv(GL_TEXTURE_BINDING_2D, &last_texture); GLint last_active_texture; glGetIntegerv(GL_ACTIVE_TEXTURE, &last_active_texture); GLint last_array_buffer; glGetIntegerv(GL_ARRAY_BUFFER_BINDING, &last_array_buffer); GLint last_element_array_buffer; glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING, &last_element_array_buffer); #if !defined(GLES2) GLint last_vertex_array; glGetIntegerv(GL_VERTEX_ARRAY_BINDING, &last_vertex_array); GLint last_blend_src; glGetIntegerv(GL_BLEND_SRC, &last_blend_src); GLint last_blend_dst; glGetIntegerv(GL_BLEND_DST, &last_blend_dst); #endif GLint last_blend_equation_rgb; glGetIntegerv(GL_BLEND_EQUATION_RGB, &last_blend_equation_rgb); GLint last_blend_equation_alpha; glGetIntegerv(GL_BLEND_EQUATION_ALPHA, &last_blend_equation_alpha); GLint last_viewport[4]; glGetIntegerv(GL_VIEWPORT, last_viewport); GLboolean last_enable_blend = glIsEnabled(GL_BLEND); GLboolean last_enable_cull_face = glIsEnabled(GL_CULL_FACE); GLboolean last_enable_depth_test = glIsEnabled(GL_DEPTH_TEST); GLboolean last_enable_scissor_test = glIsEnabled(GL_SCISSOR_TEST); // Setup render state: alpha-blending enabled, no face culling, no depth testing, scissor enabled glEnable(GL_BLEND); glBlendEquation(GL_FUNC_ADD); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); glDisable(GL_CULL_FACE); glDisable(GL_DEPTH_TEST); glEnable(GL_SCISSOR_TEST); glActiveTexture(GL_TEXTURE0); // Setup orthographic projection matrix glViewport(0, 0, (GLsizei)fb_width, (GLsizei)fb_height); auto ortho_projection = glm::ortho(0.0f, io.DisplaySize.x, io.DisplaySize.y, 0.0f); m_shader->bind(); m_shader->setUniform1i("Texture", 0); m_shader->setUniformMatrix4f("ProjMtx", ortho_projection); #if !defined(GLES2) glBindVertexArray(g_VaoHandle); #else glBindBuffer(GL_ARRAY_BUFFER, g_VboHandle); glEnableVertexAttribArray(g_AttribLocationPosition); glEnableVertexAttribArray(g_AttribLocationUV); glEnableVertexAttribArray(g_AttribLocationColor); #define OFFSETOF(TYPE, ELEMENT) ((size_t) & (((TYPE *)0)->ELEMENT)) glVertexAttribPointer(g_AttribLocationPosition, 2, GL_FLOAT, GL_FALSE, sizeof(ImDrawVert), (GLvoid *)OFFSETOF(ImDrawVert, pos)); glVertexAttribPointer(g_AttribLocationUV, 2, GL_FLOAT, GL_FALSE, sizeof(ImDrawVert), (GLvoid 
*)OFFSETOF(ImDrawVert, uv)); glVertexAttribPointer(g_AttribLocationColor, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(ImDrawVert), (GLvoid *)OFFSETOF(ImDrawVert, col)); #undef OFFSETOF #endif for (int n = 0; n < draw_data->CmdListsCount; n++) { const ImDrawList *cmd_list = draw_data->CmdLists[n]; const ImDrawIdx *idx_buffer_offset = 0; glBindBuffer(GL_ARRAY_BUFFER, g_VboHandle); glBufferData(GL_ARRAY_BUFFER, (GLsizeiptr)cmd_list->VtxBuffer.size() * sizeof(ImDrawVert), (GLvoid *)&cmd_list->VtxBuffer.front(), GL_STREAM_DRAW); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, g_ElementsHandle); glBufferData(GL_ELEMENT_ARRAY_BUFFER, (GLsizeiptr)cmd_list->IdxBuffer.size() * sizeof(ImDrawIdx), (GLvoid *)&cmd_list->IdxBuffer.front(), GL_STREAM_DRAW); for (const ImDrawCmd *pcmd = cmd_list->CmdBuffer.begin(); pcmd != cmd_list->CmdBuffer.end(); pcmd++) { if (pcmd->UserCallback) { pcmd->UserCallback(cmd_list, pcmd); } else { m_textureData->bind(0); glScissor((int)pcmd->ClipRect.x, (int)(fb_height - pcmd->ClipRect.w), (int)(pcmd->ClipRect.z - pcmd->ClipRect.x), (int)(pcmd->ClipRect.w - pcmd->ClipRect.y)); glDrawElements(GL_TRIANGLES, (GLsizei)pcmd->ElemCount, sizeof(ImDrawIdx) == 2 ? GL_UNSIGNED_SHORT : GL_UNSIGNED_INT, idx_buffer_offset); } idx_buffer_offset += pcmd->ElemCount; } } #if defined(GLES2) glDisableVertexAttribArray(g_AttribLocationPosition); glDisableVertexAttribArray(g_AttribLocationUV); glDisableVertexAttribArray(g_AttribLocationColor); #endif // Restore modified GL state glUseProgram(last_program); glActiveTexture(last_active_texture); glBindTexture(GL_TEXTURE_2D, last_texture); #if !defined(GLES2) glBindVertexArray(last_vertex_array); #endif glBindBuffer(GL_ARRAY_BUFFER, last_array_buffer); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, last_element_array_buffer); glBlendEquationSeparate(last_blend_equation_rgb, last_blend_equation_alpha); #if !defined(GLES2) glBlendFunc(last_blend_src, last_blend_dst); #endif if (last_enable_blend) glEnable(GL_BLEND); else glDisable(GL_BLEND); if (last_enable_cull_face) glEnable(GL_CULL_FACE); else glDisable(GL_CULL_FACE); if (last_enable_depth_test) glEnable(GL_DEPTH_TEST); else glDisable(GL_DEPTH_TEST); if (last_enable_scissor_test) glEnable(GL_SCISSOR_TEST); else glDisable(GL_SCISSOR_TEST); glViewport(last_viewport[0], last_viewport[1], (GLsizei)last_viewport[2], (GLsizei)last_viewport[3]); } void GuiManager::invalidateDeviceObjects(void) { #if !defined(GLES2) if (g_VaoHandle) glDeleteVertexArrays(1, &g_VaoHandle); #endif if (g_VboHandle) glDeleteBuffers(1, &g_VboHandle); if (g_ElementsHandle) glDeleteBuffers(1, &g_ElementsHandle); g_VaoHandle = g_VboHandle = g_ElementsHandle = 0; delete m_shader; } void GuiManager::createDeviceObjects(void) { m_shader = new Shader("shaders/gui"); m_shader->link(); m_shader->createUniform("Texture"); m_shader->createUniform("ProjMtx"); g_AttribLocationPosition = glGetAttribLocation(m_shader->getProgram(), "Position"); g_AttribLocationUV = glGetAttribLocation(m_shader->getProgram(), "UV"); g_AttribLocationColor = glGetAttribLocation(m_shader->getProgram(), "Color"); glGenBuffers(1, &g_VboHandle); glGenBuffers(1, &g_ElementsHandle); #if !defined(GLES2) glGenVertexArrays(1, &g_VaoHandle); glBindVertexArray(g_VaoHandle); glBindBuffer(GL_ARRAY_BUFFER, g_VboHandle); glEnableVertexAttribArray(g_AttribLocationPosition); glEnableVertexAttribArray(g_AttribLocationUV); glEnableVertexAttribArray(g_AttribLocationColor); #define OFFSETOF(TYPE, ELEMENT) ((size_t) & (((TYPE *)0)->ELEMENT)) glVertexAttribPointer(g_AttribLocationPosition, 2, GL_FLOAT, 
GL_FALSE, sizeof(ImDrawVert), (GLvoid *)OFFSETOF(ImDrawVert, pos)); glVertexAttribPointer(g_AttribLocationUV, 2, GL_FLOAT, GL_FALSE, sizeof(ImDrawVert), (GLvoid *)OFFSETOF(ImDrawVert, uv)); glVertexAttribPointer(g_AttribLocationColor, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(ImDrawVert), (GLvoid *)OFFSETOF(ImDrawVert, col)); #undef OFFSETOF #endif } GuiManager::GuiManager(const glm::vec2 &drawableSize, const glm::vec2 &displaySize, SDL_Window *sdlWindow) { m_sdlWindow = sdlWindow; showProps = true; ImGuiIO &io = ImGui::GetIO(); io.KeyMap[ImGuiKey_Tab] = SDLK_TAB; // Keyboard mapping. ImGui will use those indices to peek into the io.KeyDown[] array. io.KeyMap[ImGuiKey_LeftArrow] = SDL_SCANCODE_LEFT; io.KeyMap[ImGuiKey_RightArrow] = SDL_SCANCODE_RIGHT; io.KeyMap[ImGuiKey_UpArrow] = SDL_SCANCODE_UP; io.KeyMap[ImGuiKey_DownArrow] = SDL_SCANCODE_DOWN; io.KeyMap[ImGuiKey_PageUp] = SDL_SCANCODE_PAGEUP; io.KeyMap[ImGuiKey_PageDown] = SDL_SCANCODE_PAGEDOWN; io.KeyMap[ImGuiKey_Home] = SDL_SCANCODE_HOME; io.KeyMap[ImGuiKey_End] = SDL_SCANCODE_END; io.KeyMap[ImGuiKey_Delete] = SDLK_DELETE; io.KeyMap[ImGuiKey_Backspace] = SDLK_BACKSPACE; io.KeyMap[ImGuiKey_Enter] = SDLK_RETURN; io.KeyMap[ImGuiKey_Escape] = SDLK_ESCAPE; io.KeyMap[ImGuiKey_A] = SDLK_a; io.KeyMap[ImGuiKey_C] = SDLK_c; io.KeyMap[ImGuiKey_V] = SDLK_v; io.KeyMap[ImGuiKey_X] = SDLK_x; io.KeyMap[ImGuiKey_Y] = SDLK_y; io.KeyMap[ImGuiKey_Z] = SDLK_z; io.RenderDrawListsFn = GuiManager::renderDrawLists; // Alternatively you can set this to NULL and call ImGui::GetDrawData() after ImGui::Render() to get the same ImDrawData pointer. io.SetClipboardTextFn = Window::setClipboardText; io.GetClipboardTextFn = Window::getClipboardText; //#ifdef _WIN32 // SDL_SysWMinfo wmInfo; // SDL_VERSION(&wmInfo.version); // SDL_GetWindowWMInfo(m_sdlWindow, &wmInfo); // io.ImeWindowHandle = wmInfo.info.win.window; //#endif createDeviceObjects(); unsigned char *pixels; int width, height; io.Fonts->GetTexDataAsRGBA32(&pixels, &width, &height); // Load as RGBA 32-bits for OpenGL3 demo because it is more likely to be compatible with user's existing shader. m_textureData = new TextureData(width, height, pixels, GL_TEXTURE_2D, GL_LINEAR); io.DisplaySize = ImVec2(displaySize.x, displaySize.y); io.DisplayFramebufferScale = ImVec2(displaySize.x > 0 ? (drawableSize.x / displaySize.x) : 0, displaySize.y > 0 ? (drawableSize.y / displaySize.y) : 0); } GuiManager::~GuiManager(void) { invalidateDeviceObjects(); delete m_textureData; ImGui::Shutdown(); } void GuiManager::tick(Window *window, std::chrono::microseconds delta) { ImGuiIO &io = ImGui::GetIO(); io.DeltaTime = std::chrono::duration_cast<std::chrono::duration<float>>(delta).count(); glm::vec2 mousePos = window->getInput()->getMousePosition(); io.MousePos = ImVec2(mousePos.x, mousePos.y); io.MouseDown[0] = window->getInput()->mouseIsPressed(SDL_BUTTON_LEFT); io.MouseDown[1] = window->getInput()->mouseIsPressed(SDL_BUTTON_RIGHT); io.MouseDown[2] = window->getInput()->mouseIsPressed(SDL_BUTTON_MIDDLE); io.MouseWheel = window->getInput()->getMouseWheel().y / 15.0f; io.KeyShift = (window->getInput()->getKeyModState() & KMOD_SHIFT) != 0; io.KeyCtrl = (window->getInput()->getKeyModState() & KMOD_CTRL) != 0; io.KeyAlt = (window->getInput()->getKeyModState() & KMOD_ALT) != 0; io.KeySuper = (window->getInput()->getKeyModState() & KMOD_GUI) != 0; window->drawCursor(io.MouseDrawCursor ? 
false : true); // Start the frame ImGui::NewFrame(); } void renderComponent(Component *component) { ImGui::PushID(component); ImGui::AlignFirstTextHeightToWidgets(); ImGui::PushStyleColor(ImGuiCol_Text, ImColor(1.0f, 0.78f, 0.58f, 1.0f)); bool node_open = ImGui::TreeNodeEx("Component", ImGuiTreeNodeFlags_DefaultOpen, "%s_%u", "component", component); ImGui::NextColumn(); ImGui::AlignFirstTextHeightToWidgets(); ImGui::Text(component->getType()); ImGui::PopStyleColor(); ImGui::NextColumn(); int id = 0; if (node_open) { for (auto &property : component->m_properties) { ImGui::PushID(id++); ImGui::AlignFirstTextHeightToWidgets(); ImGui::Bullet(); ImGui::PushStyleColor(ImGuiCol_Text, ImColor(0.78f, 0.58f, 1.0f, 1.0f)); ImGui::Selectable(property.first); ImGui::NextColumn(); ImGui::PushItemWidth(-1); switch (property.second.type) { case FLOAT: ImGui::SliderFloat("##value", (float *)property.second.p, property.second.min, property.second.max); break; case FLOAT3: ImGui::SliderFloat3("##value", (float *)property.second.p, property.second.min, property.second.max); break; case BOOLEAN: ImGui::Checkbox("##value", (bool *)property.second.p); break; case COLOR: ImGui::ColorEdit3("##value", (float *)property.second.p); break; case ANGLE: ImGui::SliderAngle("##value", (float *)property.second.p, property.second.min, property.second.max); break; } ImGui::PopStyleColor(); ImGui::PopItemWidth(); ImGui::NextColumn(); ImGui::PopID(); } ImGui::TreePop(); } ImGui::PopID(); } void renderSceneGraph(Entity *sceneGraph) { ImGui::PushID(sceneGraph); ImGui::AlignFirstTextHeightToWidgets(); ImGui::PushStyleColor(ImGuiCol_Text, ImColor(0.78f, 1.0f, 0.58f, 1.0f)); bool node_open = ImGui::TreeNodeEx("Node", ImGuiTreeNodeFlags_DefaultOpen, "%s_%u", "node", sceneGraph); ImGui::PopStyleColor(); ImGui::NextColumn(); ImGui::AlignFirstTextHeightToWidgets(); ImGui::NextColumn(); int id = 0; if (node_open) { ImGui::PushID(id); ImGui::PushStyleColor(ImGuiCol_Text, ImColor(0.0f, 0.8f, 1.0f, 1.0f)); ImGui::AlignFirstTextHeightToWidgets(); ImGui::Bullet(); ImGui::Selectable("translation"); ImGui::NextColumn(); ImGui::PushItemWidth(-1); ImGui::SliderFloat3("##value", &(sceneGraph->getTransform().m_position.x), -10.0f, 10.0f); ImGui::PopItemWidth(); ImGui::NextColumn(); ImGui::PopID(); ImGui::PushID(++id); ImGui::Bullet(); ImGui::Selectable("rotation"); ImGui::NextColumn(); ImGui::PushItemWidth(-1); ImGui::SliderFloat4("##value", &(sceneGraph->getTransform().m_rotation.x), -1.0f, 1.0f); ImGui::PopItemWidth(); ImGui::NextColumn(); ImGui::PopID(); ImGui::PushID(++id); ImGui::Bullet(); ImGui::Selectable("scale"); ImGui::NextColumn(); ImGui::PushItemWidth(-1); ImGui::SliderFloat3("##value", &(sceneGraph->getTransform().m_scale.x), 0.0f, 10.0f); ImGui::PopItemWidth(); ImGui::NextColumn(); ImGui::PopStyleColor(); ImGui::PopID(); for (auto component : sceneGraph->getComponents()) { renderComponent(component.get()); } for (auto entity : sceneGraph->getChildren()) { renderSceneGraph(entity.get()); } ImGui::TreePop(); } ImGui::PopID(); } void GuiManager::togglePropertyEditor(void) { showProps = !showProps; } void GuiManager::render(Entity *sceneGraph) { if (showProps) { ImGui::SetNextWindowPos(ImVec2(10, 10)); ImGui::SetNextWindowSize(ImVec2(500, 0), ImGuiSetCond_FirstUseEver); if (!ImGui::Begin("Example: Fixed Overlay", nullptr, ImVec2(0, 0), 0.3f, ImGuiWindowFlags_NoTitleBar | ImGuiWindowFlags_NoResize | ImGuiWindowFlags_NoMove | ImGuiWindowFlags_NoSavedSettings)) { ImGui::End(); return; } ImGui::Text("Application average %.3f ms/frame (%.1f 
FPS)", 1000.0f / ImGui::GetIO().Framerate, ImGui::GetIO().Framerate); ImGui::PushStyleVar(ImGuiStyleVar_FramePadding, ImVec2(2, 2)); ImGui::Separator();<|fim▁hole|> ImGui::Columns(1); ImGui::Separator(); ImGui::PopStyleVar(); ImGui::End(); // ImGui::ShowTestWindow(); ImGui::Render(); } }<|fim▁end|>
ImGui::Columns(2); renderSceneGraph(sceneGraph);
<|file_name|>package.py<|end_file_name|><|fim▁begin|># Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) class PyAzureMgmtDeploymentmanager(PythonPackage): """Microsoft Azure Deployment Manager Client Library for Python.""" homepage = "https://github.com/Azure/azure-sdk-for-python" pypi = "azure-mgmt-deploymentmanager/azure-mgmt-deploymentmanager-0.2.0.zip" version('0.2.0', sha256='46e342227993fc9acab1dda42f2eb566b522a8c945ab9d0eea56276b46f6d730') <|fim▁hole|> depends_on('[email protected]:', type=('build', 'run')) depends_on('[email protected]:1', type=('build', 'run')) depends_on('[email protected]:1', type=('build', 'run')) depends_on('py-azure-mgmt-nspkg', when='^python@:2', type=('build', 'run'))<|fim▁end|>
depends_on('py-setuptools', type='build')
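For context, the completion above slots a build-only dependency into the recipe; a stripped-down sketch of the same Spack recipe shape, where the class name and checksum are placeholders rather than values from the row:

class PyExamplePackage(PythonPackage):
    # PythonPackage, version() and depends_on() are directives Spack injects
    # into recipe scope; this sketch is not standalone outside a Spack repo.
    pypi = "example-package/example-package-0.2.0.zip"
    version('0.2.0', sha256='0' * 64)                     # placeholder checksum
    depends_on('py-setuptools', type='build')             # build-time only
    depends_on('[email protected]:', type=('build', 'run'))  # build and runtime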
<|file_name|>natural_language.py<|end_file_name|><|fim▁begin|># coding: utf-8<|fim▁hole|> natural_language = [ 'Natural Language :: Portuguese (Brazilian)', ]<|fim▁end|>
<|file_name|>send-resource.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::thread::Thread; use std::sync::mpsc::channel; struct test { f: int, }<|fim▁hole|> fn drop(&mut self) {} } fn test(f: int) -> test { test { f: f } } pub fn main() { let (tx, rx) = channel(); let _t = Thread::spawn(move|| { let (tx2, rx2) = channel(); tx.send(tx2).unwrap(); let _r = rx2.recv().unwrap(); }); rx.recv().unwrap().send(test(42)).unwrap(); }<|fim▁end|>
impl Drop for test {
<|file_name|>P13nSelectionItem.js<|end_file_name|><|fim▁begin|>/* * ! ${copyright} */ // Provides control sap.m.P13nSelectionItem. sap.ui.define([ 'jquery.sap.global', './library', 'sap/ui/core/Item' ], function(jQuery, library, Item) { "use strict"; /** * Constructor for a new P13nSelectionItem. * * @param {string} [sId] ID for the new control, generated automatically if no ID is given * @param {object} [mSettings] initial settings for the new control * @class Type for <code>selectionItems</code> aggregation in <code>P13nSelectionPanel</code> control. * @extends sap.ui.core.Item * @version ${version} * @constructor * @author SAP SE * @private * @since 1.46.0 * @alias sap.m.P13nSelectionItem * @ui5-metamodel This control/element also will be described in the UI5 (legacy) designtime metamodel */ var P13nSelectionItem = Item.extend("sap.m.P13nSelectionItem", /** @lends sap.m.P13nSelectionItem.prototype */ { metadata: { library: "sap.m", properties: { /** * Defines the unique table column key. */ columnKey: { type: "string", defaultValue: undefined }, /** * Defines the index of a table column. */<|fim▁hole|> index: { type: "int", defaultValue: -1 }, /** * Defines whether the <code>P13nSelectionItem</code> is selected. */ selected: { type: "boolean", defaultValue: false } } } }); return P13nSelectionItem; }, /* bExport= */true);<|fim▁end|>
<|file_name|>cmp.rs<|end_file_name|><|fim▁begin|>#![feature(core)] extern crate core; #[cfg(test)] mod tests { use core::cmp::Ordering::{self, Less, Equal, Greater}; // #[derive(Clone, Copy, PartialEq, Debug)] // #[stable(feature = "rust1", since = "1.0.0")] // pub enum Ordering { // /// An ordering where a compared value is less [than another]. // #[stable(feature = "rust1", since = "1.0.0")] // Less = -1, // /// An ordering where a compared value is equal [to another]. // #[stable(feature = "rust1", since = "1.0.0")] // Equal = 0, // /// An ordering where a compared value is greater [than another]. // #[stable(feature = "rust1", since = "1.0.0")] // Greater = 1, // }<|fim▁hole|> // #[stable(feature = "rust1", since = "1.0.0")] // fn cmp(&self, other: &Ordering) -> Ordering { // (*self as i32).cmp(&(*other as i32)) // } // } #[test] fn cmp_test1() { let x: Ordering = Less; let y: Ordering = Less; let result: Ordering = x.cmp(&y); assert_eq!(result, Equal); } #[test] fn cmp_test2() { let x: Ordering = Equal; let y: Ordering = Less; let result: Ordering = x.cmp(&y); assert_eq!(result, Greater); } #[test] fn cmp_test3() { let x: Ordering = Greater; let y: Ordering = Less; let result: Ordering = x.cmp(&y); assert_eq!(result, Greater); } #[test] fn cmp_test4() { let x: Ordering = Less; let y: Ordering = Equal; let result: Ordering = x.cmp(&y); assert_eq!(result, Less); } #[test] fn cmp_test5() { let x: Ordering = Equal; let y: Ordering = Equal; let result: Ordering = x.cmp(&y); assert_eq!(result, Equal); } #[test] fn cmp_test6() { let x: Ordering = Greater; let y: Ordering = Equal; let result: Ordering = x.cmp(&y); assert_eq!(result, Greater); } #[test] fn cmp_test7() { let x: Ordering = Less; let y: Ordering = Greater; let result: Ordering = x.cmp(&y); assert_eq!(result, Less); } #[test] fn cmp_test8() { let x: Ordering = Equal; let y: Ordering = Greater; let result: Ordering = x.cmp(&y); assert_eq!(result, Less); } #[test] fn cmp_test9() { let x: Ordering = Greater; let y: Ordering = Greater; let result: Ordering = x.cmp(&y); assert_eq!(result, Equal); } }<|fim▁end|>
// impl Ord for Ordering { // #[inline]
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from fancontroller.fan_controller import Thermostat, STATE_ON, STATE_OFF<|fim▁hole|> Thermostat STATE_ON STATE_OFF<|fim▁end|>
<|file_name|>test_type.py<|end_file_name|><|fim▁begin|>from unittest import TestCase from decimal import Decimal import datetime import sys if sys.version_info[0] == 3: unicode_str = '\u2603' else: unicode_str = unicode('snowman') import validictory class TestType(TestCase): def test_schema(self): schema = { "type": [ {"type": "array", "minItems": 10}, {"type": "string", "pattern": "^0+$"} ] } data1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] data2 = "0" data3 = 1203 for x in [data1, data2]: try: validictory.validate(x, schema) except ValueError as e: self.fail("Unexpected failure: %s" % e) self.assertRaises(ValueError, validictory.validate, data3, schema) def _test_type(self, typename, valids, invalids): for x in valids: try: validictory.validate(x, {"type": typename}) except ValueError as e: self.fail("Unexpected failure: %s" % e) for x in invalids: self.assertRaises(ValueError, validictory.validate, x, {"type": typename}) def test_integer(self): valid_ints = [1, -89, 420000] invalid_ints = [1.2, "bad", {"test": "blah"}, [32, 49], None, True] self._test_type('integer', valid_ints, invalid_ints) def test_string(self): valids = ["abc", unicode_str] invalids = [1.2, 1, {"test": "blah"}, [32, 49], None, True] self._test_type('string', valids, invalids) def test_number(self): valids = [1.2, -89.42, 48, -32, Decimal('25.25')] invalids = ["bad", {"test": "blah"}, [32.42, 494242], None, True] self._test_type('number', valids, invalids) def test_boolean(self): valids = [True, False] invalids = [1.2, "False", {"test": "blah"}, [32, 49], None, 1, 0] self._test_type('boolean', valids, invalids) def test_object(self): valids = [{"blah": "test"}, {"this": {"blah": "test"}}, {1: 2, 10: 20}] invalids = [1.2, "bad", 123, [32, 49], None, True] self._test_type('object', valids, invalids) def test_array(self): valids = [[1, 89], [48, {"test": "blah"}, "49", 42], (47, 11)] invalids = [1.2, "bad", {"test": "blah"}, 1234, None, True] self._test_type('array', valids, invalids) def test_null(self): valids = [None] invalids = [1.2, "bad", {"test": "blah"}, [32, 49], 1284, True] self._test_type('null', valids, invalids) def test_any(self): valids = [1.2, "bad", {"test": "blah"}, [32, 49], None, 1284, True] self._test_type('any', valids, [])<|fim▁hole|> valids = [1.2, "bad", {"test": "blah"}, [32, 49], None, 1284, True] for x in valids: try: validictory.validate(x, {}) except ValueError as e: self.fail("Unexpected failure: %s" % e) def test_multi(self): types = ["null", "integer", "string"] valids = [None, 42, "string"] invalids = [1.2, {"test": "blah"}, [32, 49], True] self._test_type(types, valids, invalids) self._test_type(tuple(types), valids, invalids) class TestDisallow(TestType): def _test_type(self, typename, valids, invalids): for x in invalids: try: validictory.validate(x, {"disallow": typename}) except ValueError as e: self.fail("Unexpected failure: %s" % e) for x in valids: self.assertRaises(ValueError, validictory.validate, x, {"disallow": typename}) class DateValidator(validictory.validator.SchemaValidator): def validate_type_date(self, value): return isinstance(value, datetime.date) def validate_type_datetime(self, value): return isinstance(value, datetime.datetime) class TestCustomType(TestCase): def test_date(self): self._test_type('date', [datetime.date.today()], [2010, '2010']) def test_datetime(self): self._test_type('datetime', [datetime.datetime.now()], [2010, '2010', datetime.date.today()]) def test_either(self): self._test_type(['datetime', 'date'], [datetime.date.today(), datetime.datetime.now()], 
[2010, '2010']) def _test_type(self, typename, valids, invalids): validator = DateValidator() for x in valids: try: validator.validate(x, {"type": typename}) except ValueError as e: self.fail("Unexpected failure: %s" % e) for x in invalids: self.assertRaises(ValueError, validator.validate, x, {"type": typename})<|fim▁end|>
def test_default(self): # test default value (same as any really)
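A quick usage sketch for the DateValidator subclass defined in the test_type.py row — it assumes validictory is installed; the validate_type_* hook naming is validictory's own extension convention:

import datetime

validator = DateValidator()  # the subclass defined in the row above
validator.validate(datetime.date.today(), {"type": "date"})  # accepted
try:
    validator.validate("2010", {"type": "date"})
except ValueError as err:
    print("rejected:", err)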
<|file_name|>issue-2718-a.rs<|end_file_name|><|fim▁begin|>pub struct SendPacket<T> { p: T } mod pingpong { use SendPacket; pub type Ping = SendPacket<Pong>;<|fim▁hole|> pub struct Pong(SendPacket<Ping>); //~^ ERROR recursive type `Pong` has infinite size } fn main() {}<|fim▁end|>
<|file_name|>test_send.py<|end_file_name|><|fim▁begin|>from test_base import BaseTest, load_msg from mock import patch from smtplib import SMTP from deliver.send import Sender class SendTest(BaseTest): def setUp(self): super(SendTest,self).setUp() self.sender = Sender(self.config) @patch('smtplib.SMTP') @patch.object(SMTP, 'sendmail') def test_send(self, smtp, sendmail): msg = load_msg('sample') self.sender.send(msg, u'[email protected]') self.assertEqual(sendmail.call_count, 1)<|fim▁hole|> self.assertEqual(msg['Subject'], u'[Test] BETA 2.0') def test_get_address(self): self.assertEqual(self.sender.get_address(),self.config['sender'])<|fim▁end|>
self.assertEqual(msg['To'], u'[email protected]') self.assertEqual(msg['From'], self.sender.get_address()) self.assertEqual(msg['Reply-To'], self.sender.get_address())
<|file_name|>main.js<|end_file_name|><|fim▁begin|><|fim▁hole|> stretchyNavs.each(function(){ var stretchyNav = $(this), stretchyNavTrigger = stretchyNav.find('.cd-nav-trigger'); stretchyNavTrigger.on('click', function(event){ event.preventDefault(); stretchyNav.toggleClass('nav-is-visible'); }); }); $(document).on('click', function(event){ ( !$(event.target).is('.cd-nav-trigger') && !$(event.target).is('.cd-nav-trigger span') ) && stretchyNavs.removeClass('nav-is-visible'); }); }; ///toggle en contenido $(".toggle-view li").on('click', function(e){ e.currentTarget.classList.toggle("active"); }); });<|fim▁end|>
jQuery(document).ready(function(){ if( $('.cd-stretchy-nav').length > 0 ) { var stretchyNavs = $('.cd-stretchy-nav');
<|file_name|>run.py<|end_file_name|><|fim▁begin|>from app import create_app app = create_app()<|fim▁hole|>if __name__ == "__main__": app.run(host="0.0.0.0", port=5000, debug=True)<|fim▁end|>
<|file_name|>0002_auto_20150623_1913.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations <|fim▁hole|>class Migration(migrations.Migration): dependencies = [ ('QuickBooking', '0001_initial'), ] operations = [ migrations.AlterField( model_name='timing', name='id', field=models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True), ), ]<|fim▁end|>
<|file_name|>node.js<|end_file_name|><|fim▁begin|>'use strict'; var i18n = require('./i18n.js') i18n.add_translation("pt-BR", { test: 'OK', greetings: { hello: 'Olá', welcome: 'Bem vindo' } }); i18n.locale = 'pt-BR'; console.log(i18n.t('greetings.hello')); console.log(i18n.t('greetings.welcome')); console.log("Hallo"); // Example 2 i18n.add_translation("pt-BR", { greetings: { hello: 'Olá', welcome: 'Bem vindo' } }); console.log(i18n.t('greetings.hello')); i18n.add_translation("pt-BR", { test: 'OK', greetings: { hello: 'Oi', bye: 'Tchau' } });<|fim▁hole|><|fim▁end|>
console.log(i18n.t('greetings.hello')); console.log(i18n.t('test'));
<|file_name|>__openerp__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2004-2012 Pexego Sistemas Informáticos All Rights Reserved # $Omar Castiñeira Saavedra$ <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. #<|fim▁hole|># but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { "name" : "Automatic update cost from BOMs", "description" : """Cron job to automate update product cost from BOMs""", "version" : "1.0", "author" : "Pexego", "depends" : ["base", "product", "product_extended"], "category" : "Mrp/Product", "init_xml" : [], "update_xml" : ["mrp_bom_data.xml", "product_category_view.xml", "product_view.xml"], 'demo_xml': [], 'installable': True, 'active': False, }<|fim▁end|>
# This program is distributed in the hope that it will be useful,
<|file_name|>quadcopter_controller_example.py<|end_file_name|><|fim▁begin|>from xboxdrv_parser import Controller from time import sleep import os def main (): # Get input from the two analog sticks as yaw, throttle, roll, and pitch. Take the (0 - 255) input value and # map it to a (-1 - 1) range. controller = Controller (["X1", "Y1", "X2", "Y2", "L2", "R2", "X", "/\\", "[]"], ["yaw", "throttle", "roll", "pitch", "descend", "ascend", "takeover", "takeoff", "land"], (0, 255), (-1, 1)) #controller = Controller (["X1", "Y1", "X2", "Y2"]) while True:<|fim▁hole|> control_packet = controller.get_values () os.system("clear") for i in control_packet: print i, ": ", control_packet[i] # Update at 1000 messages a second sleep (.01) if __name__ == '__main__': main()<|fim▁end|>
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-<|fim▁hole|>from . import test_convert from . import test_env<|fim▁end|>
<|file_name|>devlali.py<|end_file_name|><|fim▁begin|>import sys devlali = {}<|fim▁hole|> for n in str_n: total += int(n) return total + number def generate_devlali(): for i in range(10001): total = sum_number(i) if (total in devlali.keys()): tmp_list = devlali[total] tmp_list.append(i) devlali[total] = tmp_list else: devlali[total] = [i] def get_devlali(number): if (number in devlali.keys()): if (len(devlali[number]) > 1): return 'junction' else: return 'generated' else: return 'self' def main(): generate_devlali() lines = int(sys.stdin.readline().strip()) for num in range(lines): n = int(sys.stdin.readline().strip()) print n, get_devlali(n) main()<|fim▁end|>
def sum_number(number): str_n = str(number) total = 0
<|file_name|>ip_ban.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- ''' Copyright 2013 Joe Harris Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' ''' Trivial example of how to ban an IP. See: http://www.cloudflare.com/docs/client-api.html#s4.7 ''' import os, sys # make sure our local copy of txcloudflare is in sys.path PATH_TO_TXCF = '../txcloudflare/' try: import txcloudflare except ImportError: txcfpath = os.path.dirname(os.path.realpath(PATH_TO_TXCF)) if txcfpath not in sys.path: sys.path.insert(0, txcfpath) <|fim▁hole|>from twisted.internet import reactor import txcloudflare def got_response(response): ''' 'response' is a txcloudflare.response.Response() instance. ''' print '< got a response (done)' print '< ip: {0}'.format(response.data.get('ip', '')) print '< action: {0}'.format(response.data.get('action', '')) reactor.stop() def got_error(error): ''' 'error' is a twisted.python.failure.Failure() instance wrapping one of the exceptions in txcloudflare.errors. The exceptions return the CloudFlare error code, a plain text string and a response object (txcloudflare.response.Response). The response object has a 'request' parameter if you need to look at the reques that generated the error. ''' print '< error' print error.printTraceback() reactor.stop() email_address = os.environ.get('TXCFEMAIL', '') api_token = os.environ.get('TXCFAPI', '') if __name__ == '__main__': ip = '8.8.8.8' print '> banning IP: {0}'.format(ip) cloudflare = txcloudflare.client_api(email_address, api_token) cloudflare.ban(ip=ip).addCallback(got_response).addErrback(got_error) reactor.run() ''' EOF '''<|fim▁end|>
<|file_name|>linalg.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import import numpy.linalg as npla from .numpy_wrapper import wrap_namespace, dot from . import numpy_wrapper as anp wrap_namespace(npla.__dict__, globals()) def atleast_2d_col(x): # Promotes a 1D array into a column rather than a row. return x if x.ndim > 1 else x[:,None] # Some formulas are from # "An extended collection of matrix derivative results # for forward and reverse mode algorithmic differentiation" # by Mike Giles # https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf<|fim▁hole|>solve.defgrad( lambda ans, a, b : lambda g : -dot(atleast_2d_col(solve(a.T, g)), atleast_2d_col(ans).T)) solve.defgrad(lambda ans, a, b : lambda g : solve(a.T, g), argnum=1) norm.defgrad( lambda ans, a : lambda g : dot(g, a/ans)) def make_grad_eigh(ans, x, UPLO='L'): """Gradient for eigenvalues and vectors of a symmetric matrix.""" N = x.shape[0] w, v = ans # Eigenvalues, eigenvectors. def eigh_grad(g): wg, vg = g # Gradient w.r.t. eigenvalues, eigenvectors. w_repeated = anp.repeat(w[:, anp.newaxis], N, 1) off_diag = anp.ones((N, N)) - anp.eye(N) F = off_diag / (w_repeated.T - w_repeated + anp.eye(N)) dx = dot(v * wg + dot(v, F * dot(v.T, vg)), v.T) if UPLO == 'U': # Reflect to account for symmetry. return anp.triu(dx) + anp.tril(dx, -1).T else: return anp.tril(dx) + anp.triu(dx, 1).T return eigh_grad eigh.defgrad(make_grad_eigh)<|fim▁end|>
inv.defgrad( lambda ans, x : lambda g : -dot(dot(ans.T, g), ans.T)) det.defgrad( lambda ans, x : lambda g : g * ans * inv(x).T) slogdet.defgrad(lambda ans, x : lambda g : g[1] * inv(x).T)
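The determinant gradient in the completion follows the Giles identities cited in the row; a NumPy-only sketch that checks d(det A) = det(A) * tr(A^-1 dA) by central finite differences, independent of autograd:

import numpy as np

rng = np.random.RandomState(0)
A = rng.rand(4, 4) + 4 * np.eye(4)         # well-conditioned test matrix
G = np.linalg.det(A) * np.linalg.inv(A).T  # analytic d(det)/dA, as in the row

eps = 1e-6
dA = rng.rand(4, 4)
fd = (np.linalg.det(A + eps * dA) - np.linalg.det(A - eps * dA)) / (2 * eps)
assert np.allclose(fd, (G * dA).sum(), rtol=1e-4)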
<|file_name|>plotting.py<|end_file_name|><|fim▁begin|>"""This screws up visualize.py""" """ import numpy as np from matplotlib import pyplot as plt from matplotlib.lines import Line2D from torch.autograd import Variable as Var from torch import Tensor class RealtimePlot(): def __init__(self, style='ggplot'): plt.style.use(style) plt.ion() self.fig, self.ax = plt.subplots() self.xlim = 0 self.yvals = [] self.line = Line2D([], []) self.ax.add_line(self.line) def config(self, ylabel, xlabel): self.ax.set_ylabel(ylabel) self.ax.set_xlabel(xlabel) self.fig.tight_layout() def plot(self, y):<|fim▁hole|> self.ax.relim() self.ax.autoscale_view() self.ax.set_xlim(0, self.xlim) self.xlim += 1 self.fig.canvas.flush_events() def done(self): plt.ioff() plt.show() def policyplot(env, policy, trj_len): obs_dim = env.observation_space.shape[0] action_dim = env.action_space.shape[0] y = np.zeros((trj_len, action_dim)) X = np.zeros((trj_len, obs_dim)) obs = env.reset() for t in range(trj_len): X[t, :] = obs action = policy(Var(Tensor(obs[None, :]))).data.numpy()[0] y[t, :] = action obs = env.step(action)[0] fig, axes = plt.subplots(1, action_dim) for a in range(action_dim): axes[a].plot(np.arange(trj_len), y[:, a]) plt.show() """<|fim▁end|>
self.yvals.append(y) self.line.set_data(np.arange(len(self.yvals)), self.yvals)
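If the commented-out RealtimePlot in the plotting.py row were enabled, a minimal driver would look like this (assumes matplotlib is installed; the class API is exactly as in the row):

plot = RealtimePlot()            # the class from the row, once un-commented
plot.config(ylabel="loss", xlabel="iteration")
for step in range(50):
    plot.plot(1.0 / (step + 1))  # append one value and redraw
plot.done()                      # switch off interactive mode, keep the figure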
<|file_name|>DataBaseCronTriggerBean.java<|end_file_name|><|fim▁begin|>package net.iharding.core.timer; import java.text.ParseException; import net.iharding.modules.task.model.TimeTask; import net.iharding.modules.task.service.TimeTaskService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.scheduling.quartz.CronTriggerFactoryBean; /** * spring4.1 数据库的读取配置任务触发器 * @author zhangxuhui * @date 2013-9-22 * @version 1.0 */ public class DataBaseCronTriggerBean extends CronTriggerFactoryBean{ @Autowired private TimeTaskService timeTaskService; /** * 读取数据库更新文件 */ public void afterPropertiesSet() throws ParseException { super.afterPropertiesSet(); TimeTask task = timeTaskService.findUniqueBy("taskId",this.getObject().getKey().getName()); if(task!=null && task.getIsEffect().equals("1") &&!task.getCronExpression().equals(this.getObject().getCronExpression())){<|fim▁hole|> } }<|fim▁end|>
this.setCronExpression(task.getCronExpression()); DynamicTask.updateSpringMvcTaskXML(getObject(),task.getCronExpression()); }