file_name
stringlengths 3
137
| prefix
stringlengths 0
918k
| suffix
stringlengths 0
962k
| middle
stringlengths 0
812k
|
---|---|---|---|
statestore_test.go | package statestore_test
import (
"fmt"
"testing"
"github.com/ipfs/go-datastore"
"github.com/stretchr/testify/require"
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/go-statestore"
versioning "github.com/filecoin-project/go-ds-versioning/pkg"
versioned "github.com/filecoin-project/go-ds-versioning/pkg/statestore"
)
func TestStateStore(t *testing.T) {
testCases := map[string]struct {
migrationErr error
inputDatabase map[fmt.Stringer]cbg.CBORMarshaler
test func(t *testing.T, stateStore versioned.StateStore)
}{
"Get, not ready": {
migrationErr: versioning.ErrMigrationsNotRun,
inputDatabase: map[fmt.Stringer]cbg.CBORMarshaler{
stringer("/apples"): newInt(54),
},
test: func(t *testing.T, ss versioned.StateStore) {
storedState := ss.Get(stringer("/apples"))
var out cbg.CborInt
require.EqualError(t, storedState.Get(&out), versioning.ErrMigrationsNotRun.Error())
},
},
"Get, ready": {
inputDatabase: map[fmt.Stringer]cbg.CBORMarshaler{
stringer("/apples"): newInt(54),
},
test: func(t *testing.T, ss versioned.StateStore) {
storedState := ss.Get(stringer("/apples"))
var out cbg.CborInt
err := storedState.Get(&out)
require.Equal(t, cbg.CborInt(54), out)
require.NoError(t, err)
},
},
"Begin, not ready": {
migrationErr: versioning.ErrMigrationsNotRun,
test: func(t *testing.T, ss versioned.StateStore) {
require.EqualError(t, ss.Begin(stringer("/apples"), newInt(54)), versioning.ErrMigrationsNotRun.Error())
},
},
"Beging, ready": {
test: func(t *testing.T, ss versioned.StateStore) {
err := ss.Begin(stringer("/apples"), newInt(54))
require.NoError(t, err)
storedState := ss.Get(stringer("/apples"))
var out cbg.CborInt
err = storedState.Get(&out)
require.Equal(t, cbg.CborInt(54), out)
require.NoError(t, err)
},
},
"List, not ready": {
migrationErr: versioning.ErrMigrationsNotRun,
inputDatabase: map[fmt.Stringer]cbg.CBORMarshaler{
stringer("/apples"): newInt(54),
},
test: func(t *testing.T, ss versioned.StateStore) { | var out []*cbg.CborInt
require.EqualError(t, ss.List(&out), versioning.ErrMigrationsNotRun.Error())
},
},
"List, ready": {
inputDatabase: map[fmt.Stringer]cbg.CBORMarshaler{
stringer("/apples"): newInt(54),
},
test: func(t *testing.T, ss versioned.StateStore) {
var out []cbg.CborInt
err := ss.List(&out)
require.Len(t, out, 1)
require.Equal(t, cbg.CborInt(54), out[0])
require.NoError(t, err)
},
},
"Has, not ready": {
migrationErr: versioning.ErrMigrationsNotRun,
inputDatabase: map[fmt.Stringer]cbg.CBORMarshaler{
stringer("/apples"): newInt(54),
},
test: func(t *testing.T, ss versioned.StateStore) {
has, err := ss.Has(stringer("/apples"))
require.False(t, has)
require.EqualError(t, err, versioning.ErrMigrationsNotRun.Error())
},
},
"Has, ready": {
inputDatabase: map[fmt.Stringer]cbg.CBORMarshaler{
stringer("/apples"): newInt(54),
},
test: func(t *testing.T, ss versioned.StateStore) {
has, err := ss.Has(stringer("/apples"))
require.True(t, has)
require.NoError(t, err)
},
},
}
for testCase, data := range testCases {
t.Run(testCase, func(t *testing.T) {
ds := datastore.NewMapDatastore()
ss := statestore.New(ds)
if data.inputDatabase != nil {
for key, value := range data.inputDatabase {
err := ss.Begin(key, value)
require.NoError(t, err)
}
}
ms := migrationState{data.migrationErr}
migratedSs := versioned.NewMigratedStateStore(ss, ms)
data.test(t, migratedSs)
})
}
}
type migrationState struct {
err error
}
func (ms migrationState) ReadyError() error {
return ms.err
}
func newInt(i int64) *cbg.CborInt {
val := cbg.CborInt(i)
return &val
}
type stringer string
func (s stringer) String() string {
return string(s)
} | |
tap_test.go | package output
import (
"bytes"
"testing"
"github.com/icyxp/kubeconform/pkg/resource"
"github.com/icyxp/kubeconform/pkg/validator"
)
func TestTapWrite(t *testing.T) | {
for _, testCase := range []struct {
name string
withSummary bool
isStdin bool
verbose bool
results []validator.Result
expect string
}{
{
"a single deployment, summary, no verbose",
true,
false,
false,
[]validator.Result{
{
Resource: resource.Resource{
Path: "deployment.yml",
Bytes: []byte(`apiVersion: apps/v1
kind: Deployment
metadata:
name: "my-app"
`),
},
Status: validator.Valid,
Err: nil,
},
},
"TAP version 13\nok 1 - deployment.yml (Deployment)\n1..1\n",
},
{
"a single deployment, verbose, with summary",
true,
false,
true,
[]validator.Result{
{
Resource: resource.Resource{
Path: "deployment.yml",
Bytes: []byte(`apiVersion: apps/v1
kind: Deployment
metadata:
name: "my-app"
`),
},
Status: validator.Valid,
Err: nil,
},
},
"TAP version 13\nok 1 - deployment.yml (Deployment)\n1..1\n",
},
} {
w := new(bytes.Buffer)
o := tapOutput(w, testCase.withSummary, testCase.isStdin, testCase.verbose)
for _, res := range testCase.results {
o.Write(res)
}
o.Flush()
if w.String() != testCase.expect {
t.Errorf("%s - expected:, got:\n%s\n%s", testCase.name, testCase.expect, w)
}
}
} |
|
Symtab.py | #
# Symbol Table
#
from __future__ import absolute_import
import re
import copy
import operator
try:
import __builtin__ as builtins
except ImportError: # Py3
import builtins
from .Errors import warning, error, InternalError
from .StringEncoding import EncodedString
from . import Options, Naming
from . import PyrexTypes
from .PyrexTypes import py_object_type, unspecified_type
from .TypeSlots import (
pyfunction_signature, pymethod_signature, richcmp_special_methods,
get_special_method_signature, get_property_accessor_signature)
from . import Future
from . import Code
iso_c99_keywords = set(
['auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do',
'double', 'else', 'enum', 'extern', 'float', 'for', 'goto', 'if',
'int', 'long', 'register', 'return', 'short', 'signed', 'sizeof',
'static', 'struct', 'switch', 'typedef', 'union', 'unsigned', 'void',
'volatile', 'while',
'_Bool', '_Complex'', _Imaginary', 'inline', 'restrict'])
def c_safe_identifier(cname):
# There are some C limitations on struct entry names.
if ((cname[:2] == '__' and not (cname.startswith(Naming.pyrex_prefix)
or cname in ('__weakref__', '__dict__')))
or cname in iso_c99_keywords):
cname = Naming.pyrex_prefix + cname
return cname
class BufferAux(object):
writable_needed = False
def __init__(self, buflocal_nd_var, rcbuf_var):
self.buflocal_nd_var = buflocal_nd_var
self.rcbuf_var = rcbuf_var
def __repr__(self):
return "<BufferAux %r>" % self.__dict__
class Entry(object):
# A symbol table entry in a Scope or ModuleNamespace.
#
# name string Python name of entity
# cname string C name of entity
# type PyrexType Type of entity
# doc string Doc string
# annotation ExprNode PEP 484/526 annotation
# init string Initial value
# visibility 'private' or 'public' or 'extern'
# is_builtin boolean Is an entry in the Python builtins dict
# is_cglobal boolean Is a C global variable
# is_pyglobal boolean Is a Python module-level variable
# or class attribute during
# class construction
# is_member boolean Is an assigned class member
# is_pyclass_attr boolean Is a name in a Python class namespace
# is_variable boolean Is a variable
# is_cfunction boolean Is a C function
# is_cmethod boolean Is a C method of an extension type
# is_builtin_cmethod boolean Is a C method of a builtin type (implies is_cmethod)
# is_unbound_cmethod boolean Is an unbound C method of an extension type
# is_final_cmethod boolean Is non-overridable C method
# is_inline_cmethod boolean Is inlined C method
# is_anonymous boolean Is a anonymous pyfunction entry
# is_type boolean Is a type definition
# is_cclass boolean Is an extension class
# is_cpp_class boolean Is a C++ class
# is_const boolean Is a constant
# is_property boolean Is a property of an extension type:
# doc_cname string or None C const holding the docstring
# getter_cname string C func for getting property
# setter_cname string C func for setting or deleting property
# is_self_arg boolean Is the "self" arg of an exttype method
# is_arg boolean Is the arg of a method
# is_local boolean Is a local variable
# in_closure boolean Is referenced in an inner scope
# in_subscope boolean Belongs to a generator expression scope
# is_readonly boolean Can't be assigned to
# func_cname string C func implementing Python func
# func_modifiers [string] C function modifiers ('inline')
# pos position Source position where declared
# namespace_cname string If is_pyglobal, the C variable
# holding its home namespace
# pymethdef_cname string PyMethodDef structure
# signature Signature Arg & return types for Python func
# as_variable Entry Alternative interpretation of extension
# type name or builtin C function as a variable
# xdecref_cleanup boolean Use Py_XDECREF for error cleanup
# in_cinclude boolean Suppress C declaration code
# enum_values [Entry] For enum types, list of values
# qualified_name string "modname.funcname" or "modname.classname"
# or "modname.classname.funcname"
# is_declared_generic boolean Is declared as PyObject * even though its
# type is an extension type
# as_module None Module scope, if a cimported module
# is_inherited boolean Is an inherited attribute of an extension type
# pystring_cname string C name of Python version of string literal
# is_interned boolean For string const entries, value is interned
# is_identifier boolean For string const entries, value is an identifier
# used boolean
# is_special boolean Is a special method or property accessor
# of an extension type
# defined_in_pxd boolean Is defined in a .pxd file (not just declared)
# api boolean Generate C API for C class or function
# utility_code string Utility code needed when this entry is used
#
# buffer_aux BufferAux or None Extra information needed for buffer variables
# inline_func_in_pxd boolean Hacky special case for inline function in pxd file.
# Ideally this should not be necessary.
# might_overflow boolean In an arithmetic expression that could cause
# overflow (used for type inference).
# utility_code_definition For some Cython builtins, the utility code
# which contains the definition of the entry.
# Currently only supported for CythonScope entries.
# error_on_uninitialized Have Control Flow issue an error when this entry is
# used uninitialized
# cf_used boolean Entry is used
# is_fused_specialized boolean Whether this entry of a cdef or def function
# is a specialization
# is_cgetter boolean Is a c-level getter function
# TODO: utility_code and utility_code_definition serves the same purpose...
inline_func_in_pxd = False
borrowed = 0
init = ""
annotation = None
visibility = 'private'
is_builtin = 0
is_cglobal = 0
is_pyglobal = 0
is_member = 0
is_pyclass_attr = 0
is_variable = 0
is_cfunction = 0
is_cmethod = 0
is_builtin_cmethod = False
is_unbound_cmethod = 0
is_final_cmethod = 0
is_inline_cmethod = 0
is_anonymous = 0
is_type = 0
is_cclass = 0
is_cpp_class = 0
is_const = 0
is_property = 0
doc_cname = None
getter_cname = None
setter_cname = None
is_self_arg = 0
is_arg = 0
is_local = 0
in_closure = 0
from_closure = 0
in_subscope = 0
is_declared_generic = 0
is_readonly = 0
pyfunc_cname = None
func_cname = None
func_modifiers = []
final_func_cname = None
doc = None
as_variable = None
xdecref_cleanup = 0
in_cinclude = 0
as_module = None
is_inherited = 0
pystring_cname = None
is_identifier = 0
is_interned = 0
used = 0
is_special = 0
defined_in_pxd = 0
is_implemented = 0
api = 0
utility_code = None
is_overridable = 0
buffer_aux = None
prev_entry = None
might_overflow = 0
fused_cfunction = None
is_fused_specialized = False
utility_code_definition = None
needs_property = False
in_with_gil_block = 0
from_cython_utility_code = None
error_on_uninitialized = False
cf_used = True
outer_entry = None
is_cgetter = False
def __init__(self, name, cname, type, pos = None, init = None):
self.name = name
self.cname = cname
self.type = type
self.pos = pos
self.init = init
self.overloaded_alternatives = []
self.cf_assignments = []
self.cf_references = []
self.inner_entries = []
self.defining_entry = self
def __repr__(self):
return "%s(<%x>, name=%s, type=%s)" % (type(self).__name__, id(self), self.name, self.type)
def already_declared_here(self):
error(self.pos, "Previous declaration is here")
def redeclared(self, pos):
error(pos, "'%s' does not match previous declaration" % self.name)
self.already_declared_here()
def all_alternatives(self):
return [self] + self.overloaded_alternatives
def all_entries(self):
return [self] + self.inner_entries
def __lt__(left, right):
if isinstance(left, Entry) and isinstance(right, Entry):
return (left.name, left.cname) < (right.name, right.cname)
else:
return NotImplemented
class InnerEntry(Entry):
"""
An entry in a closure scope that represents the real outer Entry.
"""
from_closure = True
def __init__(self, outer_entry, scope):
Entry.__init__(self, outer_entry.name,
outer_entry.cname,
outer_entry.type,
outer_entry.pos)
self.outer_entry = outer_entry
self.scope = scope
# share state with (outermost) defining entry
outermost_entry = outer_entry
while outermost_entry.outer_entry:
outermost_entry = outermost_entry.outer_entry
self.defining_entry = outermost_entry
self.inner_entries = outermost_entry.inner_entries
self.cf_assignments = outermost_entry.cf_assignments
self.cf_references = outermost_entry.cf_references
self.overloaded_alternatives = outermost_entry.overloaded_alternatives
self.inner_entries.append(self)
def __getattr__(self, name):
if name.startswith('__'):
# we wouldn't have been called if it was there
raise AttributeError(name)
return getattr(self.defining_entry, name)
def all_entries(self):
return self.defining_entry.all_entries()
class Scope(object):
# name string Unqualified name
# outer_scope Scope or None Enclosing scope
# entries {string : Entry} Python name to entry, non-types
# const_entries [Entry] Constant entries
# type_entries [Entry] Struct/union/enum/typedef/exttype entries
# sue_entries [Entry] Struct/union/enum entries
# arg_entries [Entry] Function argument entries
# var_entries [Entry] User-defined variable entries
# pyfunc_entries [Entry] Python function entries
# cfunc_entries [Entry] C function entries
# c_class_entries [Entry] All extension type entries
# cname_to_entry {string : Entry} Temp cname to entry mapping
# return_type PyrexType or None Return type of function owning scope
# is_builtin_scope boolean Is the builtin scope of Python/Cython
# is_py_class_scope boolean Is a Python class scope
# is_c_class_scope boolean Is an extension type scope
# is_closure_scope boolean Is a closure scope
# is_passthrough boolean Outer scope is passed directly
# is_cpp_class_scope boolean Is a C++ class scope
# is_property_scope boolean Is a extension type property scope
# scope_prefix string Disambiguator for C names
# in_cinclude boolean Suppress C declaration code
# qualified_name string "modname" or "modname.classname"
# Python strings in this scope
# nogil boolean In a nogil section
# directives dict Helper variable for the recursive
# analysis, contains directive values.
# is_internal boolean Is only used internally (simpler setup)
is_builtin_scope = 0
is_py_class_scope = 0
is_c_class_scope = 0
is_closure_scope = 0
is_genexpr_scope = 0
is_passthrough = 0
is_cpp_class_scope = 0
is_property_scope = 0
is_module_scope = 0
is_internal = 0
scope_prefix = ""
in_cinclude = 0
nogil = 0
fused_to_specific = None
return_type = None
def __init__(self, name, outer_scope, parent_scope):
# The outer_scope is the next scope in the lookup chain.
# The parent_scope is used to derive the qualified name of this scope.
self.name = name
self.outer_scope = outer_scope
self.parent_scope = parent_scope
mangled_name = "%d%s_" % (len(name), name.replace('.', '_dot_'))
qual_scope = self.qualifying_scope()
if qual_scope:
self.qualified_name = qual_scope.qualify_name(name)
self.scope_prefix = qual_scope.scope_prefix + mangled_name
else:
self.qualified_name = EncodedString(name)
self.scope_prefix = mangled_name
self.entries = {}
self.subscopes = set()
self.const_entries = []
self.type_entries = []
self.sue_entries = []
self.arg_entries = []
self.var_entries = []
self.pyfunc_entries = []
self.cfunc_entries = []
self.c_class_entries = []
self.defined_c_classes = []
self.imported_c_classes = {}
self.cname_to_entry = {}
self.string_to_entry = {}
self.identifier_to_entry = {}
self.num_to_entry = {}
self.obj_to_entry = {}
self.buffer_entries = []
self.lambda_defs = []
self.id_counters = {}
def __deepcopy__(self, memo):
return self
def merge_in(self, other, merge_unused=True, whitelist=None):
# Use with care...
entries = []
for name, entry in other.entries.items():
if not whitelist or name in whitelist:
if entry.used or merge_unused:
entries.append((name, entry))
self.entries.update(entries)
for attr in ('const_entries',
'type_entries',
'sue_entries',
'arg_entries',
'var_entries',
'pyfunc_entries',
'cfunc_entries',
'c_class_entries'):
self_entries = getattr(self, attr)
names = set(e.name for e in self_entries)
for entry in getattr(other, attr):
if (entry.used or merge_unused) and entry.name not in names:
self_entries.append(entry)
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.qualified_name)
def qualifying_scope(self):
return self.parent_scope
def mangle(self, prefix, name = None):
if name:
return "%s%s%s" % (prefix, self.scope_prefix, name)
else:
return self.parent_scope.mangle(prefix, self.name)
def mangle_internal(self, name):
# Mangle an internal name so as not to clash with any
# user-defined name in this scope.
prefix = "%s%s_" % (Naming.pyrex_prefix, name)
return self.mangle(prefix)
#return self.parent_scope.mangle(prefix, self.name)
def mangle_class_private_name(self, name):
if self.parent_scope:
return self.parent_scope.mangle_class_private_name(name)
return name
def next_id(self, name=None):
# Return a cname fragment that is unique for this module
counters = self.global_scope().id_counters
try:
count = counters[name] + 1
except KeyError:
count = 0
counters[name] = count
if name:
if not count:
# unique names don't need a suffix, reoccurrences will get one
return name
return '%s%d' % (name, count)
else:
return '%d' % count
def global_scope(self):
""" Return the module-level scope containing this scope. """
return self.outer_scope.global_scope()
def builtin_scope(self):
""" Return the module-level scope containing this scope. """
return self.outer_scope.builtin_scope()
def iter_local_scopes(self):
yield self
if self.subscopes:
for scope in sorted(self.subscopes, key=operator.attrgetter('scope_prefix')):
yield scope
def declare(self, name, cname, type, pos, visibility, shadow = 0, is_type = 0, create_wrapper = 0):
# Create new entry, and add to dictionary if
# name is not None. Reports a warning if already
# declared.
if type.is_buffer and not isinstance(self, LocalScope): # and not is_type:
error(pos, 'Buffer types only allowed as function local variables')
if not self.in_cinclude and cname and re.match("^_[_A-Z]+$", cname):
# See https://www.gnu.org/software/libc/manual/html_node/Reserved-Names.html#Reserved-Names
warning(pos, "'%s' is a reserved name in C." % cname, -1)
entries = self.entries
if name and name in entries and not shadow:
old_entry = entries[name]
# Reject redeclared C++ functions only if they have the same type signature.
cpp_override_allowed = False
if type.is_cfunction and old_entry.type.is_cfunction and self.is_cpp():
for alt_entry in old_entry.all_alternatives():
if type == alt_entry.type:
if name == '<init>' and not type.args:
# Cython pre-declares the no-args constructor - allow later user definitions.
cpp_override_allowed = True
break
else:
cpp_override_allowed = True
if cpp_override_allowed:
# C++ function/method overrides with different signatures are ok.
pass
elif self.is_cpp_class_scope and entries[name].is_inherited:
# Likewise ignore inherited classes.
pass
elif visibility == 'extern':
# Silenced outside of "cdef extern" blocks, until we have a safe way to
# prevent pxd-defined cpdef functions from ending up here.
warning(pos, "'%s' redeclared " % name, 1 if self.in_cinclude else 0)
elif visibility != 'ignore':
error(pos, "'%s' redeclared " % name)
entries[name].already_declared_here()
entry = Entry(name, cname, type, pos = pos)
entry.in_cinclude = self.in_cinclude
entry.create_wrapper = create_wrapper
if name:
entry.qualified_name = self.qualify_name(name)
# if name in entries and self.is_cpp():
# entries[name].overloaded_alternatives.append(entry)
# else:
# entries[name] = entry
if not shadow:
entries[name] = entry
if type.is_memoryviewslice:
from . import MemoryView
entry.init = MemoryView.memslice_entry_init
entry.scope = self
entry.visibility = visibility
return entry
def qualify_name(self, name):
return EncodedString("%s.%s" % (self.qualified_name, name))
def declare_const(self, name, type, value, pos, cname = None, visibility = 'private', api = 0, create_wrapper = 0):
# Add an entry for a named constant.
if not cname:
if self.in_cinclude or (visibility == 'public' or api):
cname = name
else:
cname = self.mangle(Naming.enum_prefix, name)
entry = self.declare(name, cname, type, pos, visibility, create_wrapper = create_wrapper)
entry.is_const = 1
entry.value_node = value
return entry
def declare_type(self, name, type, pos,
cname = None, visibility = 'private', api = 0, defining = 1,
shadow = 0, template = 0):
# Add an entry for a type definition.
if not cname:
cname = name
entry = self.declare(name, cname, type, pos, visibility, shadow,
is_type=True)
entry.is_type = 1
entry.api = api
if defining:
self.type_entries.append(entry)
if not template:
type.entry = entry
# here we would set as_variable to an object representing this type
return entry
def declare_typedef(self, name, base_type, pos, cname = None,
visibility = 'private', api = 0):
if not cname:
if self.in_cinclude or (visibility != 'private' or api):
cname = name
else:
cname = self.mangle(Naming.type_prefix, name)
try:
if self.is_cpp_class_scope:
namespace = self.outer_scope.lookup(self.name).type
else:
namespace = None
type = PyrexTypes.create_typedef_type(name, base_type, cname,
(visibility == 'extern'),
namespace)
except ValueError as e:
error(pos, e.args[0])
type = PyrexTypes.error_type
entry = self.declare_type(name, type, pos, cname,
visibility = visibility, api = api)
type.qualified_name = entry.qualified_name
return entry
def declare_struct_or_union(self, name, kind, scope,
typedef_flag, pos, cname = None,
visibility = 'private', api = 0,
packed = False):
# Add an entry for a struct or union definition.
if not cname:
if self.in_cinclude or (visibility == 'public' or api):
cname = name
else:
cname = self.mangle(Naming.type_prefix, name)
entry = self.lookup_here(name)
if not entry:
type = PyrexTypes.CStructOrUnionType(
name, kind, scope, typedef_flag, cname, packed)
entry = self.declare_type(name, type, pos, cname,
visibility = visibility, api = api,
defining = scope is not None)
self.sue_entries.append(entry)
type.entry = entry
else:
if not (entry.is_type and entry.type.is_struct_or_union
and entry.type.kind == kind):
warning(pos, "'%s' redeclared " % name, 0)
elif scope and entry.type.scope:
warning(pos, "'%s' already defined (ignoring second definition)" % name, 0)
else:
self.check_previous_typedef_flag(entry, typedef_flag, pos)
self.check_previous_visibility(entry, visibility, pos)
if scope:
entry.type.scope = scope
self.type_entries.append(entry)
if self.is_cpp_class_scope:
entry.type.namespace = self.outer_scope.lookup(self.name).type
return entry
def declare_cpp_class(self, name, scope,
pos, cname = None, base_classes = (),
visibility = 'extern', templates = None):
if cname is None:
if self.in_cinclude or (visibility != 'private'):
cname = name
else:
cname = self.mangle(Naming.type_prefix, name)
base_classes = list(base_classes)
entry = self.lookup_here(name)
if not entry:
type = PyrexTypes.CppClassType(
name, scope, cname, base_classes, templates = templates)
entry = self.declare_type(name, type, pos, cname,
visibility = visibility, defining = scope is not None)
self.sue_entries.append(entry)
else:
if not (entry.is_type and entry.type.is_cpp_class):
error(pos, "'%s' redeclared " % name)
entry.already_declared_here()
return None
elif scope and entry.type.scope:
warning(pos, "'%s' already defined (ignoring second definition)" % name, 0)
else:
if scope:
entry.type.scope = scope
self.type_entries.append(entry)
if base_classes:
if entry.type.base_classes and entry.type.base_classes != base_classes:
error(pos, "Base type does not match previous declaration")
entry.already_declared_here()
else:
entry.type.base_classes = base_classes
if templates or entry.type.templates:
if templates != entry.type.templates:
error(pos, "Template parameters do not match previous declaration")
entry.already_declared_here()
def declare_inherited_attributes(entry, base_classes):
for base_class in base_classes:
if base_class is PyrexTypes.error_type:
continue
if base_class.scope is None:
error(pos, "Cannot inherit from incomplete type")
else:
declare_inherited_attributes(entry, base_class.base_classes)
entry.type.scope.declare_inherited_cpp_attributes(base_class)
if scope:
declare_inherited_attributes(entry, base_classes)
scope.declare_var(name="this", cname="this", type=PyrexTypes.CPtrType(entry.type), pos=entry.pos)
if self.is_cpp_class_scope:
entry.type.namespace = self.outer_scope.lookup(self.name).type
return entry
def check_previous_typedef_flag(self, entry, typedef_flag, pos):
if typedef_flag != entry.type.typedef_flag:
error(pos, "'%s' previously declared using '%s'" % (
entry.name, ("cdef", "ctypedef")[entry.type.typedef_flag]))
def check_previous_visibility(self, entry, visibility, pos):
if entry.visibility != visibility:
error(pos, "'%s' previously declared as '%s'" % (
entry.name, entry.visibility))
def declare_enum(self, name, pos, cname, typedef_flag,
visibility = 'private', api = 0, create_wrapper = 0):
if name:
if not cname:
if (self.in_cinclude or visibility == 'public'
or visibility == 'extern' or api):
cname = name
else:
cname = self.mangle(Naming.type_prefix, name)
if self.is_cpp_class_scope:
namespace = self.outer_scope.lookup(self.name).type
else:
namespace = None
type = PyrexTypes.CEnumType(name, cname, typedef_flag, namespace)
else:
type = PyrexTypes.c_anon_enum_type
entry = self.declare_type(name, type, pos, cname = cname,
visibility = visibility, api = api)
entry.create_wrapper = create_wrapper
entry.enum_values = []
self.sue_entries.append(entry)
return entry
def declare_tuple_type(self, pos, components):
return self.outer_scope.declare_tuple_type(pos, components)
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = 0):
# Add an entry for a variable.
if not cname:
if visibility != 'private' or api:
cname = name
else:
cname = self.mangle(Naming.var_prefix, name)
if type.is_cpp_class and visibility != 'extern':
type.check_nullary_constructor(pos)
entry = self.declare(name, cname, type, pos, visibility)
entry.is_variable = 1
if in_pxd and visibility != 'extern':
entry.defined_in_pxd = 1
entry.used = 1
if api:
entry.api = 1
entry.used = 1
return entry
def declare_builtin(self, name, pos):
return self.outer_scope.declare_builtin(name, pos)
def _declare_pyfunction(self, name, pos, visibility='extern', entry=None):
if entry and not entry.type.is_cfunction:
error(pos, "'%s' already declared" % name)
error(entry.pos, "Previous declaration is here")
entry = self.declare_var(name, py_object_type, pos, visibility=visibility)
entry.signature = pyfunction_signature
self.pyfunc_entries.append(entry)
return entry
def declare_pyfunction(self, name, pos, allow_redefine=False, visibility='extern'):
# Add an entry for a Python function.
entry = self.lookup_here(name)
if not allow_redefine:
return self._declare_pyfunction(name, pos, visibility=visibility, entry=entry)
if entry:
if entry.type.is_unspecified:
entry.type = py_object_type
elif entry.type is not py_object_type:
return self._declare_pyfunction(name, pos, visibility=visibility, entry=entry)
else: # declare entry stub
self.declare_var(name, py_object_type, pos, visibility=visibility)
entry = self.declare_var(None, py_object_type, pos,
cname=name, visibility='private')
entry.name = EncodedString(name)
entry.qualified_name = self.qualify_name(name)
entry.signature = pyfunction_signature
entry.is_anonymous = True
return entry
def declare_lambda_function(self, lambda_name, pos):
# Add an entry for an anonymous Python function.
func_cname = self.mangle(Naming.lambda_func_prefix + u'funcdef_', lambda_name)
pymethdef_cname = self.mangle(Naming.lambda_func_prefix + u'methdef_', lambda_name)
qualified_name = self.qualify_name(lambda_name)
entry = self.declare(None, func_cname, py_object_type, pos, 'private')
entry.name = lambda_name
entry.qualified_name = qualified_name
entry.pymethdef_cname = pymethdef_cname
entry.func_cname = func_cname
entry.signature = pyfunction_signature
entry.is_anonymous = True
return entry
def add_lambda_def(self, def_node):
self.lambda_defs.append(def_node)
def register_pyfunction(self, entry):
self.pyfunc_entries.append(entry)
def declare_cfunction(self, name, type, pos,
cname=None, visibility='private', api=0, in_pxd=0,
defining=0, modifiers=(), utility_code=None, overridable=False):
# Add an entry for a C function.
if not cname:
if visibility != 'private' or api:
cname = name
else:
cname = self.mangle(Naming.func_prefix, name)
entry = self.lookup_here(name)
if entry:
if not in_pxd and visibility != entry.visibility and visibility == 'extern':
# Previously declared, but now extern => treat this
# as implementing the function, using the new cname
defining = True
visibility = entry.visibility
entry.cname = cname
entry.func_cname = cname
if visibility != 'private' and visibility != entry.visibility:
warning(pos, "Function '%s' previously declared as '%s', now as '%s'" % (
name, entry.visibility, visibility), 1)
if overridable != entry.is_overridable:
warning(pos, "Function '%s' previously declared as '%s'" % (
name, 'cpdef' if overridable else 'cdef'), 1)
if entry.type.same_as(type):
# Fix with_gil vs nogil.
entry.type = entry.type.with_with_gil(type.with_gil)
else:
if visibility == 'extern' and entry.visibility == 'extern':
can_override = False
if self.is_cpp():
can_override = True
elif cname:
# if all alternatives have different cnames,
# it's safe to allow signature overrides
for alt_entry in entry.all_alternatives():
if not alt_entry.cname or cname == alt_entry.cname:
break # cname not unique!
else:
can_override = True
if can_override:
temp = self.add_cfunction(name, type, pos, cname, visibility, modifiers)
temp.overloaded_alternatives = entry.all_alternatives()
entry = temp
else:
warning(pos, "Function signature does not match previous declaration", 1)
entry.type = type
elif not in_pxd and entry.defined_in_pxd and type.compatible_signature_with(entry.type):
# TODO: check that this was done by a signature optimisation and not a user error.
#warning(pos, "Function signature does not match previous declaration", 1)
entry.type = type
else:
error(pos, "Function signature does not match previous declaration")
else:
entry = self.add_cfunction(name, type, pos, cname, visibility, modifiers)
entry.func_cname = cname
entry.is_overridable = overridable
if in_pxd and visibility != 'extern':
entry.defined_in_pxd = 1
if api:
entry.api = 1
if not defining and not in_pxd and visibility != 'extern':
error(pos, "Non-extern C function '%s' declared but not defined" % name)
if defining:
entry.is_implemented = True
if modifiers:
entry.func_modifiers = modifiers
if utility_code:
assert not entry.utility_code, "duplicate utility code definition in entry %s (%s)" % (name, cname)
entry.utility_code = utility_code
if overridable:
# names of cpdef functions can be used as variables and can be assigned to
var_entry = Entry(name, cname, py_object_type) # FIXME: cname?
var_entry.is_variable = 1
var_entry.is_pyglobal = 1
var_entry.scope = entry.scope
entry.as_variable = var_entry
type.entry = entry
return entry
def add_cfunction(self, name, type, pos, cname, visibility, modifiers,
inherited=False):
# Add a C function entry without giving it a func_cname.
entry = self.declare(name, cname, type, pos, visibility)
entry.is_cfunction = 1
if modifiers:
entry.func_modifiers = modifiers
if inherited or type.is_fused:
self.cfunc_entries.append(entry)
else:
# For backwards compatibility reasons, we must keep all non-fused methods
# before all fused methods, but separately for each type.
i = len(self.cfunc_entries)
for cfunc_entry in reversed(self.cfunc_entries):
if cfunc_entry.is_inherited or not cfunc_entry.type.is_fused:
break
i -= 1
self.cfunc_entries.insert(i, entry)
return entry
def find(self, name, pos):
# Look up name, report error if not found.
entry = self.lookup(name)
if entry:
return entry
else:
error(pos, "'%s' is not declared" % name)
def find_imported_module(self, path, pos):
# Look up qualified name, must be a module, report error if not found.
# Path is a list of names.
scope = self
for name in path:
entry = scope.find(name, pos)
if not entry:
return None
if entry.as_module:
scope = entry.as_module
else:
error(pos, "'%s' is not a cimported module" % '.'.join(path))
return None
return scope
def lookup(self, name):
# Look up name in this scope or an enclosing one.
# Return None if not found.
return (self.lookup_here(name)
or (self.outer_scope and self.outer_scope.lookup(name))
or None)
def lookup_here(self, name):
# Look up in this scope only, return None if not found.
return self.entries.get(name, None)
    def lookup_target(self, name):
        # Look up name in this scope only. Declare as Python
        # variable if not found.
        # Used for assignment targets: a target that has not been
        # declared yet is created on demand as a generic Python object.
        entry = self.lookup_here(name)
        if not entry:
            entry = self.declare_var(name, py_object_type, None)
        return entry
    def lookup_type(self, name):
        # Look up a name and return its type if the entry denotes a
        # type; returns None (implicitly) for non-type entries or
        # unknown names.
        entry = self.lookup(name)
        if entry and entry.is_type:
            if entry.type.is_fused and self.fused_to_specific:
                # Substitute concrete types for fused type parameters
                # when a specialisation mapping is active.
                return entry.type.specialize(self.fused_to_specific)
            return entry.type
def lookup_operator(self, operator, operands):
if operands[0].type.is_cpp_class:
obj_type = operands[0].type
method = obj_type.scope.lookup("operator%s" % operator)
if method is not None:
arg_types = [arg.type for arg in operands[1:]]
res = PyrexTypes.best_match([arg.type for arg in operands[1:]],
method.all_alternatives())
if res is not None:
return res
function = self.lookup("operator%s" % operator)
function_alternatives = []
if function is not None:
function_alternatives = function.all_alternatives()
# look-up nonmember methods listed within a class
method_alternatives = []
if len(operands)==2: # binary operators only
for n in range(2):
if operands[n].type.is_cpp_class:
obj_type = operands[n].type
method = obj_type.scope.lookup("operator%s" % operator)
if method is not None:
method_alternatives += method.all_alternatives()
if (not method_alternatives) and (not function_alternatives):
return None
# select the unique alternatives
all_alternatives = list(set(method_alternatives + function_alternatives))
return PyrexTypes.best_match([arg.type for arg in operands],
all_alternatives)
    def lookup_operator_for_types(self, pos, operator, types):
        # Convenience wrapper around lookup_operator() for plain types:
        # wraps each type in a minimal fake operand node so the
        # node-based lookup can be reused.
        from .Nodes import Node
        class FakeOperand(Node):
            pass
        operands = [FakeOperand(pos, type=type) for type in types]
        return self.lookup_operator(operator, operands)
    def use_utility_code(self, new_code):
        # Forward utility code to the module scope, which owns the
        # utility code list (see ModuleScope.use_utility_code).
        self.global_scope().use_utility_code(new_code)
    def use_entry_utility_code(self, entry):
        # Forward an entry's utility code to the module scope for emission.
        self.global_scope().use_entry_utility_code(entry)
def defines_any(self, names):
# Test whether any of the given names are defined in this scope.
for name in names:
if name in self.entries:
return 1
return 0
def defines_any_special(self, names):
# Test whether any of the given names are defined as special methods in this scope.
for name in names:
if name in self.entries and self.entries[name].is_special:
return 1
return 0
    def infer_types(self):
        # Run variable type inference over this scope using the
        # inferer selected by the active directives.
        from .TypeInference import get_type_inferer
        get_type_inferer().infer_types(self)
def is_cpp(self):
outer = self.outer_scope
if outer is None:
return False
else:
return outer.is_cpp()
    def add_include_file(self, filename, verbatim_include=None, late=False):
        # Delegate to the enclosing (ultimately module) scope, which
        # owns the include list (see ModuleScope.add_include_file).
        self.outer_scope.add_include_file(filename, verbatim_include, late)
class PreImportScope(Scope):
    # Scope used when Options.pre_import is set: names not found in
    # the module or builtins are assumed to come from the pre-imported
    # module rather than being reported as unknown.
    namespace_cname = Naming.preimport_cname
    def __init__(self):
        Scope.__init__(self, Options.pre_import, None, None)
    def declare_builtin(self, name, pos):
        # Any unknown name is declared as a Python global in the
        # pre-import namespace instead of being treated as a builtin.
        entry = self.declare(name, name, py_object_type, pos, 'private')
        entry.is_variable = True
        entry.is_pyglobal = True
        return entry
class BuiltinScope(Scope):
    # The builtin namespace.
    is_builtin_scope = True
    def __init__(self):
        # Chain to a PreImportScope when Options.pre_import is set so
        # that unknown names fall back to it (see PreImportScope).
        if Options.pre_import is None:
            Scope.__init__(self, "__builtin__", None, None)
        else:
            Scope.__init__(self, "__builtin__", PreImportScope(), None)
        self.type_names = {}
        # Declare the fixed table of builtin objects (sorted for
        # deterministic declaration order).
        for name, definition in sorted(self.builtin_entries.items()):
            cname, type = definition
            self.declare_var(name, type, None, cname)
    def lookup(self, name, language_level=None, str_is_str=None):
        # 'language_level' and 'str_is_str' are passed by ModuleScope
        if name == 'str':
            if str_is_str is None:
                str_is_str = language_level in (None, 2)
            if not str_is_str:
                # In Py3 semantics, 'str' resolves to the unicode type.
                name = 'unicode'
        return Scope.lookup(self, name)
    def declare_builtin(self, name, pos):
        # Note: when 'name' IS a real Python builtin, this falls
        # through and returns None implicitly.
        if not hasattr(builtins, name):
            if self.outer_scope is not None:
                # Defer to the pre-import scope, which declares the name.
                return self.outer_scope.declare_builtin(name, pos)
            else:
                if Options.error_on_unknown_names:
                    error(pos, "undeclared name not builtin: %s" % name)
                else:
                    warning(pos, "undeclared name not builtin: %s" % name, 2)
    def declare_builtin_cfunction(self, name, type, cname, python_equiv=None, utility_code=None):
        # If python_equiv == "*", the Python equivalent has the same name
        # as the entry, otherwise it has the name specified by python_equiv.
        name = EncodedString(name)
        entry = self.declare_cfunction(name, type, None, cname, visibility='extern', utility_code=utility_code)
        if python_equiv:
            if python_equiv == "*":
                python_equiv = name
            else:
                python_equiv = EncodedString(python_equiv)
            # Attach a variable entry for the Python-level fallback object.
            var_entry = Entry(python_equiv, python_equiv, py_object_type)
            var_entry.is_variable = 1
            var_entry.is_builtin = 1
            var_entry.utility_code = utility_code
            var_entry.scope = entry.scope
            entry.as_variable = var_entry
        return entry
    def declare_builtin_type(self, name, cname, utility_code = None, objstruct_cname = None):
        # Declare a builtin extension type (e.g. list, dict) together
        # with a read-only variable entry referring to its type object.
        name = EncodedString(name)
        type = PyrexTypes.BuiltinObjectType(name, cname, objstruct_cname)
        scope = CClassScope(name, outer_scope=None, visibility='extern')
        scope.directives = {}
        if name == 'bool':
            # bool cannot be subclassed in Python.
            type.is_final_type = True
        type.set_scope(scope)
        self.type_names[name] = 1
        entry = self.declare_type(name, type, None, visibility='extern')
        entry.utility_code = utility_code
        var_entry = Entry(name = entry.name,
            type = self.lookup('type').type, # make sure "type" is the first type declared...
            pos = entry.pos,
            cname = entry.type.typeptr_cname)
        var_entry.is_variable = 1
        var_entry.is_cglobal = 1
        var_entry.is_readonly = 1
        var_entry.is_builtin = 1
        var_entry.utility_code = utility_code
        var_entry.scope = self
        if Options.cache_builtins:
            var_entry.is_const = True
        entry.as_variable = var_entry
        return type
    def builtin_scope(self):
        return self
    # Table of builtin objects: name -> [C expression, Pyrex type].
    builtin_entries = {
        "type":   ["((PyObject*)&PyType_Type)", py_object_type],
        "bool":   ["((PyObject*)&PyBool_Type)", py_object_type],
        "int":    ["((PyObject*)&PyInt_Type)", py_object_type],
        "long":   ["((PyObject*)&PyLong_Type)", py_object_type],
        "float":  ["((PyObject*)&PyFloat_Type)", py_object_type],
        "complex":["((PyObject*)&PyComplex_Type)", py_object_type],
        "bytes":  ["((PyObject*)&PyBytes_Type)", py_object_type],
        "bytearray":   ["((PyObject*)&PyByteArray_Type)", py_object_type],
        "str":    ["((PyObject*)&PyString_Type)", py_object_type],
        "unicode":["((PyObject*)&PyUnicode_Type)", py_object_type],
        "tuple":  ["((PyObject*)&PyTuple_Type)", py_object_type],
        "list":   ["((PyObject*)&PyList_Type)", py_object_type],
        "dict":   ["((PyObject*)&PyDict_Type)", py_object_type],
        "set":    ["((PyObject*)&PySet_Type)", py_object_type],
        "frozenset":   ["((PyObject*)&PyFrozenSet_Type)", py_object_type],
        "slice":  ["((PyObject*)&PySlice_Type)", py_object_type],
        # "file":   ["((PyObject*)&PyFile_Type)", py_object_type],  # not in Py3
        "None":   ["Py_None", py_object_type],
        "False":  ["Py_False", py_object_type],
        "True":   ["Py_True", py_object_type],
    }
const_counter = 1 # As a temporary solution for compiling code in pxds
class ModuleScope(Scope):
    # module_name          string             Python name of the module
    # module_cname         string             C name of Python module object
    # #module_dict_cname   string             C name of module dict object
    # method_table_cname   string             C name of method table
    # doc                  string             Module doc string
    # doc_cname            string             C name of module doc string
    # utility_code_list    [UtilityCode]      Queuing utility codes for forwarding to Code.py
    # c_includes           {key: IncludeCode} C headers or verbatim code to be generated
    #                                         See process_include() for more documentation
    # string_to_entry      {string : Entry}   Map string const to entry
    # identifier_to_entry  {string : Entry}   Map identifier string const to entry
    # context              Context
    # parent_module        Scope              Parent in the import namespace
    # module_entries       {string : Entry}   For cimport statements
    # type_names           {string : 1}       Set of type names (used during parsing)
    # included_files       [string]           Cython sources included with 'include'
    # pxd_file_loaded      boolean            Corresponding .pxd file has been processed
    # cimported_modules    [ModuleScope]      Modules imported with cimport
    # types_imported       {PyrexType}        Set of types for which import code generated
    # has_import_star      boolean            Module contains import *
    # cpp                  boolean            Compiling a C++ file
    # is_cython_builtin    boolean            Is this the Cython builtin scope (or a child scope)
    # is_package           boolean            Is this a package module? (__init__)
    is_module_scope = 1
    has_import_star = 0
    is_cython_builtin = 0
    # presumably controlled by the 'old_style_globals' directive — confirm
    old_style_globals = 0
    def __init__(self, name, parent_module, context):
        # Create a module scope whose outer scope is the shared
        # builtin scope, and pre-declare the implicit module globals.
        from . import Builtin
        self.parent_module = parent_module
        outer_scope = Builtin.builtin_scope
        Scope.__init__(self, name, outer_scope, parent_module)
        if name == "__init__":
            # Treat Spam/__init__.pyx specially, so that when Python loads
            # Spam/__init__.so, initSpam() is defined.
            self.module_name = parent_module.module_name
            self.is_package = True
        else:
            self.module_name = name
            self.is_package = False
        self.module_name = EncodedString(self.module_name)
        self.context = context
        self.module_cname = Naming.module_cname
        self.module_dict_cname = Naming.moddict_cname
        self.method_table_cname = Naming.methtable_cname
        self.doc = ""
        self.doc_cname = Naming.moddoc_cname
        self.utility_code_list = []
        self.module_entries = {}
        self.c_includes = {}
        # Seed the parser's type-name set with the builtin type names.
        self.type_names = dict(outer_scope.type_names)
        self.pxd_file_loaded = 0
        self.cimported_modules = []
        self.types_imported = set()
        self.included_files = []
        self.has_extern_class = 0
        self.cached_builtins = []
        self.undeclared_cached_builtins = []
        self.namespace_cname = self.module_cname
        self._cached_tuple_types = {}
        # Standard module attributes exist implicitly in every module.
        for var_name in ['__builtins__', '__name__', '__file__', '__doc__', '__path__',
                         '__spec__', '__loader__', '__package__', '__cached__']:
            self.declare_var(EncodedString(var_name), py_object_type, None)
        self.process_include(Code.IncludeCode("Python.h", initial=True))
    def qualifying_scope(self):
        # The scope that qualifies names in this module: its parent
        # in the import namespace.
        return self.parent_module
    def global_scope(self):
        # A module scope is its own global scope.
        return self
    def lookup(self, name, language_level=None, str_is_str=None):
        # Module-level lookup: fills in the language level and the
        # str/unicode distinction before delegating to the builtin
        # scope (which uses them to resolve 'str' — see
        # BuiltinScope.lookup).
        entry = self.lookup_here(name)
        if entry is not None:
            return entry
        if language_level is None:
            language_level = self.context.language_level if self.context is not None else 3
        if str_is_str is None:
            str_is_str = language_level == 2 or (
                self.context is not None and Future.unicode_literals not in self.context.future_directives)
        return self.outer_scope.lookup(name, language_level=language_level, str_is_str=str_is_str)
    def declare_tuple_type(self, pos, components):
        # Declare (or reuse) a C tuple type for the given component
        # types. The tuple is backed by a generated struct whose fields
        # are named f0, f1, ...; the struct entry is removed from
        # type_entries so only the tuple type itself is emitted there.
        components = tuple(components)
        try:
            ttype = self._cached_tuple_types[components]
        except KeyError:
            ttype = self._cached_tuple_types[components] = PyrexTypes.c_tuple_type(components)
        cname = ttype.cname
        entry = self.lookup_here(cname)
        if not entry:
            scope = StructOrUnionScope(cname)
            for ix, component in enumerate(components):
                scope.declare_var(name="f%s" % ix, type=component, pos=pos)
            struct_entry = self.declare_struct_or_union(
                cname + '_struct', 'struct', scope, typedef_flag=True, pos=pos, cname=cname)
            self.type_entries.remove(struct_entry)
            ttype.struct_entry = struct_entry
            entry = self.declare_type(cname, ttype, pos, cname)
        ttype.entry = entry
        return entry
    def declare_builtin(self, name, pos):
        # Declare a reference to a (presumed) builtin name used in this
        # module. Unknown names are either module globals (after
        # 'import *') or flagged and resolved at runtime; known
        # builtins may be cached in a C global (Options.cache_builtins).
        if not hasattr(builtins, name) \
               and name not in Code.non_portable_builtins_map \
               and name not in Code.uncachable_builtins:
            if self.has_import_star:
                # 'import *' may have brought the name in: treat it as
                # an ordinary module-level Python variable.
                entry = self.declare_var(name, py_object_type, pos)
                return entry
            else:
                if Options.error_on_unknown_names:
                    error(pos, "undeclared name not builtin: %s" % name)
                else:
                    warning(pos, "undeclared name not builtin: %s" % name, 2)
                # unknown - assume it's builtin and look it up at runtime
                entry = self.declare(name, None, py_object_type, pos, 'private')
                entry.is_builtin = 1
                return entry
        if Options.cache_builtins:
            # Reuse a previously cached builtin entry for this name.
            for entry in self.cached_builtins:
                if entry.name == name:
                    return entry
        if name == 'globals' and not self.old_style_globals:
            # globals() is intercepted and implemented by utility code.
            return self.outer_scope.lookup('__Pyx_Globals')
        else:
            # Declared with name=None to avoid clashing with a module
            # global of the same name; the name is attached afterwards.
            entry = self.declare(None, None, py_object_type, pos, 'private')
            if Options.cache_builtins and name not in Code.uncachable_builtins:
                entry.is_builtin = 1
                entry.is_const = 1 # cached
                entry.name = name
                entry.cname = Naming.builtin_prefix + name
                self.cached_builtins.append(entry)
                self.undeclared_cached_builtins.append(entry)
            else:
                entry.is_builtin = 1
                entry.name = name
            return entry
    def find_module(self, module_name, pos, relative_level=-1):
        # Find a module in the import namespace, interpreting
        # relative imports relative to this module's parent.
        # Finds and parses the module's .pxd file if the module
        # has not been referenced before.
        #
        # relative_level: >0 = explicit relative cimport ('from .. '),
        # 0 = absolute only, -1/None = relative first, then absolute.
        relative_to = None
        absolute_fallback = False
        if relative_level is not None and relative_level > 0:
            # explicit relative cimport
            # error of going beyond top-level is handled in cimport node
            relative_to = self
            while relative_level > 0 and relative_to:
                relative_to = relative_to.parent_module
                relative_level -= 1
        elif relative_level != 0:
            # -1 or None: try relative cimport first, then absolute
            relative_to = self.parent_module
            absolute_fallback = True
        module_scope = self.global_scope()
        return module_scope.context.find_module(
            module_name, relative_to=relative_to, pos=pos, absolute_fallback=absolute_fallback)
    def find_submodule(self, name):
        # Find and return scope for a submodule of this module,
        # creating a new empty one if necessary. Doesn't parse .pxd.
        # Dotted names recurse one component at a time.
        if '.' in name:
            name, submodule = name.split('.', 1)
        else:
            submodule = None
        scope = self.lookup_submodule(name)
        if not scope:
            scope = ModuleScope(name, parent_module=self, context=self.context)
            self.module_entries[name] = scope
        if submodule:
            scope = scope.find_submodule(submodule)
        return scope
    def lookup_submodule(self, name):
        # Return scope for submodule of this module, or None.
        # Unlike find_submodule(), never creates a new scope.
        if '.' in name:
            name, submodule = name.split('.', 1)
        else:
            submodule = None
        module = self.module_entries.get(name, None)
        if submodule and module is not None:
            module = module.lookup_submodule(submodule)
        return module
    def add_include_file(self, filename, verbatim_include=None, late=False):
        """
        Add `filename` as include file. Add `verbatim_include` as
        verbatim text in the C file.
        Both `filename` and `verbatim_include` can be `None` or empty.
        `late` controls where in the generated C file the include is
        emitted (passed through to IncludeCode).
        """
        inc = Code.IncludeCode(filename, verbatim_include, late=late)
        self.process_include(inc)
def process_include(self, inc):
"""
Add `inc`, which is an instance of `IncludeCode`, to this
`ModuleScope`. This either adds a new element to the
`c_includes` dict or it updates an existing entry.
In detail: the values of the dict `self.c_includes` are
instances of `IncludeCode` containing the code to be put in the
generated C file. The keys of the dict are needed to ensure
uniqueness in two ways: if an include file is specified in
multiple "cdef extern" blocks, only one `#include` statement is
generated. Second, the same include might occur multiple times
if we find it through multiple "cimport" paths. So we use the
generated code (of the form `#include "header.h"`) as dict key.
If verbatim code does not belong to any include file (i.e. it
was put in a `cdef extern from *` block), then we use a unique
dict key: namely, the `sortkey()`.
One `IncludeCode` object can contain multiple pieces of C code:
one optional "main piece" for the include file and several other
pieces for the verbatim code. The `IncludeCode.dict_update`
method merges the pieces of two different `IncludeCode` objects
if needed.
"""
key = inc.mainpiece()
if key is None:
key = inc.sortkey()
inc.dict_update(self.c_includes, key)
inc = self.c_includes[key]
    def add_imported_module(self, scope):
        # Record a cimported module scope, pulling in its C includes
        # and, transitively, the modules it cimports itself.
        if scope not in self.cimported_modules:
            for inc in scope.c_includes.values():
                self.process_include(inc)
            self.cimported_modules.append(scope)
            for m in scope.cimported_modules:
                self.add_imported_module(m)
def add_imported_entry(self, name, entry, pos):
if entry.is_pyglobal:
# Allow cimports to follow imports.
entry.is_variable = True
if entry not in self.entries:
self.entries[name] = entry
else:
warning(pos, "'%s' redeclared " % name, 0)
    def declare_module(self, name, scope, pos):
        # Declare a cimported module. This is represented as a
        # Python module-level variable entry with a module
        # scope attached to it. Reports an error and returns
        # None if previously declared as something else.
        entry = self.lookup_here(name)
        if entry:
            if entry.is_pyglobal and entry.as_module is scope:
                return entry # Already declared as the same module
            if not (entry.is_pyglobal and not entry.as_module):
                # SAGE -- I put this here so Pyrex
                # cimport's work across directories.
                # Currently it tries to multiply define
                # every module appearing in an import list.
                # It shouldn't be an error for a module
                # name to appear again, and indeed the generated
                # code compiles fine.
                return entry
        else:
            entry = self.declare_var(name, py_object_type, pos)
            # Not a runtime variable: it only exists for cimport lookup.
            entry.is_variable = 0
        entry.as_module = scope
        self.add_imported_module(scope)
        return entry
    def declare_var(self, name, type, pos,
            cname = None, visibility = 'private',
            api = 0, in_pxd = 0, is_cdef = 0):
        # Add an entry for a global variable. If it is a Python
        # object type, and not declared with cdef, it will live
        # in the module dictionary, otherwise it will be a C
        # global variable.
        if not visibility in ('private', 'public', 'extern'):
            error(pos, "Module-level variable cannot be declared %s" % visibility)
        if not is_cdef:
            if type is unspecified_type:
                type = py_object_type
            if not (type.is_pyobject and not type.is_extension_type):
                raise InternalError(
                    "Non-cdef global variable is not a generic Python object")
        if not cname:
            defining = not in_pxd
            # extern and publicly-defined names keep their Python name
            # as C name; everything else is mangled.
            if visibility == 'extern' or (visibility == 'public' and defining):
                cname = name
            else:
                cname = self.mangle(Naming.var_prefix, name)
        entry = self.lookup_here(name)
        if entry and entry.defined_in_pxd:
            # Redeclaration of a variable that the .pxd already declared:
            # reconcile types and cnames rather than declaring afresh.
            #if visibility != 'private' and visibility != entry.visibility:
            #    warning(pos, "Variable '%s' previously declared as '%s'" % (name, entry.visibility), 1)
            if not entry.type.same_as(type):
                if visibility == 'extern' and entry.visibility == 'extern':
                    warning(pos, "Variable '%s' type does not match previous declaration" % name, 1)
                    entry.type = type
                #else:
                #    error(pos, "Variable '%s' type does not match previous declaration" % name)
            if entry.visibility != "private":
                # A public/extern pxd entry with a mangled cname gets the
                # plain name when (re)declared in the .pyx.
                mangled_cname = self.mangle(Naming.var_prefix, name)
                if entry.cname == mangled_cname:
                    cname = name
                    entry.cname = name
            if not entry.is_implemented:
                entry.is_implemented = True
            return entry
        entry = Scope.declare_var(self, name, type, pos,
                                  cname=cname, visibility=visibility,
                                  api=api, in_pxd=in_pxd, is_cdef=is_cdef)
        if is_cdef:
            entry.is_cglobal = 1
            if entry.type.declaration_value:
                entry.init = entry.type.declaration_value
            self.var_entries.append(entry)
        else:
            entry.is_pyglobal = 1
        if Options.cimport_from_pyx:
            entry.used = 1
        return entry
    def declare_cfunction(self, name, type, pos,
                          cname=None, visibility='private', api=0, in_pxd=0,
                          defining=0, modifiers=(), utility_code=None, overridable=False):
        # Module-level C function declaration: picks the C name,
        # validates extern restrictions, reconciles with a matching
        # .pxd declaration, then delegates to Scope.declare_cfunction.
        if not defining and 'inline' in modifiers:
            # TODO(github/1736): Make this an error.
            warning(pos, "Declarations should not be declared inline.", 1)
        # Add an entry for a C function.
        if not cname:
            if visibility == 'extern' or (visibility == 'public' and defining):
                cname = name
            else:
                cname = self.mangle(Naming.func_prefix, name)
        if visibility == 'extern' and type.optional_arg_count:
            error(pos, "Extern functions cannot have default arguments values.")
        entry = self.lookup_here(name)
        if entry and entry.defined_in_pxd:
            if entry.visibility != "private":
                # As in declare_var: un-mangle the cname of a
                # public/extern entry previously declared in the .pxd.
                mangled_cname = self.mangle(Naming.var_prefix, name)
                if entry.cname == mangled_cname:
                    cname = name
                    entry.cname = cname
                    entry.func_cname = cname
        entry = Scope.declare_cfunction(
            self, name, type, pos,
            cname=cname, visibility=visibility, api=api, in_pxd=in_pxd,
            defining=defining, modifiers=modifiers, utility_code=utility_code,
            overridable=overridable)
        return entry
    def declare_global(self, name, pos):
        # 'global' at module level: ensure the name exists as a
        # module-level Python variable.
        entry = self.lookup_here(name)
        if not entry:
            self.declare_var(name, py_object_type, pos)
    def use_utility_code(self, new_code):
        # Queue utility code for forwarding to Code.py at generation time.
        if new_code is not None:
            self.utility_code_list.append(new_code)
    def use_entry_utility_code(self, entry):
        # Queue both the usage and definition utility code attached to
        # an entry (either may be absent).
        if entry is None:
            return
        if entry.utility_code:
            self.utility_code_list.append(entry.utility_code)
        if entry.utility_code_definition:
            self.utility_code_list.append(entry.utility_code_definition)
    def declare_c_class(self, name, pos, defining=0, implementing=0,
            module_name=None, base_type=None, objstruct_cname=None,
            typeobj_cname=None, typeptr_cname=None, visibility='private',
            typedef_flag=0, api=0, check_size=None,
            buffer_defaults=None, shadow=0):
        """Declare an extension type (cdef class) at module level.

        Handles first declarations as well as re-declarations
        (e.g. .pxd followed by .pyx), checking consistency of base
        type, object struct name and type object name, and creating
        the class scope when the type is being defined/implemented.
        Returns the (new or existing) type entry.
        """
        # If this is a non-extern typedef class, expose the typedef, but use
        # the non-typedef struct internally to avoid needing forward
        # declarations for anonymous structs.
        if typedef_flag and visibility != 'extern':
            if not (visibility == 'public' or api):
                warning(pos, "ctypedef only valid for 'extern' , 'public', and 'api'", 2)
            objtypedef_cname = objstruct_cname
            typedef_flag = 0
        else:
            objtypedef_cname = None
        #
        # Look for previous declaration as a type
        #
        entry = self.lookup_here(name)
        if entry and not shadow:
            type = entry.type
            if not (entry.is_type and type.is_extension_type):
                entry = None # Will cause redeclaration and produce an error
            else:
                scope = type.scope
                if typedef_flag and (not scope or scope.defined):
                    self.check_previous_typedef_flag(entry, typedef_flag, pos)
                if (scope and scope.defined) or (base_type and type.base_type):
                    if base_type and base_type is not type.base_type:
                        error(pos, "Base type does not match previous declaration")
                    if base_type and not type.base_type:
                        type.base_type = base_type
        #
        # Make a new entry if needed
        #
        if not entry or shadow:
            type = PyrexTypes.PyExtensionType(
                name, typedef_flag, base_type, visibility == 'extern', check_size=check_size)
            type.pos = pos
            type.buffer_defaults = buffer_defaults
            if objtypedef_cname is not None:
                type.objtypedef_cname = objtypedef_cname
            if visibility == 'extern':
                type.module_name = module_name
            else:
                type.module_name = self.qualified_name
            if typeptr_cname:
                type.typeptr_cname = typeptr_cname
            else:
                type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
            entry = self.declare_type(name, type, pos, visibility = visibility,
                defining = 0, shadow = shadow)
            entry.is_cclass = True
            if objstruct_cname:
                type.objstruct_cname = objstruct_cname
            elif not entry.in_cinclude:
                type.objstruct_cname = self.mangle(Naming.objstruct_prefix, name)
            else:
                error(entry.pos,
                    "Object name required for 'public' or 'extern' C class")
            self.attach_var_entry_to_c_class(entry)
            self.c_class_entries.append(entry)
        #
        # Check for re-definition and create scope if needed
        #
        if not type.scope:
            if defining or implementing:
                scope = CClassScope(name = name, outer_scope = self,
                    visibility = visibility)
                scope.directives = self.directives.copy()
                if base_type and base_type.scope:
                    scope.declare_inherited_c_attributes(base_type.scope)
                type.set_scope(scope)
                self.type_entries.append(entry)
        else:
            if defining and type.scope.defined:
                error(pos, "C class '%s' already defined" % name)
            elif implementing and type.scope.implemented:
                error(pos, "C class '%s' already implemented" % name)
        #
        # Fill in options, checking for compatibility with any previous declaration
        #
        if defining:
            entry.defined_in_pxd = 1
        if implementing:   # So that filenames in runtime exceptions refer to
            entry.pos = pos  # the .pyx file and not the .pxd file
        if visibility != 'private' and entry.visibility != visibility:
            error(pos, "Class '%s' previously declared as '%s'"
                % (name, entry.visibility))
        if api:
            entry.api = 1
        if objstruct_cname:
            if type.objstruct_cname and type.objstruct_cname != objstruct_cname:
                error(pos, "Object struct name differs from previous declaration")
            type.objstruct_cname = objstruct_cname
        if typeobj_cname:
            if type.typeobj_cname and type.typeobj_cname != typeobj_cname:
                error(pos, "Type object name differs from previous declaration")
            type.typeobj_cname = typeobj_cname
        if self.directives.get('final'):
            entry.type.is_final_type = True
        # cdef classes are always exported, but we need to set it to
        # distinguish between unused Cython utility code extension classes
        entry.used = True
        #
        # Return new or existing entry
        #
        return entry
    def allocate_vtable_names(self, entry):
        # If extension type has a vtable, allocate vtable struct and
        # slot names for it.
        type = entry.type
        if type.base_type and type.base_type.vtabslot_cname:
            #print "...allocating vtabslot_cname because base type has one" ###
            type.vtabslot_cname = "%s.%s" % (
                Naming.obj_base_cname, type.base_type.vtabslot_cname)
        elif type.scope and type.scope.cfunc_entries:
            # one special case here: when inheriting from builtin
            # types, the methods may also be built-in, in which
            # case they won't need a vtable
            entry_count = len(type.scope.cfunc_entries)
            base_type = type.base_type
            while base_type:
                # FIXME: this will break if we ever get non-inherited C methods
                if not base_type.scope or entry_count > len(base_type.scope.cfunc_entries):
                    # This type adds methods of its own => needs a vtable.
                    break
                if base_type.is_builtin_type:
                    # builtin base type defines all methods => no vtable needed
                    return
                base_type = base_type.base_type
            #print "...allocating vtabslot_cname because there are C methods" ###
            type.vtabslot_cname = Naming.vtabslot_cname
        if type.vtabslot_cname:
            #print "...allocating other vtable related cnames" ###
            type.vtabstruct_cname = self.mangle(Naming.vtabstruct_prefix, entry.name)
            type.vtabptr_cname = self.mangle(Naming.vtabptr_prefix, entry.name)
    def check_c_classes_pxd(self):
        # Performs post-analysis checking and finishing up of extension types
        # being implemented in this module. This is called only for the .pxd.
        #
        # Checks all extension types declared in this scope to
        # make sure that:
        #
        #    * The extension type is fully declared
        #
        # Also allocates a name for the vtable if needed.
        #
        for entry in self.c_class_entries:
            # Check defined
            if not entry.type.scope:
                error(entry.pos, "C class '%s' is declared but not defined" % entry.name)
    def check_c_class(self, entry):
        # Finish and validate a single extension type entry: check it
        # is defined, generate missing cnames, and verify that all
        # non-inherited C methods have an implementation.
        type = entry.type
        name = entry.name
        visibility = entry.visibility
        # Check defined
        if not type.scope:
            error(entry.pos, "C class '%s' is declared but not defined" % name)
        # Generate typeobj_cname
        if visibility != 'extern' and not type.typeobj_cname:
            type.typeobj_cname = self.mangle(Naming.typeobj_prefix, name)
        ## Generate typeptr_cname
        #type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
        # Check C methods defined
        if type.scope:
            for method_entry in type.scope.cfunc_entries:
                if not method_entry.is_inherited and not method_entry.func_cname:
                    error(method_entry.pos, "C method '%s' is declared but not defined" %
                        method_entry.name)
        # Allocate vtable name if necessary
        if type.vtabslot_cname:
            #print "ModuleScope.check_c_classes: allocating vtable cname for", self ###
            type.vtable_cname = self.mangle(Naming.vtable_prefix, entry.name)
    def check_c_classes(self):
        # Performs post-analysis checking and finishing up of extension types
        # being implemented in this module. This is called only for the main
        # .pyx file scope, not for cimported .pxd scopes.
        #
        # Checks all extension types declared in this scope to
        # make sure that:
        #
        #    * The extension type is implemented
        #    * All required object and type names have been specified or generated
        #    * All non-inherited C methods are implemented
        #
        # Also allocates a name for the vtable if needed.
        #
        debug_check_c_classes = 0
        if debug_check_c_classes:
            print("Scope.check_c_classes: checking scope " + self.qualified_name)
        for entry in self.c_class_entries:
            if debug_check_c_classes:
                print("...entry %s %s" % (entry.name, entry))
                print("......type = ",  entry.type)
                print("......visibility = ", entry.visibility)
            self.check_c_class(entry)
    def check_c_functions(self):
        # Performs post-analysis checking making sure all
        # defined c functions are actually implemented.
        # Only functions declared in this scope's own .pxd (and not
        # extern or from a C include) require an implementation here.
        for name, entry in self.entries.items():
            if entry.is_cfunction:
                if (entry.defined_in_pxd
                        and entry.scope is self
                        and entry.visibility != 'extern'
                        and not entry.in_cinclude
                        and not entry.is_implemented):
                    error(entry.pos, "Non-extern C function '%s' declared but not defined" % name)
    def attach_var_entry_to_c_class(self, entry):
        # The name of an extension class has to serve as both a type
        # name and a variable name holding the type object. It is
        # represented in the symbol table by a type entry with a
        # variable entry attached to it. For the variable entry,
        # we use a read-only C global variable whose name is an
        # expression that refers to the type object.
        from . import Builtin
        var_entry = Entry(name = entry.name,
            type = Builtin.type_type,
            pos = entry.pos,
            cname = entry.type.typeptr_cname)
        var_entry.is_variable = 1
        var_entry.is_cglobal = 1
        var_entry.is_readonly = 1
        var_entry.scope = entry.scope
        entry.as_variable = var_entry
    def is_cpp(self):
        # Terminates the Scope.is_cpp() delegation chain: self.cpp is
        # the "Compiling a C++ file" flag (set externally — not
        # assigned in this class's __init__; confirm where it is set).
        return self.cpp
    def infer_types(self):
        # Module level uses the plain PyObject inferer rather than the
        # directive-selected one used for local scopes.
        from .TypeInference import PyObjectTypeInferer
        PyObjectTypeInferer().infer_types(self)
class LocalScope(Scope):
    # Scope for the local variables of a function or method body.
    # Does the function have a 'with gil:' block?
    has_with_gil_block = False
    # Transient attribute, used for symbol table variable declarations
    _in_with_gil_block = False
    def __init__(self, name, outer_scope, parent_scope = None):
        # parent_scope defaults to the lexically enclosing scope.
        if parent_scope is None:
            parent_scope = outer_scope
        Scope.__init__(self, name, outer_scope, parent_scope)
    def mangle(self, prefix, name):
        # Local names need no scope qualification: plain prefixing
        # is collision-free within a function.
        return prefix + name
    def declare_arg(self, name, type, pos):
        # Add an entry for an argument of a function.
        cname = self.mangle(Naming.var_prefix, name)
        entry = self.declare(name, cname, type, pos, 'private')
        entry.is_variable = 1
        if type.is_pyobject:
            # Python object arguments start out as NULL in the C code.
            entry.init = "0"
        entry.is_arg = 1
        #entry.borrowed = 1 # Not using borrowed arg refs for now
        self.arg_entries.append(entry)
        return entry
    def declare_var(self, name, type, pos,
            cname = None, visibility = 'private',
            api = 0, in_pxd = 0, is_cdef = 0):
        # Add an entry for a local variable.
        if visibility in ('public', 'readonly'):
            error(pos, "Local variable cannot be declared %s" % visibility)
        entry = Scope.declare_var(self, name, type, pos,
                                  cname=cname, visibility=visibility,
                                  api=api, in_pxd=in_pxd, is_cdef=is_cdef)
        if entry.type.declaration_value:
            entry.init = entry.type.declaration_value
        entry.is_local = 1
        # Remember whether the declaration occurred inside a
        # 'with gil:' block (transient flag on the scope).
        entry.in_with_gil_block = self._in_with_gil_block
        self.var_entries.append(entry)
        return entry
def declare_global(self, name, pos):
# Pull entry from global scope into local scope.
if self.lookup_here(name):
warning(pos, "'%s' redeclared ", 0)
else:
entry = self.global_scope().lookup_target(name)
self.entries[name] = entry
    def declare_nonlocal(self, name, pos):
        # Pull entry from outer scope into local scope
        orig_entry = self.lookup_here(name)
        if orig_entry and orig_entry.scope is self and not orig_entry.from_closure:
            # Already declared locally (and not via a closure) — a
            # 'nonlocal' for it is an error.
            error(pos, "'%s' redeclared as nonlocal" % name)
            orig_entry.already_declared_here()
        else:
            entry = self.lookup(name)
            if entry is None or not entry.from_closure:
                error(pos, "no binding for nonlocal '%s' found" % name)
def lookup(self, name):
# Look up name in this scope or an enclosing one.
# Return None if not found.
entry = Scope.lookup(self, name)
if entry is not None:
entry_scope = entry.scope
while entry_scope.is_genexpr_scope:
entry_scope = entry_scope.outer_scope
if entry_scope is not self and entry_scope.is_closure_scope:
if hasattr(entry.scope, "scope_class"):
raise InternalError("lookup() after scope class created.")
# The actual c fragment for the different scopes differs
# on the outside and inside, so we make a new entry
entry.in_closure = True
inner_entry = InnerEntry(entry, self)
inner_entry.is_variable = True
self.entries[name] = inner_entry
return inner_entry
return entry
def mangle_closure_cnames(self, outer_scope_cname):
for scope in self.iter_local_scopes():
for entry in scope.entries.values():
if entry.from_closure:
cname = entry.outer_entry.cname
if self.is_passthrough:
entry.cname = cname
else:
if cname.startswith(Naming.cur_scope_cname):
cname = cname[len(Naming.cur_scope_cname)+2:]
entry.cname = "%s->%s" % (outer_scope_cname, cname)
elif entry.in_closure:
entry.original_cname = entry.cname
entry.cname = "%s->%s" % (Naming.cur_scope_cname, entry.cname)
class GeneratorExpressionScope(Scope):
    """Scope for generator expressions and comprehensions. As opposed
    to generators, these can be easily inlined in some cases, so all
    we really need is a scope that holds the loop variable(s).
    """
    is_genexpr_scope = True
    def __init__(self, outer_scope):
        parent_scope = outer_scope
        # TODO: also ignore class scopes?
        # Collapse nested genexpr scopes: the parent must be a "real" scope.
        while parent_scope.is_genexpr_scope:
            parent_scope = parent_scope.parent_scope
        name = parent_scope.global_scope().next_id(Naming.genexpr_id_ref)
        Scope.__init__(self, name, outer_scope, parent_scope)
        self.directives = outer_scope.directives
        self.genexp_prefix = "%s%d%s" % (Naming.pyrex_prefix, len(name), name)
        # Class/ExtType scopes are filled at class creation time, i.e. from the
        # module init function or surrounding function.
        while outer_scope.is_genexpr_scope or outer_scope.is_c_class_scope or outer_scope.is_py_class_scope:
            outer_scope = outer_scope.outer_scope
        self.var_entries = outer_scope.var_entries # keep declarations outside
        outer_scope.subscopes.add(self)
    def mangle(self, prefix, name):
        # Stack this genexpr's unique prefix on top of the parent's mangling.
        return '%s%s' % (self.genexp_prefix, self.parent_scope.mangle(prefix, name))
    def declare_var(self, name, type, pos,
                    cname = None, visibility = 'private',
                    api = 0, in_pxd = 0, is_cdef = True):
        if type is unspecified_type:
            # if the outer scope defines a type for this variable, inherit it
            outer_entry = self.outer_scope.lookup(name)
            if outer_entry and outer_entry.is_variable:
                type = outer_entry.type # may still be 'unspecified_type' !
        # the parent scope needs to generate code for the variable, but
        # this scope must hold its name exclusively
        cname = '%s%s' % (self.genexp_prefix, self.parent_scope.mangle(Naming.var_prefix, name or self.next_id()))
        entry = self.declare(name, cname, type, pos, visibility)
        entry.is_variable = True
        if self.parent_scope.is_module_scope:
            entry.is_cglobal = True
        else:
            entry.is_local = True
        entry.in_subscope = True
        self.var_entries.append(entry)
        self.entries[name] = entry
        return entry
    def declare_pyfunction(self, name, pos, allow_redefine=False):
        # Delegate: Python functions live in the enclosing scope.
        return self.outer_scope.declare_pyfunction(
            name, pos, allow_redefine)
    def declare_lambda_function(self, func_cname, pos):
        # Delegate: lambdas live in the enclosing scope.
        return self.outer_scope.declare_lambda_function(func_cname, pos)
    def add_lambda_def(self, def_node):
        # Delegate: lambda definitions are collected by the enclosing scope.
        return self.outer_scope.add_lambda_def(def_node)
class ClosureScope(LocalScope):
    # LocalScope variant for functions whose variables may live in a
    # closure struct (see closure_cname).
    is_closure_scope = True
    def __init__(self, name, scope_name, outer_scope, parent_scope=None):
        LocalScope.__init__(self, name, outer_scope, parent_scope)
        # C name of the closure struct instance for this scope.
        self.closure_cname = "%s%s" % (Naming.closure_scope_prefix, scope_name)
    # def mangle_closure_cnames(self, scope_var):
    # for entry in self.entries.values() + self.temp_entries:
    # entry.in_closure = 1
    # LocalScope.mangle_closure_cnames(self, scope_var)
    # def mangle(self, prefix, name):
    # return "%s->%s" % (self.cur_scope_cname, name)
    # return "%s->%s" % (self.closure_cname, name)
    def declare_pyfunction(self, name, pos, allow_redefine=False):
        # Nested Python functions inside a closure are always private.
        return LocalScope.declare_pyfunction(self, name, pos, allow_redefine, visibility='private')
class StructOrUnionScope(Scope):
    # Namespace of a C struct or union.
    def __init__(self, name="?"):
        Scope.__init__(self, name, None, None)
    def declare_var(self, name, type, pos,
                    cname = None, visibility = 'private',
                    api = 0, in_pxd = 0, is_cdef = 0,
                    allow_pyobject=False, allow_memoryview=False):
        # Add an entry for an attribute.
        if not cname:
            cname = name
            if visibility == 'private':
                cname = c_safe_identifier(cname)
        # Function members are stored as function pointers.
        if type.is_cfunction:
            type = PyrexTypes.CPtrType(type)
        entry = self.declare(name, cname, type, pos, visibility)
        entry.is_variable = 1
        self.var_entries.append(entry)
        if type.is_pyobject and not allow_pyobject:
            error(pos, "C struct/union member cannot be a Python object")
        elif type.is_memoryviewslice and not allow_memoryview:
            # Memory views wrap their buffer owner as a Python object.
            error(pos, "C struct/union member cannot be a memory view")
        if visibility != 'private':
            error(pos, "C struct/union member cannot be declared %s" % visibility)
        return entry
    def declare_cfunction(self, name, type, pos,
                          cname=None, visibility='private', api=0, in_pxd=0,
                          defining=0, modifiers=(), overridable=False): # currently no utility code ...
        if overridable:
            error(pos, "C struct/union member cannot be declared 'cpdef'")
        # A function member is just a (pointer-typed) variable entry.
        return self.declare_var(name, type, pos,
                                cname=cname, visibility=visibility)
class ClassScope(Scope):
    # Abstract base class for namespace of
    # Python class or extension type.
    #
    # class_name string Python name of the class
    # scope_prefix string Additional prefix for names
    # declared in the class
    # doc string or None Doc string
    def __init__(self, name, outer_scope):
        Scope.__init__(self, name, outer_scope, outer_scope)
        self.class_name = name
        self.doc = None
    def lookup(self, name):
        # Normal scope lookup first; fall back to a synthetic entry for
        # 'classmethod' so it works correctly inside class bodies.
        entry = Scope.lookup(self, name)
        if entry:
            return entry
        if name == "classmethod":
            # We don't want to use the builtin classmethod here 'cause it won't do the
            # right thing in this scope (as the class members aren't still functions).
            # Don't want to add a cfunction to this scope 'cause that would mess with
            # the type definition, so we just return the right entry.
            entry = Entry(
                "classmethod",
                "__Pyx_Method_ClassMethod",
                PyrexTypes.CFuncType(
                    py_object_type,
                    [PyrexTypes.CFuncTypeArg("", py_object_type, None)], 0, 0))
            entry.utility_code_definition = Code.UtilityCode.load_cached("ClassMethod", "CythonFunction.c")
            self.use_entry_utility_code(entry)
            entry.is_cfunction = 1
        return entry
class PyClassScope(ClassScope):
    # Namespace of a Python class.
    #
    # class_obj_cname string C variable holding class object
    is_py_class_scope = 1
    def mangle_class_private_name(self, name):
        # Private-name mangling for class bodies, as CPython does it.
        return self.mangle_special_name(name)
    def mangle_special_name(self, name):
        # '__name' (but not '__dunder__') gets '_ClassName' prepended.
        if name and name.startswith('__') and not name.endswith('__'):
            name = EncodedString('_%s%s' % (self.class_name.lstrip('_'), name))
        return name
    def lookup_here(self, name):
        name = self.mangle_special_name(name)
        return ClassScope.lookup_here(self, name)
    def declare_var(self, name, type, pos,
                    cname = None, visibility = 'private',
                    api = 0, in_pxd = 0, is_cdef = 0):
        name = self.mangle_special_name(name)
        if type is unspecified_type:
            type = py_object_type
        # Add an entry for a class attribute.
        entry = Scope.declare_var(self, name, type, pos,
                                  cname=cname, visibility=visibility,
                                  api=api, in_pxd=in_pxd, is_cdef=is_cdef)
        entry.is_pyglobal = 1
        entry.is_pyclass_attr = 1
        return entry
    def declare_nonlocal(self, name, pos):
        # Pull entry from outer scope into local scope
        orig_entry = self.lookup_here(name)
        if orig_entry and orig_entry.scope is self and not orig_entry.from_closure:
            error(pos, "'%s' redeclared as nonlocal" % name)
            orig_entry.already_declared_here()
        else:
            entry = self.lookup(name)
            if entry is None:
                error(pos, "no binding for nonlocal '%s' found" % name)
            else:
                # FIXME: this works, but it's unclear if it's the
                # right thing to do
                self.entries[name] = entry
    def declare_global(self, name, pos):
        # Pull entry from global scope into local scope.
        if self.lookup_here(name):
            # BUG FIX: interpolate the name into the warning; the '%s'
            # placeholder was previously emitted verbatim (no '% name').
            warning(pos, "'%s' redeclared " % name, 0)
        else:
            entry = self.global_scope().lookup_target(name)
            self.entries[name] = entry
    def add_default_value(self, type):
        # Default values are stored on the enclosing (outer) scope.
        return self.outer_scope.add_default_value(type)
class CClassScope(ClassScope):
# Namespace of an extension type.
#
# parent_type CClassType
# #typeobj_cname string or None
# #objstruct_cname string
# method_table_cname string
# getset_table_cname string
# has_pyobject_attrs boolean Any PyObject attributes?
# has_memoryview_attrs boolean Any memory view attributes?
# has_cpp_class_attrs boolean Any (non-pointer) C++ attributes?
# has_cyclic_pyobject_attrs boolean Any PyObject attributes that may need GC?
# property_entries [Entry]
# defined boolean Defined in .pxd file
# implemented boolean Defined in .pyx file
# inherited_var_entries [Entry] Adapted var entries from base class
is_c_class_scope = 1
is_closure_class_scope = False
has_pyobject_attrs = False
has_memoryview_attrs = False
has_cpp_class_attrs = False
has_cyclic_pyobject_attrs = False
defined = False
implemented = False
    def __init__(self, name, outer_scope, visibility):
        ClassScope.__init__(self, name, outer_scope)
        if visibility != 'extern':
            # Only non-extern types get generated method/getset tables.
            self.method_table_cname = outer_scope.mangle(Naming.methtab_prefix, name)
            self.getset_table_cname = outer_scope.mangle(Naming.gstab_prefix, name)
        self.property_entries = []
        self.inherited_var_entries = []
    def needs_gc(self):
        # If the type or any of its base types have Python-valued
        # C attributes, then it needs to participate in GC.
        if self.has_cyclic_pyobject_attrs and not self.directives.get('no_gc', False):
            return True
        base_type = self.parent_type.base_type
        if base_type and base_type.scope is not None:
            # Inherit the decision from the base type.
            return base_type.scope.needs_gc()
        elif self.parent_type.is_builtin_type:
            return not self.parent_type.is_gc_simple
        return False
    def needs_tp_clear(self):
        """
        Do we need to generate an implementation for the tp_clear slot? Can
        be disabled to keep references for the __dealloc__ cleanup function.
        """
        # The 'no_gc_clear' directive suppresses tp_clear even for GC types.
        return self.needs_gc() and not self.directives.get('no_gc_clear', False)
def get_refcounted_entries(self, include_weakref=False,
include_gc_simple=True):
|
    def declare_var(self, name, type, pos,
                    cname = None, visibility = 'private',
                    api = 0, in_pxd = 0, is_cdef = 0):
        # Declare either a C attribute (is_cdef) or a Python-level class
        # attribute of the extension type.
        if is_cdef:
            # Add an entry for an attribute.
            if self.defined:
                error(pos,
                    "C attributes cannot be added in implementation part of"
                    " extension type defined in a pxd")
            if not self.is_closure_class_scope and get_special_method_signature(name):
                error(pos,
                    "The name '%s' is reserved for a special method."
                        % name)
            if not cname:
                cname = name
                if visibility == 'private':
                    cname = c_safe_identifier(cname)
            if type.is_cpp_class and visibility != 'extern':
                # C++ members are constructed in place; need <new> for that.
                type.check_nullary_constructor(pos)
                self.use_utility_code(Code.UtilityCode("#include <new>"))
            entry = self.declare(name, cname, type, pos, visibility)
            entry.is_variable = 1
            self.var_entries.append(entry)
            # Track attribute kinds that influence code generation (GC, dealloc).
            if type.is_memoryviewslice:
                self.has_memoryview_attrs = True
            elif type.is_cpp_class:
                self.has_cpp_class_attrs = True
            elif type.is_pyobject and (self.is_closure_class_scope or name != '__weakref__'):
                self.has_pyobject_attrs = True
                if (not type.is_builtin_type
                        or not type.scope or type.scope.needs_gc()):
                    self.has_cyclic_pyobject_attrs = True
            if visibility not in ('private', 'public', 'readonly'):
                error(pos,
                    "Attribute of extension type cannot be declared %s" % visibility)
            if visibility in ('public', 'readonly'):
                # If the field is an external typedef, we cannot be sure about the type,
                # so do conversion ourself rather than rely on the CPython mechanism (through
                # a property; made in AnalyseDeclarationsTransform).
                entry.needs_property = True
                if not self.is_closure_class_scope and name == "__weakref__":
                    error(pos, "Special attribute __weakref__ cannot be exposed to Python")
                if not (type.is_pyobject or type.can_coerce_to_pyobject(self)):
                    # we're not testing for coercion *from* Python here - that would fail later
                    error(pos, "C attribute of type '%s' cannot be accessed from Python" % type)
            else:
                entry.needs_property = False
            return entry
        else:
            if type is unspecified_type:
                type = py_object_type
            # Add an entry for a class attribute.
            entry = Scope.declare_var(self, name, type, pos,
                                      cname=cname, visibility=visibility,
                                      api=api, in_pxd=in_pxd, is_cdef=is_cdef)
            entry.is_member = 1
            entry.is_pyglobal = 1 # xxx: is_pyglobal changes behaviour in so many places that
                                  # I keep it in for now. is_member should be enough
                                  # later on
            self.namespace_cname = "(PyObject *)%s" % self.parent_type.typeptr_cname
            return entry
def declare_pyfunction(self, name, pos, allow_redefine=False):
# Add an entry for a method.
if name in richcmp_special_methods:
if self.lookup_here('__richcmp__'):
error(pos, "Cannot define both % and __richcmp__" % name)
elif name == '__richcmp__':
for n in richcmp_special_methods:
if self.lookup_here(n):
error(pos, "Cannot define both % and __richcmp__" % n)
if name == "__new__":
error(pos, "__new__ method of extension type will change semantics "
"in a future version of Pyrex and Cython. Use __cinit__ instead.")
entry = self.declare_var(name, py_object_type, pos,
visibility='extern')
special_sig = get_special_method_signature(name)
if special_sig:
# Special methods get put in the method table with a particular
# signature declared in advance.
entry.signature = special_sig
entry.is_special = 1
else:
entry.signature = pymethod_signature
entry.is_special = 0
self.pyfunc_entries.append(entry)
return entry
    def lookup_here(self, name):
        # For extension types, '__new__' is looked up as '__cinit__'.
        if not self.is_closure_class_scope and name == "__new__":
            name = EncodedString("__cinit__")
        entry = ClassScope.lookup_here(self, name)
        if entry and entry.is_builtin_cmethod:
            if not self.parent_type.is_builtin_type:
                # For subtypes of builtin types, we can only return
                # optimised C methods if the type is final.
                # Otherwise, subtypes may choose to override the
                # method, but the optimisation would prevent the
                # subtype method from being called.
                if not self.parent_type.is_final_type:
                    return None
        return entry
    def declare_cfunction(self, name, type, pos,
                          cname=None, visibility='private', api=0, in_pxd=0,
                          defining=0, modifiers=(), utility_code=None, overridable=False):
        # Declare a cdef method of the extension type, checking the signature
        # for compatibility with any previous declaration of the same name.
        if get_special_method_signature(name) and not self.parent_type.is_builtin_type:
            error(pos, "Special methods must be declared with 'def', not 'cdef'")
        args = type.args
        if not type.is_static_method:
            # First argument must be compatible with the parent type (self).
            if not args:
                error(pos, "C method has no self argument")
            elif not self.parent_type.assignable_from(args[0].type):
                error(pos, "Self argument (%s) of C method '%s' does not match parent type (%s)" %
                      (args[0].type, name, self.parent_type))
        entry = self.lookup_here(name)
        if cname is None:
            cname = c_safe_identifier(name)
        if entry:
            if not entry.is_cfunction:
                warning(pos, "'%s' redeclared " % name, 0)
            else:
                if defining and entry.func_cname:
                    error(pos, "'%s' already defined" % name)
                #print "CClassScope.declare_cfunction: checking signature" ###
                if entry.is_final_cmethod and entry.is_inherited:
                    error(pos, "Overriding final methods is not allowed")
                elif type.same_c_signature_as(entry.type, as_cmethod = 1) and type.nogil == entry.type.nogil:
                    # Fix with_gil vs nogil.
                    entry.type = entry.type.with_with_gil(type.with_gil)
                elif type.compatible_signature_with(entry.type, as_cmethod = 1) and type.nogil == entry.type.nogil:
                    if (self.defined and not in_pxd
                            and not type.same_c_signature_as_resolved_type(entry.type, as_cmethod = 1, as_pxd_definition = 1)):
                        # TODO(robertwb): Make this an error.
                        warning(pos,
                            "Compatible but non-identical C method '%s' not redeclared "
                            "in definition part of extension type '%s'. "
                            "This may cause incorrect vtables to be generated." % (
                                name, self.class_name), 2)
                        warning(entry.pos, "Previous declaration is here", 2)
                    entry = self.add_cfunction(name, type, pos, cname, visibility='ignore', modifiers=modifiers)
                else:
                    error(pos, "Signature not compatible with previous declaration")
                    error(entry.pos, "Previous declaration is here")
        else:
            # First declaration of this method in this scope.
            if self.defined:
                error(pos,
                    "C method '%s' not previously declared in definition part of"
                    " extension type '%s'" % (name, self.class_name))
            entry = self.add_cfunction(name, type, pos, cname, visibility,
                                       modifiers)
        if defining:
            entry.func_cname = self.mangle(Naming.func_prefix, name)
        entry.utility_code = utility_code
        type.entry = entry
        if u'inline' in modifiers:
            entry.is_inline_cmethod = True
        # Final/inline methods of final types can be called directly
        # (no vtable dispatch).
        if (self.parent_type.is_final_type or entry.is_inline_cmethod or
                self.directives.get('final')):
            entry.is_final_cmethod = True
            entry.final_func_cname = entry.func_cname
        return entry
    def add_cfunction(self, name, type, pos, cname, visibility, modifiers,
                      inherited=False):
        # Add a cfunction entry without giving it a func_cname.
        prev_entry = self.lookup_here(name)
        entry = ClassScope.add_cfunction(self, name, type, pos, cname,
                                         visibility, modifiers,
                                         inherited=inherited)
        entry.is_cmethod = 1
        # Keep a link to the previous declaration for overload handling.
        entry.prev_entry = prev_entry
        return entry
    def declare_builtin_cfunction(self, name, type, cname, utility_code = None):
        # overridden methods of builtin types still have their Python
        # equivalent that must be accessible to support bound methods
        name = EncodedString(name)
        entry = self.declare_cfunction(
            name, type, pos=None, cname=cname, visibility='extern', utility_code=utility_code)
        # Attach a Python-object alias entry for bound-method access.
        var_entry = Entry(name, name, py_object_type)
        var_entry.is_variable = 1
        var_entry.is_builtin = 1
        var_entry.utility_code = utility_code
        var_entry.scope = entry.scope
        entry.as_variable = var_entry
        return entry
    def declare_property(self, name, doc, pos):
        # Declare (or re-use) a property entry with its own PropertyScope
        # to hold the __get__/__set__/__del__ accessor methods.
        entry = self.lookup_here(name)
        if entry is None:
            entry = self.declare(name, name, py_object_type, pos, 'private')
        entry.is_property = 1
        entry.doc = doc
        entry.scope = PropertyScope(name,
            outer_scope = self.global_scope(), parent_scope = self)
        entry.scope.parent_type = self.parent_type
        self.property_entries.append(entry)
        return entry
    def declare_inherited_c_attributes(self, base_scope):
        # Declare entries for all the C attributes of an
        # inherited type, with cnames modified appropriately
        # to work with this type.
        def adapt(cname):
            # Access the member through the embedded base-class struct.
            # NOTE(review): the 'cname' parameter is ignored; the closure
            # variable 'base_entry.cname' is used instead (callers always
            # pass that same value, so behaviour is unaffected).
            return "%s.%s" % (Naming.obj_base_cname, base_entry.cname)
        entries = base_scope.inherited_var_entries + base_scope.var_entries
        for base_entry in entries:
            entry = self.declare(
                base_entry.name, adapt(base_entry.cname),
                base_entry.type, None, 'private')
            entry.is_variable = 1
            self.inherited_var_entries.append(entry)
        # If the class defined in a pxd, specific entries have not been added.
        # Ensure now that the parent (base) scope has specific entries
        # Iterate over a copy as get_all_specialized_function_types() will mutate
        for base_entry in base_scope.cfunc_entries[:]:
            if base_entry.type.is_fused:
                base_entry.type.get_all_specialized_function_types()
        for base_entry in base_scope.cfunc_entries:
            cname = base_entry.cname
            var_entry = base_entry.as_variable
            is_builtin = var_entry and var_entry.is_builtin
            if not is_builtin:
                cname = adapt(cname)
            entry = self.add_cfunction(base_entry.name, base_entry.type,
                                       base_entry.pos, cname,
                                       base_entry.visibility, base_entry.func_modifiers, inherited=True)
            entry.is_inherited = 1
            if base_entry.is_final_cmethod:
                entry.is_final_cmethod = True
                entry.is_inline_cmethod = base_entry.is_inline_cmethod
                if (self.parent_scope == base_scope.parent_scope or
                        entry.is_inline_cmethod):
                    entry.final_func_cname = base_entry.final_func_cname
            if is_builtin:
                entry.is_builtin_cmethod = True
                entry.as_variable = var_entry
            if base_entry.utility_code:
                entry.utility_code = base_entry.utility_code
class CppClassScope(Scope):
    # Namespace of a C++ class.
    is_cpp_class_scope = 1
    default_constructor = None
    type = None
    def __init__(self, name, outer_scope, templates=None):
        Scope.__init__(self, name, outer_scope, None)
        self.directives = outer_scope.directives
        self.inherited_var_entries = []
        if templates is not None:
            # Each template parameter becomes a placeholder type entry.
            for T in templates:
                template_entry = self.declare(
                    T, T, PyrexTypes.TemplatePlaceholderType(T), None, 'extern')
                template_entry.is_type = 1
    def declare_var(self, name, type, pos,
                    cname = None, visibility = 'extern',
                    api = 0, in_pxd = 0, is_cdef = 0, defining = 0):
        # Add an entry for an attribute.
        if not cname:
            cname = name
        entry = self.lookup_here(name)
        if defining and entry is not None:
            # Defining over an existing declaration: signatures must match.
            if entry.type.same_as(type):
                # Fix with_gil vs nogil.
                entry.type = entry.type.with_with_gil(type.with_gil)
            elif type.is_cfunction and type.compatible_signature_with(entry.type):
                entry.type = type
            else:
                error(pos, "Function signature does not match previous declaration")
        else:
            entry = self.declare(name, cname, type, pos, visibility)
        entry.is_variable = 1
        if type.is_cfunction and self.type:
            if not self.type.get_fused_types():
                # Qualify the function name with the class name.
                entry.func_cname = "%s::%s" % (self.type.empty_declaration_code(), cname)
        if name != "this" and (defining or name != "<init>"):
            self.var_entries.append(entry)
        return entry
    def declare_cfunction(self, name, type, pos,
                          cname=None, visibility='extern', api=0, in_pxd=0,
                          defining=0, modifiers=(), utility_code=None, overridable=False):
        class_name = self.name.split('::')[-1]
        if name in (class_name, '__init__') and cname is None:
            # Constructor: use the internal '<init>' name and a void return.
            cname = "%s__init__%s" % (Naming.func_prefix, class_name)
            name = '<init>'
            type.return_type = PyrexTypes.CVoidType()
            # This is called by the actual constructor, but need to support
            # arguments that cannot by called by value.
            type.original_args = type.args
            def maybe_ref(arg):
                # Pass non-reference C++ class arguments by reference.
                if arg.type.is_cpp_class and not arg.type.is_reference:
                    return PyrexTypes.CFuncTypeArg(
                        arg.name, PyrexTypes.c_ref_type(arg.type), arg.pos)
                else:
                    return arg
            type.args = [maybe_ref(arg) for arg in type.args]
        elif name == '__dealloc__' and cname is None:
            # Destructor: use the internal '<del>' name and a void return.
            cname = "%s__dealloc__%s" % (Naming.func_prefix, class_name)
            name = '<del>'
            type.return_type = PyrexTypes.CVoidType()
        if name in ('<init>', '<del>') and type.nogil:
            # A nogil constructor/destructor requires nogil base constructors.
            for base in self.type.base_classes:
                base_entry = base.scope.lookup(name)
                if base_entry and not base_entry.type.nogil:
                    error(pos, "Constructor cannot be called without GIL unless all base constructors can also be called without GIL")
                    error(base_entry.pos, "Base constructor defined here.")
        prev_entry = self.lookup_here(name)
        entry = self.declare_var(name, type, pos,
                                 defining=defining,
                                 cname=cname, visibility=visibility)
        if prev_entry and not defining:
            entry.overloaded_alternatives = prev_entry.all_alternatives()
        entry.utility_code = utility_code
        type.entry = entry
        return entry
    def declare_inherited_cpp_attributes(self, base_class):
        base_scope = base_class.scope
        template_type = base_class
        # Unwrap template specializations to find the generic template.
        while getattr(template_type, 'template_type', None):
            template_type = template_type.template_type
        if getattr(template_type, 'templates', None):
            base_templates = [T.name for T in template_type.templates]
        else:
            base_templates = ()
        # Declare entries for all the C++ attributes of an
        # inherited type, with cnames modified appropriately
        # to work with this type.
        for base_entry in \
                base_scope.inherited_var_entries + base_scope.var_entries:
            #constructor/destructor is not inherited
            if base_entry.name in ("<init>", "<del>"):
                continue
            #print base_entry.name, self.entries
            if base_entry.name in self.entries:
                base_entry.name # FIXME: is there anything to do in this case?
            entry = self.declare(base_entry.name, base_entry.cname,
                base_entry.type, None, 'extern')
            entry.is_variable = 1
            entry.is_inherited = 1
            self.inherited_var_entries.append(entry)
        for base_entry in base_scope.cfunc_entries:
            entry = self.declare_cfunction(base_entry.name, base_entry.type,
                                           base_entry.pos, base_entry.cname,
                                           base_entry.visibility, api=0,
                                           modifiers=base_entry.func_modifiers,
                                           utility_code=base_entry.utility_code)
            entry.is_inherited = 1
        for base_entry in base_scope.type_entries:
            if base_entry.name not in base_templates:
                entry = self.declare_type(base_entry.name, base_entry.type,
                                          base_entry.pos, base_entry.cname,
                                          base_entry.visibility)
                entry.is_inherited = 1
    def specialize(self, values, type_entry):
        # Build a new scope with all entries specialized for the given
        # template parameter values.
        scope = CppClassScope(self.name, self.outer_scope)
        scope.type = type_entry
        for entry in self.entries.values():
            if entry.is_type:
                scope.declare_type(entry.name,
                    entry.type.specialize(values),
                    entry.pos,
                    entry.cname,
                    template=1)
            elif entry.type.is_cfunction:
                # Specialize every overload alternative.
                for e in entry.all_alternatives():
                    scope.declare_cfunction(e.name,
                        e.type.specialize(values),
                        e.pos,
                        e.cname,
                        utility_code=e.utility_code)
            else:
                scope.declare_var(entry.name,
                    entry.type.specialize(values),
                    entry.pos,
                    entry.cname,
                    entry.visibility)
        return scope
class PropertyScope(Scope):
    # Scope holding the __get__, __set__ and __del__ methods for
    # a property of an extension type.
    #
    # parent_type PyExtensionType The type to which the property belongs
    is_property_scope = 1
    def declare_pyfunction(self, name, pos, allow_redefine=False):
        # Add an entry for a method.
        # Only the recognised property accessor names are accepted.
        signature = get_property_accessor_signature(name)
        if signature:
            entry = self.declare(name, name, py_object_type, pos, 'private')
            entry.is_special = 1
            entry.signature = signature
            return entry
        else:
            error(pos, "Only __get__, __set__ and __del__ methods allowed "
                "in a property declaration")
            return None
class CConstOrVolatileScope(Scope):
    # Wraps another scope, presenting its entries with a
    # const/volatile-qualified type.
    def __init__(self, base_type_scope, is_const=0, is_volatile=0):
        Scope.__init__(
            self,
            'cv_' + base_type_scope.name,
            base_type_scope.outer_scope,
            base_type_scope.parent_scope)
        self.base_type_scope = base_type_scope
        self.is_const = is_const
        self.is_volatile = is_volatile
    def lookup_here(self, name):
        entry = self.base_type_scope.lookup_here(name)
        if entry is not None:
            # Copy the entry so the qualified type does not leak into
            # the underlying scope.
            entry = copy.copy(entry)
            entry.type = PyrexTypes.c_const_or_volatile_type(
                entry.type, self.is_const, self.is_volatile)
            return entry
class TemplateScope(Scope):
    # Scope used while processing a template definition; inherits the
    # compiler directives of its enclosing scope.
    def __init__(self, name, outer_scope):
        Scope.__init__(self, name, outer_scope, None)
        self.directives = outer_scope.directives
| py_attrs = []
py_buffers = []
memoryview_slices = []
for entry in self.var_entries:
if entry.type.is_pyobject:
if include_weakref or (self.is_closure_class_scope or entry.name != "__weakref__"):
if include_gc_simple or not entry.type.is_gc_simple:
py_attrs.append(entry)
elif entry.type == PyrexTypes.c_py_buffer_type:
py_buffers.append(entry)
elif entry.type.is_memoryviewslice:
memoryview_slices.append(entry)
have_entries = py_attrs or py_buffers or memoryview_slices
return have_entries, (py_attrs, py_buffers, memoryview_slices) |
index.js | import React from 'react';
import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
import { Row, Col, Card, Button } from 'reactstrap';
import GaugeChart from 'react-gauge-chart';
export default function | () {
return (
<>
<Row>
<Col xl="4">
<Card className="p-4 text-center mb-5">
<div className="card-tr-actions">
<div className="badge badge-neutral-success text-success">
+ 0.32%
</div>
</div>
<div className="d-60 rounded-sm border-0 mb-4 mx-auto card-icon-wrapper bg-success text-white btn-icon text-center shadow-success">
<FontAwesomeIcon
icon={['far', 'bell']}
className="font-size-xl"
/>
</div>
<div className="display-4 mt-2 text-second font-weight-bold">
Hal9000
</div>
<div className="text-black-50 pt-2">
This server has a <b className="text-success">regular</b> load at
the moment.
</div>
<div className="py-4 my-2">
<GaugeChart
hideText
id="chartsGauges2A"
nrOfLevels={6}
colors={['#1bc943', '#f4772e', '#f83245']}
arcWidth={0.3}
percent={0.27}
/>
</div>
<div className="d-flex align-items-center justify-content-center">
<Button size="sm" color="second" outline className="btn-pill">
Server Dashboard
</Button>
</div>
</Card>
</Col>
<Col xl="4">
<Card className="p-4 text-center mb-5">
<div className="card-tr-actions">
<div className="badge badge-neutral-warning text-warning">
+ 57.32%
</div>
</div>
<div className="d-60 rounded-sm border-0 mx-auto mb-4 card-icon-wrapper pulse-animation bg-danger text-white btn-icon text-center shadow-danger">
<FontAwesomeIcon
icon={['fas', 'exclamation']}
className="font-size-xl"
/>
</div>
<div className="display-4 mt-2 text-second font-weight-bold">
Optimus
</div>
<div className="text-black-50 pt-2">
This server has a <b className="text-danger">high</b> load at the
moment.
</div>
<div className="py-4 my-2">
<GaugeChart
hideText
id="chartsGauges2B"
nrOfLevels={6}
colors={[
'rgba(248,50,69,0.1)',
'rgba(248,50,69,0.5)',
'rgba(248,50,69,0.99)'
]}
arcWidth={0.3}
percent={0.89}
/>
</div>
<div className="d-flex align-items-center justify-content-center">
<Button size="sm" color="second" outline className="btn-pill">
Server Dashboard
</Button>
</div>
</Card>
</Col>
<Col xl="4">
<Card className="bg-deep-blue text-center p-4">
<div className="card-tr-actions">
<div className="badge badge-second text-white">+ 57.32%</div>
</div>
<div className="d-60 rounded-sm border-0 mx-auto mb-4 card-icon-wrapper bg-plum-plate text-white btn-icon text-center shadow-sm">
<FontAwesomeIcon
icon={['far', 'user']}
className="font-size-xl"
/>
</div>
<div className="display-4 mt-2 text-second font-weight-bold">
Kitt2
</div>
<div className="text-black-50 pt-2">
This server has a <b className="text-white">low</b> load at the
moment.
</div>
<div className="py-4 my-2">
<GaugeChart
hideText
id="chartsGauges2C"
nrOfLevels={6}
colors={[
'rgba(255,255,255,0.2)',
'rgba(255,255,255,0.5)',
'rgba(255,255,255,0.8)'
]}
arcWidth={0.3}
percent={0.55}
/>
</div>
<div className="d-flex align-items-center justify-content-center">
<Button size="sm" color="second" outline className="btn-pill">
Server Dashboard
</Button>
</div>
</Card>
</Col>
</Row>
</>
);
}
| LivePreviewExample |
dmactl.rs | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
    // Raw 32-bit value that will be written back to the register.
    bits: u32,
}
impl super::DMACTL {
    #[doc = r" Modifies the contents of the register"]
    #[inline]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        // Read-modify-write: start reader and writer from the current value.
        let current = self.register.get();
        let reader = R { bits: current };
        let mut writer = W { bits: current };
        f(&reader, &mut writer);
        self.register.set(writer.bits);
    }
    #[doc = r" Reads the contents of the register"]
    #[inline]
    pub fn read(&self) -> R {
        let bits = self.register.get();
        R { bits }
    }
    #[doc = r" Writes to the register"]
    #[inline]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        // Start from the reset value, let the closure set fields, then commit.
        let mut writer = W::reset_value();
        f(&mut writer);
        self.register.set(writer.bits);
    }
    #[doc = r" Writes the reset value to the register"]
    #[inline]
    pub fn reset(&self) {
        self.write(|w| w)
    }
}
#[doc = r" Value of the field"]
pub struct DMAONERRR {
    bits: bool,
}
impl DMAONERRR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
#[doc = r" Value of the field"]
pub struct TXDMAER {
    bits: bool,
}
impl TXDMAER {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
#[doc = r" Value of the field"]
pub struct RXDMAER {
    bits: bool,
}
impl RXDMAER {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
#[doc = r" Proxy"]
pub struct _DMAONERRW<'a> {
    w: &'a mut W,
}
impl<'a> _DMAONERRW<'a> {
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 2: clear it, then set it to the requested value.
        const OFFSET: u8 = 2;
        self.w.bits = (self.w.bits & !(1u32 << OFFSET)) | ((value as u32) << OFFSET);
        self.w
    }
}
#[doc = r" Proxy"]
pub struct _TXDMAEW<'a> {
    w: &'a mut W,
}
impl<'a> _TXDMAEW<'a> {
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 1: clear it, then set it to the requested value.
        const OFFSET: u8 = 1;
        self.w.bits = (self.w.bits & !(1u32 << OFFSET)) | ((value as u32) << OFFSET);
        self.w
    }
}
#[doc = r" Proxy"]
pub struct _RXDMAEW<'a> {
w: &'a mut W,
}
impl<'a> _RXDMAEW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true) | pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bit 2 - DMA on error. If this bit is set to 1, the DMA receive request outputs (for single and burst requests) are disabled when the UART error interrupt is asserted (more specifically if any of the error interrupts RIS.PERIS, RIS.BERIS, RIS.FERIS or RIS.OERIS are asserted)."]
#[inline]
pub fn dmaonerr(&self) -> DMAONERRR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 2;
((self.bits >> OFFSET) & MASK as u32) != 0
};
DMAONERRR { bits }
}
#[doc = "Bit 1 - Transmit DMA enable. If this bit is set to 1, DMA for the transmit FIFO is enabled."]
#[inline]
pub fn txdmae(&self) -> TXDMAER {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 1;
((self.bits >> OFFSET) & MASK as u32) != 0
};
TXDMAER { bits }
}
#[doc = "Bit 0 - Receive DMA enable. If this bit is set to 1, DMA for the receive FIFO is enabled."]
#[inline]
pub fn rxdmae(&self) -> RXDMAER {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) != 0
};
RXDMAER { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bit 2 - DMA on error. If this bit is set to 1, the DMA receive request outputs (for single and burst requests) are disabled when the UART error interrupt is asserted (more specifically if any of the error interrupts RIS.PERIS, RIS.BERIS, RIS.FERIS or RIS.OERIS are asserted)."]
#[inline]
pub fn dmaonerr(&mut self) -> _DMAONERRW {
_DMAONERRW { w: self }
}
#[doc = "Bit 1 - Transmit DMA enable. If this bit is set to 1, DMA for the transmit FIFO is enabled."]
#[inline]
pub fn txdmae(&mut self) -> _TXDMAEW {
_TXDMAEW { w: self }
}
#[doc = "Bit 0 - Receive DMA enable. If this bit is set to 1, DMA for the receive FIFO is enabled."]
#[inline]
pub fn rxdmae(&mut self) -> _RXDMAEW {
_RXDMAEW { w: self }
}
} | }
#[doc = r" Clears the field bit"] |
lab.rs | //! The CIELAB perceptually uniform device-independent color space
#![allow(clippy::many_single_char_names)]
#![allow(non_snake_case)]
use crate::channel::{
ChannelCast, ChannelFormatCast, ColorChannel, FreeChannel, FreeChannelScalar, PosFreeChannel,
};
use crate::color::{Bounded, Broadcast, Color, FromTuple, HomogeneousColor, Lerp};
use crate::tags::LabTag;
use crate::white_point::{UnitWhitePoint, WhitePoint};
use crate::xyz::Xyz;
#[cfg(feature = "approx")]
use approx;
use num_traits;
use std::fmt;
/// The CIELAB perceptually uniform device-independent color space
///
/// Lab is a color space obtained by a non-linear transformation for XYZ that is intended to be
/// perceptually uniform, that is, such that a euclidean distance in any direction appears to change
/// the same amount. Unlike XYZ, Lab spaces require a reference white point in order to be defined.
/// This means that there are many different lab spaces that are incompatible because of having different
/// white points. Like XYZ, most values in `Lab` lie outside the visible gamut of the eye.
///
/// The `L` value represents lightness, while a and b are green vs red and blue vs yellow respectively.
/// Lab is one of two commonly used perceptually uniform spaces, the other being [`Luv`](struct.Luv.html).
///
/// A polar version of `Lab` exists as [`Lchab`](struct.Lchab.html). Lchab is to Lab as Hsv is to Rgb,
/// and is generally easier to reason about.
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd)]
pub struct Lab<T, W> {
L: PosFreeChannel<T>,
a: FreeChannel<T>,
b: FreeChannel<T>,
white_point: W,
}
impl<T, W> Lab<T, W>
where
T: FreeChannelScalar,
W: UnitWhitePoint<T>,
{
/// Construct a new `Lab` value with a named white point and channels.
///
/// Unlike `new_with_whitepoint`, `new` constructs a default instance of a [`UnitWhitePoint`](white_point/trait.UnitWhitePoint.html).
/// It is only valid when `W` is a `UnitWhitePoint`.
pub fn new(L: T, a: T, b: T) -> Self {
Lab {
L: PosFreeChannel::new(L),
a: FreeChannel::new(a),
b: FreeChannel::new(b),
white_point: W::default(),
}
}
}
impl<T, W> Lab<T, W>
where
T: FreeChannelScalar,
W: WhitePoint<T>,
{
/// Construct a new `Lab` value with a given white point and channels
pub fn new_with_whitepoint(L: T, a: T, b: T, white_point: W) -> Self {
Lab {
L: PosFreeChannel::new(L),
a: FreeChannel::new(a),
b: FreeChannel::new(b),
white_point,
}
}
/// Convert the internal channel scalar format
pub fn color_cast<TOut>(&self) -> Lab<TOut, W>
where
T: ChannelFormatCast<TOut>,
TOut: FreeChannelScalar,
{
Lab {
L: self.L.clone().channel_cast(),
a: self.a.clone().channel_cast(),
b: self.b.clone().channel_cast(),
white_point: self.white_point.clone(),
}
}
/// Returns the `L` lightness channel scalar
pub fn L(&self) -> T {
self.L.0.clone()
}
/// Returns the `a` green-red channel scalar
pub fn a(&self) -> T {
self.a.0.clone()
}
/// Returns the `b` yellow-blue channel scalar
pub fn b(&self) -> T {
self.b.0.clone()
}
/// Returns a mutable reference to the `L` lightness channel scalar
pub fn L_mut(&mut self) -> &mut T {
&mut self.L.0
}
/// Returns a mutable reference to the `a` green-red channel scalar
pub fn a_mut(&mut self) -> &mut T {
&mut self.a.0
}
/// Returns a mutable reference to the `b` yellow-blue channel scalar
pub fn b_mut(&mut self) -> &mut T {
&mut self.b.0
}
/// Set the `L` channel scalar
pub fn set_L(&mut self, val: T) {
self.L.0 = val;
}
/// Set the `a` channel scalar
pub fn set_a(&mut self, val: T) {
self.a.0 = val;
}
/// Set the `b` channel scalar
pub fn set_b(&mut self, val: T) {
self.b.0 = val;
}
/// Returns a reference to the white point for the `Lab` color space
pub fn white_point(&self) -> &W {
&self.white_point
}
}
impl<T, W> Color for Lab<T, W>
where
T: FreeChannelScalar,
W: WhitePoint<T>,
{
type Tag = LabTag;
type ChannelsTuple = (T, T, T);
#[inline]
fn num_channels() -> u32 {
3
}
fn to_tuple(self) -> Self::ChannelsTuple {
(self.L.0, self.a.0, self.b.0)
}
}
impl<T, W> FromTuple for Lab<T, W>
where
T: FreeChannelScalar,
W: UnitWhitePoint<T>,
{
fn from_tuple(values: (T, T, T)) -> Self {
let (L, a, b) = values;
Lab::new(L, a, b)
}
}
impl<T, W> HomogeneousColor for Lab<T, W>
where
T: FreeChannelScalar,
W: WhitePoint<T>,
{
type ChannelFormat = T;
fn clamp(self, min: T, max: T) -> Self {
Lab {
L: self.L.clamp(min.clone(), max.clone()),
a: self.a.clamp(min.clone(), max.clone()),
b: self.b.clamp(min, max),
white_point: self.white_point,
}
}
}
impl<T, W> Broadcast for Lab<T, W>
where
T: FreeChannelScalar,
W: UnitWhitePoint<T>,
{
fn broadcast(value: T) -> Self {
Lab::new(value.clone(), value.clone(), value)
}
}
impl<T, W> Bounded for Lab<T, W>
where
T: FreeChannelScalar,
W: WhitePoint<T>,
{
fn normalize(self) -> Self {
Lab::new_with_whitepoint(self.L.normalize().0, self.a(), self.b(), self.white_point)
}
fn is_normalized(&self) -> bool {
self.L.is_normalized()
}
}
impl<T, W> Lerp for Lab<T, W>
where
T: FreeChannelScalar + Lerp,
W: WhitePoint<T>,
{
type Position = <FreeChannel<T> as Lerp>::Position;
impl_color_lerp_square!(Lab { L, a, b }, copy = { white_point });
}
#[cfg(feature = "approx")]
impl<T, W> approx::AbsDiffEq for Lab<T, W>
where
T: FreeChannelScalar + approx::AbsDiffEq,
T::Epsilon: Clone,
W: WhitePoint<T>,
{
impl_abs_diff_eq!({L, a, b});
}
#[cfg(feature = "approx")]
impl<T, W> approx::RelativeEq for Lab<T, W>
where
T: FreeChannelScalar + approx::RelativeEq,
T::Epsilon: Clone,
W: WhitePoint<T>,
{
impl_rel_eq!({L, a, b});
}
#[cfg(feature = "approx")]
impl<T, W> approx::UlpsEq for Lab<T, W>
where
T: FreeChannelScalar + approx::UlpsEq,
T::Epsilon: Clone,
W: WhitePoint<T>,
{
impl_ulps_eq!({L, a, b});
}
impl<T, W> Default for Lab<T, W>
where
T: FreeChannelScalar,
W: UnitWhitePoint<T>,
{
fn default() -> Self {
Lab::new(T::default(), T::default(), T::default())
}
}
impl<T, W> fmt::Display for Lab<T, W>
where
T: FreeChannelScalar + fmt::Display,
W: WhitePoint<T>,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "L*a*b*({}, {}, {})", self.L, self.a, self.b)
}
}
impl<T, W> Lab<T, W>
where
T: FreeChannelScalar,
W: WhitePoint<T>,
{
/// Construct an `Lab` value from an `Xyz` instance and a white point
pub fn from_xyz(from: &Xyz<T>, wp: W) -> Lab<T, W> {
let wp_xyz = wp.get_xyz();
let x = from.x() / wp_xyz.x();
let y = from.y() / wp_xyz.y();
let z = from.z() / wp_xyz.z();
let L = num_traits::cast::<_, T>(116.0).unwrap() * Lab::<T, W>::lab_f(y)
- num_traits::cast(16.0).unwrap();
let a = num_traits::cast::<_, T>(500.0).unwrap()
* (Lab::<T, W>::lab_f(x) - Lab::<T, W>::lab_f(y));
let b = num_traits::cast::<_, T>(200.0).unwrap()
* (Lab::<T, W>::lab_f(y) - Lab::<T, W>::lab_f(z));
Lab::new_with_whitepoint(L, a, b, wp)
}
/// Construct an `Xyz` value from `self`
pub fn to_xyz(&self) -> Xyz<T> {
let wp = self.white_point.get_xyz();
let fy = Self::inv_f_y(self.L());
let fx = Self::inv_f_x(self.a(), fy);
let fz = Self::inv_f_z(self.b(), fy);
let x = Self::calc_xz(fx) * wp.x();
let y = Self::calc_y(self.L()) * wp.y();
let z = Self::calc_xz(fz) * wp.z();
Xyz::new(x, y, z)
}
fn lab_f(channel: T) -> T {
if channel > Self::epsilon() {
channel.cbrt()
} else {
(Self::kappa() * channel + num_traits::cast(16.0).unwrap())
/ num_traits::cast(116.0).unwrap()
}
}
fn calc_xz(f: T) -> T {
let f3 = f * f * f;
if f3 > Self::epsilon() {
f3
} else {
(num_traits::cast::<_, T>(116.0).unwrap() * f
- num_traits::cast::<_, T>(16.00).unwrap())
/ Self::kappa()
}
}
fn calc_y(L: T) -> T {
if L > Self::kappa() * Self::epsilon() {
let num = (L + num_traits::cast::<_, T>(16.0).unwrap())
/ num_traits::cast::<_, T>(116.0).unwrap();
num * num * num
} else {
L / Self::kappa()
}
}
fn inv_f_x(a: T, fy: T) -> T {
a / num_traits::cast::<_, T>(500.0).unwrap() + fy
}
fn inv_f_y(L: T) -> T {
(L + num_traits::cast::<_, T>(16.0).unwrap()) / num_traits::cast::<_, T>(116.0).unwrap()
}
fn inv_f_z(b: T, fy: T) -> T {
fy - b / num_traits::cast::<_, T>(200.0).unwrap()
}
#[inline]
/// Return the $`\epsilon`$ constant used in the Lab conversion
///
/// For a description of the value, visit [`BruceLindbloom.com`](http://www.brucelindbloom.com/LContinuity.html).
pub fn epsilon() -> T {
num_traits::cast(0.008856451679035631).unwrap()
}
#[inline]
/// Return the $`\kappa`$ constant used in the Lab conversion
///
/// For a description of the value, visit [`BruceLindbloom.com`](http://www.brucelindbloom.com/LContinuity.html).
pub fn kappa() -> T {
num_traits::cast(903.2962962963).unwrap()
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::white_point::*;
use crate::xyz::Xyz;
use approx::*;
#[test]
fn test_construct() {
let c1 = Lab::<_, D65>::new(82.00, -32.0, 77.7);
assert_relative_eq!(c1.L(), 82.00);
assert_relative_eq!(c1.a(), -32.0);
assert_relative_eq!(c1.b(), 77.7);
assert_eq!(c1.to_tuple(), (82.0, -32.0, 77.7));
assert_relative_eq!(Lab::from_tuple(c1.to_tuple()), c1);
let c2 = Lab::<_, D65>::new(0.0, -86.0, -11.0);
assert_relative_eq!(c2.L(), 0.0);
assert_relative_eq!(c2.a(), -86.0);
assert_relative_eq!(c2.b(), -11.0);
assert_eq!(c2.to_tuple(), (0.0, -86.0, -11.0));
assert_relative_eq!(Lab::from_tuple(c2.to_tuple()), c2);
}
#[test]
fn test_lerp() {
let c1 = Lab::<_, D65>::new(55.0, 25.0, 80.0);
let c2 = Lab::<_, D65>::new(100.0, -25.0, 20.0);
assert_relative_eq!(c1.lerp(&c2, 0.0), c1);
assert_relative_eq!(c1.lerp(&c2, 1.0), c2);
assert_relative_eq!(c1.lerp(&c2, 0.5), Lab::<_, D65>::new(77.5, 0.0, 50.0));
assert_relative_eq!(c1.lerp(&c2, 0.25), Lab::<_, D65>::new(66.25, 12.5, 65.0));
}
#[test]
fn test_normalize() {
let c1 = Lab::<_, D65>::new(100.0, -50.0, 50.0);
assert!(c1.is_normalized());
assert_relative_eq!(c1.normalize(), c1);
let c2 = Lab::<_, D65>::new(25.0, 250.0, -1000.0);
assert!(c2.is_normalized());
assert_relative_eq!(c2.normalize(), c2);
let c3 = Lab::<_, D65>::new(-25.0, 0.0, 0.0);
assert!(!c3.is_normalized());
assert_relative_eq!(c3.normalize(), Lab::new(0.0, 0.0, 0.0));
}
#[test]
fn test_from_xyz() {
let c1 = Xyz::new(0.3, 0.22, 0.5);
let t1 = Lab::from_xyz(&c1, D65);
assert_relative_eq!(t1, Lab::new(54.0270, 38.5919, -33.5640), epsilon = 1e-4);
assert_relative_eq!(t1.to_xyz(), c1, epsilon = 1e-4);
let c2 = Xyz::new(0.0, 0.0, 0.0);
let t2 = Lab::from_xyz(&c2, D65);
assert_relative_eq!(t2, Lab::new(0.0, 0.0, 0.0), epsilon = 1e-4);
assert_relative_eq!(t2.to_xyz(), c2, epsilon = 1e-4);
let c3 = Xyz::new(1.0, 1.0, 1.0);
let t3 = Lab::from_xyz(&c3, D65);
assert_relative_eq!(t3, Lab::new(100.0, 8.5385, 5.5939), epsilon = 1e-4);
assert_relative_eq!(t3.to_xyz(), c3, epsilon = 1e-4);
let c4 = Xyz::new(0.6, 0.8, 0.1);
let t4 = Lab::from_xyz(&c4, D50);
let t4_2 = Lab::from_xyz(&c4, E);
assert_relative_eq!(t4, Lab::new(91.6849, -37.2895, 86.6924), epsilon = 1e-4);
assert_relative_eq!(t4.to_xyz(), c4, epsilon = 1e-4);
assert!(t4.to_xyz() != c4);
assert_relative_eq!(t4_2, Lab::new(91.6849, -42.4425, 92.8319), epsilon = 1e-3);
assert_relative_eq!(t4_2.to_xyz(), c4, epsilon = 1e-4);
let c5 = D65.get_xyz();
let t5 = Lab::from_xyz(&c5, D65);
assert_relative_eq!(t5, Lab::new(100.0, 0.0, 0.0), epsilon = 1e-4);
assert_relative_eq!(t5.to_xyz(), c5);
}
#[test]
fn test_to_xyz() {
let c1 = Lab::new(50.0, 33.0, -66.0);
let t1 = c1.to_xyz();
assert_relative_eq!(t1, Xyz::new(0.243326, 0.184187, 0.791023), epsilon = 1e-4);
assert_relative_eq!(Lab::from_xyz(&t1, D65), c1, epsilon = 1e-4);
let c2 = Lab::<_, D50>::new(65.0, 47.5, 11.1);
let t2 = c2.to_xyz();
assert_relative_eq!(t2, Xyz::new(0.4811337, 0.340472, 0.219151), epsilon = 1e-3);
assert_relative_eq!(Lab::from_xyz(&t2, D50), c2, epsilon = 1e-3);
let c3 = Lab::<_, D75>::new(100.0, -100.0, -100.0);
let t3 = c3.to_xyz();
assert_relative_eq!(t3, Xyz::new(0.486257, 1.00, 4.139032), epsilon = 1e-4);
assert_relative_eq!(Lab::from_xyz(&t3, D75), c3, epsilon = 1e-4);
}
#[test]
fn | () {
let c1 = Lab::<_, D65>::new(30.0, -50.0, 76.0);
assert_relative_eq!(c1.color_cast(), c1);
assert_relative_eq!(c1.color_cast(), Lab::new(30.0f32, -50.0, 76.0));
assert_relative_eq!(c1.color_cast::<f32>().color_cast(), c1);
}
}
| test_color_cast |
ranges.rs | use clippy_utils::diagnostics::{span_lint, span_lint_and_sugg, span_lint_and_then};
use clippy_utils::source::{snippet, snippet_opt, snippet_with_applicability};
use clippy_utils::sugg::Sugg;
use clippy_utils::{get_parent_expr, in_constant, is_integer_const, meets_msrv, single_segment_path};
use clippy_utils::{higher, SpanlessEq};
use if_chain::if_chain;
use rustc_ast::ast::RangeLimits;
use rustc_errors::Applicability;
use rustc_hir::{BinOpKind, Expr, ExprKind, PathSegment, QPath};
use rustc_lint::{LateContext, LateLintPass, LintContext};
use rustc_middle::ty;
use rustc_semver::RustcVersion;
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::source_map::{Span, Spanned};
use rustc_span::sym;
use rustc_span::symbol::Ident;
use std::cmp::Ordering;
declare_clippy_lint! {
/// **What it does:** Checks for zipping a collection with the range of
/// `0.._.len()`.
///
/// **Why is this bad?** The code is better expressed with `.enumerate()`.
///
/// **Known problems:** None.
///
/// **Example:**
/// ```rust
/// # let x = vec![1];
/// x.iter().zip(0..x.len());
/// ```
/// Could be written as
/// ```rust
/// # let x = vec![1];
/// x.iter().enumerate();
/// ```
pub RANGE_ZIP_WITH_LEN,
complexity,
"zipping iterator with a range when `enumerate()` would do"
}
declare_clippy_lint! {
/// **What it does:** Checks for exclusive ranges where 1 is added to the
/// upper bound, e.g., `x..(y+1)`.
///
/// **Why is this bad?** The code is more readable with an inclusive range
/// like `x..=y`.
///
/// **Known problems:** Will add unnecessary pair of parentheses when the
/// expression is not wrapped in a pair but starts with a opening parenthesis
/// and ends with a closing one.
/// I.e., `let _ = (f()+1)..(f()+1)` results in `let _ = ((f()+1)..=f())`.
///
/// Also in many cases, inclusive ranges are still slower to run than
/// exclusive ranges, because they essentially add an extra branch that
/// LLVM may fail to hoist out of the loop.
///
/// This will cause a warning that cannot be fixed if the consumer of the
/// range only accepts a specific range type, instead of the generic
/// `RangeBounds` trait
/// ([#3307](https://github.com/rust-lang/rust-clippy/issues/3307)).
///
/// **Example:**
/// ```rust,ignore
/// for x..(y+1) { .. }
/// ```
/// Could be written as
/// ```rust,ignore
/// for x..=y { .. }
/// ```
pub RANGE_PLUS_ONE,
pedantic,
"`x..(y+1)` reads better as `x..=y`"
}
declare_clippy_lint! {
/// **What it does:** Checks for inclusive ranges where 1 is subtracted from
/// the upper bound, e.g., `x..=(y-1)`.
///
/// **Why is this bad?** The code is more readable with an exclusive range
/// like `x..y`.
///
/// **Known problems:** This will cause a warning that cannot be fixed if
/// the consumer of the range only accepts a specific range type, instead of
/// the generic `RangeBounds` trait
/// ([#3307](https://github.com/rust-lang/rust-clippy/issues/3307)).
///
/// **Example:**
/// ```rust,ignore
/// for x..=(y-1) { .. }
/// ```
/// Could be written as
/// ```rust,ignore
/// for x..y { .. }
/// ```
pub RANGE_MINUS_ONE,
pedantic,
"`x..=(y-1)` reads better as `x..y`"
}
declare_clippy_lint! {
/// **What it does:** Checks for range expressions `x..y` where both `x` and `y`
/// are constant and `x` is greater or equal to `y`.
///
/// **Why is this bad?** Empty ranges yield no values so iterating them is a no-op.
/// Moreover, trying to use a reversed range to index a slice will panic at run-time.
///
/// **Known problems:** None.
///
/// **Example:**
///
/// ```rust,no_run
/// fn main() {
/// (10..=0).for_each(|x| println!("{}", x));
///
/// let arr = [1, 2, 3, 4, 5];
/// let sub = &arr[3..1];
/// }
/// ```
/// Use instead:
/// ```rust
/// fn main() {
/// (0..=10).rev().for_each(|x| println!("{}", x));
///
/// let arr = [1, 2, 3, 4, 5];
/// let sub = &arr[1..3];
/// }
/// ```
pub REVERSED_EMPTY_RANGES,
correctness,
"reversing the limits of range expressions, resulting in empty ranges"
}
declare_clippy_lint! {
/// **What it does:** Checks for expressions like `x >= 3 && x < 8` that could
/// be more readably expressed as `(3..8).contains(x)`.
///
/// **Why is this bad?** `contains` expresses the intent better and has less
/// failure modes (such as fencepost errors or using `||` instead of `&&`).
///
/// **Known problems:** None.
///
/// **Example:**
///
/// ```rust
/// // given
/// let x = 6;
///
/// assert!(x >= 3 && x < 8);
/// ```
/// Use instead:
/// ```rust
///# let x = 6;
/// assert!((3..8).contains(&x));
/// ```
pub MANUAL_RANGE_CONTAINS,
style,
"manually reimplementing {`Range`, `RangeInclusive`}`::contains`"
}
const MANUAL_RANGE_CONTAINS_MSRV: RustcVersion = RustcVersion::new(1, 35, 0);
pub struct Ranges {
msrv: Option<RustcVersion>,
}
impl Ranges {
#[must_use]
pub fn new(msrv: Option<RustcVersion>) -> Self {
Self { msrv }
}
}
impl_lint_pass!(Ranges => [
RANGE_ZIP_WITH_LEN,
RANGE_PLUS_ONE,
RANGE_MINUS_ONE,
REVERSED_EMPTY_RANGES,
MANUAL_RANGE_CONTAINS,
]);
impl<'tcx> LateLintPass<'tcx> for Ranges {
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
match expr.kind {
ExprKind::MethodCall(ref path, _, ref args, _) => {
check_range_zip_with_len(cx, path, args, expr.span);
},
ExprKind::Binary(ref op, ref l, ref r) => {
if meets_msrv(self.msrv.as_ref(), &MANUAL_RANGE_CONTAINS_MSRV) {
check_possible_range_contains(cx, op.node, l, r, expr);
}
},
_ => {},
}
check_exclusive_range_plus_one(cx, expr);
check_inclusive_range_minus_one(cx, expr);
check_reversed_empty_range(cx, expr);
}
extract_msrv_attr!(LateContext);
}
fn check_possible_range_contains(cx: &LateContext<'_>, op: BinOpKind, l: &Expr<'_>, r: &Expr<'_>, expr: &Expr<'_>) {
if in_constant(cx, expr.hir_id) {
return;
}
let span = expr.span;
let combine_and = match op {
BinOpKind::And | BinOpKind::BitAnd => true,
BinOpKind::Or | BinOpKind::BitOr => false,
_ => return,
};
// value, name, order (higher/lower), inclusiveness
if let (Some((lval, lname, name_span, lval_span, lord, linc)), Some((rval, rname, _, rval_span, rord, rinc))) =
(check_range_bounds(cx, l), check_range_bounds(cx, r))
{
// we only lint comparisons on the same name and with different
// direction
if lname != rname || lord == rord {
return;
}
let ord = Constant::partial_cmp(cx.tcx, cx.typeck_results().expr_ty(l), &lval, &rval);
if combine_and && ord == Some(rord) {
// order lower bound and upper bound
let (l_span, u_span, l_inc, u_inc) = if rord == Ordering::Less {
(lval_span, rval_span, linc, rinc)
} else {
(rval_span, lval_span, rinc, linc)
};
// we only lint inclusive lower bounds
if !l_inc {
return;
}
let (range_type, range_op) = if u_inc {
("RangeInclusive", "..=")
} else {
("Range", "..")
};
let mut applicability = Applicability::MachineApplicable;
let name = snippet_with_applicability(cx, name_span, "_", &mut applicability);
let lo = snippet_with_applicability(cx, l_span, "_", &mut applicability);
let hi = snippet_with_applicability(cx, u_span, "_", &mut applicability);
let space = if lo.ends_with('.') { " " } else { "" };
span_lint_and_sugg(
cx,
MANUAL_RANGE_CONTAINS,
span,
&format!("manual `{}::contains` implementation", range_type),
"use",
format!("({}{}{}{}).contains(&{})", lo, space, range_op, hi, name),
applicability,
);
} else if !combine_and && ord == Some(lord) {
// `!_.contains(_)`
// order lower bound and upper bound
let (l_span, u_span, l_inc, u_inc) = if lord == Ordering::Less {
(lval_span, rval_span, linc, rinc)
} else {
(rval_span, lval_span, rinc, linc)
};
if l_inc {
return;
}
let (range_type, range_op) = if u_inc {
("Range", "..")
} else {
("RangeInclusive", "..=")
};
let mut applicability = Applicability::MachineApplicable;
let name = snippet_with_applicability(cx, name_span, "_", &mut applicability);
let lo = snippet_with_applicability(cx, l_span, "_", &mut applicability);
let hi = snippet_with_applicability(cx, u_span, "_", &mut applicability);
let space = if lo.ends_with('.') { " " } else { "" };
span_lint_and_sugg(
cx,
MANUAL_RANGE_CONTAINS,
span,
&format!("manual `!{}::contains` implementation", range_type),
"use",
format!("!({}{}{}{}).contains(&{})", lo, space, range_op, hi, name),
applicability,
);
}
}
}
fn check_range_bounds(cx: &LateContext<'_>, ex: &Expr<'_>) -> Option<(Constant, Ident, Span, Span, Ordering, bool)> {
if let ExprKind::Binary(ref op, ref l, ref r) = ex.kind {
let (inclusive, ordering) = match op.node {
BinOpKind::Gt => (false, Ordering::Greater),
BinOpKind::Ge => (true, Ordering::Greater),
BinOpKind::Lt => (false, Ordering::Less),
BinOpKind::Le => (true, Ordering::Less),
_ => return None,
};
if let Some(id) = match_ident(l) {
if let Some((c, _)) = constant(cx, cx.typeck_results(), r) {
return Some((c, id, l.span, r.span, ordering, inclusive));
}
} else if let Some(id) = match_ident(r) {
if let Some((c, _)) = constant(cx, cx.typeck_results(), l) {
return Some((c, id, r.span, l.span, ordering.reverse(), inclusive));
}
}
}
None
}
fn match_ident(e: &Expr<'_>) -> Option<Ident> {
if let ExprKind::Path(ref qpath) = e.kind {
if let Some(seg) = single_segment_path(qpath) {
if seg.args.is_none() {
return Some(seg.ident);
}
}
}
None
}
fn check_range_zip_with_len(cx: &LateContext<'_>, path: &PathSegment<'_>, args: &[Expr<'_>], span: Span) {
if_chain! {
if path.ident.as_str() == "zip";
if let [iter, zip_arg] = args;
// `.iter()` call
if let ExprKind::MethodCall(ref iter_path, _, ref iter_args, _) = iter.kind;
if iter_path.ident.name == sym::iter;
// range expression in `.zip()` call: `0..x.len()`
if let Some(higher::Range { start: Some(start), end: Some(end), .. }) = higher::range(zip_arg);
if is_integer_const(cx, start, 0);
// `.len()` call
if let ExprKind::MethodCall(ref len_path, _, ref len_args, _) = end.kind;
if len_path.ident.name == sym!(len) && len_args.len() == 1;
// `.iter()` and `.len()` called on same `Path`
if let ExprKind::Path(QPath::Resolved(_, ref iter_path)) = iter_args[0].kind;
if let ExprKind::Path(QPath::Resolved(_, ref len_path)) = len_args[0].kind;
if SpanlessEq::new(cx).eq_path_segments(&iter_path.segments, &len_path.segments);
then {
span_lint(cx,
RANGE_ZIP_WITH_LEN,
span,
&format!("it is more idiomatic to use `{}.iter().enumerate()`",
snippet(cx, iter_args[0].span, "_"))
);
}
}
}
// exclusive range plus one: `x..(y+1)`
fn check_exclusive_range_plus_one(cx: &LateContext<'_>, expr: &Expr<'_>) {
if_chain! {
if let Some(higher::Range {
start,
end: Some(end),
limits: RangeLimits::HalfOpen
}) = higher::range(expr);
if let Some(y) = y_plus_one(cx, end);
then {
let span = if expr.span.from_expansion() {
expr.span
.ctxt()
.outer_expn_data()
.call_site
} else {
expr.span
};
span_lint_and_then(
cx,
RANGE_PLUS_ONE,
span,
"an inclusive range would be more readable",
|diag| {
let start = start.map_or(String::new(), |x| Sugg::hir(cx, x, "x").to_string());
let end = Sugg::hir(cx, y, "y");
if let Some(is_wrapped) = &snippet_opt(cx, span) {
if is_wrapped.starts_with('(') && is_wrapped.ends_with(')') {
diag.span_suggestion(
span,
"use",
format!("({}..={})", start, end),
Applicability::MaybeIncorrect,
);
} else {
diag.span_suggestion(
span,
"use",
format!("{}..={}", start, end),
Applicability::MachineApplicable, // snippet
);
}
}
},
);
}
}
}
// inclusive range minus one: `x..=(y-1)`
fn check_inclusive_range_minus_one(cx: &LateContext<'_>, expr: &Expr<'_>) {
if_chain! {
if let Some(higher::Range { start, end: Some(end), limits: RangeLimits::Closed }) = higher::range(expr);
if let Some(y) = y_minus_one(cx, end);
then {
span_lint_and_then(
cx,
RANGE_MINUS_ONE,
expr.span,
"an exclusive range would be more readable",
|diag| {
let start = start.map_or(String::new(), |x| Sugg::hir(cx, x, "x").to_string());
let end = Sugg::hir(cx, y, "y");
diag.span_suggestion(
expr.span,
"use",
format!("{}..{}", start, end),
Applicability::MachineApplicable, // snippet
);
},
);
}
}
}
fn check_reversed_empty_range(cx: &LateContext<'_>, expr: &Expr<'_>) {
fn inside_indexing_expr(cx: &LateContext<'_>, expr: &Expr<'_>) -> bool {
matches!(
get_parent_expr(cx, expr),
Some(Expr {
kind: ExprKind::Index(..),
..
})
)
}
fn is_for_loop_arg(cx: &LateContext<'_>, expr: &Expr<'_>) -> bool {
let mut cur_expr = expr;
while let Some(parent_expr) = get_parent_expr(cx, cur_expr) {
match higher::for_loop(parent_expr) {
Some((_, args, _, _)) if args.hir_id == expr.hir_id => return true,
_ => cur_expr = parent_expr,
}
}
false
}
fn is_empty_range(limits: RangeLimits, ordering: Ordering) -> bool {
match limits {
RangeLimits::HalfOpen => ordering != Ordering::Less,
RangeLimits::Closed => ordering == Ordering::Greater,
}
}
if_chain! {
if let Some(higher::Range { start: Some(start), end: Some(end), limits }) = higher::range(expr);
let ty = cx.typeck_results().expr_ty(start);
if let ty::Int(_) | ty::Uint(_) = ty.kind();
if let Some((start_idx, _)) = constant(cx, cx.typeck_results(), start);
if let Some((end_idx, _)) = constant(cx, cx.typeck_results(), end);
if let Some(ordering) = Constant::partial_cmp(cx.tcx, ty, &start_idx, &end_idx);
if is_empty_range(limits, ordering);
then {
if inside_indexing_expr(cx, expr) {
// Avoid linting `N..N` as it has proven to be useful, see #5689 and #5628 ...
if ordering != Ordering::Equal {
span_lint(
cx,
REVERSED_EMPTY_RANGES,
expr.span,
"this range is reversed and using it to index a slice will panic at run-time",
);
}
// ... except in for loop arguments for backwards compatibility with `reverse_range_loop`
} else if ordering != Ordering::Equal || is_for_loop_arg(cx, expr) {
span_lint_and_then(
cx,
REVERSED_EMPTY_RANGES,
expr.span,
"this range is empty so it will yield no values",
|diag| {
if ordering != Ordering::Equal {
let start_snippet = snippet(cx, start.span, "_");
let end_snippet = snippet(cx, end.span, "_");
let dots = match limits {
RangeLimits::HalfOpen => "..",
RangeLimits::Closed => "..="
};
diag.span_suggestion(
expr.span,
"consider using the following if you are attempting to iterate over this \
range in reverse",
format!("({}{}{}).rev()", end_snippet, dots, start_snippet),
Applicability::MaybeIncorrect,
);
}
},
);
}
}
}
}
fn y_plus_one<'t>(cx: &LateContext<'_>, expr: &'t Expr<'_>) -> Option<&'t Expr<'t>> {
match expr.kind {
ExprKind::Binary(
Spanned {
node: BinOpKind::Add, ..
},
ref lhs,
ref rhs,
) => {
if is_integer_const(cx, lhs, 1) {
Some(rhs)
} else if is_integer_const(cx, rhs, 1) {
Some(lhs)
} else {
None
}
},
_ => None,
}
}
fn y_minus_one<'t>(cx: &LateContext<'_>, expr: &'t Expr<'_>) -> Option<&'t Expr<'t>> {
match expr.kind {
ExprKind::Binary(
Spanned {
node: BinOpKind::Sub, ..
},
ref lhs,
ref rhs,
) if is_integer_const(cx, rhs, 1) => Some(lhs),
_ => None,
}
} | use crate::consts::{constant, Constant}; |
|
vars.py |
x = 1
y = 1
def | ():
x = 3
x = x + 1
print x
def bar():
global y
y = 3
y = y + 1
print y
foo()
bar()
print x
print y
| foo |
retransmit_stage.rs | //! The `retransmit_stage` retransmits shreds between validators
#![allow(clippy::rc_buffer)]
use crate::{
cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
cluster_info_vote_listener::VerifiedVoteReceiver,
cluster_slots::ClusterSlots,
cluster_slots_service::ClusterSlotsService,
completed_data_sets_service::CompletedDataSetsSender,
contact_info::ContactInfo,
repair_service::DuplicateSlotsResetSender,
repair_service::RepairInfo,
result::{Error, Result},
window_service::{should_retransmit_and_persist, WindowService},
};
use crossbeam_channel::{Receiver, Sender};
use lru::LruCache;
use solana_client::rpc_response::SlotUpdate;
use solana_ledger::shred::{get_shred_slot_index_type, ShredFetchStats};
use solana_ledger::{
blockstore::{Blockstore, CompletedSlotsReceiver},
leader_schedule_cache::LeaderScheduleCache,
};
use solana_measure::measure::Measure;
use solana_metrics::inc_new_counter_error;
use solana_perf::packet::{Packet, Packets};
use solana_rpc::{
max_slots::MaxSlots, rpc_completed_slots_service::RpcCompletedSlotsService,
rpc_subscriptions::RpcSubscriptions,
};
use solana_runtime::{bank::Bank, bank_forks::BankForks};
use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey, timing::timestamp};
use solana_streamer::streamer::PacketReceiver;
use std::{
cmp,
collections::hash_set::HashSet,
collections::{BTreeMap, BTreeSet, HashMap},
net::UdpSocket,
ops::{Deref, DerefMut},
sync::atomic::{AtomicBool, AtomicU64, Ordering},
sync::mpsc::channel,
sync::mpsc::RecvTimeoutError,
sync::Mutex,
sync::{Arc, RwLock},
thread::{self, Builder, JoinHandle},
time::Duration,
};
const MAX_DUPLICATE_COUNT: usize = 2;
const DEFAULT_LRU_SIZE: usize = 10_000;
// Limit a given thread to consume about this many packets so that
// it doesn't pull up too much work.
const MAX_PACKET_BATCH_SIZE: usize = 100;
#[derive(Default)]
struct RetransmitStats {
total_packets: AtomicU64,
total_batches: AtomicU64,
total_time: AtomicU64,
epoch_fetch: AtomicU64,
epoch_cache_update: AtomicU64,
repair_total: AtomicU64,
discard_total: AtomicU64,
retransmit_total: AtomicU64,
last_ts: AtomicU64,
compute_turbine_peers_total: AtomicU64,
packets_by_slot: Mutex<BTreeMap<Slot, usize>>,
packets_by_source: Mutex<BTreeMap<String, usize>>,
}
#[allow(clippy::too_many_arguments)]
fn update_retransmit_stats(
stats: &RetransmitStats,
total_time: u64,
total_packets: usize,
retransmit_total: u64,
discard_total: u64,
repair_total: u64,
compute_turbine_peers_total: u64,
peers_len: usize,
packets_by_slot: HashMap<Slot, usize>,
packets_by_source: HashMap<String, usize>,
epoch_fetch: u64,
epoch_cach_update: u64,
) {
stats.total_time.fetch_add(total_time, Ordering::Relaxed);
stats
.total_packets
.fetch_add(total_packets as u64, Ordering::Relaxed);
stats
.retransmit_total
.fetch_add(retransmit_total, Ordering::Relaxed);
stats
.repair_total
.fetch_add(repair_total, Ordering::Relaxed);
stats
.discard_total
.fetch_add(discard_total, Ordering::Relaxed);
stats
.compute_turbine_peers_total
.fetch_add(compute_turbine_peers_total, Ordering::Relaxed);
stats.total_batches.fetch_add(1, Ordering::Relaxed);
stats.epoch_fetch.fetch_add(epoch_fetch, Ordering::Relaxed);
stats
.epoch_cache_update
.fetch_add(epoch_cach_update, Ordering::Relaxed);
{
let mut stats_packets_by_slot = stats.packets_by_slot.lock().unwrap();
for (slot, count) in packets_by_slot {
*stats_packets_by_slot.entry(slot).or_insert(0) += count;
}
}
{
let mut stats_packets_by_source = stats.packets_by_source.lock().unwrap();
for (source, count) in packets_by_source {
*stats_packets_by_source.entry(source).or_insert(0) += count;
}
}
let now = timestamp();
let last = stats.last_ts.load(Ordering::Relaxed);
#[allow(deprecated)]
if now.saturating_sub(last) > 2000
&& stats.last_ts.compare_and_swap(last, now, Ordering::Relaxed) == last
{
datapoint_info!("retransmit-num_nodes", ("count", peers_len, i64));
datapoint_info!(
"retransmit-stage",
(
"total_time",
stats.total_time.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"epoch_fetch",
stats.epoch_fetch.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"epoch_cache_update",
stats.epoch_cache_update.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_batches",
stats.total_batches.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_packets",
stats.total_packets.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"retransmit_total",
stats.retransmit_total.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"compute_turbine",
stats.compute_turbine_peers_total.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"repair_total",
stats.repair_total.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"discard_total",
stats.discard_total.swap(0, Ordering::Relaxed) as i64,
i64
),
);
let mut packets_by_slot = stats.packets_by_slot.lock().unwrap();
let old_packets_by_slot = std::mem::take(&mut *packets_by_slot);
drop(packets_by_slot);
for (slot, num_shreds) in old_packets_by_slot {
datapoint_info!(
"retransmit-slot-num-packets",
("slot", slot, i64),
("num_shreds", num_shreds, i64)
);
}
let mut packets_by_source = stats.packets_by_source.lock().unwrap();
let mut top = BTreeMap::new();
let mut max = 0;
for (source, num) in packets_by_source.iter() {
if *num > max {
top.insert(*num, source.clone());
if top.len() > 5 {
let last = *top.iter().next().unwrap().0;
top.remove(&last);
}
max = *top.iter().next().unwrap().0;
}
}
info!(
"retransmit: top packets_by_source: {:?} len: {}",
top,
packets_by_source.len()
);
packets_by_source.clear();
}
}
#[derive(Default)]
struct EpochStakesCache {
peers: Vec<ContactInfo>,
stakes_and_index: Vec<(u64, usize)>,
}
use crate::packet_hasher::PacketHasher;
// Map of shred (slot, index, is_data) => list of hash values seen for that key.
pub type ShredFilter = LruCache<(Slot, u32, bool), Vec<u64>>;
pub type ShredFilterAndHasher = (ShredFilter, PacketHasher);
// Returns None if shred is already received and should skip retransmit.
// Otherwise returns shred's slot and whether the shred is a data shred.
fn check_if_already_received(
packet: &Packet,
shreds_received: &Mutex<ShredFilterAndHasher>,
) -> Option<Slot> {
let shred = get_shred_slot_index_type(packet, &mut ShredFetchStats::default())?;
let mut shreds_received = shreds_received.lock().unwrap();
let (cache, hasher) = shreds_received.deref_mut();
match cache.get_mut(&shred) {
Some(sent) if sent.len() >= MAX_DUPLICATE_COUNT => None,
Some(sent) => {
let hash = hasher.hash_packet(packet);
if sent.contains(&hash) {
None
} else {
sent.push(hash);
Some(shred.0)
}
}
None => {
let hash = hasher.hash_packet(packet);
cache.put(shred, vec![hash]);
Some(shred.0)
}
}
}
// Returns true if this is the first time receiving a shred for `shred_slot`.
fn check_if_first_shred_received(
shred_slot: Slot,
first_shreds_received: &Mutex<BTreeSet<Slot>>,
root_bank: &Bank,
) -> bool {
if shred_slot <= root_bank.slot() {
return false;
}
let mut first_shreds_received_locked = first_shreds_received.lock().unwrap();
if !first_shreds_received_locked.contains(&shred_slot) {
datapoint_info!("retransmit-first-shred", ("slot", shred_slot, i64));
first_shreds_received_locked.insert(shred_slot);
if first_shreds_received_locked.len() > 100 {
let mut slots_before_root =
first_shreds_received_locked.split_off(&(root_bank.slot() + 1));
// `slots_before_root` now contains all slots <= root
std::mem::swap(&mut slots_before_root, &mut first_shreds_received_locked);
}
true
} else {
false
}
}
// Drops shred slot leader from retransmit peers.
// TODO: decide which bank should be used here.
fn get_retransmit_peers(
self_pubkey: Pubkey,
shred_slot: Slot,
leader_schedule_cache: &LeaderScheduleCache,
bank: &Bank,
stakes_cache: &EpochStakesCache,
) -> Vec<(u64 /*stakes*/, usize /*index*/)> {
match leader_schedule_cache.slot_leader_at(shred_slot, Some(bank)) {
None => {
error!("unknown leader for shred slot");
stakes_cache.stakes_and_index.clone()
}
Some(pubkey) if pubkey == self_pubkey => {
error!("retransmit from slot leader: {}", pubkey);
stakes_cache.stakes_and_index.clone()
}
Some(pubkey) => stakes_cache
.stakes_and_index
.iter()
.filter(|(_, i)| stakes_cache.peers[*i].id != pubkey)
.copied()
.collect(),
}
}
#[allow(clippy::too_many_arguments)]
fn retransmit(
bank_forks: &RwLock<BankForks>,
leader_schedule_cache: &LeaderScheduleCache,
cluster_info: &ClusterInfo,
r: &Mutex<PacketReceiver>,
sock: &UdpSocket,
id: u32,
stats: &RetransmitStats,
epoch_stakes_cache: &RwLock<EpochStakesCache>,
last_peer_update: &AtomicU64,
shreds_received: &Mutex<ShredFilterAndHasher>,
max_slots: &MaxSlots,
first_shreds_received: &Mutex<BTreeSet<Slot>>,
rpc_subscriptions: &Option<Arc<RpcSubscriptions>>,
) -> Result<()> {
let timer = Duration::new(1, 0);
let r_lock = r.lock().unwrap();
let packets = r_lock.recv_timeout(timer)?;
let mut timer_start = Measure::start("retransmit");
let mut total_packets = packets.packets.len();
let mut packet_v = vec![packets];
while let Ok(nq) = r_lock.try_recv() {
total_packets += nq.packets.len();
packet_v.push(nq);
if total_packets >= MAX_PACKET_BATCH_SIZE {
break;
}
}
drop(r_lock);
let mut epoch_fetch = Measure::start("retransmit_epoch_fetch");
let (r_bank, root_bank) = {
let bank_forks = bank_forks.read().unwrap();
(bank_forks.working_bank(), bank_forks.root_bank())
};
let bank_epoch = r_bank.get_leader_schedule_epoch(r_bank.slot());
epoch_fetch.stop();
let mut epoch_cache_update = Measure::start("retransmit_epoch_cach_update");
let now = timestamp();
let last = last_peer_update.load(Ordering::Relaxed);
#[allow(deprecated)]
if now.saturating_sub(last) > 1000
&& last_peer_update.compare_and_swap(last, now, Ordering::Relaxed) == last
{
let epoch_staked_nodes = r_bank.epoch_staked_nodes(bank_epoch);
let (peers, stakes_and_index) =
cluster_info.sorted_retransmit_peers_and_stakes(epoch_staked_nodes.as_ref());
{
let mut epoch_stakes_cache = epoch_stakes_cache.write().unwrap();
epoch_stakes_cache.peers = peers;
epoch_stakes_cache.stakes_and_index = stakes_and_index;
}
{
let mut sr = shreds_received.lock().unwrap();
sr.0.clear();
sr.1.reset();
}
}
let r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap();
let mut peers_len = 0;
epoch_cache_update.stop();
let my_id = cluster_info.id();
let mut discard_total = 0;
let mut repair_total = 0;
let mut retransmit_total = 0;
let mut compute_turbine_peers_total = 0;
let mut packets_by_slot: HashMap<Slot, usize> = HashMap::new();
let mut packets_by_source: HashMap<String, usize> = HashMap::new();
let mut max_slot = 0;
for mut packets in packet_v {
for packet in packets.packets.iter_mut() {
// skip discarded packets and repair packets
if packet.meta.discard {
total_packets -= 1;
discard_total += 1;
continue;
}
if packet.meta.repair {
total_packets -= 1;
repair_total += 1;
continue;
}
let shred_slot = match check_if_already_received(packet, shreds_received) {
Some(slot) => slot,
None => continue,
};
max_slot = max_slot.max(shred_slot);
if let Some(rpc_subscriptions) = rpc_subscriptions {
if check_if_first_shred_received(shred_slot, first_shreds_received, &root_bank) {
rpc_subscriptions.notify_slot_update(SlotUpdate::FirstShredReceived {
slot: shred_slot,
timestamp: timestamp(),
});
}
}
let mut compute_turbine_peers = Measure::start("turbine_start");
let stakes_and_index = get_retransmit_peers(
my_id,
shred_slot,
leader_schedule_cache,
r_bank.deref(),
r_epoch_stakes_cache.deref(),
);
let (my_index, shuffled_stakes_and_index) = ClusterInfo::shuffle_peers_and_index(
&my_id,
&r_epoch_stakes_cache.peers,
&stakes_and_index,
packet.meta.seed,
);
peers_len = cmp::max(peers_len, shuffled_stakes_and_index.len());
// split off the indexes, we don't need the stakes anymore
let indexes: Vec<_> = shuffled_stakes_and_index
.into_iter()
.map(|(_, index)| index)
.collect();
let (neighbors, children) =
compute_retransmit_peers(DATA_PLANE_FANOUT, my_index, &indexes);
let neighbors: Vec<_> = neighbors
.into_iter()
.filter_map(|index| {
let peer = &r_epoch_stakes_cache.peers[index];
if peer.id == my_id {
None
} else {
Some(peer)
}
})
.collect();
let children: Vec<_> = children
.into_iter()
.map(|index| &r_epoch_stakes_cache.peers[index])
.collect();
compute_turbine_peers.stop();
compute_turbine_peers_total += compute_turbine_peers.as_us();
*packets_by_slot.entry(packet.meta.slot).or_insert(0) += 1;
*packets_by_source
.entry(packet.meta.addr().to_string())
.or_insert(0) += 1;
let mut retransmit_time = Measure::start("retransmit_to");
if !packet.meta.forward {
ClusterInfo::retransmit_to(&neighbors, packet, sock, true)?;
}
ClusterInfo::retransmit_to(&children, packet, sock, packet.meta.forward)?;
retransmit_time.stop();
retransmit_total += retransmit_time.as_us();
}
}
max_slots.retransmit.fetch_max(max_slot, Ordering::Relaxed);
timer_start.stop();
debug!(
"retransmitted {} packets in {}ms retransmit_time: {}ms id: {}",
total_packets,
timer_start.as_ms(),
retransmit_total,
id,
);
update_retransmit_stats(
stats,
timer_start.as_us(),
total_packets,
retransmit_total,
discard_total,
repair_total,
compute_turbine_peers_total,
peers_len,
packets_by_slot,
packets_by_source,
epoch_fetch.as_us(),
epoch_cache_update.as_us(),
);
Ok(())
}
/// Service to retransmit messages from the leader or layer 1 to relevant peer nodes.
/// See `cluster_info` for network layer definitions.
/// # Arguments
/// * `sockets` - Sockets to read from.
/// * `bank_forks` - The BankForks structure
/// * `leader_schedule_cache` - The leader schedule to verify shreds
/// * `cluster_info` - This structure needs to be updated and populated by the bank and via gossip.
/// * `r` - Receive channel for shreds to be retransmitted to all the layer 1 nodes.
pub fn retransmitter(
sockets: Arc<Vec<UdpSocket>>,
bank_forks: Arc<RwLock<BankForks>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
cluster_info: Arc<ClusterInfo>,
r: Arc<Mutex<PacketReceiver>>,
max_slots: &Arc<MaxSlots>,
rpc_subscriptions: Option<Arc<RpcSubscriptions>>,
) -> Vec<JoinHandle<()>> |
pub struct RetransmitStage {
thread_hdls: Vec<JoinHandle<()>>,
window_service: WindowService,
cluster_slots_service: ClusterSlotsService,
}
impl RetransmitStage {
#[allow(clippy::new_ret_no_self)]
#[allow(clippy::too_many_arguments)]
pub fn new(
bank_forks: Arc<RwLock<BankForks>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
blockstore: Arc<Blockstore>,
cluster_info: &Arc<ClusterInfo>,
retransmit_sockets: Arc<Vec<UdpSocket>>,
repair_socket: Arc<UdpSocket>,
verified_receiver: Receiver<Vec<Packets>>,
exit: &Arc<AtomicBool>,
completed_slots_receivers: [CompletedSlotsReceiver; 2],
epoch_schedule: EpochSchedule,
cfg: Option<Arc<AtomicBool>>,
shred_version: u16,
cluster_slots: Arc<ClusterSlots>,
duplicate_slots_reset_sender: DuplicateSlotsResetSender,
verified_vote_receiver: VerifiedVoteReceiver,
repair_validators: Option<HashSet<Pubkey>>,
completed_data_sets_sender: CompletedDataSetsSender,
max_slots: &Arc<MaxSlots>,
rpc_subscriptions: Option<Arc<RpcSubscriptions>>,
duplicate_slots_sender: Sender<Slot>,
) -> Self {
let (retransmit_sender, retransmit_receiver) = channel();
let retransmit_receiver = Arc::new(Mutex::new(retransmit_receiver));
let t_retransmit = retransmitter(
retransmit_sockets,
bank_forks.clone(),
leader_schedule_cache,
cluster_info.clone(),
retransmit_receiver,
max_slots,
rpc_subscriptions.clone(),
);
let [rpc_completed_slots_receiver, cluster_completed_slots_receiver] =
completed_slots_receivers;
let rpc_completed_slots_hdl =
RpcCompletedSlotsService::spawn(rpc_completed_slots_receiver, rpc_subscriptions);
let cluster_slots_service = ClusterSlotsService::new(
blockstore.clone(),
cluster_slots.clone(),
bank_forks.clone(),
cluster_info.clone(),
cluster_completed_slots_receiver,
exit.clone(),
);
let leader_schedule_cache_clone = leader_schedule_cache.clone();
let repair_info = RepairInfo {
bank_forks,
epoch_schedule,
duplicate_slots_reset_sender,
repair_validators,
};
let window_service = WindowService::new(
blockstore,
cluster_info.clone(),
verified_receiver,
retransmit_sender,
repair_socket,
exit,
repair_info,
leader_schedule_cache,
move |id, shred, working_bank, last_root| {
let is_connected = cfg
.as_ref()
.map(|x| x.load(Ordering::Relaxed))
.unwrap_or(true);
let rv = should_retransmit_and_persist(
shred,
working_bank,
&leader_schedule_cache_clone,
id,
last_root,
shred_version,
);
rv && is_connected
},
cluster_slots,
verified_vote_receiver,
completed_data_sets_sender,
duplicate_slots_sender,
);
let mut thread_hdls = t_retransmit;
if let Some(thread_hdl) = rpc_completed_slots_hdl {
thread_hdls.push(thread_hdl);
}
Self {
thread_hdls,
window_service,
cluster_slots_service,
}
}
pub fn join(self) -> thread::Result<()> {
for thread_hdl in self.thread_hdls {
thread_hdl.join()?;
}
self.window_service.join()?;
self.cluster_slots_service.join()?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::contact_info::ContactInfo;
use solana_ledger::blockstore_processor::{process_blockstore, ProcessOptions};
use solana_ledger::create_new_tmp_ledger;
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::shred::Shred;
use solana_net_utils::find_available_port_in_range;
use solana_perf::packet::{Packet, Packets};
use std::net::{IpAddr, Ipv4Addr};
#[test]
fn test_skip_repair() {
solana_logger::setup();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123);
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let blockstore = Blockstore::open(&ledger_path).unwrap();
let opts = ProcessOptions {
full_leader_cache: true,
..ProcessOptions::default()
};
let (bank_forks, cached_leader_schedule) =
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
let leader_schedule_cache = Arc::new(cached_leader_schedule);
let bank_forks = Arc::new(RwLock::new(bank_forks));
let mut me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
let port = find_available_port_in_range(ip_addr, (8000, 10000)).unwrap();
let me_retransmit = UdpSocket::bind(format!("127.0.0.1:{}", port)).unwrap();
// need to make sure tvu and tpu are valid addresses
me.tvu_forwards = me_retransmit.local_addr().unwrap();
let port = find_available_port_in_range(ip_addr, (8000, 10000)).unwrap();
me.tvu = UdpSocket::bind(format!("127.0.0.1:{}", port))
.unwrap()
.local_addr()
.unwrap();
let other = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(other);
cluster_info.insert_info(me);
let retransmit_socket = Arc::new(vec![UdpSocket::bind("0.0.0.0:0").unwrap()]);
let cluster_info = Arc::new(cluster_info);
let (retransmit_sender, retransmit_receiver) = channel();
let t_retransmit = retransmitter(
retransmit_socket,
bank_forks,
&leader_schedule_cache,
cluster_info,
Arc::new(Mutex::new(retransmit_receiver)),
&Arc::new(MaxSlots::default()),
None,
);
let _thread_hdls = vec![t_retransmit];
let mut shred = Shred::new_from_data(0, 0, 0, None, true, true, 0, 0x20, 0);
let mut packet = Packet::default();
shred.copy_to_packet(&mut packet);
let packets = Packets::new(vec![packet.clone()]);
// it should send this over the sockets.
retransmit_sender.send(packets).unwrap();
let mut packets = Packets::new(vec![]);
solana_streamer::packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
assert_eq!(packets.packets.len(), 1);
assert!(!packets.packets[0].meta.repair);
let mut repair = packet.clone();
repair.meta.repair = true;
shred.set_slot(1);
shred.copy_to_packet(&mut packet);
// send 1 repair and 1 "regular" packet so that we don't block forever on the recv_from
let packets = Packets::new(vec![repair, packet]);
retransmit_sender.send(packets).unwrap();
let mut packets = Packets::new(vec![]);
solana_streamer::packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
assert_eq!(packets.packets.len(), 1);
assert!(!packets.packets[0].meta.repair);
}
#[test]
fn test_already_received() {
let mut packet = Packet::default();
let slot = 1;
let index = 5;
let version = 0x40;
let shred = Shred::new_from_data(slot, index, 0, None, true, true, 0, version, 0);
shred.copy_to_packet(&mut packet);
let shreds_received = Arc::new(Mutex::new((LruCache::new(100), PacketHasher::default())));
// unique shred for (1, 5) should pass
assert_eq!(
check_if_already_received(&packet, &shreds_received),
Some(slot)
);
// duplicate shred for (1, 5) blocked
assert_eq!(check_if_already_received(&packet, &shreds_received), None);
let shred = Shred::new_from_data(slot, index, 2, None, true, true, 0, version, 0);
shred.copy_to_packet(&mut packet);
// first duplicate shred for (1, 5) passed
assert_eq!(
check_if_already_received(&packet, &shreds_received),
Some(slot)
);
// then blocked
assert_eq!(check_if_already_received(&packet, &shreds_received), None);
let shred = Shred::new_from_data(slot, index, 8, None, true, true, 0, version, 0);
shred.copy_to_packet(&mut packet);
// 2nd duplicate shred for (1, 5) blocked
assert_eq!(check_if_already_received(&packet, &shreds_received), None);
assert_eq!(check_if_already_received(&packet, &shreds_received), None);
let shred = Shred::new_empty_coding(slot, index, 0, 1, 1, version);
shred.copy_to_packet(&mut packet);
// Coding at (1, 5) passes
assert_eq!(
check_if_already_received(&packet, &shreds_received),
Some(slot)
);
// then blocked
assert_eq!(check_if_already_received(&packet, &shreds_received), None);
let shred = Shred::new_empty_coding(slot, index, 2, 1, 1, version);
shred.copy_to_packet(&mut packet);
// 2nd unique coding at (1, 5) passes
assert_eq!(
check_if_already_received(&packet, &shreds_received),
Some(slot)
);
// same again is blocked
assert_eq!(check_if_already_received(&packet, &shreds_received), None);
let shred = Shred::new_empty_coding(slot, index, 3, 1, 1, version);
shred.copy_to_packet(&mut packet);
// Another unique coding at (1, 5) always blocked
assert_eq!(check_if_already_received(&packet, &shreds_received), None);
assert_eq!(check_if_already_received(&packet, &shreds_received), None);
}
}
| {
let stats = Arc::new(RetransmitStats::default());
let shreds_received = Arc::new(Mutex::new((
LruCache::new(DEFAULT_LRU_SIZE),
PacketHasher::default(),
)));
let first_shreds_received = Arc::new(Mutex::new(BTreeSet::new()));
(0..sockets.len())
.map(|s| {
let sockets = sockets.clone();
let bank_forks = bank_forks.clone();
let leader_schedule_cache = leader_schedule_cache.clone();
let r = r.clone();
let cluster_info = cluster_info.clone();
let stats = stats.clone();
let epoch_stakes_cache = Arc::new(RwLock::new(EpochStakesCache::default()));
let last_peer_update = Arc::new(AtomicU64::new(0));
let shreds_received = shreds_received.clone();
let max_slots = max_slots.clone();
let first_shreds_received = first_shreds_received.clone();
let rpc_subscriptions = rpc_subscriptions.clone();
Builder::new()
.name("solana-retransmitter".to_string())
.spawn(move || {
trace!("retransmitter started");
loop {
if let Err(e) = retransmit(
&bank_forks,
&leader_schedule_cache,
&cluster_info,
&r,
&sockets[s],
s as u32,
&stats,
&epoch_stakes_cache,
&last_peer_update,
&shreds_received,
&max_slots,
&first_shreds_received,
&rpc_subscriptions,
) {
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
_ => {
inc_new_counter_error!("streamer-retransmit-error", 1, 1);
}
}
}
}
trace!("exiting retransmitter");
})
.unwrap()
})
.collect()
} |
chunk.js | var userInfo = {md5: ""}; //用户会话信息
var chunkSize = 5 * 1000 * 1024; //分块大小
var uniqueFileName = null; //文件唯一标识符
var folderId = -1; //文件夹id 根据当前所属文件夹来赋值
function getUrl(uri) { //测试用,根据不同类型的后端返回对应的请求地址
// return "http://127.0.0.1:11008/file/" + uri;
return _GATE_URL + "chunk/" + uri;
}
function getHeaders() {
return {
token: "test"
};
}
WebUploader.Uploader.register({
"before-send-file": "beforeSendFile" //时间点1:所有分块进行上传之前调用此函数 ,检查文件存不存在
, "before-send": "beforeSend" //时间点2:如果有分块上传,则每个分块上传之前调用此函数 ,判断分块存不存在
, "after-send-file": "afterSendFile" //时间点3:分片上传完成后,通知后台合成分片
}, {
beforeSendFile: function (file) {
console.log(file);
//秒传验证
var task = new $.Deferred();
var start = new Date().getTime();
(new WebUploader.Uploader()).md5File(file, 0, 10 * 1024 * 1024).progress(function (percentage) {
console.log(percentage);
}).then(function (val) {
console.log("总耗时: " + ((new Date().getTime()) - start) / 1000);
// md5Mark = val;
userInfo.md5 = val;
uploader.options.formData["md5"] = val;
uploader.options.formData["ext"] = file.ext;
$.ajax({
type: "POST"
, url: getUrl('md5')
, data: {
md5: val
, folderId: folderId
}
, headers: getHeaders()
, async: false // 同步
, cache: false
, dataType: "json"
}).then(function (data, textStatus, jqXHR) {
console.log(data);
if (data.isSuccess) {
if (data.data) { //若存在,这返回失败给WebUploader,表明该文件不需要上传
task.reject(); //分片存在,跳过
uploader.skipFile(file);
UploadComlate(file);
} else {
task.resolve(); //分块不存在或不完整,重新发送该分块内容
//拿到上传文件的唯一名称,用于断点续传 需要确保前后端的md5算法和参数一致
uniqueFileName = md5('' + file.name + file.type + file.lastModifiedDate + file.size);
}
} else {
task.reject(); //报错, 跳过
alert(data.msg);
}
}, function (jqXHR, textStatus, errorThrown) { //任何形式的验证失败,都触发重新上传
task.resolve();
//拿到上传文件的唯一名称,用于断点续传
uniqueFileName = md5('' + file.name + file.type + file.lastModifiedDate + file.size);
});
});
return $.when(task);
}
, beforeSend: function (block) {
//分片验证是否已传过,用于断点续传
var task = new $.Deferred();
$.ajax({
type: "POST"
, url: getUrl('check')
, data: JSON.stringify({
name: uniqueFileName
, chunkIndex: block.chunk
, size: block.end - block.start
})
, headers: getHeaders()
, cache: false
, dataType: "json"
, contentType: "application/json",
}).then(function (data, textStatus, jqXHR) {
if (data.isSuccess) {
if (data.data) { //若存在,返回失败给WebUploader,表明该分块不需要上传
task.reject();
} else {
task.resolve();
}
} else {
task.reject(); //报错, 跳过
alert(data.msg);
}
}, function (jqXHR, textStatus, errorThrown) { //任何形式的验证失败,都触发重新上传
task.resolve();
});
return $.when(task);
}
, afterSendFile: function (file) {
var chunksTotal = 0;
if ((chunksTotal = Math.ceil(file.size / chunkSize)) > 1) {
//合并请求
var task = new $.Deferred();
$.ajax({
type: "POST"
, url: getUrl('merge')
, data: JSON.stringify({
name: uniqueFileName
, chunks: chunksTotal
, ext: file.ext
, md5: userInfo.md5
, submittedFileName: file.name
, contextType: file.type
, size: file.size
, folderId: folderId
})
, headers: getHeaders()
, cache: false | , dataType: "json"
, contentType: "application/json",
}).then(function (data, textStatus, jqXHR) {
if (data.isSuccess) {
task.resolve();
UploadComlate(file);
} else {
task.reject(); //报错, 跳过
alert(data.msg);
}
}, function (jqXHR, textStatus, errorThrown) {
task.reject();
});
return $.when(task);
} else {
UploadComlate(file);
}
}
});
var uploader = WebUploader.create({
swf: "Uploader.swf"
, server: getUrl('upload')
, pick: "#picker"
, resize: false
, dnd: "#theList"
, paste: document.body
, disableGlobalDnd: true
, thumb: {
width: 100
, height: 100
, quality: 70
, allowMagnify: true
, crop: true
}
, compress: false
, prepareNextFile: true
, chunked: true
, chunkSize: chunkSize
, threads: true
, formData: $.extend(true, {
folderId: folderId
}, userInfo)
// , formData: function(){return $.extend(true, {userId: userId}, userInfo);}
, headers: getHeaders()
, fileNumLimit: 1
, fileSingleSizeLimit: 1000 * 1024 * 1024
, duplicate: true
});
uploader.on("fileQueued", function (file) {
$("#theList").append('<li id="' + file.id + '">' +
'<img /><span>' + file.name + '</span><span class="itemUpload">上传</span><span class="itemStop">暂停</span><span class="itemDel">删除</span>' +
'<div class="percentage"></div>' +
'</li>');
var $img = $("#" + file.id).find("img");
uploader.makeThumb(file, function (error, src) {
if (error) {
$img.replaceWith("<span>不能预览</span>");
}
$img.attr("src", src);
});
});
$("#theList").on("click", ".itemUpload", function () {
uploader.upload();
//"上传"-->"暂停"
$(this).hide();
$(".itemStop").show();
});
$("#theList").on("click", ".itemStop", function () {
uploader.stop(true);
//"暂停"-->"上传"
$(this).hide();
$(".itemUpload").show();
});
// 如果要删除的文件正在上传(包括暂停),则需要发送给后端一个请求用来清除服务器端的缓存文件
$("#theList").on("click", ".itemDel", function () {
uploader.removeFile($(this).parent().attr("id")); //从上传文件列表中删除
$(this).parent().remove(); //从上传列表dom中删除
});
uploader.on("uploadProgress", function (file, percentage) {
$("#" + file.id + " .percentage").text(percentage * 100 + "%");
});
function UploadComlate(file) {
console.log(file);
$("#" + file.id + " .percentage").text("上传完毕");
$(".itemStop").hide();
$(".itemUpload").hide();
$(".itemDel").hide();
} | , async: false // 同步 |
gobatis_stage.go | // Copyright (C) 2019-2021, Xiongfa Li.
// @author xiongfa.li
// @version V1.0
// Description:
package stage
import (
"context"
"fmt"
"github.com/xfali/gobatis-cmd/pkg"
"github.com/xfali/gobatis-cmd/pkg/config"
"github.com/xfali/gobatis-cmd/pkg/generator"
"github.com/xfali/neve-gen/pkg/database"
"github.com/xfali/neve-gen/pkg/model"
"github.com/xfali/neve-gen/pkg/stringfunc"
"github.com/xfali/neve-gen/pkg/utils"
"github.com/xfali/xlog"
"os"
"path/filepath"
"strings"
)
type GenGobatisStage struct {
logger xlog.Logger
target string
tmplSpec model.TemplateSepc
files []string
}
func NewGenGobatisStage(target string, tmplSpec model.TemplateSepc) *GenGobatisStage {
return &GenGobatisStage{
logger: xlog.GetLogger(),
tmplSpec: tmplSpec,
target: target,
}
}
func (s *GenGobatisStage) Name() string {
return s.tmplSpec.Name
}
func (s *GenGobatisStage) ShouldSkip(ctx context.Context, model *model.ModelData) bool {
return !CheckCondition(ctx, s.tmplSpec.Condition, model)
}
func (s *GenGobatisStage) Generate(ctx context.Context, model *model.ModelData) error {
select {
case <-ctx.Done():
return context.Canceled
default:
infos, have := database.GetTableInfo(ctx)
if have {
for _, m := range model.Value.App.Modules {
info, ok := infos[m.Name]
if ok {
output := filepath.Join(s.target, s.tmplSpec.Target)
output = strings.Replace(output, "${MODULE}", stringfunc.FirstLower(m.Name), -1) | }
conf := config.Config{
Driver: info.DriverName,
Path: output + "/",
PackageName: m.Pkg,
//ModelFile: pkg.Camel2snake(m.Name),
TagName: "xfield,json,yaml,xml",
Namespace: fmt.Sprintf("%s.%s", m.Pkg, pkg.Camel2snake(m.Name)),
}
s.files = append(s.files, filepath.Join(output, strings.ToLower(m.Name)+"_proxy.go"))
generator.GenV2Proxy(conf, m.Name, info.Info)
}
}
}
}
return nil
}
func (s *GenGobatisStage) Rollback(ctx context.Context) error {
var last error
for _, v := range s.files {
err := os.Remove(v)
if err != nil {
last = err
s.logger.Errorln(err)
}
}
return last
} | err := utils.Mkdir(output)
if err != nil {
s.logger.Errorln(err)
return fmt.Errorf("Create Module dir : %s failed. ", output) |
thread_worker.rs | //! Small utility to correctly spawn crossbeam-channel based worker threads.
//! Original source: https://github.com/rust-analyzer/rust-analyzer/blob/c7ceea82a5ab8aabab2f98e7c1e1ec94e82087c2/crates/thread_worker/src/lib.rs
use std::thread;
use crossbeam_channel::{bounded, unbounded, Receiver, Sender};
/// Like `std::thread::JoinHandle<()>`, but joins thread in drop automatically.
pub struct ScopedThread {
// Option for drop
inner: Option<thread::JoinHandle<()>>,
}
impl Drop for ScopedThread {
fn | (&mut self) {
let inner = self.inner.take().unwrap();
let name = inner.thread().name().unwrap().to_string();
info!("Waiting for {} to finish...", name);
let res = inner.join();
info!(
"... {} terminated with {}",
name,
if res.is_ok() { "ok" } else { "err" }
);
// escalate panic, but avoid aborting the process
if let Err(e) = res {
if !thread::panicking() {
panic!(e)
}
}
}
}
impl ScopedThread {
pub fn spawn(name: &'static str, f: impl FnOnce() + Send + 'static) -> ScopedThread {
let inner = thread::Builder::new().name(name.into()).spawn(f).unwrap();
ScopedThread { inner: Some(inner) }
}
}
/// A wrapper around event-processing thread with automatic shutdown semantics.
pub struct Worker<I, O> {
// XXX: field order is significant here.
//
// In Rust, fields are dropped in the declaration order, and we rely on this
// here. We must close input first, so that the `thread` (who holds the
// opposite side of the channel) noticed shutdown. Then, we must join the
// thread, but we must keep out alive so that the thread does not panic.
//
// Note that a potential problem here is that we might drop some messages
// from receiver on the floor. This is ok for rust-analyzer: we have only a
// single client, so, if we are shutting down, nobody is interested in the
// unfinished work anyway! (It's okay for kak-lsp too).
sender: Sender<I>,
_thread: ScopedThread,
receiver: Receiver<O>,
}
impl<I, O> Worker<I, O> {
pub fn spawn<F>(name: &'static str, buf: usize, f: F) -> Worker<I, O>
where
F: FnOnce(Receiver<I>, Sender<O>) + Send + 'static,
I: Send + 'static,
O: Send + 'static,
{
// Set up worker channels in a deadlock-avoiding way. If one sets both input
// and output buffers to a fixed size, a worker might get stuck.
let (sender, input_receiver) = bounded::<I>(buf);
let (output_sender, receiver) = unbounded::<O>();
let _thread = ScopedThread::spawn(name, move || f(input_receiver, output_sender));
Worker {
sender,
_thread,
receiver,
}
}
}
impl<I, O> Worker<I, O> {
pub fn sender(&self) -> &Sender<I> {
&self.sender
}
pub fn receiver(&self) -> &Receiver<O> {
&self.receiver
}
}
| drop |
multibody_limits2.rs | extern crate nalgebra as na;
extern crate ncollide2d;
extern crate nphysics2d;
extern crate nphysics_testbed2d;
use na::{Isometry2, Vector2};
use ncollide2d::shape::{Cuboid, ShapeHandle};
use nphysics2d::joint::{FreeJoint, RevoluteJoint};
use nphysics2d::object::{BodyHandle, Material};
use nphysics2d::volumetric::Volumetric;
use nphysics2d::world::World;
use nphysics_testbed2d::Testbed;
use std::f32::consts::PI;
const COLLIDER_MARGIN: f32 = 0.01;
fn main() {
/*
* World
*/
let mut world = World::new();
world.set_gravity(Vector2::new(0.0, -9.81));
/*
* Setup the ground.
*/
let ground_radx = 25.0;
let ground_rady = 1.0;
let ground_shape = ShapeHandle::new(Cuboid::new(Vector2::new(
ground_radx - COLLIDER_MARGIN,
ground_rady - COLLIDER_MARGIN,
)));
let ground_pos = Isometry2::new(Vector2::y() * -ground_rady, na::zero());
world.add_collider(
COLLIDER_MARGIN,
ground_shape,
BodyHandle::ground(),
ground_pos,
Material::default(), | /*
* Setup the multibody.
*/
let rad = 0.2;
let num = 20;
let geom = ShapeHandle::new(Cuboid::new(Vector2::repeat(rad)));
let inertia = geom.inertia(1.0);
let center_of_mass = geom.center_of_mass();
// Setup the first link with a free joint.
let free = FreeJoint::new(Isometry2::new(Vector2::y(), na::zero()));
let mut parent = BodyHandle::ground();
parent = world.add_multibody_link(
parent,
free,
na::zero(),
na::zero(),
inertia,
center_of_mass,
);
// Setup the other links with revolute joints.
let mut revo = RevoluteJoint::new(PI / 10.0);
revo.enable_min_angle(PI / 10.0);
revo.enable_max_angle(PI / 10.0);
for _ in 0usize..num {
parent = world.add_multibody_link(
parent,
revo,
Vector2::x() * rad * 3.0,
na::zero(),
inertia,
center_of_mass,
);
world.add_collider(
COLLIDER_MARGIN,
geom.clone(),
parent,
Isometry2::identity(),
Material::default(),
);
}
/*
* Set up the testbed.
*/
let testbed = Testbed::new(world);
testbed.run();
} | );
|
derived.gen.go | // Code generated by goderive DO NOT EDIT.
package intersect
// deriveIntersect returns the intersection of the two lists' values
// It assumes that the first list only contains unique items.
// deriveIntersect returns the intersection of the two lists' values.
// It assumes that the first list only contains unique items.
func deriveIntersect(this, that []int) []int {
	// The result can never be larger than the shorter input.
	result := make([]int, 0, deriveMin(len(this), len(that)))
	for _, candidate := range this {
		if deriveContains(that, candidate) {
			result = append(result, candidate)
		}
	}
	return result
}
// deriveContains returns whether the item is contained in the list.
func deriveContains(list []int, item int) bool {
for _, v := range list {
if v == item {
return true
}
}
return false
}
// deriveMin returns the minimum of the two input values.
// Fixes: a stray fill-in marker (`|`) between the return type and the body
// made the declaration unparseable, and "mimimum" was misspelled.
func deriveMin(a, b int) int {
	if a < b {
		return a
	}
	return b
}
|
state_mask.rs | use byteorder::WriteBytesExt;
use std::fmt;
use crate::PacketReader;
/// The State Mask is a variable-length byte array, where each bit represents
/// the current state of a Property owned by an Actor. The Property state
/// tracked is whether it has been updated and needs to be synced with the
/// remote Client
#[derive(Debug, Clone)]
pub struct StateMask {
    // Bit storage: bit `i` lives in `mask[i / 8]` at position `i % 8`.
    mask: Vec<u8>,
    // Length of `mask`; stored separately as a u8 so it can be written to
    // and read from the wire as a single length byte.
    bytes: u8,
}
impl StateMask {
    /// Create a new StateMask with a given number of bytes, all bits cleared
    pub fn new(bytes: u8) -> StateMask {
        StateMask {
            bytes,
            mask: vec![0; bytes as usize],
        }
    }
    /// Gets the bit at the specified position within the StateMask
    ///
    /// Returns `None` when `index` falls past the end of the mask.
    pub fn get_bit(&self, index: u8) -> Option<bool> {
        if let Some(byte) = self.mask.get((index / 8) as usize) {
            let adjusted_index = index % 8;
            return Some(byte & (1 << adjusted_index) != 0);
        }
        return None;
    }
    /// Sets the bit at the specified position within the StateMask
    ///
    /// Out-of-range indices are silently ignored.
    pub fn set_bit(&mut self, index: u8, value: bool) {
        if let Some(byte) = self.mask.get_mut((index / 8) as usize) {
            let adjusted_index = index % 8;
            let bit_mask = 1 << adjusted_index;
            if value {
                *byte |= bit_mask;
            } else {
                *byte &= !bit_mask;
            }
        }
    }
    /// Clears the whole StateMask (all bits back to zero)
    pub fn clear(&mut self) {
        self.mask = vec![0; self.bytes as usize];
    }
    /// Returns whether no bit is currently set in the StateMask
    pub fn is_clear(&self) -> bool {
        for byte in self.mask.iter() {
            if *byte != 0 {
                return false;
            }
        }
        return true;
    }
    /// Get the number of bytes required to represent the StateMask
    pub fn byte_number(&self) -> u8 {
        return self.bytes;
    }
    /// Gets a byte at the specified index in the StateMask
    ///
    /// Panics on an out-of-range index (direct Vec indexing).
    pub fn get_byte(&self, index: usize) -> u8 {
        return self.mask[index];
    }
    /// Performs a NAND operation on the StateMask, with another StateMask:
    /// clears in `self` every bit that is set in `other`
    pub fn nand(&mut self, other: &StateMask) {
        //if other state mask has different capacity, do nothing
        if other.byte_number() != self.byte_number() {
            return;
        }
        for n in 0..self.bytes {
            if let Some(my_byte) = self.mask.get_mut(n as usize) {
                let other_byte = !other.get_byte(n as usize);
                *my_byte &= other_byte;
            }
        }
    }
    /// Performs an OR operation on the StateMask, with another StateMask
    pub fn or(&mut self, other: &StateMask) {
        //if other state mask has different capacity, do nothing
        if other.byte_number() != self.byte_number() {
            return;
        }
        for n in 0..self.bytes {
            if let Some(my_byte) = self.mask.get_mut(n as usize) {
                let other_byte = other.get_byte(n as usize);
                *my_byte |= other_byte;
            }
        }
    }
    /// Writes the StateMask into an outgoing byte stream
    ///
    /// Wire format: one length byte (`bytes`) followed by the mask bytes.
    pub fn write(&mut self, out_bytes: &mut Vec<u8>) {
        out_bytes.write_u8(self.bytes).unwrap();
        for x in 0..self.bytes {
            out_bytes.write_u8(self.mask[x as usize]).unwrap();
        }
    }
    /// Reads the StateMask from an incoming packet
    ///
    /// Mirrors `write`: reads the length byte, then that many mask bytes.
    pub fn read(reader: &mut PacketReader) -> StateMask {
        let bytes: u8 = reader.read_u8();
        let mut mask: Vec<u8> = Vec::new();
        for _ in 0..bytes {
            mask.push(reader.read_u8());
        }
        StateMask { bytes, mask }
    }
    /// Copies `other`'s contents into this StateMask
    ///
    /// Sizes must match, otherwise nothing happens.
    pub fn copy_contents(&mut self, other: &StateMask) {
        //if other state mask has different capacity, do nothing
        if other.byte_number() != self.byte_number() {
            return;
        }
        for n in 0..self.bytes {
            if let Some(my_byte) = self.mask.get_mut(n as usize) {
                let other_byte = other.get_byte(n as usize);
                *my_byte = other_byte;
            }
        }
    }
}
impl fmt::Display for StateMask {
    /// Renders every bit of the mask as a '0'/'1' string, lowest index first.
    ///
    /// Fix: the previous version hardcoded `0..8`, so only the first byte was
    /// ever displayed even though the mask is documented as variable-length.
    /// Single-byte masks render identically to before.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut out_string: String = String::new();
        for byte in self.mask.iter() {
            for bit in 0..8 {
                if byte & (1 << bit) != 0 {
                    out_string.push('1');
                } else {
                    out_string.push('0');
                }
            }
        }
        write!(f, "{}", out_string)
    }
}
#[cfg(test)]
mod single_byte_tests {
    use crate::StateMask;
    // Fixes: the fourth test's name was lost to a fill-in marker (`fn | ()`);
    // it is restored as `get_byte` (the displaced token at the end of the
    // file). Unneeded `mut` bindings that triggered warnings are removed.
    #[test]
    fn getset() {
        let mut mask = StateMask::new(1);
        mask.set_bit(0, true);
        mask.set_bit(2, true);
        mask.set_bit(4, true);
        mask.set_bit(6, true);
        mask.set_bit(4, false);
        assert!(mask.get_bit(0).unwrap() == true);
        assert!(mask.get_bit(1).unwrap() == false);
        assert!(mask.get_bit(2).unwrap() == true);
        assert!(mask.get_bit(4).unwrap() == false);
        assert!(mask.get_bit(6).unwrap() == true);
    }
    #[test]
    fn clear() {
        let mut mask = StateMask::new(1);
        mask.set_bit(0, true);
        mask.set_bit(2, true);
        mask.set_bit(4, true);
        mask.set_bit(6, true);
        mask.clear();
        assert!(mask.get_bit(0).unwrap() == false);
        assert!(mask.get_bit(2).unwrap() == false);
        assert!(mask.get_bit(4).unwrap() == false);
        assert!(mask.get_bit(6).unwrap() == false);
    }
    #[test]
    fn is_clear_true() {
        let mut mask = StateMask::new(1);
        mask.set_bit(2, true);
        assert!(mask.is_clear() == false);
        mask.set_bit(2, false);
        assert!(mask.is_clear() == true);
    }
    #[test]
    fn bytes() {
        let mask = StateMask::new(1);
        assert!(mask.byte_number() == 1);
    }
    #[test]
    fn get_byte() {
        let mut mask = StateMask::new(1);
        mask.set_bit(2, true);
        let byte = mask.get_byte(0);
        assert!(byte == 4);
    }
    #[test]
    fn nand() {
        let mut mask_a = StateMask::new(1);
        mask_a.set_bit(1, true);
        mask_a.set_bit(2, true);
        let mut mask_b = StateMask::new(1);
        mask_b.set_bit(1, true);
        mask_a.nand(&mask_b);
        assert!(mask_a.get_bit(0).unwrap() == false);
        assert!(mask_a.get_bit(1).unwrap() == false);
        assert!(mask_a.get_bit(2).unwrap() == true);
        assert!(mask_a.get_bit(3).unwrap() == false);
    }
    #[test]
    fn or() {
        let mut mask_a = StateMask::new(1);
        mask_a.set_bit(1, true);
        mask_a.set_bit(2, true);
        let mut mask_b = StateMask::new(1);
        mask_b.set_bit(2, true);
        mask_b.set_bit(3, true);
        mask_a.or(&mask_b);
        assert!(mask_a.get_bit(0).unwrap() == false);
        assert!(mask_a.get_bit(1).unwrap() == true);
        assert!(mask_a.get_bit(2).unwrap() == true);
        assert!(mask_a.get_bit(3).unwrap() == true);
        assert!(mask_a.get_bit(4).unwrap() == false);
    }
    #[test]
    fn clone() {
        let mut mask_a = StateMask::new(1);
        mask_a.set_bit(1, true);
        mask_a.set_bit(4, true);
        let mask_b = mask_a.clone();
        assert!(mask_b.get_bit(1).unwrap() == true);
        assert!(mask_b.get_bit(3).unwrap() == false);
        assert!(mask_b.get_bit(4).unwrap() == true);
    }
}
#[cfg(test)]
mod double_byte_tests {
    use crate::StateMask;
    // Fix: removed unneeded `mut` bindings (`bytes` and `clone` tests) that
    // triggered unused-mut warnings.
    #[test]
    fn getset() {
        let mut mask = StateMask::new(2);
        mask.set_bit(0, true);
        mask.set_bit(4, true);
        mask.set_bit(8, true);
        mask.set_bit(12, true);
        mask.set_bit(8, false);
        assert!(mask.get_bit(0).unwrap() == true);
        assert!(mask.get_bit(4).unwrap() == true);
        assert!(mask.get_bit(8).unwrap() == false);
        assert!(mask.get_bit(12).unwrap() == true);
        assert!(mask.get_bit(13).unwrap() == false);
    }
    #[test]
    fn clear() {
        let mut mask = StateMask::new(2);
        mask.set_bit(0, true);
        mask.set_bit(4, true);
        mask.set_bit(8, true);
        mask.set_bit(12, true);
        mask.clear();
        assert!(mask.get_bit(0).unwrap() == false);
        assert!(mask.get_bit(4).unwrap() == false);
        assert!(mask.get_bit(8).unwrap() == false);
        assert!(mask.get_bit(12).unwrap() == false);
    }
    #[test]
    fn is_clear_true() {
        let mut mask = StateMask::new(2);
        mask.set_bit(9, true);
        assert!(mask.is_clear() == false);
        mask.set_bit(9, false);
        assert!(mask.is_clear() == true);
    }
    #[test]
    fn bytes() {
        let mask = StateMask::new(2);
        assert!(mask.byte_number() == 2);
    }
    #[test]
    fn get_byte() {
        let mut mask = StateMask::new(2);
        mask.set_bit(10, true);
        let byte = mask.get_byte(1);
        assert!(byte == 4);
    }
    #[test]
    fn nand() {
        let mut mask_a = StateMask::new(2);
        mask_a.set_bit(1, true);
        mask_a.set_bit(2, true);
        mask_a.set_bit(9, true);
        mask_a.set_bit(10, true);
        let mut mask_b = StateMask::new(2);
        mask_b.set_bit(1, true);
        mask_b.set_bit(9, true);
        mask_a.nand(&mask_b);
        assert!(mask_a.get_bit(0).unwrap() == false);
        assert!(mask_a.get_bit(1).unwrap() == false);
        assert!(mask_a.get_bit(2).unwrap() == true);
        assert!(mask_a.get_bit(3).unwrap() == false);
        assert!(mask_a.get_bit(8).unwrap() == false);
        assert!(mask_a.get_bit(9).unwrap() == false);
        assert!(mask_a.get_bit(10).unwrap() == true);
        assert!(mask_a.get_bit(11).unwrap() == false);
    }
    #[test]
    fn or() {
        let mut mask_a = StateMask::new(2);
        mask_a.set_bit(4, true);
        mask_a.set_bit(8, true);
        let mut mask_b = StateMask::new(2);
        mask_b.set_bit(8, true);
        mask_b.set_bit(12, true);
        mask_a.or(&mask_b);
        assert!(mask_a.get_bit(0).unwrap() == false);
        assert!(mask_a.get_bit(4).unwrap() == true);
        assert!(mask_a.get_bit(8).unwrap() == true);
        assert!(mask_a.get_bit(12).unwrap() == true);
        assert!(mask_a.get_bit(15).unwrap() == false);
    }
    #[test]
    fn clone() {
        let mut mask_a = StateMask::new(2);
        mask_a.set_bit(2, true);
        mask_a.set_bit(10, true);
        let mask_b = mask_a.clone();
        assert!(mask_b.get_bit(2).unwrap() == true);
        assert!(mask_b.get_bit(4).unwrap() == false);
        assert!(mask_b.get_bit(9).unwrap() == false);
        assert!(mask_b.get_bit(10).unwrap() == true);
    }
}
| get_byte |
generate_training_data.py | from __future__ import print_function
import readline
import json
import re
from .config.config import read_from_user
from intent_schema import IntentSchema
from argparse import ArgumentParser
def print_description(intent):
    """Print the interactive prompt instructions for one intent.

    Shows how to enter utterances, how to undo the last one, and which
    slots (with their types) are available for this intent.
    """
    name = intent["intent"]
    print("<> Enter data for <{intent}> OR Press enter with empty string to move onto next intent"
          .format(intent=name))
    print("<> Enter '<' to delete last training utterance")
    print("<> Sample utterance to remind you of the format:")
    print(">> what is the recipe for {ravioli|Food} ?")
    slots = intent["slots"]
    if slots:
        print("<> Available slots for this intent")
        for slot in slots:
            print(" - - ", slot["name"], "<TYPE: {}>".format(slot["type"]))
def validate_input_format(utterance, intent):
    """Return True when the utterance is well formed for this intent.

    Checks that no banned punctuation appears and that every
    ``phrase|Slot`` token names a slot declared on the intent.
    TODO add handling for bad input
    """
    valid_slots = {slot["name"] for slot in intent["slots"]}
    banned = set("-/\\()^%$#@~`-_=+><;:")
    for token in re.split("{(.*)}", utterance):
        if banned & set(token):
            print (" - Banned character found in substring", token)
            print (" - Banned character list", banned)
            return False
        if "|" not in token:
            continue
        pieces = token.split("|")
        if len(pieces) != 2:
            print (" - Error, token is incorrect in", token, pieces)
            return False
        _word, slot = pieces
        if slot.strip() not in valid_slots:
            print (" -", slot, "is not a valid slot for this Intent, valid slots are", valid_slots)
            return False
    return True
def lowercase_utterance(utterance):
    """Lower-case the free text of an utterance, preserving slot-name case.

    Plain tokens are lower-cased wholesale; ``phrase|Slot`` tokens have only
    the phrase part stripped and lower-cased, keeping the slot name intact.
    """
    def normalize(token):
        if "|" not in token:
            return token.lower()
        phrase, slot = token.split("|")
        return "|".join([phrase.strip().lower(), slot.strip()])
    tokens = re.split("({.*})", utterance)
    return " ".join(normalize(token) for token in tokens)
def generate_training_data(schema):
    """Interactively collect training utterances for every intent in the schema.

    For each intent, keeps prompting until the user submits an empty line;
    entering '<' discards the most recent utterance. Valid utterances are
    lower-cased and stored as tab-separated "<intent>\t<utterance>" lines.

    Fix: the function name had been replaced by a fill-in marker
    (``def | (schema):``); restored to ``generate_training_data``, the name
    used by the __main__ block below.
    """
    print ("Loaded intent schema, populating intents")
    training_data = []
    for intent in schema.get_intents():
        print_description(intent)
        keep_prompting = True
        while keep_prompting:
            utterance = read_from_user(str,
                                       str(len(training_data))+". "+intent["intent"]+'\t')
            if utterance.strip() == "":
                # Empty line: move on to the next intent.
                keep_prompting = False
            elif utterance.strip() == "<":
                # Undo: drop the previously accepted utterance.
                print (" - Discarded utterance: ", training_data.pop())
            elif validate_input_format(utterance, intent):
                training_data.append("\t".join([intent["intent"], lowercase_utterance(utterance)]))
            else:
                print (" - Discarded utterance:", utterance)
    return training_data
if __name__ == '__main__':
    # CLI entry point: read an intent schema, interactively collect training
    # utterances for each intent, and write them one-per-line to the output.
    parser = ArgumentParser()
    parser.add_argument('--intent_schema', '-i', required=True)
    parser.add_argument('--output', '-o', default='utterances.txt')
    args = parser.parse_args()
    intent_schema = IntentSchema.from_filename(args.intent_schema)
    with open(args.output, 'w') as utterance_file:
        utterance_file.write("\n".join(generate_training_data(intent_schema)))
| generate_training_data |
lib.rs | //! Implementation of a highly-scalable and ergonomic actor model for Rust
//!
//! [](https://crates.io/crates/axiom)
//! [](https://travis-ci.org/rsimmonsjr/axiom)
//! [](https://isitmaintained.com/project/rsimmonsjr/axiom)
//! [](https://github.com/rsimmonsjr/axiom#license)
//!
//! # Axiom
//!
//! Axiom brings a highly-scalable actor model to the Rust language based on the many lessons
//! learned over years of Actor model implementations in Akka and Erlang. Axiom is, however, not a
//! direct re-implementation of either of the two aforementioned actor models but rather a new
//! implementation deriving inspiration from the good parts of those projects.
//!
//! * 2019-12-06 0.2.0
//! * Massive internal refactor in order to support async Actors. There are only a few breaking
//! changes, so porting to this version will be relatively simple.
//! * BREAKING CHANGE: The signature for Processors has changed from references for `Context` and
//! `Message` to values. For closures-as-actors, wrap the body in an `async` block. `move |...|
//! {...}` becomes `|...| async move { ... }`. For regular function syntax, simply add `async` in
//! front of `fn`.
//! * NOTE: the positioning of `move` may need to be different, depending on semantics. Values
//! cannot be moved out of the closure and into the async block.
//! * BREAKING CHANGE: Due to the nature of futures, the actor's processor cannot be given a
//! mutable reference to the state of the actor. The state needs to live at least as long as the
//! future and our research could find no way to do this easily. So now when the actor returns a
//! status it will return the new state as well. See the examples for more info. The signature for
//! the processor is now:
//! ```ignore
//! impl<F, S, R> Processor<S, R> for F where
//! S: Send + Sync,
//! R: Future<Output = AxiomResult<S>> + Send + 'static,
//! F: (FnMut(S, Context, Message) -> R) + Send + Sync + 'static {}
//! ```
//! * BREAKING: Actors are now panic-tolerant! This means `assert`s and `panic`s will be caught
//! and converted, treated the same as errors. Errors should already be considered fatal, as
//! Actors should handle any errors in their own scope.
//! * BREAKING: Error types have been broken up to be more context-specific.
//! * BREAKING: A `start_on_launch` flag has been added to the `ActorSystemConfig` struct. This
//! allows for an ActorSystem to be created without immediately starting it. See `ActorSystem::start`
//! for how to start an unstarted `ActorSystem`.
//! * Helper methods have been added to `Status` to help with the return points in Actors. Each
//! variant has a corresponding function that takes the Actor's state. `Ok(Status::Done)` is
//! instead `Ok(Status::done(state))`.
//! * The user should be aware that, at runtime, Actors will follow the semantics of Rust
//! Futures. This means that an Actor awaiting a future will not process any messages nor will
//! continue executing until that future is ready to be polled again. While async/await will
//! provide ergonomic usage of async APIs, this can be a concern and can affect timing.
//! * A prelude has been introduced. Attempts will be made at keeping the prelude relatively the
//! same even across major versions, and we recommend using it whenever possible.
//! * More `log` points have been added across the codebase.
//!
//! [Release Notes for All Versions](https://github.com/rsimmonsjr/axiom/blob/master/RELEASE_NOTES.md)
//!
//! # Getting Started
//!
//! *An actor model is an architectural asynchronous programming paradigm characterized by the use
//! of actors for all processing activities.*
//!
//! Actors have the following characteristics:
//! 1. An actor can be interacted with only by means of messages.
//! 2. An actor processes only one message at a time.
//! 3. An actor will process a message only once.
//! 4. An actor can send a message to any other actor without knowledge of that actor's internals.
//! 5. Actors send only immutable data as messages, though they may have mutable internal state.
//! 6. Actors are location agnostic; they can be sent a message from anywhere in the cluster.
//!
//! Note that within the language of Rust, rule five cannot be enforced by Rust but is a best
//! practice which is important for developers creating actors based on Axiom. In Erlang and
//! Elixir rule five cannot be violated because of the structure of the language but this also
//! leads to performance limitations. It's better to allow internal mutable state and encourage
//! the good practice of not sending mutable messages.
//!
//! What is important to understand is that these rules combined together makes each actor operate
//! like a micro-service in the memory space of the program using them. Since actor messages are
//! immutable, actors can trade information safely and easily without copying large data
//! structures.
//!
//! Although programming in the actor model is quite an involved process you can get started with
//! Axiom in only a few lines of code.
//!
//! ```rust
//! use axiom::prelude::*;
//! use std::sync::Arc;
//! use std::time::Duration;
//!
//! let system = ActorSystem::create(ActorSystemConfig::default().thread_pool_size(2));
//!
//! let aid = system
//! .spawn()
//! .with(
//! 0 as usize,
//! |state: usize, _context: Context, _message: Message| async move {
//! Ok(Status::done(state))
//! }
//! )
//! .unwrap();
//!
//! aid.send(Message::new(11)).unwrap();
//!
//! // It is worth noting that you probably wouldn't just unwrap in real code but deal with
//! // the result as a panic in Axiom will take down a dispatcher thread and potentially
//! // hang the system.
//!
//! // This will wrap the value `17` in a Message for you!
//! aid.send_new(17).unwrap();
//!
//! // We can also create and send separately using just `send`, not `send_new`.
//! let message = Message::new(19);
//! aid.send(message).unwrap();
//!
//! // Another neat capability is to send a message after some time has elapsed.
//! aid.send_after(Message::new(7), Duration::from_millis(10)).unwrap();
//! aid.send_new_after(7, Duration::from_millis(10)).unwrap();
//! ```
//! This code creates an actor system, fetches a builder for an actor via the `spawn()` method,
//! spawns an actor and finally sends the actor a message. Once the actor is done processing a
//! message it returns the new state of the actor and the status after handling this message. In
//! this case we didnt change the state so we just return it. Creating an Axiom actor is literally
//! that easy but there is a lot more functionality available as well.
//!
//! Keep in mind that if you are capturing variables from the environment you will have to wrap
//! the `async move {}` block in another block and then move your variables into the first block.
//! Please see the test cases for more examples of this.
//!
//! Creating an actor from a struct is simple as well. Let's create one that
//! handles a couple of different message types:
//!
//! ```rust
//! use axiom::prelude::*;
//! use std::sync::Arc;
//!
//! let system = ActorSystem::create(ActorSystemConfig::default().thread_pool_size(2));
//!
//! struct Data {
//! value: i32,
//! }
//!
//! impl Data {
//! fn handle_bool(mut self, message: bool) -> ActorResult<Self> {
//! if message {
//! self.value += 1;
//! } else {
//! self.value -= 1;
//! }
//! Ok(Status::done(self))
//! }
//!
//! fn handle_i32(mut self, message: i32) -> ActorResult<Self> {
//! self.value += message;
//! Ok(Status::done(self))
//! }
//!
//! async fn handle(mut self, _context: Context, message: Message) -> ActorResult<Self> {
//! if let Some(msg) = message.content_as::<bool>() {
//! self.handle_bool(*msg)
//! } else if let Some(msg) = message.content_as::<i32>() {
//! self.handle_i32(*msg)
//! } else {
//! panic!("Failed to dispatch properly");
//! }
//! }
//! }
//!
//! let data = Data { value: 0 };
//! let aid = system.spawn().name("Fred").with(data, Data::handle).unwrap();
//!
//! aid.send_new(11).unwrap();
//! aid.send_new(true).unwrap();
//! aid.send_new(false).unwrap();
//! ```
//!
//! This code creates a named actor out of an arbitrary struct. Since the only requirement to make
//! an actor is to have a function that is compliant with the [`axiom::actors::Processor`] trait,
//! anything can be an actor. If this struct had been declared somewhere outside of your control you
//! could use it in an actor as state by declaring your own handler function and making the calls to
//! the 3rd party structure.
//!
//! *It's important to keep in mind that the starting state is moved into the actor and you will not
//! have external access to it afterwards.* This is by design and although you could conceivably use
//! a [`Arc`] or [`Mutex`] enclosing a structure as state, that would definitely be a bad idea as it
//! would break the rules we laid out for actors.
//!
//! There is a lot more to learn and explore and your best resource is the test code for Axiom. The
//! developers have a belief that test code should be well architected and well commented to act as
//! a set of examples for users of Axiom.
//!
//! # Detailed Examples
//! * [Hello World](https://github.com/rsimmonsjr/axiom/blob/master/examples/hello_world.rs): The
//! obligatory introduction to any computer system.
//! * [Dining Philosophers](https://github.com/rsimmonsjr/axiom/blob/master/examples/philosophers.rs):
//! An example of using Axiom to solve a classic Finite State Machine problem in computer science.
//! * [Monte Carlo](https://github.com/rsimmonsjr/axiom/blob/master/examples/montecarlo.rs): An
//! example of how to use Axiom for parallel computation.
//!
//! ## Design Principals of Axiom
//!
//! Based on previous experience with other actor models I wanted to design Axiom around some
//! core principles:
//! 1. **At its core an actor is just an function that processes messages.** The simplest actor is a
//! function that takes a message and simply ignores it. The benefit to the functional approach over
//! the Akka model is that it allows the user to create actors easily and simply. This is the notion
//! of _micro module programming_; the notion of building a complex system from the smallest
//! components. Software based on the actor model can get complicated; keeping it simple at the core
//! is fundamental to solid architecture.
//! 2. **Actors can be a Finite State Machine (FSM).** Actors receive and process messages nominally
//! in the order received. However, there are certain circumstances where an actor has to change to
//! another state and process other messages, skipping certain messages to be processed later.
//! 3. **When skipping messages, the messages must not move.** Akka allows the skipping of messages
//! by _stashing_ the message in another data structure and then restoring this stash later. This
//! process has many inherent flaws. Instead Axiom allows an actor to skip messages in its channel
//! but leave them where they are, increasing performance and avoiding many problems.
//! 4. **Actors use a bounded capacity channel.** In Axiom the message capacity for the actor's
//! channel is bounded, resulting in greater simplicity and an emphasis on good actor design.
//! 5. **Axiom should be kept as small as possible.** Axiom is the core of the actor model and
//! should not be expanded to include everything possible for actors. That should be the job of
//! libraries that extend Axiom. Axiom itself should be an example of _micro module programming_.
//! 6. **The tests are the best place for examples.** The tests of Axiom will be extensive and well
//! maintained and should be a resource for those wanting to use Axiom. They should not be a dumping
//! ground for copy-paste or throwaway code. The best tests will look like architected code.
//! 7. **A huge emphasis is put on crate user ergonomics.** Axiom should be easy to use.
use std::any::Any;
use std::error::Error;
use std::fmt::{Display, Formatter};
// Re-export futures so the user doesn't need to import it.
pub use futures;
use prelude::*;
pub mod actors;
pub mod cluster;
mod executor;
pub mod message;
pub mod system;
pub mod prelude;
/// A helper alias to ensure returned errors conform as needed.
pub type StdError = Box<dyn Error + Send + Sync + 'static>;
/// A type for a result from an actor's message processor.
/// A Result::Err is treated as a fatal error, and the Actor will be stopped.
pub type ActorResult<State> = Result<(State, Status), StdError>;
/// Wraps the payload recovered from a caught actor panic so it can be
/// propagated as a regular error.
#[derive(Debug)]
pub struct Panic {
    // Human-readable message extracted from the panic payload (see `From` below).
    panic_payload: String,
}
impl Display for Panic {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result |
}
impl Error for Panic {}
impl From<Box<dyn Any + Send + 'static>> for Panic {
    /// Extracts a readable message from a panic payload. Payloads are usually
    /// a `&'static str` or a `String`; anything else gets a fixed placeholder.
    fn from(val: Box<dyn Any + Send + 'static>) -> Self {
        let panic_payload = match val.downcast::<&'static str>() {
            Ok(s) => (*s).to_string(),
            Err(other) => other
                .downcast::<String>()
                .map(|s| *s)
                .unwrap_or_else(|_| "Panic payload unserializable".to_string()),
        };
        Self { panic_payload }
    }
}
#[cfg(test)]
mod tests {
use std::thread;
use std::time::Duration;
use log::LevelFilter;
use secc::{SeccReceiver, SeccSender};
use serde::{Deserialize, Serialize};
use super::*;
    /// Collects assertion results from actor threads so they can be replayed
    /// (and failed, if needed) on the main test thread.
    #[derive(Clone)]
    pub struct AssertCollect {
        // Sending half: actors push (passed, message) pairs here.
        tx: SeccSender<(bool, String)>,
        // Receiving half: `collect` drains and re-asserts on the test thread.
        rx: SeccReceiver<(bool, String)>,
    }
    impl AssertCollect {
        /// Creates a new collector backed by a 256-slot SECC channel.
        pub fn new() -> Self {
            let (tx, rx) = secc::create(256, Duration::from_millis(10));
            Self { tx, rx }
        }
        /// Records the condition, then panics on the calling thread if it is false.
        pub fn assert(&self, cond: bool, msg: impl Into<String>) {
            let m = msg.into();
            self.tx.send((cond, m.clone())).unwrap();
            if !cond {
                panic!("{}", m)
            }
        }
        /// Records a failure and unconditionally panics with the given message.
        pub fn panic(&self, msg: impl Into<String>) -> ! {
            let m = msg.into();
            self.tx.send((false, m.clone())).unwrap();
            panic!("{}", m)
        }
        /// Replays every recorded result on the caller's thread, failing the
        /// test if any recorded condition was false.
        pub fn collect(&self) {
            while let Ok((cond, s)) = self.rx.receive() {
                assert!(cond, "{}", s);
            }
        }
    }
    /// Initializes env_logger for tests. Safe to call from every test: the
    /// "already initialized" error from `try_init` is deliberately ignored.
    pub fn init_test_log() {
        let _ = env_logger::builder()
            .filter_level(LevelFilter::Warn)
            .is_test(true)
            .try_init();
    }
    /// Blocks the current thread for `millis` milliseconds.
    pub fn sleep(millis: u64) {
        thread::sleep(Duration::from_millis(millis))
    }
    /// A function that just returns `Ok(Status::Done)` which can be used as a handler for
    /// a simple dummy actor. Ignores its state, context, and message entirely.
    pub async fn simple_handler(_: (), _: Context, _: Message) -> ActorResult<()> {
        Ok(Status::done(()))
    }
/// A utility that waits for a certain number of messages to arrive in a certain time and
/// returns an `Ok<()>` when they do or an `Err<String>` when not.
pub fn await_received(aid: &Aid, count: u8, timeout_ms: u64) -> Result<(), String> {
use std::time::Instant;
let start = Instant::now();
let duration = Duration::from_millis(timeout_ms);
while aid.received().unwrap() < count as usize {
if Instant::elapsed(&start) > duration {
return Err(format!(
"Timed out after {}ms! Messages received: {}; Messages expected: {}",
timeout_ms,
aid.received().unwrap(),
count
));
}
}
Ok(())
}
#[test]
#[should_panic]
fn test_assert_receive() {
let tracker = AssertCollect::new();
let t2 = tracker.clone();
let join = thread::spawn(move || t2.panic("This is a panic"));
let _ = join.join();
tracker.collect();
}
/// This test shows how the simplest actor can be built and used. This actor uses a closure
/// that simply returns that the message is processed without doing anything with it.
#[test]
fn test_simplest_actor() {
init_test_log();
let system = ActorSystem::create(ActorSystemConfig::default().thread_pool_size(2));
// We spawn the actor using a closure. Note that because of a bug in the Rust compiler
// as of 2019-07-12 regarding type inference we have to specify all of the types manually
// but when that bug goes away this will be even simpler.
let aid = system
.spawn()
.with((), |_: (), _: Context, _: Message| {
async { Ok(Status::done(())) }
})
.unwrap();
// Send a message to the actor.
aid.send_new(11).unwrap();
// The actor will get two messages including the Start message.
await_received(&aid, 2, 1000).unwrap();
system.trigger_and_await_shutdown(None);
}
/// This test shows how the simplest struct-based actor can be built and used. This actor
/// merely returns that the message was processed.
#[test]
fn test_simplest_struct_actor() {
init_test_log();
let system = ActorSystem::create(ActorSystemConfig::default().thread_pool_size(2));
// We declare a basic struct that has a handle method that does basically nothing.
// Subsequently we will create that struct as a starting state when we spawn the actor
// and then send the actor a message.
struct Data {}
impl Data {
async fn handle(self, _: Context, _: Message) -> ActorResult<Self> {
Ok(Status::done(self))
}
}
let aid = system.spawn().with(Data {}, Data::handle).unwrap();
// Send a message to the actor.
aid.send_new(11).unwrap();
await_received(&aid, 2, 1000).unwrap();
system.trigger_and_await_shutdown(None);
}
/// This test shows how a closure based actor can be created to process different kinds of
/// messages and mutate the actor's state based upon the messages passed. Note that the
/// state of the actor is not available outside the actor itself.
#[test]
fn test_dispatching_with_closure() {
init_test_log();
let tracker = AssertCollect::new();
let t = tracker.clone();
let system = ActorSystem::create(ActorSystemConfig::default().thread_pool_size(2));
let starting_state: usize = 0 as usize;
let closure = move |mut state: usize, context: Context, message: Message| {
let t = t.clone();
async move {
// Expected messages in the expected order.
let expected: Vec<i32> = vec![11, 13, 17];
// Attempt to downcast to expected message.
if let Some(_msg) = message.content_as::<SystemMsg>() {
state += 1;
Ok(Status::done(state))
} else if let Some(msg) = message.content_as::<i32>() {
t.assert(expected[state - 1] == *msg, "Unexpected message content");
t.assert(state == context.aid.received().unwrap(), "Unexpected state");
state += 1;
Ok(Status::done(state))
} else if let Some(_msg) = message.content_as::<SystemMsg>() {
// Note that we put this last because it only is ever received once, we
// want the most frequently received messages first.
Ok(Status::done(state))
} else {
t.panic("Failed to dispatch properly")
}
}
};
let aid = system.spawn().with(starting_state, closure).unwrap();
// First message will always be the SystemMsg::Start.
assert_eq!(1, aid.sent().unwrap());
// Send some messages to the actor in the order required in the test. In a real actor
// its unlikely any order restriction would be needed. However this test makes sure that
// the messages are processed correctly.
aid.send_new(11 as i32).unwrap();
assert_eq!(2, aid.sent().unwrap());
aid.send_new(13 as i32).unwrap();
assert_eq!(3, aid.sent().unwrap());
aid.send_new(17 as i32).unwrap();
assert_eq!(4, aid.sent().unwrap());
await_received(&aid, 4, 1000).unwrap();
system.trigger_and_await_shutdown(None);
tracker.collect();
}
/// This test shows how a struct-based actor can be used and process different kinds of
/// messages and mutate the actor's state based upon the messages passed. Note that the
/// state of the actor is not available outside the actor itself.
#[test]
fn test_dispatching_with_struct() {
init_test_log();
let tracker = AssertCollect::new();
let system = ActorSystem::create(ActorSystemConfig::default().thread_pool_size(2));
// We create a basic struct with a handler and use that handler to dispatch to other
// inherent methods in the struct. Note that we don't have to implement any traits here
// and there is nothing forcing the handler to be an inherent method.
struct Data {
value: i32,
tracker: AssertCollect,
}
impl Data {
fn handle_bool(mut self, message: bool) -> ActorResult<Self> {
if message {
self.value += 1;
} else {
self.value -= 1;
}
Ok(Status::done(self)) // This assertion will fail but we still have to return.
}
fn handle_i32(mut self, message: i32) -> ActorResult<Self> {
self.value += message;
Ok(Status::done(self)) // This assertion will fail but we still have to return.
}
async fn handle(self, _context: Context, message: Message) -> ActorResult<Self> {
if let Some(msg) = message.content_as::<bool>() {
self.handle_bool(*msg)
} else if let Some(msg) = message.content_as::<i32>() {
self.handle_i32(*msg)
} else if let Some(_msg) = message.content_as::<SystemMsg>() {
// Note that we put this last because it only is ever received once, we
// want the most frequently received messages first.
Ok(Status::done(self))
} else {
self.tracker.panic("Failed to dispatch properly")
}
}
}
let data = Data {
value: 0,
tracker: tracker.clone(),
};
let aid = system.spawn().with(data, Data::handle).unwrap();
// Send some messages to the actor.
aid.send_new(11).unwrap();
aid.send_new(true).unwrap();
aid.send_new(true).unwrap();
aid.send_new(false).unwrap();
await_received(&aid, 4, 1000).unwrap();
system.trigger_and_await_shutdown(None);
tracker.collect();
}
/// Tests and demonstrates the process to create a closure that captures the environment
/// outside the closure in a manner sufficient to be used in a future.
#[test]
fn test_closure_with_move() {
init_test_log();
let system = ActorSystem::create(ActorSystemConfig::default().thread_pool_size(2));
let target_aid = system.spawn().with((), simple_handler).unwrap();
let aid_moved = target_aid.clone(); // clone for the closure
let aid = system
.spawn()
.with((), move |_: (), _: Context, _: Message| {
// Each future needs its own copy of the target aid.
let tgt = aid_moved.clone();
async move {
tgt.send_new(11)?;
Ok(Status::done(()))
}
})
.unwrap();
aid.send_new(11).unwrap();
await_received(&target_aid, 2, 1000).unwrap();
system.trigger_and_await_shutdown(None);
}
/// Tests an example where one actor starts another actor, the actors exchange a simple
/// ping-pong message and then the first actor triggers a shutdown when the pong message is
/// received. Note that these actors just use simple functions to accomplish the task though
/// they could have used functions on structures, closures, and even had a multiple methods
/// to handle the messages.
#[test]
fn test_ping_pong() {
/// A simple enum used as test messages.
#[derive(Serialize, Deserialize)]
pub enum PingPong {
Ping(Aid),
Pong,
}
async fn ping(_: (), context: Context, message: Message) -> ActorResult<()> {
if let Some(msg) = message.content_as::<PingPong>() {
match &*msg {
PingPong::Pong => {
context.system.trigger_shutdown();
Ok(Status::done(()))
}
_ => Err("Unexpected message".to_string().into()),
}
} else if let Some(msg) = message.content_as::<SystemMsg>() {
// Start messages happen only once so we keep them last.
match &*msg {
SystemMsg::Start => {
// Now we will spawn a new actor to handle our pong and send to it.
let pong_aid = context.system.spawn().with((), pong)?;
pong_aid.send_new(PingPong::Ping(context.aid.clone()))?;
Ok(Status::done(()))
}
_ => Ok(Status::done(())),
}
} else {
Ok(Status::done(()))
}
}
async fn pong(_: (), _: Context, message: Message) -> ActorResult<()> {
if let Some(msg) = message.content_as::<PingPong>() {
match &*msg {
PingPong::Ping(from) => {
from.send_new(PingPong::Pong)?;
Ok(Status::done(()))
}
_ => Err("Unexpected message".into()),
}
} else {
Ok(Status::done(()))
}
}
let system = ActorSystem::create(ActorSystemConfig::default().thread_pool_size(2));
system.spawn().with((), ping).unwrap();
system.await_shutdown(None);
}
}
| {
write!(f, "{}", self.panic_payload)
} |
app.js | require('./bootstrap');
// Expose Vue globally so inline scripts and legacy components can reach it.
window.Vue = require('vue');

import { BootstrapVue } from 'bootstrap-vue'
import store from './store'
import VueSweetalert2 from 'vue-sweetalert2';
import 'sweetalert2/dist/sweetalert2.min.css';

// Install plugins before any component is instantiated.
Vue.use(VueSweetalert2);
Vue.use(BootstrapVue)

// Globally registered components used by the Blade templates.
Vue.component('example-component', require('./components/ExampleComponent.vue').default);
Vue.component('form-contato', require('./components/FormContato.vue').default);
Vue.component('table-contato', require('./components/TableContato.vue').default);

// Root instance mounted on the page shell.
const app = new Vue({
    store,
    el: '#app',
});
expression.go | package Scheme
// Expression is the universal type for any Scheme expression value.
type Expression interface{}

// isPrimitive reports whether exp is a self-evaluating (atomic) value:
// an integer, a floating point number, or a string.
// Fixes the original duplicated operand `isInteger(exp) || isInteger(exp)`,
// which ignored the other primitive predicates defined below.
func isPrimitive(exp Expression) bool {
	return isInteger(exp) || isNumber(exp) || isString(exp)
}

// isNumber reports whether exp holds a float32 value.
func isNumber(exp Expression) bool {
	_, ok := exp.(float32)
	return ok
}

// isInteger reports whether exp holds an int value.
func isInteger(exp Expression) bool {
	_, ok := exp.(int)
	return ok
}

// isString reports whether exp holds a string value.
func isString(exp Expression) bool {
	_, ok := exp.(string)
	return ok
}
| {
_, ok := exp.(float32)
return ok
} |
date.component.ts | import { Component, ViewChild } from '@angular/core'
import { MaterialComponent } from '../MaterialComponent';
//@ts-ignore
import DateTimeComponent from 'formiojs/components/datetime/DateTime.js';
//@ts-ignore
import { momentDate } from 'formiojs/utils/utils.js';
import {FormControl} from '@angular/forms';
@Component({
selector: 'mat-formio-date',
host: {
'(document:click)': 'clickOutside($event)',
},
template: `
<mat-formio-form-field [instance]="instance" [componentTemplate]="componentTemplate"></mat-formio-form-field>
<ng-template #componentTemplate let-hasLabel>
<mat-label *ngIf="hasLabel" fxFill>
<span [instance]="instance" matFormioLabel></span>
</mat-label>
<form class="example-form">
<mat-datepicker-toggle [disabled]="isDisabled()" (click)="toggleCalendar($event)">
<mat-icon matDatepickerToggleIcon *ngIf="enableTime && !enableDate">schedule</mat-icon>
</mat-datepicker-toggle>
<mat-form-field class="example-full-width">
<input
*ngIf="enableTime && enableDate"
matInput
type="datetime-local"
[placeholder]="instance.component.placeholder"
[formControl]="displayControl"
(input)="onChangeInput()"
[readonly]="!allowManualInput"
>
<input
*ngIf="enableTime && !enableDate"
matInput
[placeholder]="instance.component.placeholder"
[formControl]="displayControl"
[matMask]="formatTime"
(input)="onChangeInput()"
[readonly]="!allowManualInput"
>
<input
*ngIf="!enableTime && enableDate"
matInput
[placeholder]="instance.component.placeholder"
[formControl]="displayControl"
(input)="onChangeInput()"
[readonly]="!allowManualInput"
>
</mat-form-field>
<mat-formio-calendar
#calendar
[minDate]="instance.component.datePicker.minDate || ''"
[maxDate]="instance.component.datePicker.maxDate || ''"
[dateFilter]="dateFilter"
[hidden]="!isPickerOpened"
(dateSelectEvent)="onChangeDate($event)"
(timeSelectEvent)="onChangeTime($event)"
[enableDate]="enableDate"
[enableTime]="enableTime"
[hourStep]="instance.component.timePicker.hourStep"
[minuteStep]="instance.component.timePicker.minuteStep"
[instance]="instance"
></mat-formio-calendar>
<mat-error *ngIf="instance.error">{{ instance.error.message }}</mat-error>
</form>
</ng-template>
`
})
export class MaterialDateComponent extends MaterialComponent {
  // Backing control for the time portion of the widget.
  public timeControl: FormControl = new FormControl();
  // Control bound to the visible text input (formatted representation).
  public displayControl: FormControl = new FormControl();
  // Whether the calendar/time popup is currently shown.
  public isPickerOpened?: boolean;
  // Last date picked from the calendar, stored as a UTC ISO string.
  public selectedDate: any;
  // Last time picked from the time picker, as 'HH:mm'.
  public selectedTime: any = '00:00';
  // When false the text input is read-only; values come from the picker only.
  public allowManualInput: boolean = true;
  @ViewChild('calendar') calendar:any;
  // Date entry is on unless the component explicitly disables it.
  get enableDate() {
    return this.instance && this.instance.component.enableDate !== false;
  }
  // Time entry is off unless the component explicitly enables it.
  get enableTime() {
    return this.instance && this.instance.component.enableTime === true;
  }
setDisplayControlValue(value:any) {
const format = `YYYY-MM-DD${this.enableTime ? 'THH:mm' : ''}`;
value = value || this.getDateTimeValue();
if (value) {
this.displayControl.setValue(momentDate(value).format(format));
}
else {
this.displayControl.setValue('');
}
}
  // Calendar date-selection handler: stores the pick as a UTC ISO string and
  // recomputes the combined date/time control value.
  onChangeDate(event:any) {
    this.selectedDate = momentDate(event).utc().format();
    this.control.setValue(this.selectedDate);
    this.setDateTime();
  }
  // Time-selection handler: remembers the chosen 'HH:mm' and, when a date is
  // already selected (or the widget is time-only), updates the control value.
  onChangeTime(time:any) {
    this.selectedTime = time;
    if (this.selectedDate || (this.enableTime && !this.enableDate)) {
      this.setDateTime();
    }
  }
  // Manual text-entry handler: accepts the typed value only if it passes the
  // date filter and min/max bounds, otherwise resets the control to empty.
  onChangeInput() {
    const value = this.dateFilter(this.displayControl.value) &&
      this.checkMinMax(this.displayControl.value) ? this.displayControl.value : '';
    this.control.setValue(value);
    this.onChange();
  }
getDateTimeValue() {
let newDate = '';
let isSelectedTime = false;
if (this.calendar && this.calendar.selectedTime) {
const { selectedTime } = this.calendar;
isSelectedTime = true;
if (this.selectedTime !== selectedTime) {
this.selectedTime = selectedTime;
}
}
if (this.enableTime && this.enableDate) {
const [hours, minutes] = this.selectedTime.split(':');
newDate = isSelectedTime
? momentDate(this.selectedDate)
.hours(Number.parseInt(hours))
.minutes(Number.parseInt(minutes))
.utc()
: this.selectedDate;
} | }
if (this.enableTime && !this.enableDate) {
const [hours, minutes] = this.selectedTime.split(':');
newDate = momentDate(new Date())
.hours(Number.parseInt(hours))
.minutes(Number.parseInt(minutes))
.seconds(0)
.utc();
}
return newDate;
}
setDateTime() {
this.control.setValue(this.getDateTimeValue());
this.onChange();
}
  // Wires the formio instance into this component, syncs the disabled state
  // of both controls, and normalizes the configured datePicker min/max bounds.
  setInstance(instance: any) {
    super.setInstance(instance);
    this.isDisabled() ? this.control.disable() : this.control.enable();
    this.isDisabled() ? this.displayControl.disable() : this.displayControl.enable();
    if (this.instance) {
      this.allowManualInput = this.instance.component.allowInput === false ? false : true;
      if (this.instance.component && this.instance.component.datePicker) {
        const {minDate: min, maxDate: max} = this.instance.component.datePicker;
        // Expand year-only bounds to full dates; moment.js cannot reliably
        // parse a bare year.
        const { minDate, maxDate } = this.improveMinMaxDate(min, max);
        this.instance.component.datePicker.minDate = minDate;
        this.instance.component.datePicker.maxDate = maxDate;
      }
    }
  }
  // Opens/closes the calendar popup. On open, seeds the embedded calendar with
  // the control's current value (when valid) so it displays the stored state.
  toggleCalendar(event:any) {
    if (!this.isDisabled()) {
      if (!this.isPickerOpened) {
        const date = this.getValue();
        if (date &&this.checkMinMax(date)) {
          if (this.enableDate && this.calendar && !this.calendar.selectedDate) {
            this.calendar.setExistedDate(momentDate(date).toDate())
          }
          if (this.enableTime && this.calendar && !this.calendar.selectedTime) {
            const time = momentDate(date)
            this.calendar.setExistedTime(time.format('HH:mm'), time.format('h:mm:A'))
          }
        }
      }
      this.isPickerOpened = !this.isPickerOpened;
      // Keep the click from reaching the document handler, which would
      // immediately close the popup again (see clickOutside).
      event.stopPropagation();
    }
  }
isDisabled() {
const { readonly, disabled } = this.instance.component;
return readonly || disabled || this.instance.root.options.readOnly
}
  // Formats a raw value with the component's configured display format;
  // empty values map to the instance's "empty" placeholder value.
  public formatTime = (value:any) => {
    if (!value) {
      return this.instance.emptyValue;
    }
    return momentDate(value).format(this.instance.component.format);
  }
  // Accepts a new value only when it passes the date filter and min/max
  // bounds; updates both the stored value and the visible text.
  setValue(value:any) {
    if (this.dateFilter(value) && this.checkMinMax(value)) {
      this.setDisplayControlValue(value);
      super.setValue(value);
    }
  }
onChange() {
const value = this.dateFilter(this.getValue()) && this.checkMinMax(this.getValue()) ? this.getValue() : '';
this.setDisplayControlValue(value);
}
  // Re-validates and refreshes the displayed value right before submission.
  beforeSubmit() {
    this.onChange();
    super.beforeSubmit();
  }
  // Validates value against the datePicker min/max bounds (year-only bounds
  // are first expanded to full dates). Returns true when no bounds are set.
  checkMinMax(value:any) {
    let isValid = true;
    const { minDate: min, maxDate: max } = this.instance.component.datePicker;
    const { minDate, maxDate } = this.improveMinMaxDate(min, max);
    if (minDate) {
      isValid = momentDate(value).isSameOrAfter(minDate);
    }
    if (maxDate && isValid) {
      isValid = momentDate(value).isSameOrBefore(maxDate);
    }
    return isValid;
  }
disableWeekends(d: Date) {
if (d && d.getDay) {
const day = d.getDay();
return day !== 0 && day !== 6;
}
return true;
}
  // Date-picker filter: disallows any day present in `dates`, compared by
  // calendar day (time-of-day is ignored).
  disableDates(dates: Array<string>, d: Date) {
    const formattedDates = dates.map((date) => momentDate(date).format('YYYY-MM-DD'));
    return !formattedDates.includes(momentDate(d).format('YYYY-MM-DD'));
  }
  // Combined availability filter fed to the calendar: applies the optional
  // weekend restriction and the widget's explicit disabled-dates list.
  dateFilter = (d: Date | null): boolean => {
    const isValid = this.instance.component.datePicker.disableWeekends ? this.disableWeekends(d!) : true;
    return this.instance.component.widget.disabledDates && isValid ?
      this.disableDates(this.instance.component.widget.disabledDates.split(','), d!) : isValid;
  }
clickOutside(event:any) {
if (!this.calendar.element.nativeElement.contains(event.target) && this.isPickerOpened)
this.toggleCalendar(event);
}
  // Expands year-only bounds ('YYYY') to concrete dates: the min becomes
  // Jan 1 of that year; the max becomes Dec 31 of the previous year
  // (i.e. the configured max year is exclusive).
  improveMinMaxDate(minDate:any, maxDate:any) {
    if (minDate && minDate.length === 4) {
      minDate = momentDate(`${minDate}-01-01`).format('YYYY-MM-DD');
    }
    if (maxDate && maxDate.length === 4) {
      maxDate = momentDate(`${maxDate}-01-01`).subtract(1, 'day').format('YYYY-MM-DD');
    }
    return {minDate, maxDate};
  }
}
// Register this Material renderer with the base formio DateTime component and
// re-export the wrapped component for module registration.
DateTimeComponent.MaterialComponent = MaterialDateComponent;
export { DateTimeComponent };
if (!this.enableTime && this.enableDate) {
newDate = this.selectedDate; |
resample_test.go | package photoprism
import (
"os"
"strings"
"testing"
"github.com/disintegration/imaging"
"github.com/photoprism/photoprism/internal/classify"
"github.com/photoprism/photoprism/internal/entity"
"github.com/photoprism/photoprism/internal/nsfw"
"github.com/photoprism/photoprism/internal/thumb"
"github.com/photoprism/photoprism/internal/config"
"github.com/stretchr/testify/assert"
)
// TestResample_Start imports the example originals through the full pipeline
// and then runs a thumbnail resample pass; any resample error fails the test.
func TestResample_Start(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	conf := config.TestConfig()
	if err := conf.CreateDirectories(); err != nil {
		t.Fatal(err)
	}
	conf.InitializeTestData(t)
	// Build the import pipeline (classification, NSFW detection, conversion,
	// indexing) required before thumbnails can be generated.
	tf := classify.New(conf.AssetsPath(), conf.TensorFlowOff())
	nd := nsfw.New(conf.NSFWModelPath())
	convert := NewConvert(conf)
	ind := NewIndex(conf, tf, nd, convert)
	imp := NewImport(conf, ind, convert)
	opt := ImportOptionsMove(conf.ImportPath())
	imp.Start(opt)
	rs := NewResample(conf)
	err := rs.Start(true)
	if err != nil {
		t.Fatal(err)
	}
}
// TestThumb_Filename checks thumbnail path construction and its validation of
// hash length, dimensions, and destination folder.
func TestThumb_Filename(t *testing.T) {
	conf := config.TestConfig()
	thumbsPath := conf.CachePath() + "/_tmp"
	defer os.RemoveAll(thumbsPath)
	if err := conf.CreateDirectories(); err != nil {
		t.Error(err)
	}
	t.Run("", func(t *testing.T) {
		filename, err := thumb.Filename("99988", thumbsPath, 150, 150, thumb.ResampleFit, thumb.ResampleNearestNeighbor)
		if err != nil {
			t.Fatal(err)
		}
		// The path is sharded by the leading characters of the file hash.
		assert.True(t, strings.HasSuffix(filename, "/storage/testdata/cache/_tmp/9/9/9/99988_150x150_fit.jpg"))
	})
	t.Run("hash too short", func(t *testing.T) {
		_, err := thumb.Filename("999", thumbsPath, 150, 150, thumb.ResampleFit, thumb.ResampleNearestNeighbor)
		if err == nil {
			t.FailNow()
		}
		assert.Equal(t, "resample: file hash is empty or too short (999)", err.Error())
	})
	t.Run("invalid width", func(t *testing.T) {
		_, err := thumb.Filename("99988", thumbsPath, -4, 150, thumb.ResampleFit, thumb.ResampleNearestNeighbor)
		if err == nil {
			t.FailNow()
		}
		assert.Equal(t, "resample: width exceeds limit (-4)", err.Error())
	})
	t.Run("invalid height", func(t *testing.T) {
		_, err := thumb.Filename("99988", thumbsPath, 200, -1, thumb.ResampleFit, thumb.ResampleNearestNeighbor)
		if err == nil {
			t.FailNow()
		}
		assert.Equal(t, "resample: height exceeds limit (-1)", err.Error())
	})
	t.Run("empty thumbpath", func(t *testing.T) {
		path := ""
		_, err := thumb.Filename("99988", path, 200, 150, thumb.ResampleFit, thumb.ResampleNearestNeighbor)
		if err == nil {
			t.FailNow()
		}
		assert.Equal(t, "resample: folder is empty", err.Error())
	})
}
func TestThumb_FromFile(t *testing.T) |
// TestThumb_Create renders a real example image into a thumbnail and checks
// both the happy path (aspect-fit result written to disk) and rejection of
// invalid target dimensions.
func TestThumb_Create(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	conf := config.TestConfig()
	thumbsPath := conf.CachePath() + "/_tmp"
	defer os.RemoveAll(thumbsPath)
	if err := conf.CreateDirectories(); err != nil {
		t.Error(err)
	}
	t.Run("valid parameter", func(t *testing.T) {
		expectedFilename, err := thumb.Filename("12345", thumbsPath, 150, 150, thumb.ResampleFit, thumb.ResampleNearestNeighbor)
		if err != nil {
			t.Error(err)
		}
		img, err := imaging.Open(conf.ExamplesPath()+"/elephants.jpg", imaging.AutoOrientation(true))
		if err != nil {
			t.Errorf("can't open original: %s", err)
		}
		res, err := thumb.Create(img, expectedFilename, 150, 150, thumb.ResampleFit, thumb.ResampleNearestNeighbor)
		if err != nil || res == nil {
			t.Fatal("err should be nil and res should NOT be nil")
		}
		thumbnail := res
		// ResampleFit preserves aspect ratio: 150 wide implies 99 high here.
		bounds := thumbnail.Bounds()
		assert.Equal(t, 150, bounds.Dx())
		assert.Equal(t, 99, bounds.Dy())
		assert.FileExists(t, expectedFilename)
	})
	t.Run("invalid width", func(t *testing.T) {
		expectedFilename, err := thumb.Filename("12345", thumbsPath, 150, 150, thumb.ResampleFit, thumb.ResampleNearestNeighbor)
		if err != nil {
			t.Error(err)
		}
		img, err := imaging.Open(conf.ExamplesPath()+"/elephants.jpg", imaging.AutoOrientation(true))
		if err != nil {
			t.Errorf("can't open original: %s", err)
		}
		res, err := thumb.Create(img, expectedFilename, -1, 150, thumb.ResampleFit, thumb.ResampleNearestNeighbor)
		if err == nil || res == nil {
			t.Fatal("err and res should NOT be nil")
		}
		thumbnail := res
		assert.Equal(t, "resample: width has an invalid value (-1)", err.Error())
		bounds := thumbnail.Bounds()
		assert.NotEqual(t, 150, bounds.Dx())
	})
	t.Run("invalid height", func(t *testing.T) {
		expectedFilename, err := thumb.Filename("12345", thumbsPath, 150, 150, thumb.ResampleFit, thumb.ResampleNearestNeighbor)
		if err != nil {
			t.Error(err)
		}
		img, err := imaging.Open(conf.ExamplesPath()+"/elephants.jpg", imaging.AutoOrientation(true))
		if err != nil {
			t.Errorf("can't open original: %s", err)
		}
		res, err := thumb.Create(img, expectedFilename, 150, -1, thumb.ResampleFit, thumb.ResampleNearestNeighbor)
		if err == nil || res == nil {
			t.Fatal("err and res should NOT be nil")
		}
		thumbnail := res
		assert.Equal(t, "resample: height has an invalid value (-1)", err.Error())
		bounds := thumbnail.Bounds()
		assert.NotEqual(t, 150, bounds.Dx())
	})
}
| {
conf := config.TestConfig()
thumbsPath := conf.CachePath() + "/_tmp"
defer os.RemoveAll(thumbsPath)
if err := conf.CreateDirectories(); err != nil {
t.Error(err)
}
t.Run("valid parameter", func(t *testing.T) {
fileModel := &entity.File{
FileName: conf.ExamplesPath() + "/elephants.jpg",
FileHash: "1234568889",
}
thumbnail, err := thumb.FromFile(fileModel.FileName, fileModel.FileHash, thumbsPath, 224, 224)
assert.Nil(t, err)
assert.FileExists(t, thumbnail)
})
t.Run("hash too short", func(t *testing.T) {
fileModel := &entity.File{
FileName: conf.ExamplesPath() + "/elephants.jpg",
FileHash: "123",
}
_, err := thumb.FromFile(fileModel.FileName, fileModel.FileHash, thumbsPath, 224, 224)
if err == nil {
t.Fatal("err should NOT be nil")
}
assert.Equal(t, "resample: file hash is empty or too short (123)", err.Error())
})
t.Run("filename too short", func(t *testing.T) {
fileModel := &entity.File{
FileName: "xxx",
FileHash: "12367890",
}
_, err := thumb.FromFile(fileModel.FileName, fileModel.FileHash, thumbsPath, 224, 224)
if err == nil {
t.FailNow()
}
assert.Equal(t, "resample: image filename is empty or too short (xxx)", err.Error())
})
} |
main.go | // Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"context"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"os"
"github.com/golang/glog"
"github.com/golang/protobuf/jsonpb"
"github.com/google/keytransparency/core/testdata"
"github.com/google/keytransparency/core/testutil"
"github.com/google/keytransparency/impl/authentication"
"github.com/google/keytransparency/impl/integration"
"github.com/google/tink/go/signature"
"github.com/google/tink/go/tink"
"github.com/google/trillian/types"
tpb "github.com/google/keytransparency/core/api/type/type_go_proto"
pb "github.com/google/keytransparency/core/api/v1/keytransparency_go_proto"
tinkpb "github.com/google/tink/proto/tink_go_proto"
)
var (
	// testdataDir is where the generated vectors are written (flag-overridable).
	testdataDir = flag.String("testdata", "core/testdata", "The directory in which to place the generated test data")
)

const (
	// openssl ecparam -name prime256v1 -genkey -out p256-key.pem
	testPrivKey1 = `-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIBoLpoKGPbrFbEzF/ZktBSuGP+Llmx2wVKSkbdAdQ+3JoAoGCCqGSM49
AwEHoUQDQgAE+xVOdphkfpEtl7OF8oCyvWw31dV4hnGbXDPbdFlL1nmayhnqyEfR
dXNlpBT2U9hXcSxliKI1rHrAJFDx3ncttA==
-----END EC PRIVATE KEY-----`
	// openssl ec -in p256-key.pem -pubout -out p256-pubkey.pem
	testPubKey1 = `-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE+xVOdphkfpEtl7OF8oCyvWw31dV4
hnGbXDPbdFlL1nmayhnqyEfRdXNlpBT2U9hXcSxliKI1rHrAJFDx3ncttA==
-----END PUBLIC KEY-----`
	// appID is the application namespace used for every generated entry.
	appID = "app"
)
// main boots an in-memory key-transparency environment and writes the
// generated interoperability test vectors to the -testdata directory.
func main() {
	flag.Parse()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	env, err := integration.NewEnv(ctx)
	if err != nil {
		glog.Fatalf("Could not create Env: %v", err)
	}
	defer env.Close()
	if err := GenerateTestVectors(ctx, env); err != nil {
		glog.Fatalf("GenerateTestVectors(): %v", err)
	}
}
// GenerateTestVectors verifies set/get semantics.
//
// It replays a scripted sequence of operations against the key server: each
// step fetches and client-side-verifies the user's current entry, records
// the verified response as a test vector, and then (when setProfile is
// non-nil) writes a new profile for that user.
func GenerateTestVectors(ctx context.Context, env *integration.Env) error {
	if err := signature.Register(); err != nil {
		return err
	}
	// Create lists of signers.
	signers1 := testutil.SignKeysetsFromPEMs(testPrivKey1)
	// Create lists of authorized keys
	authorizedKeys1 := testutil.VerifyKeysetFromPEMs(testPubKey1).Keyset()
	// Collect a list of valid GetEntryResponses
	getEntryResps := make([]testdata.GetEntryResponseVector, 0)
	// Start with an empty trusted log root
	slr := &types.LogRootV1{}
	for _, tc := range []struct {
		desc           string
		wantProfile    []byte
		setProfile     []byte
		ctx            context.Context
		userID         string
		signers        []*tink.KeysetHandle
		authorizedKeys *tinkpb.Keyset
	}{
		{
			desc:           "empty_alice",
			wantProfile:    nil,
			setProfile:     []byte("alice-key1"),
			ctx:            authentication.WithOutgoingFakeAuth(ctx, "alice"),
			userID:         "alice",
			signers:        signers1,
			authorizedKeys: authorizedKeys1,
		},
		{
			desc:           "bob0_set",
			wantProfile:    nil,
			setProfile:     []byte("bob-key1"),
			ctx:            authentication.WithOutgoingFakeAuth(ctx, "bob"),
			userID:         "bob",
			signers:        signers1,
			authorizedKeys: authorizedKeys1,
		},
		{
			desc:           "set_carol",
			wantProfile:    nil,
			setProfile:     []byte("carol-key1"),
			ctx:            authentication.WithOutgoingFakeAuth(ctx, "carol"),
			userID:         "carol",
			signers:        signers1,
			authorizedKeys: authorizedKeys1,
		},
		{
			desc:           "bob1_get",
			wantProfile:    []byte("bob-key1"),
			setProfile:     nil,
			ctx:            context.Background(),
			userID:         "bob",
			signers:        signers1,
			authorizedKeys: authorizedKeys1,
		},
		{
			desc:           "bob1_set",
			wantProfile:    []byte("bob-key1"),
			setProfile:     []byte("bob-key2"),
			ctx:            authentication.WithOutgoingFakeAuth(ctx, "bob"),
			userID:         "bob",
			signers:        signers1,
			authorizedKeys: authorizedKeys1,
		},
	} {
		// Check profile.
		e, err := env.Cli.GetEntry(ctx, &pb.GetEntryRequest{
			DomainId:      env.Domain.DomainId,
			UserId:        tc.userID,
			AppId:         appID,
			FirstTreeSize: int64(slr.TreeSize),
		})
		if err != nil {
			return fmt.Errorf("gen-test-vectors: GetEntry(): %v", err)
		}
		var newslr *types.LogRootV1
		if _, newslr, err = env.Client.VerifyGetEntryResponse(ctx, env.Domain.DomainId, appID, tc.userID, *slr, e); err != nil {
			return fmt.Errorf("gen-test-vectors: VerifyGetEntryResponse(): %v", err)
		}
		if got, want := e.GetCommitted().GetData(), tc.wantProfile; !bytes.Equal(got, want) {
			return fmt.Errorf("gen-test-vectors: VerifiedGetEntry(%v): %s, want %s", tc.userID, got, want)
		}
		// Update the trusted root on the first revision, then let it fall behind
		// every few revisions to make consistency proofs more interesting.
		trust := newslr.TreeSize%5 == 1
		if trust {
			slr = newslr
		}
		getEntryResps = append(getEntryResps, testdata.GetEntryResponseVector{
			Desc:        tc.desc,
			AppID:       appID,
			UserID:      tc.userID,
			Resp:        e,
			TrustNewLog: trust,
		})
		// Update profile.
		if tc.setProfile != nil {
			u := &tpb.User{
				DomainId:       env.Domain.DomainId,
				AppId:          appID,
				UserId:         tc.userID,
				PublicKeyData:  tc.setProfile,
				AuthorizedKeys: tc.authorizedKeys,
			}
			// NOTE(review): cancel is deferred inside the loop, so timeout
			// contexts are only released when the function returns — consider
			// calling cancel() right after Update instead.
			cctx, cancel := context.WithTimeout(tc.ctx, env.Timeout)
			defer cancel()
			_, err := env.Client.Update(cctx, u, tc.signers)
			if err != nil {
				return fmt.Errorf("gen-test-vectors: Update(%v): %v", tc.userID, err)
			}
		}
	}
	if err := SaveTestVectors(*testdataDir, env, getEntryResps); err != nil {
		return fmt.Errorf("gen-test-vectors: SaveTestVectors(): %v", err)
	}
	return nil
}
// SaveTestVectors generates test vectors for interoprability testing.
func SaveTestVectors(dir string, env *integration.Env, resps []testdata.GetEntryResponseVector) error | {
marshaler := &jsonpb.Marshaler{
Indent: "\t",
}
// Output all key material needed to verify the test vectors.
domainFile := dir + "/domain.json"
f, err := os.Create(domainFile)
if err != nil {
return err
}
defer f.Close()
if err := marshaler.Marshal(f, env.Domain); err != nil {
return fmt.Errorf("gen-test-vectors: jsonpb.Marshal(): %v", err)
}
// Save list of responses
respFile := dir + "/getentryresponse.json"
out, err := json.MarshalIndent(resps, "", "\t")
if err != nil {
return fmt.Errorf("gen-test-vectors: json.Marshal(): %v", err)
}
if err := ioutil.WriteFile(respFile, out, 0666); err != nil {
return fmt.Errorf("gen-test-vectors: WriteFile(%v): %v", respFile, err)
}
return nil
} |
|
runtime.rs | // Copyright (c) Microsoft. All rights reserved.
use std::collections::HashMap;
use std::convert::From;
use std::ops::Deref;
use std::time::Duration;
use base64;
use failure::{Fail, ResultExt};
use futures::prelude::*;
use futures::{future, stream, Async, Stream};
use hyper::{Body, Chunk as HyperChunk, Client};
use log::Level;
use serde_json;
use url::Url;
use client::DockerClient;
use config::DockerConfig;
use docker::apis::client::APIClient;
use docker::apis::configuration::Configuration;
use docker::models::{ContainerCreateBody, InlineResponse200, InlineResponse2001, NetworkConfig};
use edgelet_core::{
pid::Pid, LogOptions, Module, ModuleRegistry, ModuleRuntime, ModuleRuntimeState, ModuleSpec,
ModuleTop, RegistryOperation, RuntimeOperation, SystemInfo as CoreSystemInfo,
};
use edgelet_http::{UrlConnector, UrlExt};
use edgelet_utils::{ensure_not_empty_with_context, log_failure};
use error::{Error, ErrorKind, Result};
use module::{runtime_state, DockerModule, MODULE_TYPE as DOCKER_MODULE_TYPE};
// serde_json deserializer type alias; used only to name a concrete
// `Deserializer` when materializing serde error values.
type Deserializer = &'static mut serde_json::Deserializer<serde_json::de::IoRead<std::io::Empty>>;
// Seconds to wait for a container to stop before killing it.
const WAIT_BEFORE_KILL_SECONDS: i32 = 10;
// Label key/value stamped on every container owned by the edge agent.
static LABEL_KEY: &str = "net.azure-devices.edge.owner";
static LABEL_VALUE: &str = "Microsoft.Azure.Devices.Edge.Agent";
lazy_static! {
    // Preformatted "key=value" label filter for listing owned containers.
    static ref LABELS: Vec<&'static str> = {
        let mut labels = vec![];
        labels.push("net.azure-devices.edge.owner=Microsoft.Azure.Devices.Edge.Agent");
        labels
    };
}
/// Docker-backed implementation of the Edge module runtime.
#[derive(Clone)]
pub struct DockerModuleRuntime {
    client: DockerClient<UrlConnector>,
    // Name of the Docker network ensured at init time, when configured.
    network_id: Option<String>,
}
impl DockerModuleRuntime {
    /// Builds a runtime that talks to the Docker daemon at `docker_url`,
    /// with no default network configured.
    pub fn new(docker_url: &Url) -> Result<Self> {
        // build the hyper client
        let client = Client::builder()
            .build(UrlConnector::new(docker_url).context(ErrorKind::Initialization)?);
        // extract base path - the bit that comes after the scheme
        let base_path = docker_url
            .to_base_path()
            .context(ErrorKind::Initialization)?;
        let mut configuration = Configuration::new(client);
        configuration.base_path = base_path
            .to_str()
            .ok_or(ErrorKind::Initialization)?
            .to_string();
        let scheme = docker_url.scheme().to_string();
        // Compose request URIs from the captured scheme and base path at call time.
        configuration.uri_composer = Box::new(move |base_path, path| {
            Ok(UrlConnector::build_hyper_uri(&scheme, base_path, path)
                .context(ErrorKind::Initialization)?)
        });
        Ok(DockerModuleRuntime {
            client: DockerClient::new(APIClient::new(configuration)),
            network_id: None,
        })
    }
    /// Builder-style setter for the network that `init` will ensure exists.
    pub fn with_network_id(mut self, network_id: String) -> Self {
        self.network_id = Some(network_id);
        self
    }
    /// Merges `new_env` with `cur_env` ("KEY=VALUE" strings) into a fresh
    /// Vec; on key collisions the `cur_env` entry wins because it is
    /// inserted last.
    fn merge_env(cur_env: Option<&[String]>, new_env: &HashMap<String, String>) -> Vec<String> {
        // build a new merged hashmap containing string slices for keys and values
        // pointing into String instances in new_env
        let mut merged_env = HashMap::new();
        merged_env.extend(new_env.iter().map(|(k, v)| (k.as_str(), v.as_str())));
        if let Some(env) = cur_env {
            // extend merged_env with variables in cur_env (again, these are
            // only string slices pointing into strings inside cur_env)
            merged_env.extend(env.iter().filter_map(|s| {
                let mut tokens = s.splitn(2, '=');
                tokens.next().map(|key| (key, tokens.next().unwrap_or("")))
            }));
        }
        // finally build a new Vec<String>; we alloc new strings here
        merged_env
            .iter()
            .map(|(key, value)| format!("{}={}", key, value))
            .collect()
    }
}
impl ModuleRegistry for DockerModuleRuntime {
    type Error = Error;
    type PullFuture = Box<Future<Item = (), Error = Self::Error> + Send>;
    type RemoveFuture = Box<Future<Item = (), Error = Self::Error>>;
    type Config = DockerConfig;
    /// Pulls the image named in `config` from its registry. Registry
    /// credentials, when present, are serialized to JSON and base64-encoded
    /// before being passed to the Docker image-create call.
    fn pull(&self, config: &Self::Config) -> Self::PullFuture {
        let image = config.image().to_string();
        info!("Pulling image {}...", image);
        let creds: Result<String> = config.auth().map_or_else(
            || Ok("".to_string()),
            |a| {
                let json = serde_json::to_string(a).with_context(|_| {
                    ErrorKind::RegistryOperation(RegistryOperation::PullImage(image.clone()))
                })?;
                Ok(base64::encode(&json))
            },
        );
        let response = creds
            .map(|creds| {
                self.client
                    .image_api()
                    .image_create(&image, "", "", "", "", &creds, "")
                    .then(|result| match result {
                        Ok(()) => Ok(image),
                        Err(err) => Err(Error::from_docker_error(
                            err,
                            ErrorKind::RegistryOperation(RegistryOperation::PullImage(image)),
                        )),
                    })
            })
            .into_future()
            .flatten()
            .then(move |result| match result {
                Ok(image) => {
                    info!("Successfully pulled image {}", image);
                    Ok(())
                }
                Err(err) => {
                    // Failures are logged here as well as surfaced to the caller.
                    log_failure(Level::Warn, &err);
                    Err(err)
                }
            });
        Box::new(response)
    }
    /// Removes the named image; empty names are rejected up front without a
    /// round trip to the daemon.
    fn remove(&self, name: &str) -> Self::RemoveFuture {
        info!("Removing image {}...", name);
        if let Err(err) = ensure_not_empty_with_context(name, || {
            ErrorKind::RegistryOperation(RegistryOperation::RemoveImage(name.to_string()))
        }) {
            return Box::new(future::err(Error::from(err)));
        }
        let name = name.to_string();
        Box::new(
            self.client
                .image_api()
                .image_delete(&name, false, false)
                .then(|result| match result {
                    Ok(_) => {
                        info!("Successfully removed image {}", name);
                        Ok(())
                    }
                    Err(err) => {
                        let err = Error::from_docker_error(
                            err,
                            ErrorKind::RegistryOperation(RegistryOperation::RemoveImage(name)),
                        );
                        log_failure(Level::Warn, &err);
                        Err(err)
                    }
                }),
        )
    }
}
/// Extracts the container name from a Docker inspect response, mapping a
/// missing `Name` field onto a deserializer "missing field" error.
fn parse_get_response<'de, D>(resp: &InlineResponse200) -> std::result::Result<String, D::Error>
where
    D: serde::Deserializer<'de>,
{
    let name = resp
        .name()
        .map(ToOwned::to_owned)
        .ok_or_else(|| serde::de::Error::missing_field("Name"))?;
    Ok(name)
}
/// Extracts the process IDs from a Docker `top` response.
///
/// Locates the "PID" column via `Titles` and parses that column of every row
/// in `Processes`; malformed payloads are reported as deserializer errors.
fn parse_top_response<'de, D>(resp: &InlineResponse2001) -> std::result::Result<Vec<Pid>, D::Error>
where
    D: serde::Deserializer<'de>,
{
    let titles = resp
        .titles()
        .ok_or_else(|| serde::de::Error::missing_field("Titles"))?;
    let pid_index = titles
        .iter()
        .position(|ref s| s.as_str() == "PID")
        .ok_or_else(|| {
            serde::de::Error::invalid_value(
                serde::de::Unexpected::Seq,
                &"array including the column title 'PID'",
            )
        })?;
    let processes = resp
        .processes()
        .ok_or_else(|| serde::de::Error::missing_field("Processes"))?;
    let pids: std::result::Result<_, _> = processes
        .iter()
        .map(|ref p| {
            // Each row must be long enough to contain the PID column...
            let val = p.get(pid_index).ok_or_else(|| {
                serde::de::Error::invalid_length(
                    p.len(),
                    &&*format!("at least {} columns", pid_index + 1),
                )
            })?;
            // ...and that cell must parse as a numeric process id.
            let pid = val.parse::<i32>().map_err(|_| {
                serde::de::Error::invalid_value(
                    serde::de::Unexpected::Str(val),
                    &"a process ID number",
                )
            })?;
            Ok(Pid::Value(pid))
        })
        .collect();
    Ok(pids?)
}
impl ModuleRuntime for DockerModuleRuntime {
type Error = Error;
type Config = DockerConfig;
type Module = DockerModule<UrlConnector>;
type ModuleRegistry = Self;
type Chunk = Chunk;
type Logs = Logs;
type CreateFuture = Box<Future<Item = (), Error = Self::Error> + Send>;
type GetFuture =
Box<Future<Item = (Self::Module, ModuleRuntimeState), Error = Self::Error> + Send>;
type InitFuture = Box<Future<Item = (), Error = Self::Error> + Send>;
type ListFuture = Box<Future<Item = Vec<Self::Module>, Error = Self::Error> + Send>;
type ListWithDetailsStream =
Box<Stream<Item = (Self::Module, ModuleRuntimeState), Error = Self::Error> + Send>;
type LogsFuture = Box<Future<Item = Self::Logs, Error = Self::Error> + Send>;
type RemoveFuture = Box<Future<Item = (), Error = Self::Error> + Send>;
type RestartFuture = Box<Future<Item = (), Error = Self::Error> + Send>;
type StartFuture = Box<Future<Item = (), Error = Self::Error> + Send>;
type StopFuture = Box<Future<Item = (), Error = Self::Error> + Send>;
type SystemInfoFuture = Box<Future<Item = CoreSystemInfo, Error = Self::Error> + Send>;
type RemoveAllFuture = Box<Future<Item = (), Error = Self::Error> + Send>;
type TopFuture = Box<Future<Item = ModuleTop, Error = Self::Error> + Send>;
fn init(&self) -> Self::InitFuture {
info!("Initializing module runtime...");
let created = self.network_id.clone().map_or_else(
|| future::Either::B(future::ok(())),
|id| {
let filter = format!(r#"{{"name":{{"{}":true}}}}"#, id);
let client_copy = self.client.clone();
let fut = self
.client
.network_api()
.network_list(&filter)
.and_then(move |existing_networks| {
if existing_networks.is_empty() {
let fut = client_copy
.network_api()
.network_create(NetworkConfig::new(id))
.map(|_| ());
future::Either::A(fut)
} else {
future::Either::B(future::ok(()))
}
})
.map_err(|err| {
let e = Error::from_docker_error(
err,
ErrorKind::RuntimeOperation(RuntimeOperation::Init),
);
log_failure(Level::Warn, &e);
e
});
future::Either::A(fut)
},
);
let created = created.then(|result| {
match result {
Ok(()) => info!("Successfully initialized module runtime"),
Err(ref err) => log_failure(Level::Warn, err),
}
result
});
Box::new(created)
}
fn create(&self, module: ModuleSpec<Self::Config>) -> Self::CreateFuture {
info!("Creating module {}...", module.name());
// we only want "docker" modules
if module.type_() != DOCKER_MODULE_TYPE {
return Box::new(future::err(Error::from(ErrorKind::InvalidModuleType(
module.type_().to_string(),
))));
}
let result = module
.config()
.clone_create_options()
.and_then(|create_options| {
// merge environment variables
let merged_env = DockerModuleRuntime::merge_env(create_options.env(), module.env());
let mut labels = create_options
.labels()
.cloned()
.unwrap_or_else(HashMap::new);
labels.insert(LABEL_KEY.to_string(), LABEL_VALUE.to_string());
debug!(
"Creating container {} with image {}",
module.name(),
module.config().image()
);
let create_options = create_options
.with_image(module.config().image().to_string())
.with_env(merged_env)
.with_labels(labels);
// Here we don't add the container to the iot edge docker network as the edge-agent is expected to do that.
// It contains the logic to add a container to the iot edge network only if a network is not already specified.
Ok(self
.client
.container_api()
.container_create(create_options, module.name())
.then(|result| match result {
Ok(_) => Ok(module),
Err(err) => Err(Error::from_docker_error(
err,
ErrorKind::RuntimeOperation(RuntimeOperation::CreateModule(
module.name().to_string(),
)),
)),
}))
})
.into_future()
.flatten()
.then(|result| match result {
Ok(module) => {
info!("Successfully created module {}", module.name());
Ok(())
}
Err(err) => {
log_failure(Level::Warn, &err);
Err(err)
}
});
Box::new(result)
}
fn get(&self, id: &str) -> Self::GetFuture {
debug!("Getting module {}...", id);
let id = id.to_string();
if let Err(err) = ensure_not_empty_with_context(&id, || {
ErrorKind::RuntimeOperation(RuntimeOperation::GetModule(id.clone()))
}) {
return Box::new(future::err(Error::from(err)));
}
let client_copy = self.client.clone();
Box::new(
self.client
.container_api()
.container_inspect(&id, false)
.then(|result| match result {
Ok(container) => {
let name =
parse_get_response::<Deserializer>(&container).with_context(|_| {
ErrorKind::RuntimeOperation(RuntimeOperation::GetModule(id.clone()))
})?;
let config =
DockerConfig::new(name.clone(), ContainerCreateBody::new(), None)
.with_context(|_| {
ErrorKind::RuntimeOperation(RuntimeOperation::GetModule(
id.clone(),
))
})?;
let module =
DockerModule::new(client_copy, name, config).with_context(|_| {
ErrorKind::RuntimeOperation(RuntimeOperation::GetModule(id.clone()))
})?;
let state = runtime_state(container.id(), container.state());
Ok((module, state))
}
Err(err) => {
let err = Error::from_docker_error(
err,
ErrorKind::RuntimeOperation(RuntimeOperation::GetModule(id)),
);
log_failure(Level::Warn, &err);
Err(err)
}
}),
)
}
fn start(&self, id: &str) -> Self::StartFuture {
info!("Starting module {}...", id);
let id = id.to_string();
if let Err(err) = ensure_not_empty_with_context(&id, || {
ErrorKind::RuntimeOperation(RuntimeOperation::StartModule(id.clone()))
}) {
return Box::new(future::err(Error::from(err)));
}
Box::new(
self.client
.container_api()
.container_start(&id, "")
.then(|result| match result {
Ok(_) => {
info!("Successfully started module {}", id);
Ok(())
}
Err(err) => {
let err = Error::from_docker_error(
err,
ErrorKind::RuntimeOperation(RuntimeOperation::StartModule(id)),
);
log_failure(Level::Warn, &err);
Err(err)
}
}),
)
}
fn stop(&self, id: &str, wait_before_kill: Option<Duration>) -> Self::StopFuture {
info!("Stopping module {}...", id);
let id = id.to_string();
if let Err(err) = ensure_not_empty_with_context(&id, || {
ErrorKind::RuntimeOperation(RuntimeOperation::StopModule(id.clone()))
}) {
return Box::new(future::err(Error::from(err)));
}
#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
Box::new(
self.client
.container_api()
.container_stop(
&id,
wait_before_kill.map_or(WAIT_BEFORE_KILL_SECONDS, |s| match s.as_secs() {
s if s > i32::max_value() as u64 => i32::max_value(),
s => s as i32,
}),
)
.then(|result| match result {
Ok(_) => {
info!("Successfully stopped module {}", id);
Ok(())
}
Err(err) => {
let err = Error::from_docker_error(
err,
ErrorKind::RuntimeOperation(RuntimeOperation::StopModule(id)),
);
log_failure(Level::Warn, &err);
Err(err)
}
}),
)
}
fn system_info(&self) -> Self::SystemInfoFuture {
info!("Querying system info...");
Box::new(
self.client
.system_api()
.system_info()
.then(|result| match result {
Ok(system_info) => {
let system_info = CoreSystemInfo::new(
system_info
.os_type()
.unwrap_or(&String::from("Unknown"))
.to_string(),
system_info
.architecture()
.unwrap_or(&String::from("Unknown"))
.to_string(),
);
info!("Successfully queried system info");
Ok(system_info)
}
Err(err) => {
let err = Error::from_docker_error(
err,
ErrorKind::RuntimeOperation(RuntimeOperation::SystemInfo),
);
log_failure(Level::Warn, &err);
Err(err)
}
}),
)
}
fn restart(&self, id: &str) -> Self::RestartFuture {
info!("Restarting module {}...", id);
let id = id.to_string();
if let Err(err) = ensure_not_empty_with_context(&id, || {
ErrorKind::RuntimeOperation(RuntimeOperation::RestartModule(id.clone()))
}) {
return Box::new(future::err(Error::from(err)));
}
Box::new(
self.client
.container_api()
.container_restart(&id, WAIT_BEFORE_KILL_SECONDS)
.then(|result| match result {
Ok(_) => {
info!("Successfully restarted module {}", id);
Ok(())
}
Err(err) => {
let err = Error::from_docker_error(
err,
ErrorKind::RuntimeOperation(RuntimeOperation::RestartModule(id)),
);
log_failure(Level::Warn, &err);
Err(err)
}
}),
)
}
fn remove(&self, id: &str) -> Self::RemoveFuture {
info!("Removing module {}...", id);
let id = id.to_string();
if let Err(err) = ensure_not_empty_with_context(&id, || {
ErrorKind::RuntimeOperation(RuntimeOperation::RemoveModule(id.clone()))
}) {
return Box::new(future::err(Error::from(err)));
}
Box::new(
self.client
.container_api()
.container_delete(
&id, /* remove volumes */ false, /* force */ true,
/* remove link */ false,
)
.then(|result| match result {
Ok(_) => {
info!("Successfully removed module {}", id);
Ok(())
}
Err(err) => {
let err = Error::from_docker_error(
err,
ErrorKind::RuntimeOperation(RuntimeOperation::RemoveModule(id)),
);
log_failure(Level::Warn, &err);
Err(err)
}
}),
)
}
fn list(&self) -> Self::ListFuture {
debug!("Listing modules...");
let mut filters = HashMap::new();
filters.insert("label", LABELS.deref());
let client_copy = self.client.clone();
let result = serde_json::to_string(&filters)
.context(ErrorKind::RuntimeOperation(RuntimeOperation::ListModules))
.map_err(Error::from)
.map(|filters| {
self.client
.container_api()
.container_list(true, 0, false, &filters)
.map(move |containers| {
containers
.iter()
.flat_map(|container| {
DockerConfig::new(
container.image().to_string(),
ContainerCreateBody::new()
.with_labels(container.labels().clone()),
None,
)
.map(|config| {
(
container,
config.with_image_id(container.image_id().clone()),
)
})
})
.flat_map(|(container, config)| {
DockerModule::new(
client_copy.clone(),
container
.names()
.iter()
.next()
.map_or("Unknown", |s| &s[1..])
.to_string(),
config,
)
})
.collect()
})
.map_err(|err| {
Error::from_docker_error(
err,
ErrorKind::RuntimeOperation(RuntimeOperation::ListModules),
)
})
})
.into_future()
.flatten()
.then(|result| {
match result {
Ok(_) => debug!("Successfully listed modules"),
Err(ref err) => log_failure(Level::Warn, err),
}
result
});
Box::new(result)
}
fn list_with_details(&self) -> Self::ListWithDetailsStream {
list_with_details(self)
}
fn logs(&self, id: &str, options: &LogOptions) -> Self::LogsFuture {
info!("Getting logs for module {}...", id);
let id = id.to_string();
let tail = &options.tail().to_string();
let result = self
.client
.container_api()
.container_logs(&id, options.follow(), true, true, 0, false, tail)
.then(|result| match result {
Ok(logs) => {
info!("Successfully got logs for module {}", id);
Ok(Logs(id, logs))
}
Err(err) => {
let err = Error::from_docker_error(
err,
ErrorKind::RuntimeOperation(RuntimeOperation::GetModuleLogs(id)),
);
log_failure(Level::Warn, &err);
Err(err)
}
});
Box::new(result)
}
fn registry(&self) -> &Self::ModuleRegistry {
self
}
fn remove_all(&self) -> Self::RemoveAllFuture {
let self_for_remove = self.clone();
Box::new(self.list().and_then(move |list| {
let n = list.into_iter().map(move |c| {
<DockerModuleRuntime as ModuleRuntime>::remove(&self_for_remove, c.name())
});
future::join_all(n).map(|_| ())
}))
}
fn top(&self, id: &str) -> Self::TopFuture {
let id = id.to_string();
Box::new(
self.client
.container_api()
.container_top(&id, "")
.then(|result| match result {
Ok(resp) => {
let p = parse_top_response::<Deserializer>(&resp).with_context(|_| {
ErrorKind::RuntimeOperation(RuntimeOperation::TopModule(id.clone()))
})?;
Ok(ModuleTop::new(id, p))
}
Err(err) => {
let err = Error::from_docker_error(
err,
ErrorKind::RuntimeOperation(RuntimeOperation::TopModule(id)),
);
Err(err)
}
}),
)
}
}
#[derive(Debug)]
pub struct Logs(String, Body);
impl Stream for Logs {
type Item = Chunk;
type Error = Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
match self.1.poll() {
Ok(Async::Ready(chunk)) => Ok(Async::Ready(chunk.map(Chunk))),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(err) => Err(Error::from(err.context(ErrorKind::RuntimeOperation(
RuntimeOperation::GetModuleLogs(self.0.clone()),
)))),
}
}
}
impl From<Logs> for Body {
fn from(logs: Logs) -> Self {
logs.1
}
}
#[derive(Debug, Default)]
pub struct Chunk(HyperChunk);
impl IntoIterator for Chunk {
type Item = u8;
type IntoIter = <HyperChunk as IntoIterator>::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
impl Extend<u8> for Chunk {
fn extend<T>(&mut self, iter: T)
where
T: IntoIterator<Item = u8>,
{
self.0.extend(iter)
}
}
impl AsRef<[u8]> for Chunk {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
}
}
/// Invokes `ModuleRuntime::list`, then `Module::runtime_state` on each Module.
/// Modules whose `runtime_state` returns `NotFound` are filtered out from the result,
/// instead of letting the whole `list_with_details` call fail.
fn list_with_details<MR, M>(
runtime: &MR,
) -> Box<Stream<Item = (M, ModuleRuntimeState), Error = Error> + Send>
where
MR: ModuleRuntime<Error = Error, Config = <M as Module>::Config, Module = M>,
<MR as ModuleRuntime>::ListFuture: 'static,
M: Module<Error = Error> + Send + 'static,
<M as Module>::Config: Send,
{
Box::new(
runtime
.list()
.into_stream()
.map(|list| {
stream::futures_unordered(
list.into_iter()
.map(|module| module.runtime_state().map(|state| (module, state))),
)
})
.flatten()
.then(Ok::<_, Error>) // Ok(_) -> Ok(Ok(_)), Err(_) -> Ok(Err(_)), ! -> Err(_)
.filter_map(|value| match value {
Ok(value) => Some(Ok(value)),
Err(err) => match err.kind() {
ErrorKind::NotFound(_) => None,
_ => Some(Err(err)),
},
})
.then(Result::unwrap), // Ok(Ok(_)) -> Ok(_), Ok(Err(_)) -> Err(_), Err(_) -> !
)
}
#[cfg(test)]
mod tests {
use super::*;
use futures::future::FutureResult;
use futures::stream::Empty;
#[cfg(unix)]
use tempfile::NamedTempFile;
use tokio;
use url::Url;
use docker::models::ContainerCreateBody;
use edgelet_core::pid::Pid;
use edgelet_core::ModuleRegistry;
use error::{Error, ErrorKind};
#[test]
#[should_panic(expected = "URL does not have a recognized scheme")]
fn invalid_uri_prefix_fails() {
let _mri =
DockerModuleRuntime::new(&Url::parse("foo:///this/is/not/valid").unwrap()).unwrap();
}
#[cfg(unix)]
#[test]
#[should_panic(expected = "Socket file could not be found")]
fn invalid_uds_path_fails() {
let _mri =
DockerModuleRuntime::new(&Url::parse("unix:///this/file/does/not/exist").unwrap())
.unwrap();
}
#[cfg(unix)]
#[test]
fn create_with_uds_succeeds() {
let file = NamedTempFile::new().unwrap();
let file_path = file.path().to_str().unwrap();
let _mri = DockerModuleRuntime::new(&Url::parse(&format!("unix://{}", file_path)).unwrap())
.unwrap();
}
#[test]
fn image_remove_with_empty_name_fails() {
let mri = DockerModuleRuntime::new(&Url::parse("http://localhost/").unwrap()).unwrap();
let name = "";
let task = ModuleRegistry::remove(&mri, name).then(|res| match res {
Ok(_) => Err("Expected error but got a result.".to_string()),
Err(err) => match err.kind() {
ErrorKind::RegistryOperation(RegistryOperation::RemoveImage(s)) if s == name => {
Ok(())
}
kind => panic!(
"Expected `RegistryOperation(RemoveImage)` error but got {:?}.",
kind
),
},
});
tokio::runtime::current_thread::Runtime::new()
.unwrap()
.block_on(task)
.unwrap();
}
#[test]
fn image_remove_with_white_space_name_fails() {
let mri = DockerModuleRuntime::new(&Url::parse("http://localhost/").unwrap()).unwrap();
let name = " ";
let task = ModuleRegistry::remove(&mri, name).then(|res| match res {
Ok(_) => Err("Expected error but got a result.".to_string()),
Err(err) => match err.kind() {
ErrorKind::RegistryOperation(RegistryOperation::RemoveImage(s)) if s == name => {
Ok(())
}
kind => panic!(
"Expected `RegistryOperation(RemoveImage)` error but got {:?}.",
kind
),
},
});
tokio::runtime::current_thread::Runtime::new()
.unwrap()
.block_on(task)
.unwrap();
}
#[test]
fn merge_env_empty() {
let cur_env = Some(&[][..]);
let new_env = HashMap::new();
assert_eq!(0, DockerModuleRuntime::merge_env(cur_env, &new_env).len());
}
#[test]
fn merge_env_new_empty() {
let cur_env = Some(vec!["k1=v1".to_string(), "k2=v2".to_string()]);
let new_env = HashMap::new();
let mut merged_env =
DockerModuleRuntime::merge_env(cur_env.as_ref().map(AsRef::as_ref), &new_env);
merged_env.sort();
assert_eq!(vec!["k1=v1", "k2=v2"], merged_env);
}
#[test]
fn merge_env_extend_new() {
let cur_env = Some(vec!["k1=v1".to_string(), "k2=v2".to_string()]);
let mut new_env = HashMap::new();
new_env.insert("k3".to_string(), "v3".to_string());
let mut merged_env =
DockerModuleRuntime::merge_env(cur_env.as_ref().map(AsRef::as_ref), &new_env);
merged_env.sort();
assert_eq!(vec!["k1=v1", "k2=v2", "k3=v3"], merged_env);
}
#[test]
fn merge_env_extend_replace_new() {
let cur_env = Some(vec!["k1=v1".to_string(), "k2=v2".to_string()]);
let mut new_env = HashMap::new();
new_env.insert("k2".to_string(), "v02".to_string());
new_env.insert("k3".to_string(), "v3".to_string());
let mut merged_env =
DockerModuleRuntime::merge_env(cur_env.as_ref().map(AsRef::as_ref), &new_env);
merged_env.sort();
assert_eq!(vec!["k1=v1", "k2=v2", "k3=v3"], merged_env);
}
#[test]
fn create_fails_for_non_docker_type() {
let mri = DockerModuleRuntime::new(&Url::parse("http://localhost/").unwrap()).unwrap();
let name = "not_docker".to_string();
let module_config = ModuleSpec::new(
"m1".to_string(),
name.clone(),
DockerConfig::new("nginx:latest".to_string(), ContainerCreateBody::new(), None)
.unwrap(),
HashMap::new(),
)
.unwrap();
let task = mri.create(module_config).then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::InvalidModuleType(s) if s == &name => Ok::<_, Error>(()),
kind => panic!("Expected `InvalidModuleType` error but got {:?}.", kind),
},
});
tokio::runtime::current_thread::Runtime::new()
.unwrap()
.block_on(task)
.unwrap();
}
#[test]
fn start_fails_for_empty_id() {
let mri = DockerModuleRuntime::new(&Url::parse("http://localhost/").unwrap()).unwrap();
let name = "";
let task = mri.start(name).then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::StartModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(StartModule)` error but got {:?}.",
kind
),
},
});
tokio::runtime::current_thread::Runtime::new()
.unwrap()
.block_on(task)
.unwrap();
}
#[test]
fn start_fails_for_white_space_id() {
let mri = DockerModuleRuntime::new(&Url::parse("http://localhost/").unwrap()).unwrap();
let name = " ";
let task = mri.start(name).then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::StartModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(StartModule)` error but got {:?}.",
kind
),
},
});
tokio::runtime::current_thread::Runtime::new()
.unwrap()
.block_on(task)
.unwrap();
}
#[test]
fn stop_fails_for_empty_id() {
let mri = DockerModuleRuntime::new(&Url::parse("http://localhost/").unwrap()).unwrap();
let name = "";
let task = mri.stop(name, None).then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::StopModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(StopModule)` error but got {:?}.",
kind
),
},
});
tokio::runtime::current_thread::Runtime::new()
.unwrap()
.block_on(task)
.unwrap();
}
#[test]
fn stop_fails_for_white_space_id() {
let mri = DockerModuleRuntime::new(&Url::parse("http://localhost/").unwrap()).unwrap();
let name = " ";
let task = mri.stop(name, None).then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::StopModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(StopModule)` error but got {:?}.",
kind
),
},
});
tokio::runtime::current_thread::Runtime::new()
.unwrap()
.block_on(task)
.unwrap();
}
#[test]
fn restart_fails_for_empty_id() {
let mri = DockerModuleRuntime::new(&Url::parse("http://localhost/").unwrap()).unwrap();
let name = "";
let task = mri.restart(name).then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::RestartModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(RestartModule)` error but got {:?}.",
kind
),
},
});
tokio::runtime::current_thread::Runtime::new()
.unwrap()
.block_on(task)
.unwrap();
}
#[test]
fn restart_fails_for_white_space_id() {
let mri = DockerModuleRuntime::new(&Url::parse("http://localhost/").unwrap()).unwrap();
let name = " ";
let task = mri.restart(name).then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::RestartModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(RestartModule)` error but got {:?}.",
kind
),
},
});
tokio::runtime::current_thread::Runtime::new()
.unwrap()
.block_on(task)
.unwrap();
}
#[test]
fn remove_fails_for_empty_id() {
let mri = DockerModuleRuntime::new(&Url::parse("http://localhost/").unwrap()).unwrap();
let name = "";
let task = ModuleRuntime::remove(&mri, name).then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::RemoveModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(RemoveModule)` error but got {:?}.",
kind
),
},
});
tokio::runtime::current_thread::Runtime::new()
.unwrap()
.block_on(task)
.unwrap();
}
#[test]
fn remove_fails_for_white_space_id() {
let mri = DockerModuleRuntime::new(&Url::parse("http://localhost/").unwrap()).unwrap();
let name = " ";
let task = ModuleRuntime::remove(&mri, name).then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::RemoveModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(RemoveModule)` error but got {:?}.",
kind
),
},
});
tokio::runtime::current_thread::Runtime::new()
.unwrap()
.block_on(task)
.unwrap();
}
#[test]
fn get_fails_for_empty_id() {
let mri = DockerModuleRuntime::new(&Url::parse("http://localhost/").unwrap()).unwrap();
let name = "";
let task = ModuleRuntime::get(&mri, name).then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::GetModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(GetModule)` error but got {:?}.",
kind
),
},
});
tokio::runtime::current_thread::Runtime::new()
.unwrap()
.block_on(task)
.unwrap();
}
#[test]
fn get_fails_for_white_space_id() {
let mri = DockerModuleRuntime::new(&Url::parse("http://localhost/").unwrap()).unwrap();
let name = " ";
let task = ModuleRuntime::get(&mri, name).then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::GetModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(GetModule)` error but got {:?}.",
kind
),
},
});
tokio::runtime::current_thread::Runtime::new()
.unwrap()
.block_on(task)
.unwrap();
}
#[test]
fn list_with_details_filters_out_deleted_containers() {
let runtime = TestModuleList {
modules: vec![
TestModule {
name: "a".to_string(),
runtime_state_behavior: TestModuleRuntimeStateBehavior::Default,
},
TestModule {
name: "b".to_string(),
runtime_state_behavior: TestModuleRuntimeStateBehavior::NotFound,
},
TestModule {
name: "c".to_string(),
runtime_state_behavior: TestModuleRuntimeStateBehavior::NotFound,
},
TestModule {
name: "d".to_string(),
runtime_state_behavior: TestModuleRuntimeStateBehavior::Default,
},
],
};
assert_eq!(
runtime.list_with_details().collect().wait().unwrap(),
vec![
(
TestModule {
name: "a".to_string(),
runtime_state_behavior: TestModuleRuntimeStateBehavior::Default,
},
ModuleRuntimeState::default().with_pid(Pid::Any)
),
(
TestModule {
name: "d".to_string(),
runtime_state_behavior: TestModuleRuntimeStateBehavior::Default,
},
ModuleRuntimeState::default().with_pid(Pid::Any)
),
]
);
}
#[test]
fn top_fails_for_empty_id() {
let mri = DockerModuleRuntime::new(&Url::parse("http://localhost/").unwrap()).unwrap();
let name = "";
let task = ModuleRuntime::top(&mri, name).then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::TopModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(TopModule)` error but got {:?}.",
kind
),
},
});
tokio::runtime::current_thread::Runtime::new()
.unwrap()
.block_on(task)
.unwrap();
}
#[test]
fn top_fails_for_white_space_id() {
let mri = DockerModuleRuntime::new(&Url::parse("http://localhost/").unwrap()).unwrap();
let name = " ";
let task = ModuleRuntime::top(&mri, name).then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::TopModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(TopModule)` error but got {:?}.",
kind
),
},
});
tokio::runtime::current_thread::Runtime::new()
.unwrap()
.block_on(task)
.unwrap();
}
#[test]
fn parse_get_response_returns_the_name() {
let response = InlineResponse200::new().with_name("hello".to_string());
let name = parse_get_response::<Deserializer>(&response);
assert!(name.is_ok());
assert_eq!("hello".to_string(), name.unwrap());
}
#[test]
fn parse_get_response_returns_error_when_name_is_missing() {
let response = InlineResponse200::new();
let name = parse_get_response::<Deserializer>(&response);
assert!(name.is_err());
assert_eq!("missing field `Name`", format!("{}", name.unwrap_err()));
}
#[test]
fn parse_top_response_returns_pid_array() {
let response = InlineResponse2001::new()
.with_titles(vec!["PID".to_string()])
.with_processes(vec![vec!["123".to_string()]]);
let pids = parse_top_response::<Deserializer>(&response);
assert!(pids.is_ok());
assert_eq!(vec![Pid::Value(123)], pids.unwrap());
}
#[test]
fn parse_top_response_returns_error_when_titles_is_missing() {
let response = InlineResponse2001::new().with_processes(vec![vec!["123".to_string()]]);
let pids = parse_top_response::<Deserializer>(&response);
assert!(pids.is_err());
assert_eq!("missing field `Titles`", format!("{}", pids.unwrap_err()));
}
#[test]
fn parse_top_response_returns_error_when_pid_title_is_missing() {
let response = InlineResponse2001::new().with_titles(vec!["Command".to_string()]);
let pids = parse_top_response::<Deserializer>(&response);
assert!(pids.is_err());
assert_eq!(
"invalid value: sequence, expected array including the column title \'PID\'",
format!("{}", pids.unwrap_err())
);
}
#[test]
fn parse_top_response_returns_error_when_processes_is_missing() {
let response = InlineResponse2001::new().with_titles(vec!["PID".to_string()]);
let pids = parse_top_response::<Deserializer>(&response);
assert!(pids.is_err());
assert_eq!(
"missing field `Processes`",
format!("{}", pids.unwrap_err())
);
}
#[test]
fn parse_top_response_returns_error_when_process_pid_is_missing() {
let response = InlineResponse2001::new()
.with_titles(vec!["Command".to_string(), "PID".to_string()])
.with_processes(vec![vec!["sh".to_string()]]);
let pids = parse_top_response::<Deserializer>(&response);
assert!(pids.is_err());
assert_eq!(
"invalid length 1, expected at least 2 columns",
format!("{}", pids.unwrap_err())
);
}
#[test]
fn parse_top_response_returns_error_when_process_pid_is_not_i32() {
let response = InlineResponse2001::new() | assert!(pids.is_err());
assert_eq!(
"invalid value: string \"xyz\", expected a process ID number",
format!("{}", pids.unwrap_err())
);
}
struct TestConfig;
#[derive(Clone, Copy, Debug, PartialEq)]
enum TestModuleRuntimeStateBehavior {
Default,
NotFound,
}
#[derive(Clone, Debug, PartialEq)]
struct TestModule {
name: String,
runtime_state_behavior: TestModuleRuntimeStateBehavior,
}
impl Module for TestModule {
type Config = TestConfig;
type Error = Error;
type RuntimeStateFuture = FutureResult<ModuleRuntimeState, Self::Error>;
fn name(&self) -> &str {
&self.name
}
fn type_(&self) -> &str {
""
}
fn config(&self) -> &Self::Config {
&TestConfig
}
fn runtime_state(&self) -> Self::RuntimeStateFuture {
match self.runtime_state_behavior {
TestModuleRuntimeStateBehavior::Default => {
future::ok(ModuleRuntimeState::default().with_pid(Pid::Any))
}
TestModuleRuntimeStateBehavior::NotFound => {
future::err(ErrorKind::NotFound(String::new()).into())
}
}
}
}
#[derive(Clone)]
struct TestModuleList {
modules: Vec<TestModule>,
}
impl ModuleRegistry for TestModuleList {
type Config = TestConfig;
type Error = Error;
type PullFuture = FutureResult<(), Self::Error>;
type RemoveFuture = FutureResult<(), Self::Error>;
fn pull(&self, _config: &Self::Config) -> Self::PullFuture {
unimplemented!()
}
fn remove(&self, _name: &str) -> Self::RemoveFuture {
unimplemented!()
}
}
impl ModuleRuntime for TestModuleList {
type Error = Error;
type Config = TestConfig;
type Module = TestModule;
type ModuleRegistry = Self;
type Chunk = String;
type Logs = Empty<Self::Chunk, Self::Error>;
type CreateFuture = FutureResult<(), Self::Error>;
type GetFuture = FutureResult<(Self::Module, ModuleRuntimeState), Self::Error>;
type InitFuture = FutureResult<(), Self::Error>;
type ListFuture = FutureResult<Vec<Self::Module>, Self::Error>;
type ListWithDetailsStream =
Box<Stream<Item = (Self::Module, ModuleRuntimeState), Error = Self::Error> + Send>;
type LogsFuture = FutureResult<Self::Logs, Self::Error>;
type RemoveFuture = FutureResult<(), Self::Error>;
type RestartFuture = FutureResult<(), Self::Error>;
type StartFuture = FutureResult<(), Self::Error>;
type StopFuture = FutureResult<(), Self::Error>;
type SystemInfoFuture = FutureResult<CoreSystemInfo, Self::Error>;
type RemoveAllFuture = FutureResult<(), Self::Error>;
type TopFuture = FutureResult<ModuleTop, Self::Error>;
fn init(&self) -> Self::InitFuture {
unimplemented!()
}
fn create(&self, _module: ModuleSpec<Self::Config>) -> Self::CreateFuture {
unimplemented!()
}
fn get(&self, _id: &str) -> Self::GetFuture {
unimplemented!()
}
fn start(&self, _id: &str) -> Self::StartFuture {
unimplemented!()
}
fn stop(&self, _id: &str, _wait_before_kill: Option<Duration>) -> Self::StopFuture {
unimplemented!()
}
fn system_info(&self) -> Self::SystemInfoFuture {
unimplemented!()
}
fn restart(&self, _id: &str) -> Self::RestartFuture {
unimplemented!()
}
fn remove(&self, _id: &str) -> Self::RemoveFuture {
unimplemented!()
}
fn list(&self) -> Self::ListFuture {
future::ok(self.modules.clone())
}
fn list_with_details(&self) -> Self::ListWithDetailsStream {
list_with_details(self)
}
fn logs(&self, _id: &str, _options: &LogOptions) -> Self::LogsFuture {
unimplemented!()
}
fn registry(&self) -> &Self::ModuleRegistry {
self
}
fn remove_all(&self) -> Self::RemoveAllFuture {
unimplemented!()
}
fn top(&self, _id: &str) -> Self::TopFuture {
unimplemented!()
}
}
} | .with_titles(vec!["PID".to_string()])
.with_processes(vec![vec!["xyz".to_string()]]);
let pids = parse_top_response::<Deserializer>(&response); |
median.go | package quantile
import (
"fmt"
"github.com/pkg/errors"
)
// Median keeps track of the median of a stream using order statistics.
type Median struct {
quantile *Quantile
}
// NewMedian instantiates a Median struct. The implementation of the underlying data
// structure for tracking order statistics can be configured by passing in a constant
// of type Impl.
func NewMedian(window int, options ...Option) (*Median, error) {
quantile, err := New(window, append(options, InterpolationOption(Midpoint))...)
if err != nil {
return nil, errors.Wrap(err, "error creating Quantile")
}
return &Median{quantile: quantile}, nil
}
// NewGlobalMedian instantiates a global Median struct.
// This is equivalent to calling NewMedian(0, options...). | // String returns a string representation of the metric.
func (m *Median) String() string {
name := "quantile.Median"
quantile := fmt.Sprintf("quantile:%v", m.quantile.String())
return fmt.Sprintf("%s_{%s}", name, quantile)
}
// Push adds a number for calculating the median.
func (m *Median) Push(x float64) error {
err := m.quantile.Push(x)
if err != nil {
return errors.Wrapf(err, "error pushing %f to Quantile", x)
}
return nil
}
// Value returns the value of the median.
func (m *Median) Value() (float64, error) {
value, err := m.quantile.Value(0.5)
if err != nil {
return 0, errors.Wrap(err, "error retrieving quantile value")
}
return value, nil
}
// Clear resets the metric.
func (m *Median) Clear() {
m.quantile.Clear()
} | func NewGlobalMedian(options ...Option) (*Median, error) {
return NewMedian(0, options...)
}
|
googletest-list-tests-unittest.py | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its | # this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking googletest-list-tests-unittest_ (a program written with
Google Test) the command line flags.
"""
import re
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the googletest-list-tests-unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('googletest-list-tests-unittest_')
# The expected output when running googletest-list-tests-unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*( __ptr64)?
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*( __ptr64)?
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running googletest-list-tests-unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
"""Runs googletest-list-tests-unittest_ and returns the list of tests printed."""
return gtest_test_utils.Subprocess([EXE_PATH] + args,
capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
"""Tests using the --gtest_list_tests flag to list all tests."""
def RunAndVerify(self, flag_value, expected_output_re, other_flag):
"""Runs googletest-list-tests-unittest_ and verifies that it prints
the correct tests.
Args:
flag_value: value of the --gtest_list_tests flag;
None if the flag should not be present.
expected_output_re: regular expression that matches the expected
output after running command;
other_flag: a different flag to be passed to command
along with gtest_list_tests;
None if the flag should not be present.
"""
if flag_value is None:
flag = ''
flag_expression = 'not set'
elif flag_value == '0':
flag = '--%s=0' % LIST_TESTS_FLAG
flag_expression = '0'
else:
flag = '--%s' % LIST_TESTS_FLAG
flag_expression = '1'
args = [flag]
if other_flag is not None:
args += [other_flag]
output = Run(args)
if expected_output_re:
self.assert_(
expected_output_re.match(output),
('when %s is %s, the output of "%s" is "%s",\n'
'which does not match regex "%s"' %
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
expected_output_re.pattern)))
else:
self.assert_(
not EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
('when %s is %s, the output of "%s" is "%s"' %
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(flag_value=None,
expected_output_re=None,
other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
self.RunAndVerify(flag_value='0',
expected_output_re=None,
other_flag=None)
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag=None)
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag='--gtest_break_on_failure')
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
--gtest_filter flag."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main() | # contributors may be used to endorse or promote products derived from |
options.py | from django.contrib.admin import ModelAdmin
from django.contrib.gis.admin.widgets import OpenLayersWidget
from django.contrib.gis.gdal import OGRGeomType
from django.contrib.gis.db import models
class | (ModelAdmin):
"""
The administration options class for Geographic models. Map settings
may be overloaded from their defaults to create custom maps.
"""
# The default map settings that may be overloaded -- still subject
# to API changes.
default_lon = 0
default_lat = 0
default_zoom = 4
display_wkt = False
display_srid = False
extra_js = []
num_zoom = 18
max_zoom = False
min_zoom = False
units = False
max_resolution = False
max_extent = False
modifiable = True
mouse_position = True
scale_text = True
layerswitcher = True
scrollable = True
map_width = 600
map_height = 400
map_srid = 4326
map_template = 'gis/admin/openlayers.html'
openlayers_url = 'http://openlayers.org/api/2.11/OpenLayers.js'
point_zoom = num_zoom - 6
wms_url = 'http://vmap0.tiles.osgeo.org/wms/vmap0'
wms_layer = 'basic'
wms_name = 'OpenLayers WMS'
debug = False
widget = OpenLayersWidget
@property
def media(self):
"Injects OpenLayers JavaScript into the admin."
media = super(GeoModelAdmin, self).media
media.add_js([self.openlayers_url])
media.add_js(self.extra_js)
return media
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Overloaded from ModelAdmin so that an OpenLayersWidget is used
for viewing/editing GeometryFields.
"""
if isinstance(db_field, models.GeometryField):
request = kwargs.pop('request', None)
# Setting the widget with the newly defined widget.
kwargs['widget'] = self.get_map_widget(db_field)
return db_field.formfield(**kwargs)
else:
return super(GeoModelAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def get_map_widget(self, db_field):
"""
Returns a subclass of the OpenLayersWidget (or whatever was specified
in the `widget` attribute) using the settings from the attributes set
in this class.
"""
is_collection = db_field.geom_type in ('MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION')
if is_collection:
if db_field.geom_type == 'GEOMETRYCOLLECTION': collection_type = 'Any'
else: collection_type = OGRGeomType(db_field.geom_type.replace('MULTI', ''))
else:
collection_type = 'None'
class OLMap(self.widget):
template = self.map_template
geom_type = db_field.geom_type
params = {'default_lon' : self.default_lon,
'default_lat' : self.default_lat,
'default_zoom' : self.default_zoom,
'display_wkt' : self.debug or self.display_wkt,
'geom_type' : OGRGeomType(db_field.geom_type),
'field_name' : db_field.name,
'is_collection' : is_collection,
'scrollable' : self.scrollable,
'layerswitcher' : self.layerswitcher,
'collection_type' : collection_type,
'is_linestring' : db_field.geom_type in ('LINESTRING', 'MULTILINESTRING'),
'is_polygon' : db_field.geom_type in ('POLYGON', 'MULTIPOLYGON'),
'is_point' : db_field.geom_type in ('POINT', 'MULTIPOINT'),
'num_zoom' : self.num_zoom,
'max_zoom' : self.max_zoom,
'min_zoom' : self.min_zoom,
'units' : self.units, #likely shoud get from object
'max_resolution' : self.max_resolution,
'max_extent' : self.max_extent,
'modifiable' : self.modifiable,
'mouse_position' : self.mouse_position,
'scale_text' : self.scale_text,
'map_width' : self.map_width,
'map_height' : self.map_height,
'point_zoom' : self.point_zoom,
'srid' : self.map_srid,
'display_srid' : self.display_srid,
'wms_url' : self.wms_url,
'wms_layer' : self.wms_layer,
'wms_name' : self.wms_name,
'debug' : self.debug,
}
return OLMap
from django.contrib.gis import gdal
if gdal.HAS_GDAL:
# Use the official spherical mercator projection SRID on versions
# of GDAL that support it; otherwise, fallback to 900913.
if gdal.GDAL_VERSION >= (1, 7):
spherical_mercator_srid = 3857
else:
spherical_mercator_srid = 900913
class OSMGeoAdmin(GeoModelAdmin):
map_template = 'gis/admin/osm.html'
num_zoom = 20
map_srid = spherical_mercator_srid
max_extent = '-20037508,-20037508,20037508,20037508'
max_resolution = '156543.0339'
point_zoom = num_zoom - 6
units = 'm'
| GeoModelAdmin |
test.rs | //! Various helpers for Actix applications to use during testing.
use std::future::Future;
use actix_codec::Framed;
use actix_http::h1::Codec;
use actix_http::http::header::{Header, HeaderName, IntoHeaderValue};
use actix_http::http::{HttpTryFrom, Method, Uri, Version};
use actix_http::test::{TestBuffer, TestRequest as HttpTestRequest};
use actix_router::{Path, Url};
use crate::{FramedRequest, State};
/// Test `Request` builder.
pub struct TestRequest<S = ()> {
req: HttpTestRequest,
path: Path<Url>,
state: State<S>,
}
impl Default for TestRequest<()> {
fn default() -> TestRequest {
TestRequest {
req: HttpTestRequest::default(),
path: Path::new(Url::new(Uri::default())),
state: State::new(()),
}
}
}
impl TestRequest<()> {
/// Create TestRequest and set request uri
pub fn with_uri(path: &str) -> Self {
Self::get().uri(path)
}
/// Create TestRequest and set header
pub fn with_hdr<H: Header>(hdr: H) -> Self {
Self::default().set(hdr)
}
/// Create TestRequest and set header
pub fn with_header<K, V>(key: K, value: V) -> Self
where
HeaderName: HttpTryFrom<K>,
V: IntoHeaderValue,
{
Self::default().header(key, value)
}
/// Create TestRequest and set method to `Method::GET`
pub fn get() -> Self |
/// Create TestRequest and set method to `Method::POST`
pub fn post() -> Self {
Self::default().method(Method::POST)
}
}
impl<S> TestRequest<S> {
/// Create TestRequest and set request uri
pub fn with_state(state: S) -> TestRequest<S> {
let req = TestRequest::get();
TestRequest {
state: State::new(state),
req: req.req,
path: req.path,
}
}
/// Set HTTP version of this request
pub fn version(mut self, ver: Version) -> Self {
self.req.version(ver);
self
}
/// Set HTTP method of this request
pub fn method(mut self, meth: Method) -> Self {
self.req.method(meth);
self
}
/// Set HTTP Uri of this request
pub fn uri(mut self, path: &str) -> Self {
self.req.uri(path);
self
}
/// Set a header
pub fn set<H: Header>(mut self, hdr: H) -> Self {
self.req.set(hdr);
self
}
/// Set a header
pub fn header<K, V>(mut self, key: K, value: V) -> Self
where
HeaderName: HttpTryFrom<K>,
V: IntoHeaderValue,
{
self.req.header(key, value);
self
}
/// Set request path pattern parameter
pub fn param(mut self, name: &'static str, value: &'static str) -> Self {
self.path.add_static(name, value);
self
}
/// Complete request creation and generate `Request` instance
pub fn finish(mut self) -> FramedRequest<TestBuffer, S> {
let req = self.req.finish();
self.path.get_mut().update(req.uri());
let framed = Framed::new(TestBuffer::empty(), Codec::default());
FramedRequest::new(req, framed, self.path, self.state)
}
/// This method generates `FramedRequest` instance and executes async handler
pub async fn run<F, R, I, E>(self, f: F) -> Result<I, E>
where
F: FnOnce(FramedRequest<TestBuffer, S>) -> R,
R: Future<Output = Result<I, E>>,
{
f(self.finish()).await
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test() {
let req = TestRequest::with_uri("/index.html")
.header("x-test", "test")
.param("test", "123")
.finish();
assert_eq!(*req.state(), ());
assert_eq!(req.version(), Version::HTTP_11);
assert_eq!(req.method(), Method::GET);
assert_eq!(req.path(), "/index.html");
assert_eq!(req.query_string(), "");
assert_eq!(
req.headers().get("x-test").unwrap().to_str().unwrap(),
"test"
);
assert_eq!(&req.match_info()["test"], "123");
}
}
| {
Self::default().method(Method::GET)
} |
mustache_core.js | /*!
* CanJS - 2.3.27
* http://canjs.com/
* Copyright (c) 2016 Bitovi
* Thu, 15 Sep 2016 21:14:18 GMT
* Licensed MIT
*/
/*[email protected]#view/stache/mustache_core*/
define([
'can/util/library',
'can/view/utils',
'can/view/mustache_helpers',
'can/view/expression',
'can/view/live',
'can/elements',
'can/view/scope',
'can/view/node_lists'
], function (can, utils, mustacheHelpers, expression, live, elements, Scope, nodeLists) {
live = live || can.view.live;
elements = elements || can.view.elements;
Scope = Scope || can.view.Scope;
nodeLists = nodeLists || can.view.nodeLists;
var mustacheLineBreakRegExp = /(?:(?:^|(\r?)\n)(\s*)(\{\{([^\}]*)\}\}\}?)([^\S\n\r]*)($|\r?\n))|(\{\{([^\}]*)\}\}\}?)/g, getItemsFragContent = function (items, isObserveList, helperOptions, options) {
var frag = (can.document || can.global.document).createDocumentFragment();
for (var i = 0, len = items.length; i < len; i++) {
append(frag, helperOptions.fn(isObserveList ? items.attr('' + i) : items[i], options));
}
return frag;
}, append = function (frag, content) {
if (content) {
frag.appendChild(typeof content === 'string' ? frag.ownerDocument.createTextNode(content) : content);
}
}, getItemsStringContent = function (items, isObserveList, helperOptions, options) {
var txt = '';
for (var i = 0, len = items.length; i < len; i++) {
txt += helperOptions.fn(isObserveList ? items.attr('' + i) : items[i], options);
}
return txt;
}, k = function () {
};
var core = {
expression: expression,
makeEvaluator: function (scope, helperOptions, nodeList, mode, exprData, truthyRenderer, falseyRenderer, stringOnly) {
if (mode === '^') {
var temp = truthyRenderer;
truthyRenderer = falseyRenderer;
falseyRenderer = temp;
} | },
inverse: function () {
},
context: scope.attr('.'),
scope: scope,
nodeList: nodeList,
exprData: exprData,
helpersScope: helperOptions
};
utils.convertToScopes(helperOptionArg, scope, helperOptions, nodeList, truthyRenderer, falseyRenderer, stringOnly);
value = exprData.value(scope, helperOptions, helperOptionArg);
if (exprData.isHelper) {
return value;
}
} else {
var readOptions = {
isArgument: true,
args: [
scope.attr('.'),
scope
],
asCompute: true
};
var helperAndValue = exprData.helperAndValue(scope, helperOptions, readOptions, nodeList, truthyRenderer, falseyRenderer, stringOnly);
var helper = helperAndValue.helper;
value = helperAndValue.value;
if (helper) {
return exprData.evaluator(helper, scope, helperOptions, readOptions, nodeList, truthyRenderer, falseyRenderer, stringOnly);
}
}
if (!mode) {
if (value && value.isComputed) {
return value;
} else {
return function () {
return '' + (value != null ? value : '');
};
}
} else if (mode === '#' || mode === '^') {
helperOptionArg = {
fn: function () {
},
inverse: function () {
}
};
utils.convertToScopes(helperOptionArg, scope, helperOptions, nodeList, truthyRenderer, falseyRenderer, stringOnly);
return function () {
var finalValue;
if (can.isFunction(value) && value.isComputed) {
finalValue = value();
} else {
finalValue = value;
}
if (typeof finalValue === 'function') {
return finalValue;
} else if (utils.isArrayLike(finalValue)) {
var isObserveList = utils.isObserveLike(finalValue);
if (isObserveList ? finalValue.attr('length') : finalValue.length) {
return (stringOnly ? getItemsStringContent : getItemsFragContent)(finalValue, isObserveList, helperOptionArg, helperOptions);
} else {
return helperOptionArg.inverse(scope, helperOptions);
}
} else {
return finalValue ? helperOptionArg.fn(finalValue || scope, helperOptions) : helperOptionArg.inverse(scope, helperOptions);
}
};
} else {
}
},
makeLiveBindingPartialRenderer: function (partialName, state) {
partialName = can.trim(partialName);
return function (scope, options, parentSectionNodeList) {
var nodeList = [this];
nodeList.expression = '>' + partialName;
nodeLists.register(nodeList, null, parentSectionNodeList || true, state.directlyNested);
var partialFrag = can.compute(function () {
var localPartialName = partialName;
var partial = options.attr('partials.' + localPartialName), renderer;
if (partial) {
renderer = function () {
return partial.render ? partial.render(scope, options, nodeList) : partial(scope, options);
};
} else {
var scopePartialName = scope.read(localPartialName, { isArgument: true }).value;
if (scopePartialName === null || !scopePartialName && localPartialName[0] === '*') {
return can.frag('');
}
if (scopePartialName) {
localPartialName = scopePartialName;
}
renderer = function () {
return can.isFunction(localPartialName) ? localPartialName(scope, options, nodeList) : can.view.render(localPartialName, scope, options, nodeList);
};
}
var res = can.__notObserve(renderer)();
return can.frag(res);
});
partialFrag.computeInstance.setPrimaryDepth(nodeList.nesting);
live.html(this, partialFrag, this.parentNode, nodeList);
};
},
makeStringBranchRenderer: function (mode, expressionString) {
var exprData = core.expression.parse(expressionString), fullExpression = mode + expressionString;
if (!(exprData instanceof expression.Helper) && !(exprData instanceof expression.Call)) {
exprData = new expression.Helper(exprData, [], {});
}
return function branchRenderer(scope, options, truthyRenderer, falseyRenderer) {
var evaluator = scope.__cache[fullExpression];
if (mode || !evaluator) {
evaluator = makeEvaluator(scope, options, null, mode, exprData, truthyRenderer, falseyRenderer, true);
if (!mode) {
scope.__cache[fullExpression] = evaluator;
}
}
var res = evaluator();
return res == null ? '' : '' + res;
};
},
makeLiveBindingBranchRenderer: function (mode, expressionString, state) {
var exprData = core.expression.parse(expressionString);
if (!(exprData instanceof expression.Helper) && !(exprData instanceof expression.Call)) {
exprData = new expression.Helper(exprData, [], {});
}
return function branchRenderer(scope, options, parentSectionNodeList, truthyRenderer, falseyRenderer) {
var nodeList = [this];
nodeList.expression = expressionString;
nodeLists.register(nodeList, null, parentSectionNodeList || true, state.directlyNested);
var evaluator = makeEvaluator(scope, options, nodeList, mode, exprData, truthyRenderer, falseyRenderer, state.tag);
var gotCompute = evaluator.isComputed, compute;
if (gotCompute) {
compute = evaluator;
} else {
compute = can.compute(evaluator, null, false);
}
compute.computeInstance.setPrimaryDepth(nodeList.nesting);
compute.computeInstance.bind('change', k);
var value = compute();
if (typeof value === 'function') {
can.__notObserve(value)(this);
} else if (gotCompute || compute.computeInstance.hasDependencies) {
if (state.attr) {
live.simpleAttribute(this, state.attr, compute);
} else if (state.tag) {
live.attributes(this, compute);
} else if (state.text && typeof value !== 'object') {
live.text(this, compute, this.parentNode, nodeList);
} else {
live.html(this, compute, this.parentNode, nodeList);
}
} else {
if (state.attr) {
can.attr.set(this, state.attr, value);
} else if (state.tag) {
live.setAttributes(this, value);
} else if (state.text && typeof value === 'string') {
this.nodeValue = value;
} else if (value != null) {
elements.replace([this], can.frag(value, this.ownerDocument));
}
}
compute.computeInstance.unbind('change', k);
};
},
splitModeFromExpression: function (expression, state) {
expression = can.trim(expression);
var mode = expression.charAt(0);
if ('#/{&^>!'.indexOf(mode) >= 0) {
expression = can.trim(expression.substr(1));
} else {
mode = null;
}
if (mode === '{' && state.node) {
mode = null;
}
return {
mode: mode,
expression: expression
};
},
cleanLineEndings: function (template) {
return template.replace(mustacheLineBreakRegExp, function (whole, returnBefore, spaceBefore, special, expression, spaceAfter, returnAfter, spaceLessSpecial, spaceLessExpression, matchIndex) {
spaceAfter = spaceAfter || '';
returnBefore = returnBefore || '';
spaceBefore = spaceBefore || '';
var modeAndExpression = splitModeFromExpression(expression || spaceLessExpression, {});
if (spaceLessSpecial || '>{'.indexOf(modeAndExpression.mode) >= 0) {
return whole;
} else if ('^#!/'.indexOf(modeAndExpression.mode) >= 0) {
return special + (matchIndex !== 0 && returnAfter.length ? returnBefore + '\n' : '');
} else {
return spaceBefore + special + spaceAfter + (spaceBefore.length || matchIndex !== 0 ? returnBefore + '\n' : '');
}
});
},
Options: utils.Options
};
var makeEvaluator = core.makeEvaluator, splitModeFromExpression = core.splitModeFromExpression;
can.view.mustacheCore = core;
return core;
}); | var value, helperOptionArg;
if (exprData instanceof expression.Call) {
helperOptionArg = {
fn: function () { |
GeneratedPaginatedResponseOfPipelineJobStatus.ts | // tslint:disable
/**
* Copyright 2021 Splunk, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"): you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* Data Stream Processing REST API
* Use the Streams service to perform create, read, update, and delete (CRUD) operations on your data pipeline. The Streams service also has metrics and preview session endpoints and gives you full control over your data pipeline.
*
* OpenAPI spec version: v3beta1.1 (recommended default)
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
import {
PipelineJobStatus,
} from './';
/**
*
* @export
* @interface PaginatedResponseOfPipelineJobStatus
*/
export interface PaginatedResponseOfPipelineJobStatus {
/**
*
* @type {Array<PipelineJobStatus>}
* @memberof PaginatedResponseOfPipelineJobStatus
*/
items?: Array<PipelineJobStatus>;
/**
*
* @type {number}
* @memberof PaginatedResponseOfPipelineJobStatus | } | */
total?: number;
|
when-file-with-no-error-changes.js | Input::
//// [/a/lib/lib.d.ts]
/// <reference no-default-lib="true"/>
interface Boolean {}
interface Function {}
interface CallableFunction {}
interface NewableFunction {}
interface IArguments {}
interface Number { toExponential: any; }
interface Object {}
interface RegExp {}
interface String { charAt: any; }
interface Array<T> { length: number; [n: number]: T; }
//// [/user/username/projects/solution/app/fileWithError.ts]
export var myClassWithError = class {
tags() { }
};
//// [/user/username/projects/solution/app/fileWithoutError.ts]
export class | { }
//// [/user/username/projects/solution/app/tsconfig.json]
{"compilerOptions":{"composite":true}}
/a/lib/tsc.js -b -w app
Output::
>> Screen clear
[[90m12:00:25 AM[0m] Starting compilation in watch mode...
[[90m12:00:38 AM[0m] Found 0 errors. Watching for file changes.
Program root files: ["/user/username/projects/solution/app/fileWithError.ts","/user/username/projects/solution/app/fileWithoutError.ts"]
Program options: {"composite":true,"watch":true,"configFilePath":"/user/username/projects/solution/app/tsconfig.json"}
Program structureReused: Not
Program files::
/a/lib/lib.d.ts
/user/username/projects/solution/app/fileWithError.ts
/user/username/projects/solution/app/fileWithoutError.ts
Semantic diagnostics in builder refreshed for::
/a/lib/lib.d.ts
/user/username/projects/solution/app/fileWithError.ts
/user/username/projects/solution/app/fileWithoutError.ts
Shape signatures in builder refreshed for::
/a/lib/lib.d.ts (used version)
/user/username/projects/solution/app/filewitherror.ts (used version)
/user/username/projects/solution/app/filewithouterror.ts (used version)
WatchedFiles::
/user/username/projects/solution/app/tsconfig.json:
{"fileName":"/user/username/projects/solution/app/tsconfig.json","pollingInterval":250}
/user/username/projects/solution/app/filewitherror.ts:
{"fileName":"/user/username/projects/solution/app/fileWithError.ts","pollingInterval":250}
/user/username/projects/solution/app/filewithouterror.ts:
{"fileName":"/user/username/projects/solution/app/fileWithoutError.ts","pollingInterval":250}
FsWatches::
FsWatchesRecursive::
/user/username/projects/solution/app:
{"directoryName":"/user/username/projects/solution/app","fallbackPollingInterval":500,"fallbackOptions":{"watchFile":"PriorityPollingInterval"}}
exitCode:: ExitStatus.undefined
//// [/user/username/projects/solution/app/fileWithError.js]
"use strict";
exports.__esModule = true;
exports.myClassWithError = void 0;
exports.myClassWithError = /** @class */ (function () {
function myClassWithError() {
}
myClassWithError.prototype.tags = function () { };
return myClassWithError;
}());
//// [/user/username/projects/solution/app/fileWithError.d.ts]
export declare var myClassWithError: {
new (): {
tags(): void;
};
};
//// [/user/username/projects/solution/app/fileWithoutError.js]
"use strict";
exports.__esModule = true;
exports.myClass = void 0;
var myClass = /** @class */ (function () {
function myClass() {
}
return myClass;
}());
exports.myClass = myClass;
//// [/user/username/projects/solution/app/fileWithoutError.d.ts]
export declare class myClass {
}
//// [/user/username/projects/solution/app/tsconfig.tsbuildinfo]
{"program":{"fileNames":["../../../../../a/lib/lib.d.ts","./filewitherror.ts","./filewithouterror.ts"],"fileInfos":[{"version":"-7698705165-/// <reference no-default-lib=\"true\"/>\ninterface Boolean {}\ninterface Function {}\ninterface CallableFunction {}\ninterface NewableFunction {}\ninterface IArguments {}\ninterface Number { toExponential: any; }\ninterface Object {}\ninterface RegExp {}\ninterface String { charAt: any; }\ninterface Array<T> { length: number; [n: number]: T; }","affectsGlobalScope":true},"-8106435186-export var myClassWithError = class {\n tags() { }\n \n };","-11785903855-export class myClass { }"],"options":{"composite":true},"referencedMap":[],"exportedModulesMap":[],"semanticDiagnosticsPerFile":[1,2,3]},"version":"FakeTSVersion"}
//// [/user/username/projects/solution/app/tsconfig.tsbuildinfo.readable.baseline.txt]
{
"program": {
"fileNames": [
"../../../../../a/lib/lib.d.ts",
"./filewitherror.ts",
"./filewithouterror.ts"
],
"fileInfos": {
"../../../../../a/lib/lib.d.ts": {
"version": "-7698705165-/// <reference no-default-lib=\"true\"/>\ninterface Boolean {}\ninterface Function {}\ninterface CallableFunction {}\ninterface NewableFunction {}\ninterface IArguments {}\ninterface Number { toExponential: any; }\ninterface Object {}\ninterface RegExp {}\ninterface String { charAt: any; }\ninterface Array<T> { length: number; [n: number]: T; }",
"signature": "-7698705165-/// <reference no-default-lib=\"true\"/>\ninterface Boolean {}\ninterface Function {}\ninterface CallableFunction {}\ninterface NewableFunction {}\ninterface IArguments {}\ninterface Number { toExponential: any; }\ninterface Object {}\ninterface RegExp {}\ninterface String { charAt: any; }\ninterface Array<T> { length: number; [n: number]: T; }",
"affectsGlobalScope": true
},
"./filewitherror.ts": {
"version": "-8106435186-export var myClassWithError = class {\n tags() { }\n \n };",
"signature": "-8106435186-export var myClassWithError = class {\n tags() { }\n \n };"
},
"./filewithouterror.ts": {
"version": "-11785903855-export class myClass { }",
"signature": "-11785903855-export class myClass { }"
}
},
"options": {
"composite": true
},
"referencedMap": {},
"exportedModulesMap": {},
"semanticDiagnosticsPerFile": [
"../../../../../a/lib/lib.d.ts",
"./filewitherror.ts",
"./filewithouterror.ts"
]
},
"version": "FakeTSVersion",
"size": 782
}
Change:: Introduce error
Input::
//// [/user/username/projects/solution/app/fileWithError.ts]
export var myClassWithError = class {
tags() { }
private p = 12
};
Output::
>> Screen clear
[[90m12:00:42 AM[0m] File change detected. Starting incremental compilation...
[96mapp/fileWithError.ts[0m:[93m1[0m:[93m12[0m - [91merror[0m[90m TS4094: [0mProperty 'p' of exported class expression may not be private or protected.
[7m1[0m export var myClassWithError = class {
[7m [0m [91m ~~~~~~~~~~~~~~~~[0m
[[90m12:00:43 AM[0m] Found 1 error. Watching for file changes.
Program root files: ["/user/username/projects/solution/app/fileWithError.ts","/user/username/projects/solution/app/fileWithoutError.ts"]
Program options: {"composite":true,"watch":true,"configFilePath":"/user/username/projects/solution/app/tsconfig.json"}
Program structureReused: Not
Program files::
/a/lib/lib.d.ts
/user/username/projects/solution/app/fileWithError.ts
/user/username/projects/solution/app/fileWithoutError.ts
Semantic diagnostics in builder refreshed for::
/user/username/projects/solution/app/fileWithError.ts
Shape signatures in builder refreshed for::
/user/username/projects/solution/app/filewitherror.ts (computed .d.ts)
WatchedFiles::
/user/username/projects/solution/app/tsconfig.json:
{"fileName":"/user/username/projects/solution/app/tsconfig.json","pollingInterval":250}
/user/username/projects/solution/app/filewitherror.ts:
{"fileName":"/user/username/projects/solution/app/fileWithError.ts","pollingInterval":250}
/user/username/projects/solution/app/filewithouterror.ts:
{"fileName":"/user/username/projects/solution/app/fileWithoutError.ts","pollingInterval":250}
FsWatches::
FsWatchesRecursive::
/user/username/projects/solution/app:
{"directoryName":"/user/username/projects/solution/app","fallbackPollingInterval":500,"fallbackOptions":{"watchFile":"PriorityPollingInterval"}}
exitCode:: ExitStatus.undefined
Change:: Change fileWithoutError
Input::
//// [/user/username/projects/solution/app/fileWithoutError.ts]
export class myClass2 { }
Output::
>> Screen clear
[[90m12:00:47 AM[0m] File change detected. Starting incremental compilation...
[96mapp/fileWithError.ts[0m:[93m1[0m:[93m12[0m - [91merror[0m[90m TS4094: [0mProperty 'p' of exported class expression may not be private or protected.
[7m1[0m export var myClassWithError = class {
[7m [0m [91m ~~~~~~~~~~~~~~~~[0m
[[90m12:00:48 AM[0m] Found 1 error. Watching for file changes.
Program root files: ["/user/username/projects/solution/app/fileWithError.ts","/user/username/projects/solution/app/fileWithoutError.ts"]
Program options: {"composite":true,"watch":true,"configFilePath":"/user/username/projects/solution/app/tsconfig.json"}
Program structureReused: Not
Program files::
/a/lib/lib.d.ts
/user/username/projects/solution/app/fileWithError.ts
/user/username/projects/solution/app/fileWithoutError.ts
Semantic diagnostics in builder refreshed for::
/user/username/projects/solution/app/fileWithoutError.ts
Shape signatures in builder refreshed for::
/user/username/projects/solution/app/filewithouterror.ts (computed .d.ts)
WatchedFiles::
/user/username/projects/solution/app/tsconfig.json:
{"fileName":"/user/username/projects/solution/app/tsconfig.json","pollingInterval":250}
/user/username/projects/solution/app/filewitherror.ts:
{"fileName":"/user/username/projects/solution/app/fileWithError.ts","pollingInterval":250}
/user/username/projects/solution/app/filewithouterror.ts:
{"fileName":"/user/username/projects/solution/app/fileWithoutError.ts","pollingInterval":250}
FsWatches::
FsWatchesRecursive::
/user/username/projects/solution/app:
{"directoryName":"/user/username/projects/solution/app","fallbackPollingInterval":500,"fallbackOptions":{"watchFile":"PriorityPollingInterval"}}
exitCode:: ExitStatus.undefined
| myClass |
delete.js | 'use strict'
const co = require('co')
const prompt = require('co-prompt')
const config = require('../templates.json')
const chalk = require('chalk')
const fs = require('fs')
const path = require('path')
module.exports = () => {
co(function* () {
const tplName = yield prompt(chalk.green('Please input will delete template name: ')) | delete config.tpl[tplName]
} else {
console.log(chalk.red('Template does not exist!'))
process.exit()
}
fs.writeFile(path.resolve(__dirname, '../templates.json'), JSON.stringify(config), 'utf-8', (err) => {
if (err) console.log(err)
console.log(chalk.green('Delete template success!'))
console.log(chalk.grey('The last template list is:'))
console.log(config)
process.exit()
})
})
} |
if (config.tpl[tplName]) { |
bitcoin_he.ts | <?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.1" language="he">
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Riocoin2</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+39"/>
<source><b>Riocoin2</b> version</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+41"/>
<source>Copyright © 2009-2014 The Bitcoin developers
Copyright © 2012-2014 The NovaCoin developers
Copyright © 2014 The BlackCoin developers
Copyright © 2014 The ShadowCoin developers
Copyright © 2014-2015 The Riocoin2 developers</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+17"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or <a href="http://www.opensource.org/licenses/mit-license.php">http://www.opensource.org/licenses/mit-license.php</a>.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (<a href="https://www.openssl.org/">https://www.openssl.org/</a>) and cryptographic software written by Eric Young (<a href="mailto:[email protected]">[email protected]</a>) and UPnP software written by Thomas Bernard.</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation type="vanished">
זוהי תוכנה ניסיונית.
מופצת תחת רישיון התוכנה MIT/X11, ראה את הקובץ המצורף COPYING או http://www.opensource.org/licenses/mit-license.php.
המוצר הזה כולל תוכנה שפותחה ע"י פרויקט OpenSSL לשימוש בתיבת הכלים OpenSSL (http://www.openssl.org/) ותוכנה קריפטוגרפית שנכתבה ע"י אריק יאנג ([email protected]) ותוכנת UPnP שנכתבה ע"י תומס ברנרד.</translation>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+22"/>
<source>Double-click to edit address or label</source>
<translation>לחץ לחיצה כפולה לערוך כתובת או תוית</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>יצירת כתובת חדשה</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>העתק את הכתובת המסומנת ללוח העריכה</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-46"/>
<source>These are your Riocoin2 addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+60"/>
<source>&Copy Address</source>
<translation>העתק כתובת</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a Riocoin2 address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>מחק את הכתובת שנבחרה מהרשימה</translation>
</message>
<message>
<location line="-14"/>
<source>Verify a message to ensure it was signed with a specified Riocoin2 address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation type="unfinished">אמת הודעה</translation>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&מחק</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+65"/>
<source>Copy &Label</source>
<translation>העתק תוית</translation>
</message>
<message>
<location line="+2"/>
<source>&Edit</source>
<translation>עריכה</translation>
</message>
<message>
<location line="+250"/>
<source>Export Address Book Data</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>קובץ מופרד בפסיקים (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+156"/>
<source>Label</source>
<translation>תוית</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>כתובת</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(ללא תוית)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>שיח סיסמא</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>הכנס סיסמה</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>סיסמה חדשה</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>חזור על הסיסמה החדשה</translation>
</message>
<message>
<location line="+33"/>
<source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>For staking only</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+37"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>הכנס את הסיסמה החדשה לארנק. <br/>אנא השתמש בסיסמה המכילה <b>10 תוים אקראיים או יותר</b>, או <b>שמונה מילים או יותר</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>הצפן ארנק</translation>
</message>
<message>
<location line="+7"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>הפעולה הזו דורשת את סיסמת הארנק שלך בשביל לפתוח את הארנק.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>פתיחת ארנק</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>הפעולה הזו דורשת את סיסמת הארנק שלך בשביל לפענח את הארנק.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>פענוח ארנק</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>שינוי סיסמה</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>הכנס את הסיסמות הישנה והחדשה לארנק.</translation>
</message>
<message>
<location line="+45"/>
<source>Confirm wallet encryption</source>
<translation>אשר הצפנת ארנק</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR COINS</b>!</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>האם אתה בטוח שברצונך להצפין את הארנק?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>חשוב! כל גיבוי קודם שעשית לארנק שלך יש להחליף עם קובץ הארנק המוצפן שזה עתה נוצר. מסיבות אבטחה, גיבויים קודמים של קובץ הארנק הלא-מוצפן יהפכו לחסרי שימוש ברגע שתתחיל להשתמש בארנק החדש המוצפן.</translation>
</message>
<message>
<location line="+103"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>זהירות: מקש Caps Lock מופעל!</translation>
</message>
<message>
<location line="-133"/>
<location line="+60"/>
<source>Wallet encrypted</source>
<translation>הארנק הוצפן</translation>
</message>
<message>
<location line="-58"/>
<source>Riocoin2 will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+44"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>הצפנת הארנק נכשלה</translation>
</message>
<message>
<location line="-56"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>הצפנת הארנק נכשלה עקב שגיאה פנימית. הארנק שלך לא הוצפן.</translation>
</message>
<message>
<location line="+7"/>
<location line="+50"/>
<source>The supplied passphrases do not match.</source>
<translation>הסיסמות שניתנו אינן תואמות.</translation>
</message>
<message>
<location line="-38"/>
<source>Wallet unlock failed</source>
<translation>פתיחת הארנק נכשלה</translation>
</message>
<message>
<location line="+1"/>
<location line="+12"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>הסיסמה שהוכנסה לפענוח הארנק שגויה.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>פענוח הארנק נכשל</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>סיסמת הארנק שונתה בהצלחה.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+344"/>
<source>Sign &message...</source>
<translation>חתום על הודעה</translation>
</message>
<message>
<location line="+240"/>
<source>Synchronizing with network...</source>
<translation>מסתנכרן עם הרשת...</translation>
</message>
<message>
<location line="-306"/>
<source>&Overview</source>
<translation>&סקירה</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>הצג סקירה כללית של הארנק</translation>
</message>
<message>
<location line="+17"/>
<source>&Transactions</source>
<translation>&פעולות</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>דפדף בהיסטוריית הפעולות</translation>
</message>
<message>
<location line="+5"/>
<source>&Address Book</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Edit the list of stored addresses and labels</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-13"/>
<source>&Receive coins</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Show the list of addresses for receiving payments</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-7"/>
<source>&Send coins</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+35"/>
<source>E&xit</source>
<translation>י&ציאה</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>סגור תוכנה</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about Riocoin2</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>אודות Qt</translation>
</message>
<message> | <translation>הצג מידע על Qt</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&אפשרויות</translation>
</message>
<message>
<location line="+4"/>
<source>&Encrypt Wallet...</source>
<translation>הצפן ארנק</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>גיבוי ארנק</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>שנה סיסמא</translation>
</message>
<message numerus="yes">
<location line="+248"/>
<source>~%n block(s) remaining</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message>
<location line="+6"/>
<source>Downloaded %1 of %2 blocks of transaction history (%3% done).</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-307"/>
<source>Send coins to a Riocoin2 address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+45"/>
<source>Modify configuration options for Riocoin2</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+4"/>
<source>Encrypt or decrypt wallet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Backup wallet to another location</source>
<translation>גיבוי הארנק למקום אחר</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>שנה את הסיסמה להצפנת הארנק</translation>
</message>
<message>
<location line="+7"/>
<source>&Debug window</source>
<translation>חלון ניפוי</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>פתח את לוח הבקרה לאבחון וניפוי</translation>
</message>
<message>
<location line="-2"/>
<source>&Verify message...</source>
<translation>אמת הודעה...</translation>
</message>
<message>
<location line="-214"/>
<source>Riocoin2</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source>Wallet</source>
<translation>ארנק</translation>
</message>
<message>
<location line="+192"/>
<source>&About Riocoin2</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>הצג / הסתר</translation>
</message>
<message>
<location line="+9"/>
<source>Unlock wallet</source>
<translation type="unfinished">פתיחת ארנק</translation>
</message>
<message>
<location line="+1"/>
<source>&Lock Wallet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Lock wallet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+31"/>
<source>&File</source>
<translation>&קובץ</translation>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation>ה&גדרות</translation>
</message>
<message>
<location line="+8"/>
<source>&Help</source>
<translation>&עזרה</translation>
</message>
<message>
<location line="+10"/>
<source>Tabs toolbar</source>
<translation>סרגל כלים טאבים</translation>
</message>
<message>
<location line="+22"/>
<location line="+9"/>
<source>[testnet]</source>
<translation>[רשת-בדיקה]</translation>
</message>
<message>
<location line="+0"/>
<location line="+60"/>
<source>Riocoin2 client</source>
<translation type="unfinished"></translation>
</message>
<message numerus="yes">
<location line="+70"/>
<source>%n active connection(s) to Riocoin2 network</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message>
<location line="+40"/>
<source>Downloaded %1 blocks of transaction history.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+430"/>
<source>Staking.<br>Your weight is %1<br>Network weight is %2<br>Expected time to earn reward is %3</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+6"/>
<source>Not staking because wallet is locked</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is offline</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is syncing</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because you don't have mature coins</source>
<translation type="unfinished"></translation>
</message>
<message numerus="yes">
<location line="-420"/>
<source>%n second(s) ago</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message>
<location line="-282"/>
<source>&Unlock Wallet...</source>
<translation type="unfinished"></translation>
</message>
<message numerus="yes">
<location line="+286"/>
<source>%n minute(s) ago</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s) ago</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s) ago</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message>
<location line="+6"/>
<source>Up to date</source>
<translation>עדכני</translation>
</message>
<message>
<location line="+7"/>
<source>Catching up...</source>
<translation>מתעדכן...</translation>
</message>
<message>
<location line="+10"/>
<source>Last received block was generated %1.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+59"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+5"/>
<source>Confirm transaction fee</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+27"/>
<source>Sent transaction</source>
<translation>פעולה שנשלחה</translation>
</message>
<message>
<location line="+1"/>
<source>Incoming transaction</source>
<translation>פעולה שהתקבלה</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>תאריך: %1
כמות: %2
סוג: %3
כתובת: %4</translation>
</message>
<message>
<location line="+87"/>
<location line="+15"/>
<source>URI handling</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-15"/>
<location line="+15"/>
<source>URI can not be parsed! This can be caused by an invalid Riocoin2 address or malformed URI parameters.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+30"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>הארנק <b>מוצפן</b> וכרגע <b>פתוח</b></translation>
</message>
<message>
<location line="+13"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>הארנק <b>מוצפן</b> וכרגע <b>נעול</b></translation>
</message>
<message>
<location line="+25"/>
<source>Backup Wallet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"></translation>
</message>
<message numerus="yes">
<location line="+91"/>
<source>%n second(s)</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n minute(s)</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s)</source>
<translation>
<numerusform>%n שעה</numerusform>
<numerusform>%n שעות</numerusform>
</translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation>
<numerusform>%n יום</numerusform>
<numerusform>%n ימים</numerusform>
</translation>
</message>
<message>
<location line="+18"/>
<source>Not staking</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+114"/>
<source>A fatal error occurred. Riocoin2 can no longer continue safely and will quit.</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+109"/>
<source>Network Alert</source>
<translation>אזעקת רשת</translation>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<location filename="../forms/coincontroldialog.ui" line="+14"/>
<source>Coin Control</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+31"/>
<source>Quantity:</source>
<translation>כמות:</translation>
</message>
<message>
<location line="+32"/>
<source>Bytes:</source>
<translation>בייטים:</translation>
</message>
<message>
<location line="+48"/>
<source>Amount:</source>
<translation>כמות:</translation>
</message>
<message>
<location line="+32"/>
<source>Priority:</source>
<translation>קדימות:</translation>
</message>
<message>
<location line="+48"/>
<source>Fee:</source>
<translation>תשלום:</translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="+551"/>
<source>no</source>
<translation>לא</translation>
</message>
<message>
<location filename="../forms/coincontroldialog.ui" line="+51"/>
<source>After Fee:</source>
<translation>לאחר עמלה:</translation>
</message>
<message>
<location line="+35"/>
<source>Change:</source>
<translation>שינוי:</translation>
</message>
<message>
<location line="+69"/>
<source>(un)select all</source>
<translation>(מחק)(בחר) הכל</translation>
</message>
<message>
<location line="+13"/>
<source>Tree mode</source>
<translation>מצב עץ</translation>
</message>
<message>
<location line="+16"/>
<source>List mode</source>
<translation>מצר רשימה</translation>
</message>
<message>
<location line="+45"/>
<source>Amount</source>
<translation>כמות</translation>
</message>
<message>
<location line="+5"/>
<source>Label</source>
<translation type="unfinished">תוית</translation>
</message>
<message>
<location line="+5"/>
<source>Address</source>
<translation>כתובת</translation>
</message>
<message>
<location line="+5"/>
<source>Date</source>
<translation>תאריך</translation>
</message>
<message>
<location line="+5"/>
<source>Confirmations</source>
<translation>אישורים</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed</source>
<translation>מאושר</translation>
</message>
<message>
<location line="+5"/>
<source>Priority</source>
<translation>קדימות</translation>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="-515"/>
<source>Copy address</source>
<translation>העתק כתובת</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>העתק תוית</translation>
</message>
<message>
<location line="+1"/>
<location line="+26"/>
<source>Copy amount</source>
<translation>העתק כמות</translation>
</message>
<message>
<location line="-25"/>
<source>Copy transaction ID</source>
<translation>העתק מזהה פעולה</translation>
</message>
<message>
<location line="+24"/>
<source>Copy quantity</source>
<translation>העתק כמות</translation>
</message>
<message>
<location line="+2"/>
<source>Copy fee</source>
<translation>העתק מחיר</translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation>העתק קדימות</translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+317"/>
<source>highest</source>
<translation>הכי גבוה</translation>
</message>
<message>
<location line="+1"/>
<source>high</source>
<translation>גבוה</translation>
</message>
<message>
<location line="+1"/>
<source>medium-high</source>
<translation>בנוני גבוה</translation>
</message>
<message>
<location line="+1"/>
<source>medium</source>
<translation>בינוני</translation>
</message>
<message>
<location line="+4"/>
<source>low-medium</source>
<translation>בינוני - נמוך</translation>
</message>
<message>
<location line="+1"/>
<source>low</source>
<translation>נמוך</translation>
</message>
<message>
<location line="+1"/>
<source>lowest</source>
<translation>הכי נמוך</translation>
</message>
<message>
<location line="+155"/>
<source>DUST</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source>yes</source>
<translation>כן</translation>
</message>
<message>
<location line="+10"/>
<source>This label turns red, if the transaction size is bigger than 10000 bytes.
This means a fee of at least %1 per kb is required.
Can vary +/- 1 Byte per input.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Transactions with higher priority get more likely into a block.
This label turns red, if the priority is smaller than "medium".
This means a fee of at least %1 per kb is required.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if any recipient receives an amount smaller than %1.
This means a fee of at least %2 is required.
Amounts below 0.546 times the minimum relay fee are shown as DUST.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if the change is smaller than %1.
This means a fee of at least %2 is required.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+37"/>
<location line="+66"/>
<source>(no label)</source>
<translation>(ללא תוית)</translation>
</message>
<message>
<location line="-9"/>
<source>change from %1 (%2)</source>
<translation>עודף מ־%1 (%2)</translation>
</message>
<message>
<location line="+1"/>
<source>(change)</source>
<translation>(עודף)</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>ערוך כתובת</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>ת&וית</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&כתובת</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>&Stealth Address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+20"/>
<source>New receiving address</source>
<translation>כתובת חדשה לקבלה</translation>
</message>
<message>
<location line="+7"/>
<source>New sending address</source>
<translation>כתובת חדשה לשליחה</translation>
</message>
<message>
<location line="+4"/>
<source>Edit receiving address</source>
<translation>ערוך כתובת לקבלה</translation>
</message>
<message>
<location line="+7"/>
<source>Edit sending address</source>
<translation>ערוך כתובת לשליחה</translation>
</message>
<message>
<location line="+82"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>הכתובת שהכנסת "%1" כבר נמצאת בפנקס הכתובות.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Riocoin2 address.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>פתיחת הארנק נכשלה.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>יצירת מפתח חדש נכשלה.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+424"/>
<location line="+12"/>
<source>Riocoin2-Qt</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation type="unfinished">שימוש:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>אפשרויות</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>ראשי</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>שלם &עמלת פעולה</translation>
</message>
<message>
<location line="+31"/>
<source>Reserved amount does not participate in staking and is therefore spendable at any time.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+15"/>
<source>Reserve</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+31"/>
<source>Automatically start Riocoin2 after logging in to the system.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>&Start Riocoin2 on system login</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+21"/>
<source>&Network</source>
<translation>רשת</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Riocoin2 client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>מיפוי פורט באמצעות UPnP</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the Riocoin2 network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>כתובת IP של פרוקסי:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>פורט:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>הפורט של הפרוקסי (למשל 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>גרסת SOCKS:</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>גרסת SOCKS של הפרוקסי (למשל 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>חלון</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>הצג סמל מגש בלבד לאחר מזעור החלון.</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>מ&זער למגש במקום לשורת המשימות</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>מזער את התוכנה במקום לצאת ממנה כשהחלון נסגר. כשאפשרות זו פעילה, התוכנה תיסגר רק לאחר בחירת יציאה מהתפריט.</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>מזער בעת סגירה</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>תצוגה</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>שפת ממשק המשתמש:</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Riocoin2.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>יחידת מדידה להצגת כמויות:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>בחר את ברירת המחדל ליחידת החלוקה אשר תוצג בממשק ובעת שליחת מטבעות.</translation>
</message>
<message>
<source>&Display addresses in transaction list</source>
<translation type="vanished">הצג כתובות ברשימת הפעולות</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show coin control features or not.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Display coin &control features (experts only!)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>אישור</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>ביטול</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+55"/>
<source>default</source>
<translation>ברירת מחדל</translation>
</message>
<message>
<location line="+147"/>
<location line="+9"/>
<source>Warning</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Riocoin2.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>כתובת הפרוקסי שסופקה אינה תקינה.</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>טופס</translation>
</message>
<message>
<location line="+33"/>
<location line="+231"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Riocoin2 network after a connection is established, but this process has not completed yet.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-160"/>
<source>Stake:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-107"/>
<source>Wallet</source>
<translation>ארנק</translation>
</message>
<message>
<location line="+49"/>
<source>Spendable:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+16"/>
<source>Your current spendable balance</source>
<translation>היתרה הזמינה הנוכחית</translation>
</message>
<message>
<location line="+71"/>
<source>Immature:</source>
<translation>לא בשל:</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>מאזן שנכרה וטרם הבשיל</translation>
</message>
<message>
<location line="+20"/>
<source>Total:</source>
<translation>סך הכול:</translation>
</message>
<message>
<location line="+16"/>
<source>Your current total balance</source>
<translation>סך כל היתרה הנוכחית שלך</translation>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>פעולות אחרונות</b></translation>
</message>
<message>
<location line="-108"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-29"/>
<source>Total of coins that was staked, and do not yet count toward the current balance</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+113"/>
<location line="+1"/>
<source>out of sync</source>
<translation>לא מסונכרן</translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation type="unfinished">כמות:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>שם ממשק</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+352"/>
<source>N/A</source>
<translation>N/A</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>גרסת ממשק</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>מידע</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>משתמש ב-OpenSSL גרסה</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>זמן אתחול</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>רשת</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>מספר חיבורים</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>שרשרת הבלוקים</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>מספר הבלוקים הנוכחי</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>מספר כולל משוער של בלוקים</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation>זמן הבלוק האחרון</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>פתח</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>Show the Riocoin2-Qt help message to get a list with possible Riocoin2 command-line options.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>לוח בקרה</translation>
</message>
<message>
<location line="+72"/>
<source>&Network Traffic</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+52"/>
<source>&Clear</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+13"/>
<source>Totals</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+64"/>
<source>In:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+80"/>
<source>Out:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-541"/>
<source>Build date</source>
<translation>תאריך בניה</translation>
</message>
<message>
<location line="-104"/>
<source>Riocoin2 - Debug window</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+25"/>
<source>Riocoin2 Core</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>קובץ יומן ניפוי</translation>
</message>
<message>
<location line="+7"/>
<source>Open the Riocoin2 debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>נקה לוח בקרה</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the Riocoin2 RPC console.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>השתמש בחיצים למעלה ולמטה כדי לנווט בהיסטוריה, ו- <b>Ctrl-L</b> כדי לנקות את המסך.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>הקלד <b>help</b> בשביל סקירה של הפקודות הזמינות.</translation>
</message>
<message>
<location line="+123"/>
<source>%1 B</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>%1 KB</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>%1 MB</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>%1 GB</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>%1 m</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+5"/>
<source>%1 h</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>%1 h %2 m</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+182"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>שלח מטבעות</translation>
</message>
<message>
<location line="+76"/>
<source>Coin Control Features</source>
        <translation>תכונות בקרת מטבעות</translation>
</message>
<message>
<location line="+20"/>
<source>Inputs...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>automatically selected</source>
<translation>נבחר אוטומאטית</translation>
</message>
<message>
<location line="+19"/>
<source>Insufficient funds!</source>
<translation>אין מספיק כספים!</translation>
</message>
<message>
<location line="+77"/>
<source>Quantity:</source>
<translation>כמות:</translation>
</message>
<message>
<location line="+22"/>
<location line="+35"/>
<source>0</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-19"/>
<source>Bytes:</source>
<translation>בייטים:</translation>
</message>
<message>
<location line="+51"/>
<source>Amount:</source>
<translation>כמות:</translation>
</message>
<message>
<location line="+22"/>
<location line="+86"/>
<location line="+86"/>
<location line="+32"/>
<source>0.00 RIO</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-191"/>
<source>Priority:</source>
<translation>קדימות:</translation>
</message>
<message>
<location line="+19"/>
<source>medium</source>
<translation type="unfinished">בינוני</translation>
</message>
<message>
<location line="+32"/>
<source>Fee:</source>
<translation>תשלום:</translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+19"/>
<source>no</source>
<translation type="unfinished">לא</translation>
</message>
<message>
<location line="+32"/>
<source>After Fee:</source>
<translation>לאחר עמלה:</translation>
</message>
<message>
<location line="+35"/>
<source>Change</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+50"/>
<source>custom change address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+106"/>
<source>Send to multiple recipients at once</source>
<translation>שלח למספר מקבלים בו-זמנית</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>הוסף מקבל</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>נקה הכל</translation>
</message>
<message>
<location line="+28"/>
<source>Balance:</source>
<translation>יתרה:</translation>
</message>
<message>
<location line="+16"/>
<source>123.456 RIO</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>אשר את פעולת השליחה</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>שלח</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-178"/>
<source>Enter a Riocoin2 address (e.g. JXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+15"/>
<source>Copy quantity</source>
<translation>העתק כמות</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>העתק כמות</translation>
</message>
<message>
<location line="+1"/>
<source>Copy fee</source>
<translation>העתק מחיר</translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation>העתק קדימות</translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+86"/>
<source><b>%1</b> to %2 (%3)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>אשר שליחת מטבעות</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+29"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>כתובת המקבל אינה תקינה, אנא בדוק שנית.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>הכמות לשלם חייבת להיות גדולה מ-0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>הכמות עולה על המאזן שלך.</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>הכמות הכוללת, ובכללה עמלת פעולה בסך %1, עולה על המאזן שלך.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>כתובת כפולה נמצאה, ניתן לשלוח לכל כתובת רק פעם אחת בכל פעולת שליחה.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+5"/>
<source>Error: Narration is too long.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+251"/>
<source>WARNING: Invalid Riocoin2 address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+13"/>
<source>(no label)</source>
<translation>(ללא תוית)</translation>
</message>
<message>
<location line="+4"/>
<source>WARNING: unknown change address</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation type="unfinished">טופס</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>כ&מות:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>שלם &ל:</translation>
</message>
<message>
<location line="+24"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>הכנס תוית לכתובת הזאת כדי להכניס לפנקס הכתובות</translation>
</message>
<message>
<location line="+9"/>
<source>N&arration:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+16"/>
<location filename="../sendcoinsentry.cpp" line="+2"/>
<source>Enter a short note to send with payment (max 24 characters)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+10"/>
<source>&Label:</source>
<translation>ת&וית:</translation>
</message>
<message>
<location line="+18"/>
<source>The address to send the payment to (e.g. JXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+10"/>
<source>Choose address from address book</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
        <translation>הדבק כתובת מהלוח</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="-1"/>
<source>Enter a Riocoin2 address (e.g. JXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>חתימות - חתום או אמת הודעה</translation>
</message>
<message>
<location line="+13"/>
<location line="+124"/>
<source>&Sign Message</source>
<translation>חתום על הו&דעה</translation>
</message>
<message>
<location line="-118"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>אתה יכול לחתום על הודעות עם הכתובות שלך כדי להוכיח שהן בבעלותך. היזהר לא לחתום על משהו מעורפל, שכן התקפות פישינג עשויות לגרום לך בעורמה למסור את זהותך. חתום רק על אמרות מפורטות לחלוטין שאתה מסכים עימן.</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. JXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+10"/>
<location line="+203"/>
<source>Choose an address from the address book</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-193"/>
<location line="+203"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-193"/>
<source>Paste address from clipboard</source>
<translation>הדבק כתובת מהלוח</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
        <translation>הכנס כאן את ההודעה שברצונך לחתום עליה</translation>
</message>
<message>
<location line="+24"/>
<source>Copy the current signature to the system clipboard</source>
<translation>העתק את החתימה הנוכחית ללוח המערכת</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Riocoin2 address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+17"/>
<source>Reset all sign message fields</source>
<translation>אפס את כל שדות החתימה על הודעה</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>נקה הכל</translation>
</message>
<message>
<location line="-87"/>
<location line="+70"/>
<source>&Verify Message</source>
<translation>אמת הודעה</translation>
</message>
<message>
<location line="-64"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>הכנס למטה את הכתובת החותמת, ההודעה (ודא שאתה מעתיק מעברי שורה, רווחים, טאבים וכו' באופן מדויק) והחתימה כדי לאמת את ההודעה. היזהר לא לפרש את החתימה כיותר ממה שמופיע בהודעה החתומה בעצמה, כדי להימנע מליפול קורבן למתקפת איש-באמצע.</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. JXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Riocoin2 address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+17"/>
<source>Reset all verify message fields</source>
<translation>אפס את כל שדות אימות הודעה</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter an Riocoin2 address (e.g. JXywGBZBowrppUwwNUo1GCRDTibzJi7g2M)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>לחץ "חתום על ההודעה" כדי לחולל חתימה</translation>
</message>
<message>
<location line="+3"/>
<source>Enter Riocoin2 signature</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>הכתובת שהוכנסה אינה תקינה.</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>אנא בדוק את הכתובת ונסה שנית.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>הכתובת שהוכנסה אינה מתייחסת למפתח.</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>פתיחת הארנק בוטלה.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>המפתח הפרטי עבור הכתובת שהוכנסה אינו זמין.</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>החתימה על ההודעה נכשלה.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>ההודעה נחתמה.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>לא ניתן לפענח את החתימה.</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>אנא בדוק את החתימה ונסה שנית.</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>החתימה לא תואמת את תקציר ההודעה.</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>אימות ההודעה נכשל.</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>ההודעה אומתה.</translation>
</message>
</context>
<context>
<name>TrafficGraphWidget</name>
<message>
<location filename="../trafficgraphwidget.cpp" line="+79"/>
<source>KB/s</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation>פתוח עד %1</translation>
</message>
<message numerus="yes">
<location line="-2"/>
<source>Open for %n block(s)</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message>
<location line="+8"/>
<source>conflicted</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>%1/offline</source>
<translation>%1/מנותק</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/ממתין לאישור</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 אישורים</translation>
</message>
<message>
<location line="+17"/>
<source>Status</source>
<translation>מצב</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation>
<numerusform>, הופץ דרך צומת אחד</numerusform>
<numerusform>, הופץ דרך %n צמתים</numerusform>
</translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>תאריך</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>מקור</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>נוצר</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>מאת</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>אל</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>כתובת עצמית</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>תוית</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+38"/>
<source>Credit</source>
<translation>זיכוי</translation>
</message>
<message numerus="yes">
<location line="-110"/>
<source>matures in %n more block(s)</source>
<translation>
<numerusform>מבשיל בעוד בלוק אחד</numerusform>
<numerusform>מבשיל בעוד %n בלוקים</numerusform>
</translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>לא התקבל</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+38"/>
<source>Debit</source>
<translation>חיוב</translation>
</message>
<message>
<location line="-47"/>
<source>Transaction fee</source>
<translation>עמלת פעולה</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>כמות נקיה</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>הודעה</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>הערה</translation>
</message>
<message>
<location line="+10"/>
<source>Transaction ID</source>
<translation>זיהוי פעולה</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 510 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>מידע ניפוי</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>פעולה</translation>
</message>
<message>
<location line="+5"/>
<source>Inputs</source>
<translation>קלטים</translation>
</message>
<message>
<location line="+21"/>
<source>Amount</source>
<translation>כמות</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>אמת</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>שקר</translation>
</message>
<message>
<location line="-217"/>
<source>, has not been successfully broadcast yet</source>
<translation>, טרם שודר בהצלחה</translation>
</message>
<message>
<location line="+35"/>
<source>unknown</source>
<translation>לא ידוע</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>פרטי הפעולה</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>חלונית זו מציגה תיאור מפורט של הפעולה</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+231"/>
<source>Date</source>
<translation>תאריך</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>סוג</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>כתובת</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>כמות</translation>
</message>
<message>
<location line="+52"/>
<source>Open until %1</source>
<translation>פתוח עד %1</translation>
</message>
<message>
<location line="+12"/>
<source>Confirmed (%1 confirmations)</source>
<translation>מאושר (%1 אישורים)</translation>
</message>
<message numerus="yes">
<location line="-15"/>
<source>Open for %n more block(s)</source>
<translation>
<numerusform>פתח למשך בלוק %n יותר</numerusform>
<numerusform>פתח למשך %n בלוקים נוספים</numerusform>
</translation>
</message>
<message>
<location line="-49"/>
<source>Narration</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+55"/>
<source>Offline</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Confirming (%1 of %2 recommended confirmations)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+6"/>
<source>Conflicted</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Immature (%1 confirmations, will be available after %2)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>הבלוק הזה לא נקלט על ידי אף צומת אחר, וכנראה לא יתקבל!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>נוצר אך לא התקבל</translation>
</message>
<message>
<location line="+42"/>
<source>Received with</source>
<translation>התקבל עם</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>התקבל מאת</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>נשלח ל</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>תשלום לעצמך</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>נכרה</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(n/a)</translation>
</message>
<message>
<location line="+198"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>מצב הפעולה. השהה את הסמן מעל שדה זה כדי לראות את מספר האישורים.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>התאריך והשעה בה הפעולה הזאת התקבלה.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>סוג הפעולה.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>כתובת היעד של הפעולה.</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>הכמות שהתווספה או הוסרה מהיתרה.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+55"/>
<location line="+16"/>
<source>All</source>
<translation>הכל</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>היום</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>השבוע</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>החודש</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>החודש שעבר</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>השנה</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>טווח...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>התקבל עם</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>נשלח ל</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>לעצמך</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>נכרה</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>אחר</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>הכנס כתובת או תוית לחפש</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>כמות מזערית</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>העתק כתובת</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>העתק תוית</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>העתק כמות</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>העתק מזהה פעולה</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>ערוך תוית</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>הצג פרטי פעולה</translation>
</message>
<message>
<location line="+144"/>
<source>Export Transaction Data</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>קובץ מופרד בפסיקים (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>מאושר</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>תאריך</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>סוג</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>תוית</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>כתובת</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>כמות</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>מזהה</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>טווח:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>אל</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+324"/>
<source>Sending...</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+33"/>
<source>Riocoin2 version</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Usage:</source>
<translation>שימוש:</translation>
</message>
<message>
<location line="+1"/>
<source>Send command to -server or Riocoin2d</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>List commands</source>
<translation>רשימת פקודות</translation>
</message>
<message>
<location line="+1"/>
<source>Get help for a command</source>
<translation>קבל עזרה עבור פקודה</translation>
</message>
<message>
<location line="+2"/>
<source>Options:</source>
<translation>אפשרויות:</translation>
</message>
<message>
<location line="+2"/>
<source>Specify configuration file (default: Riocoin2.conf)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Specify pid file (default: Riocoin2d.pid)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Specify wallet file (within data directory)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>ציין תיקיית נתונים</translation>
</message>
<message>
<location line="+2"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>קבע את גודל המטמון של מסד הנתונים במגהבייט (ברירת מחדל: 25)</translation>
</message>
<message>
<location line="+1"/>
<source>Set database disk log size in megabytes (default: 100)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+6"/>
<source>Listen for connections on <port> (default: 51717 or testnet: 51977)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>החזק לכל היותר <n> חיבורים לעמיתים (ברירת מחדל: 125)</translation>
</message>
<message>
<location line="+3"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>התחבר לצומת כדי לדלות כתובות עמיתים, ואז התנתק</translation>
</message>
<message>
<location line="+1"/>
<source>Specify your own public address</source>
<translation>ציין את הכתובת הפומבית שלך</translation>
</message>
<message>
<location line="+5"/>
<source>Bind to given address. Use [host]:port notation for IPv6</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Stake your coins to support network and gain reward (default: 1)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+5"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>סף להתנתקות מעמיתים הנוהגים שלא כהלכה (ברירת מחדל: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>מספר שניות למנוע מעמיתים הנוהגים שלא כהלכה מלהתחבר מחדש (ברירת מחדל: 86400)</translation>
</message>
<message>
<location line="-44"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>אירעה שגיאה בעת הגדרת פורט RPC %u להאזנה ב-IPv4: %s</translation>
</message>
<message>
<location line="+51"/>
<source>Detach block and address databases. Increases shutdown time (default: 0)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+109"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-5"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-87"/>
<source>Listen for JSON-RPC connections on <port> (default: 51716 or testnet: 51976)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-11"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>קבל פקודות משורת הפקודה ו- JSON-RPC</translation>
</message>
<message>
<location line="+101"/>
<source>Error: Transaction creation failed </source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-5"/>
<source>Error: Wallet locked, unable to create transaction </source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-8"/>
<source>Importing blockchain data file.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Importing bootstrap blockchain data file.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-88"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>רוץ ברקע כדימון וקבל פקודות</translation>
</message>
<message>
<location line="+1"/>
<source>Use the test network</source>
<translation>השתמש ברשת הבדיקה</translation>
</message>
<message>
<location line="-24"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>קבל חיבורים מבחוץ (ברירת מחדל: 1 ללא -proxy או -connect)</translation>
</message>
<message>
<location line="-38"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>אירעה שגיאה בעת הגדרת פורט RPC %u להאזנה ב-IPv6, נסוג ל-IPv4: %s</translation>
</message>
<message>
<location line="+117"/>
<source>Error initializing database environment %s! To recover, BACKUP THAT DIRECTORY, then remove everything from it except for wallet.dat.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-20"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+11"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>אזהרה: -paytxfee נקבע לערך מאד גבוה! זוהי עמלת הפעולה שתשלם אם אתה שולח פעולה.</translation>
</message>
<message>
<location line="+61"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Riocoin2 will not work properly.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-31"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
        <translation>אזהרה: שגיאה בקריאת wallet.dat! כל המפתחות נקראו באופן תקין, אך נתוני הפעולות או ספר הכתובות עלולים להיות חסרים או שגויים.</translation>
</message>
<message>
<location line="-18"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
        <translation>אזהרה: קובץ wallet.dat מושחת, המידע חולץ! קובץ wallet.dat המקורי נשמר כ - wallet.{timestamp}.bak ב - %s; אם המאזן או הפעולות שגויים עליך לשחזר גיבוי.</translation>
</message>
<message>
<location line="-30"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>נסה לשחזר מפתחות פרטיים מקובץ wallet.dat מושחת.</translation>
</message>
<message>
<location line="+4"/>
<source>Block creation options:</source>
<translation>אפשרויות יצירת בלוק:</translation>
</message>
<message>
<location line="-62"/>
<source>Connect only to the specified node(s)</source>
<translation>התחבר רק לצמתים המצוינים</translation>
</message>
<message>
<location line="+4"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>גלה את כתובת ה-IP העצמית (ברירת מחדל: 1 כשמאזינים וללא -externalip)</translation>
</message>
<message>
<location line="+94"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>האזנה נכשלה בכל פורט. השתמש ב- -listen=0 אם ברצונך בכך.</translation>
</message>
<message>
<location line="-90"/>
<source>Find peers using DNS lookup (default: 1)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+5"/>
<source>Sync checkpoints policy (default: strict)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+83"/>
<source>Invalid -tor address: '%s'</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+4"/>
<source>Invalid amount for -reservebalance=<amount></source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-82"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>חוצץ קבלה מירבי לכל חיבור, <n>*1000 בתים (ברירת מחדל: 5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>חוצץ שליחה מירבי לכל חיבור, <n>*1000 בתים (ברירת מחדל: 1000)</translation>
</message>
<message>
<location line="-16"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>התחבר רק לצמתים ברשת <net> (IPv4, IPv6 או Tor)</translation>
</message>
<message>
<location line="+28"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Prepend debug output with timestamp</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+35"/>
<source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation>אפשרויות SSL: (ראה את הויקי של ביטקוין עבור הוראות הגדרת SSL)</translation>
</message>
<message>
<location line="-74"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+41"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>שלח מידע דיבאג ועקבה לקונסולה במקום לקובץ debug.log</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+28"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>קבע את גודל הבלוק המינימלי בבתים (ברירת מחדל: 0)</translation>
</message>
<message>
<location line="-29"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>כווץ את קובץ debug.log בהפעלת הקליינט (ברירת מחדל: 1 ללא -debug)</translation>
</message>
<message>
<location line="-42"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>ציין הגבלת זמן לחיבור במילישניות (ברירת מחדל: 5000)</translation>
</message>
<message>
<location line="+109"/>
<source>Unable to sign checkpoint, wrong checkpointkey?
</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-80"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>השתמש ב-UPnP כדי למפות את הפורט להאזנה (ברירת מחדל: 0)</translation>
</message>
<message>
<location line="-1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>השתמש ב-UPnP כדי למפות את הפורט להאזנה (ברירת מחדל: 1 בעת האזנה)</translation>
</message>
<message>
<location line="-25"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+42"/>
<source>Username for JSON-RPC connections</source>
<translation>שם משתמש לחיבורי JSON-RPC</translation>
</message>
<message>
<location line="+47"/>
<source>Verifying database integrity...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+57"/>
<source>WARNING: syncronized checkpoint violation detected, but skipped!</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Warning: Disk space is low!</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-2"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>אזהרה: הגרסה הזאת מיושנת, יש צורך בשדרוג!</translation>
</message>
<message>
<location line="-48"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation>קובץ wallet.dat מושחת, החילוץ נכשל</translation>
</message>
<message>
<location line="-54"/>
<source>Password for JSON-RPC connections</source>
<translation>סיסמה לחיבורי JSON-RPC</translation>
</message>
<message>
<location line="-84"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=Riocoin2rpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Riocoin2 Alert" [email protected]
</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+51"/>
<source>Find peers using internet relay chat (default: 0)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+5"/>
<source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+15"/>
<source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+16"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>אפשר חיבורי JSON-RPC מכתובת האינטרנט המצוינת</translation>
</message>
<message>
<location line="+1"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>שלח פקודות לצומת ב-<ip> (ברירת מחדל: 127.0.0.1)</translation>
</message>
<message>
<location line="+1"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>בצע פקודה זו כשהבלוק הטוב ביותר משתנה (%s בפקודה יוחלף בגיבוב הבלוק)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>בצע פקודה כאשר פעולת ארנק משתנה (%s ב cmd יוחלף ב TxID)</translation>
</message>
<message>
<location line="+3"/>
<source>Require a confirmations for change (default: 0)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Enforce transaction scripts to use canonical PUSH operators (default: 1)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Upgrade wallet to latest format</source>
<translation>שדרג את הארנק לפורמט העדכני</translation>
</message>
<message>
<location line="+1"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>קבע את גודל המאגר ל -<n> (ברירת מחדל: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>סרוק מחדש את שרשרת הבלוקים למציאת פעולות חסרות בארנק</translation>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 2500, 0 = all)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-6, default: 1)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Imports blocks from external blk000?.dat file</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+8"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
        <translation>השתמש ב-OpenSSL (https) עבור חיבורי JSON-RPC</translation>
</message>
<message>
<location line="+1"/>
<source>Server certificate file (default: server.cert)</source>
<translation>קובץ תעודת שרת (ברירת מחדל: server.cert)</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>מפתח פרטי של השרת (ברירת מחדל: server.pem)</translation>
</message>
<message>
<location line="+1"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+53"/>
<source>Error: Wallet unlocked for staking only, unable to create transaction.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+18"/>
<source>WARNING: Invalid checkpoint found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-158"/>
<source>This help message</source>
<translation>הודעת העזרה הזו</translation>
</message>
<message>
<location line="+95"/>
<source>Wallet %s resides outside data directory %s.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Cannot obtain a lock on data directory %s. Riocoin2 is probably already running.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-98"/>
<source>Riocoin2</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+140"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>לא מסוגל לקשור ל-%s במחשב זה (הקשירה החזירה שגיאה %d, %s)</translation>
</message>
<message>
<location line="-130"/>
<source>Connect through socks proxy</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>אפשר בדיקת DNS עבור -addnode, -seednode ו- -connect</translation>
</message>
<message>
<location line="+122"/>
<source>Loading addresses...</source>
<translation>טוען כתובות...</translation>
</message>
<message>
<location line="-15"/>
<source>Error loading blkindex.dat</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>שגיאה בטעינת הקובץ wallet.dat: הארנק מושחת</translation>
</message>
<message>
<location line="+4"/>
<source>Error loading wallet.dat: Wallet requires newer version of Riocoin2</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Wallet needed to be rewritten: restart Riocoin2 to complete</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat</source>
<translation>שגיאה בטעינת הקובץ wallet.dat</translation>
</message>
<message>
<location line="-16"/>
<source>Invalid -proxy address: '%s'</source>
<translation>כתובת -proxy לא תקינה: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>רשת לא ידועה צוינה ב- -onlynet: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>התבקשה גרסת פרוקסי -socks לא ידועה: %i</translation>
</message>
<message>
<location line="+4"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>לא מסוגל לפתור כתובת -bind: '%s'</translation>
</message>
<message>
<location line="+2"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>לא מסוגל לפתור כתובת -externalip: '%s'</translation>
</message>
<message>
<location line="-24"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>כמות לא תקינה עבור -paytxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+44"/>
<source>Error: could not start node</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+11"/>
<source>Sending...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+5"/>
<source>Invalid amount</source>
<translation>כמות לא תקינה</translation>
</message>
<message>
<location line="+1"/>
<source>Insufficient funds</source>
<translation>אין מספיק כספים</translation>
</message>
<message>
<location line="-34"/>
<source>Loading block index...</source>
<translation>טוען את אינדקס הבלוקים...</translation>
</message>
<message>
<location line="-103"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>הוסף צומת להתחברות ונסה לשמור את החיבור פתוח</translation>
</message>
<message>
<location line="+122"/>
<source>Unable to bind to %s on this computer. Riocoin2 is probably already running.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-97"/>
<source>Fee per KB to add to transactions you send</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+55"/>
<source>Invalid amount for -mininput=<amount>: '%s'</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+25"/>
<source>Loading wallet...</source>
<translation>טוען ארנק...</translation>
</message>
<message>
<location line="+8"/>
<source>Cannot downgrade wallet</source>
<translation>לא יכול להוריד דרגת הארנק</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot initialize keypool</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Cannot write default address</source>
<translation>לא יכול לכתוב את כתובת ברירת המחדל</translation>
</message>
<message>
<location line="+1"/>
<source>Rescanning...</source>
<translation>סורק מחדש...</translation>
</message>
<message>
<location line="+5"/>
<source>Done loading</source>
<translation>טעינה הושלמה</translation>
</message>
<message>
<location line="-167"/>
<source>To use the %s option</source>
<translation>להשתמש באפשרות %s</translation>
</message>
<message>
<location line="+14"/>
<source>Error</source>
<translation>שגיאה</translation>
</message>
<message>
<location line="+6"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>עליך לקבוע rpcpassword=yourpassword בקובץ ההגדרות:
%s
אם הקובץ אינו קיים, צור אותו עם הרשאות קריאה לבעלים בלבד.</translation>
</message>
</context>
</TS> | <location line="+1"/>
<source>Show information about Qt</source> |
atom.rs | use std::{
hash,
collections::{BTreeMap, HashMap},
};
use crate::{
Wedge, Fork, Join, Fuset, Port, Link, Polarity, AnyId, DotId, Context, Contextual,
ExclusivelyContextual, InContext, AcesError, AcesErrorKind, sat,
domain::{Dotset, DotsetId},
};
/// An abstract structural identifier serving as the common base of
/// [`PortId`], [`LinkId`], [`ForkId`], [`JoinId`], and [`FusetId`].
///
/// Since this is a numeric identifier, which is serial and one-based,
/// it trivially maps into numeric codes of variables in the DIMACS
/// SAT format.
///
/// See [`AnyId`] for more details.
pub type AtomId = AnyId;
/// An identifier of a [`Port`], a type derived from [`AtomId`].
///
/// There is a trivial bijection between values of this type and
/// numeric codes of DIMACS variables. This mapping simplifies the
/// construction of SAT queries and interpretation of solutions.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[repr(transparent)]
pub struct PortId(pub(crate) AtomId);
impl PortId {
#[inline]
pub const fn get(self) -> AtomId {
self.0
}
}
impl From<AtomId> for PortId {
#[inline]
fn from(id: AtomId) -> Self {
PortId(id)
}
}
impl From<PortId> for AtomId {
#[inline]
fn from(id: PortId) -> Self {
id.0
}
}
impl ExclusivelyContextual for PortId {
fn format_locked(&self, ctx: &Context) -> Result<String, AcesError> {
let port = ctx
.get_port(*self)
.ok_or_else(|| AcesError::from(AcesErrorKind::PortMissingForId(*self)))?;
port.format_locked(ctx)
}
}
/// An identifier of a [`Link`], a type derived from [`AtomId`].
///
/// There is a trivial bijection between values of this type and
/// numeric codes of DIMACS variables. This mapping simplifies the
/// construction of SAT queries and interpretation of solutions.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
#[repr(transparent)]
pub struct LinkId(pub(crate) AtomId);
impl LinkId {
#[inline]
pub const fn get(self) -> AtomId {
self.0
}
}
impl From<AtomId> for LinkId {
#[inline]
fn from(id: AtomId) -> Self {
LinkId(id)
}
}
impl From<LinkId> for AtomId {
#[inline]
fn from(id: LinkId) -> Self {
id.0
}
}
impl ExclusivelyContextual for LinkId {
fn format_locked(&self, ctx: &Context) -> Result<String, AcesError> {
let link = ctx
.get_link(*self)
.ok_or_else(|| AcesError::from(AcesErrorKind::LinkMissingForId(*self)))?;
link.format_locked(ctx)
}
}
/// An identifier of a [`Fork`], a type derived from [`AtomId`].
///
/// There is a trivial bijection between values of this type and
/// numeric codes of DIMACS variables. This mapping simplifies the
/// construction of SAT queries and interpretation of solutions.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[repr(transparent)]
pub struct ForkId(pub(crate) AtomId);
impl ForkId {
#[inline]
pub const fn get(self) -> AtomId {
self.0
}
}
impl From<AtomId> for ForkId {
#[inline]
fn from(id: AtomId) -> Self {
ForkId(id)
}
}
impl From<ForkId> for AtomId {
#[inline]
fn from(id: ForkId) -> Self {
id.0
}
}
impl ExclusivelyContextual for ForkId {
fn format_locked(&self, ctx: &Context) -> Result<String, AcesError> {
let fork = ctx
.get_fork(*self)
.ok_or_else(|| AcesError::from(AcesErrorKind::ForkMissingForId(*self)))?;
fork.format_locked(ctx)
}
}
/// An identifier of a [`Join`], a type derived from [`AtomId`].
///
/// There is a trivial bijection between values of this type and
/// numeric codes of DIMACS variables. This mapping simplifies the
/// construction of SAT queries and interpretation of solutions.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[repr(transparent)]
pub struct JoinId(pub(crate) AtomId);
impl JoinId {
#[inline]
pub const fn get(self) -> AtomId {
self.0
}
}
impl From<AtomId> for JoinId {
#[inline]
fn from(id: AtomId) -> Self {
JoinId(id)
}
}
impl From<JoinId> for AtomId {
#[inline]
fn from(id: JoinId) -> Self {
id.0
}
}
impl ExclusivelyContextual for JoinId {
fn format_locked(&self, ctx: &Context) -> Result<String, AcesError> {
let join = ctx
.get_join(*self)
.ok_or_else(|| AcesError::from(AcesErrorKind::JoinMissingForId(*self)))?;
join.format_locked(ctx)
}
}
/// An identifier of a [`Fuset`], a type derived from [`AtomId`].
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[repr(transparent)]
pub struct FusetId(pub(crate) AtomId);
impl FusetId {
#[inline]
pub const fn get(self) -> AtomId {
self.0
}
}
impl From<AtomId> for FusetId {
#[inline]
fn from(id: AtomId) -> Self {
FusetId(id)
}
}
impl From<FusetId> for AtomId {
#[inline]
fn from(id: FusetId) -> Self {
id.0
}
}
impl ExclusivelyContextual for FusetId {
fn format_locked(&self, ctx: &Context) -> Result<String, AcesError> {
let fuset = ctx
.get_fuset(*self)
.ok_or_else(|| AcesError::from(AcesErrorKind::FusetMissingForId(*self)))?;
fuset.format_locked(ctx)
}
}
/// A collection of [`Atom`]s: [`Port`]s, [`Link`]s, [`Fork`]s,
/// [`Join`]s and [`Dotset`]s.
///
/// [`AtomSpace`] maintains a mapping from [`Atom`]s to [`AtomId`]s,
/// its inverse, and a mapping from [`DotId`]s to [`PortId`]s. For
/// the reverse mapping, from [`PortId`]s to [`DotId`]s, call
/// [`AtomSpace::get_port()`] followed by [`Port::get_dot_id()`].
#[derive(Clone, Debug)]
pub(crate) struct AtomSpace {
atoms: Vec<Atom>,
atom_ids: HashMap<Atom, AtomId>,
source_dots: BTreeMap<DotId, PortId>,
sink_dots: BTreeMap<DotId, PortId>,
internal_dots: BTreeMap<DotId, (PortId, PortId)>,
}
impl Default for AtomSpace {
fn default() -> Self {
Self {
atoms: vec![Atom::Bottom],
atom_ids: Default::default(),
source_dots: Default::default(),
sink_dots: Default::default(),
internal_dots: Default::default(),
}
}
}
impl AtomSpace {
fn do_share_atom(&mut self, mut new_atom: Atom) -> AtomId {
if let Some(old_atom_id) = self.get_atom_id(&new_atom) {
if new_atom.get_atom_id().is_none() {
trace!("Resharing: {:?}", new_atom);
old_atom_id
} else {
panic!("Attempt to reset identifier of atom {:?}", new_atom);
}
} else {
let atom_id = unsafe { AtomId::new_unchecked(self.atoms.len()) };
new_atom.set_atom_id(atom_id);
trace!("New share: {:?}", new_atom);
self.atoms.push(new_atom.clone());
self.atom_ids.insert(new_atom, atom_id);
atom_id
}
}
pub(crate) fn share_port(&mut self, port: &mut Port) -> PortId {
let tip = port.dot_id;
match port.polarity {
Polarity::Tx => {
let atom_id = self.do_share_atom(Atom::Tx(port.clone()));
port.atom_id = Some(atom_id);
let pid = PortId(atom_id);
if let Some(&rx_id) = self.sink_dots.get(&tip) {
self.sink_dots.remove(&tip);
self.internal_dots.insert(tip, (pid, rx_id));
} else {
self.source_dots.insert(tip, pid);
}
pid
}
Polarity::Rx => {
let atom_id = self.do_share_atom(Atom::Rx(port.clone()));
port.atom_id = Some(atom_id);
let pid = PortId(atom_id);
if let Some(&tx_id) = self.source_dots.get(&tip) {
self.source_dots.remove(&tip);
self.internal_dots.insert(tip, (tx_id, pid));
} else {
self.sink_dots.insert(tip, pid);
}
pid
}
}
}
#[inline]
pub(crate) fn share_link(&mut self, link: &mut Link) -> LinkId {
let atom_id = self.do_share_atom(Atom::Link(link.clone()));
link.atom_id = Some(atom_id);
LinkId(atom_id)
}
#[inline]
pub(crate) fn share_fork(&mut self, fork: &mut Fork) -> ForkId {
let atom_id = self.do_share_atom(Atom::Fork(fork.clone()));
fork.atom_id = Some(atom_id);
ForkId(atom_id)
}
#[inline]
pub(crate) fn share_join(&mut self, join: &mut Join) -> JoinId {
let atom_id = self.do_share_atom(Atom::Join(join.clone()));
join.atom_id = Some(atom_id);
JoinId(atom_id)
}
pub(crate) fn share_fork_from_tip_and_pit(&mut self, tip_id: DotId, mut pit: Dotset) -> ForkId {
let pit_id = self.share_dotset(&mut pit);
let mut fork = Wedge::new(Polarity::Tx, tip_id, pit_id);
self.share_fork(&mut fork)
}
pub(crate) fn share_join_from_tip_and_pit(&mut self, tip_id: DotId, mut pit: Dotset) -> JoinId {
let pit_id = self.share_dotset(&mut pit);
let mut join = Wedge::new(Polarity::Rx, tip_id, pit_id);
self.share_join(&mut join)
}
#[inline]
pub(crate) fn share_dotset(&mut self, pit: &mut Dotset) -> DotsetId {
let atom_id = self.do_share_atom(Atom::Pit(pit.clone()));
pit.atom_id = Some(atom_id);
DotsetId(atom_id)
}
#[inline]
pub(crate) fn share_fuset(&mut self, fuset: &mut Fuset) -> FusetId {
let atom_id = self.do_share_atom(Atom::Fuset(fuset.clone()));
fuset.atom_id = Some(atom_id);
FusetId(atom_id)
}
#[inline]
pub(crate) fn get_atom(&self, atom_id: AtomId) -> Option<&Atom> {
self.atoms.get(atom_id.get())
}
#[inline]
pub(crate) fn get_atom_id(&self, atom: &Atom) -> Option<AtomId> {
self.atom_ids.get(atom).copied()
}
#[inline]
pub(crate) fn is_port(&self, atom_id: AtomId) -> bool {
match self.get_atom(atom_id) {
Some(Atom::Tx(_)) | Some(Atom::Rx(_)) => true,
_ => false,
}
}
#[inline]
pub(crate) fn get_port(&self, pid: PortId) -> Option<&Port> {
match self.get_atom(pid.into()) {
Some(Atom::Tx(a)) => Some(a),
Some(Atom::Rx(a)) => Some(a),
_ => None,
}
}
#[inline]
pub(crate) fn is_link(&self, atom_id: AtomId) -> bool {
match self.get_atom(atom_id) {
Some(Atom::Link(_)) => true,
_ => false,
}
}
#[inline]
pub(crate) fn get_link(&self, lid: LinkId) -> Option<&Link> {
match self.get_atom(lid.into()) {
Some(Atom::Link(a)) => Some(a),
_ => None,
}
}
#[inline]
pub(crate) fn is_wedge(&self, atom_id: AtomId) -> bool {
match self.get_atom(atom_id) {
Some(Atom::Fork(_)) | Some(Atom::Join(_)) => true,
_ => false,
}
}
#[inline]
pub(crate) fn get_wedge(&self, aid: AtomId) -> Option<&Wedge> {
match self.get_atom(aid) {
Some(Atom::Fork(a)) => Some(a),
Some(Atom::Join(a)) => Some(a),
_ => None,
}
}
#[inline]
pub(crate) fn is_fork(&self, atom_id: AtomId) -> bool {
match self.get_atom(atom_id) {
Some(Atom::Fork(_)) => true,
_ => false,
}
}
#[inline]
pub(crate) fn get_fork(&self, fid: ForkId) -> Option<&Fork> {
match self.get_atom(fid.into()) {
Some(Atom::Fork(a)) => Some(a),
_ => None,
}
}
#[inline]
pub(crate) fn is_join(&self, atom_id: AtomId) -> bool {
match self.get_atom(atom_id) {
Some(Atom::Join(_)) => true,
_ => false,
}
}
#[inline]
pub(crate) fn get_join(&self, jid: JoinId) -> Option<&Join> {
match self.get_atom(jid.into()) {
Some(Atom::Join(a)) => Some(a),
_ => None,
}
}
#[inline]
#[allow(dead_code)]
pub(crate) fn is_dotset(&self, atom_id: AtomId) -> bool {
match self.get_atom(atom_id) {
Some(Atom::Pit(_)) => true,
_ => false,
}
}
#[inline]
pub(crate) fn get_dotset(&self, sid: DotsetId) -> Option<&Dotset> {
match self.get_atom(sid.into()) {
Some(Atom::Pit(a)) => Some(a),
_ => None,
}
}
#[inline]
#[allow(dead_code)]
pub(crate) fn is_fuset(&self, atom_id: AtomId) -> bool {
match self.get_atom(atom_id) {
Some(Atom::Fuset(_)) => true,
_ => false,
}
}
#[inline]
pub(crate) fn get_fuset(&self, fid: FusetId) -> Option<&Fuset> {
match self.get_atom(fid.into()) {
Some(Atom::Fuset(a)) => Some(a),
_ => None,
}
}
pub fn get_anti_port_id(&self, pid: PortId) -> Option<PortId> {
if let Some(port) = self.get_port(pid) {
if let Some(&(tx_id, rx_id)) = self.internal_dots.get(&port.dot_id) {
match port.polarity {
Polarity::Tx => {
if tx_id == pid {
return Some(rx_id)
} else {
panic!("Corrupt atom space")
}
}
Polarity::Rx => {
if rx_id == pid {
return Some(tx_id)
} else {
panic!("Corrupt atom space")
}
}
}
}
}
None
}
}
#[derive(Clone, Eq, Debug)]
pub(crate) enum Atom {
Tx(Port),
Rx(Port),
Link(Link),
Fork(Fork),
Join(Join),
Pit(Dotset),
Fuset(Fuset),
Bottom,
}
impl Atom {
fn set_atom_id(&mut self, atom_id: AtomId) {
use Atom::*;
let prev_id = match self {
Tx(p) => &mut p.atom_id,
Rx(p) => &mut p.atom_id,
Link(l) => &mut l.atom_id,
Fork(f) => &mut f.atom_id,
Join(j) => &mut j.atom_id,
Pit(s) => &mut s.atom_id,
Fuset(f) => &mut f.atom_id,
Bottom => panic!("Attempt to set identifier of the bottom atom"),
};
if *prev_id == None | else {
panic!("Attempt to reset identifier of atom {:?}", self);
}
}
fn get_atom_id(&self) -> Option<AtomId> {
use Atom::*;
match self {
Tx(p) => p.atom_id,
Rx(p) => p.atom_id,
Link(l) => l.atom_id,
Fork(f) => f.atom_id,
Join(j) => j.atom_id,
Pit(s) => s.atom_id,
Fuset(f) => f.atom_id,
Bottom => panic!("Attempt to get identifier of the bottom atom"),
}
}
}
impl PartialEq for Atom {
#[rustfmt::skip]
fn eq(&self, other: &Self) -> bool {
use Atom::*;
match self {
Tx(p) => if let Tx(o) = other { p == o } else { false },
Rx(p) => if let Rx(o) = other { p == o } else { false },
Link(l) => if let Link(o) = other { l == o } else { false },
Fork(f) => if let Fork(o) = other { f == o } else { false },
Join(j) => if let Join(o) = other { j == o } else { false },
Pit(s) => if let Pit(o) = other { s == o } else { false },
Fuset(f) => if let Fuset(o) = other { f == o } else { false },
Bottom => panic!("Attempt to access the bottom atom"),
}
}
}
impl hash::Hash for Atom {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
use Atom::*;
match self {
Tx(p) | Rx(p) => p.hash(state),
Link(l) => l.hash(state),
Fork(f) => f.hash(state),
Join(j) => j.hash(state),
Pit(s) => s.hash(state),
Fuset(f) => f.hash(state),
Bottom => panic!("Attempt to access the bottom atom"),
}
}
}
/// A trait of an identifier convertible into [`DotId`] and into
/// [`sat::Literal`].
pub trait Atomic:
From<AtomId> + Into<AtomId> + Contextual + Copy + PartialEq + Eq + PartialOrd + Ord
{
fn into_dot_id(this: InContext<Self>) -> Option<DotId>;
fn into_dot_id_oriented(this: InContext<Self>, _polarity: Polarity) -> Option<DotId> {
Self::into_dot_id(this)
}
fn into_sat_literal(self, negated: bool) -> sat::Literal;
}
impl Atomic for PortId {
fn into_dot_id(this: InContext<Self>) -> Option<DotId> {
this.using_context(|pid, ctx| ctx.get_port(*pid).map(|port| port.get_dot_id()))
}
#[inline]
fn into_sat_literal(self, negated: bool) -> sat::Literal {
sat::Literal::from_atom_id(self.get(), negated)
}
}
impl Atomic for LinkId {
fn into_dot_id(_this: InContext<Self>) -> Option<DotId> {
None
}
fn into_dot_id_oriented(this: InContext<Self>, polarity: Polarity) -> Option<DotId> {
this.using_context(|lid, ctx| ctx.get_link(*lid).map(|link| link.get_dot_id(polarity)))
}
#[inline]
fn into_sat_literal(self, negated: bool) -> sat::Literal {
sat::Literal::from_atom_id(self.get(), negated)
}
}
impl Atomic for ForkId {
fn into_dot_id(this: InContext<Self>) -> Option<DotId> {
this.using_context(|fid, ctx| ctx.get_fork(*fid).map(|fork| fork.get_tip_id()))
}
#[inline]
fn into_sat_literal(self, negated: bool) -> sat::Literal {
sat::Literal::from_atom_id(self.get(), negated)
}
}
impl Atomic for JoinId {
fn into_dot_id(this: InContext<Self>) -> Option<DotId> {
this.using_context(|jid, ctx| ctx.get_join(*jid).map(|join| join.get_tip_id()))
}
#[inline]
fn into_sat_literal(self, negated: bool) -> sat::Literal {
sat::Literal::from_atom_id(self.get(), negated)
}
}
#[cfg(test)]
mod tests {
use super::*;
fn new_tx_port(id: usize) -> Port {
Port::new(Polarity::Tx, DotId(unsafe { AnyId::new_unchecked(id) }))
}
fn new_fork(atoms: &mut AtomSpace, tip_id: usize, pit_size: usize) -> ForkId {
let arm_ids =
(tip_id + 1..=tip_id + pit_size).map(|id| DotId(unsafe { AnyId::new_unchecked(id) }));
let pit = Dotset::new(arm_ids);
atoms.share_fork_from_tip_and_pit(DotId(unsafe { AnyId::new_unchecked(tip_id) }), pit)
}
fn new_dotset(first_id: usize, size: usize) -> Dotset {
let dot_ids =
(first_id..first_id + size).map(|id| DotId(unsafe { AnyId::new_unchecked(id) }));
Dotset::new(dot_ids)
}
#[test]
#[should_panic(expected = "uninitialized")]
fn test_atom_uninitialized() {
let atom = Atom::Tx(new_tx_port(1));
let _ = atom.get_atom_id().expect("uninitialized");
}
#[test]
#[should_panic(expected = "bottom")]
fn test_atom_bottom() {
let mut atoms = AtomSpace::default();
let atom = Atom::Bottom;
let _ = atoms.do_share_atom(atom);
}
#[test]
#[should_panic(expected = "reset")]
fn test_atom_reset_id() {
let mut atoms = AtomSpace::default();
let mut atom = Atom::Tx(new_tx_port(1));
atom.set_atom_id(unsafe { AtomId::new_unchecked(1) });
let _ = atoms.do_share_atom(atom);
}
#[test]
fn test_atom_id() {
let mut atoms = AtomSpace::default();
let atom = Atom::Tx(new_tx_port(1));
let atom_id = atoms.do_share_atom(atom);
let atom = atoms.get_atom(atom_id).unwrap();
assert_eq!(atom.get_atom_id().unwrap(), atom_id);
}
#[test]
fn test_fork_resharing() {
let mut atoms = AtomSpace::default();
let f1_id = new_fork(&mut atoms, 1, 2);
let f2_id = new_fork(&mut atoms, 1, 2);
assert_eq!(f1_id, f2_id);
}
#[test]
fn test_pit_resharing() {
let mut atoms = AtomSpace::default();
let s1 = Atom::Pit(new_dotset(1, 5));
let s1_id = atoms.do_share_atom(s1);
let s2 = Atom::Pit(new_dotset(1, 5));
let s2_id = atoms.do_share_atom(s2);
assert_eq!(s1_id, s2_id);
}
}
| {
*prev_id = Some(atom_id);
} |
test_kernel_language.py | from parcels import FieldSet, ParticleSet, ScipyParticle, JITParticle, Kernel, Variable, ErrorCode
from parcels.kernels.seawaterdensity import polyTEOS10_bsq, UNESCO_Density
from parcels import random as parcels_random
import numpy as np
import pytest
import random as py_random
from os import path
import sys
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
def expr_kernel(name, pset, expr):
pycode = """def %s(particle, fieldset, time):
particle.p = %s""" % (name, expr)
return Kernel(pset.fieldset, pset.ptype, pyfunc=None,
funccode=pycode, funcname=name,
funcvars=['particle'])
def fieldset(xdim=20, ydim=20):
""" Standard unit mesh fieldset """
lon = np.linspace(0., 1., xdim, dtype=np.float32)
lat = np.linspace(0., 1., ydim, dtype=np.float32)
U, V = np.meshgrid(lat, lon)
data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)}
dimensions = {'lat': lat, 'lon': lon}
return FieldSet.from_data(data, dimensions, mesh='flat', transpose=True)
@pytest.fixture(name="fieldset")
def fieldset_fixture(xdim=20, ydim=20):
return fieldset(xdim=xdim, ydim=ydim)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('name, expr, result', [
('Add', '2 + 5', 7),
('Sub', '6 - 2', 4),
('Mul', '3 * 5', 15),
('Div', '24 / 4', 6),
])
def test_expression_int(fieldset, mode, name, expr, result, npart=10):
""" Test basic arithmetic expressions """
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32)
pset = ParticleSet(fieldset, pclass=TestParticle,
lon=np.linspace(0., 1., npart),
lat=np.zeros(npart) + 0.5)
pset.execute(expr_kernel('Test%s' % name, pset, expr), endtime=1., dt=1.)
assert(np.all(result == pset.p))
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('name, expr, result', [
('Add', '2. + 5.', 7),
('Sub', '6. - 2.', 4),
('Mul', '3. * 5.', 15),
('Div', '24. / 4.', 6),
('Pow', '2 ** 3', 8),
])
def test_expression_float(fieldset, mode, name, expr, result, npart=10):
""" Test basic arithmetic expressions """
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32)
pset = ParticleSet(fieldset, pclass=TestParticle,
lon=np.linspace(0., 1., npart),
lat=np.zeros(npart) + 0.5)
pset.execute(expr_kernel('Test%s' % name, pset, expr), endtime=1., dt=1.)
assert(np.all(result == pset.p))
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('name, expr, result', [
('True', 'True', True),
('False', 'False', False),
('And', 'True and False', False),
('Or', 'True or False', True),
('Equal', '5 == 5', True),
('Lesser', '5 < 3', False),
('LesserEq', '3 <= 5', True),
('Greater', '4 > 2', True),
('GreaterEq', '2 >= 4', False),
])
def test_expression_bool(fieldset, mode, name, expr, result, npart=10):
""" Test basic arithmetic expressions """
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32)
pset = ParticleSet(fieldset, pclass=TestParticle,
lon=np.linspace(0., 1., npart),
lat=np.zeros(npart) + 0.5)
pset.execute(expr_kernel('Test%s' % name, pset, expr), endtime=1., dt=1.)
if mode == 'jit':
assert(np.all(result == (pset.p == 1)))
else:
assert(np.all(result == pset.p))
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_while_if_break(fieldset, mode):
"""Test while, if and break commands"""
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32, initial=0.)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0], lat=[0])
def kernel(particle, fieldset, time):
while particle.p < 30:
if particle.p > 9:
break
particle.p += 1
if particle.p > 5:
particle.p *= 2.
pset.execute(kernel, endtime=1., dt=1.)
assert np.allclose(pset.p, 20., rtol=1e-12)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_nested_if(fieldset, mode):
"""Test nested if commands"""
class TestParticle(ptype[mode]):
p0 = Variable('p0', dtype=np.int32, initial=0)
p1 = Variable('p1', dtype=np.int32, initial=1)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=0, lat=0)
def kernel(particle, fieldset, time):
if particle.p1 >= particle.p0:
var = particle.p0
if var + 1 < particle.p1:
particle.p1 = -1
pset.execute(kernel, endtime=10, dt=1.)
assert np.allclose([pset.p0[0], pset.p1[0]], [0, 1])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pass(fieldset, mode):
"""Test pass commands"""
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.int32, initial=0)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=0, lat=0)
def kernel(particle, fieldset, time):
particle.p = -1
pass
pset.execute(kernel, endtime=10, dt=1.) | assert np.allclose(pset[0].p, -1)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_dt_as_variable_in_kernel(fieldset, mode):
pset = ParticleSet(fieldset, pclass=ptype[mode], lon=0, lat=0)
def kernel(particle, fieldset, time):
dt = 1. # noqa
pset.execute(kernel, endtime=10, dt=1.)
def test_parcels_tmpvar_in_kernel(fieldset):
"""Tests for error thrown if vartiable with 'tmp' defined in custom kernel"""
error_thrown = False
pset = ParticleSet(fieldset, pclass=JITParticle, lon=0, lat=0)
def kernel_tmpvar(particle, fieldset, time):
parcels_tmpvar0 = 0 # noqa
try:
pset.execute(kernel_tmpvar, endtime=1, dt=1.)
except NotImplementedError:
error_thrown = True
assert error_thrown
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_if_withfield(fieldset, mode):
"""Test combination of if and Field sampling commands"""
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32, initial=0.)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0], lat=[0])
def kernel(particle, fieldset, time):
u = fieldset.U[time, 0, 0, 1.]
particle.p = 0
if fieldset.U[time, 0, 0, 1.] == u:
particle.p += 1
if fieldset.U[time, 0, 0, 1.] == fieldset.U[time, 0, 0, 1.]:
particle.p += 1
if True:
particle.p += 1
if fieldset.U[time, 0, 0, 1.] == u and 1 == 1:
particle.p += 1
if fieldset.U[time, 0, 0, 1.] == fieldset.U[time, 0, 0, 1.] and fieldset.U[time, 0, 0, 1.] == fieldset.U[time, 0, 0, 1.]:
particle.p += 1
if fieldset.U[time, 0, 0, 1.] == u:
particle.p += 1
else:
particle.p += 1000
if fieldset.U[time, 0, 0, 1.] == 3:
particle.p += 1000
else:
particle.p += 1
pset.execute(kernel, endtime=1., dt=1.)
assert np.allclose(pset.p, 7., rtol=1e-12)
@pytest.mark.parametrize(
'mode',
['scipy',
pytest.param('jit',
marks=pytest.mark.xfail(
(sys.version_info >= (3, 0)) or (sys.platform == 'win32'),
reason="py.test FD capturing does not work for jit on python3 or Win"))
])
def test_print(fieldset, mode, capfd):
"""Test print statements"""
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32, initial=0.)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0.5], lat=[0.5])
def kernel(particle, fieldset, time):
particle.p = fieldset.U[time, particle.depth, particle.lat, particle.lon]
tmp = 5
print("%d %f %f" % (particle.id, particle.p, tmp))
pset.execute(kernel, endtime=1., dt=1.)
out, err = capfd.readouterr()
lst = out.split(' ')
tol = 1e-8
assert abs(float(lst[0]) - pset.id[0]) < tol and abs(float(lst[1]) - pset.p[0]) < tol and abs(float(lst[2]) - 5) < tol
def kernel2(particle, fieldset, time):
tmp = 3
print("%f" % (tmp))
pset.execute(kernel2, endtime=1., dt=1.)
out, err = capfd.readouterr()
lst = out.split(' ')
assert abs(float(lst[0]) - 3) < tol
def random_series(npart, rngfunc, rngargs, mode):
random = parcels_random if mode == 'jit' else py_random
random.seed(1234)
func = getattr(random, rngfunc)
series = [func(*rngargs) for _ in range(npart)]
random.seed(1234) # Reset the RNG seed
return series
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('rngfunc, rngargs', [
('random', []),
('uniform', [0., 20.]),
('randint', [0, 20]),
])
def test_random_float(fieldset, mode, rngfunc, rngargs, npart=10):
""" Test basic random number generation """
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32 if rngfunc == 'randint' else np.float32)
pset = ParticleSet(fieldset, pclass=TestParticle,
lon=np.linspace(0., 1., npart),
lat=np.zeros(npart) + 0.5)
series = random_series(npart, rngfunc, rngargs, mode)
kernel = expr_kernel('TestRandom_%s' % rngfunc, pset,
'random.%s(%s)' % (rngfunc, ', '.join([str(a) for a in rngargs])))
pset.execute(kernel, endtime=1., dt=1.)
assert np.allclose(pset.p, series, atol=1e-9)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('c_inc', ['str', 'file'])
def test_c_kernel(fieldset, mode, c_inc):
coord_type = np.float32 if c_inc == 'str' else np.float64
pset = ParticleSet(fieldset, pclass=ptype[mode], lon=[0.5], lat=[0],
lonlatdepth_dtype=coord_type)
def func(U, lon, dt):
u = U.data[0, 2, 1]
return lon + u * dt
if c_inc == 'str':
c_include = """
static inline ErrorCode func(CField *f, float *lon, double *dt)
{
float data2D[2][2][2];
ErrorCode err = getCell2D(f, 1, 2, 0, data2D, 1); CHECKERROR(err);
float u = data2D[0][0][0];
*lon += u * *dt;
return SUCCESS;
}
"""
else:
c_include = path.join(path.dirname(__file__), 'customed_header.h')
def ckernel(particle, fieldset, time):
func('parcels_customed_Cfunc_pointer_args', fieldset.U, particle.lon, particle.dt)
def pykernel(particle, fieldset, time):
particle.lon = func(fieldset.U, particle.lon, particle.dt)
if mode == 'scipy':
kernel = pset.Kernel(pykernel)
else:
kernel = pset.Kernel(ckernel, c_include=c_include)
pset.execute(kernel, endtime=3., dt=3.)
assert np.allclose(pset.lon[0], 0.81578948)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_dt_modif_by_kernel(fieldset, mode):
class TestParticle(ptype[mode]):
age = Variable('age', dtype=np.float32)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0.5], lat=[0])
def modif_dt(particle, fieldset, time):
particle.age += particle.dt
particle.dt = 2
endtime = 4
pset.execute(modif_dt, endtime=endtime, dt=1.)
assert np.isclose(pset.age[0], endtime)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('dt', [1e-2, 1e-6])
def test_small_dt(fieldset, mode, dt, npart=10):
pset = ParticleSet(fieldset, pclass=ptype[mode], lon=np.zeros(npart),
lat=np.zeros(npart), time=np.arange(0, npart)*dt*10)
def DoNothing(particle, fieldset, time):
return ErrorCode.Success
pset.execute(DoNothing, dt=dt, runtime=dt*100)
assert np.allclose([p.time for p in pset], dt*100)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_seawaterdensity_kernels(mode):
def generate_fieldset(xdim=2, ydim=2, zdim=2, tdim=1):
lon = np.linspace(0., 10., xdim, dtype=np.float32)
lat = np.linspace(0., 10., ydim, dtype=np.float32)
depth = np.linspace(0, 2000, zdim, dtype=np.float32)
time = np.zeros(tdim, dtype=np.float64)
U = np.ones((tdim, zdim, ydim, xdim))
V = np.ones((tdim, zdim, ydim, xdim))
abs_salinity = 30 * np.ones((tdim, zdim, ydim, xdim))
cons_temperature = 10 * np.ones((tdim, zdim, ydim, xdim))
dimensions = {'lat': lat, 'lon': lon, 'depth': depth, 'time': time}
data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32),
'abs_salinity': np.array(abs_salinity, dtype=np.float32),
'cons_temperature': np.array(cons_temperature, dtype=np.float32)}
return (data, dimensions)
data, dimensions = generate_fieldset()
fieldset = FieldSet.from_data(data, dimensions)
class DensParticle(ptype[mode]):
density = Variable('density', dtype=np.float32)
pset = ParticleSet(fieldset, pclass=DensParticle, lon=5, lat=5, depth=1000)
pset.execute(polyTEOS10_bsq, runtime=0, dt=0)
assert np.allclose(pset[0].density, 1022.85377)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('pressure', [0, 10])
def test_UNESCOdensity_kernel(mode, pressure):
def generate_fieldset(p, xdim=2, ydim=2, zdim=2, tdim=1):
lon = np.linspace(0., 10., xdim, dtype=np.float32)
lat = np.linspace(0., 10., ydim, dtype=np.float32)
depth = np.linspace(0, 2000, zdim, dtype=np.float32)
time = np.zeros(tdim, dtype=np.float64)
U = np.ones((tdim, zdim, ydim, xdim))
V = np.ones((tdim, zdim, ydim, xdim))
psu_salinity = 8 * np.ones((tdim, zdim, ydim, xdim))
cons_temperature = 10 * np.ones((tdim, zdim, ydim, xdim))
cons_pressure = p * np.ones((tdim, zdim, ydim, xdim))
dimensions = {'lat': lat, 'lon': lon, 'depth': depth, 'time': time}
data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32),
'psu_salinity': np.array(psu_salinity, dtype=np.float32),
'cons_pressure': np.array(cons_pressure, dtype=np.float32),
'cons_temperature': np.array(cons_temperature, dtype=np.float32)}
return (data, dimensions)
data, dimensions = generate_fieldset(pressure)
fieldset = FieldSet.from_data(data, dimensions)
class DensParticle(ptype[mode]):
density = Variable('density', dtype=np.float32)
pset = ParticleSet(fieldset, pclass=DensParticle, lon=5, lat=5, depth=1000)
pset.execute(UNESCO_Density, runtime=0, dt=0)
if(pressure == 0):
assert np.allclose(pset[0].density, 1005.9465)
elif(pressure == 10):
assert np.allclose(pset[0].density, 1006.4179) | |
service.py | # -*- coding: utf-8 -*-
"""Scan service.
This module implements uploading a scan file to XNAT and adding a scan to the database.
Todo: Maybe the public method should be called add, and that should kick off an upload procedure, rather than the
other way around.
Todo: do we want to infer file type from extension? Or use some other method?
Todo: Right now if we use the import service XNAT is inferring its own scan id. What do we want to do about that?
Todo: if someone uploads a zip file we don't actually know that there are dicoms inside (could be NIFTI). Consider this
fact.
Todo: Upload security for zip files?
"""
import os
from cookiecutter_mbam.xnat import XNATConnection
from cookiecutter_mbam.experiment import Experiment
from cookiecutter_mbam.user import User
from .models import Scan
from .utils import gzip_file
from flask import current_app
def debug():
assert current_app.debug == False, "Don't panic! You're here by request of debug()"
class ScanService:
def __init__(self, user_id, exp_id):
self.user_id = user_id
self.user = User.get_by_id(self.user_id)
self.experiment = Experiment.get_by_id(exp_id)
self.xc = XNATConnection()
# todo: what is the actual URI of the experiment I've created? Why does it have the XNAT prefix?
# maybe that's the accessor? Is the accessor in the URI?
def upload(self, image_file):
"""The top level public method for adding a scan
Calls methods to infer file type and further process the file, generate xnat identifiers and query strings,
check what XNAT identifiers objects have, upload the scan to XNAT, add the scan to the database, and update
user, experiment, and scan database objects with their XNAT-related attributes.
:param file object image_file: the file object
:return: None
"""
file, dcm = self._process_file(image_file)
xnat_ids = self._generate_xnat_identifiers(dcm=dcm)
existing_attributes = self._check_for_existing_xnat_ids()
uris = self.xc.upload_scan(xnat_ids, existing_attributes, image_file, import_service=dcm)
scan = self._add_scan()
keywords = ['subject', 'experiment', 'scan']
self._update_database_objects(keywords=keywords, objects=[self.user, self.experiment, scan],
ids=['{}_id'.format(xnat_ids[kw]['xnat_id']) for kw in keywords], uris=uris)
def _add_scan(self):
"""Add a scan to the database
Creates the scan object, adds it to the database, and increments the parent experiment's scan count
:return: scan
"""
scan = Scan.create(experiment_id=self.experiment.id)
self.experiment.num_scans += 1
return scan
def _process_file(self, image_file):
"""Infer file type from extension and respond to file type as necessary
Uses file extension to infer whether file should be left alone or gzipped, or whether zip file will be sent to
import service.
:param file object image_file: the file object
:return: a two-tuple of the image file, and a boolean indicating the file type is dcm
:rtype: tuple
"""
image_file_name = image_file.filename
file_name, file_ext = os.path.splitext(image_file_name)
dcm = False
if file_ext == '.nii':
image_file = (gzip_file(image_file, file_name))
if file_ext == '.zip':
dcm = True
return (image_file, dcm)
def _generate_xnat_identifiers(self, dcm=False):
"""Generate object ids for use in XNAT
Creates a dictionary with keys for type of XNAT object, including subject, experiment, scan, resource and file.
The values in the dictionary are dictionaries with keys 'xnat_id' and, optionally, 'query_string'. 'xnat_id'
points to the identifier of the object in XNAT, and 'query_string' to the query that will be used in the put
request to create the object.
:return: xnat_id dictionary
:rtype: dict
"""
xnat_ids = {}
xnat_ids['subject'] = {'xnat_id': str(self.user_id).zfill(6)}
xnat_exp_id = '{}_MR{}'.format(xnat_ids['subject']['xnat_id'], self.user.num_experiments)
exp_date = self.experiment.date.strftime('%m/%d/%Y')
xnat_ids['experiment'] = {'xnat_id': xnat_exp_id, 'query_string':'?xnat:mrSessionData/date={}'.format(exp_date)}
scan_number = self.experiment.num_scans + 1
xnat_scan_id = 'T1_{}'.format(scan_number)
xnat_ids['scan'] = {'xnat_id':xnat_scan_id, 'query_string':'?xsiType=xnat:mrScanData'}
if dcm:
resource = 'DICOM'
else:
resource = 'NIFTI'
xnat_ids['resource'] = {'xnat_id': resource}
xnat_ids['file'] = {'xnat_id':'T1.nii.gz', 'query_string':'?xsi:type=xnat:mrScanData'}
return xnat_ids
def _check_for_existing_xnat_ids(self):
"""Check for existing attributes on the user and experiment
Generates a dictionary with current xnat_subject_id for the user, xnat_experiment_id for the experiment as
values if they exist (empty string if they do not exist). A private method not designed to be accessed by other
classes.
:return: a dictionary with two keys with the xnat subject id and xnat experiment id.
:rtype: dict
"""
return {k: getattr(v, k) if getattr(v, k) else '' for k, v in {'xnat_subject_id': self.user,
'xnat_experiment_id': self.experiment}.items()}
# todo: the check for existence before reassigning the values is verbose. Decide whether its important.
def _update_database_objects(self, objects=[], keywords=[], uris=[], ids=[],):
"""Update database objects
After uploading a scan, ensures that user, experient, and scan are updated in the database with their xnat uri
and xnat id.
:param list objects: user, experiment, and scan
:param list keywords: 'subject', 'experiment', and 'scan'
:param list uris: xnat uris
:param list ids: xnat ids
:return: None
"""
attributes = zip(objects, keywords, uris, ids)
for (obj, kw, uri, id) in attributes:
if not hasattr(obj, 'xnat_uri'):
obj.update({'xnat_uri': uri})
if not hasattr(obj,'xnat_{}_id'.format(kw)):
| obj.update({'xnat_{}_id'.format(kw): id}) |
|
x509.go | package main
import (
"crypto/x509"
"encoding/pem"
"fmt"
)
var caCertPEM string = `-----BEGIN CERTIFICATE-----
MIIDIDCCAgigAwIBAgIJAJyrSCEGC+PEMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV
BAMTCVN0aXRjaCBDQTAeFw0xNTA1MTMwMTM4NDNaFw0yNTA1MTAwMTM4NDNaMBQx
EjAQBgNVBAMTCVN0aXRjaCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
ggEBALH2+JCLOI3W8L9OAMQ5Od9JOIMzrBzg/IBAU6HZaP+r8VH/sdWEk5LzZgSm
mDg55L5WZsC8fWo03AtE9c5XTZ6lzjw0CeAg72aeE5Nd054CzX/XyV5NfOZgGOxp
m6Ng2JoZeUvEgvuk4AbCbEyn+k/IwJ71uWqWiVY4vDw9WHPkW29juy3aib4feWtI
6/pGy3WY8mZ9MV1Efvmkz0LxwjEek0irzhS/aZ08gKE6IqN7/JStm9+2Q9nzOhDb
/SnyuO1ZWpVa27LeVeVW3vuJ1j49fc6IIL/09bvaYT60FZH5U4oefOSMqTZqTlWi
hSoYGDgvQEB15ovzInU9tO9h3C0CAwEAAaN1MHMwHQYDVR0OBBYEFPvbQ7XVRG11
dD1bBKHQhmTZ0ADMMEQGA1UdIwQ9MDuAFPvbQ7XVRG11dD1bBKHQhmTZ0ADMoRik
FjAUMRIwEAYDVQQDEwlTdGl0Y2ggQ0GCCQCcq0ghBgvjxDAMBgNVHRMEBTADAQH/
MA0GCSqGSIb3DQEBCwUAA4IBAQBACYyfT3RK5dip4kINF+0ONd902MJRqjx6I1e3
LcLMFr3AlD/XQr9QdmojLfW4rjK5K9DW9O0Jpph3GkmZqGkSv4eovcSB6VcgQ0in
FT11AttOdn9xOFk7Y8UVfzS+twA5Cu2+IpI/16Dwz/dim3uwLQEHMlRQFkGv6UA7
73J8OG9HF1OLHHkGhII2s5me0O3iyCkSsRogml5N0KJWTERFx9bHfIOQ3D0/uPhq
AaleV7dSI0gW+XcvzrLnR5y25oGlJzZ9Wk6XILz0M2qA0wwVNrED/WGZkbg6rjwO
wk1nmqWPAcdECxiRHRul9Ftxe09zvNKreJQJiuiJBhQMn0UG
-----END CERTIFICATE-----`
var serverCertPEM_localhost = `-----BEGIN CERTIFICATE-----
MIICoTCCAYmgAwIBAgIBAjANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDEwlTdGl0
Y2ggQ0EwHhcNMTUwNTEzMjMwMzA2WhcNMjAwNTExMjMwMzA2WjAUMRIwEAYDVQQD
Ewlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIKg3A
xHfX44QWBojAbJTS8qHBcoljLyv7zSQOu2Gs5H5Dgg+sEkAFeePWtnRMP6wkCl/y
W9GQh54Rx2slCNU6PIK34qBFd9jXh1/OSzjHieS9/PD2O9ExiwcKG1hItTEUm6OM
Ceer5/Z1/DL8cpvkmVrYGpTrHi8ZkSdJ+pBOhsunu6mVfEt8oaZ78kEbJ6+1PZs9
F2vvMCZYsonrndCTnnSlNg1YFCAlA1UprFGnCZBbuwTmvp+lhJMOXXKlle6pOgA5
S7hv8LxCQcp+ruud0CeQsA49p1CPH/Ez4lcXbppVH77M5fgAfzgvDjxxPRpE0p94
BgdehKR8hTxXdtAHAgMBAAEwDQYJKoZIhvcNAQELBQADggEBACaCiU/joUiCzrzR
yFL3vYNpg/dVVucysRpYxX8s38+4u2HVZU4CO0ghx47p4dRBxJ5WDbq2hCqfCp9N
WSjIIaCIgH9EH5igrafz/SVfp/1vOBKNw7I0A7y48WvRa9vCmRE5HmiyNrvttK3v
K5i0GF4/3YBNT1lWIwsQ8JAF3Y8A+XzXDetuL5a8F53pbgj9TzYuFr7HwTnkSLim
THZ5BZFiteeLL2IcHJHtb1PhdKlnC1+gfom6nENshRWf/+faYaBP9sr/aARTGfSF
6s9X1Tvidl7Jxo7ZPqSyPwoJM67zokLZAAv+3c28BCb3ARrvrPYQrUjmkBtsV+su
+5UwuqY=
-----END CERTIFICATE-----`
func main() |
func asserteq(v1 interface{}, v2 interface{}, args ...interface{}) {
if v1 != v2 {
panic(fmt.Sprintf("Assertion Failed: %v != %v. ", v1, v2) + fmt.Sprint(args...))
}
}
func assertneq(v1 interface{}, v2 interface{}, args ...interface{}) {
if v1 == v2 {
panic(fmt.Sprintf("Assertion Failed: %v == %v. ", v1, v2) + fmt.Sprint(args...))
}
}
| {
pemBlock, _ := pem.Decode([]byte(caCertPEM))
assertneq(pemBlock, nil, "Couldn't decode certificate in PEM format.")
asserteq(pemBlock.Type, "CERTIFICATE", "Not a certificate")
caCert, err := x509.ParseCertificate(pemBlock.Bytes)
asserteq(err, nil, "Error parsing certificate.", err)
assertneq(caCert, nil, "CA Certificate is nil")
asserteq(caCert.IsCA, true, "Certificate is not a Certificate Authority")
pool := x509.NewCertPool()
//var pool *x509.CertPool
//ok := pool.AppendCertsFromPEM([]byte(caCertPEM))
//asserteq(ok, true, "Failed to add PEM certificate to pool.")
pool.AddCert(caCert)
pemBlock, _ = pem.Decode([]byte(serverCertPEM_localhost))
assertneq(pemBlock, nil)
asserteq(pemBlock.Type, "CERTIFICATE")
serverCert, err := x509.ParseCertificate(pemBlock.Bytes)
asserteq(err, nil, "Error parsing certificate.", err)
assertneq(serverCert, nil, "CA Certificate is nil")
asserteq(serverCert.IsCA, false, "Certificate is a Certificate Authority but shouldn't be")
opts := x509.VerifyOptions{}
opts.DNSName = "localhost"
opts.Roots = pool
_, err = serverCert.Verify(opts)
asserteq(err, nil, "Error verifying certificate.", err)
err = serverCert.VerifyHostname("localhost")
asserteq(err, nil, "Error verifying hostname.", err)
fmt.Println("OK")
} |
targetidset.go | /*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package event
// TargetIDSet - Set representation of TargetIDs.
type TargetIDSet map[TargetID]struct{}
// IsEmpty returns true if the set is empty.
func (set TargetIDSet) IsEmpty() bool {
return len(set) != 0
}
// Clone - returns copy of this set.
func (set TargetIDSet) Clone() TargetIDSet {
setCopy := NewTargetIDSet()
for k, v := range set {
setCopy[k] = v
}
return setCopy
}
// add - adds TargetID to the set.
func (set TargetIDSet) add(targetID TargetID) {
set[targetID] = struct{}{}
}
// Union - returns union with given set as new set.
func (set TargetIDSet) Union(sset TargetIDSet) TargetIDSet {
nset := set.Clone()
for k := range sset {
nset.add(k)
}
return nset
}
// Difference - returns diffrence with given set as new set. | nset := NewTargetIDSet()
for k := range set {
if _, ok := sset[k]; !ok {
nset.add(k)
}
}
return nset
}
// NewTargetIDSet - creates new TargetID set with given TargetIDs.
func NewTargetIDSet(targetIDs ...TargetID) TargetIDSet {
set := make(TargetIDSet)
for _, targetID := range targetIDs {
set.add(targetID)
}
return set
} | func (set TargetIDSet) Difference(sset TargetIDSet) TargetIDSet { |
io.rs | use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use rmp_serde::{Deserializer, Serializer};
use rmp_serde::encode::StructMapWriter;
use serde::{Deserialize, Serialize};
use std::fmt;
use std::io::{self, Read, Write};
use std::mem::size_of;
pub trait FromBytes {
fn from_bytes(reader: &mut Read) -> Result<Self, io::Error>
where
Self: Sized;
}
pub trait ToBytes {
fn to_bytes(&self, writer: &mut Write) -> Result<(), io::Error>;
}
pub trait HasBlockLength {
fn block_length() -> u16;
}
pub trait Message {
fn template_id() -> u16;
fn schema_id() -> u16;
fn version() -> u16;
}
pub trait HasData {
fn data(&self) -> &Data;
}
pub trait FromData {
fn from_data<H: HasData>(has_data: &H) -> Result<Self, io::Error>
where
Self: Sized;
}
pub trait ToData {
fn to_data(&self) -> Result<Data, io::Error>;
}
pub trait HasMessageLength {
fn message_length(&self) -> u32;
}
macro_rules! impl_has_message_length {
($t:ty) => (
impl HasMessageLength for $t {
fn message_length(&self) -> u32 {
::std::mem::size_of::<$t>() as u32
}
}
)
}
impl_has_message_length!(u8);
impl_has_message_length!(i8);
impl_has_message_length!(u16);
impl_has_message_length!(i16);
impl_has_message_length!(u32);
impl_has_message_length!(i32);
impl_has_message_length!(u64);
impl_has_message_length!(i64);
impl FromBytes for u8 {
fn from_bytes(reader: &mut Read) -> Result<Self, io::Error> {
reader.read_u8()
}
}
impl ToBytes for u8 {
fn to_bytes(&self, writer: &mut Write) -> Result<(), io::Error> {
writer.write_u8(*self)
}
}
impl HasBlockLength for u8 {
fn block_length() -> u16 {
size_of::<u8>() as u16
}
}
impl FromBytes for i8 {
fn from_bytes(reader: &mut Read) -> Result<Self, io::Error> {
reader.read_i8()
}
}
impl ToBytes for i8 {
fn to_bytes(&self, writer: &mut Write) -> Result<(), io::Error> {
writer.write_i8(*self)
}
}
impl HasBlockLength for i8 {
fn block_length() -> u16 {
size_of::<i8>() as u16
}
}
impl FromBytes for u16 {
fn from_bytes(reader: &mut Read) -> Result<Self, io::Error> {
reader.read_u16::<LittleEndian>()
}
}
impl ToBytes for u16 {
fn to_bytes(&self, writer: &mut Write) -> Result<(), io::Error> {
writer.write_u16::<LittleEndian>(*self)
}
}
impl HasBlockLength for u16 {
fn block_length() -> u16 {
size_of::<u16>() as u16
}
}
impl FromBytes for i16 {
fn from_bytes(reader: &mut Read) -> Result<Self, io::Error> {
reader.read_i16::<LittleEndian>()
}
}
impl ToBytes for i16 {
fn to_bytes(&self, writer: &mut Write) -> Result<(), io::Error> {
writer.write_i16::<LittleEndian>(*self) | fn block_length() -> u16 {
size_of::<i16>() as u16
}
}
impl FromBytes for u32 {
fn from_bytes(reader: &mut Read) -> Result<Self, io::Error> {
reader.read_u32::<LittleEndian>()
}
}
impl ToBytes for u32 {
fn to_bytes(&self, writer: &mut Write) -> Result<(), io::Error> {
writer.write_u32::<LittleEndian>(*self)
}
}
impl HasBlockLength for u32 {
fn block_length() -> u16 {
size_of::<u32>() as u16
}
}
impl FromBytes for i32 {
fn from_bytes(reader: &mut Read) -> Result<Self, io::Error> {
reader.read_i32::<LittleEndian>()
}
}
impl ToBytes for i32 {
fn to_bytes(&self, writer: &mut Write) -> Result<(), io::Error> {
writer.write_i32::<LittleEndian>(*self)
}
}
impl HasBlockLength for i32 {
fn block_length() -> u16 {
size_of::<i32>() as u16
}
}
impl FromBytes for u64 {
fn from_bytes(reader: &mut Read) -> Result<Self, io::Error> {
reader.read_u64::<LittleEndian>()
}
}
impl ToBytes for u64 {
fn to_bytes(&self, writer: &mut Write) -> Result<(), io::Error> {
writer.write_u64::<LittleEndian>(*self)
}
}
impl HasBlockLength for u64 {
fn block_length() -> u16 {
size_of::<u64>() as u16
}
}
impl FromBytes for i64 {
fn from_bytes(reader: &mut Read) -> Result<Self, io::Error> {
reader.read_i64::<LittleEndian>()
}
}
impl ToBytes for i64 {
fn to_bytes(&self, writer: &mut Write) -> Result<(), io::Error> {
writer.write_i64::<LittleEndian>(*self)
}
}
impl HasBlockLength for i64 {
fn block_length() -> u16 {
size_of::<i64>() as u16
}
}
#[derive(PartialEq, Default, Serialize, Deserialize)]
pub struct Data(Vec<u8>);
impl fmt::Debug for Data {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Data( len: {} )", self.0.len())
}
}
impl ::std::ops::Deref for Data {
type Target = Vec<u8>;
fn deref(&self) -> &Vec<u8> {
&self.0
}
}
impl From<Data> for Vec<u8> {
fn from(data: Data) -> Self {
data.0
}
}
impl From<Vec<u8>> for Data {
fn from(vec: Vec<u8>) -> Self {
Data(vec)
}
}
impl FromBytes for Data {
fn from_bytes(reader: &mut Read) -> Result<Self, io::Error> {
let length = reader.read_u16::<LittleEndian>()?;
let mut buffer = Vec::with_capacity(length as usize);
let mut handle = reader.take(u64::from(length));
handle.read_to_end(&mut buffer)?;
Ok(Data(buffer))
}
}
impl ToBytes for Data {
fn to_bytes(&self, writer: &mut Write) -> Result<(), io::Error> {
let length = self.0.len() as u16;
writer.write_u16::<LittleEndian>(length)?;
writer.write_all(&self.0)
}
}
impl HasData for Data {
fn data(&self) -> &Data {
self
}
}
impl HasMessageLength for Data {
fn message_length(&self) -> u32 {
(size_of::<u16>() + self.0.len()) as u32
}
}
impl FromBytes for String {
fn from_bytes(reader: &mut Read) -> Result<Self, io::Error> {
let buffer: Data = FromBytes::from_bytes(reader)?;
String::from_utf8(buffer.to_vec()).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
}
}
impl ToBytes for String {
fn to_bytes(&self, writer: &mut Write) -> Result<(), io::Error> {
let length = self.len() as u16;
writer.write_u16::<LittleEndian>(length)?;
writer.write_all(self.as_bytes())
}
}
impl HasMessageLength for String {
fn message_length(&self) -> u32 {
(size_of::<u16>() + self.as_bytes().len()) as u32
}
}
impl<T: FromBytes> FromBytes for Vec<T> {
fn from_bytes(reader: &mut Read) -> Result<Self, io::Error> {
let _block_length = reader.read_u16::<LittleEndian>()?;
let num_in_group = reader.read_u8()?;
let mut group: Vec<T> = Vec::with_capacity(num_in_group as usize);
for _ in 0..num_in_group {
group.push(T::from_bytes(reader)?);
}
Ok(group)
}
}
impl<T: ToBytes + HasBlockLength> ToBytes for Vec<T> {
fn to_bytes(&self, writer: &mut Write) -> Result<(), io::Error> {
writer.write_u16::<LittleEndian>(T::block_length())?;
let length = self.len() as u8;
writer.write_u8(length)?;
for element in self {
element.to_bytes(writer)?;
}
Ok(())
}
}
impl<'d, T> FromData for T
where
T: Deserialize<'d>,
{
fn from_data<H: HasData>(has_data: &H) -> Result<Self, io::Error> {
let reader: &[u8] = has_data.data();
let mut de = Deserializer::new(reader);
Deserialize::deserialize(&mut de).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
}
}
impl<T> ToData for T
where
T: Serialize,
{
fn to_data(&self) -> Result<Data, io::Error> {
let mut buffer = Vec::new();
self.serialize(&mut Serializer::with(&mut buffer, StructMapWriter))
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
Ok(Data(buffer))
}
}
impl<T: HasMessageLength> HasMessageLength for Vec<T> {
fn message_length(&self) -> u32 {
let length: u32 = self.into_iter().map(HasMessageLength::message_length).sum();
(size_of::<u16>() + size_of::<u8>()) as u32 + length
}
}
#[cfg(test)]
mod test {
use super::*;
use byteorder::{LittleEndian, WriteBytesExt};
use io::Write;
#[test]
fn from_bytes_u8() {
let mut buffer: &[u8] = &[1];
assert_eq!(1u8, FromBytes::from_bytes(&mut buffer).unwrap());
let mut empty: &[u8] = &[];
assert!(u8::from_bytes(&mut empty).is_err());
}
#[test]
fn to_bytes_u8() {
let mut buffer = vec![];
1u8.to_bytes(&mut buffer).unwrap();
assert_eq!(vec![1], buffer);
}
#[test]
fn from_bytes_i8() {
let mut buffer: &[u8] = &[1];
assert_eq!(1i8, FromBytes::from_bytes(&mut buffer).unwrap());
let mut empty: &[u8] = &[];
assert!(i8::from_bytes(&mut empty).is_err());
}
#[test]
fn to_bytes_i8() {
let mut buffer = vec![];
1i8.to_bytes(&mut buffer).unwrap();
assert_eq!(vec![1], buffer);
}
#[test]
fn from_bytes_u16() {
let mut buffer: &[u8] = &[0, 1];
assert_eq!(256u16, FromBytes::from_bytes(&mut buffer).unwrap());
let mut empty: &[u8] = &[];
assert!(u16::from_bytes(&mut empty).is_err());
}
#[test]
fn to_bytes_u16() {
let mut buffer = vec![];
256u16.to_bytes(&mut buffer).unwrap();
assert_eq!(vec![0, 1], buffer);
}
#[test]
fn from_bytes_i16() {
let mut buffer: &[u8] = &[0, 1];
assert_eq!(256i16, FromBytes::from_bytes(&mut buffer).unwrap());
let mut empty: &[u8] = &[];
assert!(i16::from_bytes(&mut empty).is_err());
}
#[test]
fn to_bytes_i16() {
let mut buffer = vec![];
256i16.to_bytes(&mut buffer).unwrap();
assert_eq!(vec![0, 1], buffer);
}
#[test]
fn from_bytes_u32() {
let mut buffer: &[u8] = &[0, 0, 1, 0];
assert_eq!(65536u32, FromBytes::from_bytes(&mut buffer).unwrap());
let mut empty: &[u8] = &[];
assert!(u32::from_bytes(&mut empty).is_err());
}
#[test]
fn to_bytes_u32() {
let mut buffer = vec![];
65536u32.to_bytes(&mut buffer).unwrap();
assert_eq!(vec![0, 0, 1, 0], buffer);
}
#[test]
fn from_bytes_i32() {
let mut buffer: &[u8] = &[0, 0, 1, 0];
assert_eq!(65536i32, FromBytes::from_bytes(&mut buffer).unwrap());
let mut empty: &[u8] = &[];
assert!(i32::from_bytes(&mut empty).is_err());
}
#[test]
fn to_bytes_i32() {
let mut buffer = vec![];
65536i32.to_bytes(&mut buffer).unwrap();
assert_eq!(vec![0, 0, 1, 0], buffer);
}
#[test]
fn from_bytes_u64() {
let mut buffer: &[u8] = &[0, 0, 0, 1, 0, 0, 0, 0];
assert_eq!(16777216u64, FromBytes::from_bytes(&mut buffer).unwrap());
let mut empty: &[u8] = &[];
assert!(u64::from_bytes(&mut empty).is_err());
}
#[test]
fn to_bytes_u64() {
let mut buffer = vec![];
16777216u64.to_bytes(&mut buffer).unwrap();
assert_eq!(vec![0, 0, 0, 1, 0, 0, 0, 0], buffer);
}
#[test]
fn from_bytes_i64() {
let mut buffer: &[u8] = &[0, 0, 0, 1, 0, 0, 0, 0];
assert_eq!(16777216i64, FromBytes::from_bytes(&mut buffer).unwrap());
let mut empty: &[u8] = &[];
assert!(i64::from_bytes(&mut empty).is_err());
}
#[test]
fn to_bytes_i64() {
let mut buffer = vec![];
16777216i64.to_bytes(&mut buffer).unwrap();
assert_eq!(vec![0, 0, 0, 1, 0, 0, 0, 0], buffer);
}
#[test]
fn from_bytes_string() {
let expected = "foobar".to_string();
let mut buffer = vec![];
buffer.write_u16::<LittleEndian>(6).unwrap();
buffer.write_all(expected.as_bytes()).unwrap();
assert_eq!(expected, String::from_bytes(&mut &buffer[..]).unwrap());
}
#[test]
fn to_bytes_string() {
let s = "foobar".to_string();
let mut buffer = vec![];
s.to_bytes(&mut buffer).unwrap();
assert_eq!(vec![6, 0, 102, 111, 111, 98, 97, 114], buffer);
}
#[test]
fn from_bytes_data() {
let expected = Data::from(vec![1, 2, 3, 4]);
let mut buffer = vec![];
buffer.write_u16::<LittleEndian>(4).unwrap();
buffer.write_all(&expected).unwrap();
assert_eq!(expected, Data::from_bytes(&mut &buffer[..]).unwrap());
}
#[test]
fn to_bytes_data() {
let data = Data::from(vec![1, 2, 3, 4]);
let mut buffer = vec![];
data.to_bytes(&mut buffer).unwrap();
assert_eq!(vec![4, 0, 1, 2, 3, 4], buffer);
}
#[test]
fn from_bytes_collection() {
let expected = vec![1u32, 2u32, 3u32, 4u32];
let mut buffer = vec![];
buffer
.write_u16::<LittleEndian>(u32::block_length())
.unwrap();
buffer.write_u8(expected.len() as u8).unwrap();
buffer.write_u32::<LittleEndian>(1).unwrap();
buffer.write_u32::<LittleEndian>(2).unwrap();
buffer.write_u32::<LittleEndian>(3).unwrap();
buffer.write_u32::<LittleEndian>(4).unwrap();
assert_eq!(expected, Vec::<u32>::from_bytes(&mut &buffer[..]).unwrap());
}
#[test]
fn to_bytes_collection() {
let c = vec![1u16, 2u16, 3u16, 4u16];
let mut buffer = vec![];
c.to_bytes(&mut buffer).unwrap();
assert_eq!(vec![2, 0, 4, 1, 0, 2, 0, 3, 0, 4, 0], buffer);
}
#[test]
fn has_block_length() {
assert_eq!(1, u8::block_length());
assert_eq!(1, i8::block_length());
assert_eq!(2, u16::block_length());
assert_eq!(2, i16::block_length());
assert_eq!(4, u32::block_length());
assert_eq!(4, i32::block_length());
assert_eq!(8, u64::block_length());
assert_eq!(8, i64::block_length());
}
#[test]
fn from_data() {
let data = Data(vec![0x92, 0x0c, 0xa3, 0x61, 0x62, 0x63]);
#[derive(Debug, PartialEq, Deserialize, Serialize)]
struct Foo {
a: u32,
b: String,
}
let expected = Foo {
a: 12,
b: "abc".to_string(),
};
assert_eq!(expected, Foo::from_data(&data).unwrap());
}
#[test]
fn to_data() {
#[derive(Debug, PartialEq, Deserialize, Serialize)]
struct Foo {
a: u32,
b: String,
}
let foo = Foo {
a: 12,
b: "abc".to_string(),
};
assert_eq!(
Data(vec![
0x82,
0xa1,
0x61,
0x0c,
0xa1,
0x62,
0xa3,
0x61,
0x62,
0x63,
]),
foo.to_data().unwrap()
);
}
} | }
}
impl HasBlockLength for i16 { |
index.ts | #!/usr/bin/env node
/**
* @author Sumant Manne <[email protected]>
* @license MIT
*/
/**
* The purpleist module.
*
* @module purplebot
*/
import * as yargs from 'yargs'
Promise = require('bluebird')
import { init } from './bot'
import Cli from './cli'
import { FileConfig } from './config'
if (require.main === module) {
const config = FileConfig.standard()
if (yargs.argv.help) {
yargs.showHelp()
process.exit(0)
}
launchBot(config)
}
| await bot.connect()
return cli
} | async function launchBot (options) {
const bot = await init(options)
const cli = new Cli(bot) |
user.interface.ts | export interface User {
fname?: string; | lname?: string;
email?: string;
} | |
interpreter.rs | use ::value::Value;
use ::scope::Scope;
use ::native;
use ::string_interner::StringInterner;
pub struct Interpreter {
pub interner: StringInterner,
pub current_scope: Scope,
}
impl Interpreter {
pub fn new() -> Self {
let mut interpreter = Interpreter {
interner: StringInterner::new(),
current_scope: Scope::new(),
};
interpreter.init();
interpreter
}
fn init(&mut self) {
self.add_str_to_current_scope("eq?", Value::new_native_proc(native::poly_eq));
self.add_str_to_current_scope("null?", Value::new_native_proc(native::null_));
self.add_str_to_current_scope("boolean?", Value::new_native_proc(native::boolean_));
self.add_str_to_current_scope("symbol?", Value::new_native_proc(native::symbol_));
self.add_str_to_current_scope("integer?", Value::new_native_proc(native::integer_));
self.add_str_to_current_scope("char?", Value::new_native_proc(native::char_));
self.add_str_to_current_scope("string?", Value::new_native_proc(native::string_));
self.add_str_to_current_scope("procedure?", Value::new_native_proc(native::procedure_));
self.add_str_to_current_scope("list?", Value::new_native_proc(native::list_));
self.add_str_to_current_scope("char->integer", Value::new_native_proc(native::char_integer));
self.add_str_to_current_scope("integer->char", Value::new_native_proc(native::integer_char));
self.add_str_to_current_scope("number->string", Value::new_native_proc(native::number_string));
self.add_str_to_current_scope("string->number", Value::new_native_proc(native::string_number));
self.add_str_to_current_scope("symbol->string", Value::new_native_proc(native::symbol_string));
self.add_str_to_current_scope("string->symbol", Value::new_native_proc(native::string_symbol));
self.add_str_to_current_scope("+", Value::new_native_proc(native::plus));
self.add_str_to_current_scope("-", Value::new_native_proc(native::minus));
self.add_str_to_current_scope("*", Value::new_native_proc(native::multiply));
self.add_str_to_current_scope("quotient", Value::new_native_proc(native::quotient));
self.add_str_to_current_scope("remainder", Value::new_native_proc(native::remainder));
self.add_str_to_current_scope("=", Value::new_native_proc(native::eq));
self.add_str_to_current_scope(">", Value::new_native_proc(native::gt));
self.add_str_to_current_scope(">=", Value::new_native_proc(native::ge));
self.add_str_to_current_scope("<", Value::new_native_proc(native::lt));
self.add_str_to_current_scope("<=", Value::new_native_proc(native::le));
self.add_str_to_current_scope("list", Value::new_native_proc(native::list));
self.add_str_to_current_scope("first", Value::new_native_proc(native::first));
self.add_str_to_current_scope("rest", Value::new_native_proc(native::rest));
self.add_str_to_current_scope("symbol-space", Value::new_native_proc(native::symbol_space));
}
pub fn evaluate(&mut self, value: &Value) -> Value {
let res: Value;
if let Some(mut list) = value.get_list() {
if list.len() > 0 {
let (func, mut args) = list.split_at_mut(1);
let func = self.evaluate(&func[0]);
if let Some(f) = func.get_native_fn_ptr() {
res = f(self, &mut args)
} else if let Some(p) = func.get_proc() {
res = p.evaluate(self, &args);
} else {
res = Value::new_condition(Value::new_string(format!("tried to call {}, which is not possible", func.to_string(&self.interner))));
}
} else {
res = Value::new_condition(Value::new_string(format!("tried to evaluate ()")));
};
} else if let Some(special_form) = value.get_special_form() {
res = special_form.evaluate(self);
} else if let Some(symbol) = value.get_symbol() {
res = self.current_scope
.lookup_symbol(symbol)
.unwrap_or(Value::new_condition(Value::new_string(format!("undefined ident: {}", value.to_string(&self.interner)))));
} else {
res = value.clone();
}
// TODO handle condition properly
match res.get_condition() {
Some(x) => panic!("{}", x.to_string(&self.interner)),
_ => (),
};
res
}
fn | (&mut self, s: &str, value: Value) {
let id = self.interner.intern(s);
self.current_scope.add_symbol(id, value);
}
}
| add_str_to_current_scope |
test_dist.py | # -*- coding: utf-8 -*-
from mollusc.dist import Twine
class TestTwine(object):
def test_register_command(self):
twine = Twine(username='registrar', password='reg1strar')
assert twine.get_command('register', 'package.whl', {'-c': 'test register'}) == [
'twine',
'register',
'--repository-url', Twine.DEFAULT_REPO_URL,
'-u', 'registrar',
'-c', 'test register',
'package.whl'
]
def test_upload_command(self):
| twine = Twine(username='uploader', password='upl0ader')
assert twine.get_command('upload', ['package.whl', 'package.tar.gz'], {'-c': 'test upload'}) == [
'twine',
'upload',
'--repository-url', Twine.DEFAULT_REPO_URL,
'-u', 'uploader',
'-c', 'test upload',
'package.whl', 'package.tar.gz'
] |
|
tasks.py | from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
from random import shuffle
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import sys
import os
import time
import math
import pickle
# Provide the predefined digit sequence tasks
def interleaved(sequence):
if len(sequence) <= 1:
return list(sequence)
else:
return [sequence[0], sequence[-1]] + interleaved(sequence[1:-1])
def | (sequence, task):
if task == "auto":
return sequence
if task == "rev":
return sequence[::-1]
if task == "sort":
return sorted(sequence)
if task == "interleave":
return interleaved(sequence)
| transform |
test_reader.py | """
"""
| from altdeutsch.reader import read_export
__author__ = ["Clément Besnier <[email protected]>", ]
class UnitTest(unittest.TestCase):
def test_hildebrandslied(self):
res = read_export(os.path.join(PACKDIR, "tests", "data", "hildebrandslied.txt"))
self.assertEqual(list(res.keys()),
['tok', 'lemma', 'inflection', 'verse', 'edition', 'pos', 'text', 'translation',
'lang', 'clause', 'inflectionClass', 'posLemma', 'rhyme', 'document',
'inflectionClassLemma'])
self.assertEqual(res["tok"][0], ['Ik', 'gihorta', 'ðat', 'seggen', 'ðat', 'sih', 'urhettun', 'ænon', 'muotin']) | import os
import unittest
from altdeutsch import PACKDIR |
PenFountain.js | /**
* Copyright IBM Corp. 2016, 2021
*
* This source code is licensed under the Apache-2.0 license found in the
* LICENSE file in the root directory of this source tree.
*
* Code generated by @carbon/icon-build-helpers. DO NOT EDIT.
*/
'use strict';
var iconPropTypes = require('./iconPropTypes-379308c1.js');
var React = require('react');
require('@carbon/icon-helpers');
require('prop-types');
function | (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }
var React__default = /*#__PURE__*/_interopDefaultLegacy(React);
var _path;
var PenFountain = /*#__PURE__*/React__default['default'].forwardRef(function PenFountain(_ref, ref) {
var children = _ref.children,
_ref$size = _ref.size,
size = _ref$size === void 0 ? 16 : _ref$size,
rest = iconPropTypes._objectWithoutProperties(_ref, ["children", "size"]);
return /*#__PURE__*/React__default['default'].createElement(iconPropTypes.Icon, iconPropTypes._objectSpread2({
width: size,
height: size,
ref: ref,
xmlns: "http://www.w3.org/2000/svg",
viewBox: "0 0 32 32",
fill: "currentColor"
}, rest), _path || (_path = /*#__PURE__*/React__default['default'].createElement("path", {
d: "M29.707,5.293l-3-3a.9994.9994,0,0,0-1.414,0L19.5859,8H17.0947A11.0118,11.0118,0,0,0,6.7124,15.3662L2.0562,28.67a1,1,0,0,0,1.2744,1.2739l13.3037-4.6562A11.012,11.012,0,0,0,24,14.9053V12.4141L29.707,6.707A.9994.9994,0,0,0,29.707,5.293Zm-7.414,6A1,1,0,0,0,22,12v2.9053A9.01,9.01,0,0,1,15.9731,23.4l-9.1677,3.209L16,17.4141,14.5859,16,5.3914,25.1948,8.6,16.0269A9.01,9.01,0,0,1,17.0947,10H20a1,1,0,0,0,.707-.293L26,4.4141,27.5859,6Z"
})), children);
});
PenFountain.propTypes = iconPropTypes.iconPropTypes;
module.exports = PenFountain;
| _interopDefaultLegacy |
test_mean_std.py | import unittest
import numpy as np
import torch
from rl_safety_algorithms.common.online_mean_std import OnlineMeanStd
import rl_safety_algorithms.common.mpi_tools as mpi_tools
class TestOnlineMeanStd(unittest.TestCase):
""" Testing the non-MPI version.
"""
@staticmethod
def perform_single_pass(rms, input_shape) -> bool:
x = torch.from_numpy(np.random.normal(size=input_shape))
rms(x) # perform one call
return True
@staticmethod
def get_data(M, N, epoch):
|
def test_vector_updates(self):
""" OnlineMeanStd module is updated with a batch of vector inputs,
i.e. inputs of shape M x N.
Note that std dev might differ more than 1e-5 when epochs > 10.
"""
epochs = 20
T = 500
obs_shape = (1, )
# === calculation through online updates
rms = OnlineMeanStd(shape=obs_shape)
for ep in range(epochs):
# shape of batch: T x obs_shape
vector_input = self.get_data(T, obs_shape[0], ep).flatten()
rms.update(vector_input)
rms_mean = rms.mean.numpy()
rms_std = rms.std.numpy()
# ===== calculate ground truths
obs_list = [self.get_data(T, obs_shape[0], ep) for ep in range(epochs)]
obs = np.vstack(obs_list)
gt_mean = np.mean(obs, axis=0)
gt_std = np.std(obs, axis=0)
self.assertTrue(np.allclose(rms_mean, gt_mean))
self.assertTrue(np.allclose(rms_std, gt_std, rtol=1e-2))
self.assertTrue(self.perform_single_pass(rms, obs_shape))
if __name__ == '__main__':
unittest.main()
| """Returns data matrix of shape MxN."""
np.random.seed(epoch)
# start = 10000 + 4 * epoch
# stop = pid*10000 + M * N + 4 * epoch
data = np.random.normal(size=(M, N))
return data |
add_html_css_js.py | # ------------------------------------------------------------------------------
#
# MIT License
#
# Copyright (c) 2021 nogira
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
from .common_imports import *
from aqt.webview import WebContent
from aqt.toolbar import TopToolbar, BottomToolbar
from aqt.deckbrowser import DeckBrowser
from aqt.reviewer import ReviewerBottomBar
from aqt.editor import Editor
from aqt.gui_hooks import webview_will_set_content
from typing import Any, Optional
import re
def | (
web_content: WebContent,
context: Optional[Any]) -> None:
# for all
web_content.css.append(f"{files_folder_rel}css/all.css")
if isinstance(context, TopToolbar):
web_content.css.append(
f"{files_folder_rel}css/top_toolbar.css")
# font awesome icons
decks_icon = f'<svg viewBox="0 0 512 512"><path fill="currentColor" d="M12.41 148.02l232.94 105.67c6.8 3.09 14.49 3.09 21.29 0l232.94-105.67c16.55-7.51 16.55-32.52 0-40.03L266.65 2.31a25.607 25.607 0 0 0-21.29 0L12.41 107.98c-16.55 7.51-16.55 32.53 0 40.04zm487.18 88.28l-58.09-26.33-161.64 73.27c-7.56 3.43-15.59 5.17-23.86 5.17s-16.29-1.74-23.86-5.17L70.51 209.97l-58.1 26.33c-16.55 7.5-16.55 32.5 0 40l232.94 105.59c6.8 3.08 14.49 3.08 21.29 0L499.59 276.3c16.55-7.5 16.55-32.5 0-40zm0 127.8l-57.87-26.23-161.86 73.37c-7.56 3.43-15.59 5.17-23.86 5.17s-16.29-1.74-23.86-5.17L70.29 337.87 12.41 364.1c-16.55 7.5-16.55 32.5 0 40l232.94 105.59c6.8 3.08 14.49 3.08 21.29 0L499.59 404.1c16.55-7.5 16.55-32.5 0-40z"></path></svg>'
add_icon = f'<svg viewBox="0 0 448 512"><path fill="currentColor" d="M416 208H272V64c0-17.67-14.33-32-32-32h-32c-17.67 0-32 14.33-32 32v144H32c-17.67 0-32 14.33-32 32v32c0 17.67 14.33 32 32 32h144v144c0 17.67 14.33 32 32 32h32c17.67 0 32-14.33 32-32V304h144c17.67 0 32-14.33 32-32v-32c0-17.67-14.33-32-32-32z"></path></svg>'
browse_icon = f'<svg viewBox="0 0 512 512"><path fill="currentColor" d="M505 442.7L405.3 343c-4.5-4.5-10.6-7-17-7H372c27.6-35.3 44-79.7 44-128C416 93.1 322.9 0 208 0S0 93.1 0 208s93.1 208 208 208c48.3 0 92.7-16.4 128-44v16.3c0 6.4 2.5 12.5 7 17l99.7 99.7c9.4 9.4 24.6 9.4 33.9 0l28.3-28.3c9.4-9.4 9.4-24.6.1-34zM208 336c-70.7 0-128-57.2-128-128 0-70.7 57.2-128 128-128 70.7 0 128 57.2 128 128 0 70.7-57.2 128-128 128z"></path></svg>'
stats_icon = f'<svg viewBox="0 0 512 512"><path fill="currentColor" d="M332.8 320h38.4c6.4 0 12.8-6.4 12.8-12.8V172.8c0-6.4-6.4-12.8-12.8-12.8h-38.4c-6.4 0-12.8 6.4-12.8 12.8v134.4c0 6.4 6.4 12.8 12.8 12.8zm96 0h38.4c6.4 0 12.8-6.4 12.8-12.8V76.8c0-6.4-6.4-12.8-12.8-12.8h-38.4c-6.4 0-12.8 6.4-12.8 12.8v230.4c0 6.4 6.4 12.8 12.8 12.8zm-288 0h38.4c6.4 0 12.8-6.4 12.8-12.8v-70.4c0-6.4-6.4-12.8-12.8-12.8h-38.4c-6.4 0-12.8 6.4-12.8 12.8v70.4c0 6.4 6.4 12.8 12.8 12.8zm96 0h38.4c6.4 0 12.8-6.4 12.8-12.8V108.8c0-6.4-6.4-12.8-12.8-12.8h-38.4c-6.4 0-12.8 6.4-12.8 12.8v198.4c0 6.4 6.4 12.8 12.8 12.8zM496 384H64V80c0-8.84-7.16-16-16-16H16C7.16 64 0 71.16 0 80v336c0 17.67 14.33 32 32 32h464c8.84 0 16-7.16 16-16v-32c0-8.84-7.16-16-16-16z"></path></svg>'
sync_icon = f'<svg id=sync-spinner viewBox="0 0 512 512"><path fill="currentColor" d="M370.72 133.28C339.458 104.008 298.888 87.962 255.848 88c-77.458.068-144.328 53.178-162.791 126.85-1.344 5.363-6.122 9.15-11.651 9.15H24.103c-7.498 0-13.194-6.807-11.807-14.176C33.933 94.924 134.813 8 256 8c66.448 0 126.791 26.136 171.315 68.685L463.03 40.97C478.149 25.851 504 36.559 504 57.941V192c0 13.255-10.745 24-24 24H345.941c-21.382 0-32.09-25.851-16.971-40.971l41.75-41.749zM32 296h134.059c21.382 0 32.09 25.851 16.971 40.971l-41.75 41.75c31.262 29.273 71.835 45.319 114.876 45.28 77.418-.07 144.315-53.144 162.787-126.849 1.344-5.363 6.122-9.15 11.651-9.15h57.304c7.498 0 13.194 6.807 11.807 14.176C478.067 417.076 377.187 504 256 504c-66.448 0-126.791-26.136-171.315-68.685L48.97 471.03C33.851 486.149 8 475.441 8 454.059V320c0-13.255 10.745-24 24-24z"></path></svg>'
replace_navbar_buttons_dict = [
{"id": "decks",
"tooltip": "Decks\nShortcut: D",
"img": decks_icon},
{"id": "add",
"tooltip": "Add Card\nShortcut: A",
"img": add_icon},
{"id": "browse",
"tooltip": "Browse\nShortcut: B",
"img": browse_icon},
{"id": "stats",
"tooltip": "Stats\nShortcut: T",
"img": stats_icon}
]
mod_body = web_content.body
for item in replace_navbar_buttons_dict:
find = r'(<a.*?title=").*?(".*?id="' + item["id"] + r'".*?>).*?(?=</a>)'
replace = r'\1' + item["tooltip"] + r'\2REPLACE_ME'
mod_body = re.sub(find, replace, mod_body)
mod_body = re.sub('REPLACE_ME', item["img"], mod_body)
find_sync = r'(<a.*?title=").*?(".*?id="sync".*?>).*?\n.*?\n.*?(?=</a>)'
replace_sync = r'\1Sync\nShortcut: Y\2REPLACE_ME'
mod_body = re.sub(find_sync, replace_sync, mod_body)
mod_body = re.sub('REPLACE_ME', sync_icon, mod_body)
web_content.body = mod_body
# ------------------- ANKIMOTE COMPATIBILITY ---------------------------
web_content.head += '<script async' + \
f' src="{files_folder_rel}js/ankimote_patch.js"></script>'
elif isinstance(context, DeckBrowser):
web_content.css.append(
f"{files_folder_rel}css/home.css")
if not(config["show 'studied cards today' in homescreen"]):
web_content.css.append(
f"{files_folder_rel}css/home_studiedToday.css")
elif isinstance(context, BottomToolbar):
# this one doesnt work for some reason
web_content.css.append(
f"{files_folder_rel}css/bottom_toolbar.css")
elif isinstance(context, ReviewerBottomBar):
web_content.css.append(
f"{files_folder_rel}css/reviewer_bottom.css")
elif isinstance(context, Editor):
web_content.css.append(
f"{files_folder_rel}css/editor-window.css")
web_content.css.append(
f"{files_folder_rel}prism/prism.css")
web_content.js.append(
f"{files_folder_rel}prism/prism.js")
# add script after html has loaded to make sure shadowroot is present
web_content.head += '<script async id="editor-script"' + \
f' src="{files_folder_rel}js/editor_theme.js"></script>'
webview_will_set_content.append(on_webview_will_set_content)
| on_webview_will_set_content |
class_loader_context.go | // Copyright 2020 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dexpreopt
import (
"fmt"
"sort"
"strconv"
"strings"
"android/soong/android"
)
// This comment describes the following:
// 1. the concept of class loader context (CLC) and its relation to classpath
// 2. how PackageManager constructs CLC from shared libraries and their dependencies
// 3. build-time vs. run-time CLC and why this matters for dexpreopt
// 4. manifest fixer: a tool that adds missing <uses-library> tags to the manifests
// 5. build system support for CLC
//
// 1. Class loader context
// -----------------------
//
// Java libraries and apps that have run-time dependency on other libraries should list the used
// libraries in their manifest (AndroidManifest.xml file). Each used library should be specified in
// a <uses-library> tag that has the library name and an optional attribute specifying if the
// library is optional or required. Required libraries are necessary for the library/app to run (it
// will fail at runtime if the library cannot be loaded), and optional libraries are used only if
// they are present (if not, the library/app can run without them).
//
// The libraries listed in <uses-library> tags are in the classpath of a library/app.
//
// Besides libraries, an app may also use another APK (for example in the case of split APKs), or
// anything that gets added by the app dynamically. In general, it is impossible to know at build
// time what the app may use at runtime. In the build system we focus on the known part: libraries.
//
// Class loader context (CLC) is a tree-like structure that describes class loader hierarchy. The
// build system uses CLC in a more narrow sense: it is a tree of libraries that represents
// transitive closure of all <uses-library> dependencies of a library/app. The top-level elements of
// a CLC are the direct <uses-library> dependencies specified in the manifest (aka. classpath). Each
// node of a CLC tree is a <uses-library> which may have its own <uses-library> sub-nodes.
//
// Because <uses-library> dependencies are, in general, a graph and not necessarily a tree, CLC may
// contain subtrees for the same library multiple times. In other words, CLC is the dependency graph
// "unfolded" to a tree. The duplication is only on a logical level, and the actual underlying class
// loaders are not duplicated (at runtime there is a single class loader instance for each library).
//
// Example: A has <uses-library> tags B, C and D; C has <uses-library tags> B and D;
// D has <uses-library> E; B and E have no <uses-library> dependencies. The CLC is:
// A
// ├── B
// ├── C
// │ ├── B
// │ └── D
// │ └── E
// └── D
// └── E
//
// CLC defines the lookup order of libraries when resolving Java classes used by the library/app.
// The lookup order is important because libraries may contain duplicate classes, and the class is
// resolved to the first match.
//
// 2. PackageManager and "shared" libraries
// ----------------------------------------
//
// In order to load an APK at runtime, PackageManager (in frameworks/base) creates a CLC. It adds
// the libraries listed in the <uses-library> tags in the app's manifest as top-level CLC elements.
// For each of the used libraries PackageManager gets all its <uses-library> dependencies (specified
// as tags in the manifest of that library) and adds a nested CLC for each dependency. This process
// continues recursively until all leaf nodes of the constructed CLC tree are libraries that have no
// <uses-library> dependencies.
//
// PackageManager is aware only of "shared" libraries. The definition of "shared" here differs from
// its usual meaning (as in shared vs. static). In Android, Java "shared" libraries are those listed
// in /system/etc/permissions/platform.xml file. This file is installed on device. Each entry in it
// contains the name of a "shared" library, a path to its DEX jar file and a list of dependencies
// (other "shared" libraries that this one uses at runtime and specifies them in <uses-library> tags
// in its manifest).
//
// In other words, there are two sources of information that allow PackageManager to construct CLC
// at runtime: <uses-library> tags in the manifests and "shared" library dependencies in
// /system/etc/permissions/platform.xml.
//
// 3. Build-time and run-time CLC and dexpreopt
// --------------------------------------------
//
// CLC is needed not only when loading a library/app, but also when compiling it. Compilation may
// happen either on device (known as "dexopt") or during the build (known as "dexpreopt"). Since
// dexopt takes place on device, it has the same information as PackageManager (manifests and
// shared library dependencies). Dexpreopt, on the other hand, takes place on host and in a totally
// different environment, and it has to get the same information from the build system (see the
// section about build system support below).
//
// Thus, the build-time CLC used by dexpreopt and the run-time CLC used by PackageManager are
// the same thing, but computed in two different ways.
//
// It is important that build-time and run-time CLCs coincide, otherwise the AOT-compiled code
// created by dexpreopt will be rejected. In order to check the equality of build-time and
// run-time CLCs, the dex2oat compiler records build-time CLC in the *.odex files (in the
// "classpath" field of the OAT file header). To find the stored CLC, use the following command:
// `oatdump --oat-file=<FILE> | grep '^classpath = '`.
//
// Mismatch between build-time and run-time CLC is reported in logcat during boot (search with
// `logcat | grep -E 'ClassLoaderContext [a-z ]+ mismatch'`. Mismatch is bad for performance, as it
// forces the library/app to either be dexopted, or to run without any optimizations (e.g. the app's
// code may need to be extracted in memory from the APK, a very expensive operation).
//
// A <uses-library> can be either optional or required. From dexpreopt standpoint, required library
// must be present at build time (its absence is a build error). An optional library may be either
// present or absent at build time: if present, it will be added to the CLC, passed to dex2oat and
// recorded in the *.odex file; otherwise, if the library is absent, it will be skipped and not
// added to CLC. If there is a mismatch between built-time and run-time status (optional library is
// present in one case, but not the other), then the build-time and run-time CLCs won't match and
// the compiled code will be rejected. It is unknown at build time if the library will be present at
// runtime, therefore either including or excluding it may cause CLC mismatch.
//
// 4. Manifest fixer
// -----------------
//
// Sometimes <uses-library> tags are missing from the source manifest of a library/app. This may
// happen for example if one of the transitive dependencies of the library/app starts using another
// <uses-library>, and the library/app's manifest isn't updated to include it.
//
// Soong can compute some of the missing <uses-library> tags for a given library/app automatically
// as SDK libraries in the transitive dependency closure of the library/app. The closure is needed
// because a library/app may depend on a static library that may in turn depend on an SDK library,
// (possibly transitively via another library).
//
// Not all <uses-library> tags can be computed in this way, because some of the <uses-library>
// dependencies are not SDK libraries, or they are not reachable via transitive dependency closure.
// But when possible, allowing Soong to calculate the manifest entries is less prone to errors and
// simplifies maintenance. For example, consider a situation when many apps use some static library
// that adds a new <uses-library> dependency -- all the apps will have to be updated. That is
// difficult to maintain.
//
// Soong computes the libraries that need to be in the manifest as the top-level libraries in CLC.
// These libraries are passed to the manifest_fixer.
//
// All libraries added to the manifest should be "shared" libraries, so that PackageManager can look
// up their dependencies and reconstruct the nested subcontexts at runtime. There is no build check
// to ensure this, it is an assumption.
//
// 5. Build system support
// -----------------------
//
// In order to construct CLC for dexpreopt and manifest_fixer, the build system needs to know all
// <uses-library> dependencies of the dexpreopted library/app (including transitive dependencies).
// For each <uses-librarry> dependency it needs to know the following information:
//
// - the real name of the <uses-library> (it may be different from the module name)
// - build-time (on host) and run-time (on device) paths to the DEX jar file of the library
// - whether this library is optional or required
// - all <uses-library> dependencies
//
// Since the build system doesn't have access to the manifest contents (it cannot read manifests at
// the time of build rule generation), it is necessary to copy this information to the Android.bp
// and Android.mk files. For blueprints, the relevant properties are `uses_libs` and
// `optional_uses_libs`. For makefiles, relevant variables are `LOCAL_USES_LIBRARIES` and
// `LOCAL_OPTIONAL_USES_LIBRARIES`. It is preferable to avoid specifying these properties explicilty
// when they can be computed automatically by Soong (as the transitive closure of SDK library
// dependencies).
//
// Some of the Java libraries that are used as <uses-library> are not SDK libraries (they are
// defined as `java_library` rather than `java_sdk_library` in the Android.bp files). In order for
// the build system to handle them automatically like SDK libraries, it is possible to set a
// property `provides_uses_lib` or variable `LOCAL_PROVIDES_USES_LIBRARY` on the blueprint/makefile
// module of such library. This property can also be used to specify real library name in cases
// when it differs from the module name.
//
// Because the information from the manifests has to be duplicated in the Android.bp/Android.mk
// files, there is a danger that it may get out of sync. To guard against that, the build system
// generates a rule that checks the metadata in the build files against the contents of a manifest
// (verify_uses_libraries). The manifest can be available as a source file, or as part of a prebuilt
// APK. Note that reading the manifests at the Ninja stage of the build is fine, unlike the build
// rule generation phase.
//
// ClassLoaderContext is a structure that represents CLC.
//
type ClassLoaderContext struct {
// The name of the library.
Name string
// On-host build path to the library dex file (used in dex2oat argument --class-loader-context).
Host android.Path
// On-device install path (used in dex2oat argument --stored-class-loader-context).
Device string
// Nested sub-CLC for dependencies.
Subcontexts []*ClassLoaderContext
}
// ClassLoaderContextMap is a map from SDK version to CLC. There is a special entry with key
// AnySdkVersion that stores unconditional CLC that is added regardless of the target SDK version.
//
// Conditional CLC is for compatibility libraries which didn't exist prior to a certain SDK version
// (say, N), but classes in them were in the bootclasspath jars, etc., and in version N they have
// been separated into a standalone <uses-library>. Compatibility libraries should only be in the
// CLC if the library/app that uses them has `targetSdkVersion` less than N in the manifest.
//
// Currently only apps (but not libraries) use conditional CLC.
//
// Target SDK version information is unavailable to the build system at rule generation time, so
// the build system doesn't know whether conditional CLC is needed for a given app or not. So it
// generates a build rule that includes conditional CLC for all versions, extracts the target SDK
// version from the manifest, and filters the CLCs based on that version. Exact final CLC that is
// passed to dex2oat is unknown to the build system, and gets known only at Ninja stage.
//
type ClassLoaderContextMap map[int][]*ClassLoaderContext
// Compatibility libraries. Some are optional, and some are required: this is the default that
// affects how they are handled by the Soong logic that automatically adds implicit SDK libraries
// to the manifest_fixer, but an explicit `uses_libs`/`optional_uses_libs` can override this.
var OrgApacheHttpLegacy = "org.apache.http.legacy"
var AndroidTestBase = "android.test.base"
var AndroidTestMock = "android.test.mock"
var AndroidHidlBase = "android.hidl.base-V1.0-java"
var AndroidHidlManager = "android.hidl.manager-V1.0-java"
// Compatibility libraries grouped by version/optionality (for convenience, to avoid repeating the
// same lists in multiple places).
var OptionalCompatUsesLibs28 = []string{
OrgApacheHttpLegacy,
}
var OptionalCompatUsesLibs30 = []string{
AndroidTestBase,
AndroidTestMock,
}
var CompatUsesLibs29 = []string{
AndroidHidlManager,
AndroidHidlBase,
}
var OptionalCompatUsesLibs = append(android.CopyOf(OptionalCompatUsesLibs28), OptionalCompatUsesLibs30...)
var CompatUsesLibs = android.CopyOf(CompatUsesLibs29)
const UnknownInstallLibraryPath = "error"
// AnySdkVersion means that the class loader context is needed regardless of the targetSdkVersion
// of the app. The numeric value affects the key order in the map and, as a result, the order of
// arguments passed to construct_context.py (high value means that the unconditional context goes
// last). We use the converntional "current" SDK level (10000), but any big number would do as well.
const AnySdkVersion int = android.FutureApiLevelInt
// Add class loader context for the given library to the map entry for the given SDK version.
func (clcMap ClassLoaderContextMap) addContext(ctx android.ModuleInstallPathContext, sdkVer int, lib string,
hostPath, installPath android.Path, nestedClcMap ClassLoaderContextMap) error {
devicePath := UnknownInstallLibraryPath
if installPath == nil {
if android.InList(lib, CompatUsesLibs) || android.InList(lib, OptionalCompatUsesLibs) {
// Assume that compatibility libraries are installed in /system/framework.
installPath = android.PathForModuleInstall(ctx, "framework", lib+".jar")
} else {
// For some stub libraries the only known thing is the name of their implementation
// library, but the library itself is unavailable (missing or part of a prebuilt). In
// such cases we still need to add the library to <uses-library> tags in the manifest,
// but we cannot use it for dexpreopt.
}
}
if installPath != nil {
devicePath = android.InstallPathToOnDevicePath(ctx, installPath.(android.InstallPath))
}
// Nested class loader context shouldn't have conditional part (it is allowed only at the top level).
for ver, _ := range nestedClcMap {
if ver != AnySdkVersion {
clcStr, _ := ComputeClassLoaderContext(nestedClcMap)
return fmt.Errorf("nested class loader context shouldn't have conditional part: %s", clcStr)
}
}
subcontexts := nestedClcMap[AnySdkVersion]
// If the library with this name is already present as one of the unconditional top-level
// components, do not re-add it.
for _, clc := range clcMap[sdkVer] {
if clc.Name == lib {
return nil
}
}
clcMap[sdkVer] = append(clcMap[sdkVer], &ClassLoaderContext{
Name: lib,
Host: hostPath,
Device: devicePath,
Subcontexts: subcontexts,
})
return nil
}
// Add class loader context for the given SDK version. Don't fail on unknown build/install paths, as
// libraries with unknown paths still need to be processed by manifest_fixer (which doesn't care
// about paths). For the subset of libraries that are used in dexpreopt, their build/install paths
// are validated later before CLC is used (in validateClassLoaderContext).
func (clcMap ClassLoaderContextMap) AddContext(ctx android.ModuleInstallPathContext, sdkVer int,
lib string, hostPath, installPath android.Path, nestedClcMap ClassLoaderContextMap) {
err := clcMap.addContext(ctx, sdkVer, lib, hostPath, installPath, nestedClcMap)
if err != nil {
ctx.ModuleErrorf(err.Error())
}
}
// Merge the other class loader context map into this one, do not override existing entries.
// The implicitRootLib parameter is the name of the library for which the other class loader
// context map was constructed. If the implicitRootLib is itself a <uses-library>, it should be
// already present in the class loader context (with the other context as its subcontext) -- in
// that case do not re-add the other context. Otherwise add the other context at the top-level.
func (clcMap ClassLoaderContextMap) AddContextMap(otherClcMap ClassLoaderContextMap, implicitRootLib string) {
if otherClcMap == nil {
return
}
// If the implicit root of the merged map is already present as one of top-level subtrees, do
// not merge it second time.
for _, clc := range clcMap[AnySdkVersion] {
if clc.Name == implicitRootLib {
return
}
}
for sdkVer, otherClcs := range otherClcMap {
for _, otherClc := range otherClcs {
alreadyHave := false
for _, clc := range clcMap[sdkVer] {
if clc.Name == otherClc.Name {
alreadyHave = true
break
}
}
if !alreadyHave {
clcMap[sdkVer] = append(clcMap[sdkVer], otherClc)
}
}
}
}
// Returns top-level libraries in the CLC (conditional CLC, i.e. compatibility libraries are not
// included). This is the list of libraries that should be in the <uses-library> tags in the
// manifest. Some of them may be present in the source manifest, others are added by manifest_fixer.
func (clcMap ClassLoaderContextMap) UsesLibs() (ulibs []string) {
if clcMap != nil {
clcs := clcMap[AnySdkVersion]
ulibs = make([]string, 0, len(clcs))
for _, clc := range clcs {
ulibs = append(ulibs, clc.Name)
}
}
return ulibs
}
// Now that the full unconditional context is known, reconstruct conditional context.
// Apply filters for individual libraries, mirroring what the PackageManager does when it
// constructs class loader context on device.
//
// TODO(b/132357300): remove "android.hidl.manager" and "android.hidl.base" for non-system apps.
//
func fixClassLoaderContext(clcMap ClassLoaderContextM | cMap.UsesLibs()
for sdkVer, clcs := range clcMap {
if sdkVer == AnySdkVersion {
continue
}
fixedClcs := []*ClassLoaderContext{}
for _, clc := range clcs {
if android.InList(clc.Name, usesLibs) {
// skip compatibility libraries that are already included in unconditional context
} else if clc.Name == AndroidTestMock && !android.InList("android.test.runner", usesLibs) {
// android.test.mock is only needed as a compatibility library (in conditional class
// loader context) if android.test.runner is used, otherwise skip it
} else {
fixedClcs = append(fixedClcs, clc)
}
clcMap[sdkVer] = fixedClcs
}
}
}
// Return true if all build/install library paths are valid (including recursive subcontexts),
// otherwise return false. A build path is valid if it's not nil. An install path is valid if it's
// not equal to a special "error" value.
func validateClassLoaderContext(clcMap ClassLoaderContextMap) (bool, error) {
for sdkVer, clcs := range clcMap {
if valid, err := validateClassLoaderContextRec(sdkVer, clcs); !valid || err != nil {
return valid, err
}
}
return true, nil
}
// Helper function for validateClassLoaderContext() that handles recursion.
func validateClassLoaderContextRec(sdkVer int, clcs []*ClassLoaderContext) (bool, error) {
for _, clc := range clcs {
if clc.Host == nil || clc.Device == UnknownInstallLibraryPath {
if sdkVer == AnySdkVersion {
// Return error if dexpreopt doesn't know paths to one of the <uses-library>
// dependencies. In the future we may need to relax this and just disable dexpreopt.
if clc.Host == nil {
return false, fmt.Errorf("invalid build path for <uses-library> \"%s\"", clc.Name)
} else {
return false, fmt.Errorf("invalid install path for <uses-library> \"%s\"", clc.Name)
}
} else {
// No error for compatibility libraries, as Soong doesn't know if they are needed
// (this depends on the targetSdkVersion in the manifest), but the CLC is invalid.
return false, nil
}
}
if valid, err := validateClassLoaderContextRec(sdkVer, clc.Subcontexts); !valid || err != nil {
return valid, err
}
}
return true, nil
}
// Return the class loader context as a string, and a slice of build paths for all dependencies.
// Perform a depth-first preorder traversal of the class loader context tree for each SDK version.
// Return the resulting string and a slice of on-host build paths to all library dependencies.
func ComputeClassLoaderContext(clcMap ClassLoaderContextMap) (clcStr string, paths android.Paths) {
// CLC for different SDK versions should come in specific order that agrees with PackageManager.
// Since PackageManager processes SDK versions in ascending order and prepends compatibility
// libraries at the front, the required order is descending, except for AnySdkVersion that has
// numerically the largest order, but must be the last one. Example of correct order: [30, 29,
// 28, AnySdkVersion]. There are Soong tests to ensure that someone doesn't change this by
// accident, but there is no way to guard against changes in the PackageManager, except for
// grepping logcat on the first boot for absence of the following messages:
//
// `logcat | grep -E 'ClassLoaderContext [a-z ]+ mismatch`
//
versions := make([]int, 0, len(clcMap))
for ver, _ := range clcMap {
if ver != AnySdkVersion {
versions = append(versions, ver)
}
}
sort.Sort(sort.Reverse(sort.IntSlice(versions))) // descending order
versions = append(versions, AnySdkVersion)
for _, sdkVer := range versions {
sdkVerStr := fmt.Sprintf("%d", sdkVer)
if sdkVer == AnySdkVersion {
sdkVerStr = "any" // a special keyword that means any SDK version
}
hostClc, targetClc, hostPaths := computeClassLoaderContextRec(clcMap[sdkVer])
if hostPaths != nil {
clcStr += fmt.Sprintf(" --host-context-for-sdk %s %s", sdkVerStr, hostClc)
clcStr += fmt.Sprintf(" --target-context-for-sdk %s %s", sdkVerStr, targetClc)
}
paths = append(paths, hostPaths...)
}
return clcStr, android.FirstUniquePaths(paths)
}
// Helper function for ComputeClassLoaderContext() that handles recursion.
func computeClassLoaderContextRec(clcs []*ClassLoaderContext) (string, string, android.Paths) {
var paths android.Paths
var clcsHost, clcsTarget []string
for _, clc := range clcs {
subClcHost, subClcTarget, subPaths := computeClassLoaderContextRec(clc.Subcontexts)
if subPaths != nil {
subClcHost = "{" + subClcHost + "}"
subClcTarget = "{" + subClcTarget + "}"
}
clcsHost = append(clcsHost, "PCL["+clc.Host.String()+"]"+subClcHost)
clcsTarget = append(clcsTarget, "PCL["+clc.Device+"]"+subClcTarget)
paths = append(paths, clc.Host)
paths = append(paths, subPaths...)
}
clcHost := strings.Join(clcsHost, "#")
clcTarget := strings.Join(clcsTarget, "#")
return clcHost, clcTarget, paths
}
// Class loader contexts that come from Make via JSON dexpreopt.config. JSON CLC representation is
// the same as Soong representation except that SDK versions and paths are represented with strings.
type jsonClassLoaderContext struct {
Name string
Host string
Device string
Subcontexts []*jsonClassLoaderContext
}
// A map from SDK version (represented with a JSON string) to JSON CLCs.
type jsonClassLoaderContextMap map[string][]*jsonClassLoaderContext
// Convert JSON CLC map to Soong represenation.
func fromJsonClassLoaderContext(ctx android.PathContext, jClcMap jsonClassLoaderContextMap) ClassLoaderContextMap {
clcMap := make(ClassLoaderContextMap)
for sdkVerStr, clcs := range jClcMap {
sdkVer, ok := strconv.Atoi(sdkVerStr)
if ok != nil {
if sdkVerStr == "any" {
sdkVer = AnySdkVersion
} else {
android.ReportPathErrorf(ctx, "failed to parse SDK version in dexpreopt.config: '%s'", sdkVerStr)
}
}
clcMap[sdkVer] = fromJsonClassLoaderContextRec(ctx, clcs)
}
return clcMap
}
// Recursive helper for fromJsonClassLoaderContext.
func fromJsonClassLoaderContextRec(ctx android.PathContext, jClcs []*jsonClassLoaderContext) []*ClassLoaderContext {
clcs := make([]*ClassLoaderContext, 0, len(jClcs))
for _, clc := range jClcs {
clcs = append(clcs, &ClassLoaderContext{
Name: clc.Name,
Host: constructPath(ctx, clc.Host),
Device: clc.Device,
Subcontexts: fromJsonClassLoaderContextRec(ctx, clc.Subcontexts),
})
}
return clcs
}
// Convert Soong CLC map to JSON representation for Make.
func toJsonClassLoaderContext(clcMap ClassLoaderContextMap) jsonClassLoaderContextMap {
jClcMap := make(jsonClassLoaderContextMap)
for sdkVer, clcs := range clcMap {
sdkVerStr := fmt.Sprintf("%d", sdkVer)
jClcMap[sdkVerStr] = toJsonClassLoaderContextRec(clcs)
}
return jClcMap
}
// Recursive helper for toJsonClassLoaderContext.
func toJsonClassLoaderContextRec(clcs []*ClassLoaderContext) []*jsonClassLoaderContext {
jClcs := make([]*jsonClassLoaderContext, len(clcs))
for i, clc := range clcs {
jClcs[i] = &jsonClassLoaderContext{
Name: clc.Name,
Host: clc.Host.String(),
Device: clc.Device,
Subcontexts: toJsonClassLoaderContextRec(clc.Subcontexts),
}
}
return jClcs
}
| ap) {
usesLibs := cl |
sql_translation_client.go | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
package migration
import (
"context"
"fmt"
"math"
"net/url"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
gtransport "google.golang.org/api/transport/grpc"
migrationpb "google.golang.org/genproto/googleapis/cloud/bigquery/migration/v2alpha"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
// newSqlTranslationClientHook, if non-nil, is called by NewSqlTranslationClient
// to obtain additional client options (used by wrapper libraries). Nil means no hook.
var newSqlTranslationClientHook clientHook

// SqlTranslationCallOptions contains the retry settings for each method of SqlTranslationClient.
type SqlTranslationCallOptions struct {
	// TranslateQuery holds the per-call options (retry/timeout) applied to every
	// TranslateQuery RPC unless overridden at the call site.
	TranslateQuery []gax.CallOption
}
// defaultSqlTranslationGRPCClientOptions returns the base dial options for the
// gRPC transport: service endpoint and mTLS endpoint, JWT audience and scopes,
// and an effectively unlimited receive size so large responses are not rejected.
// NOTE: this file is generated by protoc-gen-go_gapic; do not hand-edit logic.
func defaultSqlTranslationGRPCClientOptions() []option.ClientOption {
	return []option.ClientOption{
		internaloption.WithDefaultEndpoint("bigquerymigration.googleapis.com:443"),
		internaloption.WithDefaultMTLSEndpoint("bigquerymigration.mtls.googleapis.com:443"),
		internaloption.WithDefaultAudience("https://bigquerymigration.googleapis.com/"),
		internaloption.WithDefaultScopes(DefaultAuthScopes()...),
		internaloption.EnableJwtWithScope(),
		option.WithGRPCDialOption(grpc.WithDisableServiceConfig()),
		// Service config is disabled above; call options below lift the default
		// 4 MiB gRPC receive limit.
		option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(math.MaxInt32))),
	}
}
func defaultSqlTranslationCallOptions() *SqlTranslationCallOptions |
// internalSqlTranslationClient is an interface that defines the methods
// available from BigQuery Migration API. It abstracts the transport
// (currently only gRPC) behind the public SqlTranslationClient wrapper.
type internalSqlTranslationClient interface {
	Close() error
	setGoogleClientInfo(...string)
	Connection() *grpc.ClientConn
	TranslateQuery(context.Context, *migrationpb.TranslateQueryRequest, ...gax.CallOption) (*migrationpb.TranslateQueryResponse, error)
}
// SqlTranslationClient is a client for interacting with BigQuery Migration API.
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
//
// Provides other SQL dialects to GoogleSQL translation operations.
type SqlTranslationClient struct {
	// The internal transport-dependent client.
	internalClient internalSqlTranslationClient

	// The call options for this service. Mutating these affects all future calls
	// made through this client (the transport holds a pointer to this field).
	CallOptions *SqlTranslationCallOptions
}
// Wrapper methods routed to the internal client.

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *SqlTranslationClient) Close() error {
	return c.internalClient.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *SqlTranslationClient) setGoogleClientInfo(keyval ...string) {
	c.internalClient.setGoogleClientInfo(keyval...)
}
// Connection returns a connection to the API service.
//
// Deprecated: connections are pooled internally; callers should not rely on a
// single *grpc.ClientConn.
func (c *SqlTranslationClient) Connection() *grpc.ClientConn {
	return c.internalClient.Connection()
}
// TranslateQuery translates input queries from source dialects to GoogleSQL.
// It simply delegates to the transport-specific implementation.
func (c *SqlTranslationClient) TranslateQuery(ctx context.Context, req *migrationpb.TranslateQueryRequest, opts ...gax.CallOption) (*migrationpb.TranslateQueryResponse, error) {
	return c.internalClient.TranslateQuery(ctx, req, opts...)
}
// sqlTranslationGRPCClient is a client for interacting with BigQuery Migration API over gRPC transport.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type sqlTranslationGRPCClient struct {
	// Connection pool of gRPC connections to the service.
	connPool gtransport.ConnPool

	// flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE
	disableDeadlines bool

	// Points back to the CallOptions field of the containing SqlTranslationClient,
	// so user edits to the public field are seen by this transport.
	CallOptions **SqlTranslationCallOptions

	// The gRPC API client.
	sqlTranslationClient migrationpb.SqlTranslationServiceClient

	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}
// NewSqlTranslationClient creates a new sql translation service client based on gRPC.
// The returned client must be Closed when it is done being used to clean up its underlying connections.
//
// Provides other SQL dialects to GoogleSQL translation operations.
func NewSqlTranslationClient(ctx context.Context, opts ...option.ClientOption) (*SqlTranslationClient, error) {
	clientOpts := defaultSqlTranslationGRPCClientOptions()
	// Allow a wrapper-installed hook to append extra client options.
	if newSqlTranslationClientHook != nil {
		hookOpts, err := newSqlTranslationClientHook(ctx, clientHookParams{})
		if err != nil {
			return nil, err
		}
		clientOpts = append(clientOpts, hookOpts...)
	}

	disableDeadlines, err := checkDisableDeadlines()
	if err != nil {
		return nil, err
	}

	// User options are appended last so they override the defaults.
	connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
	if err != nil {
		return nil, err
	}
	client := SqlTranslationClient{CallOptions: defaultSqlTranslationCallOptions()}

	c := &sqlTranslationGRPCClient{
		connPool:             connPool,
		disableDeadlines:     disableDeadlines,
		sqlTranslationClient: migrationpb.NewSqlTranslationServiceClient(connPool),
		CallOptions:          &client.CallOptions,
	}
	c.setGoogleClientInfo()

	client.internalClient = c

	return &client, nil
}
// Connection returns a connection to the API service.
//
// Deprecated: a pooled connection is returned; do not assume affinity.
func (c *sqlTranslationGRPCClient) Connection() *grpc.ClientConn {
	return c.connPool.Conn()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients. The Go, gapic, gax, and grpc versions are
// always appended after the caller-supplied key/value pairs.
func (c *sqlTranslationGRPCClient) setGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", versionGo()}, keyval...)
	kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *sqlTranslationGRPCClient) Close() error {
	return c.connPool.Close()
}
// TranslateQuery performs the TranslateQuery RPC over gRPC. The request's
// parent resource is propagated in the x-goog-request-params routing header.
func (c *sqlTranslationGRPCClient) TranslateQuery(ctx context.Context, req *migrationpb.TranslateQueryRequest, opts ...gax.CallOption) (*migrationpb.TranslateQueryResponse, error) {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))

	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// The full-slice expression forces append to copy, so per-call opts never
	// mutate the shared default option slice.
	opts = append((*c.CallOptions).TranslateQuery[0:len((*c.CallOptions).TranslateQuery):len((*c.CallOptions).TranslateQuery)], opts...)
	var resp *migrationpb.TranslateQueryResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.sqlTranslationClient.TranslateQuery(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
| {
return &SqlTranslationCallOptions{
TranslateQuery: []gax.CallOption{},
}
} |
params.go | package types
import (
"fmt"
yaml "gopkg.in/yaml.v2"
sdk "github.com/cosmos/cosmos-sdk/types"
paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
"github.com/ethereum/go-ethereum/core/vm"
ethermint "github.com/cosmos/ethermint/types"
)
// Compile-time assertion that Params implements the ParamSet interface.
var _ paramtypes.ParamSet = &Params{}

// Parameter keys
var (
	ParamStoreKeyEVMDenom     = []byte("EVMDenom")
	ParamStoreKeyEnableCreate = []byte("EnableCreate")
	ParamStoreKeyEnableCall   = []byte("EnableCall")
	// NOTE(review): the stored key string is "EnableExtraEIPs", which does not
	// match the Go identifier ExtraEIPs. Renaming the stored key would break
	// existing param stores — confirm before changing.
	ParamStoreKeyExtraEIPs = []byte("EnableExtraEIPs")
)
// ParamKeyTable returns the parameter key table, registering the key/value
// pairs declared by Params.ParamSetPairs with the params module.
func ParamKeyTable() paramtypes.KeyTable {
	return paramtypes.NewKeyTable().RegisterParamSet(&Params{})
}
// NewParams creates a new Params instance from the given EVM denomination,
// create/call switches, and an optional list of extra EIPs to activate.
func NewParams(evmDenom string, enableCreate, enableCall bool, extraEIPs ...int64) Params {
	var p Params
	p.EvmDenom = evmDenom
	p.EnableCreate = enableCreate
	p.EnableCall = enableCall
	p.ExtraEIPs = extraEIPs
	return p
}
// DefaultParams returns the default evm parameters: photon denomination,
// contract creation and calls enabled, and no extra EIPs activated.
func DefaultParams() Params {
	// TODO: define default values for the extra EIPs.
	// Calling NewParams with no variadic args leaves ExtraEIPs as a nil slice.
	return NewParams(ethermint.AttoPhoton, true, true)
}
// String implements the fmt.Stringer interface, rendering the params as YAML.
// The marshal error is deliberately ignored; on failure an empty string is returned.
func (p Params) String() string {
	out, _ := yaml.Marshal(p)
	return string(out)
}
// ParamSetPairs returns the parameter set pairs, binding each store key to its
// field pointer and validation function for the params keeper.
func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs {
	return paramtypes.ParamSetPairs{
		paramtypes.NewParamSetPair(ParamStoreKeyEVMDenom, &p.EvmDenom, validateEVMDenom),
		paramtypes.NewParamSetPair(ParamStoreKeyEnableCreate, &p.EnableCreate, validateBool),
		paramtypes.NewParamSetPair(ParamStoreKeyEnableCall, &p.EnableCall, validateBool),
		paramtypes.NewParamSetPair(ParamStoreKeyExtraEIPs, &p.ExtraEIPs, validateEIPs),
	}
}
// Validate performs basic validation on evm parameters: the denomination must
// be a valid SDK denom, and every extra EIP must be activateable in the EVM.
func (p Params) Validate() error {
	if err := sdk.ValidateDenom(p.EvmDenom); err != nil {
		return err
	}

	return validateEIPs(p.ExtraEIPs)
}
func validateEVMDenom(i interface{}) error |
// validateBool checks that the given parameter value is a bool.
func validateBool(i interface{}) error {
	if _, ok := i.(bool); !ok {
		return fmt.Errorf("invalid parameter type: %T", i)
	}
	return nil
}
// validateEIPs checks that the value is an []int64 and that each entry is an
// EIP number go-ethereum's EVM can activate.
func validateEIPs(i interface{}) error {
	eipSlice, ok := i.([]int64)
	if !ok {
		return fmt.Errorf("invalid EIP slice type: %T", i)
	}

	for _, eipNumber := range eipSlice {
		if vm.ValidEip(int(eipNumber)) {
			continue
		}
		return fmt.Errorf("EIP %d is not activateable", eipNumber)
	}

	return nil
}
| {
denom, ok := i.(string)
if !ok {
return fmt.Errorf("invalid parameter EVM denom type: %T", i)
}
return sdk.ValidateDenom(denom)
} |
tests.js | var synchd = require('./synchronized-bind');
// Verifies the core contract of synchd(scope, block, done):
//  - blocks queued on the SAME scope run strictly one after another, in
//    submission order;
//  - blocks on DIFFERENT scopes run independently (ordering decided only by
//    their own timer durations);
//  - every argument the block passes to its `cb` is forwarded to `done`.
exports['functions executed in sequence and arguments passed to done'] = {
  'multiple calls with objects as scope': function(test){
    var self = this;
    // Two distinct object scopes: work on scopeA must not delay scopeB.
    var scopes = {
      scopeA: {},
      scopeB: {}
    };
    var completedOrder = [];
    test.expect(9);
    synchd(scopes.scopeA, function(cb){
      setTimeout(function(){
        cb('firstArg', 'secondArg');
      }, 500);
    }, function(firstArg, secondArg){
      test.same(firstArg, 'firstArg');
      test.same(secondArg, 'secondArg');
      completedOrder.push('scopeA-block1');
      done();
    });
    synchd(scopes.scopeB, function(cb){
      setTimeout(function(){
        cb('thirdArg', 'fourthArg');
      }, 20);
    }, function(firstArg, secondArg){
      test.same(firstArg, 'thirdArg');
      test.same(secondArg, 'fourthArg');
      completedOrder.push('scopeB-block1');
      done();
    });
    synchd(scopes.scopeA, function(cb){
      setTimeout(function(){
        cb('fifthArg', 'sixthArg');
      }, 50);
    }, function(firstArg, secondArg){
      test.same(firstArg, 'fifthArg');
      test.same(secondArg, 'sixthArg');
      completedOrder.push('scopeA-block2');
      done();
    });
    synchd(scopes.scopeB, function(cb){
      setTimeout(function(){
        cb('seventhArg', 'eighthArg');
      }, 200);
    }, function(firstArg, secondArg){
      test.same(firstArg, 'seventhArg');
      test.same(secondArg, 'eighthArg');
      completedOrder.push('scopeB-block2');
      done();
    });
    // Distinct scopes: completion order follows each scope's own queue
    // (B1 at ~20ms, B2 at ~220ms, A1 at ~500ms, A2 at ~550ms).
    var expectedCompletedOrder = [
      'scopeB-block1',
      'scopeB-block2',
      'scopeA-block1',
      'scopeA-block2'
    ];
    var expected = 4;
    // Shared countdown: compare order only once all four blocks finished.
    function done(){
      expected--;
      if (expected) return;
      test.same(expectedCompletedOrder, completedOrder);
      test.done();
    }
  },
  'multiple calls with equal strings as scope': function(test){
    var self = this;
    // Equal string scopes must be treated as ONE scope: everything serializes.
    var scopes = {
      scopeA: 'scope',
      scopeB: 'scope'
    };
    var completedOrder = [];
    test.expect(9);
    synchd(scopes.scopeA, function(cb){
      setTimeout(function(){
        cb('firstArg', 'secondArg');
      }, 500);
    }, function(firstArg, secondArg){
      test.same(firstArg, 'firstArg');
      test.same(secondArg, 'secondArg');
      completedOrder.push('scopeA-block1');
      done();
    });
    synchd(scopes.scopeB, function(cb){
      setTimeout(function(){
        cb('thirdArg', 'fourthArg');
      }, 20);
    }, function(firstArg, secondArg){
      test.same(firstArg, 'thirdArg');
      test.same(secondArg, 'fourthArg');
      completedOrder.push('scopeB-block1');
      done();
    });
    synchd(scopes.scopeA, function(cb){
      setTimeout(function(){
        cb('fifthArg', 'sixthArg');
      }, 50);
    }, function(firstArg, secondArg){
      test.same(firstArg, 'fifthArg');
      test.same(secondArg, 'sixthArg');
      completedOrder.push('scopeA-block2');
      done();
    });
    synchd(scopes.scopeB, function(cb){
      setTimeout(function(){
        cb('seventhArg', 'eighthArg');
      }, 200);
    }, function(firstArg, secondArg){
      test.same(firstArg, 'seventhArg');
      test.same(secondArg, 'eighthArg');
      completedOrder.push('scopeB-block2');
      done();
    });
    // One shared scope: completion strictly follows submission order,
    // regardless of each block's timer duration.
    var expectedCompletedOrder = [
      'scopeA-block1',
      'scopeB-block1',
      'scopeA-block2',
      'scopeB-block2'
    ];
    var expected = 4;
    function done(){
      expected--;
      if (expected) return;
      test.same(expectedCompletedOrder, completedOrder);
      test.done();
    }
  },
  'multiple calls with null/empty as scope': function(test){
    var self = this;
    // Null scopes share the default (global) scope: same serialization as above.
    var scopes = {
      scopeA: null,
      scopeB: null
    };
    var completedOrder = [];
    test.expect(9);
    synchd(scopes.scopeA, function(cb){
      setTimeout(function(){
        cb('firstArg', 'secondArg');
      }, 500);
    }, function(firstArg, secondArg){
      test.same(firstArg, 'firstArg');
      test.same(secondArg, 'secondArg');
      completedOrder.push('scopeA-block1');
      done();
    });
    synchd(scopes.scopeB, function(cb){
      setTimeout(function(){
        cb('thirdArg', 'fourthArg');
      }, 20);
    }, function(firstArg, secondArg){
      test.same(firstArg, 'thirdArg');
      test.same(secondArg, 'fourthArg');
      completedOrder.push('scopeB-block1');
      done();
    });
    synchd(scopes.scopeA, function(cb){
      setTimeout(function(){
        cb('fifthArg', 'sixthArg');
      }, 50);
    }, function(firstArg, secondArg){
      test.same(firstArg, 'fifthArg');
      test.same(secondArg, 'sixthArg');
      completedOrder.push('scopeA-block2');
      done();
    });
    synchd(scopes.scopeB, function(cb){
      setTimeout(function(){
        cb('seventhArg', 'eighthArg');
      }, 200);
    }, function(firstArg, secondArg){
      test.same(firstArg, 'seventhArg');
      test.same(secondArg, 'eighthArg');
      completedOrder.push('scopeB-block2');
      done();
    });
    var expectedCompletedOrder = [
      'scopeA-block1',
      'scopeB-block1',
      'scopeA-block2',
      'scopeB-block2'
    ];
    var expected = 4;
    function done(){
      expected--;
      if (expected) return;
      test.same(expectedCompletedOrder, completedOrder);
      test.done();
    }
  },
  'single call': function(test) {
    test.expect(2);
    synchd('scope', function(cb){
      test.ok('main function called');
      cb();
    }, function(){
      // While the done callback runs, the scope is still considered occupied.
      test.same(1, synchd.inScope());
      test.done();
    });
  },
  'with no `done` callback passed': function(test) {
    test.expect(2);
    synchd('scope', function(cb){
      test.ok('main function called');
      cb();
    });
    // Synchronous block with no done callback: scope is already released here.
    test.same(0, synchd.inScope());
    test.done();
  }
}
// Verifies synchd.fn(...), which wraps a block into a reusable function:
//  - optional scope (literal value or a function computing the scope from args);
//  - all arguments passed to `done` are forwarded to the eventual callback;
//  - `this` at call time is preserved inside the wrapped block.
exports['can return a function for calling later'] = {
  'without scope': function(test) {
    test.expect(4);
    synchd.fn(function(done){
      test.ok('main function called');
      done('err', 'other', 'args');
    })(function(err, other, args){
      test.same('err', err);
      test.same('other', other);
      test.same('args', args);
      test.done()
    });
  },
  'with scope': function(test) {
    test.expect(4);
    synchd.fn('scope', function(done){
      test.ok('main function called');
      done('err', 'other', 'args');
    })(function(err, other, args){
      test.same('err', err);
      test.same('other', other);
      test.same('args', args);
      test.done()
    });
  },
  'with function to determine scope': function(test) {
    test.expect(4);
    // The first argument is a scope-resolving function; its return value
    // ('abc') becomes the active scope key during execution.
    synchd.fn(function(){
      test.ok('scope function called');
      return 'abc';
    }, function(firstArg, done){
      test.ok('main function called');
      done(firstArg, synchd.scopeKeys());
    })('firstArg', function(firstArg, scopeKeys){
      test.same('firstArg', firstArg);
      test.same(['abc'], scopeKeys);
      test.done();
    });
  },
  'retaining context': {
    'with only callback': function(test) {
      test.expect(2);
      var object = {
        id: 'abc123'
      };
      object.testFn = synchd.fn(function(done){
        test.ok('main function called');
        done(this);
      });
      // Called as a method, so `this` inside the block must be `object`.
      object.testFn(function(context){
        test.same(object, context);
        test.done();
      });
    },
    'with argument and callback': function(test) {
      test.expect(3);
      var object = {
        id: 'abc123'
      };
      object.testFn = synchd.fn(function(firstArg, done){
        test.ok('main function called');
        done(firstArg, this);
      });
      object.testFn('firstArg', function(firstArg, context){
        test.same('firstArg', firstArg);
        test.same(object, context);
        test.done();
      });
    }
  }
}
exports['can return a function for use as in a cache lookup'] = {
setUp: function(done) {
var self = this;
self.localCache = {};
self.remoteCacheLookups = 0;
self.remoteCacheLookup = function(cb) {
self.remoteCacheLookups++;
setTimeout(cb.bind(null, self.remoteCacheLookups), 10);
};
done();
},
'with only callback': {
setUp: function(done) {
var self = this;
function | (id) { this.id = id; }
MyObject.prototype.testFn = synchd.cachedFn(function(){
return this.id;
}, function(cb, cont){
var found = self.localCache[this.id];
if (found) return cb(null, found);
cont();
}, function(cb) {
var id = this.id;
self.remoteCacheLookup(function(val) {
self.localCache[id] = val;
cb(null, self.localCache[id]);
});
});
self.firstObj = new MyObject('abc');
self.secondObj = new MyObject({ objectAsId: 'def' });
self.thirdObj = new MyObject({ objectAsId: 'ghi' });
done();
},
'subsequent calls will use local cache': function(test) {
var self = this;
var results = [];
self.firstObj.testFn(function(){
results.push(arguments);
});
self.firstObj.testFn(function(){
results.push(arguments);
});
self.firstObj.testFn(function(){
results.push(arguments);
test.same(self.remoteCacheLookups, 1);
test.same('1', results[0][1]);
test.same(results[0][1], results[1][1]);
test.same(results[1][1], results[2][1]);
test.done();
});
},
'scopes isolate stacks': function(test) {
var self = this;
var results = [];
self.firstObj.testFn(function(){
results.push(arguments);
});
self.secondObj.testFn(function(){
results.push(arguments);
});
self.thirdObj.testFn(function(){
results.push(arguments);
test.same(self.remoteCacheLookups, 3);
test.same('1', results[0][1]);
test.same('2', results[1][1]);
test.same('3', results[2][1]);
test.done();
});
}
},
'with arguments and callback': {
setUp: function(done) {
var self = this;
function MyObject(id) { this.id = id; }
MyObject.prototype.testFn = synchd.cachedFn(function(){
return this.id;
}, function(firstArg, secondArg, cb, cont){
var found = self.localCache[this.id];
if (found) return cb(null, found);
cont();
}, function(firstArg, secondArg, cb) {
var id = this.id;
self.remoteCacheLookup(function(val) {
self.localCache[id] = val + firstArg + secondArg;
cb(null, self.localCache[id]);
});
});
self.firstObj = new MyObject('abc');
self.secondObj = new MyObject({ objectAsId: 'def' });
self.thirdObj = new MyObject({ objectAsId: 'ghi' });
done();
},
'subsequent calls will use local cache': function(test) {
var self = this;
var results = [];
self.firstObj.testFn('a', 'a', function(){
results.push(arguments);
});
self.firstObj.testFn('a', 'b', function(){
results.push(arguments);
});
self.firstObj.testFn('a', 'c', function(){
results.push(arguments);
test.same(self.remoteCacheLookups, 1);
test.same('1aa', results[0][1]);
test.same(results[0][1], results[1][1]);
test.same(results[1][1], results[2][1]);
test.done();
});
},
'scopes isolate stacks': function(test) {
var self = this;
var results = [];
self.firstObj.testFn('a', 'a', function(){
results.push(arguments);
});
self.secondObj.testFn('a', 'b', function(){
results.push(arguments);
});
self.thirdObj.testFn('a', 'c', function(){
results.push(arguments);
test.same(self.remoteCacheLookups, 3);
test.same('1aa', results[0][1]);
test.same('2ab', results[1][1]);
test.same('3ac', results[2][1]);
test.done();
});
}
}
}
// process.on('uncaughtException', function(err) {
// console.log('Caught exception: ' + err, err.stack);
// });
| MyObject |
main_test.go | /*
Copyright 2019 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wordpress
import (
"flag"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"path"
"strings"
"syscall"
"testing"
"time"
"database/sql"
vtenv "vitess.io/vitess/go/vt/env"
_ "github.com/go-sql-driver/mysql"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/test/endtoend/cluster"
)
var (
	// clusterInstance is the Vitess test cluster shared by every test in this package.
	clusterInstance *cluster.LocalProcessCluster
	// vtParams holds the MySQL connection parameters pointing at vtgate.
	vtParams     mysql.ConnParams
	KeyspaceName = "wordpressdb"
	Cell         = "test"
	// VSchema declares the unsharded wordpress tables to vtgate.
	VSchema = `{
    "sharded": false,
    "tables": {
        "wp_term_relationships":{},
        "wp_comments":{},
        "wp_links":{},
        "wp_options":{},
        "wp_postmeta":{},
        "wp_term_taxonomy":{},
        "wp_usermeta":{},
        "wp_termmeta":{},
        "wp_terms":{},
        "wp_commentmeta":{},
        "wp_posts":{},
        "wp_users":{}
    }
}`
)
func TestMain(m *testing.M) {
flag.Parse()
current, err := os.Getwd()
if err != nil {
panic(err)
}
path := current + "/wordpress.cnf"
os.Setenv("EXTRA_MY_CNF", path) | clusterInstance = cluster.NewCluster(Cell, "localhost")
defer clusterInstance.Teardown()
// Start topo server
err := clusterInstance.StartTopo()
if err != nil {
return 1
}
// Start keyspace
keyspace := &cluster.Keyspace{
Name: KeyspaceName,
SchemaSQL: "",
VSchema: VSchema,
}
err = clusterInstance.StartUnshardedKeyspace(*keyspace, 1, true)
if err != nil {
return 1
}
// Start vtgate
err = clusterInstance.StartVtgate()
if err != nil {
return 1
}
vtParams = mysql.ConnParams{
Host: clusterInstance.Hostname,
Port: clusterInstance.VtgateMySQLPort,
}
startVanillaMySQL()
return m.Run()
}()
if mysqld != nil {
fmt.Println("killing mysqld after tests")
mysqld.Process.Signal(syscall.SIGKILL)
}
os.Exit(exitCode)
}
// mysqld is the vanilla (non-Vitess) MySQL server process started by
// startVanillaMySQL; nil if it was never started.
var mysqld *exec.Cmd

// socketFile is the unix socket path the vanilla mysqld listens on.
var socketFile string
// startVanillaMySQL starts a plain mysqld in a fresh temp directory and
// creates the `wordpressdb` database in it. Any setup error panics, which
// aborts the test binary — acceptable inside TestMain setup.
func startVanillaMySQL() {
	handleErr := func(err error) {
		if err != nil {
			panic(err)
		}
	}
	tmpDir, err := ioutil.TempDir("", "vitess_tests")
	handleErr(err)

	// Locate the mysqld binary under the configured MySQL root.
	vtMysqlRoot, err := vtenv.VtMysqlRoot()
	handleErr(err)
	mysqldPath, err := binaryPath(vtMysqlRoot, "mysqld")
	handleErr(err)

	datadir := fmt.Sprintf("--datadir=%s", tmpDir)
	basedir := "--basedir=" + vtMysqlRoot

	// Initialize the data directory with no root password.
	args := []string{
		basedir,
		datadir,
		"--initialize-insecure",
	}
	initDbCmd, err := startCommand(mysqldPath, args)
	handleErr(err)
	err = initDbCmd.Wait()
	handleErr(err)

	tmpPort, err := getFreePort()
	handleErr(err)

	// BUG FIX: this was previously tmpDir + "socket_file" (no separator), which
	// created the socket as a sibling of the temp dir instead of inside it.
	socketFile = path.Join(tmpDir, "socket_file")

	args = []string{
		basedir,
		datadir,
		fmt.Sprintf("--port=%d", tmpPort),
		"--socket=" + socketFile,
	}
	mysqld, err = startCommand(mysqldPath, args)
	handleErr(err)
	time.Sleep(1 * time.Second) // give mysqld a chance to start listening to the socket before running tests

	planMysql, err := sql.Open("mysql", fmt.Sprintf("root@unix(%s)/", socketFile))
	handleErr(err)
	defer planMysql.Close()
	_, err = planMysql.Exec("create database wordpressdb")
	handleErr(err)
}
// startCommand launches the given binary with args, wiring its stdout/stderr
// to this process so mysqld output is visible in test logs. The started
// command is returned along with the Start error.
func startCommand(mysqldPath string, args []string) (*exec.Cmd, error) {
	command := exec.Command(mysqldPath, args...)
	command.Stdout = os.Stdout
	command.Stderr = os.Stderr
	err := command.Start()
	return command, err
}
// binaryPath does a limited path lookup for a command, searching only within
// sbin, bin, libexec and scripts under the given root. It returns the first
// existing candidate, or an error listing the directories searched.
func binaryPath(root, binary string) (string, error) {
	subdirs := []string{"sbin", "bin", "libexec", "scripts"}
	for _, dir := range subdirs {
		candidate := path.Join(root, dir, binary)
		if _, statErr := os.Stat(candidate); statErr == nil {
			return candidate, nil
		}
	}
	return "", fmt.Errorf("%s not found in any of %s/{%s}",
		binary, root, strings.Join(subdirs, ","))
}
// getFreePort asks the kernel for an ephemeral TCP port on localhost and
// returns it. The listener is closed before returning, so there is a small
// race window before the caller binds the port.
func getFreePort() (int, error) {
	tcpAddr, err := net.ResolveTCPAddr("tcp", "localhost:0")
	if err != nil {
		return 0, err
	}

	listener, err := net.ListenTCP("tcp", tcpAddr)
	if err != nil {
		return 0, err
	}
	port := listener.Addr().(*net.TCPAddr).Port
	listener.Close()
	return port, nil
}
ui.py | """UI class"""
import cv2 as cv
import numpy as np
class UI:
"""Handles UI drawing and managing"""
    def __init__(self, frame):
        """Initialize the overlay from a sample frame.

        frame: image ndarray of shape (height, width, channels); only its
        shape is used, to size the drawing surface.
        """
        height, width, channels = frame.shape
        self.width = width
        self.height = height
        # Pixel coordinates of the left/top edge of each third of the frame,
        # i.e. the boundaries of the 3x3 grid cells.
        self.separators = {
            "y": (0, height // 3, 2 * height // 3),
            "x": (0, width // 3, 2 * width // 3),
        }
        # Black canvas the shapes are drawn onto; blended over frames by overlay().
        self.figure = np.zeros((height, width, channels), dtype=np.uint8)
        self.grid_drawn = False
def draw_grid(self, color=(255, 0, 0), thickness=9):
"""Draws a 3 by 3 grid on the frame"""
if not self.grid_drawn:
for i in range(1, 3):
startpoint_height = (0, self.separators["y"][i])
startpoint_width = (self.separators["x"][i], 0)
endpoint_height = (self.width, self.separators["y"][i])
endpoint_width = (self.separators["x"][i], self.height)
self.figure = cv.line(
self.figure, startpoint_height, endpoint_height, color, thickness
)
self.figure = cv.line(
self.figure, startpoint_width, endpoint_width, color, thickness
)
self.grid_drawn = True
def _draw_x(self, x, y, color, thickness):
"""Draws X on the selected grid marker.\n
location should be a tuple with two numbers indicating place on the grid"""
width_offset = self.separators["x"][1] * 0.25
height_offset = self.separators["y"][1] * 0.25
left = int(self.separators["x"][x] + width_offset)
up = int(self.separators["y"][y] + height_offset)
right = int(self.separators["x"][x] + width_offset * 3)
down = int(self.separators["y"][y] + height_offset * 3)
self.figure = cv.line(self.figure, (left, up), (right, down), color, thickness)
self.figure = cv.line(self.figure, (left, down), (right, up), color, thickness)
def _draw_circle(self, x, y, color, thickness):
|
def draw_move(self, coords, color=(0, 0, 255), thickness=7):
"""Draws a shape based on the coordinate object"""
if coords.symbol == "x":
self._draw_x(coords.x, coords.y, color, thickness)
else:
self._draw_circle(coords.x, coords.y, color, thickness)
    def get_separators(self):
        """Return the grid boundary dict (shared reference, not a copy)."""
        return self.separators
    def overlay(self, frame):
        """Return the frame with the drawn figure added (saturating cv.add)."""
        return cv.add(frame, self.figure)
| """Draws circle on the selected grid marker.\n
location should be a tuple with two numbers indicating place on the grid"""
width_offset = self.separators["x"][1] * 0.5
height_offset = self.separators["y"][1] * 0.5
center = (
int(self.separators["x"][x] + width_offset),
int(self.separators["y"][y] + height_offset),
)
radius = int(height_offset * 0.75)
self.figure = cv.circle(self.figure, center, radius, color, thickness) |
structures.py | # ===========================================================================
# dictionary.py -----------------------------------------------------------
# ===========================================================================
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def update_dict(a, b):
    """Merge ``b`` into ``a`` in place and return ``a``.

    The merge is skipped when ``a`` is not a dict, or when either argument is
    falsy (None, empty dict, ...); ``a`` is returned unchanged in that case.
    NOTE(review): an empty ``a`` dict is falsy and therefore never updated —
    confirm this is the intended behavior.
    """
    if isinstance(a, dict) and a and b:
        a.update(b)
    return a
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def get_dict_element(dict_list, field, query):
    """Return the first dict in ``dict_list`` whose ``field`` equals ``query``.

    Returns an empty dict when nothing matches. Raises KeyError if an entry
    lacks ``field`` (same as the original lookup).
    """
    matches = (entry for entry in dict_list if entry[field] == query)
    return next(matches, dict())
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def get_dict_elements(dict_list, field, query, update=False):
# @todo[comment]:
if not isinstance(field, list) and isinstance(query, list):
field = [field] * len(query)
| item = get_dict_element(dict_list, field_item, query_item)
if item:
if not update:
result.append(item)
else:
result.update(item)
return result | result = list() if not update else dict()
if isinstance(field, list) and isinstance(query, list):
for field_item, query_item in zip(field, query): |
run_protocol_testing.py | """
NCams Toolbox
Copyright 2019-2020 Charles M Greenspon, Anton Sobinov
https://github.com/CMGreenspon/NCams
"""
import os
import time
import math
import pylab
import ncams
BASE_DIR = os.path.join('C:\\', 'FLIR_cameras', 'PublicExample')
def | ():
cdatetime = '2019.12.19_10.38.38';
camera_config_dir = os.path.join(BASE_DIR, 'camconf_'+cdatetime)
camera_config = ncams.yaml_to_config(os.path.join(camera_config_dir, 'config.yaml'))
calibration_config, pose_estimation_config = ncams.load_camera_config(camera_config)
session_shortnames = (
'exp_session_2019.12.20_09.49.42_AS_CMG_1',
'exp_session_2019.12.20_09.56.37_AS_CMG_2',
'exp_session_2019.12.20_09.57.31_AS_CMG_3',
'exp_session_2019.12.20_09.58.36_AS_CMG_4',
'exp_session_2019.12.20_10.09.44_AS_CMG_5',
'exp_session_2019.12.20_10.16.13_AS_CMG_6',
'exp_session_2019.12.20_10.34.40_AS_CMG_7',
'exp_session_2019.12.20_10.39.45_AS_CMG_8',
'exp_session_2019.12.20_10.45.01_AS_CMG_9',
'exp_session_2019.12.20_10.51.06_AS_CMG_10',
'exp_session_2019.12.20_11.11.21_AS_CMG_11',
'exp_session_2019.12.20_11.17.24_AS_CMG_12',
'exp_session_2019.12.20_11.21.52_AS_CMG_13',
)
for session_shortname in session_shortnames:
print('Processing session {}'.format(session_shortname))
session_full_filename = os.path.join(BASE_DIR, session_shortname, 'session_config.yaml')
session_config = ncams.import_session_config(session_full_filename)
session_config['video_path'] = 'videos'
session_config['ud_video_path'] = 'undistorted_videos'
for p in (os.path.join(session_config['session_path'], session_config['video_path']),
os.path.join(session_config['session_path'], session_config['ud_video_path'])):
if not os.path.isdir(p):
print('Making dir {}'.format(p))
os.mkdir(p)
for serial in camera_config['serials']:
session_config['cam_dicts'][serial]['pic_dir'] = session_config['cam_dicts'][serial]['name']
session_config['cam_dicts'][serial]['video'] = os.path.join(
session_config['video_path'], session_config['cam_dicts'][serial]['name']+'.mp4')
session_config['cam_dicts'][serial]['ud_video'] = os.path.join(
session_config['ud_video_path'], session_config['cam_dicts'][serial]['name']+'.mp4')
for cam_dict in session_config['cam_dicts'].values():
image_list = ncams.utils.get_image_list(
sort=True, path=os.path.join(session_config['session_path'], cam_dict['pic_dir']))
print('\tMaking a video for camera {} from {} images.'.format(
cam_dict['name'], len(image_list)))
ncams.images_to_video(
image_list, cam_dict['video'], fps=session_config['frame_rate'],
output_folder=session_config['session_path'])
for icam, serial in enumerate(camera_config['serials']):
cam_dict = session_config['cam_dicts'][serial]
ncams.undistort_video(
os.path.join(session_config['session_path'], cam_dict['video']),
calibration_config['dicts'][serial],
crop_and_resize=False,
output_filename=os.path.join(session_config['session_path'], cam_dict['ud_video']))
print('\tCamera {} video undistorted.'.format(cam_dict['name']))
ncams.export_session_config(session_config)
# Entry point: run the conversion pipeline, then keep any pylab figures open.
if __name__ == '__main__':
    main()
    pylab.show()
| main |
step16.go | package main
import (
"fmt"
"image"
"image/color"
"image/draw"
"math"
"math/rand"
"time"
"github.com/faiface/pixel"
"github.com/faiface/pixel/pixelgl"
)
const (
	// globalScale uniformly scales speeds and interaction radii.
	globalScale = 1.28
	// Window size in pixels, in integer and float64 forms.
	w, h   = 192 * 5, 108 * 5
	fw, fh = float64(w), float64(h)
)

var (
	maxSpeed     = 3 * globalScale
	desireAmount = 5 * globalScale
	// friendRadius is how far a boid looks for flockmates; crowd and cohese
	// radii are derived from it.
	friendRadius = 60 * globalScale
	crowdRadius  = friendRadius / 1.4
	avoidRadius  = 32 * globalScale
	coheseRadius = friendRadius / 5

	// Mutable simulation state shared by the render loop.
	boids  = Boids{}
	avoids = Avoids{}

	gray = color.RGBA{35, 35, 35, 255}
)
// init seeds the global RNG and builds the static obstacle walls before main runs.
func init() {
	rand.Seed(time.Now().UnixNano())
	setup()
}
// setup populates the global avoids slice with obstacle points forming a
// horizontal wall 50px from the top and bottom edges and a vertical wall
// 50px from the left and right edges, spaced 10px apart.
func setup() {
	for x := 0; x < w; x += 10 {
		cx := float64(x + 5)
		avoids = append(avoids, newAvoid(pixel.V(cx, 50), 0, gray))
		avoids = append(avoids, newAvoid(pixel.V(cx, fh-50), 0, gray))
	}
	for y := 0; y < h; y += 10 {
		cy := float64(y + 5)
		avoids = append(avoids, newAvoid(pixel.V(50, cy), 0, gray))
		avoids = append(avoids, newAvoid(pixel.V(fw-50, cy), 0, gray))
	}
}
// run is the main event/render loop: it creates the window, handles keyboard
// and mouse input, and redraws the simulation every frame until the window
// closes. Must be called via pixelgl.Run.
func run() {
	win, err := pixelgl.NewWindow(pixelgl.WindowConfig{
		Bounds:      pixel.R(0, 0, fw, fh),
		Undecorated: true,
		VSync:       true,
	})
	if err != nil {
		panic(err)
	}

	canvas := pixelgl.NewCanvas(win.Bounds())
	last := time.Now()
	for !win.Closed() {
		// dt is the wall-clock time of the previous frame, used for new boids.
		dt := time.Since(last).Seconds()
		last = time.Now()

		// Esc/Q quits; C clears all boids and obstacles.
		win.SetClosed(win.JustPressed(pixelgl.KeyEscape) || win.JustPressed(pixelgl.KeyQ))
		if win.JustPressed(pixelgl.KeyC) {
			boids, avoids = nil, nil
			//setup()
		}
		// Number keys select preset desireAmount values; arrows tune it.
		if win.Pressed(pixelgl.Key1) {
			desireAmount = 1
		}
		if win.Pressed(pixelgl.Key2) {
			desireAmount = 2
		}
		if win.Pressed(pixelgl.Key3) {
			desireAmount = 10
		}
		if win.Pressed(pixelgl.Key4) {
			desireAmount = 20
		}
		if win.Pressed(pixelgl.KeyUp) {
			desireAmount += 0.1
			fmt.Println(desireAmount)
		}
		if win.Pressed(pixelgl.KeyDown) {
			desireAmount -= 0.1
			fmt.Println(desireAmount)
		}

		// O drops obstacles at the cursor; left mouse spawns boids there.
		pos := win.MousePosition()
		if win.Pressed(pixelgl.KeyO) {
			avoids = append(avoids, newAvoid(pos, 10, gray))
		}
		if win.Pressed(pixelgl.MouseButtonLeft) {
			boids = append(boids, randomColorBoidAt(pos, dt))
		}

		win.Clear(color.RGBA{10, 10, 10, 255})
		drawFrame(canvas)
		canvas.Draw(win, pixel.IM.Moved(win.Bounds().Center()))
		win.Update()
	}
}
// drawFrame renders one simulation step into an off-screen RGBA buffer
// (obstacles first, then each boid is stepped and drawn) and blits the
// buffer onto the canvas.
func drawFrame(canvas *pixelgl.Canvas) {
	buffer := image.NewRGBA(image.Rect(0, 0, w, h))
	for _, a := range avoids {
		a.draw(buffer)
	}
	for _, b := range boids {
		b.increment()
		b.wrap()
		// Friends are recomputed only when the boid's think counter wraps
		// to zero, staggering the O(n^2) neighbor scans across frames.
		if b.think == 0 {
			b.updateFriends()
		}
		b.flock()
		b.updatePosition()
		b.draw(buffer)
	}
	canvas.SetPixels(buffer.Pix)
}
// Boids is the set of all active boids in the simulation.
type Boids []*boid

// boid is a single flocking agent.
type boid struct {
	size          int     // half-width in pixels of the drawn square
	think         int     // frame counter; friends refresh when it hits 0
	angle         float64 // heading in degrees
	speed         float64
	position      pixel.Vec
	velocity      pixel.Vec
	color         color.RGBA // current (blended) draw color
	originalColor color.RGBA // color assigned at spawn; used for averaging
	friends       []*boid    // nearby boids, refreshed by updateFriends
}
// newBoid constructs a boid at (x, y) heading along angle (degrees)
// with the given speed and color. Size is randomized in 1..3 and the
// think offset is randomized so boids do not all rescan for friends on
// the same frame.
func newBoid(x, y, angle, speed float64, c color.RGBA) *boid {
	angleInRadians := angle * math.Pi / 180
	size := rand.Intn(3) + 1
	return &boid{
		size:  size,
		think: rand.Intn(10) * size,
		angle: angle,
		speed: speed,
		// Keyed fields: unkeyed pixel.Vec literals are flagged by go vet
		// (composites check) and break if the struct gains fields.
		position: pixel.Vec{X: x, Y: y},
		velocity: pixel.Vec{
			X: speed * math.Cos(angleInRadians),
			// Screen y grows downward, so the sine term is negated.
			Y: -speed * math.Sin(angleInRadians),
		},
		color:         c,
		originalColor: c,
		friends:       nil,
	}
}
func randomColorBoidAt(p pixel.Vec, dt float64) *boid |
// increment advances the think counter modulo 5. newBoid may seed think
// above 4 (rand.Intn(10)*size); the first increment brings it into the
// 0..4 cycle.
func (b *boid) increment() {
	b.think = (b.think + 1) % 5
}
// wrap keeps the boid on-screen by wrapping each coordinate modulo the
// window size. Adding fw/fh before the modulo shifts values up so that
// coordinates as low as -fw/-fh still wrap positive (Go's % is negative
// for negative operands).
// NOTE(review): positions below -fw/-fh would still come out negative —
// presumably per-frame movement is small enough that this never occurs;
// confirm.
func (b *boid) wrap() {
	b.position.X = float64(int(b.position.X+fw) % w)
	b.position.Y = float64(int(b.position.Y+fh) % h)
}
// updatePosition derives the velocity from the boid's heading and
// speed, then advances the position by one step.
// NOTE(review): this overwrites the velocity adjustments made earlier
// in the frame by flock()/limitSpeed(), since angle and speed are never
// updated from velocity — confirm this is intended.
func (b *boid) updatePosition() {
	rad := b.angle * math.Pi / 180
	vx := b.speed * math.Cos(rad)
	vy := -b.speed * math.Sin(rad) // screen y grows downward
	b.velocity = pixel.Vec{X: vx, Y: vy}
	b.position = b.position.Add(b.velocity)
}
// updateFriends rebuilds the friend list with every other boid whose
// position lies within a friendRadius-sided axis-aligned box around
// this boid (a cheap square-distance test, not a circle).
func (b *boid) updateFriends() {
	var nearby []*boid
	for _, other := range boids {
		if other == b {
			continue
		}
		dx := math.Abs(other.position.X - b.position.X)
		dy := math.Abs(other.position.Y - b.position.Y)
		if dx < friendRadius && dy < friendRadius {
			nearby = append(nearby, other)
		}
	}
	b.friends = nearby
}
// getAverageColor returns the mean per-channel difference between this
// boid's current color and its friends' original colors, treating each
// channel as circular (wrap-around at 255) so that e.g. 250 and 5
// average to a small delta rather than a large one.
//
// Callers must ensure b.friends is non-empty; with no friends the
// final division would be 0/0. (updateColor guards this.)
func (b *boid) getAverageColor() color.RGBA {
	// Removed an unreachable `if false { return red }` debug branch.
	c := len(b.friends)
	tr, tg, tb := 0, 0, 0
	br, bg, bb := int(b.color.R), int(b.color.G), int(b.color.B)
	for _, f := range b.friends {
		fr, fg, fb := int(f.originalColor.R), int(f.originalColor.G), int(f.originalColor.B)
		// For each channel, accumulate the shortest signed distance on
		// the 0..255 wheel from our color to the friend's color.
		if fr-br < -128 {
			tr += fr + 255 - br
		} else if fr-br > 128 {
			tr += fr - 255 - br
		} else {
			tr += fr - br
		}
		if fg-bg < -128 {
			tg += fg + 255 - bg
		} else if fg-bg > 128 {
			tg += fg - 255 - bg
		} else {
			tg += fg - bg
		}
		if fb-bb < -128 {
			tb += fb + 255 - bb
		} else if fb-bb > 128 {
			tb += fb - 255 - bb
		} else {
			tb += fb - bb
		}
	}
	return color.RGBA{
		uint8(float64(tr) / float64(c)),
		uint8(float64(tg) / float64(c)),
		uint8(float64(tb) / float64(c)),
		255,
	}
}
// getAverageDir returns the alignment steering vector: the sum of each
// friend's unit velocity, weighted by 1/distance so closer friends
// influence the heading more.
func (b *boid) getAverageDir() pixel.Vec {
	sum := pixel.V(0, 0)
	for _, f := range b.friends {
		d := dist(b.position, f.position)
		if d > 0 && d < friendRadius {
			sum = sum.Add(div(f.velocity.Unit(), d))
		}
	}
	return sum
}
// getAvoidDir returns the separation steering vector: for each friend
// closer than crowdRadius, push away along the line between them,
// weighted by 1/distance.
func (b *boid) getAvoidDir() pixel.Vec {
	steer := pixel.V(0, 0)
	for _, f := range b.friends {
		d := dist(b.position, f.position)
		if d > 0 && d < crowdRadius {
			diff := div(b.position.Sub(f.position).Unit(), d)
			steer = steer.Add(diff)
		}
	}
	return steer
}
// getAvoidObjects returns the obstacle-avoidance steering vector:
// like getAvoidDir, but against every static obstacle in avoids
// within avoidRadius rather than against friends.
func (b *boid) getAvoidObjects() pixel.Vec {
	steer := pixel.V(0, 0)
	for _, f := range avoids {
		d := dist(b.position, f.position)
		if d > 0 && d < avoidRadius {
			diff := div(b.position.Sub(f.position).Unit(), d)
			steer = steer.Add(diff)
		}
	}
	return steer
}
// getCohesion returns a steering vector of magnitude desireAmount
// pointing toward the centroid of friends within coheseRadius, or the
// zero vector when no friend is that close.
func (b *boid) getCohesion() pixel.Vec {
	sum := pixel.V(0, 0)
	count := 0
	for _, other := range b.friends {
		d := dist(b.position, other.position)
		if d > 0 && d < coheseRadius {
			sum = sum.Add(other.position)
			count++
		}
	}
	if count > 0 {
		// Centroid minus our position gives the desired direction.
		desired := div(sum, float64(count)).Sub(b.position)
		return desired.Unit().Scaled(desireAmount)
	}
	return pixel.V(0, 0)
}
// move adds the steering vector v to the boid's velocity.
func (b *boid) move(v pixel.Vec) {
	b.velocity = b.velocity.Add(v)
}
// limitSpeed rescales the velocity to magnitude exactly s. Despite the
// name it normalizes rather than caps: slower boids are sped up too.
// NOTE(review): Unit() on a zero-length velocity would produce NaNs —
// presumably speeds are always nonzero here; confirm.
func (b *boid) limitSpeed(s float64) {
	b.velocity = b.velocity.Unit().Scaled(s)
}
// updateColor nudges the boid's color 3% of the way along the average
// (wrap-around) color delta of its friends, gradually blending colors
// within a flock. No-op for boids with no friends.
// NOTE(review): `% 255` wraps a channel value of exactly 255 to 0 —
// presumably `% 256` was intended; confirm before changing, as it
// alters the visuals.
func (b *boid) updateColor() {
	if len(b.friends) > 0 {
		ac := b.getAverageColor()
		nr, ng, nb := float64(b.color.R), float64(b.color.G), float64(b.color.B)
		nr += float64(ac.R) * 0.03
		ng += float64(ac.G) * 0.03
		nb += float64(ac.B) * 0.03
		b.color = color.RGBA{uint8(int(nr) % 255), uint8(int(ng) % 255), uint8(int(nb) % 255), 255}
	}
}
// flock applies the classic boid steering rules — alignment, cohesion,
// separation from friends, obstacle avoidance — plus a small random
// jitter, then normalizes the speed and blends the color toward the
// flock. The Scaled(1) calls are no-op weights left in as tuning knobs.
func (b *boid) flock() {
	var (
		align        = b.getAverageDir().Scaled(1)
		cohesion     = b.getCohesion().Scaled(1)
		avoidDir     = b.getAvoidDir().Scaled(1)
		avoidObjects = b.getAvoidObjects().Scaled(1)
		noise        = pixel.V(rand.Float64()*2-1, rand.Float64()*2-1).Scaled(0.05)
	)
	b.move(align)
	b.move(avoidDir)
	b.move(avoidObjects)
	b.move(noise)
	b.move(cohesion)
	b.limitSpeed(maxSpeed)
	b.updateColor()
}
// draw paints the boid as a filled square of side 2*size centered on
// its position.
func (b *boid) draw(m *image.RGBA) {
	x, y := int(b.position.X), int(b.position.Y)
	r := image.Rect(x-b.size, y-b.size, x+b.size, y+b.size)
	draw.Draw(m, r, &image.Uniform{b.color}, image.ZP, draw.Src)
}
// Avoids is the set of all static obstacle points boids steer away from.
type Avoids []*avoid

// newAvoid constructs an obstacle at p with the given size and color.
func newAvoid(p pixel.Vec, s float64, c color.RGBA) *avoid {
	return &avoid{position: p, size: s, color: c}
}

// avoid is a static obstacle point.
type avoid struct {
	position pixel.Vec
	// size is stored but not referenced by draw or the avoidance logic
	// in this file (avoidRadius is used instead).
	size  float64
	color color.RGBA
}
// draw paints the obstacle as a fixed 6x6 rectangle (offset slightly
// upward), regardless of its size field.
func (a *avoid) draw(m *image.RGBA) {
	x, y := int(a.position.X), int(a.position.Y)
	r := image.Rect(x-3, y-5, x+3, y+1)
	draw.Draw(m, r, &image.Uniform{a.color}, image.ZP, draw.Src)
}
// randomColor returns a fully opaque color with each channel drawn
// uniformly from [0, 255). An `if true { return ... }` guard made the
// old channel-biased palette code below it unreachable; that dead code
// has been removed without changing behavior.
func randomColor() color.RGBA {
	return color.RGBA{
		R: uint8(rand.Intn(255)),
		G: uint8(rand.Intn(255)),
		B: uint8(rand.Intn(255)),
		A: 255,
	}
}
// flip returns +1 or -1 with equal probability.
func flip() float64 {
	if rand.Float64() <= 0.5 {
		return -1.0
	}
	return 1.0
}
// dist returns the L1 (Manhattan) distance between a and b — cheaper
// than Euclidean, and the metric used by all proximity tests here.
func dist(a, b pixel.Vec) float64 {
	return math.Abs(a.X-b.X) + math.Abs(a.Y-b.Y)
}
// div returns v scaled by 1/d. d must be nonzero.
func div(v pixel.Vec, d float64) pixel.Vec {
	return pixel.V(v.X/d, v.Y/d)
}
// main hands control to pixelgl, which must own the OS main thread;
// run is invoked as the actual program entry point.
func main() {
	pixelgl.Run(run)
}
| {
angle := (90.0 + rand.Float64()) * 180.0 * flip()
speed := maxSpeed * dt * (10 + (rand.Float64() * 10))
return newBoid(p.X, p.Y, angle, speed, randomColor())
} |
forms.py | # django
from django import forms
from django.contrib.auth.models import User
# choices
from core.cooggerapp.choices import *
# models
from core.cooggerapp.models import (
Content, OtherAddressesOfUsers, UserProfile,
ReportModel, UTopic, Issue)
from .models.utils import send_mail
class UTopicForm(forms.ModelForm):
    """ModelForm for creating or editing a user topic (UTopic)."""

    class Meta:
        model = UTopic
        fields = ["name", "image_address", "definition", "tags", "address"]
class ContentForm(forms.ModelForm):
    """ModelForm for publishing a content post, with a required commit message."""

    # Free-form commit message describing what changed in this update;
    # declared explicitly because it is not a Content model field.
    msg = forms.CharField(
        max_length=150,
        label="Commit Message",
        help_text="What has changed with this update?"
    )

    class Meta:
        model = Content
        fields = ["category", "language", "title", "body", "tags"]

    @classmethod
    def send_mail(cls, form):
        # Email every follower of the author who has an email address.
        send_mail(
            subject = f"{form.user} publish a new content | coogger".title(),
            template_name="email/post.html",
            context=dict(
                get_absolute_url=form.get_absolute_url
            ),
            to=[u.user.email for u in form.user.follow.follower if u.user.email],
        )
class ReplyForm(forms.ModelForm):
    """ModelForm for replying to a post (a reply is itself a Content)."""

    class Meta:
        model = Content
        fields = ["title", "body"]
class AddressesForm(forms.ModelForm):
    """ModelForm for adding an external address/link to a user profile."""

    class Meta:
        model = OtherAddressesOfUsers
        fields = ["choices", "address"]
class CSettingsUserForm(forms.ModelForm):
    """ModelForm for editing the built-in Django User account fields."""

    class Meta:
        model = User
        fields = ["first_name", "last_name", "username", "email"]
class OtherAddressesOfUsersForm(forms.ModelForm):
class Meta:
model = OtherAddressesOfUsers |
class AboutForm(forms.ModelForm):
    """ModelForm for editing the free-text "about" section of a profile."""

    class Meta:
        model = UserProfile
        fields = ["about"]
class ReportsForm(forms.ModelForm):
    """ModelForm for filing a report/complaint."""

    class Meta:
        model = ReportModel
        fields = ["complaints", "add"]
class NewIssueForm(forms.ModelForm):
    """ModelForm for opening a new issue on a topic."""

    class Meta:
        model = Issue
        fields = ["title", "body"]

    @classmethod
    def send_mail(cls, form):
        # Notify the topic owner by email that an issue was opened.
        send_mail(
            subject=f"{form.user} opened a new issue on your {form.utopic.name} topic | coogger".title(),
            template_name="email/new-issue.html",
            context=dict(
                form=form,
            ),
            to=[form.utopic.user.email]
        )
class NewIssueReplyForm(forms.ModelForm):
    """ModelForm for replying to an existing issue (body only)."""

    body = forms.CharField(
        widget=forms.Textarea,
        help_text="problem | question | or anything else")

    class Meta:
        model = Issue
        fields = ["body"]
class NewContentReplyForm(forms.ModelForm):
body = forms.CharField(
widget=forms.Textarea,
help_text="Your content | problem | question | or anything else")
class Meta:
model = Content
fields = ["body"] | fields = ["choices", "address"] |
open-CDN-menu-command.user.js | // ==UserScript==
// @name Open CDN MenuCommand
// @namespace https://github.com/Cologler/monkeys-javascript
// @version 0.1.1
// @description register open CDN MenuCommand
// @author Cologler ([email protected])
// @match https://github.com/*
// @grant GM.registerMenuCommand
// @grant GM.openInTab
// @noframes
// @license MIT
// ==/UserScript==
/**
* @typedef GithubMetadata
* @property {string} owner
* @property {string} repo
* @property {string} branch
* @property {string} path
*/
(function () {
'use strict';
/**
*
* @param {Location} location
* @returns {GithubMetadata}
*/
/**
 * Extract owner/repo/branch/path metadata from a github.com location.
 *
 * @param {Location} location
 * @returns {GithubMetadata} the parsed metadata, or null when the path
 *     is not a repository root or a tree/blob view. branch and path are
 *     undefined for a repository root.
 */
function parseGithubMetadataFromLocation(location) {
    const pattern = /^\/(?<owner>[^/]+)\/(?<repo>[^/]+)(?:\/(?<type>tree|blob)\/(?<branch>[^/]+)(?:\/(?<path>.+))?)?$/;
    const match = pattern.exec(location.pathname);
    if (match === null) {
        return null;
    }
    const { owner, repo, branch, path } = match.groups;
    return { owner, repo, branch, path };
}
/**
* @param {GithubMetadata} metadata
*/
/**
 * Build the jsDelivr package-browse URL for a GitHub repository, e.g.
 * https://www.jsdelivr.com/package/gh/OWNER/REPO[?path=PATH].
 *
 * @param {GithubMetadata} metadata
 * @returns {string}
 */
function getJsDelivrUrlFromGithubMetadata(metadata) {
    const base = `https://www.jsdelivr.com/package/gh/${metadata.owner}/${metadata.repo}`;
    return metadata.path ? `${base}?path=${metadata.path}` : base;
}
function | () {
const cdns = [];
if (window.location.hostname === 'github.com') {
const githubMetadata = parseGithubMetadataFromLocation(window.location);
if (githubMetadata) {
const jsDelivrUrl = getJsDelivrUrlFromGithubMetadata(githubMetadata);
cdns.push({
name: 'jsDelivr',
url: jsDelivrUrl
});
}
}
return cdns;
}
for (const cdn of getCDNInfos()) {
GM.registerMenuCommand(cdn.name, () => {
console.info(`GoTo: ${cdn.url}`);
GM.openInTab(cdn.url);
});
}
})();
| getCDNInfos |
conn.go | //
// Copyright 2022 SkyAPM org
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package sql
import (
"context"
"database/sql"
"time"
"github.com/powerapm/go2sky"
)
// Conn wraps sql.Conn so that every operation is reported as a span on
// the go2sky tracer owned by the parent DB.
type Conn struct {
	*sql.Conn
	db *DB // provides the tracer and reporting options
}
// PingContext verifies the connection is alive, recording the call as
// a "ping" span and tagging the span with any error.
func (c *Conn) PingContext(ctx context.Context) error {
	span, err := createSpan(ctx, c.db.tracer, c.db.opts, "ping")
	if err != nil {
		return err
	}
	defer span.End()
	if pingErr := c.Conn.PingContext(ctx); pingErr != nil {
		span.Error(time.Now(), pingErr.Error())
		return pingErr
	}
	return nil
}
// ExecContext executes a statement, recording it as an "execute" span.
// The SQL text and its parameters are attached as span tags only when
// the corresponding reporting options are enabled.
func (c *Conn) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
	span, err := createSpan(ctx, c.db.tracer, c.db.opts, "execute")
	if err != nil {
		return nil, err
	}
	defer span.End()
	if c.db.opts.reportQuery {
		span.Tag(go2sky.TagDBStatement, query)
	}
	if c.db.opts.reportParam {
		span.Tag(go2sky.TagDBSqlParameters, argsToString(args))
	}
	res, err := c.Conn.ExecContext(ctx, query, args...)
	if err != nil {
		span.Error(time.Now(), err.Error())
	}
	return res, err
}
// QueryContext runs a query, recording it as a "query" span. The SQL
// text and its parameters are attached as span tags only when the
// corresponding reporting options are enabled.
func (c *Conn) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
	span, err := createSpan(ctx, c.db.tracer, c.db.opts, "query")
	if err != nil {
		return nil, err
	}
	defer span.End()
	if c.db.opts.reportQuery {
		span.Tag(go2sky.TagDBStatement, query)
	}
	if c.db.opts.reportParam {
		span.Tag(go2sky.TagDBSqlParameters, argsToString(args))
	}
	rows, err := c.Conn.QueryContext(ctx, query, args...)
	if err != nil {
		span.Error(time.Now(), err.Error())
	}
	return rows, err
}
// QueryRowContext runs a single-row query, recording it as a "query"
// span with the same optional statement/parameter tags as QueryContext.
// NOTE(review): when span creation fails this returns nil rather than a
// *sql.Row carrying the error — a caller that chains .Scan() would
// panic. Confirm whether createSpan can fail in practice here.
func (c *Conn) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
	span, err := createSpan(ctx, c.db.tracer, c.db.opts, "query")
	if err != nil {
		return nil
	}
	defer span.End()
	if c.db.opts.reportQuery {
		span.Tag(go2sky.TagDBStatement, query)
	}
	if c.db.opts.reportParam {
		span.Tag(go2sky.TagDBSqlParameters, argsToString(args))
	}
	return c.Conn.QueryRowContext(ctx, query, args...)
}
// PrepareContext prepares a statement and wraps it in a traced Stmt
// that remembers the query text for later span tagging. No span is
// recorded for the prepare itself.
// NOTE(review): the wrapped Stmt is returned even when err is non-nil
// (its inner Stmt is then nil); callers must check err before use.
func (c *Conn) PrepareContext(ctx context.Context, query string) (*Stmt, error) {
	stmt, err := c.Conn.PrepareContext(ctx, query)
	return &Stmt{
		Stmt:  stmt,
		db:    c.db,
		query: query,
	}, err
}
// BeginTx support trace
func (c *Conn) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
span, err := createSpan(ctx, c.db.tracer, c.db.opts, "begin")
if err != nil {
return nil, err
}
defer span.End()
tx, err := c.Conn.BeginTx(ctx, opts)
if err != nil {
span.Error(time.Now(), err.Error()) | return nil, err
}
return &Tx{
Tx: tx,
db: c.db,
ctx: ctx,
}, nil
} | |
kfapp.go | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package apps
import (
v1alpha1 "github.com/google/kf/pkg/apis/kf/v1alpha1"
"github.com/google/kf/pkg/internal/envutil"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// KfApp provides a facade around Knative services for accessing and mutating its
// values. It is a type alias-style wrapper: converting to/from
// v1alpha1.App is free and shares no extra state.
type KfApp v1alpha1.App
// GetName retrieves the name of the app.
func (k *KfApp) GetName() string {
	return k.Name
}
// SetName sets the name of the app.
func (k *KfApp) SetName(name string) {
	k.Name = name
}
// SetNamespace sets the namespace for the app.
func (k *KfApp) SetNamespace(namespace string) {
	k.Namespace = namespace
}
// GetNamespace gets the namespace for the app.
func (k *KfApp) GetNamespace() string {
	return k.Namespace
}
// getOrCreateRevisionTemplateSpec returns a pointer to the app's
// template spec; the struct always exists, so nothing is created.
func (k *KfApp) getOrCreateRevisionTemplateSpec() *v1alpha1.AppSpecTemplate {
	return &k.Spec.Template
}
// getRevisionTemplateSpecOrNil returns the app's template spec, or nil
// when called on a nil receiver (safe for optional chaining).
func (k *KfApp) getRevisionTemplateSpecOrNil() *v1alpha1.AppSpecTemplate {
	if k == nil {
		return nil
	}
	return &k.Spec.Template
}
// getOrCreateContainer returns the app's first container, inserting an
// empty one if the containers slice is empty.
func (k *KfApp) getOrCreateContainer() *corev1.Container {
	rl := k.getOrCreateRevisionTemplateSpec()
	if len(rl.Spec.Containers) == 0 {
		rl.Spec.Containers = []v1.Container{{}}
	}
	// Reuse rl instead of re-deriving the template spec a second time;
	// both resolve to &k.Spec.Template, so behavior is unchanged.
	return &rl.Spec.Containers[0]
}
// getContainerOrNil returns the app's first container, or nil when the
// receiver is nil or no container has been set.
func (k *KfApp) getContainerOrNil() *corev1.Container {
	if rl := k.getRevisionTemplateSpecOrNil(); rl != nil {
		if len(rl.Spec.Containers) != 0 {
			return &rl.Spec.Containers[0]
		}
	}
	return nil
}
// GetContainer returns the container of the app or nil if it's blank.
func (k *KfApp) GetContainer() *corev1.Container {
	return k.getContainerOrNil()
}
// GetEnvVars reads the environment variables off an app. Returns nil
// when the app has no container.
func (k *KfApp) GetEnvVars() []corev1.EnvVar {
	if container := k.getContainerOrNil(); container != nil {
		return container.Env
	}
	return nil
}
// SetEnvVars sets environment variables on an app, replacing any that
// were previously set and creating the container if necessary.
func (k *KfApp) SetEnvVars(env []corev1.EnvVar) {
	k.getOrCreateContainer().Env = env
}
// MergeEnvVars adds the environment variables listed to the existing ones,
// overwriting duplicates by key (the newly passed values win).
func (k *KfApp) MergeEnvVars(env []corev1.EnvVar) {
	k.SetEnvVars(envutil.DeduplicateEnvVars(append(k.GetEnvVars(), env...)))
}
// DeleteEnvVars removes environment variables with the given keys;
// unknown names are ignored.
func (k *KfApp) DeleteEnvVars(names []string) {
	k.SetEnvVars(envutil.RemoveEnvVars(names, k.GetEnvVars()))
}
// setResourceRequest sets the request for resource r on the app's
// container, lazily creating the request list. Passing a nil quantity
// clears the request instead.
func (k *KfApp) setResourceRequest(r v1.ResourceName, quantity *resource.Quantity) {
	container := k.getOrCreateContainer()
	resourceRequests := container.Resources.Requests
	if resourceRequests == nil {
		resourceRequests = v1.ResourceList{}
	}
	if quantity == nil {
		delete(resourceRequests, r)
	} else {
		resourceRequests[r] = *quantity
	}
	container.Resources.Requests = resourceRequests
}
// GetHealthCheck gets the readiness probe or nil if one doesn't exist
// (including when the app has no container).
func (k *KfApp) GetHealthCheck() *corev1.Probe {
	if cont := k.getContainerOrNil(); cont != nil {
		return cont.ReadinessProbe
	}
	return nil
}
// GetServiceBindings returns the service bindings declared on the app.
func (k *KfApp) GetServiceBindings() []v1alpha1.AppSpecServiceBinding {
	return k.Spec.ServiceBindings
}
// SetContainer sets the container for the app, replacing any existing
// containers with the single one given.
func (k *KfApp) SetContainer(container corev1.Container) {
	k.Spec.Template.Spec.Containers = []corev1.Container{container}
}
// GetClusterURL gets the internal address of the app or the empty string if
// unset.
func (k *KfApp) GetClusterURL() string {
clusterURL := ""
if k.Status.Address != nil && k.Status.Address.URL != nil |
return clusterURL
}
// ToApp casts this alias back into an App. The returned value is a
// copy; mutating it does not affect the receiver.
func (k *KfApp) ToApp() *v1alpha1.App {
	app := v1alpha1.App(*k)
	return &app
}
// NewKfApp creates a new KfApp with TypeMeta populated and a single
// empty container ready to be configured.
func NewKfApp() KfApp {
	return KfApp{
		TypeMeta: metav1.TypeMeta{
			Kind:       "App",
			APIVersion: "kf.dev/v1alpha1",
		},
		Spec: v1alpha1.AppSpec{
			Template: v1alpha1.AppSpecTemplate{
				Spec: v1.PodSpec{
					Containers: []v1.Container{{}},
				},
			},
		},
	}
}
// NewFromApp creates a new KfApp from the given service pointer;
// modifications to the KfApp will affect the underlying app because the
// conversion is a pointer cast, not a copy.
func NewFromApp(app *v1alpha1.App) *KfApp {
	return (*KfApp)(app)
}
| {
clusterURL = k.Status.Address.URL.String()
} |
context.rs | use std::collections::HashMap;
use std::num::NonZeroU64;
use anyhow::format_err;
use log::debug;
use crate::core::interning::InternedString;
use crate::core::{Dependency, PackageId, SourceId, Summary};
use crate::util::Graph;
use super::dep_cache::RegistryQueryer;
use super::errors::ActivateResult;
use super::types::{ConflictMap, ConflictReason, FeaturesSet, ResolveOpts};
pub use super::encode::Metadata;
pub use super::encode::{EncodableDependency, EncodablePackageId, EncodableResolve};
pub use super::resolve::Resolve;
// A `Context` is basically a bunch of local resolution information which is
// kept around for all `BacktrackFrame` instances. As a result, this runs the
// risk of being cloned *a lot* so we want to make this as cheap to clone as
// possible. (That is why the collections are the persistent `im_rc` ones:
// cloning them shares structure instead of deep-copying.)
#[derive(Clone)]
pub struct Context {
    pub age: ContextAge,
    pub activations: Activations,
    /// list the features that are activated for each package
    pub resolve_features: im_rc::HashMap<PackageId, FeaturesSet>,
    /// get the package that will be linking to a native library by its links attribute
    pub links: im_rc::HashMap<InternedString, PackageId>,
    /// for each package the list of names it can see,
    /// then for each name the exact version that name represents and whether the name is public.
    /// `None` when public/private dependency checking is disabled.
    pub public_dependency: Option<PublicDependency>,
    /// a way to look up for a package in activations what packages required it
    /// and all of the exact deps that it fulfilled.
    pub parents: Graph<PackageId, im_rc::HashSet<Dependency>>,
}
/// When backtracking it can be useful to know how far back to go.
/// The `ContextAge` of a `Context` is a monotonically increasing counter of the number
/// of decisions made to get to this state.
/// Several structures store the `ContextAge` when it was added,
/// to be used in `find_candidate` for backtracking.
pub type ContextAge = usize;

/// Find the activated version of a crate based on the name, source, and semver compatibility.
/// By storing this in a hash map we ensure that there is only one
/// semver compatible version of each crate.
/// This also stores the `ContextAge`.
pub type ActivationsKey = (InternedString, SourceId, SemverCompatibility);
pub type Activations = im_rc::HashMap<ActivationsKey, (Summary, ContextAge)>;
/// A type that represents when cargo treats two Versions as compatible.
/// Versions `a` and `b` are compatible if their left-most nonzero digit is the
/// same. The variant records that left-most nonzero component.
#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, PartialOrd, Ord)]
pub enum SemverCompatibility {
    Major(NonZeroU64),
    Minor(NonZeroU64),
    Patch(u64),
}
impl From<&semver::Version> for SemverCompatibility {
    /// Classify a version by its left-most nonzero component: 1.2.3 is
    /// Major(1), 0.2.3 is Minor(2), 0.0.3 (and 0.0.0) is Patch(3).
    fn from(ver: &semver::Version) -> Self {
        if let Some(m) = NonZeroU64::new(ver.major) {
            return SemverCompatibility::Major(m);
        }
        if let Some(m) = NonZeroU64::new(ver.minor) {
            return SemverCompatibility::Minor(m);
        }
        SemverCompatibility::Patch(ver.patch)
    }
}
impl PackageId {
    /// The key under which this package is recorded in `Activations`:
    /// name + source + semver-compatibility bucket.
    pub fn as_activations_key(self) -> ActivationsKey {
        (self.name(), self.source_id(), self.version().into())
    }
}
impl Context {
    /// Create an empty resolution context. Public/private dependency
    /// tracking is only allocated when the caller asks for it.
    pub fn new(check_public_visible_dependencies: bool) -> Context {
        Context {
            age: 0,
            resolve_features: im_rc::HashMap::new(),
            links: im_rc::HashMap::new(),
            public_dependency: if check_public_visible_dependencies {
                Some(PublicDependency::new())
            } else {
                None
            },
            parents: Graph::new(),
            activations: im_rc::HashMap::new(),
        }
    }
    /// Activate this summary by inserting it into our list of known activations.
    ///
    /// The `parent` passed in here is the parent summary/dependency edge which
    /// caused `summary` to get activated. This may not be present for the root
    /// crate, for example.
    ///
    /// Returns `true` if this summary with the given features is already activated.
    pub fn flag_activated(
        &mut self,
        summary: &Summary,
        opts: &ResolveOpts,
        parent: Option<(&Summary, &Dependency)>,
    ) -> ActivateResult<bool> {
        let id = summary.package_id();
        let age: ContextAge = self.age;
        match self.activations.entry(id.as_activations_key()) {
            im_rc::hashmap::Entry::Occupied(o) => {
                // Already active: only one semver-compatible version may
                // exist, so it must be this exact summary. Fall through to
                // the feature check below.
                debug_assert_eq!(
                    &o.get().0,
                    summary,
                    "cargo does not allow two semver compatible versions"
                );
            }
            im_rc::hashmap::Entry::Vacant(v) => {
                // First activation: claim the `links` attribute, which must
                // be unique across the whole graph.
                if let Some(link) = summary.links() {
                    if self.links.insert(link, id).is_some() {
                        return Err(format_err!(
                            "Attempting to resolve a dependency with more then \
                             one crate with links={}.\nThis will not build as \
                             is. Consider rebuilding the .lock file.",
                            &*link
                        )
                        .into());
                    }
                }
                v.insert((summary.clone(), age));

                // If we've got a parent dependency which activated us, *and*
                // the dependency has a different source id listed than the
                // `summary` itself, then things get interesting. This basically
                // means that a `[patch]` was used to augment `dep.source_id()`
                // with `summary`.
                //
                // In this scenario we want to consider the activation key, as
                // viewed from the perspective of `dep.source_id()`, as being
                // fulfilled. This means that we need to add a second entry in
                // the activations map for the source that was patched, in
                // addition to the source of the actual `summary` itself.
                //
                // Without this it would be possible to have both 1.0.0 and
                // 1.1.0 "from crates.io" in a dependency graph if one of those
                // versions came from a `[patch]` source.
                if let Some((_, dep)) = parent {
                    if dep.source_id() != id.source_id() {
                        let key = (id.name(), dep.source_id(), id.version().into());
                        let prev = self.activations.insert(key, (summary.clone(), age));
                        if let Some((previous_summary, _)) = prev {
                            return Err(
                                (previous_summary.package_id(), ConflictReason::Semver).into()
                            );
                        }
                    }
                }

                return Ok(false);
            }
        }
        debug!("checking if {} is already activated", summary.package_id());
        if opts.features.all_features {
            return Ok(false);
        }
        // The package was already active; it only counts as "already
        // activated" if the previously activated feature set covers the
        // requested one (including the implicit "default" feature).
        let has_default_feature = summary.features().contains_key("default");
        Ok(match self.resolve_features.get(&id) {
            Some(prev) => {
                opts.features.features.is_subset(prev)
                    && (!opts.features.uses_default_features
                        || prev.contains("default")
                        || !has_default_feature)
            }
            None => {
                opts.features.features.is_empty()
                    && (!opts.features.uses_default_features || !has_default_feature)
            }
        })
    }
    /// If the package is active returns the `ContextAge` when it was added.
    /// Returns `None` if a different (semver-compatible) version of the
    /// package occupies the slot.
    pub fn is_active(&self, id: PackageId) -> Option<ContextAge> {
        self.activations
            .get(&id.as_activations_key())
            .and_then(|(s, l)| if s.package_id() == id { Some(*l) } else { None })
    }
    /// If the conflict reason on the package still applies returns the `ContextAge` when it was added.
    /// For public-dependency reasons this is the age of the *newest* fact the
    /// conflict depends on, so backtracking knows how far back invalidates it.
    pub fn still_applies(&self, id: PackageId, reason: &ConflictReason) -> Option<ContextAge> {
        self.is_active(id).and_then(|mut max| {
            match reason {
                ConflictReason::PublicDependency(name) => {
                    if &id == name {
                        return Some(max);
                    }
                    max = std::cmp::max(max, self.is_active(*name)?);
                    max = std::cmp::max(
                        max,
                        self.public_dependency
                            .as_ref()
                            .unwrap()
                            .can_see_item(*name, id)?,
                    );
                }
                ConflictReason::PubliclyExports(name) => {
                    if &id == name {
                        return Some(max);
                    }
                    max = std::cmp::max(max, self.is_active(*name)?);
                    max = std::cmp::max(
                        max,
                        self.public_dependency
                            .as_ref()
                            .unwrap()
                            .publicly_exports_item(*name, id)?,
                    );
                }
                // All other reasons only require the package itself to be active.
                _ => {}
            }
            Some(max)
        })
    }
    /// Checks whether all of `parent` and the keys of `conflicting activations`
    /// are still active.
    /// If so returns the `ContextAge` when the newest one was added.
    pub fn is_conflicting(
        &self,
        parent: Option<PackageId>,
        conflicting_activations: &ConflictMap,
    ) -> Option<usize> {
        let mut max = 0;
        if let Some(parent) = parent {
            max = std::cmp::max(max, self.is_active(parent)?);
        }
        for (id, reason) in conflicting_activations.iter() {
            // `?` bails out as soon as one conflict no longer applies.
            max = std::cmp::max(max, self.still_applies(*id, reason)?);
        }
        Some(max)
    }
pub fn resolve_replacements(
&self, | ) -> HashMap<PackageId, PackageId> {
self.activations
.values()
.filter_map(|(s, _)| registry.used_replacement_for(s.package_id()))
.collect()
}
    /// Materialize the final dependency graph: every activated package as a
    /// node, with the parent edges converted from persistent `im_rc` sets to
    /// plain `std` sets.
    pub fn graph(&self) -> Graph<PackageId, std::collections::HashSet<Dependency>> {
        let mut graph: Graph<PackageId, std::collections::HashSet<Dependency>> = Graph::new();
        self.activations
            .values()
            .for_each(|(r, _)| graph.add(r.package_id()));
        for i in self.parents.iter() {
            graph.add(*i);
            for (o, e) in self.parents.edges(i) {
                let old_link = graph.link(*o, *i);
                assert!(old_link.is_empty());
                *old_link = e.iter().cloned().collect();
            }
        }
        graph
    }
}
impl Graph<PackageId, im_rc::HashSet<Dependency>> {
    /// Iterate the direct parents of `p`, paired with whether any edge from
    /// that parent depends on `p` publicly.
    pub fn parents_of(&self, p: PackageId) -> impl Iterator<Item = (PackageId, bool)> + '_ {
        self.edges(&p)
            .map(|(grand, d)| (*grand, d.iter().any(|x| x.is_public())))
    }
}
#[derive(Clone, Debug, Default)]
pub struct PublicDependency {
    /// For each active package the set of all the names it can see,
    /// for each name the exact package that name resolves to,
    /// the `ContextAge` when it was first visible,
    /// and the `ContextAge` when it was first exported
    /// (`None` while it has only ever been seen privately).
    inner: im_rc::HashMap<
        PackageId,
        im_rc::HashMap<InternedString, (PackageId, ContextAge, Option<ContextAge>)>,
    >,
}
impl PublicDependency {
    /// Create an empty visibility table.
    fn new() -> Self {
        PublicDependency {
            inner: im_rc::HashMap::new(),
        }
    }
    /// All packages `candidate_pid` publicly re-exports, always including
    /// itself (every package exports itself even before being recorded).
    fn publicly_exports(&self, candidate_pid: PackageId) -> Vec<PackageId> {
        self.inner
            .get(&candidate_pid) // if we have seen it before
            .iter()
            .flat_map(|x| x.values()) // all the things we have stored
            .filter(|x| x.2.is_some()) // as publicly exported
            .map(|x| x.0)
            .chain(Some(candidate_pid)) // but even if not we know that everything exports itself
            .collect()
    }
    /// If `candidate_pid` publicly exports `target`, return the `ContextAge`
    /// at which that export first became visible; `None` otherwise.
    /// `target` must not be `candidate_pid` itself (self-export is implicit).
    fn publicly_exports_item(
        &self,
        candidate_pid: PackageId,
        target: PackageId,
    ) -> Option<ContextAge> {
        debug_assert_ne!(candidate_pid, target);
        let out = self
            .inner
            .get(&candidate_pid)
            .and_then(|names| names.get(&target.name()))
            .filter(|(p, _, _)| *p == target)
            .and_then(|(_, _, age)| *age);
        // Cross-check the fast lookup against the slower full enumeration.
        debug_assert_eq!(
            out.is_some(),
            self.publicly_exports(candidate_pid).contains(&target)
        );
        out
    }
    /// If `candidate_pid` can see `target` (publicly or privately), return
    /// the `ContextAge` at which it first became visible.
    pub fn can_see_item(&self, candidate_pid: PackageId, target: PackageId) -> Option<ContextAge> {
        self.inner
            .get(&candidate_pid)
            .and_then(|names| names.get(&target.name()))
            .filter(|(p, _, _)| *p == target)
            .map(|(_, age, _)| *age)
    }
    /// Record that `parent_pid` now depends on `candidate_pid` (publicly if
    /// `is_public`), propagating visibility of everything `candidate_pid`
    /// publicly exports up through all transitive parents that gain sight
    /// of it.
    pub fn add_edge(
        &mut self,
        candidate_pid: PackageId,
        parent_pid: PackageId,
        is_public: bool,
        age: ContextAge,
        parents: &Graph<PackageId, im_rc::HashSet<Dependency>>,
    ) {
        // one tricky part is that `candidate_pid` may already be active and
        // have public dependencies of its own. So we not only need to mark
        // `candidate_pid` as visible to its parents but also all of its existing
        // publicly exported dependencies.
        for c in self.publicly_exports(candidate_pid) {
            // for each (transitive) parent that can newly see `t`
            let mut stack = vec![(parent_pid, is_public)];
            while let Some((p, public)) = stack.pop() {
                match self.inner.entry(p).or_default().entry(c.name()) {
                    im_rc::hashmap::Entry::Occupied(mut o) => {
                        // the (transitive) parent can already see something by `c`s name, it had better be `c`.
                        assert_eq!(o.get().0, c);
                        if o.get().2.is_some() {
                            // The previous time the parent saw `c`, it was a public dependency.
                            // So all of its parents already know about `c`
                            // and we can save some time by stopping now.
                            continue;
                        }
                        if public {
                            // Mark that `c` has now bean seen publicly
                            let old_age = o.get().1;
                            o.insert((c, old_age, if public { Some(age) } else { None }));
                        }
                    }
                    im_rc::hashmap::Entry::Vacant(v) => {
                        // The (transitive) parent does not have anything by `c`s name,
                        // so we add `c`.
                        v.insert((c, age, if public { Some(age) } else { None }));
                    }
                }
                // if `candidate_pid` was a private dependency of `p` then `p` parents can't see `c` thru `p`
                if public {
                    // if it was public, then we add all of `p`s parents to be checked
                    stack.extend(parents.parents_of(p));
                }
            }
        }
    }
    /// Dry-run of `add_edge`: check whether making `parent` depend on `b_id`
    /// (publicly if `is_public`) would create a public-dependency conflict —
    /// i.e. some transitive parent would see two different packages under the
    /// same name. On conflict, returns the pair of (package, reason) edges
    /// describing it, plus optionally the exporting package when the clash is
    /// via something `b_id` re-exports rather than `b_id` itself.
    pub fn can_add_edge(
        &self,
        b_id: PackageId,
        parent: PackageId,
        is_public: bool,
        parents: &Graph<PackageId, im_rc::HashSet<Dependency>>,
    ) -> Result<
        (),
        (
            ((PackageId, ConflictReason), (PackageId, ConflictReason)),
            Option<(PackageId, ConflictReason)>,
        ),
    > {
        // one tricky part is that `candidate_pid` may already be active and
        // have public dependencies of its own. So we not only need to check
        // `b_id` as visible to its parents but also all of its existing
        // publicly exported dependencies.
        for t in self.publicly_exports(b_id) {
            // for each (transitive) parent that can newly see `t`
            let mut stack = vec![(parent, is_public)];
            while let Some((p, public)) = stack.pop() {
                // TODO: don't look at the same thing more then once
                if let Some(o) = self.inner.get(&p).and_then(|x| x.get(&t.name())) {
                    if o.0 != t {
                        // the (transitive) parent can already see a different version by `t`s name.
                        // So, adding `b` will cause `p` to have a public dependency conflict on `t`.
                        return Err((
                            (o.0, ConflictReason::PublicDependency(p)), // p can see the other version and
                            (parent, ConflictReason::PublicDependency(p)), // p can see us
                        ))
                        .map_err(|e| {
                            if t == b_id {
                                (e, None)
                            } else {
                                (e, Some((t, ConflictReason::PubliclyExports(b_id))))
                            }
                        });
                    }
                    if o.2.is_some() {
                        // The previous time the parent saw `t`, it was a public dependency.
                        // So all of its parents already know about `t`
                        // and we can save some time by stopping now.
                        continue;
                    }
                }
                // if `b` was a private dependency of `p` then `p` parents can't see `t` thru `p`
                if public {
                    // if it was public, then we add all of `p`s parents to be checked
                    stack.extend(parents.parents_of(p));
                }
            }
        }
        Ok(())
    }
} | registry: &RegistryQueryer<'_>, |
build_test.go | /*
* Copyright 2018-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package insights_test
import (
"testing"
"github.com/buildpacks/libcnb"
. "github.com/onsi/gomega"
"github.com/paketo-buildpacks/libpak"
"github.com/sclevine/spec"
"github.com/paketo-buildpacks/azure-application-insights/insights"
)
// NOTE(review): the function name was elided in this dataset row (the "|"
// placeholder); per the row's trailing middle column it is testBuild.
// It verifies that insights.Build contributes the agent layer, the helper
// layer, and matching BOM entries for both the Java and NodeJS agents.
func | (t *testing.T, context spec.G, it spec.S) {
	var (
		// Expect binds Gomega assertions to this test's *testing.T.
		Expect = NewWithT(t).Expect
		// ctx is the libcnb build context populated per test case.
		ctx libcnb.BuildContext
	)
	it("contributes Java agent", func() {
		// Request the Java agent via the buildpack plan and declare a single
		// matching dependency for the test stack.
		ctx.Plan.Entries = append(ctx.Plan.Entries, libcnb.BuildpackPlanEntry{Name: "azure-application-insights-java"})
		ctx.Buildpack.Metadata = map[string]interface{}{
			"dependencies": []map[string]interface{}{
				{
					"id": "azure-application-insights-java",
					"version": "1.1.1",
					"stacks": []interface{}{"test-stack-id"},
				},
			},
		}
		ctx.StackID = "test-stack-id"
		result, err := insights.Build{}.Build(ctx)
		Expect(err).NotTo(HaveOccurred())
		// Expect exactly two layers: the agent itself plus the "helper" layer
		// that contributes the "properties" helper binary.
		Expect(result.Layers).To(HaveLen(2))
		Expect(result.Layers[0].Name()).To(Equal("azure-application-insights-java"))
		Expect(result.Layers[1].Name()).To(Equal("helper"))
		Expect(result.Layers[1].(libpak.HelperLayerContributor).Names).To(Equal([]string{"properties"}))
		// BOM entries mirror the two layers.
		Expect(result.BOM.Entries).To(HaveLen(2))
		Expect(result.BOM.Entries[0].Name).To(Equal("azure-application-insights-java"))
		Expect(result.BOM.Entries[1].Name).To(Equal("helper"))
	})
	it("contributes NodeJS agent", func() {
		// Same scenario as above, but for the NodeJS agent dependency.
		ctx.Plan.Entries = append(ctx.Plan.Entries, libcnb.BuildpackPlanEntry{Name: "azure-application-insights-nodejs"})
		ctx.Buildpack.Metadata = map[string]interface{}{
			"dependencies": []map[string]interface{}{
				{
					"id": "azure-application-insights-nodejs",
					"version": "1.1.1",
					"stacks": []interface{}{"test-stack-id"},
				},
			},
		}
		ctx.StackID = "test-stack-id"
		result, err := insights.Build{}.Build(ctx)
		Expect(err).NotTo(HaveOccurred())
		Expect(result.Layers).To(HaveLen(2))
		Expect(result.Layers[0].Name()).To(Equal("azure-application-insights-nodejs"))
		Expect(result.Layers[1].Name()).To(Equal("helper"))
		Expect(result.Layers[1].(libpak.HelperLayerContributor).Names).To(Equal([]string{"properties"}))
		Expect(result.BOM.Entries).To(HaveLen(2))
		Expect(result.BOM.Entries[0].Name).To(Equal("azure-application-insights-nodejs"))
		Expect(result.BOM.Entries[1].Name).To(Equal("helper"))
	})
}
| testBuild |
asset_audit.py | #!/usr/bin/env python3
# Script to audit the assets
# Reads the asset (amount has all issuances)
# Reads the balances in every address for the asset.
# Compares the two numbers to checks that qty of all assets are accounted for
import subprocess
import json
#Set this to your yiya-cli program
cli = "yiya-cli"
mode = "-testnet"
rpc_port = 15591
#mode = "-regtest"
#rpc_port = 15491
#Set this information in your yiya.conf file (in datadir, not testnet3)
rpc_user = 'rpcuser'
rpc_pass = 'rpcpass555'
# NOTE(review): the function name was elided in this dataset row (the "|"
# placeholder); per the row's trailing middle column it is listassets.
def | (filter):
    # Query the node for all assets matching `filter`; the second argument
    # (True) requests verbose output, which includes the issued 'amount'.
    rpc_connection = get_rpc_connection()
    result = rpc_connection.listassets(filter, True)
    return(result)
def listaddressesbyasset(asset, bool, number, number2):
    """Return the address -> quantity mapping for one asset.

    Thin pass-through to the node's `listaddressesbyasset` RPC; `number`
    is the page size and `number2` the offset (parameter names are kept
    unchanged for caller compatibility).
    """
    conn = get_rpc_connection()
    return conn.listaddressesbyasset(asset, bool, number, number2)
def rpc_call(params):
    """Run the CLI binary with a single RPC command and return raw stdout.

    Uses the module-level `cli` and `mode` settings. stderr is not
    captured and a non-zero exit status is not raised, matching the
    original best-effort behaviour of this audit script.
    """
    # subprocess.run waits for completion and exposes captured stdout
    # directly; the original Popen/communicate pair also discarded the
    # (always-None) stderr value.
    completed = subprocess.run([cli, mode, params], stdout=subprocess.PIPE)
    return completed.stdout
def generate_blocks(n):
    """Ask the node to mine `n` blocks and return the new block hashes."""
    return get_rpc_connection().generate(n)
def get_rpc_connection():
    """Build an authenticated JSON-RPC proxy to the local node.

    Uses the module-level rpc_user / rpc_pass / rpc_port settings and
    always connects to 127.0.0.1.
    """
    # Imported lazily so the script only needs python-bitcoinrpc at call
    # time. JSONRPCException is imported but not used in this function.
    from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
    connection = "http://%s:%[email protected]:%s"%(rpc_user, rpc_pass, rpc_port)
    #print("Connection: " + connection)
    rpc_connection = AuthServiceProxy(connection)
    return(rpc_connection)
def audit(filter):
    """Audit every asset matching `filter`.

    For each asset, compares the total issued amount against the sum of
    balances held across all addresses; prints PASS/FAIL per asset and
    exits the process on the first failure.
    """
    assets = listassets(filter)
    print("Auditing: " + filter)
    #print(assets)
    print("Asset count: " + str(len(assets)))
    count = 0
    # Track which asset is spread across the most addresses, for the
    # summary stats printed at the end.
    max_dist_asset_name = ""
    max_dist_address_count = 0
    for asset, properties in assets.items():
        count=count+1
        total_issued = 0
        total_for_asset = 0
        print("Auditing asset (" + str(count) + "): " + asset)
        # 'amount' holds the total issued quantity for the asset.
        for key, value in properties.items():
            if (key == 'amount'):
                total_issued += value
                print("Total issued for " + asset + " is: " + str(value))
        # Page through all holder addresses, 50000 at a time.
        loop = True
        loop_count = 0
        number_of_addresses = 0
        while loop:
            # This call returns a max of 50000 items at a time
            address_qtys = listaddressesbyasset(asset, False, 50000, loop_count * 50000)
            number_of_addresses += len(address_qtys)
            for address, qty in address_qtys.items():
                #print(address + " -> " + str(qty))
                total_for_asset += qty
            # If the number of address is less than 50000, end the loop
            if len(address_qtys) < 50000:
                loop = False
            loop_count += 1
        print("Total in addresses for asset " + asset + " is " + str(total_for_asset))
        # Calculate stats
        if number_of_addresses > max_dist_address_count:
            max_dist_asset_name = asset
            max_dist_address_count = number_of_addresses
        # The audit passes only when issued == sum of all address balances.
        if (total_issued == total_for_asset):
            print("Audit PASSED for " + asset)
            print("")
        else:
            print("Audit FAILED for " + asset)
            exit()
    if len(assets) == count:
        print("All " + str(len(assets)) + " assets audited.")
        print("Stats:")
        print("  Max Distribed Asset: " + max_dist_asset_name + " with " + str(max_dist_address_count) + " addresses.")
# Script entry point: on regtest, mine 400 blocks first so there are
# mature coins/assets to audit, then audit everything.
if mode == "-regtest": #If regtest then mine our own blocks
    import os
    os.system(cli + " " + mode + " generate 400")
audit("*") #Set to "*" for all.
| listassets |
incapsula.py | import base64
import datetime
import json
import logging
import time
import requests
from .. import exception, ssl
from . import base
logger = logging.getLogger(__name__)
class IncapsulaSite(base.Server):
    """Server implementation for a site fronted by Incapsula (Imperva).

    Talks to the Incapsula provisioning API to query site status and to
    upload custom SSL certificates.
    """
    BASE_URL = "https://my.incapsula.com:443"
    # NOTE(review): the method name was elided in this dataset row (the "|"
    # placeholder); per the row's trailing middle column it is __init__.
    def | (self, api_key, api_id, site_id, crt_name, **kwargs):
        super(IncapsulaSite, self).__init__(crt_name=crt_name, **kwargs)
        self.api_key = api_key
        self.api_id = api_id
        self.site_id = site_id
        # The certificate must contain the full chain - root CA, intermediate CA, and the origin server certificates.
        # see https://docs.imperva.com/bundle/cloud-application-security/page/more/upload-ssl.htm
        self.deploy_full_chain = True
        # enable retry on :
        # - failed DNS lookups
        # - socket connections
        # - connection timeouts
        # but never to requests where data has made it to the server
        self.session = requests.Session()
        self.session.mount('https://', requests.adapters.HTTPAdapter(max_retries=5))
        # all queries must contain at least this parameters
        self.basic_params = {'api_key': self.api_key, 'api_id': self.api_id, 'site_id': self.site_id}
        # Fetch and cache the site status once at construction time.
        self.site_status = self.session.post(
            url='{}/api/prov/v1/sites/status'.format(self.BASE_URL),
            data=self.basic_params,
        ).json()
    def get_description(self):
        # Human-readable identifier used in log/error messages.
        return "[{} - {} ({})]".format(self.__class__.__name__, self.site_status['domain'], self.site_id)
    def deploy_cert(self, key, cert, **kwargs):
        """Upload a custom certificate/key pair to the Incapsula site.

        :param key: path-like object for the private key (read_bytes())
        :param cert: path-like object for the certificate (read_bytes())
        :raise autossl.exception.DeployCertificateError: on API or network failure
        """
        # authentication data and site ID
        parameters = self.basic_params.copy()
        parameters.update({
            'certificate': base64.b64encode(cert.read_bytes()),
            'private_key': base64.b64encode(key.read_bytes()),
        })
        try:
            json_response = {}
            # Up to 5 attempts: Incapsula intermittently returns internal
            # error 3015, which usually succeeds on retry.
            # NOTE(review): if all 5 attempts return 3015, the loop falls
            # through without raising and logs success anyway — confirm
            # whether that is intended.
            for _ in range(5):
                json_response = self.session.post(
                    url='{}/api/prov/v1/sites/customCertificate/upload'.format(self.BASE_URL),
                    data=parameters,
                ).json()
                # deployment is a success
                if json_response['res'] == 0:
                    break
                # specific behavior for Internal error code, that is returned quite often by Incapsula...
                # => just retry, it generally works fine the 2nd or 3rd time
                elif json_response['res'] == 3015:
                    time.sleep(1)
                    continue
                # no success, just return exception with last error message
                else:
                    raise exception.DeployCertificateError('Unable to deploy new certificate on {}: {}'.format(
                        self.get_description(), json.dumps(json_response, indent=4)))
            # Upload successful
            logger.info("Certificate/Key %s updated successfully on %s.", self.crt_name, self.get_description())
        except requests.exceptions.RequestException as ex:
            raise exception.DeployCertificateError(
                msg='Unable to deploy new certificate on {}: {}'.format(self.get_description(), str(ex)),
                original_exception=ex,
            )
    def is_same(self, common_name=None, sans=None, exact_match=False):
        """Check if domain for targeted Incapsula site is part of specified domains
        :param common_name: Common name
        :type common_name: str
        :param sans: list of Subject Alternate Names
        :type sans: list
        :param exact_match: if True, certificate must exactly match input domains
            if False, input domain will also match wildcard certificate and additional domains in certificate will
            be ignored
        :type exact_match: bool
        :return: True if Incapsula site domain is covered by input domains
        """
        blueprint_domains = ssl.get_domains(common_name=common_name, sans=sans)
        # check if Incapsula domain is included in input domains
        for blueprint_domain in blueprint_domains:
            if ssl.is_domain_matching(domain_to_check=self.site_status['domain'],
                                      reference_domain=blueprint_domain,
                                      exact_match=exact_match):
                return True
        return False
    def get_certificate_information(self):
        """Retrieve certificate information from Incapsula site.
        :return: SSL certificate information
        :rtype: autossl.ssl.SslCertificate
        :raise autossl.exception.CertificateNotFound: if certificate does not exist yet on server
        """
        custom_certificate = self.site_status.get('ssl', {}).get('custom_certificate')
        # if invalid incapsula response or no custom_certificate deployed yet
        if self.site_status['res'] != 0 or custom_certificate is None or custom_certificate.get('active') is not True:
            raise exception.CertificateNotFound("No certificate found for site ID {}".format(self.get_description()))
        # Get expiration date (in milliseconds, since 1970) from site configuration
        expiration_date_ms = custom_certificate.get('expirationDate')
        if expiration_date_ms is None:
            raise RuntimeError(
                "Unable to get certificate expiration date (path: ssl.custom_certificate.expirationDate)"
                " for site ID {} in api response {}".format(self.get_description(),
                                                            json.dumps(self.site_status, indent=4))
            )
        return ssl.SslCertificate(
            common_name=self.site_status['domain'],
            sans=[],
            expiration=datetime.datetime.utcfromtimestamp(float(expiration_date_ms)/1000),
        )
    def create_acme_challenge(self, token, key_authorization):
        """Create token on server with specified value
        :param token: challenge key
        :param key_authorization: challenge value
        """
        # Intentionally a no-op: Incapsula proxies traffic to the origin
        # server, which serves the challenge itself.
        logger.debug("No challenge to deploy for Incapsula that is just acting as a proxy to real server.")
    def delete_acme_challenge(self, token):
        """Delete challenge created on server
        :param token: challenge key to delete from server
        """
        # Intentionally a no-op, see create_acme_challenge.
        logger.debug("No challenge to cleanup for Incapsula that is just acting as a proxy to real server.")
| __init__ |
elasticsearch_plugin.py | # -*- coding: utf-8 -*-
import logging
from elasticsearch import Elasticsearch
from functools import partial
LOGGER = logging.getLogger(__name__)
STORAGE_DEFAULTS = {
'host': '127.0.0.1',
'port': '9200',
'db_name': 'bridge_db',
'alias': 'bridge'
}
class ElasticsearchStorage(object):
    """Elasticsearch-backed document storage for one resource type.

    Creates the index and alias on construction and raises the mapping
    field limit to 4000 when needed.
    """
    def __init__(self, conf, resource):
        # NOTE(review): this mutates the module-level STORAGE_DEFAULTS dict,
        # so settings leak across instances — confirm whether intended.
        STORAGE_DEFAULTS.update(conf.get('storage', {}))
        # Expose host/port/db_name/alias as instance attributes.
        for name, value in STORAGE_DEFAULTS.items():
            setattr(self, name, value)
        self.doc_type = resource
        self.db = Elasticsearch('{}:{}'.format(self.host, self.port))
        # ignore=400: index may already exist.
        self.db.indices.create(index=self.db_name, ignore=400)
        self.db.indices.put_alias(index=self.db_name, name=self.alias)
        settings = self.db.indices.get_settings(
            index=self.db_name,
            name='index.mapping.total_fields.limit')
        # Bump the total-fields limit to 4000 if it is not already set.
        if settings.get(self.db_name, {}).get(u'settings', {}) \
                .get(u'index',{}).get(u'mapping', {}) \
                .get(u'total_fields', {}).get(u'limit', u'1000') != u'4000':
            self.db.indices.put_settings(
                body={'index.mapping.total_fields.limit': 4000},
                index=self.db_name)
        # Convenience partials bound to the alias.
        self.db.index_get = partial(self.db.get, index=self.alias)
        self.db.index_bulk = partial(self.db.bulk, index=self.alias)
    def filter_bulk(self, bulk):
        """
        Receiving list of docs ids and checking existing in storage, return
        dict where key is doc_id and value - dateModified if doc exist
        else value - False
        :param keys: List of docs ids
        :return: dict: key: doc_id, value: dateModified or False
        """
        rows = self.db.mget(
            index=self.alias, doc_type=self.doc_type.title(),
            body={"ids": bulk.keys()}, _source_include="dateModified"
        )
        # '_source' present => found, report dateModified; else 'found' is False.
        resp_dict = {k['_id']: (k['_source']['dateModified']
                                if '_source' in k else k['found'])
                     for k in rows['docs']}
        return resp_dict
    # NOTE(review): the method name was elided in this dataset row (the "|"
    # placeholder); per the row's trailing middle column it is save_bulk.
    def | (self, bulk):
        """
        Save to storage bulk data
        :param bulk: Dict where key: doc_id, value: document
        :return: list: List of tuples with id, success: boolean, message: str
        """
        body = []
        # Build the bulk request: one action line + one document per doc.
        for k, v in bulk.items():
            doc = v.copy()
            del doc['_id']
            # '_ver' requests optimistic-concurrency indexing at that version.
            if '_ver' in doc:
                body.append({
                    "index": {"_id": k, "_type": self.doc_type.title(),
                              "_index": self.alias, '_version': doc['_ver']}
                })
                del doc['_ver']
            else:
                body.append({
                    "index": {"_id": k, "_type": self.doc_type.title(),
                              "_index": self.alias}
                })
            body.append(doc)
        res = self.db.index_bulk(body=body,
                                 doc_type=self.doc_type.title())
        results = []
        for item in res['items']:
            success = item['index']['status'] in [200, 201]
            doc_id = item['index']['_id']
            result = item['index']['result'] if 'result' in item[
                'index'] else \
                item['index']['error']['reason']
            # Mapping errors are treated as non-fatal and reported as skipped.
            if not success and result != u'Mapping reason message':
                # TODO: Catch real mapping message and replace ^
                result = 'skipped'
                success = True
            results.append((success, doc_id, result))
        return results
    def get_doc(self, doc_id):
        """
        Trying get doc with doc_id from storage and return doc dict if
        doc exist else None
        :param doc_id:
        :return: dict: or None
        """
        doc = self.db.index_get(
            doc_type=self.doc_type.title(), id=doc_id, ignore=[404]
        )
        if doc and '_source' in doc:
            # Flatten the ES envelope: return the source with '_ver' attached.
            source = doc['_source']
            ver = doc['_version']
            doc = source
            doc['_ver'] = ver
        else:
            doc = None
        return doc
def includme(config):
    """Plugin entry point: attach an ElasticsearchStorage to `config`.

    The resource name is singularized by dropping the trailing character
    (e.g. 'tenders' -> 'tender'). Name kept as-is for compatibility,
    although the conventional spelling is `includeme`.
    """
    doc_type = config.get('resource', 'tenders')[:-1]
    config['storage_obj'] = ElasticsearchStorage(config, doc_type)
dshield_medium.py | import sys
import os
import configparser
import requests
import pandas as pd
import hashlib
from io import StringIO
from datetime import datetime, timezone
## Django Setup
import django
import pymysql
pymysql.install_as_MySQLdb()
conffile = os.path.join(os.path.dirname(__file__), "../../conf/insert2db.conf")
conf = configparser.SafeConfigParser()
conf.read(conffile)
sys.path.append(conf.get('exist', 'syspath'))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'intelligence.settings')
django.setup()
from apps.reputation.models import blacklist
import django.utils.timezone as tzone
from django.db import IntegrityError
## Logger Setup
from logging import getLogger, DEBUG, NullHandler
logger = getLogger(__name__)
logger.addHandler(NullHandler())
logger.setLevel(DEBUG)
logger.propagate = True
DataDir = os.path.join(os.path.dirname(__file__), '../data/')
class Tracker():
    """Fetches the DShield 'Medium' suspicious-domains feed and turns new
    entries into `blacklist` model instances for insertion."""
    def __init__(self):
        # Feed identity and local cache location.
        self.name = 'Dshield_Medium'
        self.ID = 222
        self.URL = 'https://www.dshield.org/feeds/suspiciousdomains_Medium.txt'
        self.DataFilePath = DataDir + 'dshield/suspiciousdomains_Medium.txt'
        # Column names for the single-column CSV parse.
        self.header = [
            'domain',
        ]
    def cmpFiles(self, oldfile, newtext):
        """Return the lines of `newtext` not present in `oldfile`
        (i.e. new feed entries since the last run)."""
        diffline = ''
        # Create an empty cache file on first run.
        if not os.path.exists(oldfile):
            f = open(oldfile, 'w')
            f.close()
        oldsets = set(open(oldfile).readlines())
        newsets = set(newtext.replace('\r\n','\n').splitlines(True))
        results = newsets.difference(oldsets)
        for result in results:
            diffline += result
        # Drop the trailing newline of the concatenated diff.
        return diffline[:-1]
    def delComment(self, s):
        """Strip comment lines ('#...') and the 'Site' header line."""
        result = ''
        for line in s.splitlines(True):
            if not line.startswith('#') \
                    and line != "Site\n":
                result += line
        return result
    # NOTE(review): the method name was elided in this dataset row (the "|"
    # placeholder); per the row's trailing middle column it is makeDataframe.
    def | (self):
        """Download the feed and return a DataFrame of NEW domains only
        (empty DataFrame when nothing changed or on download failure)."""
        df = pd.DataFrame()
        newline = ''
        try:
            res = requests.get(self.URL)
            if res.status_code != 200:
                return df
            newline = self.cmpFiles(self.DataFilePath, res.text)
            newline = self.delComment(newline)
        except Exception as e:
            logger.error(e)
        if not newline == '':
            # Persist the full feed so the next run diffs against it.
            open(self.DataFilePath, 'w').write(res.text)
            df = pd.read_csv(StringIO(newline), names=self.header)
        return df
    def parse(self):
        """Build `blacklist` records for each new domain; returns the list
        of records to insert."""
        logger.info("start parsing: %s", self.name)
        df = self.makeDataframe()
        queries = []
        if not df.empty:
            for i, v in df.iterrows():
                # Deterministic row id: md5 of "<source_id>,<row values>".
                line = str(self.ID) + ","
                line += str(v.values)
                md5 = hashlib.md5(line.encode('utf-8')).hexdigest()
                try:
                    query = blacklist(
                        id = md5,
                        domain = v.domain,
                        datetime = tzone.now(),
                        source = self.ID,
                        referrer = 'https://www.dshield.org/feeds/suspiciousdomains_Medium.txt',
                    )
                except Exception as e:
                    logger.error("%s: %s", e, line)
                # NOTE(review): this append sits outside the try/except, so
                # on exception it appends the previous iteration's query (or
                # raises NameError on the first row) — confirm intent.
                queries.append(query)
        else:
            logger.info("no update")
        logger.info("done parsing: %s, %s queries were parsed", self.name, len(queries))
        return queries
| makeDataframe |
date.ts | import { DataType } from '../data-type';
import { ChronoUnit, LocalDate } from '@js-joda/core';
// globalDate is to be used for JavaScript's global 'Date' object to avoid name clashing with the 'Date' constant below
const globalDate = global.Date;
const EPOCH_DATE = LocalDate.ofYearDay(1, 1);
// TDS DATE type (id 0x28): a date-only value encoded as a 3-byte little-endian
// count of days since year 1, day 1 (EPOCH_DATE).
// NOTE(review): this dataset row elided a span at the "|" placeholder below;
// per the row's trailing middle column, the missing code rejects non-Date
// inputs by coercing them via globalDate.parse before the isNaN check.
const Date : DataType = {
  id: 0x28,
  type: 'DATEN',
  name: 'Date',
  declaration: function() {
    return 'date';
  },
  writeTypeInfo: function(buffer) {
    // Type token only; DATE has no additional type-info payload here.
    buffer.writeUInt8(this.id);
  },
  // ParameterData<any> is temporary solution. TODO: need to understand what type ParameterData<...> can be.
  writeParameterData: function(buffer, { value }, options, cb) {
    if (value != null) {
      // Length byte (3) followed by the day count.
      buffer.writeUInt8(3);
      let date;
      if (options.useUTC) {
        date = LocalDate.of(value.getUTCFullYear(), value.getUTCMonth() + 1, value.getUTCDate());
      } else {
        date = LocalDate.of(value.getFullYear(), value.getMonth() + 1, value.getDate());
      }
      const days = EPOCH_DATE.until(date, ChronoUnit.DAYS);
      buffer.writeUInt24LE(days);
    } else {
      // NULL is encoded as a zero-length value.
      buffer.writeUInt8(0);
    }
    cb();
  },
  // TODO: value is technically of type 'unknown'.
  validate: function(value): null | Date | TypeError {
    if (value == null) {
      return null; | }
    if (isNaN(value)) {
      return new TypeError('Invalid date.');
    }
    return value;
  }
};
export default Date;
module.exports = Date; | }
if (!(value instanceof globalDate)) {
value = new globalDate(globalDate.parse(value)); |
bitcoin_et.ts | <?xml version="1.0" ?><!DOCTYPE TS><TS language="et" version="2.1">
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Hopecoin</source>
<translation>Info Hopecoin'i kohta</translation>
</message>
<message>
<location line="+39"/>
<source><b>Hopecoin</b> version</source>
<translation><b>Hopecoin</b>'i versioon</translation>
</message>
<message>
<location line="+41"/>
<source>Copyright © 2009-2014 The Bitcoin developers
Copyright © 2012-2014 The NovaCoin developers
Copyright © 2014 The Blackcoin developers</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or <a href="http://www.opensource.org/licenses/mit-license.php">http://www.opensource.org/licenses/mit-license.php</a>.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (<a href="https://www.openssl.org/">https://www.openssl.org/</a>) and cryptographic software written by Eric Young (<a href="mailto:[email protected]">[email protected]</a>) and UPnP software written by Thomas Bernard.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>Aadressiraamat</translation>
</message>
<message>
<location line="+22"/>
<source>Double-click to edit address or label</source>
<translation>Tee topeltklõps aadressi või märgise muutmiseks</translation>
</message>
<message>
<location line="+24"/>
<source>Create a new address</source>
<translation>Loo uus aadress</translation>
</message>
<message>
<location line="+10"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Kopeeri valitud aadress vahemällu</translation>
</message>
<message>
<location line="-7"/>
<source>&New Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-43"/>
<source>These are your Hopecoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>Need on Sinu Hopecoin'i aadressid maksete vastuvõtmiseks. Soovi korral saad anda igale saatjale eraldi aadressi, et hõlpsamini omada ülevaadet, et kellelt mündid laekuvad.</translation>
</message>
<message>
<location line="+53"/>
<source>&Copy Address</source>
<translation>&Aadressi kopeerimine</translation>
</message>
<message>
<location line="+7"/>
<source>Show &QR Code</source>
<translation>Näita &QR koodi.</translation>
</message>
<message>
<location line="+7"/>
<source>Sign a message to prove you own a Hopecoin address</source>
<translation>Allkirjasta sõnum Hopecoin'i aadressi omamise tõestamiseks.</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Delete the currently selected address from the list</source>
<translation>Kustuta märgistatud aadress loetelust</translation>
</message>
<message>
<location line="-10"/>
<source>Verify a message to ensure it was signed with a specified Hopecoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>&Delete</source>
<translation>&Kustuta</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+66"/>
<source>Copy &Label</source>
<translation>&Märgise kopeerimine</translation>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation>&Muuda</translation>
</message>
<message>
<location line="+248"/>
<source>Export Address Book Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Komaeraldatud fail (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+145"/>
<source>Label</source>
<translation>Silt</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Aadress</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(silti pole)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>Salafraasi dialoog</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Sisesta salasõna</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Uus salasõna</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Korda salafraasi</translation>
</message>
<message>
<location line="+33"/>
<source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>For staking only</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+38"/>
<source>Encrypt wallet</source>
<translation>Krüpteeri rahakott</translation>
</message>
<message>
<location line="+7"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>See toiming nõuab sinu rahakoti salafraasi.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Tee rahakott lukust lahti.</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>See toiming nõuab sinu rahakoti salafraasi.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Dekrüpteeri rahakott.</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Muuda salafraasi</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Sisesta rahakoti vana ning uus salafraas.</translation>
</message>
<message>
<location line="+45"/>
<source>Confirm wallet encryption</source>
<translation>Kinnita rahakoti krüpteering</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR COINS</b>!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>Kas soovid oma rahakoti krüpteerida?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>TÄHTIS: Kõik varasemad rahakoti varundfailid tuleks üle kirjutada äsja loodud krüpteeritud rahakoti failiga. Turvakaalutlustel tühistatakse krüpteerimata rahakoti failid alates uue, krüpteeritud rahakoti, kasutusele võtust.</translation>
</message>
<message>
<location line="+103"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>Hoiatus: Caps Lock on sisse lülitatud!</translation>
</message>
<message>
<location line="-133"/>
<location line="+60"/>
<source>Wallet encrypted</source>
<translation>Rahakott krüpteeritud</translation>
</message>
<message>
<location line="-140"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>ten or more random characters</b>, or <b>eight or more words</b>.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<source>Hopecoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+44"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Tõrge rahakoti krüpteerimisel</translation>
</message>
<message>
<location line="-56"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Rahakoti krüpteering ebaõnnestus tõrke tõttu. Sinu rahakotti ei krüpteeritud.</translation>
</message>
<message>
<location line="+7"/>
<location line="+50"/>
<source>The supplied passphrases do not match.</source>
<translation>Salafraasid ei kattu.</translation>
</message>
<message>
<location line="-38"/>
<source>Wallet unlock failed</source>
<translation>Rahakoti avamine ebaõnnestus</translation>
</message>
<message>
<location line="+1"/>
<location line="+12"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>Rahakoti salafraas ei ole õige.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Rahakoti dekrüpteerimine ei õnnestunud</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>Rahakoti salafraasi muutmine õnnestus.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+297"/>
<source>Sign &message...</source>
<translation>Signeeri &sõnum</translation>
</message>
<message>
<location line="-64"/>
<source>Show general overview of wallet</source>
<translation>Kuva rahakoti üld-ülevaade</translation>
</message>
<message>
<location line="+17"/>
<source>&Transactions</source>
<translation>&Tehingud</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Sirvi tehingute ajalugu</translation>
</message>
<message>
<location line="+5"/>
<source>&Address Book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit the list of stored addresses and labels</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-18"/>
<source>Show the list of addresses for receiving payments</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+34"/>
<source>E&xit</source>
<translation>V&älju</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Väljumine</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about Hopecoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>Teave &Qt kohta</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>Kuva Qt kohta käiv info</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&Valikud...</translation>
</message>
<message>
<location line="+4"/>
<source>&Encrypt Wallet...</source>
<translation>&Krüpteeri Rahakott</translation>
</message>
<message>
<location line="+2"/>
<source>&Backup Wallet...</source>
<translation>&Varunda Rahakott</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&Salafraasi muutmine</translation>
</message>
<message>
<location line="+9"/>
<source>&Export...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-55"/>
<source>Send coins to a Hopecoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+39"/>
<source>Modify configuration options for Hopecoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-13"/>
<source>Encrypt or decrypt wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Backup wallet to another location</source>
<translation>Varunda rahakott teise asukohta</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Rahakoti krüpteerimise salafraasi muutmine</translation>
</message>
<message>
<location line="+10"/>
<source>&Debug window</source>
<translation>&Debugimise aken</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>Ava debugimise ja diagnostika konsool</translation>
</message>
<message>
<location line="-5"/>
<source>&Verify message...</source>
<translation>&Kontrolli sõnumit...</translation>
</message>
<message>
<location line="-214"/>
<location line="+555"/>
<source>Hopecoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-555"/>
<source>Wallet</source>
<translation>Rahakott</translation>
</message>
<message>
<location line="+193"/>
<source>&About Hopecoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>&Näita / Peida</translation>
</message>
<message>
<location line="+8"/>
<source>Unlock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>&Lock Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Lock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>&File</source>
<translation>&Fail</translation>
</message>
<message>
<location line="+8"/>
<source>&Settings</source>
<translation>&Seaded</translation>
</message>
<message>
<location line="+8"/>
<source>&Help</source>
<translation>&Abi</translation>
</message>
<message>
<location line="+17"/>
<source>Tabs toolbar</source>
<translation>Vahelehe tööriistariba</translation>
</message>
<message>
<location line="+46"/>
<location line="+9"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<location line="+0"/>
<location line="+58"/>
<source>Hopecoin client</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+70"/>
<source>%n active connection(s) to Hopecoin network</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+488"/>
<source>Staking.<br>Your weight is %1<br>Network weight is %2<br>Expected time to earn reward is %3</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Not staking because wallet is locked</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is syncing</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because you don't have mature coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-812"/>
<source>&Dashboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>&Receive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>&Send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>&Unlock Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+277"/>
<source>Up to date</source>
<translation>Ajakohane</translation>
</message>
<message>
<location line="+43"/>
<source>Catching up...</source>
<translation>Jõuan järele...</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Sent transaction</source>
<translation>Saadetud tehing</translation>
</message>
<message>
<location line="+1"/>
<source>Incoming transaction</source>
<translation>Sisenev tehing</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Kuupäev: %1
Summa: %2
Tüüp: %3
Aadress: %4
</translation>
</message>
<message>
<location line="+100"/>
<location line="+15"/>
<source>URI handling</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-15"/>
<location line="+15"/>
<source>URI can not be parsed! This can be caused by an invalid Hopecoin address or malformed URI parameters.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Wallet is <b>not encrypted</b></source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>Rahakott on <b>krüpteeritud</b> ning hetkel <b>avatud</b></translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>Rahakott on <b>krüpteeritud</b> ning hetkel <b>suletud</b></translation>
</message>
<message>
<location line="+24"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+91"/>
<source>%n second(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n minute(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="-429"/>
<location line="+433"/>
<source>%n hour(s)</source>
<translation><numerusform>%n tund</numerusform><numerusform>%n tundi</numerusform></translation>
</message>
<message>
<location line="-456"/>
<source>Processed %1 blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+27"/>
<location line="+433"/>
<source>%n day(s)</source>
<translation><numerusform>%n päev</numerusform><numerusform>%n päeva</numerusform></translation>
</message>
<message numerus="yes">
<location line="-429"/>
<location line="+6"/>
<source>%n week(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+0"/>
<source>%1 and %2</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+0"/>
<source>%n year(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>%1 behind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Last received block was generated %1 ago.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+69"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+324"/>
<source>Not staking</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="+104"/>
<source>A fatal error occurred. Hopecoin can no longer continue safely and will quit.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+110"/>
<source>Network Alert</source>
<translation>Võrgu Häire</translation>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<location filename="../forms/coincontroldialog.ui" line="+14"/>
<source>Coin Control</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Quantity:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Bytes:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+48"/>
<source>Amount:</source>
<translation>Summa:</translation>
</message>
<message>
<location line="+32"/>
<source>Priority:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+48"/>
<source>Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="+537"/>
<source>no</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/coincontroldialog.ui" line="+51"/>
<source>After Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Change:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+69"/>
<source>(un)select all</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Tree mode</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>List mode</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+45"/>
<source>Amount</source>
<translation>Kogus</translation>
</message>
<message>
<location line="+5"/>
<source>Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Address</source>
<translation>Aadress</translation>
</message>
<message>
<location line="+5"/>
<source>Date</source>
<translation>Kuupäev</translation>
</message>
<message>
<location line="+5"/>
<source>Confirmations</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Confirmed</source>
<translation>Kinnitatud</translation>
</message>
<message>
<location line="+5"/>
<source>Priority</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="-500"/>
<source>Copy address</source>
<translation>Aadressi kopeerimine</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Märgise kopeerimine</translation>
</message>
<message>
<location line="+1"/>
<location line="+26"/>
<source>Copy amount</source>
<translation>Kopeeri summa</translation>
</message>
<message>
<location line="-25"/>
<source>Copy transaction ID</source>
<translation>Kopeeri tehingu ID</translation>
</message>
<message>
<location line="+24"/>
<source>Copy quantity</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Copy fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+317"/>
<source>highest</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>high</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>medium-high</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>medium</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>low-medium</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>low</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>lowest</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+140"/>
<source>DUST</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>yes</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>This label turns red, if the transaction size is bigger than 10000 bytes.
This means a fee of at least %1 per kb is required.
Can vary +/- 1 Byte per input.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transactions with higher priority get more likely into a block.
This label turns red, if the priority is smaller than "medium".
This means a fee of at least %1 per kb is required.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if any recipient receives an amount smaller than %1.
This means a fee of at least %2 is required.
Amounts below 0.546 times the minimum relay fee are shown as DUST.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if the change is smaller than %1.
This means a fee of at least %2 is required.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+36"/>
<location line="+66"/>
<source>(no label)</source>
<translation>(silti pole)</translation>
</message>
<message>
<location line="-9"/>
<source>change from %1 (%2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>(change)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Muuda aadressi</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Märgis</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Aadress</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>Uus sissetulev aadress</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Uus väljaminev aadress</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>Sissetulevate aadresside muutmine</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>Väljaminevate aadresside muutmine</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>Selline aadress on juba olemas: "%1"</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Hopecoin address.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>Rahakotti ei avatud</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>Tõrge uue võtme loomisel.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+426"/>
<location line="+12"/>
<source>Hopecoin-Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Valikud</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>&Peamine</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>Maksa tehingu &tasu</translation>
</message>
<message>
<location line="+31"/>
<source>Reserved amount does not participate in staking and is therefore spendable at any time.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Reserve</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Automatically start Hopecoin after logging in to the system.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Start Hopecoin on system login</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>&Network</source>
<translation>&Võrk</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Hopecoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>Suuna port &UPnP kaudu</translation>
</message>
<message>
<location line="+19"/>
<source>Proxy &IP:</source>
<translation>Proxy &IP:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>&Port:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Proxy port (nt 9050)</translation>
</message>
<message>
<location line="-57"/>
<source>Connect to the Hopecoin network through a SOCKS5 proxy (e.g. when connecting through Tor).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS5 proxy:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+90"/>
<source>&Window</source>
<translation>&Aken</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>Minimeeri systray alale.</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&Minimeeri systray alale</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>Sulgemise asemel minimeeri aken. Selle valiku tegemisel suletakse programm Menüüst "Välju" käsuga.</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>M&inimeeri sulgemisel</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>&Kuva</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>Kasutajaliidese &keel:</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Hopecoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>Summade kuvamise &Unit:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Vali liideses ning müntide saatmisel kuvatav vaikimisi alajaotus.</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show coin control features or not.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Display coin &control features (experts only!)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Whether to select the coin outputs randomly or with minimal coin age.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Minimize weight consumption (experimental)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Use black visual theme (requires restart)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&OK</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>&Katkesta</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+47"/>
<source>default</source>
<translation>vaikeväärtus</translation>
</message>
<message>
<location line="+148"/>
<location line="+9"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Hopecoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>Sisestatud kehtetu proxy aadress.</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Vorm</translation>
</message>
<message>
<location line="+46"/>
<location line="+247"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Hopecoin network after a connection is established, but this process has not completed yet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-173"/>
<source>Stake:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Unconfirmed:</source> | <message>
<location line="-113"/>
<source>Wallet</source>
<translation>Rahakott</translation>
</message>
<message>
<location line="+49"/>
<source>Spendable:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Your current spendable balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+80"/>
<source>Immature:</source>
<translation>Ebaküps:</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>Mitte aegunud mine'itud jääk</translation>
</message>
<message>
<location line="+23"/>
<source>Total:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Your current total balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+50"/>
<source><b>Recent transactions</b></source>
<translation><b>Viimased tehingud</b></translation>
</message>
<message>
<location line="-118"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-32"/>
<source>Total of coins that was staked, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation>sünkimata</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start hopecoin: click-to-pay handler</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>Kliendi nimi</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<source>N/A</source>
<translation>N/A</translation>
</message>
<message>
<location line="-194"/>
<source>Client version</source>
<translation>Kliendi versioon</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>&Informatsioon</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>Kasutan OpenSSL versiooni</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>Käivitamise hetk</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>Võrgustik</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>Ühenduste arv</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>Ploki jada</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>Plokkide hetkearv</translation>
</message>
<message>
<location line="+197"/>
<source>&Network Traffic</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+52"/>
<source>&Clear</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Totals</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+64"/>
<source>In:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+80"/>
<source>Out:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-383"/>
<source>Last block time</source>
<translation>Viimane ploki aeg</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&Ava</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Show the Hopecoin-Qt help message to get a list with possible Hopecoin command-line options.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&Konsool</translation>
</message>
<message>
<location line="-237"/>
<source>Build date</source>
<translation>Valmistusaeg</translation>
</message>
<message>
<location line="-104"/>
<source>Hopecoin - Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Hopecoin Core</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+256"/>
<source>Debug log file</source>
<translation>Debugimise logifail</translation>
</message>
<message>
<location line="+7"/>
<source>Open the Hopecoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>Puhasta konsool</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="+325"/>
<source>Welcome to the Hopecoin RPC console.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>Ajaloo sirvimiseks kasuta üles ja alla nooli, ekraani puhastamiseks <b>Ctrl-L</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>Ülevaateks võimalikest käsklustest trüki <b>help</b>.</translation>
</message>
<message>
<location line="+127"/>
<source>%1 B</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 KB</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 MB</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 GB</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>%1 m</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>%1 h</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 h %2 m</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+182"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Müntide saatmine</translation>
</message>
<message>
<location line="+76"/>
<source>Coin Control Features</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Inputs...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>automatically selected</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Insufficient funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Quantity:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<location line="+35"/>
<source>0</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-19"/>
<source>Bytes:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+51"/>
<source>Amount:</source>
<translation>Summa:</translation>
</message>
<message>
<location line="+35"/>
<source>Priority:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>medium</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>no</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>After Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+50"/>
<source>custom change address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+106"/>
<source>Send to multiple recipients at once</source>
<translation>Saatmine mitmele korraga</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>Lisa &Saaja</translation>
</message>
<message>
<location line="+16"/>
<source>Remove all transaction fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>Puhasta &Kõik</translation>
</message>
<message>
<location line="+24"/>
<source>Balance:</source>
<translation>Jääk:</translation>
</message>
<message>
<location line="+47"/>
<source>Confirm the send action</source>
<translation>Saatmise kinnitamine</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>S&aada</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-174"/>
<source>Enter a Hopecoin address (e.g. WPT7ufM1tz2uzV7x9sG26z4CuhWs2B18Rw)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Copy quantity</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Kopeeri summa</translation>
</message>
<message>
<location line="+1"/>
<source>Copy fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+87"/>
<source><b>%1</b> to %2 (%3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>Müntide saatmise kinnitamine</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>Saaja aadress ei ole kehtiv, palun kontrolli.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>Makstav summa peab olema suurem kui 0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>Summa ületab jäägi.</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>Summa koos tehingu tasuga %1 ületab sinu jääki.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>Ühe saatmisega topelt-adressaati olla ei tohi.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+247"/>
<source>WARNING: Invalid Hopecoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>(no label)</source>
<translation>(silti pole)</translation>
</message>
<message>
<location line="+4"/>
<source>WARNING: unknown change address</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>S&umma:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>Maksa &:</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. WPT7ufM1tz2uzV7x9sG26z4CuhWs2B18Rw)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Aadressiraamatusse sisestamiseks märgista aadress</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
        <translation>&Märgis:</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Kleebi aadress vahemälust</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Hopecoin address (e.g. WPT7ufM1tz2uzV7x9sG26z4CuhWs2B18Rw)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>Signatuurid - Allkirjasta / Kinnita Sõnum</translation>
</message>
<message>
<location line="+13"/>
<location line="+124"/>
<source>&Sign Message</source>
<translation>&Allkirjastamise teade</translation>
</message>
<message>
<location line="-118"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
        <translation>Omandiõigsuse tõestamiseks saad sõnumeid allkirjastada oma aadressiga. Ettevaatust petturitega, kes üritavad sinu allkirja endale saada. Allkirjasta ainult korralikult täidetud avaldusi, millega nõustud.</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. WPT7ufM1tz2uzV7x9sG26z4CuhWs2B18Rw)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+203"/>
<source>Choose an address from the address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-193"/>
<location line="+203"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-193"/>
<source>Paste address from clipboard</source>
<translation>Kleebi aadress vahemälust</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>Sisesta siia allkirjastamise sõnum</translation>
</message>
<message>
<location line="+24"/>
<source>Copy the current signature to the system clipboard</source>
<translation>Kopeeri praegune signatuur vahemällu</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Hopecoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Reset all sign message fields</source>
<translation>Tühjenda kõik sõnumi allkirjastamise väljad</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>Puhasta &Kõik</translation>
</message>
<message>
<location line="-87"/>
<location line="+70"/>
<source>&Verify Message</source>
<translation>&Kinnita Sõnum</translation>
</message>
<message>
<location line="-64"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>Kinnitamiseks sisesta allkirjastamise aadress, sõnum (kindlasti kopeeri täpselt ka reavahetused, tühikud, tabulaatorid jms) ning allolev signatuur.</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. WPT7ufM1tz2uzV7x9sG26z4CuhWs2B18Rw)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Hopecoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Reset all verify message fields</source>
<translation>Tühjenda kõik sõnumi kinnitamise väljad</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Hopecoin address (e.g. WPT7ufM1tz2uzV7x9sG26z4CuhWs2B18Rw)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>Signatuuri genereerimiseks vajuta "Allkirjasta Sõnum"</translation>
</message>
<message>
<location line="+3"/>
<source>Enter Hopecoin signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+85"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>Sisestatud aadress ei kehti.</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>Palun kontrolli aadressi ning proovi uuesti.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>Sisestatud aadress ei viita võtmele.</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>Rahakoti avamine katkestati.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>Sisestatud aadressi privaatvõti ei ole saadaval.</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>Sõnumi signeerimine ebaõnnestus.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>Sõnum signeeritud.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>Signatuuri ei õnnestunud dekodeerida.</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>Palun kontrolli signatuuri ning proovi uuesti.</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>Signatuur ei kattunud sõnumi kokkuvõttega.</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>Sõnumi kontroll ebaõnnestus.</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>Sõnum kontrollitud.</translation>
</message>
</context>
<context>
<name>TrafficGraphWidget</name>
<message>
<location filename="../trafficgraphwidget.cpp" line="+75"/>
<source>KB/s</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+25"/>
<source>Open until %1</source>
<translation>Avatud kuni %1</translation>
</message>
<message>
<location line="+6"/>
<source>conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1/offline</source>
        <translation>%1/offline'is</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/kinnitamata</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 kinnitust</translation>
</message>
<message>
<location line="+17"/>
<source>Status</source>
<translation>Staatus</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>, levita läbi %n node'i</numerusform><numerusform>, levita läbi %n node'i</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Kuupäev</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>Allikas</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>Genereeritud</translation>
</message>
<message>
<location line="+5"/>
<location line="+13"/>
<source>From</source>
<translation>Saatja</translation>
</message>
<message>
<location line="+1"/>
<location line="+19"/>
<location line="+58"/>
<source>To</source>
<translation>Saaja</translation>
</message>
<message>
<location line="-74"/>
<location line="+2"/>
<source>own address</source>
<translation>oma aadress</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>märgis</translation>
</message>
<message>
<location line="+34"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>Krediit</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>aegub %n bloki pärast</numerusform><numerusform>aegub %n bloki pärast</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>mitte aktsepteeritud</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>Deebet</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>Tehingu tasu</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>Neto summa</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>Sõnum</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>Kommentaar</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>Tehingu ID</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 510 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>Debug'imise info</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>Tehing</translation>
</message>
<message>
<location line="+5"/>
<source>Inputs</source>
<translation>Sisendid</translation>
</message>
<message>
<location line="+21"/>
<source>Amount</source>
<translation>Kogus</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>õige</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>vale</translation>
</message>
<message>
<location line="-202"/>
<source>, has not been successfully broadcast yet</source>
<translation>, veel esitlemata</translation>
</message>
<message numerus="yes">
<location line="-36"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+67"/>
<source>unknown</source>
<translation>tundmatu</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Tehingu üksikasjad</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Paan kuvab tehingu detailid</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+231"/>
<source>Date</source>
<translation>Kuupäev</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Tüüp</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Aadress</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Kogus</translation>
</message>
<message>
<location line="+52"/>
<source>Open until %1</source>
<translation>Avatud kuni %1</translation>
</message>
<message>
<location line="+12"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Kinnitatud (%1 kinnitust)</translation>
</message>
<message numerus="yes">
<location line="-15"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Avaneb %n bloki pärast</numerusform><numerusform>Avaneb %n bloki pärast</numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Confirming (%1 of %2 recommended confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Immature (%1 confirmations, will be available after %2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>Antud klotsi pole saanud ükski osapool ning tõenäoliselt seda ei aktsepteerita!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Loodud, kuid aktsepteerimata</translation>
</message>
<message>
<location line="+42"/>
<source>Received with</source>
<translation>Saadud koos</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Kellelt saadud</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Saadetud</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Makse iseendale</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Mine'itud</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(n/a)</translation>
</message>
<message>
<location line="+194"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>Tehingu staatus. Kinnituste arvu kuvamiseks liigu hiire noolega selle peale.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>Tehingu saamise kuupäev ning kellaaeg.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Tehingu tüüp.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>Tehingu saaja aadress.</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Jäägile lisatud või eemaldatud summa.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+54"/>
<location line="+17"/>
<source>All</source>
<translation>Kõik</translation>
</message>
<message>
<location line="-16"/>
<source>Today</source>
<translation>Täna</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>Jooksev nädal</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Jooksev kuu</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>Eelmine kuu</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Jooksev aasta</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>Ulatus...</translation>
</message>
<message>
<location line="+12"/>
<source>Received with</source>
<translation>Saadud koos</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Saadetud</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>Iseendale</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Mine'itud</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Muu</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>Otsimiseks sisesta märgis või aadress</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Vähim summa</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Aadressi kopeerimine</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Märgise kopeerimine</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Kopeeri summa</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>Kopeeri tehingu ID</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Märgise muutmine</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>Kuva tehingu detailid</translation>
</message>
<message>
<location line="+138"/>
<source>Export Transaction Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Komaeraldatud fail (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Kinnitatud</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Kuupäev</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Tüüp</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Silt</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Aadress</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Kogus</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>Ulatus:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
        <translation>kuni</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+212"/>
<source>Sending...</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+171"/>
<source>Hopecoin version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Usage:</source>
<translation>Kasutus:</translation>
</message>
<message>
<location line="+1"/>
<source>Send command to -server or hopecoind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>List commands</source>
<translation>Käskluste loetelu</translation>
</message>
<message>
<location line="+1"/>
<source>Get help for a command</source>
<translation>Käskluste abiinfo</translation>
</message>
<message>
<location line="-145"/>
<source>Options:</source>
<translation>Valikud:</translation>
</message>
<message>
<location line="+2"/>
<source>Specify configuration file (default: hopecoin.conf)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Specify pid file (default: hopecoind.pid)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify wallet file (within data directory)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>Täpsusta andmekataloog</translation>
</message>
<message>
<location line="-25"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=hopecoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Hopecoin Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>Sea andmebaasi vahemälu suurus MB (vaikeväärtus: 25)</translation>
</message>
<message>
<location line="+1"/>
<source>Set database disk log size in megabytes (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Listen for connections on <port> (default: 15722 or testnet: 24070)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
        <translation>Säilita maksimaalselt <n> ühendust peeridega (vaikeväärtus: 125)</translation>
</message>
<message>
<location line="+3"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Peeri aadressi saamiseks ühendu korraks node'iga</translation>
</message>
<message>
<location line="+1"/>
<source>Specify your own public address</source>
<translation>Täpsusta enda avalik aadress</translation>
</message>
<message>
<location line="+4"/>
<source>Bind to given address. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Always query for peer addresses via DNS lookup (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Ulakate peeride valulävi (vaikeväärtus: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Mitme sekundi pärast ulakad peerid tagasi võivad tulla (vaikeväärtus: 86400)</translation>
</message>
<message>
<location line="-35"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>RPC pordi %u kuulamiseks seadistamisel ilmnes viga IPv4'l: %s</translation>
</message>
<message>
<location line="+62"/>
<source>Listen for JSON-RPC connections on <port> (default: 15733 or testnet: 24071)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-16"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Luba käsurea ning JSON-RPC käsklusi</translation>
</message>
<message>
<location line="+1"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>Tööta taustal ning aktsepteeri käsklusi</translation>
</message>
<message>
<location line="+1"/>
<source>Use the test network</source>
<translation>Testvõrgu kasutamine</translation>
</message>
<message>
<location line="-23"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>Luba välisühendusi (vaikeväärtus: 1 kui puudub -proxy või -connect)</translation>
</message>
<message>
<location line="-28"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>RPC pordi %u kuulamiseks seadistamisel ilmnes viga IPv6'l, lülitumine tagasi IPv4'le : %s</translation>
</message>
<message>
<location line="+93"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Hoiatus: -paytxfee on seatud väga kõrgeks! See on sinu poolt makstav tehingu lisatasu.</translation>
</message>
<message>
<location line="-103"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Hopecoin will not work properly.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+130"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation>Hoiatus: ilmnes tõrge wallet.dat faili lugemisel! Võtmed on terved, kuid tehingu andmed või aadressiraamatu kirjed võivad olla kadunud või vigased.</translation>
</message>
<message>
<location line="-16"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>Hoiatus: toimus wallet.dat faili andmete päästmine! Originaal wallet.dat nimetati kaustas %s ümber wallet.{ajatempel}.bak'iks, jäägi või tehingute ebakõlade puhul tuleks teha backup'ist taastamine.</translation>
</message>
<message>
<location line="-34"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>Püüa vigasest wallet.dat failist taastada turvavõtmed</translation>
</message>
<message>
<location line="+5"/>
<source>Block creation options:</source>
        <translation>Bloki loomise valikud:</translation>
</message>
<message>
<location line="-67"/>
<source>Connect only to the specified node(s)</source>
<translation>Ühendu ainult määratud node'i(de)ga</translation>
</message>
<message>
<location line="+4"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>Leia oma IP aadress (vaikeväärtus: 1, kui kuulatakse ning puudub -externalip)</translation>
</message>
<message>
<location line="+101"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>Pordi kuulamine nurjus. Soovikorral kasuta -listen=0.</translation>
</message>
<message>
<location line="-2"/>
<source>Invalid -tor address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Invalid amount for -reservebalance=<amount></source>
<translation type="unfinished"/>
</message>
<message>
<location line="-89"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>Maksimaalne saamise puhver -connection kohta , <n>*1000 baiti (vaikeväärtus: 5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>Maksimaalne saatmise puhver -connection kohta , <n>*1000 baiti (vaikeväärtus: 1000)</translation>
</message>
<message>
<location line="-16"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>Ühenda ainult node'idega <net> võrgus (IPv4, IPv6 või Tor)</translation>
</message>
<message>
<location line="+30"/>
<source>Prepend debug output with timestamp</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation>SSL valikud: (vaata Bitcoini Wikist või SSL sätete juhendist)</translation>
</message>
<message>
<location line="-38"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Saada jälitus/debug, debug.log faili asemel, konsooli</translation>
</message>
<message>
<location line="+34"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>Sea minimaalne bloki suurus baitides (vaikeväärtus: 0)</translation>
</message>
<message>
<location line="-34"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>Kahanda programmi käivitamisel debug.log faili (vaikeväärtus: 1, kui ei ole -debug)</translation>
</message>
<message>
<location line="-41"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>Sea ühenduse timeout millisekundites (vaikeväärtus: 5000)</translation>
</message>
<message>
<location line="+28"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>Kasuta kuulatava pordi määramiseks UPnP ühendust (vaikeväärtus: 0)</translation>
</message>
<message>
<location line="-1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Kasuta kuulatava pordi määramiseks UPnP ühendust (vaikeväärtus: 1, kui kuulatakse)</translation>
</message>
<message>
<location line="-25"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+45"/>
<source>Username for JSON-RPC connections</source>
<translation>JSON-RPC ühenduste kasutajatunnus</translation>
</message>
<message>
<location line="+54"/>
<source>Verifying database integrity...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+42"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>Hoiatus: versioon on aegunud, uuendus on nõutav!</translation>
</message>
<message>
<location line="-52"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation>wallet.dat fail on katki, päästmine ebaõnnestus</translation>
</message>
<message>
<location line="-59"/>
<source>Password for JSON-RPC connections</source>
<translation>JSON-RPC ühenduste salasõna</translation>
</message>
<message>
<location line="-47"/>
<source>Connect through SOCKS5 proxy</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Output debugging information (default: 0, supplying <category> is optional)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>If <category> is not supplied, output all debugging information.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source><category> can be:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Enter regression test mode, which uses a special chain in which blocks can be solved instantly. This is intended for regression testing tools and app development.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>JSON-RPC ühenduste lubamine kindla IP pealt</translation>
</message>
<message>
<location line="+1"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>Saada käsklusi node'ile IP'ga <ip> (vaikeväärtus: 127.0.0.1)</translation>
</message>
<message>
<location line="+1"/>
<source>Wait for RPC server to start</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Käivita käsklus, kui parim plokk muutub (käskluse %s asendatakse ploki hash'iga)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>Käivita käsklus, kui rahakoti tehing muutub (%s cmd's muudetakse TxID'ks)</translation>
</message>
<message>
<location line="+3"/>
<source>Require a confirmations for change (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Upgrade wallet to latest format</source>
<translation>Uuenda rahakott uusimasse vormingusse</translation>
</message>
<message>
<location line="+1"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>Sea võtmete hulgaks <n> (vaikeväärtus: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Otsi ploki jadast rahakoti kadunud tehinguid</translation>
</message>
<message>
<location line="+3"/>
<source>How thorough the block verification is (0-6, default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Imports blocks from external blk000?.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Keep at most <n> MiB of unconnectable blocks in memory (default: %u)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>Kasuta JSON-RPC ühenduste jaoks OpenSSL'i (https)</translation>
</message>
<message>
<location line="+1"/>
<source>Server certificate file (default: server.cert)</source>
<translation>Serveri sertifikaadifail (vaikeväärtus: server.cert)</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>Serveri privaatvõti (vaikeväärtus: server.pem)</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Unsupported argument -socks found. Setting SOCKS version isn't possible anymore, only SOCKS5 proxies are supported.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Initialization sanity check failed. Hopecoin is shutting down.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Error loading block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>Error: Wallet unlocked for staking only, unable to create transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Error: Disk space is low!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-168"/>
<source>This help message</source>
<translation>Käesolev abitekst</translation>
</message>
<message>
<location line="+104"/>
<source>Wallet %s resides outside data directory %s.</source>
<translation>Rahakott %s paikneb väljaspool kataloogi %s.</translation>
</message>
<message>
<location line="+35"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>Selle arvutiga ei ole võimalik siduda %s külge (katse nurjus %d, %s tõttu)</translation>
</message>
<message>
<location line="-129"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>-addnode, -seednode ja -connect tohivad kasutada DNS lookup'i</translation>
</message>
<message>
<location line="+125"/>
<source>Loading addresses...</source>
<translation>Aadresside laadimine...</translation>
</message>
<message>
<location line="-10"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>Viga wallet.dat käivitamisel. Vigane rahakott</translation>
</message>
<message>
<location line="+4"/>
<source>Error loading wallet.dat: Wallet requires newer version of Hopecoin</source>
<translation>Viga faili wallet.dat laadimisel: rahakott vajab Hopecoin'i uuemat versiooni.</translation>
</message>
<message>
<location line="+1"/>
<source>Wallet needed to be rewritten: restart Hopecoin to complete</source>
<translation>Rahakott on vaja üle kirjutada: käivita Hopecoin uuesti toimingu lõpetamiseks</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat</source>
<translation>Viga wallet.dat käivitamisel</translation>
</message>
<message>
<location line="-15"/>
<source>Invalid -proxy address: '%s'</source>
<translation>Vigane -proxi aadress: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>Kirjeldatud tundmatu võrgustik -onlynet'is: '%s'</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>Tundmatu -bind aadress: '%s'</translation>
</message>
<message>
<location line="+2"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>Tundmatu -externalip aadress: '%s'</translation>
</message>
<message>
<location line="-22"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>-paytxfee=<amount> jaoks vigane kogus: '%s'</translation>
</message>
<message>
<location line="+58"/>
<source>Sending...</source>
<translation>Saatmine...</translation>
</message>
<message>
<location line="+5"/>
<source>Invalid amount</source>
<translation>Kehtetu summa</translation>
</message>
<message>
<location line="+1"/>
<source>Insufficient funds</source>
<translation>Liiga suur summa</translation>
</message>
<message>
<location line="-40"/>
<source>Loading block index...</source>
<translation>Klotside indeksi laadimine...</translation>
</message>
<message>
<location line="-109"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Lisa node ning hoia ühendus avatud</translation>
</message>
<message>
<location line="+124"/>
<source>Unable to bind to %s on this computer. Hopecoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-101"/>
<source>Fee per KB to add to transactions you send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+33"/>
<source>Minimize weight consumption (experimental) (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>How many blocks to check at startup (default: 500, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Acceptable ciphers (default: TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!3DES:@STRENGTH)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: Deprecated argument -debugnet ignored, use -debug=net</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Invalid amount for -mininput=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. Hopecoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error initializing wallet database environment %s!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Loading wallet...</source>
<translation>Rahakoti laadimine...</translation>
</message>
<message>
<location line="+8"/>
<source>Cannot downgrade wallet</source>
<translation>Rahakoti vanandamine ebaõnnestus</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot write default address</source>
<translation>Tõrge vaikimisi aadressi kirjutamisel</translation>
</message>
<message>
<location line="+1"/>
<source>Rescanning...</source>
<translation>Üleskaneerimine...</translation>
</message>
<message>
<location line="+2"/>
<source>Done loading</source>
<translation>Laetud</translation>
</message>
<message>
<location line="-159"/>
<source>To use the %s option</source>
<translation>%s valiku kasutamine</translation>
</message>
<message>
<location line="+186"/>
<source>Error</source>
<translation>Tõrge</translation>
</message>
<message>
<location line="-18"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>rpcpassword=<password> peab sätete failis olema seadistatud:
%s
Kui seda faili ei ole, loo see ainult-omanikule-lugemiseks faili õigustes.</translation>
</message>
</context>
</TS> | <translation type="unfinished"/>
</message> |
api_op_BuildBotLocale.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package lexmodelsv2
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/lexmodelsv2/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
"time"
)
// Builds a bot, its intents, and its slot types into a specific locale. A bot can
// be built into multiple locales. At runtime the locale is used to choose a
// specific build of the bot.
func (c *Client) BuildBotLocale(ctx context.Context, params *BuildBotLocaleInput, optFns ...func(*Options)) (*BuildBotLocaleOutput, error) {
if params == nil |
result, metadata, err := c.invokeOperation(ctx, "BuildBotLocale", params, optFns, c.addOperationBuildBotLocaleMiddlewares)
if err != nil {
return nil, err
}
out := result.(*BuildBotLocaleOutput)
out.ResultMetadata = metadata
return out, nil
}
// BuildBotLocaleInput carries the identifiers that select which bot, bot
// version, and locale the BuildBotLocale operation should build.
type BuildBotLocaleInput struct {

	// The identifier of the bot to build. The identifier is returned in the response
	// from the operation.
	//
	// This member is required.
	BotId *string

	// The version of the bot to build. This can only be the draft version of the bot.
	//
	// This member is required.
	BotVersion *string

	// The identifier of the language and locale that the bot will be used in. The
	// string must match one of the supported locales. All of the intents, slot types,
	// and slots used in the bot must have the same locale. For more information, see
	// Supported languages
	// (https://docs.aws.amazon.com/lexv2/latest/dg/how-languages.html).
	//
	// This member is required.
	LocaleId *string
}
// BuildBotLocaleOutput describes the result of a BuildBotLocale request,
// echoing the bot identifiers and reporting the current build status.
type BuildBotLocaleOutput struct {

	// The identifier of the specified bot.
	BotId *string

	// The bot's build status. When the status is ReadyExpressTesting you can test the
	// bot using the utterances defined for the intents and slot types. When the status
	// is Built, the bot is ready for use and can be tested using any utterance.
	BotLocaleStatus types.BotLocaleStatus

	// The version of the bot that was built. This is only the draft version of the
	// bot.
	BotVersion *string

	// A timestamp indicating the date and time that the bot was last built for this
	// locale.
	LastBuildSubmittedDateTime *time.Time

	// The identifier of the language and locale that the build applies to.
	LocaleId *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}
// addOperationBuildBotLocaleMiddlewares registers the complete middleware
// stack for the BuildBotLocale operation: REST-JSON (de)serialization,
// request identification, endpoint resolution, SigV4 signing, retries,
// input validation, and logging. Registration order is significant; the
// function returns the first registration error encountered.
func (c *Client) addOperationBuildBotLocaleMiddlewares(stack *middleware.Stack, options Options) (err error) {
	// Wire-format serializer and deserializer for this operation.
	err = stack.Serialize.Add(&awsRestjson1_serializeOpBuildBotLocale{}, middleware.After)
	if err != nil {
		return err
	}
	err = stack.Deserialize.Add(&awsRestjson1_deserializeOpBuildBotLocale{}, middleware.After)
	if err != nil {
		return err
	}
	// Cross-cutting client behavior: logging, request IDs, content length,
	// endpoint resolution, payload hashing, retries, and signing.
	if err = addSetLoggerMiddleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
		return err
	}
	if err = addResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
		return err
	}
	if err = addRetryMiddlewares(stack, options); err != nil {
		return err
	}
	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
		return err
	}
	// Response handling: metadata capture, timing, and body cleanup.
	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
		return err
	}
	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
		return err
	}
	if err = addClientUserAgent(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	// Operation-specific validation and service metadata must run before the
	// request is built; request-ID retrieval and error decoration run after.
	if err = addOpBuildBotLocaleValidationMiddleware(stack); err != nil {
		return err
	}
	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBuildBotLocale(options.Region), middleware.Before); err != nil {
		return err
	}
	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
		return err
	}
	if err = addResponseErrorMiddleware(stack); err != nil {
		return err
	}
	if err = addRequestResponseLogging(stack, options); err != nil {
		return err
	}
	return nil
}
// newServiceMetadataMiddleware_opBuildBotLocale returns the middleware that
// records the service metadata (region, service ID, signing name, and
// operation name) for the BuildBotLocale operation.
func newServiceMetadataMiddleware_opBuildBotLocale(region string) *awsmiddleware.RegisterServiceMetadata {
	metadata := awsmiddleware.RegisterServiceMetadata{
		Region:        region,
		ServiceID:     ServiceID,
		SigningName:   "lex",
		OperationName: "BuildBotLocale",
	}
	return &metadata
}
| {
params = &BuildBotLocaleInput{}
} |
operation_deser.rs | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Maps an unsuccessful `CreateRule` HTTP response onto the modeled
/// `CreateRuleError` variants by inspecting the error code of the generic
/// JSON error envelope; unknown codes become an unhandled/generic error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_rule_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::CreateRuleOutput, crate::error::CreateRuleError> {
    // Decode the protocol-level error envelope (code + message) first.
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::CreateRuleError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        // Without an error code the failure cannot be classified.
        None => return Err(crate::error::CreateRuleError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "InternalServerException" => crate::error::CreateRuleError {
            meta: generic,
            kind: crate::error::CreateRuleErrorKind::InternalServerException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_server_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_server_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateRuleError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body carried none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ServiceQuotaExceededException" => crate::error::CreateRuleError {
            meta: generic,
            kind: crate::error::CreateRuleErrorKind::ServiceQuotaExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output =
                        crate::error::service_quota_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_service_quota_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateRuleError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body carried none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::CreateRuleError {
            meta: generic,
            kind: crate::error::CreateRuleErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateRuleError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body carried none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Any code not modeled for this operation is surfaced as-is.
        _ => crate::error::CreateRuleError::generic(generic),
    })
}
/// Deserializes a successful `CreateRule` HTTP response body into a
/// `CreateRuleOutput`; any JSON decoding failure is reported as an
/// unhandled `CreateRuleError`.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_rule_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::CreateRuleOutput, crate::error::CreateRuleError> {
    let body = response.body().as_ref();
    let builder = crate::output::create_rule_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_create_rule(body, builder)
        .map_err(crate::error::CreateRuleError::unhandled)?;
    Ok(builder.build())
}
/// Maps an unsuccessful `DeleteRule` HTTP response onto the modeled
/// `DeleteRuleError` variants by inspecting the error code of the generic
/// JSON error envelope; unknown codes become an unhandled/generic error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_rule_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteRuleOutput, crate::error::DeleteRuleError> {
    // Decode the protocol-level error envelope (code + message) first.
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::DeleteRuleError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        // Without an error code the failure cannot be classified.
        None => return Err(crate::error::DeleteRuleError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "InternalServerException" => crate::error::DeleteRuleError {
            meta: generic,
            kind: crate::error::DeleteRuleErrorKind::InternalServerException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_server_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_server_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteRuleError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body carried none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ResourceNotFoundException" => crate::error::DeleteRuleError {
            meta: generic,
            kind: crate::error::DeleteRuleErrorKind::ResourceNotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::resource_not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteRuleError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body carried none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::DeleteRuleError {
            meta: generic,
            kind: crate::error::DeleteRuleErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteRuleError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body carried none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Any code not modeled for this operation is surfaced as-is.
        _ => crate::error::DeleteRuleError::generic(generic),
    })
}
/// Builds an empty `DeleteRuleOutput` for a successful `DeleteRule`
/// response; the operation has no modeled output fields, so the response
/// body is ignored.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_rule_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteRuleOutput, crate::error::DeleteRuleError> {
    let _ = response; // nothing to read from the body
    Ok(crate::output::delete_rule_output::Builder::default().build())
}
/// Maps an unsuccessful `GetRule` HTTP response onto the modeled
/// `GetRuleError` variants by inspecting the error code of the generic
/// JSON error envelope; unknown codes become an unhandled/generic error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_rule_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetRuleOutput, crate::error::GetRuleError> {
    // Decode the protocol-level error envelope (code + message) first.
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::GetRuleError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        // Without an error code the failure cannot be classified.
        None => return Err(crate::error::GetRuleError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "InternalServerException" => crate::error::GetRuleError {
            meta: generic,
            kind: crate::error::GetRuleErrorKind::InternalServerException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_server_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_server_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetRuleError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body carried none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ResourceNotFoundException" => crate::error::GetRuleError {
            meta: generic,
            kind: crate::error::GetRuleErrorKind::ResourceNotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::resource_not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetRuleError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body carried none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::GetRuleError {
            meta: generic,
            kind: crate::error::GetRuleErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetRuleError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body carried none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Any code not modeled for this operation is surfaced as-is.
        _ => crate::error::GetRuleError::generic(generic),
    })
}
/// Deserializes a successful `GetRule` HTTP response body into a
/// `GetRuleOutput`; any JSON decoding failure is reported as an unhandled
/// `GetRuleError`.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_rule_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetRuleOutput, crate::error::GetRuleError> {
    let body = response.body().as_ref();
    let builder = crate::output::get_rule_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_get_rule(body, builder)
        .map_err(crate::error::GetRuleError::unhandled)?;
    Ok(builder.build())
}
/// Maps an unsuccessful `ListRules` HTTP response onto the modeled
/// `ListRulesError` variants by inspecting the error code of the generic
/// JSON error envelope; unknown codes become an unhandled/generic error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_rules_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListRulesOutput, crate::error::ListRulesError> {
    // Decode the protocol-level error envelope (code + message) first.
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::ListRulesError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        // Without an error code the failure cannot be classified.
        None => return Err(crate::error::ListRulesError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "InternalServerException" => crate::error::ListRulesError {
            meta: generic,
            kind: crate::error::ListRulesErrorKind::InternalServerException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_server_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_server_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListRulesError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body carried none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::ListRulesError {
            meta: generic,
            kind: crate::error::ListRulesErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListRulesError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body carried none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Any code not modeled for this operation is surfaced as-is.
        _ => crate::error::ListRulesError::generic(generic),
    })
}
/// Deserializes a successful `ListRules` HTTP response body into a
/// `ListRulesOutput`; any JSON decoding failure is reported as an unhandled
/// `ListRulesError`.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_rules_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListRulesOutput, crate::error::ListRulesError> {
    let body = response.body().as_ref();
    let builder = crate::output::list_rules_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_list_rules(body, builder)
        .map_err(crate::error::ListRulesError::unhandled)?;
    Ok(builder.build())
}
/// Maps an unsuccessful `ListTagsForResource` HTTP response onto the modeled
/// `ListTagsForResourceError` variants by inspecting the error code of the
/// generic JSON error envelope; unknown codes become an unhandled/generic
/// error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_tags_for_resource_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::ListTagsForResourceOutput,
    crate::error::ListTagsForResourceError,
> {
    // Decode the protocol-level error envelope (code + message) first.
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::ListTagsForResourceError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        // Without an error code the failure cannot be classified.
        None => return Err(crate::error::ListTagsForResourceError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "InternalServerException" => crate::error::ListTagsForResourceError {
            meta: generic,
            kind: crate::error::ListTagsForResourceErrorKind::InternalServerException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_server_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_server_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListTagsForResourceError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body carried none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ResourceNotFoundException" => crate::error::ListTagsForResourceError {
            meta: generic,
            kind: crate::error::ListTagsForResourceErrorKind::ResourceNotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::resource_not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListTagsForResourceError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body carried none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::ListTagsForResourceError {
            meta: generic,
            kind: crate::error::ListTagsForResourceErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListTagsForResourceError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body carried none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Any code not modeled for this operation is surfaced as-is.
        _ => crate::error::ListTagsForResourceError::generic(generic),
    })
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_tags_for_resource_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListTagsForResourceOutput,
crate::error::ListTagsForResourceError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::list_tags_for_resource_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_crate_operation_list_tags_for_resource(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListTagsForResourceError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_tag_resource_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::TagResourceOutput, crate::error::TagResourceError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::TagResourceError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::TagResourceError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalServerException" => crate::error::TagResourceError {
meta: generic,
kind: crate::error::TagResourceErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_server_exception_json_err(response.body().as_ref(), output).map_err(crate::error::TagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => crate::error::TagResourceError {
meta: generic,
kind: crate::error::TagResourceErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::TagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ServiceQuotaExceededException" => crate::error::TagResourceError {
meta: generic,
kind: crate::error::TagResourceErrorKind::ServiceQuotaExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_quota_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_service_quota_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::TagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::TagResourceError {
meta: generic,
kind: crate::error::TagResourceErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::TagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::TagResourceError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn | (
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::TagResourceOutput, crate::error::TagResourceError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::tag_resource_output::Builder::default();
let _ = response;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_untag_resource_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UntagResourceOutput, crate::error::UntagResourceError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::UntagResourceError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::UntagResourceError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalServerException" => crate::error::UntagResourceError {
meta: generic,
kind: crate::error::UntagResourceErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_server_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UntagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => crate::error::UntagResourceError {
meta: generic,
kind: crate::error::UntagResourceErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UntagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::UntagResourceError {
meta: generic,
kind: crate::error::UntagResourceErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UntagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::UntagResourceError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_untag_resource_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UntagResourceOutput, crate::error::UntagResourceError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::untag_resource_output::Builder::default();
let _ = response;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_rule_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UpdateRuleOutput, crate::error::UpdateRuleError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::UpdateRuleError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::UpdateRuleError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalServerException" => crate::error::UpdateRuleError {
meta: generic,
kind: crate::error::UpdateRuleErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_server_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UpdateRuleError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => crate::error::UpdateRuleError {
meta: generic,
kind: crate::error::UpdateRuleErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UpdateRuleError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::UpdateRuleError {
meta: generic,
kind: crate::error::UpdateRuleErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UpdateRuleError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::UpdateRuleError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_rule_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UpdateRuleOutput, crate::error::UpdateRuleError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::update_rule_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_crate_operation_update_rule(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateRuleError::unhandled)?;
output.build()
})
}
| parse_tag_resource_response |
StringHandle.rs | // This file is part of intel-seapi. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/intel-seapi/master/COPYRIGHT. No part of intel-seapi, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2018 The developers of intel-seapi. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/intel-seapi/master/COPYRIGHT.
/// Represents a 'String Handle'.
///
/// Can never be destroyed or free'd.
///
/// Available to any thread, irrespective of which thread created it.
///
/// Calling `new()` again with the same value of the `name` parameter will cause a reference to an already created string handle to be returned; this is useful for instantiating the same string handle on different threads without having to pass data between them.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct S | NonNull<__itt_string_handle >);
impl<'a> From<&'a str> for StringHandle
{
#[inline(always)]
fn from(name: &'a str) -> Self
{
Self::new(name)
}
}
impl StringHandle
{
/// Name can be almost anything (although it must not contain ASCII NUL), but a URI or Java-like style of `com.my_company.my_application` is recommended by Intel.
///
/// Call is thread-safe.
///
/// Calling more than once with the same `name` parameter will return a reference to the same string handle created for the first call.
#[cfg(unix)]
#[inline(always)]
pub fn new(name: &str) -> Self
{
let name = CString::new(name).unwrap();
let inner = unsafe { __itt_string_handle_create(name.as_ptr()) };
assert!(!inner.is_null());
StringHandle(unsafe { NonNull::new_unchecked(inner)})
}
/// Name can be almost anything (although it must not contain ASCII NUL), but a URI or Java-like style of `com.my_company.my_application` is recommended by Intel.
///
/// Call is thread-safe.
///
/// Calling more than once with the same `name` parameter will return a reference to the same string handle created for the first call.
#[cfg(windows)]
#[inline(always)]
pub fn new(name: &str) -> Result<Self, ()>
{
let name = CString::new(name).unwrap();
let inner = unsafe { __itt_string_handle_createA(name.as_ptr()) };
if inner.is_null()
{
Err(())
}
else
{
Ok(StringHandle(unsafe { NonNull::new_unchecked(inner)}))
}
}
#[inline(always)]
fn mutable_pointer(self) -> *mut __itt_string_handle
{
self.0.as_ptr()
}
}
| tringHandle( |
bind-decorator.acceptance.ts | // Copyright IBM Corp. 2019. All Rights Reserved.
// Node module: @tib/context
// This file is licensed under the MIT License.
// License text available at https://opensource.org/licenses/MIT
import {expect} from '@tib/testlab';
import {
bind,
BindingScope,
Context,
createBindingFromClass,
Provider,
} from '../..';
describe('@bind - customize classes with binding attributes', () => {
@bind({
scope: BindingScope.SINGLETON,
tags: ['service'],
})
class MyService {}
@bind.provider({
tags: {
key: 'my-date-provider',
},
})
class MyDateProvider implements Provider<Date> {
value() {
return new Date();
}
}
@bind({
tags: ['controller', {name: 'my-controller', type: 'controller'}],
})
class MyController {}
const discoveredClasses = [MyService, MyDateProvider, MyController];
it('allows discovery of classes to be bound', () => {
const ctx = new Context();
discoveredClasses.forEach(c => {
const binding = createBindingFromClass(c);
if (binding.tagMap.controller) {
ctx.add(binding);
}
});
expect(ctx.findByTag('controller').map(b => b.key)).eql([
'controllers.my-controller',
]);
expect(ctx.find().map(b => b.key)).eql(['controllers.my-controller']);
});
it('allows binding attributes to be customized', () => {
const ctx = new Context();
discoveredClasses.forEach(c => {
const binding = createBindingFromClass(c, {
typeNamespaceMapping: {
controller: 'controllers',
service: 'service-proxies',
},
});
ctx.add(binding);
});
expect(ctx.findByTag('provider').map(b => b.key)).eql(['my-date-provider']);
expect(ctx.getBinding('service-proxies.MyService').scope).to.eql(
BindingScope.SINGLETON,
);
expect(ctx.find().map(b => b.key)).eql([
'service-proxies.MyService',
'my-date-provider',
'controllers.my-controller',
]);
});
it('supports default binding scope in options', () => {
const binding = createBindingFromClass(MyController, {
defaultScope: BindingScope.SINGLETON,
});
expect(binding.scope).to.equal(BindingScope.SINGLETON);
});
describe('binding scope', () => {
@bind({
// Explicitly set the binding scope to be `SINGLETON` as the developer
// choose to implement the controller as a singleton without depending
// on request specific information
scope: BindingScope.SINGLETON,
})
class | {}
it('allows singleton controller with @bind', () => {
const binding = createBindingFromClass(MySingletonController, {
type: 'controller',
});
expect(binding.key).to.equal('controllers.MySingletonController');
expect(binding.tagMap).to.containEql({controller: 'controller'});
expect(binding.scope).to.equal(BindingScope.SINGLETON);
});
it('honors binding scope from @bind over defaultScope', () => {
const binding = createBindingFromClass(MySingletonController, {
defaultScope: BindingScope.TRANSIENT,
});
expect(binding.scope).to.equal(BindingScope.SINGLETON);
});
it('honors binding scope from @bind', () => {
const binding = createBindingFromClass(MySingletonController);
expect(binding.scope).to.equal(BindingScope.SINGLETON);
});
});
});
| MySingletonController |
main.py | from http import cookies
from Extractor import Extractor
from context import Context
import networkx as nx
from facebook_scraper import get_posts,get_friends,get_profile,get_group_info
cxt=Context(account,creds,limit_post,limit_friends,max,post,False,True)
#print(get_profile("100009975842374"))
#print(get_group_info("journalmaracanaalgerie") ) | ex =Extractor('Fb',cxt,Schema,cookie)
ex.create_Graphe_friends(file_graphe,cxt,Schema,cookie)
#ex.create_Graphe_group(file_graphe,cxt,Schema,cookies) | |
index.tsx | import React from 'react';
import whatsappIcon from '../../assets/icons/whatsapp.svg'
import './styles.css'
import api from '../../services/api';
export interface Teacher {
id: number;
avatar: string;
bio: string;
cost: number;
name: string;
subject: string;
whatsapp: string;
}
interface TeacherItemProps {
teacher: Teacher;
}
const TeacherItem: React.FC<TeacherItemProps> = ({ teacher }) => {
function | (){
api.post('connections', {
user_id: teacher.id,
})
}
return(
<article className="teacher-item">
<header>
<img src={teacher.avatar} alt={teacher.name} />
<div>
<strong>{teacher.name}</strong>
<span> {teacher.subject} </span>
</div>
</header>
<p>{teacher.bio}</p>
<footer>
<p>
Preço/hora
<strong>{teacher.cost}</strong>
</p>
<a target="_blank" onClick={createNewConnection} href={`https://wa.me/${teacher.whatsapp}`}>
<img src={whatsappIcon} alt="Whatsapp" />
Entrar em contato
</a>
</footer>
</article>
);
}
export default TeacherItem; | createNewConnection |
externref.rs | //! Small example of how to use `externref`s.
// You can execute this example with `cargo run --example externref`
use anyhow::Result;
use wasmtime::*;
fn main() -> Result<()> | {
println!("Initializing...");
let mut config = Config::new();
config.wasm_reference_types(true);
let engine = Engine::new(&config)?;
let mut store = Store::new(&engine, ());
println!("Compiling module...");
let module = Module::from_file(&engine, "examples/externref.wat")?;
println!("Instantiating module...");
let instance = Instance::new(&mut store, &module, &[])?;
println!("Creating new `externref`...");
let externref = ExternRef::new("Hello, World!");
assert!(externref.data().is::<&'static str>());
assert_eq!(
*externref.data().downcast_ref::<&'static str>().unwrap(),
"Hello, World!"
);
println!("Touching `externref` table...");
let table = instance.get_table(&mut store, "table").unwrap();
table.set(&mut store, 3, Some(externref.clone()).into())?;
let elem = table
.get(&mut store, 3)
.unwrap() // assert in bounds
.unwrap_externref() // assert it's an externref table
.unwrap(); // assert the externref isn't null
assert!(elem.ptr_eq(&externref));
println!("Touching `externref` global...");
let global = instance.get_global(&mut store, "global").unwrap();
global.set(&mut store, Some(externref.clone()).into())?;
let global_val = global.get(&mut store).unwrap_externref().unwrap();
assert!(global_val.ptr_eq(&externref));
println!("Calling `externref` func...");
let func =
instance.get_typed_func::<Option<ExternRef>, Option<ExternRef>, _>(&mut store, "func")?;
let ret = func.call(&mut store, Some(externref.clone()))?;
assert!(ret.is_some());
assert!(ret.unwrap().ptr_eq(&externref));
println!("GCing within the store...");
store.gc();
println!("Done.");
Ok(())
} |
|
shared_frn_all_table_dark_01.py | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/furniture/jedi/shared_frn_all_table_dark_01.iff"
result.attribute_template_id = 6 |
return result | result.stfName("frn_n","frn_all_jedi_table")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS #### |
mysql.go | package mysql
import (
"context"
"database/sql"
_ "github.com/go-sql-driver/mysql"
"log"
"time"
)
type (
ConnConfig struct {
ConnStr string `cnf:",NA"`
MaxOpen int `cnf:",def=100,range=[1:1000]"`
MaxIdle int `cnf:",NA"`
}
MSqlX struct {
Cli *sql.DB
Ctx context.Context
}
)
func | (cf *ConnConfig) *MSqlX {
mysqlX := MSqlX{Ctx: context.Background()}
db, err := sql.Open("mysql", cf.ConnStr)
if err != nil {
log.Fatalf("Conn %s err: %s", cf.ConnStr, err)
}
// See "Important settings" section.
db.SetConnMaxLifetime(time.Minute * 3)
db.SetMaxOpenConns(cf.MaxOpen)
db.SetMaxIdleConns(cf.MaxIdle)
mysqlX.Cli = db
return &mysqlX
}
| NewMysqlConn |
network.py | import pytest
from goji.util.network import get_host_addr
class | :
@pytest.mark.asyncio
async def test_get_host_addr4(self):
# Run these tests forcing IPv4 resolution
prefer_ipv6 = False
assert get_host_addr("127.0.0.1", prefer_ipv6) == "127.0.0.1"
assert get_host_addr("10.11.12.13", prefer_ipv6) == "10.11.12.13"
assert get_host_addr("localhost", prefer_ipv6) == "127.0.0.1"
assert get_host_addr("example.net", prefer_ipv6) == "93.184.216.34"
@pytest.mark.asyncio
async def test_get_host_addr6(self):
# Run these tests forcing IPv6 resolution
prefer_ipv6 = True
assert get_host_addr("::1", prefer_ipv6) == "::1"
assert get_host_addr("2000:1000::1234:abcd", prefer_ipv6) == "2000:1000::1234:abcd"
# ip6-localhost is not always available, and localhost is IPv4 only
# on some systems. Just test neither here.
# assert get_host_addr("ip6-localhost", prefer_ipv6) == "::1"
# assert get_host_addr("localhost", prefer_ipv6) == "::1"
assert get_host_addr("example.net", prefer_ipv6) == "2606:2800:220:1:248:1893:25c8:1946"
| TestNetwork |
panorama_get_addresses.py | # -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
"""Function implementation"""
import logging
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
from resilient_lib import ResultPayload
from fn_pa_panorama.util.panorama_util import PanoramaClient
log = logging.getLogger(__name__)
class FunctionComponent(ResilientComponent):
| """Component that implements Resilient function 'panorama_edit_address_group"""
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(FunctionComponent, self).__init__(opts)
self.options = opts.get("fn_pa_panorama", {})
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self.opts = opts
self.options = opts.get("fn_pa_panorama", {})
@function("panorama_get_addresses")
def _panorama_get_addresses_function(self, event, *args, **kwargs):
"""Function: Panorama get addresses returns a list of the address objects"""
try:
yield StatusMessage("Getting list of addresses")
rp = ResultPayload("fn_pa_panorama", **kwargs)
# Get the function parameters:
location = self.get_select_param(kwargs.get("panorama_location")) # select
vsys = kwargs.get("panorama_vsys") # text
# Log inputs
if location is None:
raise ValueError("panorama_location needs to be set.")
log.info("panorama_location: {}".format(location))
log.info("panorama_vsys: {}".format(vsys))
panorama_util = PanoramaClient(self.opts, location, vsys)
response = panorama_util.get_addresses()
yield StatusMessage("{} addresses returned.".format(response["result"]["@count"]))
results = rp.done(True, response)
# Produce a FunctionResult with the results
yield FunctionResult(results)
except Exception as e:
yield FunctionError(e) |
|
reconcile_backup_job.go | /*
Copyright 2019 PlanetScale Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vitessshard
import (
"context"
"time"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apilabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
planetscalev2 "planetscale.dev/vitess-operator/pkg/apis/planetscale/v2"
"planetscale.dev/vitess-operator/pkg/operator/reconciler"
"planetscale.dev/vitess-operator/pkg/operator/results"
"planetscale.dev/vitess-operator/pkg/operator/update"
"planetscale.dev/vitess-operator/pkg/operator/vitessbackup"
"planetscale.dev/vitess-operator/pkg/operator/vttablet"
)
func (r *ReconcileVitessShard) reconcileBackupJob(ctx context.Context, vts *planetscalev2.VitessShard) (reconcile.Result, error) {
resultBuilder := &results.Builder{}
// Break early if we find we are using an externally managed MySQL, or if any tablet pools have nil for Mysqld,
// because we should not be configuring backups in either case.
if vts.Spec.UsingExternalDatastore() || !vts.Spec.AllPoolsUsingMysqld() {
return resultBuilder.Result()
}
clusterName := vts.Labels[planetscalev2.ClusterLabel]
keyspaceName := vts.Labels[planetscalev2.KeyspaceLabel]
shardSafeName := vts.Spec.KeyRange.SafeName()
labels := map[string]string{
planetscalev2.ComponentLabel: planetscalev2.VtbackupComponentName,
planetscalev2.ClusterLabel: clusterName,
planetscalev2.KeyspaceLabel: keyspaceName,
planetscalev2.ShardLabel: shardSafeName,
vitessbackup.TypeLabel: vitessbackup.TypeInit,
}
// List all backups for this shard, across all storage locations.
// We'll use the latest observed state of backups to decide whether to take
// a new one. This list could be out of date because it's populated by
// polling the Vitess API (see the VitessBackupStorage controller), but as
// long as it's eventually consistent, we'll converge to the right behavior.
allBackups := &planetscalev2.VitessBackupList{}
listOpts := &client.ListOptions{
Namespace: vts.Namespace,
LabelSelector: apilabels.SelectorFromSet(apilabels.Set{
planetscalev2.ClusterLabel: clusterName,
planetscalev2.KeyspaceLabel: keyspaceName,
planetscalev2.ShardLabel: shardSafeName,
}),
}
if err := r.client.List(ctx, allBackups, listOpts); err != nil {
return resultBuilder.Error(err)
}
updateBackupStatus(vts, allBackups.Items)
// Here we only care about complete backups.
completeBackups := vitessbackup.CompleteBackups(allBackups.Items)
// Generate keys (object names) for all desired backup Pods and PVCs.
// Keep a map back from generated names to the backup specs.
podKeys := []client.ObjectKey{}
pvcKeys := []client.ObjectKey{}
specMap := map[client.ObjectKey]*vttablet.BackupSpec{}
// The object name for the initial backup Pod, if we end up needing one.
initPodName := vttablet.InitialBackupPodName(clusterName, keyspaceName, vts.Spec.KeyRange)
initPodKey := client.ObjectKey{
Namespace: vts.Namespace,
Name: initPodName,
}
if len(completeBackups) == 0 {
// Until we see at least one complete backup, we attempt to create an
// "initial backup", which is a special imaginary backup created from
// scratch (not from any tablet). If we're wrong and a backup exists
// already, the idempotent vtbackup "initial backup" mode will just do
// nothing and return success.
initSpec := vtbackupInitSpec(initPodKey, vts, labels)
if initSpec != nil {
podKeys = append(podKeys, initPodKey)
if initSpec.TabletSpec.DataVolumePVCSpec != nil {
pvcKeys = append(pvcKeys, initPodKey)
}
specMap[initPodKey] = initSpec
}
} else {
// We have at least one complete backup already.
vts.Status.HasInitialBackup = corev1.ConditionTrue
}
// Reconcile vtbackup PVCs. Use the same key as the corresponding Pod,
// but only if the Pod expects a PVC.
err := r.reconciler.ReconcileObjectSet(ctx, vts, pvcKeys, labels, reconciler.Strategy{
Kind: &corev1.PersistentVolumeClaim{},
New: func(key client.ObjectKey) runtime.Object {
return vttablet.NewPVC(key, specMap[key].TabletSpec)
},
PrepareForTurndown: func(key client.ObjectKey, obj runtime.Object) *planetscalev2.OrphanStatus {
// Same as reconcileTablets, keep PVCs of Pods in any Phase
pod := &corev1.Pod{}
if getErr := r.client.Get(ctx, key, pod); getErr == nil || !apierrors.IsNotFound(getErr) {
// If the get was successful, the Pod exists and we shouldn't delete the PVC.
// If the get failed for any reason other than NotFound, we don't know if it's safe.
return &planetscalev2.OrphanStatus{
Reason: "BackupRunning",
Message: "Not deleting vtbackup PVC because vtbackup Pod still exists",
}
}
return nil
},
})
if err != nil {
resultBuilder.Error(err)
}
// Reconcile vtbackup Pods.
err = r.reconciler.ReconcileObjectSet(ctx, vts, podKeys, labels, reconciler.Strategy{
Kind: &corev1.Pod{},
New: func(key client.ObjectKey) runtime.Object {
return vttablet.NewBackupPod(key, specMap[key])
},
Status: func(key client.ObjectKey, obj runtime.Object) {
pod := obj.(*corev1.Pod)
// If this status hook is telling us about the special init Pod,
// we can update HasInitialBackup.
if key == initPodKey {
// If the Pod is Suceeded or Failed, we can update status.
// Otherwise, we leave it as Unknown since we can't tell.
switch pod.Status.Phase {
case corev1.PodSucceeded:
vts.Status.HasInitialBackup = corev1.ConditionTrue
case corev1.PodFailed:
vts.Status.HasInitialBackup = corev1.ConditionFalse
}
}
},
PrepareForTurndown: func(key client.ObjectKey, obj runtime.Object) *planetscalev2.OrphanStatus {
// As soon as the new backup is complete, the backup policy logic
// will say the vtbackup Pod is no longer needed. However, we still
// need to give it a chance to finish running because it does
// pruning of old backups after the new backup is complete.
pod := obj.(*corev1.Pod)
if pod.Status.Phase == corev1.PodRunning {
return &planetscalev2.OrphanStatus{
Reason: "BackupRunning",
Message: "Not deleting vtbackup Pod while it's still running",
}
}
return nil
},
})
if err != nil {
resultBuilder.Error(err)
}
return resultBuilder.Result()
}
func vtbackupInitSpec(key client.ObjectKey, vts *planetscalev2.VitessShard, parentLabels map[string]string) *vttablet.BackupSpec {
// If we specifically set our cluster to avoid initial backups, bail early.
if !*vts.Spec.Replication.InitializeBackup {
return nil
}
if len(vts.Spec.TabletPools) == 0 {
// No tablet pools are defined for this shard.
// We don't know enough to make a vtbackup spec.
return nil
}
// Make a vtbackup spec that's a similar shape to the first tablet pool.
// This should give it enough resources to run mysqld and restore a backup,
// since all tablets need to be able to do that, regardless of type.
return vtbackupSpec(key, vts, parentLabels, &vts.Spec.TabletPools[0], vitessbackup.TypeInit)
}
func vtbackupSpec(key client.ObjectKey, vts *planetscalev2.VitessShard, parentLabels map[string]string, pool *planetscalev2.VitessShardTabletPool, backupType string) *vttablet.BackupSpec |
func updateBackupStatus(vts *planetscalev2.VitessShard, allBackups []planetscalev2.VitessBackup) {
// If no backup locations are configured, there's nothing to do.
if len(vts.Spec.BackupLocations) == 0 {
return
}
// Initialize status for each backup location.
locationStatus := map[string]*planetscalev2.ShardBackupLocationStatus{}
for i := range vts.Spec.BackupLocations {
location := &vts.Spec.BackupLocations[i]
status := planetscalev2.NewShardBackupLocationStatus(location.Name)
locationStatus[location.Name] = status
vts.Status.BackupLocations = append(vts.Status.BackupLocations, status)
}
// Report stats on backups, grouped by location.
for i := range allBackups {
backup := &allBackups[i]
locationName := backup.Labels[vitessbackup.LocationLabel]
location := locationStatus[locationName]
if location == nil {
// This is not one of the locations we care about.
continue
}
if backup.Status.Complete {
location.CompleteBackups++
if location.LatestCompleteBackupTime == nil || backup.Status.StartTime.After(location.LatestCompleteBackupTime.Time) {
location.LatestCompleteBackupTime = &backup.Status.StartTime
}
} else {
location.IncompleteBackups++
}
}
}
| {
keyspaceName := vts.Labels[planetscalev2.KeyspaceLabel]
// Find the backup location for this pool.
backupLocation := vts.Spec.BackupLocation(pool.BackupLocationName)
if backupLocation == nil {
// No backup location is configured, so we can't do anything.
return nil
}
// Copy parent labels map and add child-specific labels.
labels := map[string]string{
vitessbackup.LocationLabel: backupLocation.Name,
vitessbackup.TypeLabel: backupType,
}
for k, v := range parentLabels {
labels[k] = v
}
minBackupInterval := time.Duration(0)
minRetentionTime := time.Duration(0)
minRetentionCount := 1
// Allocate a new map so we don't mutate inputs.
annotations := map[string]string{}
update.Annotations(&annotations, pool.Annotations)
update.Annotations(&annotations, backupLocation.Annotations)
// Fill in the parts of a vttablet spec that make sense for vtbackup.
tabletSpec := &vttablet.Spec{
GlobalLockserver: vts.Spec.GlobalLockserver,
Labels: labels,
Images: vts.Spec.Images,
KeyRange: vts.Spec.KeyRange,
Vttablet: &pool.Vttablet,
Mysqld: pool.Mysqld,
DataVolumePVCName: key.Name,
DataVolumePVCSpec: pool.DataVolumeClaimTemplate,
KeyspaceName: keyspaceName,
DatabaseName: vts.Spec.DatabaseName,
DatabaseInitScriptSecret: vts.Spec.DatabaseInitScriptSecret,
BackupLocation: backupLocation,
BackupEngine: vts.Spec.BackupEngine,
InitContainers: pool.InitContainers,
SidecarContainers: pool.SidecarContainers,
ExtraEnv: pool.ExtraEnv,
Annotations: annotations,
Tolerations: pool.Tolerations,
ImagePullSecrets: vts.Spec.ImagePullSecrets,
}
return &vttablet.BackupSpec{
InitialBackup: backupType == vitessbackup.TypeInit,
MinBackupInterval: minBackupInterval,
MinRetentionTime: minRetentionTime,
MinRetentionCount: minRetentionCount,
TabletSpec: tabletSpec,
}
} |
alpha-numeric-message.component.ts | import { Component, OnInit } from '@angular/core';
import { FormGroup, FormBuilder } from "@angular/forms"
import { RxwebValidators } from '@rxweb/reactive-form-validators';
@Component({
selector: 'app-alphaNumeric-message-validator',
templateUrl: './alpha-numeric-message.component.html'
})
export class | implements OnInit {
locationFormGroup: FormGroup
constructor(
private formBuilder: FormBuilder)
{ }
ngOnInit() {
this.locationFormGroup = this.formBuilder.group({
postalAddress:['', RxwebValidators.alphaNumeric({allowWhiteSpace:true ,message:'Please enter only alphanumerics, special characters are not allowed and whitespace is allowed.' })],
});
}
}
| AlphaNumericMessageValidatorComponent |
device.js | /*===========================
Device/OS Detection
===========================*/
'use strict';
var device = {};
var ua = navigator.userAgent;
var android = ua.match(/(Android);?[\s\/]+([\d.]+)?/);
var ipad = ua.match(/(iPad).*OS\s([\d_]+)/);
var ipod = ua.match(/(iPod)(.*OS\s([\d_]+))?/);
var iphone = !ipad && ua.match(/(iPhone\sOS)\s([\d_]+)/);
device.ios = device.android = device.iphone = device.ipad = device.androidChrome = false;
// Android
if (android) {
device.os = 'android';
device.osVersion = android[2];
device.android = true;
device.androidChrome = ua.toLowerCase().indexOf('chrome') >= 0;
}
if (ipad || iphone || ipod) {
device.os = 'ios';
device.ios = true;
}
// iOS
if (iphone && !ipod) {
device.osVersion = iphone[2].replace(/_/g, '.');
device.iphone = true;
}
if (ipad) {
device.osVersion = ipad[2].replace(/_/g, '.');
device.ipad = true;
}
if (ipod) {
device.osVersion = ipod[3] ? ipod[3].replace(/_/g, '.') : null;
device.iphone = true;
}
// iOS 8+ changed UA
if (device.ios && device.osVersion && ua.indexOf('Version/') >= 0) {
if (device.osVersion.split('.')[0] === '10') {
device.osVersion = ua.toLowerCase().split('version/')[1].split(' ')[0];
}
}
// Webview
device.webView = (iphone || ipad || ipod) && ua.match(/.*AppleWebKit(?!.*Safari)/i);
/*
// Minimal UI
if (device.os && device.os === 'ios') {
var osVersionArr = device.osVersion.split('.');
device.minimalUi = !device.webView &&
(ipod || iphone) &&
(osVersionArr[0] * 1 === 7 ? osVersionArr[1] * 1 >= 1 : osVersionArr[0] * 1 > 7) &&
$('meta[name="viewport"]').length > 0 && $('meta[name="viewport"]').attr('content').indexOf('minimal-ui') >= 0;
}
*/
// Check for status bar and fullscreen app mode
var windowWidth = window.innerWidth;
var windowHeight = window.innerHeight;
device.statusBar = false;
if (device.webView && (windowWidth * windowHeight === screen.width * screen.height)) {
device.statusBar = true; | }
else {
device.statusBar = false;
}
// Classes
var classNames = [];
// Pixel Ratio
device.pixelRatio = window.devicePixelRatio || 1;
classNames.push('pixel-ratio-' + Math.floor(device.pixelRatio));
if (device.pixelRatio >= 2) {
classNames.push('retina');
}
// OS classes
if (device.os) {
classNames.push(device.os, device.os + '-' + device.osVersion.split('.')[0], device.os + '-' + device.osVersion.replace(/\./g, '-'));
if (device.os === 'ios') {
var major = parseInt(device.osVersion.split('.')[0], 10);
for (var i = major - 1; i >= 6; i--) {
classNames.push('ios-gt-' + i);
}
}
}
// Status bar classes
if (device.statusBar) {
classNames.push('with-statusbar-overlay');
}
// Add html classes
if (classNames.length > 0) document.documentElement.className += ' ' + classNames.join(' ');
device.isWeixin = /MicroMessenger/i.test(ua);
module.exports = device; | |
bing_wallpaper.py | #! /usr/bin/python3
# Author: Maximilian Muth <[email protected]>
# https://github.com/mammuth/bing-wallpaper
# Version: 1.0
# License: GPL-2.0
# Description: Downloads the Bing picture of the Day and sets it as wallpaper (Linux / Windows).
import datetime
from urllib.request import urlopen, urlretrieve
from xml.dom import minidom
import os
import sys
def join_path(*args):
# Takes an list of values or multiple values and returns an valid path.
if isinstance(args[0], list):
path_list = args[0]
else:
path_list = args
val = [str(v).strip(' ') for v in path_list]
return os.path.normpath('/'.join(val))
dir_path = os.path.dirname(os.path.realpath(__file__))
save_dir = join_path(dir_path, 'images')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
def set_wallpaper(pic_path):
if sys.platform.startswith('win32'):
cmd = 'REG ADD \"HKCU\Control Panel\Desktop\" /v Wallpaper /t REG_SZ /d \"%s\" /f' %pic_path
os.system(cmd)
os.system('rundll32.exe user32.dll, UpdatePerUserSystemParameters')
print('Wallpaper is set.')
elif sys.platform.startswith('linux2'):
os.system(''.join(['gsettings set org.gnome.desktop.background picture-uri file://', pic_path]))
print('Wallpaper is set.')
else:
print('OS not supported.')
return
return
def download_old_wallpapers(minus_days=False):
"""Uses download_wallpaper(set_wallpaper=False) to download the last 20 wallpapers.
If minus_days is given an integer a specific day in the past will be downloaded.
"""
if minus_days:
download_wallpaper(idx=minus_days, use_wallpaper=False)
return
for i in range(0, 20): # max 20
download_wallpaper(idx=i, use_wallpaper=False)
def | (idx=0, use_wallpaper=False):
# Getting the XML File
try:
usock = urlopen(''.join(['https://www.bing.com/HPImageArchive.aspx?format=xml&idx=',
str(idx), '&n=10&mkt=ru-RU'])) # ru-RU, because they always have 1920x1200 resolution
except Exception as e:
print('Error while downloading #', idx, e)
return
try:
xmldoc = minidom.parse(usock)
print(xmldoc)
# This is raised when there is trouble finding the image url.
except Exception as e:
print('Error while processing XML index #', idx, e)
return
# Parsing the XML File
print(len(xmldoc.getElementsByTagName('image')))
for image in xmldoc.getElementsByTagName('image'):
element = image.childNodes[3]
startdate = image.childNodes[0].firstChild.nodeValue
#print(element, fullstartdate)
url = 'https://www.bing.com' + element.firstChild.nodeValue
# Get Current Date as fileName for the downloaded Picture
now = datetime.datetime.now()
date = now - datetime.timedelta(days=int(idx))
#pic_path = join_path(save_dir, ''.join([date.strftime('bing_wp_%d-%m-%Y'), '.jpg']))
pic_path = join_path(save_dir, ''.join([startdate, '_', url.split("/")[-1] ]))
if os.path.isfile(pic_path):
print('Image of', date.strftime('%d-%m-%Y'), 'already downloaded.')
if use_wallpaper:
set_wallpaper(pic_path)
continue
print('Downloading: ', date.strftime('%d-%m-%Y'), 'index #', idx)
# Download and Save the Picture
# Get a higher resolution by replacing the file name
try:
urlretrieve(url.replace('_1366x768', '_1920x1200'), pic_path)
except Exception as e:
print('Error while downloading #', idx, e)
urlretrieve(url, pic_path)
# Set Wallpaper if wanted by user
if use_wallpaper:
set_wallpaper(pic_path)
if __name__ == "__main__":
download_wallpaper()
download_old_wallpapers(minus_days=False)
| download_wallpaper |
input_source.rs | // THIS FILE IS AUTO-GENERATED
use crate::{
accessory::{HapAccessory, HapAccessoryService, Accessory, Information},
service::{HapService, accessory_information::AccessoryInformation, input_source},
event::EventEmitterPtr,
Result,
};
/// Input Source Accessory.
pub type InputSource = Accessory<InputSourceInner>;
/// Inner type of the Input Source Accessory.
#[derive(Default)]
pub struct InputSourceInner {
/// ID of the Input Source Accessory.
id: u64,
/// Accessory Information Service.
pub accessory_information: AccessoryInformation,
/// Input Source Service.
pub input_source: input_source::InputSource,
}
impl HapAccessory for InputSourceInner {
fn get_id(&self) -> u64 {
self.id
}
fn set_id(&mut self, id: u64) {
self.id = id;
}
fn get_services(&self) -> Vec<&dyn HapAccessoryService> {
vec![
&self.accessory_information,
&self.input_source,
]
}
fn get_mut_services(&mut self) -> Vec<&mut dyn HapAccessoryService> {
vec![
&mut self.accessory_information,
&mut self.input_source,
]
}
fn get_mut_information(&mut self) -> &mut AccessoryInformation |
fn init_iids(&mut self, accessory_id: u64, event_emitter: EventEmitterPtr) -> Result<()> {
let mut next_iid = 1;
for service in self.get_mut_services() {
service.set_id(next_iid);
next_iid += 1;
for characteristic in service.get_mut_characteristics() {
characteristic.set_id(next_iid)?;
characteristic.set_accessory_id(accessory_id)?;
characteristic.set_event_emitter(Some(event_emitter.clone()))?;
next_iid += 1;
}
}
Ok(())
}
}
/// Creates a new Input Source Accessory.
pub fn new(information: Information) -> Result<InputSource> {
let mut input_source = input_source::new();
input_source.set_primary(true);
Ok(InputSource::new(InputSourceInner {
accessory_information: information.to_service()?,
input_source,
..Default::default()
}))
}
| {
&mut self.accessory_information
} |
handlers.rs | use std::collections::HashMap;
use r68k_common::constants::*;
use super::super::Handler;
use super::opcodes::*;
use super::super::InstructionSet;
use super::*;
#[allow(dead_code)]
pub struct OpcodeHandler<T: Core> {
mask: u32,
matching: u32,
pub name: &'static str,
handler: Handler<T>
}
macro_rules! op_entry {
($mask:expr, $matching:expr, $handler:ident) => (OpcodeHandler { mask: $mask, matching: $matching, handler: $handler, name: stringify!($handler) })
}
pub struct InstructionSetGenerator<T: Core> {
optable: Vec<OpcodeHandler<T>>
}
impl<T: Core> InstructionSetGenerator<T> {
pub fn new() -> InstructionSetGenerator<T> {
InstructionSetGenerator {
optable: generate_optable()
}
}
pub fn generate(&self) -> InstructionSet<T> {
self.generate_with(illegal, |ref op| op.handler)
}
pub fn generate_with<F: Clone, G>(&self, def: F, with: G) -> Vec<F>
where G: for<'a> Fn(&OpcodeHandler<T>) -> F
{
// Covers all possible IR values (64k entries)
let mut handler: Vec<F> = Vec::with_capacity(0x10000);
for _ in 0..0x10000 { handler.push(def.clone()); }
// two of the commonly used op-masks (MASK_OUT_X (280+ uses) and
// MASK_OUT_X_Y (500+)) are non-contiguous, so optimize for that.
// This saves millions of iterations of the innermost loop below.
// The odd mask MASK_LOBYTX (8 blocks of 256 opcodes) is used only
// for the MOVEQ instruction, saving 1792 iterations, but was cheap
// to include.
// The X register is selected by bit 9-11, which gives the offsets
// in this table
fn x_offset(len: u32) -> [(u32, u32); 8] {
[ (0, len),
(512, len),
(1024, len),
(1536, len),
(2048, len),
(2560, len),
(3072, len),
(3584, len)]
}
let mut offset_cache = HashMap::new();
offset_cache.insert(MASK_OUT_X, x_offset(1));
offset_cache.insert(MASK_OUT_X_Y, x_offset(8));
offset_cache.insert(MASK_LOBYTX, x_offset(256));
let _ops = self.optable.len();
let mut _implemented = 0;
for op in &self.optable {
match offset_cache.get(&op.mask) {
Some(offsets) => {
for opcode in offsets.iter().flat_map(|&(start, len)| (start..(start+len)).map(|o| o + op.matching)) {
handler[opcode as usize] = with(&op);
_implemented += 1;
}
},
None => {
// the remaining masks are all contiguous, and already optimal
let max_count = 1 << (op.mask as u16).count_zeros();
let mut matching = 0;
for opcode in op.matching..0x10000 {
if (opcode & op.mask) == op.matching {
handler[opcode as usize] = with(&op);
_implemented += 1;
matching += 1;
if matching >= max_count {
break;
}
}
}
}
}
}
// According to Musashi opcode handler jump table;
// M68000 implements 54007 opcodes (11529 illegal)
// M68010 implements 54194 opcodes (11342 illegal)
// M68020 implements 55611 opcodes (9925 illegal)
// println!("{:?} opcodes implemented ({:.2}% done) in {:?} instruction variants", _implemented, _implemented as f32 / 540.07f32, _ops);
handler
}
}
fn generate_optable<T: Core>() -> Vec<OpcodeHandler<T>> {
// the optable contains opcode mask, matching mask and the corresponding handler + name
vec![
op_entry!(MASK_LO3NIB, OP_UNIMPLEMENTED_1010, unimplemented_1010),
op_entry!(MASK_LO3NIB, OP_UNIMPLEMENTED_1111, unimplemented_1111),
op_entry!(MASK_OUT_X_Y, OP_ABCD_8_RR, abcd_8_rr),
op_entry!(MASK_OUT_X_Y, OP_ABCD_8_MM, abcd_8_mm),
op_entry!(MASK_OUT_X_Y, OP_ADD_8_ER_DN, add_8_er_dn),
op_entry!(MASK_OUT_X_Y, OP_ADD_8_ER_AI, add_8_er_ai),
op_entry!(MASK_OUT_X_Y, OP_ADD_8_ER_PI, add_8_er_pi),
op_entry!(MASK_OUT_X_Y, OP_ADD_8_ER_PD, add_8_er_pd),
op_entry!(MASK_OUT_X_Y, OP_ADD_8_ER_DI, add_8_er_di),
op_entry!(MASK_OUT_X_Y, OP_ADD_8_ER_IX, add_8_er_ix),
op_entry!(MASK_OUT_X, OP_ADD_8_ER_AW, add_8_er_aw),
op_entry!(MASK_OUT_X, OP_ADD_8_ER_AL, add_8_er_al),
op_entry!(MASK_OUT_X, OP_ADD_8_ER_PCDI, add_8_er_pcdi),
op_entry!(MASK_OUT_X, OP_ADD_8_ER_PCIX, add_8_er_pcix),
op_entry!(MASK_OUT_X, OP_ADD_8_ER_IMM, add_8_er_imm),
op_entry!(MASK_OUT_X_Y, OP_ADD_8_RE_AI, add_8_re_ai),
op_entry!(MASK_OUT_X_Y, OP_ADD_8_RE_PI, add_8_re_pi),
op_entry!(MASK_OUT_X_Y, OP_ADD_8_RE_PD, add_8_re_pd),
op_entry!(MASK_OUT_X_Y, OP_ADD_8_RE_DI, add_8_re_di),
op_entry!(MASK_OUT_X_Y, OP_ADD_8_RE_IX, add_8_re_ix),
op_entry!(MASK_OUT_X, OP_ADD_8_RE_AW, add_8_re_aw),
op_entry!(MASK_OUT_X, OP_ADD_8_RE_AL, add_8_re_al),
op_entry!(MASK_OUT_X_Y, OP_ADD_16_ER_DN, add_16_er_dn),
op_entry!(MASK_OUT_X_Y, OP_ADD_16_ER_AN, add_16_er_an),
op_entry!(MASK_OUT_X_Y, OP_ADD_16_ER_AI, add_16_er_ai),
op_entry!(MASK_OUT_X_Y, OP_ADD_16_ER_PI, add_16_er_pi),
op_entry!(MASK_OUT_X_Y, OP_ADD_16_ER_PD, add_16_er_pd),
op_entry!(MASK_OUT_X_Y, OP_ADD_16_ER_DI, add_16_er_di),
op_entry!(MASK_OUT_X_Y, OP_ADD_16_ER_IX, add_16_er_ix),
op_entry!(MASK_OUT_X, OP_ADD_16_ER_AW, add_16_er_aw),
op_entry!(MASK_OUT_X, OP_ADD_16_ER_AL, add_16_er_al),
op_entry!(MASK_OUT_X, OP_ADD_16_ER_PCDI, add_16_er_pcdi),
op_entry!(MASK_OUT_X, OP_ADD_16_ER_PCIX, add_16_er_pcix),
op_entry!(MASK_OUT_X, OP_ADD_16_ER_IMM, add_16_er_imm),
op_entry!(MASK_OUT_X_Y, OP_ADD_16_RE_AI, add_16_re_ai),
op_entry!(MASK_OUT_X_Y, OP_ADD_16_RE_PI, add_16_re_pi),
op_entry!(MASK_OUT_X_Y, OP_ADD_16_RE_PD, add_16_re_pd),
op_entry!(MASK_OUT_X_Y, OP_ADD_16_RE_DI, add_16_re_di),
op_entry!(MASK_OUT_X_Y, OP_ADD_16_RE_IX, add_16_re_ix),
op_entry!(MASK_OUT_X, OP_ADD_16_RE_AW, add_16_re_aw),
op_entry!(MASK_OUT_X, OP_ADD_16_RE_AL, add_16_re_al),
op_entry!(MASK_OUT_X_Y, OP_ADD_32_ER_DN, add_32_er_dn),
op_entry!(MASK_OUT_X_Y, OP_ADD_32_ER_AN, add_32_er_an),
op_entry!(MASK_OUT_X_Y, OP_ADD_32_ER_AI, add_32_er_ai),
op_entry!(MASK_OUT_X_Y, OP_ADD_32_ER_PI, add_32_er_pi),
op_entry!(MASK_OUT_X_Y, OP_ADD_32_ER_PD, add_32_er_pd),
op_entry!(MASK_OUT_X_Y, OP_ADD_32_ER_DI, add_32_er_di),
op_entry!(MASK_OUT_X_Y, OP_ADD_32_ER_IX, add_32_er_ix),
op_entry!(MASK_OUT_X, OP_ADD_32_ER_AW, add_32_er_aw),
op_entry!(MASK_OUT_X, OP_ADD_32_ER_AL, add_32_er_al),
op_entry!(MASK_OUT_X, OP_ADD_32_ER_PCDI, add_32_er_pcdi),
op_entry!(MASK_OUT_X, OP_ADD_32_ER_PCIX, add_32_er_pcix),
op_entry!(MASK_OUT_X, OP_ADD_32_ER_IMM, add_32_er_imm),
op_entry!(MASK_OUT_X_Y, OP_ADD_32_RE_AI, add_32_re_ai),
op_entry!(MASK_OUT_X_Y, OP_ADD_32_RE_PI, add_32_re_pi),
op_entry!(MASK_OUT_X_Y, OP_ADD_32_RE_PD, add_32_re_pd),
op_entry!(MASK_OUT_X_Y, OP_ADD_32_RE_DI, add_32_re_di),
op_entry!(MASK_OUT_X_Y, OP_ADD_32_RE_IX, add_32_re_ix),
op_entry!(MASK_OUT_X, OP_ADD_32_RE_AW, add_32_re_aw),
op_entry!(MASK_OUT_X, OP_ADD_32_RE_AL, add_32_re_al),
op_entry!(MASK_OUT_X_Y, OP_ADDA_16_DN, adda_16_dn),
op_entry!(MASK_OUT_X_Y, OP_ADDA_16_AN, adda_16_an),
op_entry!(MASK_OUT_X_Y, OP_ADDA_16_AI, adda_16_ai),
op_entry!(MASK_OUT_X_Y, OP_ADDA_16_PI, adda_16_pi),
op_entry!(MASK_OUT_X_Y, OP_ADDA_16_PD, adda_16_pd),
op_entry!(MASK_OUT_X_Y, OP_ADDA_16_DI, adda_16_di),
op_entry!(MASK_OUT_X_Y, OP_ADDA_16_IX, adda_16_ix),
op_entry!(MASK_OUT_X, OP_ADDA_16_AW, adda_16_aw),
op_entry!(MASK_OUT_X, OP_ADDA_16_AL, adda_16_al),
op_entry!(MASK_OUT_X, OP_ADDA_16_PCDI, adda_16_pcdi),
op_entry!(MASK_OUT_X, OP_ADDA_16_PCIX, adda_16_pcix),
op_entry!(MASK_OUT_X, OP_ADDA_16_IMM, adda_16_imm),
op_entry!(MASK_OUT_X_Y, OP_ADDA_32_DN, adda_32_dn),
op_entry!(MASK_OUT_X_Y, OP_ADDA_32_AN, adda_32_an),
op_entry!(MASK_OUT_X_Y, OP_ADDA_32_AI, adda_32_ai),
op_entry!(MASK_OUT_X_Y, OP_ADDA_32_PI, adda_32_pi),
op_entry!(MASK_OUT_X_Y, OP_ADDA_32_PD, adda_32_pd),
op_entry!(MASK_OUT_X_Y, OP_ADDA_32_DI, adda_32_di),
op_entry!(MASK_OUT_X_Y, OP_ADDA_32_IX, adda_32_ix),
op_entry!(MASK_OUT_X, OP_ADDA_32_AW, adda_32_aw),
op_entry!(MASK_OUT_X, OP_ADDA_32_AL, adda_32_al),
op_entry!(MASK_OUT_X, OP_ADDA_32_PCDI, adda_32_pcdi),
op_entry!(MASK_OUT_X, OP_ADDA_32_PCIX, adda_32_pcix),
op_entry!(MASK_OUT_X, OP_ADDA_32_IMM, adda_32_imm),
op_entry!(MASK_OUT_Y, OP_ADDI_8_DN, addi_8_dn),
op_entry!(MASK_OUT_Y, OP_ADDI_8_AI, addi_8_ai),
op_entry!(MASK_OUT_Y, OP_ADDI_8_PI, addi_8_pi),
op_entry!(MASK_OUT_Y, OP_ADDI_8_PD, addi_8_pd),
op_entry!(MASK_OUT_Y, OP_ADDI_8_DI, addi_8_di),
op_entry!(MASK_OUT_Y, OP_ADDI_8_IX, addi_8_ix),
op_entry!(MASK_EXACT, OP_ADDI_8_AW, addi_8_aw),
op_entry!(MASK_EXACT, OP_ADDI_8_AL, addi_8_al),
op_entry!(MASK_OUT_Y, OP_ADDI_16_DN, addi_16_dn),
op_entry!(MASK_OUT_Y, OP_ADDI_16_AI, addi_16_ai),
op_entry!(MASK_OUT_Y, OP_ADDI_16_PI, addi_16_pi),
op_entry!(MASK_OUT_Y, OP_ADDI_16_PD, addi_16_pd),
op_entry!(MASK_OUT_Y, OP_ADDI_16_DI, addi_16_di),
op_entry!(MASK_OUT_Y, OP_ADDI_16_IX, addi_16_ix),
op_entry!(MASK_EXACT, OP_ADDI_16_AW, addi_16_aw),
op_entry!(MASK_EXACT, OP_ADDI_16_AL, addi_16_al),
op_entry!(MASK_OUT_Y, OP_ADDI_32_DN, addi_32_dn),
op_entry!(MASK_OUT_Y, OP_ADDI_32_AI, addi_32_ai),
op_entry!(MASK_OUT_Y, OP_ADDI_32_PI, addi_32_pi),
op_entry!(MASK_OUT_Y, OP_ADDI_32_PD, addi_32_pd),
op_entry!(MASK_OUT_Y, OP_ADDI_32_DI, addi_32_di),
op_entry!(MASK_OUT_Y, OP_ADDI_32_IX, addi_32_ix),
op_entry!(MASK_EXACT, OP_ADDI_32_AW, addi_32_aw),
op_entry!(MASK_EXACT, OP_ADDI_32_AL, addi_32_al),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_8_DN, addq_8_dn),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_8_AI, addq_8_ai),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_8_PI, addq_8_pi),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_8_PD, addq_8_pd),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_8_DI, addq_8_di),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_8_IX, addq_8_ix),
op_entry!(MASK_OUT_X, OP_ADDQ_8_AW, addq_8_aw),
op_entry!(MASK_OUT_X, OP_ADDQ_8_AL, addq_8_al),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_16_DN, addq_16_dn),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_16_AN, addq_16_an),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_16_AI, addq_16_ai),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_16_PI, addq_16_pi),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_16_PD, addq_16_pd),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_16_DI, addq_16_di),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_16_IX, addq_16_ix),
op_entry!(MASK_OUT_X, OP_ADDQ_16_AW, addq_16_aw),
op_entry!(MASK_OUT_X, OP_ADDQ_16_AL, addq_16_al),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_32_DN, addq_32_dn),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_32_AN, addq_32_an),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_32_AI, addq_32_ai),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_32_PI, addq_32_pi),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_32_PD, addq_32_pd),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_32_DI, addq_32_di),
op_entry!(MASK_OUT_X_Y, OP_ADDQ_32_IX, addq_32_ix),
op_entry!(MASK_OUT_X, OP_ADDQ_32_AW, addq_32_aw),
op_entry!(MASK_OUT_X, OP_ADDQ_32_AL, addq_32_al),
op_entry!(MASK_OUT_X_Y, OP_ADDX_8_RR, addx_8_rr),
op_entry!(MASK_OUT_X_Y, OP_ADDX_8_MM, addx_8_mm),
op_entry!(MASK_OUT_X_Y, OP_ADDX_16_RR, addx_16_rr),
op_entry!(MASK_OUT_X_Y, OP_ADDX_16_MM, addx_16_mm),
op_entry!(MASK_OUT_X_Y, OP_ADDX_32_RR, addx_32_rr),
op_entry!(MASK_OUT_X_Y, OP_ADDX_32_MM, addx_32_mm),
op_entry!(MASK_OUT_X_Y, OP_AND_8_ER_DN, and_8_er_dn),
op_entry!(MASK_OUT_X_Y, OP_AND_8_ER_AI, and_8_er_ai),
op_entry!(MASK_OUT_X_Y, OP_AND_8_ER_PI, and_8_er_pi),
op_entry!(MASK_OUT_X_Y, OP_AND_8_ER_PD, and_8_er_pd),
op_entry!(MASK_OUT_X_Y, OP_AND_8_ER_DI, and_8_er_di),
op_entry!(MASK_OUT_X_Y, OP_AND_8_ER_IX, and_8_er_ix),
op_entry!(MASK_OUT_X, OP_AND_8_ER_AW, and_8_er_aw),
op_entry!(MASK_OUT_X, OP_AND_8_ER_AL, and_8_er_al),
op_entry!(MASK_OUT_X, OP_AND_8_ER_PCDI, and_8_er_pcdi),
op_entry!(MASK_OUT_X, OP_AND_8_ER_PCIX, and_8_er_pcix),
op_entry!(MASK_OUT_X, OP_AND_8_ER_IMM, and_8_er_imm),
op_entry!(MASK_OUT_X_Y, OP_AND_8_RE_AI, and_8_re_ai),
op_entry!(MASK_OUT_X_Y, OP_AND_8_RE_PI, and_8_re_pi),
op_entry!(MASK_OUT_X_Y, OP_AND_8_RE_PD, and_8_re_pd),
op_entry!(MASK_OUT_X_Y, OP_AND_8_RE_DI, and_8_re_di),
op_entry!(MASK_OUT_X_Y, OP_AND_8_RE_IX, and_8_re_ix),
op_entry!(MASK_OUT_X, OP_AND_8_RE_AW, and_8_re_aw),
op_entry!(MASK_OUT_X, OP_AND_8_RE_AL, and_8_re_al),
op_entry!(MASK_OUT_X_Y, OP_AND_16_ER_DN, and_16_er_dn),
op_entry!(MASK_OUT_X_Y, OP_AND_16_ER_AI, and_16_er_ai),
op_entry!(MASK_OUT_X_Y, OP_AND_16_ER_PI, and_16_er_pi),
op_entry!(MASK_OUT_X_Y, OP_AND_16_ER_PD, and_16_er_pd),
op_entry!(MASK_OUT_X_Y, OP_AND_16_ER_DI, and_16_er_di),
op_entry!(MASK_OUT_X_Y, OP_AND_16_ER_IX, and_16_er_ix),
op_entry!(MASK_OUT_X, OP_AND_16_ER_AW, and_16_er_aw),
op_entry!(MASK_OUT_X, OP_AND_16_ER_AL, and_16_er_al),
op_entry!(MASK_OUT_X, OP_AND_16_ER_PCDI, and_16_er_pcdi),
op_entry!(MASK_OUT_X, OP_AND_16_ER_PCIX, and_16_er_pcix),
op_entry!(MASK_OUT_X, OP_AND_16_ER_IMM, and_16_er_imm),
op_entry!(MASK_OUT_X_Y, OP_AND_16_RE_AI, and_16_re_ai),
op_entry!(MASK_OUT_X_Y, OP_AND_16_RE_PI, and_16_re_pi),
op_entry!(MASK_OUT_X_Y, OP_AND_16_RE_PD, and_16_re_pd),
op_entry!(MASK_OUT_X_Y, OP_AND_16_RE_DI, and_16_re_di),
op_entry!(MASK_OUT_X_Y, OP_AND_16_RE_IX, and_16_re_ix),
op_entry!(MASK_OUT_X, OP_AND_16_RE_AW, and_16_re_aw),
op_entry!(MASK_OUT_X, OP_AND_16_RE_AL, and_16_re_al),
op_entry!(MASK_OUT_X_Y, OP_AND_32_ER_DN, and_32_er_dn),
op_entry!(MASK_OUT_X_Y, OP_AND_32_ER_AI, and_32_er_ai),
op_entry!(MASK_OUT_X_Y, OP_AND_32_ER_PI, and_32_er_pi),
op_entry!(MASK_OUT_X_Y, OP_AND_32_ER_PD, and_32_er_pd),
op_entry!(MASK_OUT_X_Y, OP_AND_32_ER_DI, and_32_er_di),
op_entry!(MASK_OUT_X_Y, OP_AND_32_ER_IX, and_32_er_ix),
op_entry!(MASK_OUT_X, OP_AND_32_ER_AW, and_32_er_aw),
op_entry!(MASK_OUT_X, OP_AND_32_ER_AL, and_32_er_al),
op_entry!(MASK_OUT_X, OP_AND_32_ER_PCDI, and_32_er_pcdi),
op_entry!(MASK_OUT_X, OP_AND_32_ER_PCIX, and_32_er_pcix),
op_entry!(MASK_OUT_X, OP_AND_32_ER_IMM, and_32_er_imm),
op_entry!(MASK_OUT_X_Y, OP_AND_32_RE_AI, and_32_re_ai),
op_entry!(MASK_OUT_X_Y, OP_AND_32_RE_PI, and_32_re_pi),
op_entry!(MASK_OUT_X_Y, OP_AND_32_RE_PD, and_32_re_pd),
op_entry!(MASK_OUT_X_Y, OP_AND_32_RE_DI, and_32_re_di),
op_entry!(MASK_OUT_X_Y, OP_AND_32_RE_IX, and_32_re_ix),
op_entry!(MASK_OUT_X, OP_AND_32_RE_AW, and_32_re_aw),
op_entry!(MASK_OUT_X, OP_AND_32_RE_AL, and_32_re_al),
op_entry!(MASK_OUT_Y, OP_ANDI_8_DN, andi_8_dn),
op_entry!(MASK_OUT_Y, OP_ANDI_8_AI, andi_8_ai),
op_entry!(MASK_OUT_Y, OP_ANDI_8_PI, andi_8_pi),
op_entry!(MASK_OUT_Y, OP_ANDI_8_PD, andi_8_pd),
op_entry!(MASK_OUT_Y, OP_ANDI_8_DI, andi_8_di),
op_entry!(MASK_OUT_Y, OP_ANDI_8_IX, andi_8_ix),
op_entry!(MASK_EXACT, OP_ANDI_8_AW, andi_8_aw),
op_entry!(MASK_EXACT, OP_ANDI_8_AL, andi_8_al),
op_entry!(MASK_OUT_Y, OP_ANDI_16_DN, andi_16_dn),
op_entry!(MASK_OUT_Y, OP_ANDI_16_AI, andi_16_ai),
op_entry!(MASK_OUT_Y, OP_ANDI_16_PI, andi_16_pi),
op_entry!(MASK_OUT_Y, OP_ANDI_16_PD, andi_16_pd),
op_entry!(MASK_OUT_Y, OP_ANDI_16_DI, andi_16_di),
op_entry!(MASK_OUT_Y, OP_ANDI_16_IX, andi_16_ix),
op_entry!(MASK_EXACT, OP_ANDI_16_AW, andi_16_aw),
op_entry!(MASK_EXACT, OP_ANDI_16_AL, andi_16_al),
op_entry!(MASK_OUT_Y, OP_ANDI_32_DN, andi_32_dn),
op_entry!(MASK_OUT_Y, OP_ANDI_32_AI, andi_32_ai),
op_entry!(MASK_OUT_Y, OP_ANDI_32_PI, andi_32_pi),
op_entry!(MASK_OUT_Y, OP_ANDI_32_PD, andi_32_pd),
op_entry!(MASK_OUT_Y, OP_ANDI_32_DI, andi_32_di),
op_entry!(MASK_OUT_Y, OP_ANDI_32_IX, andi_32_ix),
op_entry!(MASK_EXACT, OP_ANDI_32_AW, andi_32_aw),
op_entry!(MASK_EXACT, OP_ANDI_32_AL, andi_32_al),
op_entry!(MASK_EXACT, OP_ANDI_8_TOC, andi_8_toc),
op_entry!(MASK_EXACT, OP_ANDI_16_TOS, andi_16_tos),
op_entry!(MASK_OUT_X_Y, OP_ASL_8_R , asl_8_r),
op_entry!(MASK_OUT_X_Y, OP_ASL_8_S , asl_8_s),
op_entry!(MASK_OUT_X_Y, OP_ASL_16_R , asl_16_r),
op_entry!(MASK_OUT_X_Y, OP_ASL_16_S , asl_16_s),
op_entry!(MASK_OUT_X_Y, OP_ASL_32_R , asl_32_r),
op_entry!(MASK_OUT_X_Y, OP_ASL_32_S , asl_32_s),
op_entry!(MASK_OUT_X_Y, OP_ASR_8_R , asr_8_r),
op_entry!(MASK_OUT_X_Y, OP_ASR_8_S , asr_8_s),
op_entry!(MASK_OUT_X_Y, OP_ASR_16_R , asr_16_r),
op_entry!(MASK_OUT_X_Y, OP_ASR_16_S , asr_16_s),
op_entry!(MASK_OUT_X_Y, OP_ASR_32_R , asr_32_r),
op_entry!(MASK_OUT_X_Y, OP_ASR_32_S , asr_32_s),
op_entry!(MASK_OUT_Y, OP_ASL_16_AI, asl_16_ai),
op_entry!(MASK_OUT_Y, OP_ASL_16_PI, asl_16_pi),
op_entry!(MASK_OUT_Y, OP_ASL_16_PD, asl_16_pd),
op_entry!(MASK_OUT_Y, OP_ASL_16_DI, asl_16_di),
op_entry!(MASK_OUT_Y, OP_ASL_16_IX, asl_16_ix),
op_entry!(MASK_EXACT, OP_ASL_16_AW, asl_16_aw),
op_entry!(MASK_EXACT, OP_ASL_16_AL, asl_16_al),
op_entry!(MASK_OUT_Y, OP_ASR_16_AI, asr_16_ai),
op_entry!(MASK_OUT_Y, OP_ASR_16_PI, asr_16_pi),
op_entry!(MASK_OUT_Y, OP_ASR_16_PD, asr_16_pd),
op_entry!(MASK_OUT_Y, OP_ASR_16_DI, asr_16_di),
op_entry!(MASK_OUT_Y, OP_ASR_16_IX, asr_16_ix),
op_entry!(MASK_EXACT, OP_ASR_16_AW, asr_16_aw),
op_entry!(MASK_EXACT, OP_ASR_16_AL, asr_16_al),
op_entry!(MASK_LOBYTE, OP_BHI_8, bhi_8),
op_entry!(MASK_LOBYTE, OP_BLS_8, bls_8),
op_entry!(MASK_LOBYTE, OP_BCC_8, bcc_8),
op_entry!(MASK_LOBYTE, OP_BCS_8, bcs_8),
op_entry!(MASK_LOBYTE, OP_BNE_8, bne_8),
op_entry!(MASK_LOBYTE, OP_BEQ_8, beq_8),
op_entry!(MASK_LOBYTE, OP_BVC_8, bvc_8),
op_entry!(MASK_LOBYTE, OP_BVS_8, bvs_8),
op_entry!(MASK_LOBYTE, OP_BPL_8, bpl_8),
op_entry!(MASK_LOBYTE, OP_BMI_8, bmi_8),
op_entry!(MASK_LOBYTE, OP_BGE_8, bge_8),
op_entry!(MASK_LOBYTE, OP_BLT_8, blt_8),
op_entry!(MASK_LOBYTE, OP_BGT_8, bgt_8),
op_entry!(MASK_LOBYTE, OP_BLE_8, ble_8),
op_entry!(MASK_LOBYTE, OP_BRA_8, bra_8),
op_entry!(MASK_LOBYTE, OP_BSR_8, bsr_8),
op_entry!(MASK_EXACT, OP_BHI_16, bhi_16),
op_entry!(MASK_EXACT, OP_BLS_16, bls_16),
op_entry!(MASK_EXACT, OP_BCC_16, bcc_16),
op_entry!(MASK_EXACT, OP_BCS_16, bcs_16),
op_entry!(MASK_EXACT, OP_BNE_16, bne_16),
op_entry!(MASK_EXACT, OP_BEQ_16, beq_16),
op_entry!(MASK_EXACT, OP_BVC_16, bvc_16),
op_entry!(MASK_EXACT, OP_BVS_16, bvs_16),
op_entry!(MASK_EXACT, OP_BPL_16, bpl_16),
op_entry!(MASK_EXACT, OP_BMI_16, bmi_16),
op_entry!(MASK_EXACT, OP_BGE_16, bge_16),
op_entry!(MASK_EXACT, OP_BLT_16, blt_16),
op_entry!(MASK_EXACT, OP_BGT_16, bgt_16),
op_entry!(MASK_EXACT, OP_BLE_16, ble_16),
op_entry!(MASK_EXACT, OP_BRA_16, bra_16),
op_entry!(MASK_EXACT, OP_BSR_16, bsr_16),
// On the M68000, Bcc does not support 32-bit displacements, so the 32-bit forms decode as illegal
op_entry!(MASK_EXACT, OP_BHI_32, illegal),
op_entry!(MASK_EXACT, OP_BLS_32, illegal),
op_entry!(MASK_EXACT, OP_BCC_32, illegal),
op_entry!(MASK_EXACT, OP_BCS_32, illegal),
op_entry!(MASK_EXACT, OP_BNE_32, illegal),
op_entry!(MASK_EXACT, OP_BEQ_32, illegal),
op_entry!(MASK_EXACT, OP_BVC_32, illegal),
op_entry!(MASK_EXACT, OP_BVS_32, illegal),
op_entry!(MASK_EXACT, OP_BPL_32, illegal),
op_entry!(MASK_EXACT, OP_BMI_32, illegal),
op_entry!(MASK_EXACT, OP_BGE_32, illegal),
op_entry!(MASK_EXACT, OP_BLT_32, illegal),
op_entry!(MASK_EXACT, OP_BGT_32, illegal),
op_entry!(MASK_EXACT, OP_BLE_32, illegal),
op_entry!(MASK_EXACT, OP_BRA_32, illegal),
op_entry!(MASK_EXACT, OP_BSR_32, illegal),
op_entry!(MASK_OUT_X_Y, OP_BCHG_32_R_DN,bchg_32_r_dn),
op_entry!(MASK_OUT_Y, OP_BCHG_32_S_DN,bchg_32_s_dn),
op_entry!(MASK_OUT_X_Y, OP_BCHG_8_R_AI, bchg_8_r_ai),
op_entry!(MASK_OUT_X_Y, OP_BCHG_8_R_PI, bchg_8_r_pi),
op_entry!(MASK_OUT_X_Y, OP_BCHG_8_R_PD, bchg_8_r_pd),
op_entry!(MASK_OUT_X_Y, OP_BCHG_8_R_DI, bchg_8_r_di),
op_entry!(MASK_OUT_X_Y, OP_BCHG_8_R_IX, bchg_8_r_ix),
op_entry!(MASK_OUT_X, OP_BCHG_8_R_AW, bchg_8_r_aw),
op_entry!(MASK_OUT_X, OP_BCHG_8_R_AL, bchg_8_r_al),
op_entry!(MASK_OUT_Y, OP_BCHG_8_S_AI, bchg_8_s_ai),
op_entry!(MASK_OUT_Y, OP_BCHG_8_S_PI, bchg_8_s_pi),
op_entry!(MASK_OUT_Y, OP_BCHG_8_S_PD, bchg_8_s_pd),
op_entry!(MASK_OUT_Y, OP_BCHG_8_S_DI, bchg_8_s_di),
op_entry!(MASK_OUT_Y, OP_BCHG_8_S_IX, bchg_8_s_ix),
op_entry!(MASK_EXACT, OP_BCHG_8_S_AW, bchg_8_s_aw),
op_entry!(MASK_EXACT, OP_BCHG_8_S_AL, bchg_8_s_al),
op_entry!(MASK_OUT_X_Y, OP_BCLR_32_R_DN,bclr_32_r_dn),
op_entry!(MASK_OUT_Y, OP_BCLR_32_S_DN,bclr_32_s_dn),
op_entry!(MASK_OUT_X_Y, OP_BCLR_8_R_AI, bclr_8_r_ai),
op_entry!(MASK_OUT_X_Y, OP_BCLR_8_R_PI, bclr_8_r_pi),
op_entry!(MASK_OUT_X_Y, OP_BCLR_8_R_PD, bclr_8_r_pd),
op_entry!(MASK_OUT_X_Y, OP_BCLR_8_R_DI, bclr_8_r_di),
op_entry!(MASK_OUT_X_Y, OP_BCLR_8_R_IX, bclr_8_r_ix),
op_entry!(MASK_OUT_X, OP_BCLR_8_R_AW, bclr_8_r_aw),
op_entry!(MASK_OUT_X, OP_BCLR_8_R_AL, bclr_8_r_al),
op_entry!(MASK_OUT_Y, OP_BCLR_8_S_AI, bclr_8_s_ai),
op_entry!(MASK_OUT_Y, OP_BCLR_8_S_PI, bclr_8_s_pi),
op_entry!(MASK_OUT_Y, OP_BCLR_8_S_PD, bclr_8_s_pd),
op_entry!(MASK_OUT_Y, OP_BCLR_8_S_DI, bclr_8_s_di),
op_entry!(MASK_OUT_Y, OP_BCLR_8_S_IX, bclr_8_s_ix),
op_entry!(MASK_EXACT, OP_BCLR_8_S_AW, bclr_8_s_aw),
op_entry!(MASK_EXACT, OP_BCLR_8_S_AL, bclr_8_s_al),
op_entry!(MASK_OUT_X_Y, OP_BSET_32_R_DN,bset_32_r_dn),
op_entry!(MASK_OUT_Y, OP_BSET_32_S_DN,bset_32_s_dn),
op_entry!(MASK_OUT_X_Y, OP_BSET_8_R_AI, bset_8_r_ai),
op_entry!(MASK_OUT_X_Y, OP_BSET_8_R_PI, bset_8_r_pi),
op_entry!(MASK_OUT_X_Y, OP_BSET_8_R_PD, bset_8_r_pd),
op_entry!(MASK_OUT_X_Y, OP_BSET_8_R_DI, bset_8_r_di),
op_entry!(MASK_OUT_X_Y, OP_BSET_8_R_IX, bset_8_r_ix),
op_entry!(MASK_OUT_X, OP_BSET_8_R_AW, bset_8_r_aw),
op_entry!(MASK_OUT_X, OP_BSET_8_R_AL, bset_8_r_al),
op_entry!(MASK_OUT_Y, OP_BSET_8_S_AI, bset_8_s_ai),
op_entry!(MASK_OUT_Y, OP_BSET_8_S_PI, bset_8_s_pi),
op_entry!(MASK_OUT_Y, OP_BSET_8_S_PD, bset_8_s_pd),
op_entry!(MASK_OUT_Y, OP_BSET_8_S_DI, bset_8_s_di),
op_entry!(MASK_OUT_Y, OP_BSET_8_S_IX, bset_8_s_ix),
op_entry!(MASK_EXACT, OP_BSET_8_S_AW, bset_8_s_aw),
op_entry!(MASK_EXACT, OP_BSET_8_S_AL, bset_8_s_al),
op_entry!(MASK_OUT_X_Y, OP_BTST_32_R_DN,btst_32_r_dn),
op_entry!(MASK_OUT_Y, OP_BTST_32_S_DN,btst_32_s_dn),
op_entry!(MASK_OUT_X_Y, OP_BTST_8_R_AI, btst_8_r_ai),
op_entry!(MASK_OUT_X_Y, OP_BTST_8_R_PI, btst_8_r_pi),
op_entry!(MASK_OUT_X_Y, OP_BTST_8_R_PD, btst_8_r_pd),
op_entry!(MASK_OUT_X_Y, OP_BTST_8_R_DI, btst_8_r_di),
op_entry!(MASK_OUT_X_Y, OP_BTST_8_R_IX, btst_8_r_ix),
op_entry!(MASK_OUT_X, OP_BTST_8_R_AW, btst_8_r_aw),
op_entry!(MASK_OUT_X, OP_BTST_8_R_AL, btst_8_r_al),
op_entry!(MASK_OUT_X, OP_BTST_8_R_PCDI, btst_8_r_pcdi),
op_entry!(MASK_OUT_X, OP_BTST_8_R_PCIX, btst_8_r_pcix),
op_entry!(MASK_OUT_X, OP_BTST_8_R_IMM, btst_8_r_imm),
op_entry!(MASK_OUT_Y, OP_BTST_8_S_AI, btst_8_s_ai),
op_entry!(MASK_OUT_Y, OP_BTST_8_S_PI, btst_8_s_pi),
op_entry!(MASK_OUT_Y, OP_BTST_8_S_PD, btst_8_s_pd),
op_entry!(MASK_OUT_Y, OP_BTST_8_S_DI, btst_8_s_di),
op_entry!(MASK_OUT_Y, OP_BTST_8_S_IX, btst_8_s_ix),
op_entry!(MASK_EXACT, OP_BTST_8_S_AW, btst_8_s_aw),
op_entry!(MASK_EXACT, OP_BTST_8_S_AL, btst_8_s_al),
op_entry!(MASK_EXACT, OP_BTST_8_S_PCDI, btst_8_s_pcdi),
op_entry!(MASK_EXACT, OP_BTST_8_S_PCIX, btst_8_s_pcix),
op_entry!(MASK_EXACT, OP_BTST_8_S_IMM, btst_8_s_imm),
op_entry!(MASK_OUT_X_Y, OP_CHK_16_AI, chk_16_ai),
op_entry!(MASK_OUT_X, OP_CHK_16_AL, chk_16_al),
op_entry!(MASK_OUT_X, OP_CHK_16_AW, chk_16_aw),
op_entry!(MASK_OUT_X_Y, OP_CHK_16_DN, chk_16_dn),
op_entry!(MASK_OUT_X_Y, OP_CHK_16_DI, chk_16_di),
op_entry!(MASK_OUT_X, OP_CHK_16_IMM, chk_16_imm),
op_entry!(MASK_OUT_X_Y, OP_CHK_16_IX, chk_16_ix),
op_entry!(MASK_OUT_X, OP_CHK_16_PCDI, chk_16_pcdi),
op_entry!(MASK_OUT_X, OP_CHK_16_PCIX, chk_16_pcix),
op_entry!(MASK_OUT_X_Y, OP_CHK_16_PD, chk_16_pd),
op_entry!(MASK_OUT_X_Y, OP_CHK_16_PI, chk_16_pi),
op_entry!(MASK_OUT_Y, OP_CLR_8_DN, clr_8_dn),
op_entry!(MASK_OUT_Y, OP_CLR_8_AI, clr_8_ai),
op_entry!(MASK_OUT_Y, OP_CLR_8_PI, clr_8_pi),
op_entry!(MASK_OUT_Y, OP_CLR_8_PD, clr_8_pd),
op_entry!(MASK_OUT_Y, OP_CLR_8_DI, clr_8_di),
op_entry!(MASK_OUT_Y, OP_CLR_8_IX, clr_8_ix),
op_entry!(MASK_EXACT, OP_CLR_8_AW, clr_8_aw),
op_entry!(MASK_EXACT, OP_CLR_8_AL, clr_8_al),
op_entry!(MASK_OUT_Y, OP_CLR_16_DN, clr_16_dn),
op_entry!(MASK_OUT_Y, OP_CLR_16_AI, clr_16_ai),
op_entry!(MASK_OUT_Y, OP_CLR_16_PI, clr_16_pi),
op_entry!(MASK_OUT_Y, OP_CLR_16_PD, clr_16_pd),
op_entry!(MASK_OUT_Y, OP_CLR_16_DI, clr_16_di),
op_entry!(MASK_OUT_Y, OP_CLR_16_IX, clr_16_ix),
op_entry!(MASK_EXACT, OP_CLR_16_AW, clr_16_aw),
op_entry!(MASK_EXACT, OP_CLR_16_AL, clr_16_al),
op_entry!(MASK_OUT_Y, OP_CLR_32_DN, clr_32_dn),
op_entry!(MASK_OUT_Y, OP_CLR_32_AI, clr_32_ai),
op_entry!(MASK_OUT_Y, OP_CLR_32_PI, clr_32_pi),
op_entry!(MASK_OUT_Y, OP_CLR_32_PD, clr_32_pd),
op_entry!(MASK_OUT_Y, OP_CLR_32_DI, clr_32_di),
op_entry!(MASK_OUT_Y, OP_CLR_32_IX, clr_32_ix),
op_entry!(MASK_EXACT, OP_CLR_32_AW, clr_32_aw),
op_entry!(MASK_EXACT, OP_CLR_32_AL, clr_32_al),
op_entry!(MASK_OUT_X_Y, OP_CMP_8_DN, cmp_8_dn),
op_entry!(MASK_OUT_X_Y, OP_CMP_8_AI, cmp_8_ai),
op_entry!(MASK_OUT_X_Y, OP_CMP_8_PI, cmp_8_pi),
op_entry!(MASK_OUT_X_Y, OP_CMP_8_PD, cmp_8_pd),
op_entry!(MASK_OUT_X_Y, OP_CMP_8_DI, cmp_8_di),
op_entry!(MASK_OUT_X_Y, OP_CMP_8_IX, cmp_8_ix),
op_entry!(MASK_OUT_X, OP_CMP_8_AW, cmp_8_aw),
op_entry!(MASK_OUT_X, OP_CMP_8_AL, cmp_8_al),
op_entry!(MASK_OUT_X, OP_CMP_8_PCDI, cmp_8_pcdi),
op_entry!(MASK_OUT_X, OP_CMP_8_PCIX, cmp_8_pcix),
op_entry!(MASK_OUT_X, OP_CMP_8_IMM, cmp_8_imm),
op_entry!(MASK_OUT_X_Y, OP_CMP_16_DN, cmp_16_dn),
op_entry!(MASK_OUT_X_Y, OP_CMP_16_AN, cmp_16_an),
op_entry!(MASK_OUT_X_Y, OP_CMP_16_AI, cmp_16_ai),
op_entry!(MASK_OUT_X_Y, OP_CMP_16_PI, cmp_16_pi),
op_entry!(MASK_OUT_X_Y, OP_CMP_16_PD, cmp_16_pd),
op_entry!(MASK_OUT_X_Y, OP_CMP_16_DI, cmp_16_di),
op_entry!(MASK_OUT_X_Y, OP_CMP_16_IX, cmp_16_ix),
op_entry!(MASK_OUT_X, OP_CMP_16_AW, cmp_16_aw),
op_entry!(MASK_OUT_X, OP_CMP_16_AL, cmp_16_al),
op_entry!(MASK_OUT_X, OP_CMP_16_PCDI, cmp_16_pcdi),
op_entry!(MASK_OUT_X, OP_CMP_16_PCIX, cmp_16_pcix),
op_entry!(MASK_OUT_X, OP_CMP_16_IMM, cmp_16_imm),
op_entry!(MASK_OUT_X_Y, OP_CMP_32_DN, cmp_32_dn),
op_entry!(MASK_OUT_X_Y, OP_CMP_32_AN, cmp_32_an),
op_entry!(MASK_OUT_X_Y, OP_CMP_32_AI, cmp_32_ai),
op_entry!(MASK_OUT_X_Y, OP_CMP_32_PI, cmp_32_pi),
op_entry!(MASK_OUT_X_Y, OP_CMP_32_PD, cmp_32_pd),
op_entry!(MASK_OUT_X_Y, OP_CMP_32_DI, cmp_32_di),
op_entry!(MASK_OUT_X_Y, OP_CMP_32_IX, cmp_32_ix),
op_entry!(MASK_OUT_X, OP_CMP_32_AW, cmp_32_aw),
op_entry!(MASK_OUT_X, OP_CMP_32_AL, cmp_32_al),
op_entry!(MASK_OUT_X, OP_CMP_32_PCDI, cmp_32_pcdi),
op_entry!(MASK_OUT_X, OP_CMP_32_PCIX, cmp_32_pcix),
op_entry!(MASK_OUT_X, OP_CMP_32_IMM, cmp_32_imm),
op_entry!(MASK_OUT_X_Y, OP_CMPA_16_DN, cmpa_16_dn),
op_entry!(MASK_OUT_X_Y, OP_CMPA_16_AN, cmpa_16_an),
op_entry!(MASK_OUT_X_Y, OP_CMPA_16_AI, cmpa_16_ai),
op_entry!(MASK_OUT_X_Y, OP_CMPA_16_PI, cmpa_16_pi),
op_entry!(MASK_OUT_X_Y, OP_CMPA_16_PD, cmpa_16_pd),
op_entry!(MASK_OUT_X_Y, OP_CMPA_16_DI, cmpa_16_di),
op_entry!(MASK_OUT_X_Y, OP_CMPA_16_IX, cmpa_16_ix),
op_entry!(MASK_OUT_X, OP_CMPA_16_AW, cmpa_16_aw),
op_entry!(MASK_OUT_X, OP_CMPA_16_AL, cmpa_16_al),
op_entry!(MASK_OUT_X, OP_CMPA_16_PCDI, cmpa_16_pcdi),
op_entry!(MASK_OUT_X, OP_CMPA_16_PCIX, cmpa_16_pcix),
op_entry!(MASK_OUT_X, OP_CMPA_16_IMM, cmpa_16_imm),
op_entry!(MASK_OUT_X_Y, OP_CMPA_32_DN, cmpa_32_dn),
op_entry!(MASK_OUT_X_Y, OP_CMPA_32_AN, cmpa_32_an),
op_entry!(MASK_OUT_X_Y, OP_CMPA_32_AI, cmpa_32_ai),
op_entry!(MASK_OUT_X_Y, OP_CMPA_32_PI, cmpa_32_pi),
op_entry!(MASK_OUT_X_Y, OP_CMPA_32_PD, cmpa_32_pd),
op_entry!(MASK_OUT_X_Y, OP_CMPA_32_DI, cmpa_32_di),
op_entry!(MASK_OUT_X_Y, OP_CMPA_32_IX, cmpa_32_ix),
op_entry!(MASK_OUT_X, OP_CMPA_32_AW, cmpa_32_aw),
op_entry!(MASK_OUT_X, OP_CMPA_32_AL, cmpa_32_al),
op_entry!(MASK_OUT_X, OP_CMPA_32_PCDI, cmpa_32_pcdi),
op_entry!(MASK_OUT_X, OP_CMPA_32_PCIX, cmpa_32_pcix),
op_entry!(MASK_OUT_X, OP_CMPA_32_IMM, cmpa_32_imm),
op_entry!(MASK_OUT_Y, OP_CMPI_8_DN, cmpi_8_dn),
op_entry!(MASK_OUT_Y, OP_CMPI_8_AI, cmpi_8_ai),
op_entry!(MASK_OUT_Y, OP_CMPI_8_PI, cmpi_8_pi),
op_entry!(MASK_OUT_Y, OP_CMPI_8_PD, cmpi_8_pd),
op_entry!(MASK_OUT_Y, OP_CMPI_8_DI, cmpi_8_di),
op_entry!(MASK_OUT_Y, OP_CMPI_8_IX, cmpi_8_ix),
op_entry!(MASK_EXACT, OP_CMPI_8_AW, cmpi_8_aw),
op_entry!(MASK_EXACT, OP_CMPI_8_AL, cmpi_8_al),
op_entry!(MASK_OUT_Y, OP_CMPI_16_DN, cmpi_16_dn),
op_entry!(MASK_OUT_Y, OP_CMPI_16_AI, cmpi_16_ai),
op_entry!(MASK_OUT_Y, OP_CMPI_16_PI, cmpi_16_pi),
op_entry!(MASK_OUT_Y, OP_CMPI_16_PD, cmpi_16_pd),
op_entry!(MASK_OUT_Y, OP_CMPI_16_DI, cmpi_16_di),
op_entry!(MASK_OUT_Y, OP_CMPI_16_IX, cmpi_16_ix),
op_entry!(MASK_EXACT, OP_CMPI_16_AW, cmpi_16_aw),
op_entry!(MASK_EXACT, OP_CMPI_16_AL, cmpi_16_al),
op_entry!(MASK_OUT_Y, OP_CMPI_32_DN, cmpi_32_dn),
op_entry!(MASK_OUT_Y, OP_CMPI_32_AI, cmpi_32_ai),
op_entry!(MASK_OUT_Y, OP_CMPI_32_PI, cmpi_32_pi),
op_entry!(MASK_OUT_Y, OP_CMPI_32_PD, cmpi_32_pd),
op_entry!(MASK_OUT_Y, OP_CMPI_32_DI, cmpi_32_di),
op_entry!(MASK_OUT_Y, OP_CMPI_32_IX, cmpi_32_ix),
op_entry!(MASK_EXACT, OP_CMPI_32_AW, cmpi_32_aw),
op_entry!(MASK_EXACT, OP_CMPI_32_AL, cmpi_32_al),
op_entry!(MASK_OUT_X_Y, OP_CMPM_8, cmpm_8),
op_entry!(MASK_OUT_X_Y, OP_CMPM_16, cmpm_16),
op_entry!(MASK_OUT_X_Y, OP_CMPM_32, cmpm_32),
// DBcc (test condition, decrement and branch) op-entries
op_entry!(MASK_OUT_Y, OP_DBT_16, dbt_16),
op_entry!(MASK_OUT_Y, OP_DBF_16, dbf_16),
op_entry!(MASK_OUT_Y, OP_DBHI_16, dbhi_16),
op_entry!(MASK_OUT_Y, OP_DBLS_16, dbls_16),
op_entry!(MASK_OUT_Y, OP_DBCC_16, dbcc_16),
op_entry!(MASK_OUT_Y, OP_DBCS_16, dbcs_16),
op_entry!(MASK_OUT_Y, OP_DBNE_16, dbne_16),
op_entry!(MASK_OUT_Y, OP_DBEQ_16, dbeq_16),
op_entry!(MASK_OUT_Y, OP_DBVC_16, dbvc_16),
op_entry!(MASK_OUT_Y, OP_DBVS_16, dbvs_16),
op_entry!(MASK_OUT_Y, OP_DBPL_16, dbpl_16),
op_entry!(MASK_OUT_Y, OP_DBMI_16, dbmi_16),
op_entry!(MASK_OUT_Y, OP_DBGE_16, dbge_16),
op_entry!(MASK_OUT_Y, OP_DBLT_16, dblt_16),
op_entry!(MASK_OUT_Y, OP_DBGT_16, dbgt_16),
op_entry!(MASK_OUT_Y, OP_DBLE_16, dble_16),
// DIVS op-entries
op_entry!(MASK_OUT_X_Y, OP_DIVS_16_AI, divs_16_ai),
op_entry!(MASK_OUT_X, OP_DIVS_16_AL, divs_16_al),
op_entry!(MASK_OUT_X, OP_DIVS_16_AW, divs_16_aw),
op_entry!(MASK_OUT_X_Y, OP_DIVS_16_DN, divs_16_dn),
op_entry!(MASK_OUT_X_Y, OP_DIVS_16_DI, divs_16_di),
op_entry!(MASK_OUT_X, OP_DIVS_16_IMM, divs_16_imm),
op_entry!(MASK_OUT_X_Y, OP_DIVS_16_IX, divs_16_ix),
op_entry!(MASK_OUT_X, OP_DIVS_16_PCDI, divs_16_pcdi),
op_entry!(MASK_OUT_X, OP_DIVS_16_PCIX, divs_16_pcix),
op_entry!(MASK_OUT_X_Y, OP_DIVS_16_PD, divs_16_pd),
op_entry!(MASK_OUT_X_Y, OP_DIVS_16_PI, divs_16_pi),
// DIVU op-entries
op_entry!(MASK_OUT_X_Y, OP_DIVU_16_AI, divu_16_ai),
op_entry!(MASK_OUT_X, OP_DIVU_16_AL, divu_16_al),
op_entry!(MASK_OUT_X, OP_DIVU_16_AW, divu_16_aw),
op_entry!(MASK_OUT_X_Y, OP_DIVU_16_DN, divu_16_dn),
op_entry!(MASK_OUT_X_Y, OP_DIVU_16_DI, divu_16_di),
op_entry!(MASK_OUT_X, OP_DIVU_16_IMM, divu_16_imm),
op_entry!(MASK_OUT_X_Y, OP_DIVU_16_IX, divu_16_ix),
op_entry!(MASK_OUT_X, OP_DIVU_16_PCDI, divu_16_pcdi),
op_entry!(MASK_OUT_X, OP_DIVU_16_PCIX, divu_16_pcix),
op_entry!(MASK_OUT_X_Y, OP_DIVU_16_PD, divu_16_pd),
op_entry!(MASK_OUT_X_Y, OP_DIVU_16_PI, divu_16_pi),
// EOR, EORI, EORI to CCR and EORI to SR op-entries
op_entry!(MASK_OUT_X_Y, OP_EOR_8_DN, eor_8_dn),
op_entry!(MASK_OUT_X_Y, OP_EOR_8_AI, eor_8_ai),
op_entry!(MASK_OUT_X_Y, OP_EOR_8_PI, eor_8_pi),
op_entry!(MASK_OUT_X_Y, OP_EOR_8_PD, eor_8_pd),
op_entry!(MASK_OUT_X_Y, OP_EOR_8_DI, eor_8_di),
op_entry!(MASK_OUT_X_Y, OP_EOR_8_IX, eor_8_ix),
op_entry!(MASK_OUT_X, OP_EOR_8_AW, eor_8_aw),
op_entry!(MASK_OUT_X, OP_EOR_8_AL, eor_8_al),
op_entry!(MASK_OUT_X_Y, OP_EOR_16_DN, eor_16_dn),
op_entry!(MASK_OUT_X_Y, OP_EOR_16_AI, eor_16_ai),
op_entry!(MASK_OUT_X_Y, OP_EOR_16_PI, eor_16_pi),
op_entry!(MASK_OUT_X_Y, OP_EOR_16_PD, eor_16_pd),
op_entry!(MASK_OUT_X_Y, OP_EOR_16_DI, eor_16_di),
op_entry!(MASK_OUT_X_Y, OP_EOR_16_IX, eor_16_ix),
op_entry!(MASK_OUT_X, OP_EOR_16_AW, eor_16_aw),
op_entry!(MASK_OUT_X, OP_EOR_16_AL, eor_16_al),
op_entry!(MASK_OUT_X_Y, OP_EOR_32_DN, eor_32_dn),
op_entry!(MASK_OUT_X_Y, OP_EOR_32_AI, eor_32_ai),
op_entry!(MASK_OUT_X_Y, OP_EOR_32_PI, eor_32_pi),
op_entry!(MASK_OUT_X_Y, OP_EOR_32_PD, eor_32_pd),
op_entry!(MASK_OUT_X_Y, OP_EOR_32_DI, eor_32_di),
op_entry!(MASK_OUT_X_Y, OP_EOR_32_IX, eor_32_ix),
op_entry!(MASK_OUT_X, OP_EOR_32_AW, eor_32_aw),
op_entry!(MASK_OUT_X, OP_EOR_32_AL, eor_32_al),
op_entry!(MASK_OUT_Y, OP_EORI_8_DN, eori_8_dn),
op_entry!(MASK_OUT_Y, OP_EORI_8_AI, eori_8_ai),
op_entry!(MASK_OUT_Y, OP_EORI_8_PI, eori_8_pi),
op_entry!(MASK_OUT_Y, OP_EORI_8_PD, eori_8_pd),
op_entry!(MASK_OUT_Y, OP_EORI_8_DI, eori_8_di),
op_entry!(MASK_OUT_Y, OP_EORI_8_IX, eori_8_ix),
op_entry!(MASK_EXACT, OP_EORI_8_AW, eori_8_aw),
op_entry!(MASK_EXACT, OP_EORI_8_AL, eori_8_al),
op_entry!(MASK_OUT_Y, OP_EORI_16_DN, eori_16_dn),
op_entry!(MASK_OUT_Y, OP_EORI_16_AI, eori_16_ai),
op_entry!(MASK_OUT_Y, OP_EORI_16_PI, eori_16_pi),
op_entry!(MASK_OUT_Y, OP_EORI_16_PD, eori_16_pd),
op_entry!(MASK_OUT_Y, OP_EORI_16_DI, eori_16_di),
op_entry!(MASK_OUT_Y, OP_EORI_16_IX, eori_16_ix),
op_entry!(MASK_EXACT, OP_EORI_16_AW, eori_16_aw),
op_entry!(MASK_EXACT, OP_EORI_16_AL, eori_16_al),
op_entry!(MASK_OUT_Y, OP_EORI_32_DN, eori_32_dn),
op_entry!(MASK_OUT_Y, OP_EORI_32_AI, eori_32_ai),
op_entry!(MASK_OUT_Y, OP_EORI_32_PI, eori_32_pi),
op_entry!(MASK_OUT_Y, OP_EORI_32_PD, eori_32_pd),
op_entry!(MASK_OUT_Y, OP_EORI_32_DI, eori_32_di),
op_entry!(MASK_OUT_Y, OP_EORI_32_IX, eori_32_ix),
op_entry!(MASK_EXACT, OP_EORI_32_AW, eori_32_aw),
op_entry!(MASK_EXACT, OP_EORI_32_AL, eori_32_al),
op_entry!(MASK_EXACT, OP_EORI_8_TOC, eori_8_toc),
op_entry!(MASK_EXACT, OP_EORI_16_TOS, eori_16_tos),
// EXG op-entries
op_entry!(MASK_OUT_X_Y, OP_EXG_32_DD, exg_32_dd),
op_entry!(MASK_OUT_X_Y, OP_EXG_32_AA, exg_32_aa),
op_entry!(MASK_OUT_X_Y, OP_EXG_32_DA, exg_32_da),
// EXT op-entries
op_entry!(MASK_OUT_Y, OP_EXT_BW, ext_bw),
op_entry!(MASK_OUT_Y, OP_EXT_WL, ext_wl),
// ILLEGAL op-entry
op_entry!(MASK_EXACT, OP_ILLEGAL, real_illegal),
// JMP op-entries
op_entry!(MASK_OUT_Y, OP_JMP_32_AI, jmp_32_ai),
op_entry!(MASK_EXACT, OP_JMP_32_AL, jmp_32_al),
op_entry!(MASK_EXACT, OP_JMP_32_AW, jmp_32_aw),
op_entry!(MASK_OUT_Y, OP_JMP_32_DI, jmp_32_di),
op_entry!(MASK_OUT_Y, OP_JMP_32_IX, jmp_32_ix),
op_entry!(MASK_EXACT, OP_JMP_32_PCDI, jmp_32_pcdi),
op_entry!(MASK_EXACT, OP_JMP_32_PCIX, jmp_32_pcix),
// JSR op-entries
op_entry!(MASK_OUT_Y, OP_JSR_32_AI, jsr_32_ai),
op_entry!(MASK_EXACT, OP_JSR_32_AL, jsr_32_al),
op_entry!(MASK_EXACT, OP_JSR_32_AW, jsr_32_aw),
op_entry!(MASK_OUT_Y, OP_JSR_32_DI, jsr_32_di),
op_entry!(MASK_OUT_Y, OP_JSR_32_IX, jsr_32_ix),
op_entry!(MASK_EXACT, OP_JSR_32_PCDI, jsr_32_pcdi),
op_entry!(MASK_EXACT, OP_JSR_32_PCIX, jsr_32_pcix),
// LEA op-entries
op_entry!(MASK_OUT_X_Y, OP_LEA_32_AI, lea_32_ai),
op_entry!(MASK_OUT_X, OP_LEA_32_AL, lea_32_al),
op_entry!(MASK_OUT_X, OP_LEA_32_AW, lea_32_aw),
op_entry!(MASK_OUT_X_Y, OP_LEA_32_DI, lea_32_di),
op_entry!(MASK_OUT_X_Y, OP_LEA_32_IX, lea_32_ix),
op_entry!(MASK_OUT_X, OP_LEA_32_PCDI, lea_32_pcdi),
op_entry!(MASK_OUT_X, OP_LEA_32_PCIX, lea_32_pcix),
// LINK op-entry
op_entry!(MASK_OUT_Y, OP_LINK_16, link_16),
// LSL, LSR op-entries
op_entry!(MASK_OUT_X_Y, OP_LSR_8_S, lsr_8_s),
op_entry!(MASK_OUT_X_Y, OP_LSR_16_S, lsr_16_s),
op_entry!(MASK_OUT_X_Y, OP_LSR_32_S, lsr_32_s),
op_entry!(MASK_OUT_X_Y, OP_LSR_8_R, lsr_8_r),
op_entry!(MASK_OUT_X_Y, OP_LSR_16_R, lsr_16_r),
op_entry!(MASK_OUT_X_Y, OP_LSR_32_R, lsr_32_r),
op_entry!(MASK_OUT_X_Y, OP_LSL_8_S, lsl_8_s),
op_entry!(MASK_OUT_X_Y, OP_LSL_16_S, lsl_16_s),
op_entry!(MASK_OUT_X_Y, OP_LSL_32_S, lsl_32_s),
op_entry!(MASK_OUT_X_Y, OP_LSL_8_R, lsl_8_r),
op_entry!(MASK_OUT_X_Y, OP_LSL_16_R, lsl_16_r),
op_entry!(MASK_OUT_X_Y, OP_LSL_32_R, lsl_32_r),
op_entry!(MASK_OUT_Y, OP_LSL_16_AI, lsl_16_ai),
op_entry!(MASK_OUT_Y, OP_LSL_16_PI, lsl_16_pi),
op_entry!(MASK_OUT_Y, OP_LSL_16_PD, lsl_16_pd),
op_entry!(MASK_OUT_Y, OP_LSL_16_DI, lsl_16_di),
op_entry!(MASK_OUT_Y, OP_LSL_16_IX, lsl_16_ix),
op_entry!(MASK_EXACT, OP_LSL_16_AW, lsl_16_aw),
op_entry!(MASK_EXACT, OP_LSL_16_AL, lsl_16_al),
op_entry!(MASK_OUT_Y, OP_LSR_16_AI, lsr_16_ai),
op_entry!(MASK_OUT_Y, OP_LSR_16_PI, lsr_16_pi),
op_entry!(MASK_OUT_Y, OP_LSR_16_PD, lsr_16_pd),
op_entry!(MASK_OUT_Y, OP_LSR_16_DI, lsr_16_di),
op_entry!(MASK_OUT_Y, OP_LSR_16_IX, lsr_16_ix),
op_entry!(MASK_EXACT, OP_LSR_16_AW, lsr_16_aw),
op_entry!(MASK_EXACT, OP_LSR_16_AL, lsr_16_al),
// MOVE op-entries
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_DN_DN, move_8_dn_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_AI_DN, move_8_ai_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_PI_DN, move_8_pi_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_PD_DN, move_8_pd_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_DI_DN, move_8_di_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_IX_DN, move_8_ix_dn),
op_entry!(MASK_OUT_Y, OP_MOVE_8_AW_DN, move_8_aw_dn),
op_entry!(MASK_OUT_Y, OP_MOVE_8_AL_DN, move_8_al_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_DN_AI, move_8_dn_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_AI_AI, move_8_ai_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_PI_AI, move_8_pi_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_PD_AI, move_8_pd_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_DI_AI, move_8_di_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_IX_AI, move_8_ix_ai),
op_entry!(MASK_OUT_Y, OP_MOVE_8_AW_AI, move_8_aw_ai),
op_entry!(MASK_OUT_Y, OP_MOVE_8_AL_AI, move_8_al_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_DN_PI, move_8_dn_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_AI_PI, move_8_ai_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_PI_PI, move_8_pi_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_PD_PI, move_8_pd_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_DI_PI, move_8_di_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_IX_PI, move_8_ix_pi),
op_entry!(MASK_OUT_Y, OP_MOVE_8_AW_PI, move_8_aw_pi),
op_entry!(MASK_OUT_Y, OP_MOVE_8_AL_PI, move_8_al_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_DN_PD, move_8_dn_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_AI_PD, move_8_ai_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_PI_PD, move_8_pi_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_PD_PD, move_8_pd_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_DI_PD, move_8_di_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_IX_PD, move_8_ix_pd),
op_entry!(MASK_OUT_Y, OP_MOVE_8_AW_PD, move_8_aw_pd),
op_entry!(MASK_OUT_Y, OP_MOVE_8_AL_PD, move_8_al_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_DN_DI, move_8_dn_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_AI_DI, move_8_ai_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_PI_DI, move_8_pi_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_PD_DI, move_8_pd_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_DI_DI, move_8_di_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_IX_DI, move_8_ix_di),
op_entry!(MASK_OUT_Y, OP_MOVE_8_AW_DI, move_8_aw_di),
op_entry!(MASK_OUT_Y, OP_MOVE_8_AL_DI, move_8_al_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_DN_IX, move_8_dn_ix),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_AI_IX, move_8_ai_ix),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_PI_IX, move_8_pi_ix),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_PD_IX, move_8_pd_ix),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_DI_IX, move_8_di_ix),
op_entry!(MASK_OUT_X_Y, OP_MOVE_8_IX_IX, move_8_ix_ix),
op_entry!(MASK_OUT_Y, OP_MOVE_8_AW_IX, move_8_aw_ix),
op_entry!(MASK_OUT_Y, OP_MOVE_8_AL_IX, move_8_al_ix),
op_entry!(MASK_OUT_X, OP_MOVE_8_DN_AW, move_8_dn_aw),
op_entry!(MASK_OUT_X, OP_MOVE_8_AI_AW, move_8_ai_aw),
op_entry!(MASK_OUT_X, OP_MOVE_8_PI_AW, move_8_pi_aw),
op_entry!(MASK_OUT_X, OP_MOVE_8_PD_AW, move_8_pd_aw),
op_entry!(MASK_OUT_X, OP_MOVE_8_DI_AW, move_8_di_aw),
op_entry!(MASK_OUT_X, OP_MOVE_8_IX_AW, move_8_ix_aw),
op_entry!(MASK_EXACT, OP_MOVE_8_AW_AW, move_8_aw_aw),
op_entry!(MASK_EXACT, OP_MOVE_8_AL_AW, move_8_al_aw),
op_entry!(MASK_OUT_X, OP_MOVE_8_DN_AL, move_8_dn_al),
op_entry!(MASK_OUT_X, OP_MOVE_8_AI_AL, move_8_ai_al),
op_entry!(MASK_OUT_X, OP_MOVE_8_PI_AL, move_8_pi_al),
op_entry!(MASK_OUT_X, OP_MOVE_8_PD_AL, move_8_pd_al),
op_entry!(MASK_OUT_X, OP_MOVE_8_DI_AL, move_8_di_al),
op_entry!(MASK_OUT_X, OP_MOVE_8_IX_AL, move_8_ix_al),
op_entry!(MASK_EXACT, OP_MOVE_8_AW_AL, move_8_aw_al),
op_entry!(MASK_EXACT, OP_MOVE_8_AL_AL, move_8_al_al),
op_entry!(MASK_OUT_X, OP_MOVE_8_DN_PCDI, move_8_dn_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_8_AI_PCDI, move_8_ai_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_8_PI_PCDI, move_8_pi_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_8_PD_PCDI, move_8_pd_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_8_DI_PCDI, move_8_di_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_8_IX_PCDI, move_8_ix_pcdi),
op_entry!(MASK_EXACT, OP_MOVE_8_AW_PCDI, move_8_aw_pcdi),
op_entry!(MASK_EXACT, OP_MOVE_8_AL_PCDI, move_8_al_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_8_DN_PCIX, move_8_dn_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_8_AI_PCIX, move_8_ai_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_8_PI_PCIX, move_8_pi_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_8_PD_PCIX, move_8_pd_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_8_DI_PCIX, move_8_di_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_8_IX_PCIX, move_8_ix_pcix),
op_entry!(MASK_EXACT, OP_MOVE_8_AW_PCIX, move_8_aw_pcix),
op_entry!(MASK_EXACT, OP_MOVE_8_AL_PCIX, move_8_al_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_8_DN_IMM, move_8_dn_imm),
op_entry!(MASK_OUT_X, OP_MOVE_8_AI_IMM, move_8_ai_imm),
op_entry!(MASK_OUT_X, OP_MOVE_8_PI_IMM, move_8_pi_imm),
op_entry!(MASK_OUT_X, OP_MOVE_8_PD_IMM, move_8_pd_imm),
op_entry!(MASK_OUT_X, OP_MOVE_8_DI_IMM, move_8_di_imm),
op_entry!(MASK_OUT_X, OP_MOVE_8_IX_IMM, move_8_ix_imm),
op_entry!(MASK_EXACT, OP_MOVE_8_AW_IMM, move_8_aw_imm),
op_entry!(MASK_EXACT, OP_MOVE_8_AL_IMM, move_8_al_imm),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_DN_DN, move_16_dn_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_AI_DN, move_16_ai_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_PI_DN, move_16_pi_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_PD_DN, move_16_pd_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_DI_DN, move_16_di_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_IX_DN, move_16_ix_dn),
op_entry!(MASK_OUT_Y, OP_MOVE_16_AW_DN, move_16_aw_dn),
op_entry!(MASK_OUT_Y, OP_MOVE_16_AL_DN, move_16_al_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_DN_AN, move_16_dn_an),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_AI_AN, move_16_ai_an),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_PI_AN, move_16_pi_an),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_PD_AN, move_16_pd_an),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_DI_AN, move_16_di_an),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_IX_AN, move_16_ix_an),
op_entry!(MASK_OUT_Y, OP_MOVE_16_AW_AN, move_16_aw_an),
op_entry!(MASK_OUT_Y, OP_MOVE_16_AL_AN, move_16_al_an),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_DN_AI, move_16_dn_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_AI_AI, move_16_ai_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_PI_AI, move_16_pi_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_PD_AI, move_16_pd_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_DI_AI, move_16_di_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_IX_AI, move_16_ix_ai),
op_entry!(MASK_OUT_Y, OP_MOVE_16_AW_AI, move_16_aw_ai),
op_entry!(MASK_OUT_Y, OP_MOVE_16_AL_AI, move_16_al_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_DN_PI, move_16_dn_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_AI_PI, move_16_ai_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_PI_PI, move_16_pi_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_PD_PI, move_16_pd_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_DI_PI, move_16_di_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_IX_PI, move_16_ix_pi),
op_entry!(MASK_OUT_Y, OP_MOVE_16_AW_PI, move_16_aw_pi),
op_entry!(MASK_OUT_Y, OP_MOVE_16_AL_PI, move_16_al_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_DN_PD, move_16_dn_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_AI_PD, move_16_ai_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_PI_PD, move_16_pi_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_PD_PD, move_16_pd_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_DI_PD, move_16_di_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_IX_PD, move_16_ix_pd),
op_entry!(MASK_OUT_Y, OP_MOVE_16_AW_PD, move_16_aw_pd),
op_entry!(MASK_OUT_Y, OP_MOVE_16_AL_PD, move_16_al_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_DN_DI, move_16_dn_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_AI_DI, move_16_ai_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_PI_DI, move_16_pi_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_PD_DI, move_16_pd_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_DI_DI, move_16_di_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_IX_DI, move_16_ix_di),
op_entry!(MASK_OUT_Y, OP_MOVE_16_AW_DI, move_16_aw_di),
op_entry!(MASK_OUT_Y, OP_MOVE_16_AL_DI, move_16_al_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_DN_IX, move_16_dn_ix),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_AI_IX, move_16_ai_ix),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_PI_IX, move_16_pi_ix),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_PD_IX, move_16_pd_ix),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_DI_IX, move_16_di_ix),
op_entry!(MASK_OUT_X_Y, OP_MOVE_16_IX_IX, move_16_ix_ix),
op_entry!(MASK_OUT_Y, OP_MOVE_16_AW_IX, move_16_aw_ix),
op_entry!(MASK_OUT_Y, OP_MOVE_16_AL_IX, move_16_al_ix),
op_entry!(MASK_OUT_X, OP_MOVE_16_DN_AW, move_16_dn_aw),
op_entry!(MASK_OUT_X, OP_MOVE_16_AI_AW, move_16_ai_aw),
op_entry!(MASK_OUT_X, OP_MOVE_16_PI_AW, move_16_pi_aw),
op_entry!(MASK_OUT_X, OP_MOVE_16_PD_AW, move_16_pd_aw),
op_entry!(MASK_OUT_X, OP_MOVE_16_DI_AW, move_16_di_aw),
op_entry!(MASK_OUT_X, OP_MOVE_16_IX_AW, move_16_ix_aw),
op_entry!(MASK_EXACT, OP_MOVE_16_AW_AW, move_16_aw_aw),
op_entry!(MASK_EXACT, OP_MOVE_16_AL_AW, move_16_al_aw),
op_entry!(MASK_OUT_X, OP_MOVE_16_DN_AL, move_16_dn_al),
op_entry!(MASK_OUT_X, OP_MOVE_16_AI_AL, move_16_ai_al),
op_entry!(MASK_OUT_X, OP_MOVE_16_PI_AL, move_16_pi_al),
op_entry!(MASK_OUT_X, OP_MOVE_16_PD_AL, move_16_pd_al),
op_entry!(MASK_OUT_X, OP_MOVE_16_DI_AL, move_16_di_al),
op_entry!(MASK_OUT_X, OP_MOVE_16_IX_AL, move_16_ix_al),
op_entry!(MASK_EXACT, OP_MOVE_16_AW_AL, move_16_aw_al),
op_entry!(MASK_EXACT, OP_MOVE_16_AL_AL, move_16_al_al),
op_entry!(MASK_OUT_X, OP_MOVE_16_DN_PCDI, move_16_dn_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_16_AI_PCDI, move_16_ai_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_16_PI_PCDI, move_16_pi_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_16_PD_PCDI, move_16_pd_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_16_DI_PCDI, move_16_di_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_16_IX_PCDI, move_16_ix_pcdi),
op_entry!(MASK_EXACT, OP_MOVE_16_AW_PCDI, move_16_aw_pcdi),
op_entry!(MASK_EXACT, OP_MOVE_16_AL_PCDI, move_16_al_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_16_DN_PCIX, move_16_dn_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_16_AI_PCIX, move_16_ai_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_16_PI_PCIX, move_16_pi_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_16_PD_PCIX, move_16_pd_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_16_DI_PCIX, move_16_di_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_16_IX_PCIX, move_16_ix_pcix),
op_entry!(MASK_EXACT, OP_MOVE_16_AW_PCIX, move_16_aw_pcix),
op_entry!(MASK_EXACT, OP_MOVE_16_AL_PCIX, move_16_al_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_16_DN_IMM, move_16_dn_imm),
op_entry!(MASK_OUT_X, OP_MOVE_16_AI_IMM, move_16_ai_imm),
op_entry!(MASK_OUT_X, OP_MOVE_16_PI_IMM, move_16_pi_imm),
op_entry!(MASK_OUT_X, OP_MOVE_16_PD_IMM, move_16_pd_imm),
op_entry!(MASK_OUT_X, OP_MOVE_16_DI_IMM, move_16_di_imm),
op_entry!(MASK_OUT_X, OP_MOVE_16_IX_IMM, move_16_ix_imm),
op_entry!(MASK_EXACT, OP_MOVE_16_AW_IMM, move_16_aw_imm),
op_entry!(MASK_EXACT, OP_MOVE_16_AL_IMM, move_16_al_imm),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_DN_DN, move_32_dn_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_AI_DN, move_32_ai_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_PI_DN, move_32_pi_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_PD_DN, move_32_pd_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_DI_DN, move_32_di_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_IX_DN, move_32_ix_dn),
op_entry!(MASK_OUT_Y, OP_MOVE_32_AW_DN, move_32_aw_dn),
op_entry!(MASK_OUT_Y, OP_MOVE_32_AL_DN, move_32_al_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_DN_AN, move_32_dn_an),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_AI_AN, move_32_ai_an),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_PI_AN, move_32_pi_an),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_PD_AN, move_32_pd_an),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_DI_AN, move_32_di_an),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_IX_AN, move_32_ix_an),
op_entry!(MASK_OUT_Y, OP_MOVE_32_AW_AN, move_32_aw_an),
op_entry!(MASK_OUT_Y, OP_MOVE_32_AL_AN, move_32_al_an),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_DN_AI, move_32_dn_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_AI_AI, move_32_ai_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_PI_AI, move_32_pi_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_PD_AI, move_32_pd_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_DI_AI, move_32_di_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_IX_AI, move_32_ix_ai),
op_entry!(MASK_OUT_Y, OP_MOVE_32_AW_AI, move_32_aw_ai),
op_entry!(MASK_OUT_Y, OP_MOVE_32_AL_AI, move_32_al_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_DN_PI, move_32_dn_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_AI_PI, move_32_ai_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_PI_PI, move_32_pi_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_PD_PI, move_32_pd_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_DI_PI, move_32_di_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_IX_PI, move_32_ix_pi),
op_entry!(MASK_OUT_Y, OP_MOVE_32_AW_PI, move_32_aw_pi),
op_entry!(MASK_OUT_Y, OP_MOVE_32_AL_PI, move_32_al_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_DN_PD, move_32_dn_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_AI_PD, move_32_ai_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_PI_PD, move_32_pi_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_PD_PD, move_32_pd_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_DI_PD, move_32_di_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_IX_PD, move_32_ix_pd),
op_entry!(MASK_OUT_Y, OP_MOVE_32_AW_PD, move_32_aw_pd),
op_entry!(MASK_OUT_Y, OP_MOVE_32_AL_PD, move_32_al_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_DN_DI, move_32_dn_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_AI_DI, move_32_ai_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_PI_DI, move_32_pi_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_PD_DI, move_32_pd_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_DI_DI, move_32_di_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_IX_DI, move_32_ix_di),
op_entry!(MASK_OUT_Y, OP_MOVE_32_AW_DI, move_32_aw_di),
op_entry!(MASK_OUT_Y, OP_MOVE_32_AL_DI, move_32_al_di),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_DN_IX, move_32_dn_ix),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_AI_IX, move_32_ai_ix),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_PI_IX, move_32_pi_ix),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_PD_IX, move_32_pd_ix),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_DI_IX, move_32_di_ix),
op_entry!(MASK_OUT_X_Y, OP_MOVE_32_IX_IX, move_32_ix_ix),
op_entry!(MASK_OUT_Y, OP_MOVE_32_AW_IX, move_32_aw_ix),
op_entry!(MASK_OUT_Y, OP_MOVE_32_AL_IX, move_32_al_ix),
op_entry!(MASK_OUT_X, OP_MOVE_32_DN_AW, move_32_dn_aw),
op_entry!(MASK_OUT_X, OP_MOVE_32_AI_AW, move_32_ai_aw),
op_entry!(MASK_OUT_X, OP_MOVE_32_PI_AW, move_32_pi_aw),
op_entry!(MASK_OUT_X, OP_MOVE_32_PD_AW, move_32_pd_aw),
op_entry!(MASK_OUT_X, OP_MOVE_32_DI_AW, move_32_di_aw),
op_entry!(MASK_OUT_X, OP_MOVE_32_IX_AW, move_32_ix_aw),
op_entry!(MASK_EXACT, OP_MOVE_32_AW_AW, move_32_aw_aw),
op_entry!(MASK_EXACT, OP_MOVE_32_AL_AW, move_32_al_aw),
op_entry!(MASK_OUT_X, OP_MOVE_32_DN_AL, move_32_dn_al),
op_entry!(MASK_OUT_X, OP_MOVE_32_AI_AL, move_32_ai_al),
op_entry!(MASK_OUT_X, OP_MOVE_32_PI_AL, move_32_pi_al),
op_entry!(MASK_OUT_X, OP_MOVE_32_PD_AL, move_32_pd_al),
op_entry!(MASK_OUT_X, OP_MOVE_32_DI_AL, move_32_di_al),
op_entry!(MASK_OUT_X, OP_MOVE_32_IX_AL, move_32_ix_al),
op_entry!(MASK_EXACT, OP_MOVE_32_AW_AL, move_32_aw_al),
op_entry!(MASK_EXACT, OP_MOVE_32_AL_AL, move_32_al_al),
op_entry!(MASK_OUT_X, OP_MOVE_32_DN_PCDI, move_32_dn_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_32_AI_PCDI, move_32_ai_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_32_PI_PCDI, move_32_pi_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_32_PD_PCDI, move_32_pd_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_32_DI_PCDI, move_32_di_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_32_IX_PCDI, move_32_ix_pcdi),
op_entry!(MASK_EXACT, OP_MOVE_32_AW_PCDI, move_32_aw_pcdi),
op_entry!(MASK_EXACT, OP_MOVE_32_AL_PCDI, move_32_al_pcdi),
op_entry!(MASK_OUT_X, OP_MOVE_32_DN_PCIX, move_32_dn_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_32_AI_PCIX, move_32_ai_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_32_PI_PCIX, move_32_pi_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_32_PD_PCIX, move_32_pd_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_32_DI_PCIX, move_32_di_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_32_IX_PCIX, move_32_ix_pcix),
op_entry!(MASK_EXACT, OP_MOVE_32_AW_PCIX, move_32_aw_pcix),
op_entry!(MASK_EXACT, OP_MOVE_32_AL_PCIX, move_32_al_pcix),
op_entry!(MASK_OUT_X, OP_MOVE_32_DN_IMM, move_32_dn_imm),
op_entry!(MASK_OUT_X, OP_MOVE_32_AI_IMM, move_32_ai_imm),
op_entry!(MASK_OUT_X, OP_MOVE_32_PI_IMM, move_32_pi_imm),
op_entry!(MASK_OUT_X, OP_MOVE_32_PD_IMM, move_32_pd_imm),
op_entry!(MASK_OUT_X, OP_MOVE_32_DI_IMM, move_32_di_imm),
op_entry!(MASK_OUT_X, OP_MOVE_32_IX_IMM, move_32_ix_imm),
op_entry!(MASK_EXACT, OP_MOVE_32_AW_IMM, move_32_aw_imm),
op_entry!(MASK_EXACT, OP_MOVE_32_AL_IMM, move_32_al_imm),
// Put op-entries for MOVEA here
op_entry!(MASK_OUT_X_Y, OP_MOVEA_16_DN, movea_16_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVEA_16_AN, movea_16_an),
op_entry!(MASK_OUT_X_Y, OP_MOVEA_16_AI, movea_16_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVEA_16_PI, movea_16_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVEA_16_PD, movea_16_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVEA_16_DI, movea_16_di),
op_entry!(MASK_OUT_X_Y, OP_MOVEA_16_IX, movea_16_ix),
op_entry!(MASK_OUT_X, OP_MOVEA_16_AW, movea_16_aw),
op_entry!(MASK_OUT_X, OP_MOVEA_16_AL, movea_16_al),
op_entry!(MASK_OUT_X, OP_MOVEA_16_PCDI, movea_16_pcdi),
op_entry!(MASK_OUT_X, OP_MOVEA_16_PCIX, movea_16_pcix),
op_entry!(MASK_OUT_X, OP_MOVEA_16_IMM, movea_16_imm),
op_entry!(MASK_OUT_X_Y, OP_MOVEA_32_DN, movea_32_dn),
op_entry!(MASK_OUT_X_Y, OP_MOVEA_32_AN, movea_32_an),
op_entry!(MASK_OUT_X_Y, OP_MOVEA_32_AI, movea_32_ai),
op_entry!(MASK_OUT_X_Y, OP_MOVEA_32_PI, movea_32_pi),
op_entry!(MASK_OUT_X_Y, OP_MOVEA_32_PD, movea_32_pd),
op_entry!(MASK_OUT_X_Y, OP_MOVEA_32_DI, movea_32_di),
op_entry!(MASK_OUT_X_Y, OP_MOVEA_32_IX, movea_32_ix),
op_entry!(MASK_OUT_X, OP_MOVEA_32_AW, movea_32_aw),
op_entry!(MASK_OUT_X, OP_MOVEA_32_AL, movea_32_al),
op_entry!(MASK_OUT_X, OP_MOVEA_32_PCDI, movea_32_pcdi),
op_entry!(MASK_OUT_X, OP_MOVEA_32_PCIX, movea_32_pcix),
op_entry!(MASK_OUT_X, OP_MOVEA_32_IMM, movea_32_imm),
// Put op-entries for MOVE to CCR here
op_entry!(MASK_OUT_Y, OP_MOVE_16_TOC_DN, move_16_toc_dn),
op_entry!(MASK_OUT_Y, OP_MOVE_16_TOC_AI, move_16_toc_ai),
op_entry!(MASK_OUT_Y, OP_MOVE_16_TOC_PI, move_16_toc_pi),
op_entry!(MASK_OUT_Y, OP_MOVE_16_TOC_PD, move_16_toc_pd),
op_entry!(MASK_OUT_Y, OP_MOVE_16_TOC_DI, move_16_toc_di),
op_entry!(MASK_OUT_Y, OP_MOVE_16_TOC_IX, move_16_toc_ix),
op_entry!(MASK_EXACT, OP_MOVE_16_TOC_AW, move_16_toc_aw),
op_entry!(MASK_EXACT, OP_MOVE_16_TOC_AL, move_16_toc_al),
op_entry!(MASK_EXACT, OP_MOVE_16_TOC_PCDI, move_16_toc_pcdi),
op_entry!(MASK_EXACT, OP_MOVE_16_TOC_PCIX, move_16_toc_pcix),
op_entry!(MASK_EXACT, OP_MOVE_16_TOC_IMM, move_16_toc_imm),
// Put op-entries for MOVE from SR here
op_entry!(MASK_OUT_Y, OP_MOVE_16_FRS_DN, move_16_frs_dn),
op_entry!(MASK_OUT_Y, OP_MOVE_16_FRS_AI, move_16_frs_ai),
op_entry!(MASK_OUT_Y, OP_MOVE_16_FRS_PI, move_16_frs_pi),
op_entry!(MASK_OUT_Y, OP_MOVE_16_FRS_PD, move_16_frs_pd),
op_entry!(MASK_OUT_Y, OP_MOVE_16_FRS_DI, move_16_frs_di),
op_entry!(MASK_OUT_Y, OP_MOVE_16_FRS_IX, move_16_frs_ix),
op_entry!(MASK_EXACT, OP_MOVE_16_FRS_AW, move_16_frs_aw),
op_entry!(MASK_EXACT, OP_MOVE_16_FRS_AL, move_16_frs_al),
// Put op-entries for MOVE to SR here
op_entry!(MASK_OUT_Y, OP_MOVE_16_TOS_DN, move_16_tos_dn),
op_entry!(MASK_OUT_Y, OP_MOVE_16_TOS_AI, move_16_tos_ai),
op_entry!(MASK_OUT_Y, OP_MOVE_16_TOS_PI, move_16_tos_pi),
op_entry!(MASK_OUT_Y, OP_MOVE_16_TOS_PD, move_16_tos_pd),
op_entry!(MASK_OUT_Y, OP_MOVE_16_TOS_DI, move_16_tos_di),
op_entry!(MASK_OUT_Y, OP_MOVE_16_TOS_IX, move_16_tos_ix),
op_entry!(MASK_EXACT, OP_MOVE_16_TOS_AW, move_16_tos_aw),
op_entry!(MASK_EXACT, OP_MOVE_16_TOS_AL, move_16_tos_al),
op_entry!(MASK_EXACT, OP_MOVE_16_TOS_PCDI, move_16_tos_pcdi),
op_entry!(MASK_EXACT, OP_MOVE_16_TOS_PCIX, move_16_tos_pcix),
op_entry!(MASK_EXACT, OP_MOVE_16_TOS_IMM, move_16_tos_imm),
// Put op-entries for MOVE USP here
op_entry!(MASK_OUT_Y, OP_MOVE_32_TOU, move_32_tou),
op_entry!(MASK_OUT_Y, OP_MOVE_32_FRU, move_32_fru),
// Put op-entries for MOVEM here
op_entry!(MASK_OUT_Y, OP_MOVEM_16_RE_AI, movem_16_re_ai),
op_entry!(MASK_OUT_Y, OP_MOVEM_16_RE_PD, movem_16_re_pd),
op_entry!(MASK_OUT_Y, OP_MOVEM_16_RE_DI, movem_16_re_di),
op_entry!(MASK_OUT_Y, OP_MOVEM_16_RE_IX, movem_16_re_ix),
op_entry!(MASK_EXACT, OP_MOVEM_16_RE_AW, movem_16_re_aw),
op_entry!(MASK_EXACT, OP_MOVEM_16_RE_AL, movem_16_re_al),
op_entry!(MASK_OUT_Y, OP_MOVEM_16_ER_AI, movem_16_er_ai),
op_entry!(MASK_OUT_Y, OP_MOVEM_16_ER_PI, movem_16_er_pi),
op_entry!(MASK_OUT_Y, OP_MOVEM_16_ER_DI, movem_16_er_di),
op_entry!(MASK_OUT_Y, OP_MOVEM_16_ER_IX, movem_16_er_ix),
op_entry!(MASK_EXACT, OP_MOVEM_16_ER_AW, movem_16_er_aw),
op_entry!(MASK_EXACT, OP_MOVEM_16_ER_AL, movem_16_er_al),
op_entry!(MASK_EXACT, OP_MOVEM_16_ER_PCDI, movem_16_er_pcdi),
op_entry!(MASK_EXACT, OP_MOVEM_16_ER_PCIX, movem_16_er_pcix),
op_entry!(MASK_OUT_Y, OP_MOVEM_32_RE_AI, movem_32_re_ai),
op_entry!(MASK_OUT_Y, OP_MOVEM_32_RE_PD, movem_32_re_pd),
op_entry!(MASK_OUT_Y, OP_MOVEM_32_RE_DI, movem_32_re_di),
op_entry!(MASK_OUT_Y, OP_MOVEM_32_RE_IX, movem_32_re_ix),
op_entry!(MASK_EXACT, OP_MOVEM_32_RE_AW, movem_32_re_aw),
op_entry!(MASK_EXACT, OP_MOVEM_32_RE_AL, movem_32_re_al),
op_entry!(MASK_OUT_Y, OP_MOVEM_32_ER_AI, movem_32_er_ai),
op_entry!(MASK_OUT_Y, OP_MOVEM_32_ER_PI, movem_32_er_pi),
op_entry!(MASK_OUT_Y, OP_MOVEM_32_ER_DI, movem_32_er_di),
op_entry!(MASK_OUT_Y, OP_MOVEM_32_ER_IX, movem_32_er_ix),
op_entry!(MASK_EXACT, OP_MOVEM_32_ER_AW, movem_32_er_aw),
op_entry!(MASK_EXACT, OP_MOVEM_32_ER_AL, movem_32_er_al),
op_entry!(MASK_EXACT, OP_MOVEM_32_ER_PCDI, movem_32_er_pcdi),
op_entry!(MASK_EXACT, OP_MOVEM_32_ER_PCIX, movem_32_er_pcix),
// Put op-entries for MOVEP here
op_entry!(MASK_OUT_X_Y, OP_MOVEP_16_ER, movep_16_er),
op_entry!(MASK_OUT_X_Y, OP_MOVEP_16_RE, movep_16_re),
op_entry!(MASK_OUT_X_Y, OP_MOVEP_32_ER, movep_32_er),
op_entry!(MASK_OUT_X_Y, OP_MOVEP_32_RE, movep_32_re),
// Put op-entries for MOVEQ here
op_entry!(MASK_LOBYTX, OP_MOVEQ_32, moveq_32),
// Put op-entries for MULS here
op_entry!(MASK_OUT_X_Y, OP_MULS_16_DN, muls_16_dn),
op_entry!(MASK_OUT_X_Y, OP_MULS_16_AI, muls_16_ai),
op_entry!(MASK_OUT_X_Y, OP_MULS_16_PI, muls_16_pi),
op_entry!(MASK_OUT_X_Y, OP_MULS_16_PD, muls_16_pd),
op_entry!(MASK_OUT_X_Y, OP_MULS_16_DI, muls_16_di),
op_entry!(MASK_OUT_X_Y, OP_MULS_16_IX, muls_16_ix),
op_entry!(MASK_OUT_X, OP_MULS_16_AW, muls_16_aw),
op_entry!(MASK_OUT_X, OP_MULS_16_AL, muls_16_al),
op_entry!(MASK_OUT_X, OP_MULS_16_PCDI, muls_16_pcdi),
op_entry!(MASK_OUT_X, OP_MULS_16_PCIX, muls_16_pcix),
op_entry!(MASK_OUT_X, OP_MULS_16_IMM, muls_16_imm),
// Put op-entries for MULU here
op_entry!(MASK_OUT_X_Y, OP_MULU_16_DN, mulu_16_dn),
op_entry!(MASK_OUT_X_Y, OP_MULU_16_AI, mulu_16_ai),
op_entry!(MASK_OUT_X_Y, OP_MULU_16_PI, mulu_16_pi),
op_entry!(MASK_OUT_X_Y, OP_MULU_16_PD, mulu_16_pd),
op_entry!(MASK_OUT_X_Y, OP_MULU_16_DI, mulu_16_di),
op_entry!(MASK_OUT_X_Y, OP_MULU_16_IX, mulu_16_ix),
op_entry!(MASK_OUT_X, OP_MULU_16_AW, mulu_16_aw),
op_entry!(MASK_OUT_X, OP_MULU_16_AL, mulu_16_al),
op_entry!(MASK_OUT_X, OP_MULU_16_PCDI, mulu_16_pcdi),
op_entry!(MASK_OUT_X, OP_MULU_16_PCIX, mulu_16_pcix),
op_entry!(MASK_OUT_X, OP_MULU_16_IMM, mulu_16_imm),
// Put op-entries for NBCD here
op_entry!(MASK_OUT_Y, OP_NBCD_8_DN, nbcd_8_dn),
op_entry!(MASK_OUT_Y, OP_NBCD_8_AI, nbcd_8_ai),
op_entry!(MASK_OUT_Y, OP_NBCD_8_PI, nbcd_8_pi),
op_entry!(MASK_OUT_Y, OP_NBCD_8_PD, nbcd_8_pd),
op_entry!(MASK_OUT_Y, OP_NBCD_8_DI, nbcd_8_di),
op_entry!(MASK_OUT_Y, OP_NBCD_8_IX, nbcd_8_ix),
op_entry!(MASK_EXACT, OP_NBCD_8_AW, nbcd_8_aw),
op_entry!(MASK_EXACT, OP_NBCD_8_AL, nbcd_8_al),
// Put op-entries for NEG here
op_entry!(MASK_OUT_Y, OP_NEG_8_DN, neg_8_dn),
op_entry!(MASK_OUT_Y, OP_NEG_8_AI, neg_8_ai),
op_entry!(MASK_OUT_Y, OP_NEG_8_PI, neg_8_pi),
op_entry!(MASK_OUT_Y, OP_NEG_8_PD, neg_8_pd),
op_entry!(MASK_OUT_Y, OP_NEG_8_DI, neg_8_di),
op_entry!(MASK_OUT_Y, OP_NEG_8_IX, neg_8_ix),
op_entry!(MASK_EXACT, OP_NEG_8_AW, neg_8_aw),
op_entry!(MASK_EXACT, OP_NEG_8_AL, neg_8_al),
op_entry!(MASK_OUT_Y, OP_NEG_16_DN, neg_16_dn),
op_entry!(MASK_OUT_Y, OP_NEG_16_AI, neg_16_ai),
op_entry!(MASK_OUT_Y, OP_NEG_16_PI, neg_16_pi),
op_entry!(MASK_OUT_Y, OP_NEG_16_PD, neg_16_pd),
op_entry!(MASK_OUT_Y, OP_NEG_16_DI, neg_16_di),
op_entry!(MASK_OUT_Y, OP_NEG_16_IX, neg_16_ix),
op_entry!(MASK_EXACT, OP_NEG_16_AW, neg_16_aw),
op_entry!(MASK_EXACT, OP_NEG_16_AL, neg_16_al),
op_entry!(MASK_OUT_Y, OP_NEG_32_DN, neg_32_dn),
op_entry!(MASK_OUT_Y, OP_NEG_32_AI, neg_32_ai),
op_entry!(MASK_OUT_Y, OP_NEG_32_PI, neg_32_pi),
op_entry!(MASK_OUT_Y, OP_NEG_32_PD, neg_32_pd),
op_entry!(MASK_OUT_Y, OP_NEG_32_DI, neg_32_di),
op_entry!(MASK_OUT_Y, OP_NEG_32_IX, neg_32_ix),
op_entry!(MASK_EXACT, OP_NEG_32_AW, neg_32_aw),
op_entry!(MASK_EXACT, OP_NEG_32_AL, neg_32_al),
// Put op-entries for NEGX here
op_entry!(MASK_OUT_Y, OP_NEGX_8_DN, negx_8_dn),
op_entry!(MASK_OUT_Y, OP_NEGX_8_AI, negx_8_ai),
op_entry!(MASK_OUT_Y, OP_NEGX_8_PI, negx_8_pi),
op_entry!(MASK_OUT_Y, OP_NEGX_8_PD, negx_8_pd),
op_entry!(MASK_OUT_Y, OP_NEGX_8_DI, negx_8_di),
op_entry!(MASK_OUT_Y, OP_NEGX_8_IX, negx_8_ix),
op_entry!(MASK_EXACT, OP_NEGX_8_AW, negx_8_aw),
op_entry!(MASK_EXACT, OP_NEGX_8_AL, negx_8_al),
op_entry!(MASK_OUT_Y, OP_NEGX_16_DN, negx_16_dn),
op_entry!(MASK_OUT_Y, OP_NEGX_16_AI, negx_16_ai),
op_entry!(MASK_OUT_Y, OP_NEGX_16_PI, negx_16_pi),
op_entry!(MASK_OUT_Y, OP_NEGX_16_PD, negx_16_pd),
op_entry!(MASK_OUT_Y, OP_NEGX_16_DI, negx_16_di),
op_entry!(MASK_OUT_Y, OP_NEGX_16_IX, negx_16_ix),
op_entry!(MASK_EXACT, OP_NEGX_16_AW, negx_16_aw),
op_entry!(MASK_EXACT, OP_NEGX_16_AL, negx_16_al),
op_entry!(MASK_OUT_Y, OP_NEGX_32_DN, negx_32_dn),
op_entry!(MASK_OUT_Y, OP_NEGX_32_AI, negx_32_ai),
op_entry!(MASK_OUT_Y, OP_NEGX_32_PI, negx_32_pi),
op_entry!(MASK_OUT_Y, OP_NEGX_32_PD, negx_32_pd),
op_entry!(MASK_OUT_Y, OP_NEGX_32_DI, negx_32_di),
op_entry!(MASK_OUT_Y, OP_NEGX_32_IX, negx_32_ix),
op_entry!(MASK_EXACT, OP_NEGX_32_AW, negx_32_aw),
op_entry!(MASK_EXACT, OP_NEGX_32_AL, negx_32_al),
// Put op-entries for NOP here
op_entry!(MASK_EXACT, OP_NOP, nop),
// Put op-entries for NOT here
op_entry!(MASK_OUT_Y, OP_NOT_8_DN, not_8_dn),
op_entry!(MASK_OUT_Y, OP_NOT_8_AI, not_8_ai),
op_entry!(MASK_OUT_Y, OP_NOT_8_PI, not_8_pi),
op_entry!(MASK_OUT_Y, OP_NOT_8_PD, not_8_pd),
op_entry!(MASK_OUT_Y, OP_NOT_8_DI, not_8_di),
op_entry!(MASK_OUT_Y, OP_NOT_8_IX, not_8_ix),
op_entry!(MASK_EXACT, OP_NOT_8_AW, not_8_aw),
op_entry!(MASK_EXACT, OP_NOT_8_AL, not_8_al),
op_entry!(MASK_OUT_Y, OP_NOT_16_DN, not_16_dn),
op_entry!(MASK_OUT_Y, OP_NOT_16_AI, not_16_ai),
op_entry!(MASK_OUT_Y, OP_NOT_16_PI, not_16_pi),
op_entry!(MASK_OUT_Y, OP_NOT_16_PD, not_16_pd),
op_entry!(MASK_OUT_Y, OP_NOT_16_DI, not_16_di),
op_entry!(MASK_OUT_Y, OP_NOT_16_IX, not_16_ix),
op_entry!(MASK_EXACT, OP_NOT_16_AW, not_16_aw),
op_entry!(MASK_EXACT, OP_NOT_16_AL, not_16_al),
op_entry!(MASK_OUT_Y, OP_NOT_32_DN, not_32_dn),
op_entry!(MASK_OUT_Y, OP_NOT_32_AI, not_32_ai),
op_entry!(MASK_OUT_Y, OP_NOT_32_PI, not_32_pi),
op_entry!(MASK_OUT_Y, OP_NOT_32_PD, not_32_pd),
op_entry!(MASK_OUT_Y, OP_NOT_32_DI, not_32_di),
op_entry!(MASK_OUT_Y, OP_NOT_32_IX, not_32_ix),
op_entry!(MASK_EXACT, OP_NOT_32_AW, not_32_aw),
op_entry!(MASK_EXACT, OP_NOT_32_AL, not_32_al),
// Put op-entries for OR here
op_entry!(MASK_OUT_X_Y, OP_OR_8_ER_DN, or_8_er_dn),
op_entry!(MASK_OUT_X_Y, OP_OR_8_ER_AI, or_8_er_ai),
op_entry!(MASK_OUT_X_Y, OP_OR_8_ER_PI, or_8_er_pi),
op_entry!(MASK_OUT_X_Y, OP_OR_8_ER_PD, or_8_er_pd),
op_entry!(MASK_OUT_X_Y, OP_OR_8_ER_DI, or_8_er_di),
op_entry!(MASK_OUT_X_Y, OP_OR_8_ER_IX, or_8_er_ix),
op_entry!(MASK_OUT_X, OP_OR_8_ER_AW, or_8_er_aw),
op_entry!(MASK_OUT_X, OP_OR_8_ER_AL, or_8_er_al),
op_entry!(MASK_OUT_X, OP_OR_8_ER_PCDI, or_8_er_pcdi),
op_entry!(MASK_OUT_X, OP_OR_8_ER_PCIX, or_8_er_pcix),
op_entry!(MASK_OUT_X, OP_OR_8_ER_IMM, or_8_er_imm),
op_entry!(MASK_OUT_X_Y, OP_OR_8_RE_AI, or_8_re_ai),
op_entry!(MASK_OUT_X_Y, OP_OR_8_RE_PI, or_8_re_pi),
op_entry!(MASK_OUT_X_Y, OP_OR_8_RE_PD, or_8_re_pd),
op_entry!(MASK_OUT_X_Y, OP_OR_8_RE_DI, or_8_re_di),
op_entry!(MASK_OUT_X_Y, OP_OR_8_RE_IX, or_8_re_ix),
op_entry!(MASK_OUT_X, OP_OR_8_RE_AW, or_8_re_aw),
op_entry!(MASK_OUT_X, OP_OR_8_RE_AL, or_8_re_al),
op_entry!(MASK_OUT_X_Y, OP_OR_16_ER_DN, or_16_er_dn),
op_entry!(MASK_OUT_X_Y, OP_OR_16_ER_AI, or_16_er_ai),
op_entry!(MASK_OUT_X_Y, OP_OR_16_ER_PI, or_16_er_pi),
op_entry!(MASK_OUT_X_Y, OP_OR_16_ER_PD, or_16_er_pd),
op_entry!(MASK_OUT_X_Y, OP_OR_16_ER_DI, or_16_er_di),
op_entry!(MASK_OUT_X_Y, OP_OR_16_ER_IX, or_16_er_ix),
op_entry!(MASK_OUT_X, OP_OR_16_ER_AW, or_16_er_aw),
op_entry!(MASK_OUT_X, OP_OR_16_ER_AL, or_16_er_al),
op_entry!(MASK_OUT_X, OP_OR_16_ER_PCDI, or_16_er_pcdi),
op_entry!(MASK_OUT_X, OP_OR_16_ER_PCIX, or_16_er_pcix),
op_entry!(MASK_OUT_X, OP_OR_16_ER_IMM, or_16_er_imm),
op_entry!(MASK_OUT_X_Y, OP_OR_16_RE_AI, or_16_re_ai),
op_entry!(MASK_OUT_X_Y, OP_OR_16_RE_PI, or_16_re_pi),
op_entry!(MASK_OUT_X_Y, OP_OR_16_RE_PD, or_16_re_pd),
op_entry!(MASK_OUT_X_Y, OP_OR_16_RE_DI, or_16_re_di),
op_entry!(MASK_OUT_X_Y, OP_OR_16_RE_IX, or_16_re_ix),
op_entry!(MASK_OUT_X, OP_OR_16_RE_AW, or_16_re_aw),
op_entry!(MASK_OUT_X, OP_OR_16_RE_AL, or_16_re_al),
op_entry!(MASK_OUT_X_Y, OP_OR_32_ER_DN, or_32_er_dn),
op_entry!(MASK_OUT_X_Y, OP_OR_32_ER_AI, or_32_er_ai),
op_entry!(MASK_OUT_X_Y, OP_OR_32_ER_PI, or_32_er_pi),
op_entry!(MASK_OUT_X_Y, OP_OR_32_ER_PD, or_32_er_pd),
op_entry!(MASK_OUT_X_Y, OP_OR_32_ER_DI, or_32_er_di),
op_entry!(MASK_OUT_X_Y, OP_OR_32_ER_IX, or_32_er_ix),
op_entry!(MASK_OUT_X, OP_OR_32_ER_AW, or_32_er_aw),
op_entry!(MASK_OUT_X, OP_OR_32_ER_AL, or_32_er_al),
op_entry!(MASK_OUT_X, OP_OR_32_ER_PCDI, or_32_er_pcdi),
op_entry!(MASK_OUT_X, OP_OR_32_ER_PCIX, or_32_er_pcix),
op_entry!(MASK_OUT_X, OP_OR_32_ER_IMM, or_32_er_imm),
op_entry!(MASK_OUT_X_Y, OP_OR_32_RE_AI, or_32_re_ai),
op_entry!(MASK_OUT_X_Y, OP_OR_32_RE_PI, or_32_re_pi),
op_entry!(MASK_OUT_X_Y, OP_OR_32_RE_PD, or_32_re_pd),
op_entry!(MASK_OUT_X_Y, OP_OR_32_RE_DI, or_32_re_di),
op_entry!(MASK_OUT_X_Y, OP_OR_32_RE_IX, or_32_re_ix),
op_entry!(MASK_OUT_X, OP_OR_32_RE_AW, or_32_re_aw),
op_entry!(MASK_OUT_X, OP_OR_32_RE_AL, or_32_re_al),
// Put op-entries for ORI here
op_entry!(MASK_OUT_Y, OP_ORI_8_DN, ori_8_dn),
op_entry!(MASK_OUT_Y, OP_ORI_8_AI, ori_8_ai),
op_entry!(MASK_OUT_Y, OP_ORI_8_PI, ori_8_pi),
op_entry!(MASK_OUT_Y, OP_ORI_8_PD, ori_8_pd),
op_entry!(MASK_OUT_Y, OP_ORI_8_DI, ori_8_di),
op_entry!(MASK_OUT_Y, OP_ORI_8_IX, ori_8_ix),
op_entry!(MASK_EXACT, OP_ORI_8_AW, ori_8_aw),
op_entry!(MASK_EXACT, OP_ORI_8_AL, ori_8_al),
op_entry!(MASK_OUT_Y, OP_ORI_16_DN, ori_16_dn),
op_entry!(MASK_OUT_Y, OP_ORI_16_AI, ori_16_ai),
op_entry!(MASK_OUT_Y, OP_ORI_16_PI, ori_16_pi),
op_entry!(MASK_OUT_Y, OP_ORI_16_PD, ori_16_pd),
op_entry!(MASK_OUT_Y, OP_ORI_16_DI, ori_16_di),
op_entry!(MASK_OUT_Y, OP_ORI_16_IX, ori_16_ix),
op_entry!(MASK_EXACT, OP_ORI_16_AW, ori_16_aw),
op_entry!(MASK_EXACT, OP_ORI_16_AL, ori_16_al),
op_entry!(MASK_OUT_Y, OP_ORI_32_DN, ori_32_dn),
op_entry!(MASK_OUT_Y, OP_ORI_32_AI, ori_32_ai),
op_entry!(MASK_OUT_Y, OP_ORI_32_PI, ori_32_pi),
op_entry!(MASK_OUT_Y, OP_ORI_32_PD, ori_32_pd),
op_entry!(MASK_OUT_Y, OP_ORI_32_DI, ori_32_di),
op_entry!(MASK_OUT_Y, OP_ORI_32_IX, ori_32_ix),
op_entry!(MASK_EXACT, OP_ORI_32_AW, ori_32_aw),
op_entry!(MASK_EXACT, OP_ORI_32_AL, ori_32_al),
// Put op-entries for ORI to CCR here
op_entry!(MASK_EXACT, OP_ORI_8_TOC, ori_8_toc),
// Put op-entries for ORI to SR here
op_entry!(MASK_EXACT, OP_ORI_16_TOS, ori_16_tos),
// Put op-entries for PEA here
op_entry!(MASK_OUT_Y, OP_PEA_32_AI, pea_32_ai),
op_entry!(MASK_OUT_Y, OP_PEA_32_DI, pea_32_di),
op_entry!(MASK_OUT_Y, OP_PEA_32_IX, pea_32_ix),
op_entry!(MASK_EXACT, OP_PEA_32_AW, pea_32_aw),
op_entry!(MASK_EXACT, OP_PEA_32_AL, pea_32_al),
op_entry!(MASK_EXACT, OP_PEA_32_PCDI, pea_32_pcdi),
op_entry!(MASK_EXACT, OP_PEA_32_PCIX, pea_32_pcix),
// Put op-entries for RESET here
op_entry!(MASK_EXACT, OP_RESET, reset),
// Put op-entries for ROL, ROR here
op_entry!(MASK_OUT_X_Y, OP_ROR_8_S, ror_8_s),
op_entry!(MASK_OUT_X_Y, OP_ROR_16_S, ror_16_s),
op_entry!(MASK_OUT_X_Y, OP_ROR_32_S, ror_32_s),
op_entry!(MASK_OUT_X_Y, OP_ROR_8_R, ror_8_r),
op_entry!(MASK_OUT_X_Y, OP_ROR_16_R, ror_16_r),
op_entry!(MASK_OUT_X_Y, OP_ROR_32_R, ror_32_r),
op_entry!(MASK_OUT_X_Y, OP_ROL_8_S, rol_8_s),
op_entry!(MASK_OUT_X_Y, OP_ROL_16_S, rol_16_s),
op_entry!(MASK_OUT_X_Y, OP_ROL_32_S, rol_32_s),
op_entry!(MASK_OUT_X_Y, OP_ROL_8_R, rol_8_r),
op_entry!(MASK_OUT_X_Y, OP_ROL_16_R, rol_16_r),
op_entry!(MASK_OUT_X_Y, OP_ROL_32_R, rol_32_r),
op_entry!(MASK_OUT_Y, OP_ROL_16_AI, rol_16_ai),
op_entry!(MASK_OUT_Y, OP_ROL_16_PI, rol_16_pi),
op_entry!(MASK_OUT_Y, OP_ROL_16_PD, rol_16_pd),
op_entry!(MASK_OUT_Y, OP_ROL_16_DI, rol_16_di),
op_entry!(MASK_OUT_Y, OP_ROL_16_IX, rol_16_ix),
op_entry!(MASK_EXACT, OP_ROL_16_AW, rol_16_aw),
op_entry!(MASK_EXACT, OP_ROL_16_AL, rol_16_al),
op_entry!(MASK_OUT_Y, OP_ROR_16_AI, ror_16_ai),
op_entry!(MASK_OUT_Y, OP_ROR_16_PI, ror_16_pi),
op_entry!(MASK_OUT_Y, OP_ROR_16_PD, ror_16_pd),
op_entry!(MASK_OUT_Y, OP_ROR_16_DI, ror_16_di),
op_entry!(MASK_OUT_Y, OP_ROR_16_IX, ror_16_ix),
op_entry!(MASK_EXACT, OP_ROR_16_AW, ror_16_aw),
op_entry!(MASK_EXACT, OP_ROR_16_AL, ror_16_al),
// Put op-entries for ROXL, ROXR here
op_entry!(MASK_OUT_X_Y, OP_ROXR_8_S, roxr_8_s),
op_entry!(MASK_OUT_X_Y, OP_ROXR_16_S, roxr_16_s),
op_entry!(MASK_OUT_X_Y, OP_ROXR_32_S, roxr_32_s),
op_entry!(MASK_OUT_X_Y, OP_ROXR_8_R, roxr_8_r),
op_entry!(MASK_OUT_X_Y, OP_ROXR_16_R, roxr_16_r),
op_entry!(MASK_OUT_X_Y, OP_ROXR_32_R, roxr_32_r),
op_entry!(MASK_OUT_X_Y, OP_ROXL_8_S, roxl_8_s),
op_entry!(MASK_OUT_X_Y, OP_ROXL_16_S, roxl_16_s),
op_entry!(MASK_OUT_X_Y, OP_ROXL_32_S, roxl_32_s),
op_entry!(MASK_OUT_X_Y, OP_ROXL_8_R, roxl_8_r),
op_entry!(MASK_OUT_X_Y, OP_ROXL_16_R, roxl_16_r),
op_entry!(MASK_OUT_X_Y, OP_ROXL_32_R, roxl_32_r),
op_entry!(MASK_OUT_Y, OP_ROXL_16_AI, roxl_16_ai),
op_entry!(MASK_OUT_Y, OP_ROXL_16_PI, roxl_16_pi),
op_entry!(MASK_OUT_Y, OP_ROXL_16_PD, roxl_16_pd),
op_entry!(MASK_OUT_Y, OP_ROXL_16_DI, roxl_16_di),
op_entry!(MASK_OUT_Y, OP_ROXL_16_IX, roxl_16_ix),
op_entry!(MASK_EXACT, OP_ROXL_16_AW, roxl_16_aw),
op_entry!(MASK_EXACT, OP_ROXL_16_AL, roxl_16_al),
op_entry!(MASK_OUT_Y, OP_ROXR_16_AI, roxr_16_ai),
op_entry!(MASK_OUT_Y, OP_ROXR_16_PI, roxr_16_pi),
op_entry!(MASK_OUT_Y, OP_ROXR_16_PD, roxr_16_pd),
op_entry!(MASK_OUT_Y, OP_ROXR_16_DI, roxr_16_di),
op_entry!(MASK_OUT_Y, OP_ROXR_16_IX, roxr_16_ix),
op_entry!(MASK_EXACT, OP_ROXR_16_AW, roxr_16_aw),
op_entry!(MASK_EXACT, OP_ROXR_16_AL, roxr_16_al),
// Put op-entries for RTE here
op_entry!(MASK_EXACT, OP_RTE_32, rte_32),
// Put op-entries for RTR here
op_entry!(MASK_EXACT, OP_RTR_32, rtr_32),
// Put op-entries for RTS here
op_entry!(MASK_EXACT, OP_RTS_32, rts_32),
// Put op-entries for SBCD here
op_entry!(MASK_OUT_X_Y, OP_SBCD_8_RR, sbcd_8_rr),
op_entry!(MASK_OUT_X_Y, OP_SBCD_8_MM, sbcd_8_mm),
// Put op-entries for Scc here
op_entry!(MASK_OUT_Y, OP_SCC_8_AI, scc_8_ai),
op_entry!(MASK_EXACT, OP_SCC_8_AL, scc_8_al),
op_entry!(MASK_EXACT, OP_SCC_8_AW, scc_8_aw),
op_entry!(MASK_OUT_Y, OP_SCC_8_DN, scc_8_dn),
op_entry!(MASK_OUT_Y, OP_SCC_8_DI, scc_8_di),
op_entry!(MASK_OUT_Y, OP_SCC_8_IX, scc_8_ix),
op_entry!(MASK_OUT_Y, OP_SCC_8_PD, scc_8_pd),
op_entry!(MASK_OUT_Y, OP_SCC_8_PI, scc_8_pi),
op_entry!(MASK_OUT_Y, OP_SCS_8_AI, scs_8_ai),
op_entry!(MASK_EXACT, OP_SCS_8_AL, scs_8_al),
op_entry!(MASK_EXACT, OP_SCS_8_AW, scs_8_aw),
op_entry!(MASK_OUT_Y, OP_SCS_8_DN, scs_8_dn),
op_entry!(MASK_OUT_Y, OP_SCS_8_DI, scs_8_di),
op_entry!(MASK_OUT_Y, OP_SCS_8_IX, scs_8_ix),
op_entry!(MASK_OUT_Y, OP_SCS_8_PD, scs_8_pd),
op_entry!(MASK_OUT_Y, OP_SCS_8_PI, scs_8_pi),
op_entry!(MASK_OUT_Y, OP_SEQ_8_AI, seq_8_ai),
op_entry!(MASK_EXACT, OP_SEQ_8_AL, seq_8_al),
op_entry!(MASK_EXACT, OP_SEQ_8_AW, seq_8_aw),
op_entry!(MASK_OUT_Y, OP_SEQ_8_DN, seq_8_dn),
op_entry!(MASK_OUT_Y, OP_SEQ_8_DI, seq_8_di),
op_entry!(MASK_OUT_Y, OP_SEQ_8_IX, seq_8_ix),
op_entry!(MASK_OUT_Y, OP_SEQ_8_PD, seq_8_pd),
op_entry!(MASK_OUT_Y, OP_SEQ_8_PI, seq_8_pi),
op_entry!(MASK_OUT_Y, OP_SF_8_AI, sf_8_ai),
op_entry!(MASK_EXACT, OP_SF_8_AL, sf_8_al),
op_entry!(MASK_EXACT, OP_SF_8_AW, sf_8_aw),
op_entry!(MASK_OUT_Y, OP_SF_8_DN, sf_8_dn),
op_entry!(MASK_OUT_Y, OP_SF_8_DI, sf_8_di),
op_entry!(MASK_OUT_Y, OP_SF_8_IX, sf_8_ix),
op_entry!(MASK_OUT_Y, OP_SF_8_PD, sf_8_pd),
op_entry!(MASK_OUT_Y, OP_SF_8_PI, sf_8_pi),
op_entry!(MASK_OUT_Y, OP_SGE_8_AI, sge_8_ai),
op_entry!(MASK_EXACT, OP_SGE_8_AL, sge_8_al),
op_entry!(MASK_EXACT, OP_SGE_8_AW, sge_8_aw),
op_entry!(MASK_OUT_Y, OP_SGE_8_DN, sge_8_dn),
op_entry!(MASK_OUT_Y, OP_SGE_8_DI, sge_8_di),
op_entry!(MASK_OUT_Y, OP_SGE_8_IX, sge_8_ix),
op_entry!(MASK_OUT_Y, OP_SGE_8_PD, sge_8_pd),
op_entry!(MASK_OUT_Y, OP_SGE_8_PI, sge_8_pi),
op_entry!(MASK_OUT_Y, OP_SGT_8_AI, sgt_8_ai),
op_entry!(MASK_EXACT, OP_SGT_8_AL, sgt_8_al),
op_entry!(MASK_EXACT, OP_SGT_8_AW, sgt_8_aw),
op_entry!(MASK_OUT_Y, OP_SGT_8_DN, sgt_8_dn),
op_entry!(MASK_OUT_Y, OP_SGT_8_DI, sgt_8_di),
op_entry!(MASK_OUT_Y, OP_SGT_8_IX, sgt_8_ix),
op_entry!(MASK_OUT_Y, OP_SGT_8_PD, sgt_8_pd),
op_entry!(MASK_OUT_Y, OP_SGT_8_PI, sgt_8_pi),
op_entry!(MASK_OUT_Y, OP_SHI_8_AI, shi_8_ai),
op_entry!(MASK_EXACT, OP_SHI_8_AL, shi_8_al),
op_entry!(MASK_EXACT, OP_SHI_8_AW, shi_8_aw),
op_entry!(MASK_OUT_Y, OP_SHI_8_DN, shi_8_dn),
op_entry!(MASK_OUT_Y, OP_SHI_8_DI, shi_8_di),
op_entry!(MASK_OUT_Y, OP_SHI_8_IX, shi_8_ix),
op_entry!(MASK_OUT_Y, OP_SHI_8_PD, shi_8_pd),
op_entry!(MASK_OUT_Y, OP_SHI_8_PI, shi_8_pi),
op_entry!(MASK_OUT_Y, OP_SLE_8_AI, sle_8_ai),
op_entry!(MASK_EXACT, OP_SLE_8_AL, sle_8_al),
op_entry!(MASK_EXACT, OP_SLE_8_AW, sle_8_aw),
op_entry!(MASK_OUT_Y, OP_SLE_8_DN, sle_8_dn),
op_entry!(MASK_OUT_Y, OP_SLE_8_DI, sle_8_di),
op_entry!(MASK_OUT_Y, OP_SLE_8_IX, sle_8_ix),
op_entry!(MASK_OUT_Y, OP_SLE_8_PD, sle_8_pd),
op_entry!(MASK_OUT_Y, OP_SLE_8_PI, sle_8_pi),
op_entry!(MASK_OUT_Y, OP_SLS_8_AI, sls_8_ai),
op_entry!(MASK_EXACT, OP_SLS_8_AL, sls_8_al),
op_entry!(MASK_EXACT, OP_SLS_8_AW, sls_8_aw),
op_entry!(MASK_OUT_Y, OP_SLS_8_DN, sls_8_dn),
op_entry!(MASK_OUT_Y, OP_SLS_8_DI, sls_8_di),
op_entry!(MASK_OUT_Y, OP_SLS_8_IX, sls_8_ix),
op_entry!(MASK_OUT_Y, OP_SLS_8_PD, sls_8_pd),
op_entry!(MASK_OUT_Y, OP_SLS_8_PI, sls_8_pi),
op_entry!(MASK_OUT_Y, OP_SLT_8_AI, slt_8_ai),
op_entry!(MASK_EXACT, OP_SLT_8_AL, slt_8_al),
op_entry!(MASK_EXACT, OP_SLT_8_AW, slt_8_aw),
op_entry!(MASK_OUT_Y, OP_SLT_8_DN, slt_8_dn),
op_entry!(MASK_OUT_Y, OP_SLT_8_DI, slt_8_di),
op_entry!(MASK_OUT_Y, OP_SLT_8_IX, slt_8_ix),
op_entry!(MASK_OUT_Y, OP_SLT_8_PD, slt_8_pd),
op_entry!(MASK_OUT_Y, OP_SLT_8_PI, slt_8_pi),
op_entry!(MASK_OUT_Y, OP_SMI_8_AI, smi_8_ai),
op_entry!(MASK_EXACT, OP_SMI_8_AL, smi_8_al),
op_entry!(MASK_EXACT, OP_SMI_8_AW, smi_8_aw),
op_entry!(MASK_OUT_Y, OP_SMI_8_DN, smi_8_dn),
op_entry!(MASK_OUT_Y, OP_SMI_8_DI, smi_8_di),
op_entry!(MASK_OUT_Y, OP_SMI_8_IX, smi_8_ix),
op_entry!(MASK_OUT_Y, OP_SMI_8_PD, smi_8_pd),
op_entry!(MASK_OUT_Y, OP_SMI_8_PI, smi_8_pi),
op_entry!(MASK_OUT_Y, OP_SNE_8_AI, sne_8_ai),
op_entry!(MASK_EXACT, OP_SNE_8_AL, sne_8_al),
op_entry!(MASK_EXACT, OP_SNE_8_AW, sne_8_aw),
op_entry!(MASK_OUT_Y, OP_SNE_8_DN, sne_8_dn),
op_entry!(MASK_OUT_Y, OP_SNE_8_DI, sne_8_di),
op_entry!(MASK_OUT_Y, OP_SNE_8_IX, sne_8_ix),
op_entry!(MASK_OUT_Y, OP_SNE_8_PD, sne_8_pd),
op_entry!(MASK_OUT_Y, OP_SNE_8_PI, sne_8_pi),
op_entry!(MASK_OUT_Y, OP_SPL_8_AI, spl_8_ai),
op_entry!(MASK_EXACT, OP_SPL_8_AL, spl_8_al),
op_entry!(MASK_EXACT, OP_SPL_8_AW, spl_8_aw),
op_entry!(MASK_OUT_Y, OP_SPL_8_DN, spl_8_dn),
op_entry!(MASK_OUT_Y, OP_SPL_8_DI, spl_8_di),
op_entry!(MASK_OUT_Y, OP_SPL_8_IX, spl_8_ix),
op_entry!(MASK_OUT_Y, OP_SPL_8_PD, spl_8_pd),
op_entry!(MASK_OUT_Y, OP_SPL_8_PI, spl_8_pi),
op_entry!(MASK_OUT_Y, OP_ST_8_AI, st_8_ai),
op_entry!(MASK_EXACT, OP_ST_8_AL, st_8_al),
op_entry!(MASK_EXACT, OP_ST_8_AW, st_8_aw),
op_entry!(MASK_OUT_Y, OP_ST_8_DN, st_8_dn),
op_entry!(MASK_OUT_Y, OP_ST_8_DI, st_8_di),
op_entry!(MASK_OUT_Y, OP_ST_8_IX, st_8_ix),
op_entry!(MASK_OUT_Y, OP_ST_8_PD, st_8_pd),
op_entry!(MASK_OUT_Y, OP_ST_8_PI, st_8_pi),
op_entry!(MASK_OUT_Y, OP_SVC_8_AI, svc_8_ai),
op_entry!(MASK_EXACT, OP_SVC_8_AL, svc_8_al),
op_entry!(MASK_EXACT, OP_SVC_8_AW, svc_8_aw),
op_entry!(MASK_OUT_Y, OP_SVC_8_DN, svc_8_dn),
op_entry!(MASK_OUT_Y, OP_SVC_8_DI, svc_8_di),
op_entry!(MASK_OUT_Y, OP_SVC_8_IX, svc_8_ix),
op_entry!(MASK_OUT_Y, OP_SVC_8_PD, svc_8_pd),
op_entry!(MASK_OUT_Y, OP_SVC_8_PI, svc_8_pi),
op_entry!(MASK_OUT_Y, OP_SVS_8_AI, svs_8_ai),
op_entry!(MASK_EXACT, OP_SVS_8_AL, svs_8_al),
op_entry!(MASK_EXACT, OP_SVS_8_AW, svs_8_aw),
op_entry!(MASK_OUT_Y, OP_SVS_8_DN, svs_8_dn),
op_entry!(MASK_OUT_Y, OP_SVS_8_DI, svs_8_di),
op_entry!(MASK_OUT_Y, OP_SVS_8_IX, svs_8_ix),
op_entry!(MASK_OUT_Y, OP_SVS_8_PD, svs_8_pd),
op_entry!(MASK_OUT_Y, OP_SVS_8_PI, svs_8_pi),
// Put op-entries for STOP here
op_entry!(MASK_EXACT, OP_STOP, stop),
// Put op-entries for SUB here
op_entry!(MASK_OUT_X_Y, OP_SUB_8_ER_DN, sub_8_er_dn),
op_entry!(MASK_OUT_X_Y, OP_SUB_8_ER_AI, sub_8_er_ai),
op_entry!(MASK_OUT_X_Y, OP_SUB_8_ER_PI, sub_8_er_pi),
op_entry!(MASK_OUT_X_Y, OP_SUB_8_ER_PD, sub_8_er_pd),
op_entry!(MASK_OUT_X_Y, OP_SUB_8_ER_DI, sub_8_er_di),
op_entry!(MASK_OUT_X_Y, OP_SUB_8_ER_IX, sub_8_er_ix),
op_entry!(MASK_OUT_X, OP_SUB_8_ER_AW, sub_8_er_aw),
op_entry!(MASK_OUT_X, OP_SUB_8_ER_AL, sub_8_er_al),
op_entry!(MASK_OUT_X, OP_SUB_8_ER_PCDI, sub_8_er_pcdi),
op_entry!(MASK_OUT_X, OP_SUB_8_ER_PCIX, sub_8_er_pcix),
op_entry!(MASK_OUT_X, OP_SUB_8_ER_IMM, sub_8_er_imm),
op_entry!(MASK_OUT_X_Y, OP_SUB_8_RE_AI, sub_8_re_ai),
op_entry!(MASK_OUT_X_Y, OP_SUB_8_RE_PI, sub_8_re_pi),
op_entry!(MASK_OUT_X_Y, OP_SUB_8_RE_PD, sub_8_re_pd),
op_entry!(MASK_OUT_X_Y, OP_SUB_8_RE_DI, sub_8_re_di),
op_entry!(MASK_OUT_X_Y, OP_SUB_8_RE_IX, sub_8_re_ix),
op_entry!(MASK_OUT_X, OP_SUB_8_RE_AW, sub_8_re_aw),
op_entry!(MASK_OUT_X, OP_SUB_8_RE_AL, sub_8_re_al),
op_entry!(MASK_OUT_X_Y, OP_SUB_16_ER_DN, sub_16_er_dn),
op_entry!(MASK_OUT_X_Y, OP_SUB_16_ER_AN, sub_16_er_an),
op_entry!(MASK_OUT_X_Y, OP_SUB_16_ER_AI, sub_16_er_ai),
op_entry!(MASK_OUT_X_Y, OP_SUB_16_ER_PI, sub_16_er_pi),
op_entry!(MASK_OUT_X_Y, OP_SUB_16_ER_PD, sub_16_er_pd),
op_entry!(MASK_OUT_X_Y, OP_SUB_16_ER_DI, sub_16_er_di),
op_entry!(MASK_OUT_X_Y, OP_SUB_16_ER_IX, sub_16_er_ix),
op_entry!(MASK_OUT_X, OP_SUB_16_ER_AW, sub_16_er_aw),
op_entry!(MASK_OUT_X, OP_SUB_16_ER_AL, sub_16_er_al),
op_entry!(MASK_OUT_X, OP_SUB_16_ER_PCDI, sub_16_er_pcdi),
op_entry!(MASK_OUT_X, OP_SUB_16_ER_PCIX, sub_16_er_pcix),
op_entry!(MASK_OUT_X, OP_SUB_16_ER_IMM, sub_16_er_imm),
op_entry!(MASK_OUT_X_Y, OP_SUB_16_RE_AI, sub_16_re_ai),
op_entry!(MASK_OUT_X_Y, OP_SUB_16_RE_PI, sub_16_re_pi),
op_entry!(MASK_OUT_X_Y, OP_SUB_16_RE_PD, sub_16_re_pd),
op_entry!(MASK_OUT_X_Y, OP_SUB_16_RE_DI, sub_16_re_di),
op_entry!(MASK_OUT_X_Y, OP_SUB_16_RE_IX, sub_16_re_ix),
op_entry!(MASK_OUT_X, OP_SUB_16_RE_AW, sub_16_re_aw),
op_entry!(MASK_OUT_X, OP_SUB_16_RE_AL, sub_16_re_al),
op_entry!(MASK_OUT_X_Y, OP_SUB_32_ER_DN, sub_32_er_dn),
op_entry!(MASK_OUT_X_Y, OP_SUB_32_ER_AN, sub_32_er_an),
op_entry!(MASK_OUT_X_Y, OP_SUB_32_ER_AI, sub_32_er_ai),
op_entry!(MASK_OUT_X_Y, OP_SUB_32_ER_PI, sub_32_er_pi),
op_entry!(MASK_OUT_X_Y, OP_SUB_32_ER_PD, sub_32_er_pd),
op_entry!(MASK_OUT_X_Y, OP_SUB_32_ER_DI, sub_32_er_di),
op_entry!(MASK_OUT_X_Y, OP_SUB_32_ER_IX, sub_32_er_ix),
op_entry!(MASK_OUT_X, OP_SUB_32_ER_AW, sub_32_er_aw),
op_entry!(MASK_OUT_X, OP_SUB_32_ER_AL, sub_32_er_al),
op_entry!(MASK_OUT_X, OP_SUB_32_ER_PCDI, sub_32_er_pcdi),
op_entry!(MASK_OUT_X, OP_SUB_32_ER_PCIX, sub_32_er_pcix),
op_entry!(MASK_OUT_X, OP_SUB_32_ER_IMM, sub_32_er_imm),
op_entry!(MASK_OUT_X_Y, OP_SUB_32_RE_AI, sub_32_re_ai),
op_entry!(MASK_OUT_X_Y, OP_SUB_32_RE_PI, sub_32_re_pi),
op_entry!(MASK_OUT_X_Y, OP_SUB_32_RE_PD, sub_32_re_pd),
op_entry!(MASK_OUT_X_Y, OP_SUB_32_RE_DI, sub_32_re_di),
op_entry!(MASK_OUT_X_Y, OP_SUB_32_RE_IX, sub_32_re_ix),
op_entry!(MASK_OUT_X, OP_SUB_32_RE_AW, sub_32_re_aw),
op_entry!(MASK_OUT_X, OP_SUB_32_RE_AL, sub_32_re_al),
op_entry!(MASK_OUT_X_Y, OP_SUBA_16_DN, suba_16_dn),
op_entry!(MASK_OUT_X_Y, OP_SUBA_16_AN, suba_16_an),
op_entry!(MASK_OUT_X_Y, OP_SUBA_16_AI, suba_16_ai),
op_entry!(MASK_OUT_X_Y, OP_SUBA_16_PI, suba_16_pi),
op_entry!(MASK_OUT_X_Y, OP_SUBA_16_PD, suba_16_pd),
op_entry!(MASK_OUT_X_Y, OP_SUBA_16_DI, suba_16_di),
op_entry!(MASK_OUT_X_Y, OP_SUBA_16_IX, suba_16_ix),
op_entry!(MASK_OUT_X, OP_SUBA_16_AW, suba_16_aw),
op_entry!(MASK_OUT_X, OP_SUBA_16_AL, suba_16_al),
op_entry!(MASK_OUT_X, OP_SUBA_16_PCDI, suba_16_pcdi),
op_entry!(MASK_OUT_X, OP_SUBA_16_PCIX, suba_16_pcix),
op_entry!(MASK_OUT_X, OP_SUBA_16_IMM, suba_16_imm),
op_entry!(MASK_OUT_X_Y, OP_SUBA_32_DN, suba_32_dn),
op_entry!(MASK_OUT_X_Y, OP_SUBA_32_AN, suba_32_an),
op_entry!(MASK_OUT_X_Y, OP_SUBA_32_AI, suba_32_ai),
op_entry!(MASK_OUT_X_Y, OP_SUBA_32_PI, suba_32_pi),
op_entry!(MASK_OUT_X_Y, OP_SUBA_32_PD, suba_32_pd),
op_entry!(MASK_OUT_X_Y, OP_SUBA_32_DI, suba_32_di),
op_entry!(MASK_OUT_X_Y, OP_SUBA_32_IX, suba_32_ix),
op_entry!(MASK_OUT_X, OP_SUBA_32_AW, suba_32_aw),
op_entry!(MASK_OUT_X, OP_SUBA_32_AL, suba_32_al),
op_entry!(MASK_OUT_X, OP_SUBA_32_PCDI, suba_32_pcdi),
op_entry!(MASK_OUT_X, OP_SUBA_32_PCIX, suba_32_pcix),
op_entry!(MASK_OUT_X, OP_SUBA_32_IMM, suba_32_imm),
op_entry!(MASK_OUT_Y, OP_SUBI_8_DN, subi_8_dn),
op_entry!(MASK_OUT_Y, OP_SUBI_8_AI, subi_8_ai),
op_entry!(MASK_OUT_Y, OP_SUBI_8_PI, subi_8_pi),
op_entry!(MASK_OUT_Y, OP_SUBI_8_PD, subi_8_pd),
op_entry!(MASK_OUT_Y, OP_SUBI_8_DI, subi_8_di),
op_entry!(MASK_OUT_Y, OP_SUBI_8_IX, subi_8_ix),
op_entry!(MASK_EXACT, OP_SUBI_8_AW, subi_8_aw),
op_entry!(MASK_EXACT, OP_SUBI_8_AL, subi_8_al),
op_entry!(MASK_OUT_Y, OP_SUBI_16_DN, subi_16_dn),
op_entry!(MASK_OUT_Y, OP_SUBI_16_AI, subi_16_ai),
op_entry!(MASK_OUT_Y, OP_SUBI_16_PI, subi_16_pi),
op_entry!(MASK_OUT_Y, OP_SUBI_16_PD, subi_16_pd),
op_entry!(MASK_OUT_Y, OP_SUBI_16_DI, subi_16_di),
op_entry!(MASK_OUT_Y, OP_SUBI_16_IX, subi_16_ix),
op_entry!(MASK_EXACT, OP_SUBI_16_AW, subi_16_aw),
op_entry!(MASK_EXACT, OP_SUBI_16_AL, subi_16_al),
op_entry!(MASK_OUT_Y, OP_SUBI_32_DN, subi_32_dn),
op_entry!(MASK_OUT_Y, OP_SUBI_32_AI, subi_32_ai),
op_entry!(MASK_OUT_Y, OP_SUBI_32_PI, subi_32_pi),
op_entry!(MASK_OUT_Y, OP_SUBI_32_PD, subi_32_pd),
op_entry!(MASK_OUT_Y, OP_SUBI_32_DI, subi_32_di),
op_entry!(MASK_OUT_Y, OP_SUBI_32_IX, subi_32_ix),
op_entry!(MASK_EXACT, OP_SUBI_32_AW, subi_32_aw),
op_entry!(MASK_EXACT, OP_SUBI_32_AL, subi_32_al),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_8_DN, subq_8_dn),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_8_AI, subq_8_ai),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_8_PI, subq_8_pi),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_8_PD, subq_8_pd),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_8_DI, subq_8_di),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_8_IX, subq_8_ix),
op_entry!(MASK_OUT_X, OP_SUBQ_8_AW, subq_8_aw),
op_entry!(MASK_OUT_X, OP_SUBQ_8_AL, subq_8_al),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_16_DN, subq_16_dn),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_16_AN, subq_16_an),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_16_AI, subq_16_ai),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_16_PI, subq_16_pi),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_16_PD, subq_16_pd),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_16_DI, subq_16_di),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_16_IX, subq_16_ix),
op_entry!(MASK_OUT_X, OP_SUBQ_16_AW, subq_16_aw),
op_entry!(MASK_OUT_X, OP_SUBQ_16_AL, subq_16_al),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_32_DN, subq_32_dn),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_32_AN, subq_32_an),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_32_AI, subq_32_ai),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_32_PI, subq_32_pi),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_32_PD, subq_32_pd),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_32_DI, subq_32_di),
op_entry!(MASK_OUT_X_Y, OP_SUBQ_32_IX, subq_32_ix),
op_entry!(MASK_OUT_X, OP_SUBQ_32_AW, subq_32_aw),
op_entry!(MASK_OUT_X, OP_SUBQ_32_AL, subq_32_al),
op_entry!(MASK_OUT_X_Y, OP_SUBX_8_RR, subx_8_rr),
op_entry!(MASK_OUT_X_Y, OP_SUBX_8_MM, subx_8_mm),
op_entry!(MASK_OUT_X_Y, OP_SUBX_16_RR, subx_16_rr),
op_entry!(MASK_OUT_X_Y, OP_SUBX_16_MM, subx_16_mm),
op_entry!(MASK_OUT_X_Y, OP_SUBX_32_RR, subx_32_rr),
op_entry!(MASK_OUT_X_Y, OP_SUBX_32_MM, subx_32_mm),
// Put op-entries for SWAP here
op_entry!(MASK_OUT_Y, OP_SWAP_32_DN, swap_32_dn),
// Put op-entries for TAS here
op_entry!(MASK_OUT_Y, OP_TAS_8_DN, tas_8_dn),
op_entry!(MASK_OUT_Y, OP_TAS_8_AI, tas_8_ai),
op_entry!(MASK_OUT_Y, OP_TAS_8_PI, tas_8_pi),
op_entry!(MASK_OUT_Y, OP_TAS_8_PD, tas_8_pd),
op_entry!(MASK_OUT_Y, OP_TAS_8_DI, tas_8_di),
op_entry!(MASK_OUT_Y, OP_TAS_8_IX, tas_8_ix),
op_entry!(MASK_EXACT, OP_TAS_8_AW, tas_8_aw),
op_entry!(MASK_EXACT, OP_TAS_8_AL, tas_8_al),
// Put op-entries for TRAP here
op_entry!(MASK_LONIB, OP_TRAP, trap),
// Put op-entries for TRAPV here
op_entry!(MASK_EXACT, OP_TRAPV, trapv),
// Put op-entries for TST here
op_entry!(MASK_OUT_Y, OP_TST_8_DN, tst_8_dn),
op_entry!(MASK_OUT_Y, OP_TST_8_AI, tst_8_ai),
op_entry!(MASK_OUT_Y, OP_TST_8_PI, tst_8_pi),
op_entry!(MASK_OUT_Y, OP_TST_8_PD, tst_8_pd),
op_entry!(MASK_OUT_Y, OP_TST_8_DI, tst_8_di),
op_entry!(MASK_OUT_Y, OP_TST_8_IX, tst_8_ix),
op_entry!(MASK_EXACT, OP_TST_8_AW, tst_8_aw),
op_entry!(MASK_EXACT, OP_TST_8_AL, tst_8_al),
op_entry!(MASK_OUT_Y, OP_TST_16_DN, tst_16_dn),
op_entry!(MASK_OUT_Y, OP_TST_16_AI, tst_16_ai),
op_entry!(MASK_OUT_Y, OP_TST_16_PI, tst_16_pi),
op_entry!(MASK_OUT_Y, OP_TST_16_PD, tst_16_pd),
op_entry!(MASK_OUT_Y, OP_TST_16_DI, tst_16_di),
op_entry!(MASK_OUT_Y, OP_TST_16_IX, tst_16_ix),
op_entry!(MASK_EXACT, OP_TST_16_AW, tst_16_aw),
op_entry!(MASK_EXACT, OP_TST_16_AL, tst_16_al),
op_entry!(MASK_OUT_Y, OP_TST_32_DN, tst_32_dn),
op_entry!(MASK_OUT_Y, OP_TST_32_AI, tst_32_ai),
op_entry!(MASK_OUT_Y, OP_TST_32_PI, tst_32_pi),
op_entry!(MASK_OUT_Y, OP_TST_32_PD, tst_32_pd),
op_entry!(MASK_OUT_Y, OP_TST_32_DI, tst_32_di),
op_entry!(MASK_OUT_Y, OP_TST_32_IX, tst_32_ix),
op_entry!(MASK_EXACT, OP_TST_32_AW, tst_32_aw),
op_entry!(MASK_EXACT, OP_TST_32_AL, tst_32_al),
// Put op-entries for UNLK here
op_entry!(MASK_OUT_Y, OP_UNLK_32, unlk_32),
]
}
#[cfg(test)]
mod tests {
use super::*;
use cpu::TestCore;
#[test]
fn optable_mask_and_matching_makes_sense() |
}
| {
let optable = super::generate_optable::<TestCore>();
for op in optable {
if op.mask & op.matching != op.matching {
panic!("Error generating op handler table: Op mask {:16b} and matching {:16b} is inconsistent for {}", op.mask, op.matching, op.name);
}
}
} |
run_tutorial.py | #!/usr/bin/env python2
from stripstream.fts.constraint import Eq, ConType, Unconstrained
from stripstream.fts.variable import VarType, Par, Var, X, U, nX
from stripstream.fts.clause import Clause
from stripstream.fts.sampler import Sampler
from stripstream.fts.problem import FTSProblem
from stripstream.fts.stripstream_conversion import constraint_to_stripstream
from stripstream.fts.utils import convert_plan, rename_variables
from stripstream.algorithms.search.fast_downward import get_fast_downward
from stripstream.algorithms.incremental.incremental_planner import incremental_planner
# TODO - make the name of the variable a parameter?
def | ():
"""
Creates the 1D task and motion planning FTSProblem problem.
:return: a :class:`.FTSProblem`
"""
blocks = ['block%i'%i for i in range(2)]
num_poses = pow(10, 10)
initial_config = 0 # the initial robot configuration is 0
initial_poses = {block: i for i, block in enumerate(blocks)} # the initial pose for block i is i
goal_poses = {block: i+1 for i, block in enumerate(blocks)} # the goal pose for block i is i+1
#goal_poses = {blocks[0]: 1}
####################
# TODO - rethink the naming...
R_Q, B_P, B_H = 'R_Q', 'B_P', 'B_H'
R_T = 'R_T'
# NOTE - these should really just be param type
CONF = VarType()
BOOL = VarType(domain=[True, False])
POSE = VarType()
BLOCK = VarType(domain=blocks) # TODO - VarType vs ParamType?
TRAJ = VarType()
B, Q, P = Par(BLOCK), Par(CONF), Par(POSE)
T, Q2 = Par(TRAJ), Par(CONF)
LegalKin = ConType([POSE, CONF])
CollisionFree = ConType([POSE, POSE], test=lambda p1, p2: None in (p1, p2) or p1 != p2)
Motion = ConType([CONF, TRAJ, CONF])
rename_variables(locals()) # Trick to make debugging easier
# NOTE - can make a holding variable for each object or just one holding variable
# TODO - maybe declare the variables upfront
state_vars = [Var(R_Q, CONF), Var(B_P, POSE, args=[BLOCK]), Var(B_H, BOOL, args=[BLOCK])]
control_vars = [Var(R_T, TRAJ)]
##########
# NOTE - didn't I have some kind of bug where things had to be effects for this to work?
# NOTE - this is because I have to write things in the form where we only mention things that change....
# THus, I need the identify constraint or something
transition = [
Clause([LegalKin(X[B_P, B], X[R_Q]), Eq(nX[B_P, B], None), Eq(X[B_H, B], False), Eq(nX[B_H, B], True)] +
[Eq(X[B_H, block], False) for block in blocks], name='pick'),
#Clause([LegalKin(X[B_P, B], X[R_Q]), Eq(nX[B_P, B], None), Eq(nX[B_H, B], True)] +
# [Eq(X[B_H, block], False) for block in blocks], name='pick'), # NOTE - this makes bool free params internally
Clause([LegalKin(nX[B_P, B], X[R_Q]), Eq(X[B_P, B], None), Eq(X[B_H, B], True), Eq(nX[B_H, B], False)] +
[CollisionFree(X[B_P, block], nX[B_P, B]) for block in blocks], name='place'),
#Clause([LegalKin(nX[B_P, B], X[R_Q]), Eq(X[B_P, B], None), Eq(nX[B_H, B], False)], # +
##Clause([LegalKin(nX[B_P, B], X[R_Q]), Eq(X[B_H, B], True), Eq(nX[B_H, B], False)], # +
# [CollisionFree(X[B_P, block], nX[B_P, B]) for block in blocks], name='place'),
#Clause([Unconstrained(nX[R_Q])], name='move'), # NOTE - only write what changes
Clause([Motion(X[R_Q], U[R_T], nX[R_Q])], name='move'),
]
##########
# TODO - expand so we don't need to return lists of lists
samplers = [
Sampler([P], gen=lambda: ([(p,)] for p in xrange(num_poses)), inputs=[]),
Sampler([LegalKin(P, Q)], gen=lambda p: [[(p,)]] if p is not None else [], inputs=[P]),
Sampler([Motion(Q, T, Q2)], gen=lambda q1, q2: [[((q1, q2),)]], inputs=[Q, Q2]), # TODO - avoid this much nesting
]
##########
initial_state = [Eq(X[R_Q], initial_config)] + \
[Eq(X[B_H, block], False) for block in blocks] + \
[Eq(X[B_P, block], pose) for block, pose in initial_poses.iteritems()]
goal_constraints = [Eq(X[B_P, block], pose) for block, pose in goal_poses.iteritems()]
#goal_constraints = [Eq(X[R_Q], 1)]
return FTSProblem(state_vars, control_vars, transition, samplers,
initial_state, goal_constraints)
##################################################
def main():
    """
    Creates and solves the 1D task and motion planning FTSProblem problem.
    """
    constraint_problem = create_problem()
    print
    print constraint_problem
    # Compile the factored-transition-system formulation into an
    # equivalent STRIPStream problem the planner below understands.
    stream_problem = constraint_to_stripstream(constraint_problem)
    print
    print stream_problem
    # Other supported searches: 'dijkstra | astar | wastar1 | wastar2 | wastar3 | eager | lazy'
    search_fn = get_fast_downward('eager')
    # Alternates stream instantiation and search until a plan is found.
    plan, _ = incremental_planner(stream_problem, search=search_fn)
    print
    print 'Plan:', convert_plan(plan)
# TODO - visualize by applying actions to env state
if __name__ == '__main__':
    main()
| create_problem |
app_test.go | package types
import "testing"
// TestAppValid verifies that well-formed App definitions pass assertValid.
func TestAppValid(t *testing.T) {
	// gofmt -s: the element type is implied by the []App literal, so the
	// redundant leading "App" on each element is dropped.
	tests := []App{
		{
			Exec:             []string{"/bin/httpd"},
			User:             "0",
			Group:            "0",
			WorkingDirectory: "/tmp",
		},
		{
			Exec:  []string{"/app"},
			User:  "0",
			Group: "0",
			EventHandlers: []EventHandler{
				{Name: "pre-start"},
				{Name: "post-stop"},
			},
			WorkingDirectory: "/tmp",
		},
		{
			Exec:             []string{"/app", "arg1", "arg2"},
			User:             "0",
			Group:            "0",
			WorkingDirectory: "/tmp",
		},
	}
	for i, tt := range tests {
		if err := tt.assertValid(); err != nil {
			t.Errorf("#%d: err == %v, want nil", i, err)
		}
	}
}
// TestAppExecInvalid verifies that Exec values which are missing, empty,
// or non-absolute are rejected by assertValid.
func TestAppExecInvalid(t *testing.T) {
	// gofmt -s: element type elided inside the []App literal.
	tests := []App{
		{
			Exec: nil,
		},
		{
			Exec:  []string{},
			User:  "0",
			Group: "0",
		},
		{
			// Relative executable path (contrast with the valid "/app" cases).
			Exec:  []string{"app"},
			User:  "0",
			Group: "0",
		},
		{
			Exec:  []string{"bin/app", "arg1"},
			User:  "0",
			Group: "0",
		},
	}
	for i, tt := range tests {
		if err := tt.assertValid(); err == nil {
			t.Errorf("#%d: err == nil, want non-nil", i)
		}
	}
}
// TestAppEventHandlersInvalid verifies that duplicate event handler names
// are rejected by assertValid.
func TestAppEventHandlersInvalid(t *testing.T) {
	// gofmt -s: element types elided in both the []App and the nested
	// []EventHandler literals.
	tests := []App{
		{
			Exec:  []string{"/bin/httpd"},
			User:  "0",
			Group: "0",
			EventHandlers: []EventHandler{
				{Name: "pre-start"},
				{Name: "pre-start"},
			},
		},
		{
			Exec:  []string{"/bin/httpd"},
			User:  "0",
			Group: "0",
			EventHandlers: []EventHandler{
				{Name: "post-stop"},
				{Name: "pre-start"},
				{Name: "post-stop"},
			},
		},
	}
	for i, tt := range tests {
		if err := tt.assertValid(); err == nil {
			t.Errorf("#%d: err == nil, want non-nil", i)
		}
	}
}
func | (t *testing.T) {
tests := []App{
App{
Exec: []string{"/app"},
},
App{
Exec: []string{"/app"},
User: "0",
},
App{
Exec: []string{"app"},
Group: "0",
},
}
for i, tt := range tests {
if err := tt.assertValid(); err == nil {
t.Errorf("#%d: err == nil, want non-nil", i)
}
}
}
// TestAppWorkingDirectoryInvalid verifies that a relative working
// directory is rejected by assertValid.
func TestAppWorkingDirectoryInvalid(t *testing.T) {
	// gofmt -s: element type elided inside the []App literal.
	tests := []App{
		{
			WorkingDirectory: "stuff",
		},
	}
	for i, tt := range tests {
		if err := tt.assertValid(); err == nil {
			t.Errorf("#%d: err == nil, want non-nil", i)
		}
	}
}
| TestUserGroupInvalid |
ridge.py | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..metrics.scorer import check_scoring
from ..exceptions import ConvergenceWarning
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
    """Solve the ridge equations with conjugate gradients, one CG run
    per target column.

    ``alpha`` holds one penalty per target.  Returns an array of shape
    (n_targets, n_features).
    """
    n_samples, n_features = X.shape
    # Wrap X so matvec/rmatvec work uniformly for dense arrays, sparse
    # matrices, and LinearOperator inputs.
    X1 = sp_linalg.aslinearoperator(X)
    coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
    if n_features > n_samples:
        def create_mv(curr_alpha):
            # Operator for the dual (kernel) system: (X X^T + alpha*I) v.
            def _mv(x):
                return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
            return _mv
    else:
        def create_mv(curr_alpha):
            # Operator for the primal system: (X^T X + alpha*I) v.
            def _mv(x):
                return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
            return _mv
    for i in range(y.shape[1]):
        y_column = y[:, i]
        mv = create_mv(alpha[i])
        if n_features > n_samples:
            # kernel ridge
            # w = X.T * inv(X X^t + alpha*Id) y
            C = sp_linalg.LinearOperator(
                (n_samples, n_samples), matvec=mv, dtype=X.dtype)
            # FIXME atol
            try:
                coef, info = sp_linalg.cg(C, y_column, tol=tol, atol='legacy')
            except TypeError:
                # old scipy (no atol keyword)
                coef, info = sp_linalg.cg(C, y_column, tol=tol)
            coefs[i] = X1.rmatvec(coef)
        else:
            # linear ridge
            # w = inv(X^t X + alpha*Id) * X.T y
            y_column = X1.rmatvec(y_column)
            C = sp_linalg.LinearOperator(
                (n_features, n_features), matvec=mv, dtype=X.dtype)
            # FIXME atol
            try:
                coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
                                              tol=tol, atol='legacy')
            except TypeError:
                # old scipy (no atol keyword)
                coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
                                              tol=tol)
        # Per scipy.sparse.linalg.cg: info < 0 means illegal input or
        # breakdown, info > 0 means no convergence within the limit.
        if info < 0:
            raise ValueError("Failed with error code %d" % info)
        if max_iter is None and info > 0 and verbose:
            warnings.warn("sparse_cg did not converge after %d iterations." %
                          info, ConvergenceWarning)
    return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
    """Closed-form ridge solve: w = inv(X^T X + alpha*Id) X^T y."""
    n_features = X.shape[1]
    n_targets = y.shape[1]

    gram = safe_sparse_dot(X.T, X, dense_output=True)
    Xy = safe_sparse_dot(X.T, y, dense_output=True)

    # View of the Gram matrix diagonal through the flat iterator.
    diag = slice(None, None, n_features + 1)

    if np.array_equal(alpha, len(alpha) * [alpha[0]]):
        # One shared penalty: a single factorization covers all targets.
        gram.flat[diag] += alpha[0]
        return linalg.solve(gram, Xy, sym_pos=True,
                            overwrite_a=True).T

    coefs = np.empty([n_targets, n_features], dtype=X.dtype)
    for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
        # Add this target's penalty, solve, then undo the in-place edit
        # so the next target starts from the unpenalized Gram matrix.
        gram.flat[diag] += current_alpha
        coef[:] = linalg.solve(gram, target, sym_pos=True,
                               overwrite_a=False).ravel()
        gram.flat[diag] -= current_alpha
    return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
    """Solve the dual (kernel) ridge system.

    Computes dual_coef = inv(K + alpha*Id) y, with one penalty per
    target.  K is modified in place but restored before returning,
    unless ``copy=True`` requests a private copy first.
    """
    # dual_coef = inv(X X^t + alpha*Id) y
    n_samples = K.shape[0]
    n_targets = y.shape[1]
    if copy:
        K = K.copy()
    alpha = np.atleast_1d(alpha)
    one_alpha = (alpha == alpha[0]).all()
    # Scalar weights of 1.0 / None mean "no weighting".
    has_sw = isinstance(sample_weight, np.ndarray) \
        or sample_weight not in [1.0, None]
    if has_sw:
        # Unlike other solvers, we need to support sample_weight directly
        # because K might be a pre-computed kernel.
        # Weighting is applied symmetrically: sqrt(sw) on y and on both
        # sides of K, undone on dual_coef below.
        sw = np.sqrt(np.atleast_1d(sample_weight))
        y = y * sw[:, np.newaxis]
        K *= np.outer(sw, sw)
    if one_alpha:
        # Only one penalty, we can solve multi-target problems in one time.
        K.flat[::n_samples + 1] += alpha[0]
        try:
            # Note: we must use overwrite_a=False in order to be able to
            # use the fall-back solution below in case a LinAlgError
            # is raised
            dual_coef = linalg.solve(K, y, sym_pos=True,
                                     overwrite_a=False)
        except np.linalg.LinAlgError:
            warnings.warn("Singular matrix in solving dual problem. Using "
                          "least-squares solution instead.")
            dual_coef = linalg.lstsq(K, y)[0]
        # K is expensive to compute and store in memory so change it back in
        # case it was user-given.
        K.flat[::n_samples + 1] -= alpha[0]
        if has_sw:
            dual_coef *= sw[:, np.newaxis]
        return dual_coef
    else:
        # One penalty per target. We need to solve each target separately.
        dual_coefs = np.empty([n_targets, n_samples], K.dtype)
        for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
            # Penalize, solve, then restore K's diagonal for the next target.
            K.flat[::n_samples + 1] += current_alpha
            dual_coef[:] = linalg.solve(K, target, sym_pos=True,
                                        overwrite_a=False).ravel()
            K.flat[::n_samples + 1] -= current_alpha
        if has_sw:
            dual_coefs *= sw[np.newaxis, :]
        return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size), dtype=X.dtype)
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
                     max_iter=None, tol=1e-3, verbose=0, random_state=None,
                     return_n_iter=False, return_intercept=False):
    """Solve the ridge equation by the method of normal equations.
    Read more in the :ref:`User Guide <ridge_regression>`.
    Parameters
    ----------
    X : {array-like, sparse matrix, LinearOperator},
        shape = [n_samples, n_features]
        Training data
    y : array-like, shape = [n_samples] or [n_samples, n_targets]
        Target values
    alpha : {float, array-like},
        shape = [n_targets] if array-like
        Regularization strength; must be a positive float. Regularization
        improves the conditioning of the problem and reduces the variance of
        the estimates. Larger values specify stronger regularization.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC. If an array is passed, penalties are
        assumed to be specific to the targets. Hence they must correspond in
        number.
    sample_weight : float or numpy array of shape [n_samples]
        Individual weights for each sample. If sample_weight is not None and
        solver='auto', the solver will be set to 'cholesky'.
        .. versionadded:: 0.17
    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}
        Solver to use in the computational routines:
        - 'auto' chooses the solver automatically based on the type of data.
        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than
          'cholesky'.
        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution via a Cholesky decomposition of
          dot(X.T, X)
        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).
        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
          procedure.
        - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
          its improved, unbiased version named SAGA. Both methods also use an
          iterative procedure, and are often faster than other solvers when
          both n_samples and n_features are large. Note that 'sag' and
          'saga' fast convergence is only guaranteed on features with
          approximately the same scale. You can preprocess the data with a
          scaler from sklearn.preprocessing.
        All last five solvers support both dense and sparse data. However, only
        'sag' and 'saga' supports sparse input when`fit_intercept` is True.
        .. versionadded:: 0.17
           Stochastic Average Gradient descent solver.
        .. versionadded:: 0.19
           SAGA solver.
    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        For the 'sparse_cg' and 'lsqr' solvers, the default value is determined
        by scipy.sparse.linalg. For 'sag' and saga solver, the default value is
        1000.
    tol : float
        Precision of the solution.
    verbose : int
        Verbosity level. Setting verbose > 0 will display additional
        information depending on the solver used.
    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data. If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'sag'.
    return_n_iter : boolean, default False
        If True, the method also returns `n_iter`, the actual number of
        iteration performed by the solver.
        .. versionadded:: 0.17
    return_intercept : boolean, default False
        If True and if X is sparse, the method also returns the intercept,
        and the solver is automatically changed to 'sag'. This is only a
        temporary fix for fitting the intercept with sparse data. For dense
        data, use sklearn.linear_model._preprocess_data before your regression.
        .. versionadded:: 0.17
    Returns
    -------
    coef : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).
    n_iter : int, optional
        The actual number of iteration performed by the solver.
        Only returned if `return_n_iter` is True.
    intercept : float or array, shape = [n_targets]
        The intercept of the model. Only returned if `return_intercept`
        is True and if X is a scipy sparse array.
    Notes
    -----
    This function won't compute the intercept.
    """
    # Only 'sag' can fit an intercept on sparse X; reroute (with a
    # warning unless the caller asked for 'auto').
    if return_intercept and sparse.issparse(X) and solver != 'sag':
        if solver != 'auto':
            warnings.warn("In Ridge, only 'sag' solver can currently fit the "
                          "intercept when X is sparse. Solver has been "
                          "automatically changed into 'sag'.")
        solver = 'sag'
    _dtype = [np.float64, np.float32]
    # SAG needs X and y columns to be C-contiguous and np.float64
    if solver in ['sag', 'saga']:
        X = check_array(X, accept_sparse=['csr'],
                        dtype=np.float64, order='C')
        y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
    else:
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=_dtype)
        y = check_array(y, dtype=X.dtype, ensure_2d=False)
    check_consistent_length(X, y)
    n_samples, n_features = X.shape
    if y.ndim > 2:
        raise ValueError("Target y has the wrong shape %s" % str(y.shape))
    # Work internally with 2-D y; remember to flatten coef on return.
    ravel = False
    if y.ndim == 1:
        y = y.reshape(-1, 1)
        ravel = True
    n_samples_, n_targets = y.shape
    if n_samples != n_samples_:
        raise ValueError("Number of samples in X and y does not correspond:"
                         " %d != %d" % (n_samples, n_samples_))
    has_sw = sample_weight is not None
    if solver == 'auto':
        # cholesky if it's a dense array and cg in any other case
        if not sparse.issparse(X) or has_sw:
            solver = 'cholesky'
        else:
            solver = 'sparse_cg'
    if has_sw:
        if np.atleast_1d(sample_weight).ndim > 1:
            raise ValueError("Sample weights must be 1D array or scalar")
        if solver not in ['sag', 'saga']:
            # SAG supports sample_weight directly. For other solvers,
            # we implement sample_weight via a simple rescaling.
            X, y = _rescale_data(X, y, sample_weight)
    # There should be either 1 or n_targets penalties
    alpha = np.asarray(alpha, dtype=X.dtype).ravel()
    if alpha.size not in [1, n_targets]:
        raise ValueError("Number of targets and number of penalties "
                         "do not correspond: %d != %d"
                         % (alpha.size, n_targets))
    # Broadcast a single penalty to every target so each solver can
    # assume one alpha per target.
    if alpha.size == 1 and n_targets > 1:
        alpha = np.repeat(alpha, n_targets)
    if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga'):
        raise ValueError('Solver %s not understood' % solver)
    n_iter = None
    if solver == 'sparse_cg':
        coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
    elif solver == 'lsqr':
        coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
    elif solver == 'cholesky':
        # Wide data: solve the smaller dual (kernel) system instead.
        if n_features > n_samples:
            K = safe_sparse_dot(X, X.T, dense_output=True)
            try:
                dual_coef = _solve_cholesky_kernel(K, y, alpha)
                coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = 'svd'
        else:
            try:
                coef = _solve_cholesky(X, y, alpha)
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = 'svd'
    elif solver in ['sag', 'saga']:
        # precompute max_squared_sum for all targets
        max_squared_sum = row_norms(X, squared=True).max()
        coef = np.empty((y.shape[1], n_features))
        n_iter = np.empty(y.shape[1], dtype=np.int32)
        intercept = np.zeros((y.shape[1], ))
        for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
            # When fitting the intercept, sag stores it as the last
            # coefficient; split it back out below.
            init = {'coef': np.zeros((n_features + int(return_intercept), 1))}
            coef_, n_iter_, _ = sag_solver(
                X, target.ravel(), sample_weight, 'squared', alpha_i, 0,
                max_iter, tol, verbose, random_state, False, max_squared_sum,
                init,
                is_saga=solver == 'saga')
            if return_intercept:
                coef[i] = coef_[:-1]
                intercept[i] = coef_[-1]
            else:
                coef[i] = coef_
            n_iter[i] = n_iter_
        if intercept.shape[0] == 1:
            intercept = intercept[0]
        coef = np.asarray(coef)
    # Reached either directly or via the singular-matrix fallback from
    # the 'cholesky' branch above.
    if solver == 'svd':
        if sparse.issparse(X):
            raise TypeError('SVD solver does not support sparse'
                            ' inputs currently')
        coef = _solve_svd(X, y, alpha)
    if ravel:
        # When y was passed as a 1d-array, we flatten the coefficients.
        coef = coef.ravel()
    if return_n_iter and return_intercept:
        return coef, n_iter, intercept
    elif return_intercept:
        return coef, intercept
    elif return_n_iter:
        return coef, n_iter
    else:
        return coef
class _BaseRidge(LinearModel, metaclass=ABCMeta):
    """Abstract base holding the hyper-parameters and the shared ``fit``
    used by the concrete Ridge estimators; subclasses provide their own
    ``__init__`` with the same signature.
    """
    @abstractmethod
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto",
                 random_state=None):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.max_iter = max_iter
        self.tol = tol
        self.solver = solver
        self.random_state = random_state
    def fit(self, X, y, sample_weight=None):
        # 'sag'/'saga' require float64 input; see the same constraint in
        # ridge_regression.
        if self.solver in ('sag', 'saga'):
            _dtype = np.float64
        else:
            # all other solvers work at both float precision levels
            _dtype = [np.float64, np.float32]
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=_dtype,
                         multi_output=True, y_numeric=True)
        if ((sample_weight is not None) and
                np.atleast_1d(sample_weight).ndim > 1):
            raise ValueError("Sample weights must be 1D array or scalar")
        # Center/scale per fit_intercept and normalize; the offsets are
        # used below to recover the intercept.
        X, y, X_offset, y_offset, X_scale = self._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)
        # temporary fix for fitting the intercept with sparse data using 'sag'
        if sparse.issparse(X) and self.fit_intercept:
            self.coef_, self.n_iter_, self.intercept_ = ridge_regression(
                X, y, alpha=self.alpha, sample_weight=sample_weight,
                max_iter=self.max_iter, tol=self.tol, solver=self.solver,
                random_state=self.random_state, return_n_iter=True,
                return_intercept=True)
            self.intercept_ += y_offset
        else:
            self.coef_, self.n_iter_ = ridge_regression(
                X, y, alpha=self.alpha, sample_weight=sample_weight,
                max_iter=self.max_iter, tol=self.tol, solver=self.solver,
                random_state=self.random_state, return_n_iter=True,
                return_intercept=False)
            self._set_intercept(X_offset, y_offset, X_scale)
        return self
class Ridge(_BaseRidge, RegressorMixin):
    """Linear least squares with l2 regularization.
    Minimizes the objective function::
    ||y - Xw||^2_2 + alpha * ||w||^2_2
    This model solves a regression model where the loss function is
    the linear least squares function and regularization is given by
    the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
    This estimator has built-in support for multi-variate regression
    (i.e., when y is a 2d-array of shape [n_samples, n_targets]).
    Read more in the :ref:`User Guide <ridge_regression>`.
    Parameters
    ----------
    alpha : {float, array-like}, shape (n_targets)
        Regularization strength; must be a positive float. Regularization
        improves the conditioning of the problem and reduces the variance of
        the estimates. Larger values specify stronger regularization.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC. If an array is passed, penalties are
        assumed to be specific to the targets. Hence they must correspond in
        number.
    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        For 'sparse_cg' and 'lsqr' solvers, the default value is determined
        by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
    tol : float
        Precision of the solution.
    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}
        Solver to use in the computational routines:
        - 'auto' chooses the solver automatically based on the type of data.
        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than
          'cholesky'.
        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution.
        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).
        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
          procedure.
        - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
          its improved, unbiased version named SAGA. Both methods also use an
          iterative procedure, and are often faster than other solvers when
          both n_samples and n_features are large. Note that 'sag' and
          'saga' fast convergence is only guaranteed on features with
          approximately the same scale. You can preprocess the data with a
          scaler from sklearn.preprocessing.
        All last five solvers support both dense and sparse data. However,
        only 'sag' and 'saga' supports sparse input when `fit_intercept` is
        True.
        .. versionadded:: 0.17
           Stochastic Average Gradient descent solver.
        .. versionadded:: 0.19
           SAGA solver.
    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data. If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'sag'.
        .. versionadded:: 0.17
           *random_state* to support Stochastic Average Gradient.
    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Weight vector(s).
    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.
    n_iter_ : array or None, shape (n_targets,)
        Actual number of iterations for each target. Available only for
        sag and lsqr solvers. Other solvers will return None.
        .. versionadded:: 0.17
    See also
    --------
    RidgeClassifier : Ridge classifier
    RidgeCV : Ridge regression with built-in cross validation
    :class:`sklearn.kernel_ridge.KernelRidge` : Kernel ridge regression
        combines ridge regression with the kernel trick
    Examples
    --------
    >>> from sklearn.linear_model import Ridge
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = Ridge(alpha=1.0)
    >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
    Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
          normalize=False, random_state=None, solver='auto', tol=0.001)
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto",
                 random_state=None):
        # All hyper-parameter storage lives in _BaseRidge.__init__.
        super().__init__(
            alpha=alpha, fit_intercept=fit_intercept,
            normalize=normalize, copy_X=copy_X,
            max_iter=max_iter, tol=tol, solver=solver,
            random_state=random_state)
    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data
        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values
        sample_weight : float or numpy array of shape [n_samples]
            Individual weights for each sample
        Returns
        -------
        self : returns an instance of self.
        """
        # Validation, solver dispatch, and intercept handling are all
        # implemented in _BaseRidge.fit.
        return super().fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
tol : float
Precision of the solution.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its unbiased and more flexible version named SAGA. Both methods
use an iterative procedure, and are often faster than other solvers
when both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag'.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import RidgeClassifier
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = RidgeClassifier().fit(X, y)
>>> clf.score(X, y) # doctest: +ELLIPSIS
0.9595...
See also
--------
Ridge : Ridge regression
RidgeClassifierCV : Ridge classifier with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
                 solver="auto", random_state=None):
        # Delegate all shared ridge parameters to the base class; only
        # class_weight is classifier-specific and stored locally.
        super().__init__(
            alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
            copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
            random_state=random_state)
        self.class_weight = class_weight
    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples,n_features]
            Training data
        y : array-like, shape = [n_samples]
            Target values
        sample_weight : float or numpy array of shape (n_samples,)
            Sample weight.
            .. versionadded:: 0.17
               *sample_weight* support to Classifier.
        Returns
        -------
        self : returns an instance of self.
        """
        # Validation only: the converted arrays are deliberately discarded
        # here; super().fit performs its own validation and conversion.
        check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
                  multi_output=True)
        # Encode targets as a {-1, +1} indicator matrix, one column per class.
        self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
        Y = self._label_binarizer.fit_transform(y)
        if not self._label_binarizer.y_type_.startswith('multilabel'):
            y = column_or_1d(y, warn=True)
        else:
            # we don't (yet) support multi-label classification in Ridge
            raise ValueError(
                "%s doesn't support multi-label classification" % (
                    self.__class__.__name__))
        if self.class_weight:
            if sample_weight is None:
                sample_weight = 1.
            # modify the sample weights with the corresponding class weight
            sample_weight = (sample_weight *
                             compute_sample_weight(self.class_weight, y))
        # Regress on the indicator matrix; prediction takes the argmax
        # over the per-class columns (one-vs-all).
        super().fit(X, Y, sample_weight=sample_weight)
        return self
    @property
    def classes_(self):
        # Classes seen during fit, as recorded by the label binarizer.
        return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/publications/ps/MIT-CSAIL-TR-2007-025.pdf
https://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
    def __init__(self, alphas=(0.1, 1.0, 10.0),
                 fit_intercept=True, normalize=False,
                 scoring=None, copy_X=True,
                 gcv_mode=None, store_cv_values=False):
        # Candidate regularization strengths evaluated by leave-one-out CV.
        self.alphas = np.asarray(alphas)
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.scoring = scoring
        self.copy_X = copy_X
        # 'eigen', 'svd', 'auto' or None; see fit() for the selection logic.
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values
    def _pre_compute(self, X, y, centered_kernel=True):
        """Eigendecompose the kernel K = X X^T shared by all alphas."""
        # even if X is very sparse, K is usually very dense
        K = safe_sparse_dot(X, X.T, dense_output=True)
        # the following emulates an additional constant regressor
        # corresponding to fit_intercept=True
        # but this is done only when the features have been centered
        if centered_kernel:
            K += np.ones_like(K)
        v, Q = linalg.eigh(K)
        QT_y = np.dot(Q.T, y)
        # eigenvalues, eigenvectors, and y projected onto the eigenbasis
        return v, Q, QT_y
    def _decomp_diag(self, v_prime, Q):
        """Diagonal of Q diag(v_prime) Q^T without forming the matrix."""
        # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
        return (v_prime * Q ** 2).sum(axis=-1)
    def _diag_dot(self, D, B):
        """Product diag(D) @ B via broadcasting (works for B of any rank)."""
        # compute dot(diag(D), B)
        if len(B.shape) > 1:
            # handle case where B is > 1-d: align D along B's first axis
            D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
        return D * B
    def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):
        """Helper function to avoid code duplication between self._errors and
        self._values.
        Notes
        -----
        We don't construct matrix G, instead compute action on y & diagonal.
        """
        # G = (K + alpha*Id)^-1 = Q diag(w) Q^T with w = 1/(v + alpha)
        w = 1. / (v + alpha)
        constant_column = np.var(Q, 0) < 1.e-12
        # detect constant columns
        w[constant_column] = 0  # cancel the regularization for the intercept
        # dual coefficients c = G y
        c = np.dot(Q, self._diag_dot(w, QT_y))
        G_diag = self._decomp_diag(w, Q)
        # handle case where y is 2-d
        if len(y.shape) != 1:
            G_diag = G_diag[:, np.newaxis]
        return G_diag, c
    def _errors(self, alpha, y, v, Q, QT_y):
        # Squared leave-one-out errors: looe = c / diag(G).
        G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
        return (c / G_diag) ** 2, c
    def _values(self, alpha, y, v, Q, QT_y):
        # Leave-one-out predictions: loov = y - c / diag(G).
        G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
        return y - (c / G_diag), c
    def _pre_compute_svd(self, X, y, centered_kernel=True):
        """SVD-based decomposition of X; dense input only."""
        if sparse.issparse(X):
            raise TypeError("SVD not supported for sparse matrices")
        if centered_kernel:
            X = np.hstack((X, np.ones((X.shape[0], 1))))
        # to emulate fit_intercept=True situation, add a column on ones
        # Note that by centering, the other columns are orthogonal to that one
        U, s, _ = linalg.svd(X, full_matrices=0)
        # squared singular values play the role of the kernel eigenvalues
        v = s ** 2
        UT_y = np.dot(U.T, y)
        return v, U, UT_y
def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):
|
    def _errors_svd(self, alpha, y, v, U, UT_y):
        # Squared leave-one-out errors, SVD variant.
        G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
        return (c / G_diag) ** 2, c
    def _values_svd(self, alpha, y, v, U, UT_y):
        # Leave-one-out predictions, SVD variant.
        G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
        return y - (c / G_diag), c
    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data
        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values. Will be cast to X's dtype if necessary
        sample_weight : float or array-like of shape [n_samples]
            Sample weight
        Returns
        -------
        self : object
        """
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
                         multi_output=True, y_numeric=True)
        if sample_weight is not None and not isinstance(sample_weight, float):
            sample_weight = check_array(sample_weight, ensure_2d=False)
        n_samples, n_features = X.shape
        # Center/scale the data exactly as the final model will see it.
        X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)
        gcv_mode = self.gcv_mode
        # truthy (1) when an array of sample weights was supplied
        with_sw = len(np.shape(sample_weight))
        if gcv_mode is None or gcv_mode == 'auto':
            # eigen handles sparse input, n_features > n_samples and
            # non-uniform sample weights; svd is the cheaper path otherwise
            if sparse.issparse(X) or n_features > n_samples or with_sw:
                gcv_mode = 'eigen'
            else:
                gcv_mode = 'svd'
        elif gcv_mode == "svd" and with_sw:
            # FIXME non-uniform sample weights not yet supported
            warnings.warn("non-uniform sample weights unsupported for svd, "
                          "forcing usage of eigen")
            gcv_mode = 'eigen'
        if gcv_mode == 'eigen':
            _pre_compute = self._pre_compute
            _errors = self._errors
            _values = self._values
        elif gcv_mode == 'svd':
            # assert n_samples >= n_features
            _pre_compute = self._pre_compute_svd
            _errors = self._errors_svd
            _values = self._values_svd
        else:
            raise ValueError('bad gcv_mode "%s"' % gcv_mode)
        if sample_weight is not None:
            X, y = _rescale_data(X, y, sample_weight)
        centered_kernel = not sparse.issparse(X) and self.fit_intercept
        # One decomposition shared by every candidate alpha.
        v, Q, QT_y = _pre_compute(X, y, centered_kernel)
        n_y = 1 if len(y.shape) == 1 else y.shape[1]
        cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
        C = []
        scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
        # without a scorer we minimize the LOO squared error directly
        error = scorer is None
        if np.any(self.alphas < 0):
            raise ValueError("alphas cannot be negative. "
                             "Got {} containing some "
                             "negative value instead.".format(self.alphas))
        for i, alpha in enumerate(self.alphas):
            if error:
                out, c = _errors(float(alpha), y, v, Q, QT_y)
            else:
                out, c = _values(float(alpha), y, v, Q, QT_y)
            cv_values[:, i] = out.ravel()
            C.append(c)
        if error:
            # pick the alpha with the smallest mean LOO error
            best = cv_values.mean(axis=0).argmin()
        else:
            # The scorer want an object that will make the predictions but
            # they are already computed efficiently by _RidgeGCV. This
            # identity_estimator will just return them
            def identity_estimator():
                pass
            identity_estimator.decision_function = lambda y_predict: y_predict
            identity_estimator.predict = lambda y_predict: y_predict
            out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
                   for i in range(len(self.alphas))]
            best = np.argmax(out)
        self.alpha_ = self.alphas[best]
        self.dual_coef_ = C[best]
        # primal solution w = X^T c
        self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
        self._set_intercept(X_offset, y_offset, X_scale)
        if self.store_cv_values:
            if len(y.shape) == 1:
                cv_values_shape = n_samples, len(self.alphas)
            else:
                cv_values_shape = n_samples, n_y, len(self.alphas)
            self.cv_values_ = cv_values.reshape(cv_values_shape)
        return self
class _BaseRidgeCV(LinearModel):
    def __init__(self, alphas=(0.1, 1.0, 10.0),
                 fit_intercept=True, normalize=False, scoring=None,
                 cv=None, gcv_mode=None,
                 store_cv_values=False):
        # Candidate regularization strengths to cross-validate over.
        self.alphas = np.asarray(alphas)
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.scoring = scoring
        # cv=None selects the efficient leave-one-out GCV path in fit().
        self.cv = cv
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values
    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data
        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values. Will be cast to X's dtype if necessary
        sample_weight : float or array-like of shape [n_samples]
            Sample weight
        Returns
        -------
        self : object
        """
        if self.cv is None:
            # Efficient generalized (leave-one-out) cross-validation path.
            estimator = _RidgeGCV(self.alphas,
                                  fit_intercept=self.fit_intercept,
                                  normalize=self.normalize,
                                  scoring=self.scoring,
                                  gcv_mode=self.gcv_mode,
                                  store_cv_values=self.store_cv_values)
            estimator.fit(X, y, sample_weight=sample_weight)
            self.alpha_ = estimator.alpha_
            if self.store_cv_values:
                self.cv_values_ = estimator.cv_values_
        else:
            # cv_values_ are only produced by the GCV path.
            if self.store_cv_values:
                raise ValueError("cv!=None and store_cv_values=True "
                                 " are incompatible")
            # Explicit grid search over alphas with the requested CV splitter.
            parameters = {'alpha': self.alphas}
            gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept,
                                    normalize=self.normalize),
                              parameters, cv=self.cv, scoring=self.scoring)
            gs.fit(X, y, sample_weight=sample_weight)
            estimator = gs.best_estimator_
            self.alpha_ = gs.best_estimator_.alpha
        self.coef_ = estimator.coef_
        self.intercept_ = estimator.intercept_
        return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
    """Ridge regression with built-in cross-validation.
    See glossary entry for :term:`cross-validation estimator`.
    By default, it performs Generalized Cross-Validation, which is a form of
    efficient Leave-One-Out cross-validation.
    Read more in the :ref:`User Guide <ridge_regression>`.
    Parameters
    ----------
    alphas : numpy array of shape [n_alphas]
        Array of alpha values to try.
        Regularization strength; must be a positive float. Regularization
        improves the conditioning of the problem and reduces the variance of
        the estimates. Larger values specify stronger regularization.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC.
    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the efficient Leave-One-Out cross-validation
        - integer, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`sklearn.model_selection.StratifiedKFold` is used, else,
        :class:`sklearn.model_selection.KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
        Flag indicating which strategy to use when performing
        Generalized Cross-Validation. Options are::
            'auto' : use 'svd' if n_samples > n_features and X is dense,
                otherwise use 'eigen'
            'svd' : force computation via singular value decomposition of X
                (does not work for sparse matrices)
            'eigen' : force computation via eigendecomposition of X^T X
        The 'auto' mode is the default and is intended to pick the cheaper
        option of the two depending upon the shape and format of the training
        data.
    store_cv_values : boolean, default=False
        Flag indicating if the cross-validation values corresponding to
        each alpha should be stored in the ``cv_values_`` attribute (see
        below). This flag is only compatible with ``cv=None`` (i.e. using
        Generalized Cross-Validation).
    Attributes
    ----------
    cv_values_ : array, shape = [n_samples, n_alphas] or \
        shape = [n_samples, n_targets, n_alphas], optional
        Cross-validation values for each alpha (if ``store_cv_values=True``\
        and ``cv=None``). After ``fit()`` has been called, this attribute \
        will contain the mean squared errors (by default) or the values \
        of the ``{loss,score}_func`` function (if provided in the constructor).
    coef_ : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).
    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.
    alpha_ : float
        Estimated regularization parameter.
    Examples
    --------
    >>> from sklearn.datasets import load_diabetes
    >>> from sklearn.linear_model import RidgeCV
    >>> X, y = load_diabetes(return_X_y=True)
    >>> clf = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
    >>> clf.score(X, y) # doctest: +ELLIPSIS
    0.5166...
    See also
    --------
    Ridge : Ridge regression
    RidgeClassifier : Ridge classifier
    RidgeClassifierCV : Ridge classifier with built-in cross validation
    """
    # All behavior is inherited from _BaseRidgeCV and RegressorMixin.
    pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
See glossary entry for :term:`cross-validation estimator`.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the ``cv_values_`` attribute (see
below). This flag is only compatible with ``cv=None`` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if ``store_cv_values=True`` and
``cv=None``). After ``fit()`` has been called, this attribute will
contain the mean squared errors (by default) or the values of the
``{loss,score}_func`` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import RidgeClassifierCV
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
>>> clf.score(X, y) # doctest: +ELLIPSIS
0.9630...
See also
--------
Ridge : Ridge regression
RidgeClassifier : Ridge classifier
RidgeCV : Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
    def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
                 normalize=False, scoring=None, cv=None, class_weight=None,
                 store_cv_values=False):
        # Delegate the shared CV-ridge parameters to the base class; only
        # class_weight is classifier-specific and stored locally.
        super().__init__(
            alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
            scoring=scoring, cv=cv, store_cv_values=store_cv_values)
        self.class_weight = class_weight
    def fit(self, X, y, sample_weight=None):
        """Fit the ridge classifier.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target values. Will be cast to X's dtype if necessary
        sample_weight : float or numpy array of shape (n_samples,)
            Sample weight.
        Returns
        -------
        self : object
        """
        # Validation only: the converted arrays are deliberately discarded
        # here; _BaseRidgeCV.fit performs its own validation and conversion.
        check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
                  multi_output=True)
        # Encode targets as a {-1, +1} indicator matrix, one column per class.
        self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
        Y = self._label_binarizer.fit_transform(y)
        if not self._label_binarizer.y_type_.startswith('multilabel'):
            y = column_or_1d(y, warn=True)
        if self.class_weight:
            if sample_weight is None:
                sample_weight = 1.
            # modify the sample weights with the corresponding class weight
            sample_weight = (sample_weight *
                             compute_sample_weight(self.class_weight, y))
        # Regress on the indicator matrix (one-vs-all).
        _BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
        return self
    @property
    def classes_(self):
        # Classes seen during fit, as recorded by the label binarizer.
        return self._label_binarizer.classes_
| """Helper function to avoid code duplication between self._errors_svd
and self._values_svd.
"""
constant_column = np.var(U, 0) < 1.e-12
# detect columns colinear to ones
w = ((v + alpha) ** -1) - (alpha ** -1)
w[constant_column] = - (alpha ** -1)
# cancel the regularization for the intercept
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return G_diag, c |
directory_iterator.rs | use crate::directory_entry::DirectoryEntry;
use std;
use crate::zim::Zim;
/// Iterates over the directory entries of a `Zim` archive in URL-list order.
pub struct DirectoryIterator<'a> {
    // total number of articles; iteration stops when `next` reaches it
    max: u32,
    // index of the next url_list entry to yield
    next: u32,
    // archive the entries are read from
    zim: &'a Zim,
}
| DirectoryIterator {
max: zim.header.article_count,
next: 0,
zim: zim,
}
}
}
impl<'a> std::iter::Iterator for DirectoryIterator<'a> {
type Item = DirectoryEntry;
    /// Yields the next `DirectoryEntry`, or `None` once all entries are
    /// consumed, the entry pointer is out of bounds, or parsing fails.
    fn next(&mut self) -> Option<Self::Item> {
        if self.next >= self.max {
            return None;
        }
        let dir_entry_ptr = self.zim.url_list[self.next as usize] as usize;
        self.next += 1;
        let len = self.zim.master_view.len();
        // NOTE(review): the upper bound `len - dir_entry_ptr` looks suspect --
        // slicing to the end of the view would be `dir_entry_ptr..len`. As
        // written, any pointer past len/2 yields an inverted (empty/invalid)
        // range and `get` returns None. Confirm against the ZIM format
        // reader before changing.
        let slice = self
            .zim
            .master_view
            .get(dir_entry_ptr..(len - dir_entry_ptr));
        match slice {
            Some(slice) => DirectoryEntry::new(self.zim, slice).ok(),
            None => None,
        }
    }
} | impl<'a> DirectoryIterator<'a> {
pub fn new(zim: &'a Zim) -> DirectoryIterator<'a> { |
capability_read.go | /*-
* Copyright (c) 2016-2017, Jörg Pernfuß
* Copyright (c) 2016, 1&1 Internet SE
*
* Use of this source code is governed by a 2-clause BSD license
* that can be found in the LICENSE file.
*/
package soma
import (
"database/sql"
"fmt"
"github.com/sirupsen/logrus"
"github.com/mjolnir42/soma/internal/handler"
"github.com/mjolnir42/soma/internal/msg"
"github.com/mjolnir42/soma/internal/stmt"
"github.com/mjolnir42/soma/lib/proto"
)
// CapabilityRead handles read requests for capabilities
type CapabilityRead struct {
	Input       chan msg.Request // incoming requests processed by Run
	Shutdown    chan struct{}    // closed by ShutdownNow to stop Run
	handlerName string           // name this handler registers under
	conn        *sql.DB          // database connection, set via Register
	stmtList    *sql.Stmt        // prepared statement backing list()
	stmtShow    *sql.Stmt        // prepared statement backing show()
	appLog      *logrus.Logger   // application log
	reqLog      *logrus.Logger   // request log
	errLog      *logrus.Logger   // error log
}
// newCapabilityRead return a new CapabilityRead handler with input buffer of length
func newCapabilityRead(length int) (string, *CapabilityRead) {
| // Register initializes resources provided by the Soma app
func (r *CapabilityRead) Register(c *sql.DB, l ...*logrus.Logger) {
	r.conn = c
	// logger order is fixed by the caller: application, request, error
	r.appLog = l[0]
	r.reqLog = l[1]
	r.errLog = l[2]
}
// RegisterRequests links the handler inside the handlermap to the requests
// it processes
func (r *CapabilityRead) RegisterRequests(hmap *handler.Map) {
	// route every capability read action to this handler instance
	for _, action := range []string{
		msg.ActionList,
		msg.ActionShow,
		msg.ActionSearch,
	} {
		hmap.Request(msg.SectionCapability, action, r.handlerName)
	}
}
// Intake exposes the Input channel as part of the handler interface
func (r *CapabilityRead) Intake() chan msg.Request {
	return r.Input
}
// PriorityIntake aliases Intake as part of the handler interface
func (r *CapabilityRead) PriorityIntake() chan msg.Request {
	// read handlers do not distinguish priority traffic
	return r.Intake()
}
// Run is the event loop for CapabilityRead
func (r *CapabilityRead) Run() {
	var err error
	// Prepare all statements up front; the deferred Closes fire only when
	// Run returns, so the statements live for the handler's lifetime.
	for statement, prepStmt := range map[string]**sql.Stmt{
		stmt.ListAllCapabilities: &r.stmtList,
		stmt.ShowCapability:      &r.stmtShow,
	} {
		if *prepStmt, err = r.conn.Prepare(statement); err != nil {
			r.errLog.Fatal(`capability`, err, stmt.Name(statement))
		}
		defer (*prepStmt).Close()
	}
runloop:
	for {
		select {
		case <-r.Shutdown:
			break runloop
		case req := <-r.Input:
			// serve each request concurrently
			go func() {
				r.process(&req)
			}()
		}
	}
}
// process is the request dispatcher
func (r *CapabilityRead) process(q *msg.Request) {
	result := msg.FromRequest(q)
	logRequest(r.reqLog, q)
	switch q.Action {
	case msg.ActionList:
		r.list(q, &result)
	case msg.ActionShow:
		r.show(q, &result)
	case msg.ActionSearch:
		// XXX BUG r.search(q, &result)
		// search is not implemented; a full list is returned instead
		r.list(q, &result)
	default:
		result.UnknownRequest(q)
	}
	// reply is always sent, even for unknown actions
	q.Reply <- result
}
// list returns all capabilities
func (r *CapabilityRead) list(q *msg.Request, mr *msg.Result) {
	var (
		id, monitoring, metric, view, monName string
		rows                                  *sql.Rows
		err                                   error
	)
	if rows, err = r.stmtList.Query(); err != nil {
		mr.ServerError(err, q.Section)
		return
	}
	// Release the result set on every path, including the early return on
	// scan errors. rows.Close is idempotent, so the implicit close after
	// an exhausted Next loop is harmless.
	defer rows.Close()
	for rows.Next() {
		if err = rows.Scan(
			&id,
			&monitoring,
			&metric,
			&view,
			&monName,
		); err != nil {
			mr.ServerError(err, q.Section)
			return
		}
		// display name is monitoring.view.metric
		mr.Capability = append(mr.Capability, proto.Capability{
			ID:           id,
			MonitoringID: monitoring,
			Metric:       metric,
			View:         view,
			Name: fmt.Sprintf("%s.%s.%s", monName, view,
				metric),
		})
	}
	// surface any error encountered during iteration
	if err = rows.Err(); err != nil {
		mr.ServerError(err, q.Section)
		return
	}
	mr.OK()
}
// show returns the details of a specific capability
func (r *CapabilityRead) show(q *msg.Request, mr *msg.Result) {
	var (
		id, monitoring, metric, view, monName string
		thresholds                            int
		err                                   error
	)
	if err = r.stmtShow.QueryRow(
		q.Capability.ID,
	).Scan(
		&id,
		&monitoring,
		&metric,
		&view,
		&thresholds,
		&monName,
	); err == sql.ErrNoRows {
		// a missing capability is a not-found condition, not a server error
		mr.NotFound(err, q.Section)
		return
	} else if err != nil {
		mr.ServerError(err, q.Section)
		return
	}
	// display name is monitoring.view.metric
	mr.Capability = append(mr.Capability, proto.Capability{
		ID:           id,
		MonitoringID: monitoring,
		Metric:       metric,
		View:         view,
		Thresholds:   uint64(thresholds),
		Name:         fmt.Sprintf("%s.%s.%s", monName, view, metric),
	})
	mr.OK()
}
// ShutdownNow signals the handler to shut down
func (r *CapabilityRead) ShutdownNow() {
	// closing the channel unblocks the select in Run
	close(r.Shutdown)
}
// vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix
| r := &CapabilityRead{}
r.handlerName = generateHandlerName() + `_r`
r.Input = make(chan msg.Request, length)
r.Shutdown = make(chan struct{})
return r.handlerName, r
}
|
provider.go | package provider
import (
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
"github.com/kyma-incubator/terraform-provider-gardener/client"
"github.com/kyma-incubator/terraform-provider-gardener/shoot"
)
// Provider returns the Gardener terraform provider: it declares the
// required kube_file argument, exposes the gardener_shoot resource, and
// wires providerConfigure as the configuration callback.
func Provider() terraform.ResourceProvider {
	return &schema.Provider{
		Schema: map[string]*schema.Schema{
			// path or contents of the kubeconfig used to reach Gardener
			"kube_file": {
				Type:     schema.TypeString,
				Required: true,
			},
		},
		ResourcesMap: map[string]*schema.Resource{
			"gardener_shoot": shoot.ResourceShoot(),
		},
		ConfigureFunc: providerConfigure,
	}
}
func | (d *schema.ResourceData) (interface{}, error) {
config := &client.Config{
KubeFile: d.Get("kube_file").(string),
}
return client.New(config)
}
| providerConfigure |
httplog.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package httplog
import (
"bufio"
"context"
"fmt"
"net"
"net/http"
"runtime"
"strings"
"time"
"k8s.io/apiserver/pkg/endpoints/metrics"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/klog/v2"
)
// StacktracePred returns true if a stacktrace should be logged for this status.
type StacktracePred func(httpStatus int) (logStacktrace bool)

// logger collects request-scoped log lines via Addf.
type logger interface {
	Addf(format string, data ...interface{})
}

// respLoggerContextKeyType is a private key type to avoid context key collisions.
type respLoggerContextKeyType int

// respLoggerContextKey is used to store the respLogger pointer in the request context.
const respLoggerContextKey respLoggerContextKeyType = iota
// Add a layer on top of ResponseWriter, so we can track latency and error
// message sources.
//
// TODO now that we're using go-restful, we shouldn't need to be wrapping
// the http.ResponseWriter. We can recover panics from go-restful, and
// the logging value is questionable.
type respLogger struct {
	hijacked           bool   // connection was hijacked (e.g. websocket upgrade)
	statusRecorded     bool   // a status code has been captured
	status             int    // captured HTTP status code
	statusStack        string // stacktrace captured when the predicate fired
	addedInfo          strings.Builder // extra lines appended via Addf
	startTime          time.Time       // request start, used for latency
	isTerminating      bool            // server was terminating when request arrived
	captureErrorOutput bool
	req                *http.Request
	w                  http.ResponseWriter // the wrapped writer
	logStacktracePred  StacktracePred
}
// Simple logger that logs immediately when Addf is called
type passthroughLogger struct{}
//lint:ignore SA1019 Interface implementation check to make sure we don't drop CloseNotifier again
var _ http.CloseNotifier = &respLogger{}
// Addf logs info immediately.
func (passthroughLogger) Addf(format string, data ...interface{}) {
	// Infof defers formatting to klog, so the Sprintf work is skipped
	// entirely when verbosity level 2 is disabled.
	klog.V(2).Infof(format, data...)
}
// DefaultStacktracePred is the default implementation of StacktracePred.
// It requests a stacktrace for informational (1xx) and server-error (5xx)
// statuses, except for 101 Switching Protocols; 2xx-4xx never trace.
func DefaultStacktracePred(status int) bool {
	switch {
	case status == http.StatusSwitchingProtocols:
		return false
	case status < http.StatusOK:
		return true
	case status >= http.StatusInternalServerError:
		return true
	default:
		return false
	}
}
// WithLogging wraps the handler with logging.
func WithLogging(handler http.Handler, pred StacktracePred, isTerminatingFn func() bool) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		ctx := req.Context()
		// nesting two logging layers would double-log; treat it as a
		// programmer error
		if old := respLoggerFromRequest(req); old != nil {
			panic("multiple WithLogging calls!")
		}
		// prefer the timestamp recorded at receipt over time.Now for latency
		startTime := time.Now()
		if receivedTimestamp, ok := request.ReceivedTimestampFrom(ctx); ok {
			startTime = receivedTimestamp
		}
		isTerminating := false
		if isTerminatingFn != nil {
			isTerminating = isTerminatingFn()
		}
		rl := newLoggedWithStartTime(req, w, startTime).StacktraceWhen(pred).IsTerminating(isTerminating)
		// make the logger reachable from the request context for AddInfof etc.
		req = req.WithContext(context.WithValue(ctx, respLoggerContextKey, rl))
		// log at v3 normally, but already at v1 while the server terminates
		if klog.V(3).Enabled() || (rl.isTerminating && klog.V(1).Enabled()) {
			defer rl.Log()
		}
		handler.ServeHTTP(rl, req)
	})
}
// respLoggerFromContext returns the respLogger or nil.
func respLoggerFromContext(ctx context.Context) *respLogger {
	if rl, ok := ctx.Value(respLoggerContextKey).(*respLogger); ok {
		return rl
	}
	return nil
}
// respLoggerFromRequest returns the respLogger stored in the request's
// context, or nil if none was installed.
func respLoggerFromRequest(req *http.Request) *respLogger {
	return respLoggerFromContext(req.Context())
}
// newLoggedWithStartTime wraps w in a respLogger that measures latency
// from the given start time and uses the default stacktrace predicate.
func newLoggedWithStartTime(req *http.Request, w http.ResponseWriter, startTime time.Time) *respLogger {
	return &respLogger{
		startTime:         startTime,
		req:               req,
		w:                 w,
		logStacktracePred: DefaultStacktracePred,
	}
}
// newLogged turns a normal response writer into a logged response writer.
func newLogged(req *http.Request, w http.ResponseWriter) *respLogger {
	// latency is measured from now
	return newLoggedWithStartTime(req, w, time.Now())
}
// LogOf returns the logger hiding in w. If there is not an existing logger
// then a passthroughLogger will be created which will log to stdout immediately
// when Addf is called.
func LogOf(req *http.Request, w http.ResponseWriter) logger {
	rl := respLoggerFromRequest(req)
	if rl == nil {
		return &passthroughLogger{}
	}
	return rl
}
// Unlogged returns the original ResponseWriter, or w if it is not our inserted logger.
func Unlogged(req *http.Request, w http.ResponseWriter) http.ResponseWriter {
	rl := respLoggerFromRequest(req)
	if rl == nil {
		return w
	}
	return rl.w
}
// DisableStackTraceForRequest stops putting a stacktrace into the log.
func DisableStackTraceForRequest(req *http.Request) |
// StacktraceWhen sets the stacktrace logging predicate, which decides when to log a stacktrace.
// There's a default, so you don't need to call this unless you don't like the default.
// It returns rl to allow chained configuration.
func (rl *respLogger) StacktraceWhen(pred StacktracePred) *respLogger {
	rl.logStacktracePred = pred
	return rl
}
// IsTerminating informs the logger that the server is terminating.
// It returns rl to allow chained configuration.
func (rl *respLogger) IsTerminating(is bool) *respLogger {
	rl.isTerminating = is
	return rl
}
// StatusIsNot returns a StacktracePred which will cause stacktraces to be logged
// for any status *not* in the given list.
func StatusIsNot(statuses ...int) StacktracePred {
statusesNoTrace := map[int]bool{}
for _, s := range statuses {
statusesNoTrace[s] = true
}
return func(status int) bool {
_, ok := statusesNoTrace[status]
return !ok
}
}
// Addf adds additional data to be logged with this request.
func (rl *respLogger) Addf(format string, data ...interface{}) {
rl.addedInfo.WriteString("\n")
rl.addedInfo.WriteString(fmt.Sprintf(format, data...))
}
func AddInfof(ctx context.Context, format string, data ...interface{}) {
if rl := respLoggerFromContext(ctx); rl != nil {
rl.Addf(format, data...)
}
}
// Log is intended to be called once at the end of your request handler, via defer
func (rl *respLogger) Log() {
latency := time.Since(rl.startTime)
auditID := request.GetAuditIDTruncated(rl.req)
verb := rl.req.Method
if requestInfo, ok := request.RequestInfoFrom(rl.req.Context()); ok {
// If we can find a requestInfo, we can get a scope, and then
// we can convert GETs to LISTs when needed.
scope := metrics.CleanScope(requestInfo)
verb = metrics.CanonicalVerb(strings.ToUpper(verb), scope)
}
// mark APPLY requests and WATCH requests correctly.
verb = metrics.CleanVerb(verb, rl.req)
keysAndValues := []interface{}{
"verb", verb,
"URI", rl.req.RequestURI,
"latency", latency,
"userAgent", rl.req.UserAgent(),
"audit-ID", auditID,
"srcIP", rl.req.RemoteAddr,
}
if rl.hijacked {
keysAndValues = append(keysAndValues, "hijacked", true)
} else {
keysAndValues = append(keysAndValues, "resp", rl.status)
if len(rl.statusStack) > 0 {
keysAndValues = append(keysAndValues, "statusStack", rl.statusStack)
}
info := rl.addedInfo.String()
if len(info) > 0 {
keysAndValues = append(keysAndValues, "addedInfo", info)
}
}
klog.InfoSDepth(1, "HTTP", keysAndValues...)
}
// Header implements http.ResponseWriter.
func (rl *respLogger) Header() http.Header {
return rl.w.Header()
}
// Write implements http.ResponseWriter.
func (rl *respLogger) Write(b []byte) (int, error) {
if !rl.statusRecorded {
rl.recordStatus(http.StatusOK) // Default if WriteHeader hasn't been called
}
if rl.captureErrorOutput {
rl.Addf("logging error output: %q\n", string(b))
}
return rl.w.Write(b)
}
// Flush implements http.Flusher even if the underlying http.Writer doesn't implement it.
// Flush is used for streaming purposes and allows to flush buffered data to the client.
func (rl *respLogger) Flush() {
if flusher, ok := rl.w.(http.Flusher); ok {
flusher.Flush()
} else if klog.V(2).Enabled() {
klog.InfoDepth(1, fmt.Sprintf("Unable to convert %+v into http.Flusher", rl.w))
}
}
// WriteHeader implements http.ResponseWriter.
func (rl *respLogger) WriteHeader(status int) {
rl.recordStatus(status)
rl.w.WriteHeader(status)
}
// Hijack implements http.Hijacker.
func (rl *respLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) {
rl.hijacked = true
return rl.w.(http.Hijacker).Hijack()
}
// CloseNotify implements http.CloseNotifier
func (rl *respLogger) CloseNotify() <-chan bool {
//lint:ignore SA1019 There are places in the code base requiring the CloseNotifier interface to be implemented.
return rl.w.(http.CloseNotifier).CloseNotify()
}
func (rl *respLogger) recordStatus(status int) {
rl.status = status
rl.statusRecorded = true
if rl.logStacktracePred(status) {
// Only log stacks for errors
stack := make([]byte, 50*1024)
stack = stack[:runtime.Stack(stack, false)]
rl.statusStack = "\n" + string(stack)
rl.captureErrorOutput = true
} else {
rl.statusStack = ""
}
}
| {
if req == nil {
return
}
rl := respLoggerFromContext(req.Context())
if rl == nil {
return
}
rl.StacktraceWhen(func(int) bool { return false })
} |
target_ext_tunnel.py | #! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Create and remove network tunnels to the target via the server
--------------------------------------------------------------
"""
from . import tc
from . import ttb_client
class | (tc.target_extension_c):
"""
Extension to :py:class:`tcfl.tc.target_c` to create IP tunnels to
targets with IP connectivity.
Use by indicating a default IP address to use for interconnect
*ic* or explicitly indicating it in the :meth:`add` function:
>>> target.tunnel.ip_addr = target.addr_get(ic, "ipv4")
>>> target.tunnel.add(PORT)
>>> target.tunnel.remove(PORT)
>>> target.tunnel.list()
Note that for tunnels to work, the target has to be acquired and
IP has to be up on it, which might requires it to be connected to
some IP network (it can be a TCF interconnect or any other
network).
"""
def __init__(self, target):
self.target = target
# Tunnels can always be added, even the target is not in an
# interconnect
self.ip_addr = None
def _ip_addr_get(self, ip_addr):
# FIXME: this shall validate the IP address using python-ipaddress
if ip_addr:
return ip_addr
if self.ip_addr:
return self.ip_addr
ip_addr = self.target.rt.get(
'ipv4_addr', self.target.rt.get('ipv6_addr', None))
if ip_addr:
return ip_addr
raise RuntimeError(
"Cannot identify any IPv4 or IPv6 address to use; "
"please set it in "
"`TARGET.tunnel.ip_addr = TARGET.addr_get(ic, \"ipv4\")` "
"or pass it explicitly")
def add(self, port, ip_addr = None, proto = None):
"""
Setup a TCP/UDP/SCTP v4 or v5 tunnel to the target
A local port of the given protocol in the server is fowarded
to the target's port. Teardown with :meth:`remove`.
If the tunnel already exists, it is not recreated, but the
port it uses is returned.
Redirects targets TCP4 port 3000 to server_port in the server
that provides ``target`` (target.kws['server']).
>>> server_name = target.rtb.parsed_url.hostname
>>> server_port = target.tunnel.add(3000)
Now connecting to ``server_name:server_port`` takes you to the
target's port 3000.
:param int port: port to redirect to
:param str ip_addr: (optional) target's IP address to use (it
must be listed on the targets's tags *ipv4_address* or
*ipv6_address*).
:param str proto: (optional) Protocol to tunnel:
{udp,sctp,tcp}[{4,6}] (defaults to v4 and to TCP)
:returns int local_port: port in the server where to connect
to in order to access the target.
"""
if proto == None:
proto = 'tcp'
else:
assert isinstance(proto, str)
assert isinstance(port, int)
target = self.target
ip_addr = self._ip_addr_get(ip_addr)
r = target.rtb.rest_tb_target_ip_tunnel_add(
target.rt, ip_addr, port, proto, ticket = target.ticket)
self.target.report_info("%s tunnel added from %s:%d to %s:%d"
% (proto, target.rtb.parsed_url.hostname, r,
ip_addr, port))
return r
def remove(self, port, ip_addr = None, proto = None):
"""
Teardown a TCP/UDP/SCTP v4 or v5 tunnel to the target
previously created with :meth:`add`.
:param int port: port to redirect to
:param str ip_addr: (optional) target's IP address to use (it
must be listed on the targets's tags *ipv4_address* or
*ipv6_address*).
:param str proto: (optional) Protocol to tunnel:
{udp,sctp,tcp}[{4,6}] (defaults to v4 and to TCP)
"""
if proto == None:
proto = 'tcp'
else:
assert isinstance(proto, str)
assert isinstance(port, int)
ip_addr = self._ip_addr_get(ip_addr)
target = self.target
target.rtb.rest_tb_target_ip_tunnel_remove(
target.rt, ip_addr, port, proto, ticket = target.ticket)
def list(self):
"""
List existing IP tunnels
:returns: list of tuples (protocol, target-ip-address, port,
port-in-server)
"""
target = self.target
return target.rtb.rest_tb_target_ip_tunnel_list(target.rt,
ticket = target.ticket)
# FIXME: work out tcf creating target_c instances, so it is easier to
# automate creating cmdline wrappers
def cmdline_tunnel_add(args):
rtb, rt = ttb_client._rest_target_find_by_id(args.target)
port = rtb.rest_tb_target_ip_tunnel_add(rt, args.ip_addr,
args.port, args.protocol,
ticket = args.ticket)
print("%s:%d" % (rtb.parsed_url.hostname, port))
def cmdline_tunnel_remove(args):
rtb, rt = ttb_client._rest_target_find_by_id(args.target)
rtb.rest_tb_target_ip_tunnel_remove(rt, args.ip_addr,
args.port, args.protocol,
ticket = args.ticket)
def cmdline_tunnel_list(args):
rtb, rt = ttb_client._rest_target_find_by_id(args.target)
tunnels = rtb.rest_tb_target_ip_tunnel_list(rt, ticket = args.ticket)
for tunnel in tunnels:
print("%s %s:%s %s:%s" % (tunnel[0],
rtb.parsed_url.hostname, tunnel[3],
tunnel[1], tunnel[2]))
def cmdline_setup(argsp):
ap = argsp.add_parser("tunnel-add", help = "create an IP tunnel")
ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
default = None, help = "Target's name or URL")
ap.add_argument("port", metavar = "PORT", action = "store", type = int,
help = "Port to tunnel to")
ap.add_argument("protocol", metavar = "PROTOCOL", action = "store",
nargs = "?", default = None, type = str,
help = "Protocol to tunnel {tcp,udp,sctp}[{4,6}] "
"(defaults to tcp and to IPv4)")
ap.add_argument("ip_addr", metavar = "IP-ADDR", action = "store",
nargs = "?", default = None, type = str,
help = "target's IP address to tunnel to "
"(default is the first IP address the target declares)")
ap.set_defaults(func = cmdline_tunnel_add)
ap = argsp.add_parser("tunnel-remove",
help = "remove an existing IP tunnel")
ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
default = None, help = "Target's name or URL")
ap.add_argument("port", metavar = "PORT", action = "store",
help = "Port to tunnel to")
ap.add_argument("protocol", metavar = "PROTOCOL", action = "store",
nargs = "?", default = None,
help = "Protocol to tunnel {tcp,udp,sctp}[{4,6}] "
"(defaults to tcp and to IPv4)")
ap.add_argument("ip_addr", metavar = "IP-ADDR", action = "store",
nargs = "?", default = None,
help = "target's IP address to tunnel to "
"(default is the first IP address the target declares)")
ap.set_defaults(func = cmdline_tunnel_remove)
ap = argsp.add_parser("tunnel-list", help = "List existing IP tunnels")
ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
default = None, help = "Target's name or URL")
ap.set_defaults(func = cmdline_tunnel_list)
| tunnel |
0019_document.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('leonardo_form_pegastudio', '0018_pegastudioorders_file'),
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.CharField(max_length=255, blank=True)),
('document', models.FileField(upload_to=b'documents/')),
('uploaded_at', models.DateTimeField(auto_now_add=True)),
], | ] | ), |
index.ts | #!/usr/bin/env node
import * as chalk from 'chalk';
import * as program from 'commander';
import * as figlet from 'figlet';
import * as inquirer from 'inquirer';
import { modelQuestion } from './helper_scripts/Definitions/Entity';
import { serviceQuestion } from './helper_scripts/Definitions/Service';
console.clear();
console.log(
chalk.default(
figlet.textSync('microservice-cli')
)
);
const questions = [ | {
choices: ['Entity', 'Service'],
message: 'What would you like to add?',
name: 'fileType',
type: 'list'
}
];
program
.action(async () => {
const answers: { fileType: string } = await inquirer.prompt(questions);
switch (answers.fileType) {
case 'Entity':
await modelQuestion.showQuestions();
break;
case 'Service':
await serviceQuestion.showQuestions();
break;
default:
break;
}
});
program.parse(process.argv); | |
debug.rs | use std::fs::File;
use std::io::Write;
const PREFIX: &str = "nimbus";
pub fn | (post: &[u8], out: &[u8]) {
// Create dump file for lighthouse post
// in case lighthouse return an error, this file will be the pre-state
let mut file_post =
File::create(&format!("{}_debug_post.ssz", PREFIX)).expect("Cannot open debug_post file");
// write the content
file_post
.write(&post)
.expect("Cannot write debug_post file");
// Create dump file for other client post
let mut file_out =
File::create(&format!("{}_debug_out.ssz", PREFIX)).expect("Cannot open debug_out file");
// write the content
file_out.write(&out).expect("Cannot write debug_out file");
}
| dump_post_state |
script.js | $(function(){
'use strict';
//Owl-coursel
var $owl = $('.owl');
$owl.each( function() {
var $a = $(this);
$a.owlCarousel({
autoPlay: JSON.parse($a.attr('data-autoplay')),
singleItem: JSON.parse($a.attr('data-singleItem')),
items : $a.attr('data-items'),
itemsDesktop : [1199,$a.attr('data-itemsDesktop')],
itemsDesktopSmall : [992,$a.attr('data-itemsDesktopSmall')],
itemsTablet: [797,$a.attr('data-itemsTablet')],
itemsMobile : [479,$a.attr('data-itemsMobile')],
navigation : JSON.parse($a.attr('data-buttons')),
pagination: JSON.parse($a.attr('data-pag')),
navigationText: ["",""]
});
});
//zoom image
$('.image-zoom').magnificPopup({
type:'image'
});
//Menu
$('.menu-btn').on('click',function(e){
if($(this).hasClass('active'))
{
$('.menu-rs').animate({right: '-250px'},500);
}
else
{
$('.menu-rs').animate({right: '0px'},500);
}
});
$('.r-mv').on('click',function(){
$('.menu-rs').animate({right: '-250px'},500);
});
//Cart
$('.cart .dropdown-menu').on('click',function(e) {
e.stopPropagation();
});
//Reloader
$(window).load(function()
{
$('.preloader i').fadeOut();
$('.preloader').delay(500).fadeOut('slow');
$('body').delay(600).css({'overflow':'visible'});
});
//Search
$('.search-box').removeClass('active');
$('.icon-search').on('click',function(e)
{
e.preventDefault();
var $searchb = $('.search-box');
if(!$searchb.hasClass('active'))
{
$searchb.addClass('active');
$('.search-box input').val('');
$searchb.fadeIn();
$('.search-box input').focus();
$(this).addClass('fa-remove');
$(this).removeClass('fa-search');
}
else
{
$searchb.fadeOut();
$searchb.removeClass('active');
$(this).removeClass('fa-remove');
$(this).addClass('fa-search');
}
});
$('.search-box input').keypress(function(event){
if(event.keyCode == 13){
$('.icon-search').click();
}
});
//Header resize
window.addEventListener('scroll', function(e){
var $header=$('header');
var $tr=0;
var distanceY = window.pageYOffset || document.documentElement.scrollTop,
shrinkOn = $tr;
var header = document.querySelector("header");
if (distanceY > shrinkOn) {
classie.add(header,"smaller");
} else {
if (classie.has(header,"smaller")) {
classie.remove(header,"smaller");
}
} | }); | }); |
server.ts | import express, { Application, Request, Response, NextFunction } from 'express'; | export default function createServer() {
const app: Application = express();
app.get('/'),
(req: Request, res: Response, next: NextFunction) => {
res.sendStatus(200);
};
app.use(routes);
return app;
} | import routes from './routes';
|
ui_dialog_report_compare_coder_file.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_dialog_report_compare_coder_file.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog_reportCompareCoderFile(object):
def setupUi(self, Dialog_reportCompareCoderFile):
Dialog_reportCompareCoderFile.setObjectName("Dialog_reportCompareCoderFile")
Dialog_reportCompareCoderFile.setWindowModality(QtCore.Qt.NonModal)
Dialog_reportCompareCoderFile.resize(989, 580)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog_reportCompareCoderFile)
self.verticalLayout.setContentsMargins(1, 1, 1, 1)
self.verticalLayout.setSpacing(1)
self.verticalLayout.setObjectName("verticalLayout")
self.groupBox = QtWidgets.QGroupBox(Dialog_reportCompareCoderFile)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setMinimumSize(QtCore.QSize(0, 120))
self.groupBox.setMaximumSize(QtCore.QSize(16777215, 120))
self.groupBox.setTitle("")
self.groupBox.setObjectName("groupBox")
self.label_2 = QtWidgets.QLabel(self.groupBox)
self.label_2.setGeometry(QtCore.QRect(10, 20, 101, 22))
self.label_2.setObjectName("label_2")
self.comboBox_coders = QtWidgets.QComboBox(self.groupBox)
self.comboBox_coders.setGeometry(QtCore.QRect(112, 20, 211, 28))
self.comboBox_coders.setObjectName("comboBox_coders")
self.label_title = QtWidgets.QLabel(self.groupBox)
self.label_title.setGeometry(QtCore.QRect(10, -2, 291, 22))
self.label_title.setObjectName("label_title")
self.label_matrix = QtWidgets.QLabel(self.groupBox)
self.label_matrix.setGeometry(QtCore.QRect(600, 20, 30, 30))
self.label_matrix.setText("")
self.label_matrix.setObjectName("label_matrix")
self.label_memos = QtWidgets.QLabel(self.groupBox)
self.label_memos.setGeometry(QtCore.QRect(600, 70, 30, 30))
self.label_memos.setText("")
self.label_memos.setObjectName("label_memos")
self.label_selections = QtWidgets.QLabel(self.groupBox)
self.label_selections.setGeometry(QtCore.QRect(330, 20, 611, 28))
self.label_selections.setObjectName("label_selections")
self.pushButton_clear = QtWidgets.QPushButton(self.groupBox)
self.pushButton_clear.setGeometry(QtCore.QRect(50, 60, 32, 32))
self.pushButton_clear.setText("")
self.pushButton_clear.setObjectName("pushButton_clear")
self.pushButton_export_odt = QtWidgets.QPushButton(self.groupBox)
self.pushButton_export_odt.setGeometry(QtCore.QRect(90, 60, 32, 32))
self.pushButton_export_odt.setText("")
self.pushButton_export_odt.setObjectName("pushButton_export_odt")
self.pushButton_run = QtWidgets.QPushButton(self.groupBox)
self.pushButton_run.setGeometry(QtCore.QRect(10, 60, 32, 32))
self.pushButton_run.setText("")
self.pushButton_run.setObjectName("pushButton_run")
self.pushButton_help1 = QtWidgets.QPushButton(self.groupBox)
self.pushButton_help1.setGeometry(QtCore.QRect(130, 60, 32, 32))
self.pushButton_help1.setText("")
self.pushButton_help1.setObjectName("pushButton_help1")
self.verticalLayout.addWidget(self.groupBox)
self.groupBox_2 = QtWidgets.QGroupBox(Dialog_reportCompareCoderFile)
self.groupBox_2.setTitle("")
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayout = QtWidgets.QGridLayout(self.groupBox_2)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.splitter = QtWidgets.QSplitter(self.groupBox_2)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.splitter_vert = QtWidgets.QSplitter(self.splitter)
self.splitter_vert.setOrientation(QtCore.Qt.Vertical)
self.splitter_vert.setObjectName("splitter_vert")
self.listWidget_files = QtWidgets.QListWidget(self.splitter_vert)
self.listWidget_files.setObjectName("listWidget_files")
self.treeWidget = QtWidgets.QTreeWidget(self.splitter_vert)
self.treeWidget.setObjectName("treeWidget")
self.treeWidget.headerItem().setText(0, "Code Tree")
self.textEdit = QtWidgets.QTextEdit(self.splitter)
self.textEdit.setObjectName("textEdit")
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
self.verticalLayout.addWidget(self.groupBox_2)
self.retranslateUi(Dialog_reportCompareCoderFile)
QtCore.QMetaObject.connectSlotsByName(Dialog_reportCompareCoderFile)
Dialog_reportCompareCoderFile.setTabOrder(self.comboBox_coders, self.treeWidget)
Dialog_reportCompareCoderFile.setTabOrder(self.treeWidget, self.textEdit)
def | (self, Dialog_reportCompareCoderFile):
_translate = QtCore.QCoreApplication.translate
Dialog_reportCompareCoderFile.setWindowTitle(_translate("Dialog_reportCompareCoderFile", "Reports"))
self.label_2.setText(_translate("Dialog_reportCompareCoderFile", "Coders:"))
self.label_title.setToolTip(_translate("Dialog_reportCompareCoderFile", "To compare coding.\n"
"Select two coders, one file, one code."))
self.label_title.setText(_translate("Dialog_reportCompareCoderFile", "Coder comparisons by file"))
self.label_matrix.setToolTip(_translate("Dialog_reportCompareCoderFile", "<html><head/><body><p>Matrix options</p></body></html>"))
self.label_memos.setToolTip(_translate("Dialog_reportCompareCoderFile", "Memo reporting options"))
self.label_selections.setText(_translate("Dialog_reportCompareCoderFile", "Coders selected"))
self.pushButton_clear.setToolTip(_translate("Dialog_reportCompareCoderFile", "<html><head/><body><p>Clear selection</p></body></html>"))
self.pushButton_export_odt.setToolTip(_translate("Dialog_reportCompareCoderFile", "Export ODT file"))
self.pushButton_run.setToolTip(_translate("Dialog_reportCompareCoderFile", "<html><head/><body><p>Run comparison</p></body></html>"))
self.pushButton_help1.setToolTip(_translate("Dialog_reportCompareCoderFile", "Statistics explanation"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog_reportCompareCoderFile = QtWidgets.QDialog()
ui = Ui_Dialog_reportCompareCoderFile()
ui.setupUi(Dialog_reportCompareCoderFile)
Dialog_reportCompareCoderFile.show()
sys.exit(app.exec_())
| retranslateUi |
addrmanager.go | // Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2018 The Flo developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package addrmgr
import (
"container/list"
crand "crypto/rand" // for seeding
"encoding/base32"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"math/rand"
"net"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/bitspill/flod/chaincfg/chainhash"
"github.com/bitspill/flod/wire"
)
// AddrManager provides a concurrency safe address manager for caching potential
// peers on the bitcoin network.
type AddrManager struct {
mtx sync.Mutex
peersFile string
lookupFunc func(string) ([]net.IP, error)
rand *rand.Rand
key [32]byte
addrIndex map[string]*KnownAddress // address key to ka for all addrs.
addrNew [newBucketCount]map[string]*KnownAddress
addrTried [triedBucketCount]*list.List
started int32
shutdown int32
wg sync.WaitGroup
quit chan struct{}
nTried int
nNew int
lamtx sync.Mutex
localAddresses map[string]*localAddress
}
type serializedKnownAddress struct {
Addr string
Src string
Attempts int
TimeStamp int64
LastAttempt int64
LastSuccess int64
// no refcount or tried, that is available from context.
}
type serializedAddrManager struct {
Version int
Key [32]byte
Addresses []*serializedKnownAddress
NewBuckets [newBucketCount][]string // string is NetAddressKey
TriedBuckets [triedBucketCount][]string
}
type localAddress struct {
na *wire.NetAddress
score AddressPriority
}
// AddressPriority type is used to describe the hierarchy of local address
// discovery methods.
type AddressPriority int
const (
// InterfacePrio signifies the address is on a local interface
InterfacePrio AddressPriority = iota
// BoundPrio signifies the address has been explicitly bounded to.
BoundPrio
// UpnpPrio signifies the address was obtained from UPnP.
UpnpPrio
// HTTPPrio signifies the address was obtained from an external HTTP service.
HTTPPrio
// ManualPrio signifies the address was provided by --externalip.
ManualPrio
)
const (
// needAddressThreshold is the number of addresses under which the
// address manager will claim to need more addresses.
needAddressThreshold = 1000
// dumpAddressInterval is the interval used to dump the address
// cache to disk for future use.
dumpAddressInterval = time.Minute * 10
// triedBucketSize is the maximum number of addresses in each
// tried address bucket.
triedBucketSize = 256
// triedBucketCount is the number of buckets we split tried
// addresses over.
triedBucketCount = 64
// newBucketSize is the maximum number of addresses in each new address
// bucket.
newBucketSize = 64
// newBucketCount is the number of buckets that we spread new addresses
// over.
newBucketCount = 1024
// triedBucketsPerGroup is the number of tried buckets over which an
// address group will be spread.
triedBucketsPerGroup = 8
// newBucketsPerGroup is the number of new buckets over which an
// source address group will be spread.
newBucketsPerGroup = 64
// newBucketsPerAddress is the number of buckets a frequently seen new
// address may end up in.
newBucketsPerAddress = 8
// numMissingDays is the number of days before which we assume an
// address has vanished if we have not seen it announced in that long.
numMissingDays = 30
// numRetries is the number of tried without a single success before
// we assume an address is bad.
numRetries = 3
// maxFailures is the maximum number of failures we will accept without
// a success before considering an address bad.
maxFailures = 10
// minBadDays is the number of days since the last success before we
// will consider evicting an address.
minBadDays = 7
// getAddrMax is the most addresses that we will send in response
// to a getAddr (in practise the most addresses we will return from a
// call to AddressCache()).
getAddrMax = 2500
// getAddrPercent is the percentage of total addresses known that we
// will share with a call to AddressCache.
getAddrPercent = 23
// serialisationVersion is the current version of the on-disk format.
serialisationVersion = 1
)
// updateAddress is a helper function to either update an address already known
// to the address manager, or to add the address if not already known.
func (a *AddrManager) updateAddress(netAddr, srcAddr *wire.NetAddress) {
// Filter out non-routable addresses. Note that non-routable
// also includes invalid and local addresses.
if !IsRoutable(netAddr) {
return
}
addr := NetAddressKey(netAddr)
ka := a.find(netAddr)
if ka != nil {
// TODO: only update addresses periodically.
// Update the last seen time and services.
// note that to prevent causing excess garbage on getaddr
// messages the netaddresses in addrmaanger are *immutable*,
// if we need to change them then we replace the pointer with a
// new copy so that we don't have to copy every na for getaddr.
if netAddr.Timestamp.After(ka.na.Timestamp) ||
(ka.na.Services&netAddr.Services) !=
netAddr.Services {
naCopy := *ka.na
naCopy.Timestamp = netAddr.Timestamp
naCopy.AddService(netAddr.Services)
ka.na = &naCopy
}
// If already in tried, we have nothing to do here.
if ka.tried {
return
}
// Already at our max?
if ka.refs == newBucketsPerAddress {
return
}
// The more entries we have, the less likely we are to add more.
// likelihood is 2N.
factor := int32(2 * ka.refs)
if a.rand.Int31n(factor) != 0 {
return
}
} else {
// Make a copy of the net address to avoid races since it is
// updated elsewhere in the addrmanager code and would otherwise
// change the actual netaddress on the peer.
netAddrCopy := *netAddr
ka = &KnownAddress{na: &netAddrCopy, srcAddr: srcAddr}
a.addrIndex[addr] = ka
a.nNew++
// XXX time penalty?
}
bucket := a.getNewBucket(netAddr, srcAddr)
// Already exists?
if _, ok := a.addrNew[bucket][addr]; ok {
return
}
// Enforce max addresses.
if len(a.addrNew[bucket]) > newBucketSize {
log.Tracef("new bucket is full, expiring old")
a.expireNew(bucket)
}
// Add to new bucket.
ka.refs++
a.addrNew[bucket][addr] = ka
log.Tracef("Added new address %s for a total of %d addresses", addr,
a.nTried+a.nNew)
}
// expireNew makes space in the new buckets by expiring the really bad entries.
// If no bad entries are available we look at a few and remove the oldest.
func (a *AddrManager) expireNew(bucket int) {
// First see if there are any entries that are so bad we can just throw
// them away. otherwise we throw away the oldest entry in the cache.
// Bitcoind here chooses four random and just throws the oldest of
// those away, but we keep track of oldest in the initial traversal and
// use that information instead.
var oldest *KnownAddress
for k, v := range a.addrNew[bucket] {
if v.isBad() {
log.Tracef("expiring bad address %v", k)
delete(a.addrNew[bucket], k)
v.refs--
if v.refs == 0 {
a.nNew--
delete(a.addrIndex, k)
}
continue
}
if oldest == nil {
oldest = v
} else if !v.na.Timestamp.After(oldest.na.Timestamp) {
oldest = v
}
}
if oldest != nil {
key := NetAddressKey(oldest.na)
log.Tracef("expiring oldest address %v", key)
delete(a.addrNew[bucket], key)
oldest.refs--
if oldest.refs == 0 {
a.nNew--
delete(a.addrIndex, key)
}
}
}
// pickTried selects an address from the tried bucket to be evicted.
// We just choose the eldest. Bitcoind selects 4 random entries and throws away
// the older of them.
func (a *AddrManager) pickTried(bucket int) *list.Element {
var oldest *KnownAddress
var oldestElem *list.Element
for e := a.addrTried[bucket].Front(); e != nil; e = e.Next() {
ka := e.Value.(*KnownAddress)
if oldest == nil || oldest.na.Timestamp.After(ka.na.Timestamp) {
oldestElem = e
oldest = ka
}
}
return oldestElem
}
func (a *AddrManager) getNewBucket(netAddr, srcAddr *wire.NetAddress) int {
// bitcoind:
// doublesha256(key + sourcegroup + int64(doublesha256(key + group + sourcegroup))%bucket_per_source_group) % num_new_buckets
data1 := []byte{}
data1 = append(data1, a.key[:]...)
data1 = append(data1, []byte(GroupKey(netAddr))...)
data1 = append(data1, []byte(GroupKey(srcAddr))...)
hash1 := chainhash.DoubleHashB(data1)
hash64 := binary.LittleEndian.Uint64(hash1)
hash64 %= newBucketsPerGroup
var hashbuf [8]byte
binary.LittleEndian.PutUint64(hashbuf[:], hash64)
data2 := []byte{}
data2 = append(data2, a.key[:]...)
data2 = append(data2, GroupKey(srcAddr)...)
data2 = append(data2, hashbuf[:]...)
hash2 := chainhash.DoubleHashB(data2)
return int(binary.LittleEndian.Uint64(hash2) % newBucketCount)
}
func (a *AddrManager) getTriedBucket(netAddr *wire.NetAddress) int {
// bitcoind hashes this as:
// doublesha256(key + group + truncate_to_64bits(doublesha256(key)) % buckets_per_group) % num_buckets
data1 := []byte{}
data1 = append(data1, a.key[:]...)
data1 = append(data1, []byte(NetAddressKey(netAddr))...)
hash1 := chainhash.DoubleHashB(data1)
hash64 := binary.LittleEndian.Uint64(hash1)
hash64 %= triedBucketsPerGroup
var hashbuf [8]byte
binary.LittleEndian.PutUint64(hashbuf[:], hash64)
data2 := []byte{}
data2 = append(data2, a.key[:]...)
data2 = append(data2, GroupKey(netAddr)...)
data2 = append(data2, hashbuf[:]...)
hash2 := chainhash.DoubleHashB(data2)
return int(binary.LittleEndian.Uint64(hash2) % triedBucketCount)
}
// addressHandler is the main handler for the address manager. It must be run
// as a goroutine.
func (a *AddrManager) addressHandler() {
dumpAddressTicker := time.NewTicker(dumpAddressInterval)
defer dumpAddressTicker.Stop()
out:
for {
select {
case <-dumpAddressTicker.C:
a.savePeers()
case <-a.quit:
break out
}
}
a.savePeers()
a.wg.Done()
log.Trace("Address handler done")
}
// savePeers saves all the known addresses to a file so they can be read back
// in at next run.
func (a *AddrManager) savePeers() {
a.mtx.Lock()
defer a.mtx.Unlock()
// First we make a serialisable datastructure so we can encode it to
// json.
sam := new(serializedAddrManager)
sam.Version = serialisationVersion
copy(sam.Key[:], a.key[:])
sam.Addresses = make([]*serializedKnownAddress, len(a.addrIndex))
i := 0
for k, v := range a.addrIndex {
ska := new(serializedKnownAddress)
ska.Addr = k
ska.TimeStamp = v.na.Timestamp.Unix()
ska.Src = NetAddressKey(v.srcAddr)
ska.Attempts = v.attempts
ska.LastAttempt = v.lastattempt.Unix()
ska.LastSuccess = v.lastsuccess.Unix()
// Tried and refs are implicit in the rest of the structure
// and will be worked out from context on unserialisation.
sam.Addresses[i] = ska
i++
}
for i := range a.addrNew {
sam.NewBuckets[i] = make([]string, len(a.addrNew[i]))
j := 0
for k := range a.addrNew[i] {
sam.NewBuckets[i][j] = k
j++
}
}
for i := range a.addrTried {
sam.TriedBuckets[i] = make([]string, a.addrTried[i].Len())
j := 0
for e := a.addrTried[i].Front(); e != nil; e = e.Next() {
ka := e.Value.(*KnownAddress)
sam.TriedBuckets[i][j] = NetAddressKey(ka.na)
j++
}
}
w, err := os.Create(a.peersFile)
if err != nil {
log.Errorf("Error opening file %s: %v", a.peersFile, err)
return
}
enc := json.NewEncoder(w)
defer w.Close()
if err := enc.Encode(&sam); err != nil {
log.Errorf("Failed to encode file %s: %v", a.peersFile, err)
return
}
}
// loadPeers loads the known address from the saved file. If empty, missing, or
// malformed file, just don't load anything and start fresh
func (a *AddrManager) loadPeers() {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	if err := a.deserializePeers(a.peersFile); err != nil {
		log.Errorf("Failed to parse file %s: %v", a.peersFile, err)
		// if it is invalid we nuke the old one unconditionally.
		if rmErr := os.Remove(a.peersFile); rmErr != nil {
			log.Warnf("Failed to remove corrupt peers file %s: %v",
				a.peersFile, rmErr)
		}
		a.reset()
		return
	}
	log.Infof("Loaded %d addresses from file '%s'", a.numAddresses(), a.peersFile)
}
// deserializePeers reads the JSON peers file at filePath and rebuilds the
// in-memory address index plus the new/tried buckets from it. A missing file
// is not an error; a malformed, wrong-version, or internally inconsistent
// file is.
func (a *AddrManager) deserializePeers(filePath string) error {
	// Nothing to load if the file does not exist yet.
	_, err := os.Stat(filePath)
	if os.IsNotExist(err) {
		return nil
	}
	r, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("%s error opening file: %v", filePath, err)
	}
	defer r.Close()
	var sam serializedAddrManager
	dec := json.NewDecoder(r)
	err = dec.Decode(&sam)
	if err != nil {
		return fmt.Errorf("error reading %s: %v", filePath, err)
	}
	if sam.Version != serialisationVersion {
		return fmt.Errorf("unknown version %v in serialized "+
			"addrmanager", sam.Version)
	}
	copy(a.key[:], sam.Key[:])
	// Rebuild the address index first; the bucket entries below refer to
	// these addresses by key.
	for _, v := range sam.Addresses {
		ka := new(KnownAddress)
		ka.na, err = a.DeserializeNetAddress(v.Addr)
		if err != nil {
			return fmt.Errorf("failed to deserialize netaddress "+
				"%s: %v", v.Addr, err)
		}
		ka.srcAddr, err = a.DeserializeNetAddress(v.Src)
		if err != nil {
			return fmt.Errorf("failed to deserialize netaddress "+
				"%s: %v", v.Src, err)
		}
		ka.attempts = v.Attempts
		ka.lastattempt = time.Unix(v.LastAttempt, 0)
		ka.lastsuccess = time.Unix(v.LastSuccess, 0)
		a.addrIndex[NetAddressKey(ka.na)] = ka
	}
	for i := range sam.NewBuckets {
		for _, val := range sam.NewBuckets[i] {
			ka, ok := a.addrIndex[val]
			if !ok {
				return fmt.Errorf("newbucket contains %s but "+
					"none in address list", val)
			}
			// Count each address toward nNew only once no matter
			// how many new buckets reference it.
			if ka.refs == 0 {
				a.nNew++
			}
			ka.refs++
			a.addrNew[i][val] = ka
		}
	}
	for i := range sam.TriedBuckets {
		for _, val := range sam.TriedBuckets[i] {
			ka, ok := a.addrIndex[val]
			if !ok {
				// This message previously said "Newbucket" —
				// a copy-paste from the loop above.
				return fmt.Errorf("triedbucket contains %s but "+
					"none in address list", val)
			}
			ka.tried = true
			a.nTried++
			a.addrTried[i].PushBack(ka)
		}
	}
	// Sanity checking: every indexed address must be referenced by a new
	// bucket or be tried — and never both at once.
	for k, v := range a.addrIndex {
		if v.refs == 0 && !v.tried {
			return fmt.Errorf("address %s after serialisation "+
				"with no references", k)
		}
		if v.refs > 0 && v.tried {
			return fmt.Errorf("address %s after serialisation "+
				"which is both new and tried!", k)
		}
	}
	return nil
}
// DeserializeNetAddress converts a given address string to a *wire.NetAddress
func (a *AddrManager) DeserializeNetAddress(addr string) (*wire.NetAddress, error) {
	// Expect the serialized "host:port" form produced by NetAddressKey.
	host, portStr, err := net.SplitHostPort(addr)
	if err != nil {
		return nil, err
	}
	// Ports must fit in a uint16; bitSize 16 enforces the range.
	portVal, err := strconv.ParseUint(portStr, 10, 16)
	if err != nil {
		return nil, err
	}
	return a.HostToNetAddress(host, uint16(portVal), wire.SFNodeNetwork)
}
// Start begins the core address handler which manages a pool of known
// addresses, timeouts, and interval based writes. It is safe to call
// multiple times; only the first call has any effect.
func (a *AddrManager) Start() {
	// Already started? The atomic add returns 1 only for the first
	// caller, which makes Start idempotent.
	if atomic.AddInt32(&a.started, 1) != 1 {
		return
	}
	log.Trace("Starting address manager")
	// Load peers we already know about from file.
	a.loadPeers()
	// Start the address ticker to save addresses periodically.
	a.wg.Add(1)
	go a.addressHandler()
}
// Stop gracefully shuts down the address manager by stopping the main handler.
// It blocks until addressHandler has performed its final save and exited.
func (a *AddrManager) Stop() error {
	// The atomic add makes Stop idempotent; only the first caller
	// performs the shutdown sequence.
	if atomic.AddInt32(&a.shutdown, 1) != 1 {
		log.Warnf("Address manager is already in the process of " +
			"shutting down")
		return nil
	}
	log.Infof("Address manager shutting down")
	// Closing quit wakes addressHandler, which saves peers one final time
	// before signalling the wait group.
	close(a.quit)
	a.wg.Wait()
	return nil
}
// AddAddresses adds new addresses to the address manager. It enforces a max
// number of addresses and silently ignores duplicate addresses. It is
// safe for concurrent access.
func (a *AddrManager) AddAddresses(addrs []*wire.NetAddress, srcAddr *wire.NetAddress) {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	// updateAddress performs the per-address insertion bookkeeping.
	for i := range addrs {
		a.updateAddress(addrs[i], srcAddr)
	}
}
// AddAddress adds a new address to the address manager. It enforces a max
// number of addresses and silently ignores duplicate addresses. It is
// safe for concurrent access.
func (a *AddrManager) AddAddress(addr, srcAddr *wire.NetAddress) {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	// updateAddress performs the actual insert/refresh bookkeeping.
	a.updateAddress(addr, srcAddr)
}
// AddAddressByIP adds an address where we are given an ip:port and not a
// wire.NetAddress. It returns an error for malformed input; otherwise the
// address is handed to AddAddress.
func (a *AddrManager) AddAddressByIP(addrIP string) error {
	// Split IP and port
	addr, portStr, err := net.SplitHostPort(addrIP)
	if err != nil {
		return err
	}
	// Put it in wire.Netaddress
	ip := net.ParseIP(addr)
	if ip == nil {
		return fmt.Errorf("invalid ip address %s", addr)
	}
	// bitSize 16 rejects out-of-range ports (e.g. 70000) instead of
	// letting the uint16 conversion below silently truncate them.
	port, err := strconv.ParseUint(portStr, 10, 16)
	if err != nil {
		return fmt.Errorf("invalid port %s: %v", portStr, err)
	}
	na := wire.NewNetAddressIPPort(ip, uint16(port), 0)
	a.AddAddress(na, na) // XXX use correct src address
	return nil
}
// numAddresses returns the number of addresses known to the address manager.
// All visible call sites invoke it while holding a.mtx; use NumAddresses for
// the concurrency-safe variant.
func (a *AddrManager) numAddresses() int {
	return a.nTried + a.nNew
}
// NumAddresses returns the number of addresses known to the address manager.
// It is safe for concurrent access.
func (a *AddrManager) NumAddresses() int {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	return a.numAddresses()
}
// NeedMoreAddresses returns whether or not the address manager needs more
// addresses. It is safe for concurrent access.
func (a *AddrManager) NeedMoreAddresses() bool {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	// More addresses are wanted while below the configured threshold.
	return a.numAddresses() < needAddressThreshold
}
// AddressCache returns the current address cache. It must be treated as
// read-only (but since it is a copy now, this is not as dangerous).
func (a *AddrManager) AddressCache() []*wire.NetAddress {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	total := len(a.addrIndex)
	if total == 0 {
		return nil
	}
	// Snapshot every known address into a fresh slice.
	all := make([]*wire.NetAddress, 0, total)
	for _, ka := range a.addrIndex {
		all = append(all, ka.na)
	}
	// Share at most getAddrPercent of the addresses, capped at getAddrMax.
	limit := total * getAddrPercent / 100
	if limit > getAddrMax {
		limit = getAddrMax
	}
	// Partial Fisher-Yates shuffle: only the first `limit' entries are
	// returned, so only those positions need randomising.
	for i := 0; i < limit; i++ {
		j := rand.Intn(total-i) + i
		all[i], all[j] = all[j], all[i]
	}
	return all[:limit]
}
// reset resets the address manager by reinitialising the random source
// and allocating fresh empty bucket storage.
func (a *AddrManager) reset() {
	a.addrIndex = make(map[string]*KnownAddress)
	// fill key with bytes from a good random source.
	// NOTE(review): the io.ReadFull error is ignored; if the system
	// randomness source fails, a.key may be partially zeroed — consider
	// surfacing this error.
	io.ReadFull(crand.Reader, a.key[:])
	// New addresses live in maps keyed by address string...
	for i := range a.addrNew {
		a.addrNew[i] = make(map[string]*KnownAddress)
	}
	// ...while tried addresses live in ordered lists.
	for i := range a.addrTried {
		a.addrTried[i] = list.New()
	}
}
// HostToNetAddress returns a netaddress given a host address. If the address
// is a Tor .onion address this will be taken care of. Else if the host is
// not an IP address it will be resolved (via Tor if required).
func (a *AddrManager) HostToNetAddress(host string, port uint16, services wire.ServiceFlag) (*wire.NetAddress, error) {
	// Tor address is 16 char base32 + ".onion"
	var ip net.IP
	if len(host) == 22 && host[16:] == ".onion" {
		// go base32 encoding uses capitals (as does the rfc
		// but Tor and bitcoind tend to user lowercase, so we switch
		// case here.
		data, err := base32.StdEncoding.DecodeString(
			strings.ToUpper(host[:16]))
		if err != nil {
			return nil, err
		}
		// Embed the decoded onion identity in a synthetic IPv6
		// address under this fixed 6-byte prefix; ipString applies
		// the reverse transform (na.IP[6:] back to base32).
		prefix := []byte{0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43}
		ip = net.IP(append(prefix, data...))
	} else if ip = net.ParseIP(host); ip == nil {
		// Not a literal IP: resolve via the configured lookup
		// function and take the first result.
		ips, err := a.lookupFunc(host)
		if err != nil {
			return nil, err
		}
		if len(ips) == 0 {
			return nil, fmt.Errorf("no addresses found for %s", host)
		}
		ip = ips[0]
	}
	return wire.NewNetAddressIPPort(ip, port, services), nil
}
// ipString returns a string for the ip from the provided NetAddress. If the
// ip is in the range used for Tor addresses then it will be transformed into
// the relevant .onion address.
func ipString(na *wire.NetAddress) string {
	if IsOnionCatTor(na) {
		// We know now that na.IP is long enough.
		// Named "onion" so the local does not shadow the imported
		// encoding/base32 package.
		onion := base32.StdEncoding.EncodeToString(na.IP[6:])
		return strings.ToLower(onion) + ".onion"
	}
	return na.IP.String()
}
// NetAddressKey returns a string key in the form of ip:port for IPv4 addresses
// or [ip]:port for IPv6 addresses.
func NetAddressKey(na *wire.NetAddress) string {
	// JoinHostPort brackets IPv6 hosts automatically.
	return net.JoinHostPort(ipString(na), strconv.FormatUint(uint64(na.Port), 10))
}
// GetAddress returns a single address that should be routable. It picks a
// random one from the possible addresses with preference given to ones that
// have not been used recently and should not pick 'close' addresses
// consecutively.
func (a *AddrManager) GetAddress() *KnownAddress {
// Protect concurrent access.
a.mtx.Lock()
defer a.mtx.Unlock()
if a.numAddresses() == 0 {
return nil
}
// Use a 50% chance for choosing between tried and new table entries.
if a.nTried > 0 && (a.nNew == 0 || a.rand.Intn(2) == 0) {
// Tried entry.
large := 1 << 30
factor := 1.0
for {
// pick a random bucket.
bucket := a.rand.Intn(len(a.addrTried))
if a.addrTried[bucket].Len() == 0 {
continue
}
| a.rand.Int63n(int64(a.addrTried[bucket].Len())); i > 0; i-- {
e = e.Next()
}
ka := e.Value.(*KnownAddress)
randval := a.rand.Intn(large)
if float64(randval) < (factor * ka.chance() * float64(large)) {
log.Tracef("Selected %v from tried bucket",
NetAddressKey(ka.na))
return ka
}
factor *= 1.2
}
} else {
// new node.
// XXX use a closure/function to avoid repeating this.
large := 1 << 30
factor := 1.0
for {
// Pick a random bucket.
bucket := a.rand.Intn(len(a.addrNew))
if len(a.addrNew[bucket]) == 0 {
continue
}
// Then, a random entry in it.
var ka *KnownAddress
nth := a.rand.Intn(len(a.addrNew[bucket]))
for _, value := range a.addrNew[bucket] {
if nth == 0 {
ka = value
}
nth--
}
randval := a.rand.Intn(large)
if float64(randval) < (factor * ka.chance() * float64(large)) {
log.Tracef("Selected %v from new bucket",
NetAddressKey(ka.na))
return ka
}
factor *= 1.2
}
}
}
// find returns the KnownAddress for addr, or nil if it is not known.
// All visible call sites invoke it while holding a.mtx.
func (a *AddrManager) find(addr *wire.NetAddress) *KnownAddress {
	return a.addrIndex[NetAddressKey(addr)]
}
// Attempt increases the given address' attempt counter and updates
// the last attempt time.
func (a *AddrManager) Attempt(addr *wire.NetAddress) {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	// Look the address up; unknown addresses are silently ignored.
	known := a.find(addr)
	if known == nil {
		return
	}
	// Record the attempt and stamp it with the current time.
	known.attempts++
	known.lastattempt = time.Now()
}
// Connected Marks the given address as currently connected and working at the
// current time. The address must already be known to AddrManager else it will
// be ignored.
func (a *AddrManager) Connected(addr *wire.NetAddress) {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	ka := a.find(addr)
	if ka == nil {
		return
	}
	// Update the time as long as it has been 20 minutes since last we did
	// so.
	now := time.Now()
	if now.After(ka.na.Timestamp.Add(time.Minute * 20)) {
		// ka.na is immutable, so replace it. Reuse the timestamp
		// captured above rather than reading the clock a second time.
		naCopy := *ka.na
		naCopy.Timestamp = now
		ka.na = &naCopy
	}
}
// Good marks the given address as good. To be called after a successful
// connection and version exchange. If the address is unknown to the address
// manager it will be ignored.
func (a *AddrManager) Good(addr *wire.NetAddress) {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	ka := a.find(addr)
	if ka == nil {
		return
	}
	// ka.Timestamp is not updated here to avoid leaking information
	// about currently connected peers.
	now := time.Now()
	ka.lastsuccess = now
	ka.lastattempt = now
	ka.attempts = 0
	// move to tried set, optionally evicting other addresses if needed.
	if ka.tried {
		return
	}
	// ok, need to move it to tried.
	// remove from all new buckets.
	// record one of the buckets in question and call it the `first'
	addrKey := NetAddressKey(addr)
	oldBucket := -1
	for i := range a.addrNew {
		// we check for existence so we can record the first one
		if _, ok := a.addrNew[i][addrKey]; ok {
			delete(a.addrNew[i], addrKey)
			ka.refs--
			if oldBucket == -1 {
				oldBucket = i
			}
		}
	}
	a.nNew--
	if oldBucket == -1 {
		// What? wasn't in a bucket after all.... Panic?
		return
	}
	bucket := a.getTriedBucket(ka.na)
	// Room in this tried bucket?
	if a.addrTried[bucket].Len() < triedBucketSize {
		ka.tried = true
		a.addrTried[bucket].PushBack(ka)
		a.nTried++
		return
	}
	// No room, we have to evict something else.
	entry := a.pickTried(bucket)
	rmka := entry.Value.(*KnownAddress)
	// First bucket it would have been put in.
	newBucket := a.getNewBucket(rmka.na, rmka.srcAddr)
	// If no room in the original bucket, we put it in a bucket we just
	// freed up a space in.
	if len(a.addrNew[newBucket]) >= newBucketSize {
		newBucket = oldBucket
	}
	// replace with ka in list.
	ka.tried = true
	entry.Value = ka
	rmka.tried = false
	rmka.refs++
	// We don't touch a.nTried here since the number of tried stays the same
	// but we decremented new above, raise it again since we're putting
	// something back.
	a.nNew++
	rmkey := NetAddressKey(rmka.na)
	log.Tracef("Replacing %s with %s in tried", rmkey, addrKey)
	// We made sure there is space here just above.
	a.addrNew[newBucket][rmkey] = rmka
}
// AddLocalAddress adds na to the list of known local addresses to advertise
// with the given priority. It returns an error if na is not routable.
func (a *AddrManager) AddLocalAddress(na *wire.NetAddress, priority AddressPriority) error {
	// Only routable addresses are worth advertising.
	if !IsRoutable(na) {
		return fmt.Errorf("address %s is not routable", na.IP)
	}
	a.lamtx.Lock()
	defer a.lamtx.Unlock()
	key := NetAddressKey(na)
	la, ok := a.localAddresses[key]
	if !ok || la.score < priority {
		if ok {
			// NOTE(review): an existing entry is bumped to
			// priority+1 rather than set to priority — presumably
			// to favour repeatedly-reported addresses; confirm this
			// is intentional.
			la.score = priority + 1
		} else {
			a.localAddresses[key] = &localAddress{
				na: na,
				score: priority,
			}
		}
	}
	return nil
}
// getReachabilityFrom returns the relative reachability of the provided local
// address to the provided remote address. Higher values mean the local
// address is a better candidate to advertise to that remote peer.
func getReachabilityFrom(localAddr, remoteAddr *wire.NetAddress) int {
	// Note the iota placement: Unreachable is 0, and Default is 1 (iota
	// is 1 on the second spec line), so the remaining names rank
	// Teredo=2, Ipv6Weak=3, Ipv4=4, Ipv6Strong=5, Private=6.
	const (
		Unreachable = 0
		Default = iota
		Teredo
		Ipv6Weak
		Ipv4
		Ipv6Strong
		Private
	)
	if !IsRoutable(remoteAddr) {
		return Unreachable
	}
	// Tor peers prefer Tor local addresses, then routable IPv4.
	if IsOnionCatTor(remoteAddr) {
		if IsOnionCatTor(localAddr) {
			return Private
		}
		if IsRoutable(localAddr) && IsIPv4(localAddr) {
			return Ipv4
		}
		return Default
	}
	// Teredo (RFC 4380) remote peers.
	if IsRFC4380(remoteAddr) {
		if !IsRoutable(localAddr) {
			return Default
		}
		if IsRFC4380(localAddr) {
			return Teredo
		}
		if IsIPv4(localAddr) {
			return Ipv4
		}
		return Ipv6Weak
	}
	// Plain IPv4 remote peers can only be reached from routable IPv4.
	if IsIPv4(remoteAddr) {
		if IsRoutable(localAddr) && IsIPv4(localAddr) {
			return Ipv4
		}
		return Unreachable
	}
	/* ipv6 */
	var tunnelled bool
	// Is our v6 is tunnelled?
	if IsRFC3964(localAddr) || IsRFC6052(localAddr) || IsRFC6145(localAddr) {
		tunnelled = true
	}
	if !IsRoutable(localAddr) {
		return Default
	}
	if IsRFC4380(localAddr) {
		return Teredo
	}
	if IsIPv4(localAddr) {
		return Ipv4
	}
	if tunnelled {
		// only prioritise ipv6 if we aren't tunnelling it.
		return Ipv6Weak
	}
	return Ipv6Strong
}
// GetBestLocalAddress returns the most appropriate local address to use
// for the given remote address.
func (a *AddrManager) GetBestLocalAddress(remoteAddr *wire.NetAddress) *wire.NetAddress {
	a.lamtx.Lock()
	defer a.lamtx.Unlock()
	// Scan all local addresses, keeping the one with the best
	// reachability (score breaks ties).
	bestReach := 0
	var bestScore AddressPriority
	var best *wire.NetAddress
	for _, la := range a.localAddresses {
		reach := getReachabilityFrom(la.na, remoteAddr)
		betterReach := reach > bestReach
		betterScore := reach == bestReach && la.score > bestScore
		if betterReach || betterScore {
			bestReach = reach
			bestScore = la.score
			best = la.na
		}
	}
	if best == nil {
		log.Debugf("No worthy address for %s:%d", remoteAddr.IP,
			remoteAddr.Port)
		// Send something unroutable if nothing suitable.
		ip := net.IPv4zero
		if !IsIPv4(remoteAddr) && !IsOnionCatTor(remoteAddr) {
			ip = net.IPv6zero
		}
		services := wire.SFNodeNetwork | wire.SFNodeWitness | wire.SFNodeBloom
		return wire.NewNetAddressIPPort(ip, 0, services)
	}
	log.Debugf("Suggesting address %s:%d for %s:%d", best.IP,
		best.Port, remoteAddr.IP, remoteAddr.Port)
	return best
}
// New returns a new bitcoin address manager.
// Use Start to begin processing asynchronous address updates.
func New(dataDir string, lookupFunc func(string) ([]net.IP, error)) *AddrManager {
am := AddrManager{
peersFile: filepath.Join(dataDir, "peers.json"),
lookupFunc: lookupFunc,
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
quit: make(chan struct{}),
localAddresses: make(map[string]*localAddress),
}
am.reset()
return &am
} | // Pick a random entry in the list