file_name (string, 3-137 chars) | prefix (string, 0-918k chars) | suffix (string, 0-962k chars) | middle (string, 0-812k chars)
---|---|---|---
register.rs | use std::os::unix::io::RawFd;
use std::{io, mem, ptr};
use crate::sys;
pub(crate) fn execute(
fd: RawFd,
opcode: libc::c_uint,
arg: *const libc::c_void,
len: libc::c_uint,
) -> io::Result<i32> {
unsafe {
let ret = sys::io_uring_register(fd, opcode, arg, len);
if ret >= 0 {
Ok(ret)
} else {
Err(io::Error::last_os_error())
}
}
}
pub struct Probe(ptr::NonNull<sys::io_uring_probe>);
impl Probe {
pub(crate) const COUNT: usize = 256;
pub(crate) const SIZE: usize = mem::size_of::<sys::io_uring_probe>()
+ Self::COUNT * mem::size_of::<sys::io_uring_probe_op>();
#[allow(clippy::cast_ptr_alignment)]
pub fn new() -> Probe {
use std::alloc::{alloc_zeroed, Layout};
let probe_align = Layout::new::<sys::io_uring_probe>().align();
let ptr = unsafe {
let probe_layout = Layout::from_size_align_unchecked(Probe::SIZE, probe_align);
alloc_zeroed(probe_layout)
};
ptr::NonNull::new(ptr)
.map(ptr::NonNull::cast)
.map(Probe)
.expect("Probe alloc failed!")
}
#[inline]
pub(crate) fn as_mut_ptr(&mut self) -> *mut sys::io_uring_probe {
self.0.as_ptr()
}
pub fn is_supported(&self, opcode: u8) -> bool {
unsafe {
let probe = &*self.0.as_ptr();
if opcode <= probe.last_op {
let ops = probe.ops.as_slice(Self::COUNT);
ops[opcode as usize].flags & (sys::IO_URING_OP_SUPPORTED as u16) != 0
} else {
false
}
}
}
}
impl Default for Probe {
#[inline]
fn default() -> Probe {
Probe::new()
}
}
impl Drop for Probe {
fn drop(&mut self) {
use std::alloc::{dealloc, Layout};
let probe_align = Layout::new::<sys::io_uring_probe>().align();
unsafe {
let probe_layout = Layout::from_size_align_unchecked(Probe::SIZE, probe_align);
dealloc(self.0.as_ptr() as *mut _, probe_layout);
}
}
}
#[cfg(feature = "unstable")]
#[repr(transparent)]
pub struct Restriction(sys::io_uring_restriction);
/// inline zeroed to improve codegen
#[cfg(feature = "unstable")]
#[inline(always)]
fn res_zeroed() -> sys::io_uring_restriction {
unsafe { std::mem::zeroed() }
}
#[cfg(feature = "unstable")]
impl Restriction {
pub fn register_op(op: u8) -> Restriction {
let mut res = res_zeroed();
res.opcode = sys::IORING_RESTRICTION_REGISTER_OP as _;
res.__bindgen_anon_1.register_op = op;
Restriction(res)
}
pub fn | (op: u8) -> Restriction {
let mut res = res_zeroed();
res.opcode = sys::IORING_RESTRICTION_SQE_OP as _;
res.__bindgen_anon_1.sqe_op = op;
Restriction(res)
}
pub fn sqe_flags_allowed(flags: u8) -> Restriction {
let mut res = res_zeroed();
res.opcode = sys::IORING_RESTRICTION_SQE_FLAGS_ALLOWED as _;
res.__bindgen_anon_1.sqe_flags = flags;
Restriction(res)
}
pub fn sqe_flags_required(flags: u8) -> Restriction {
let mut res = res_zeroed();
res.opcode = sys::IORING_RESTRICTION_SQE_FLAGS_REQUIRED as _;
res.__bindgen_anon_1.sqe_flags = flags;
Restriction(res)
}
}
| sqe_op |
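The `Probe::is_supported` check above boils down to an index bound plus a flag test. A pure-Python restatement for clarity (simplified layout, not the real liburing ABI; the `IO_URING_OP_SUPPORTED` value matches the kernel header but is restated here as an assumption):

```python
# Simplified restatement of Probe::is_supported; not the real probe layout.
IO_URING_OP_SUPPORTED = 1 << 0  # assumption: matches the kernel constant

def is_supported(opcode, last_op, ops_flags):
    # ops_flags[i] holds the probe-op flags reported for opcode i
    return opcode <= last_op and (ops_flags[opcode] & IO_URING_OP_SUPPORTED) != 0

ops_flags = [IO_URING_OP_SUPPORTED, 0, IO_URING_OP_SUPPORTED]
assert is_supported(0, last_op=2, ops_flags=ops_flags)
assert not is_supported(1, last_op=2, ops_flags=ops_flags)   # probed, unsupported
assert not is_supported(5, last_op=2, ops_flags=ops_flags)   # beyond last_op
```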
Livechat.d.ts | export = Livechat;
declare class Livechat extends EventEmitter {
constructor(session: any, token: any, channel_id: any, video_id: any);
ctoken: any;
session: any;
video_id: any;
channel_id: any;
message_queue: any[];
id_cache: any[];
poll_intervals_ms: number;
running: boolean;
metadata_ctoken: any;
livechat_poller: NodeJS.Timeout;
sendMessage(text: any): Promise<any>;
/**
* Blocks a user.
* @todo Implement this method.
* @param {object} msg_params
*/
blockUser(msg_params: object): Promise<void>; | }
import EventEmitter = require("events"); | stop(): void;
#private; |
test_nint32.py | import pytest
import math
import numpy as np
from numpy.testing import assert_equal
from numtypes import nint32
def test_basic():
x = nint32(3)
assert x == 3
assert int(x) == 3
| def test_init_np_types(typ):
x = nint32(typ(123))
assert x == 123
def test_init_str_type():
x = nint32("123")
assert x == 123
def test_comparison():
x = nint32(100)
y = nint32(-500)
assert x > 0
assert x < 200
assert x < 123.4
assert x <= 200
assert 200 >= x
assert x == 100
assert x > y
assert x >= y
assert y < x
assert y <= x
assert x != y
def test_true_division():
x = nint32(20)
y = nint32(10)
z = x / y
assert isinstance(z, float)
assert z == 2.0
@pytest.mark.parametrize('nanstr', ['nan', '\t+NAN ', '-nAn'])
def test_nan_str(nanstr):
z = nint32(nanstr)
assert math.isnan(float(z))
assert math.isnan(z + 1.5)
def test_nan():
z = nint32(math.nan)
assert math.isnan(float(z))
assert z != z
def test_bool():
assert bool(nint32(123))
assert bool(nint32('nan'))
assert not bool(nint32(0))
def test_other():
z = 1.0 + 2.0j
a = nint32(2)
w = z / a
assert w == z/2
@pytest.mark.parametrize('value', [2**31, -2**31, 2**65])
def test_init_arg_too_big(value):
with pytest.raises(OverflowError, match='int too big to convert'):
nint32(value)
@pytest.mark.parametrize('arg', [2.5, None, 'abc'])
def test_init_bad_arg(arg):
with pytest.raises(TypeError, match='argument must be'):
nint32(arg)
@pytest.mark.parametrize('extreme_func, expected',
[(np.maximum, [20, 10, 18]),
(np.minimum, [10, -2, 10])])
def test_extreme_func(extreme_func, expected):
a = np.array([10, -2, 18], dtype=np.int32).astype(nint32)
b = np.array([20, 10, 10], dtype=np.int32).astype(nint32)
m = extreme_func(a, b)
assert m.dtype == nint32
assert_equal(m, expected)
@pytest.mark.parametrize('methodname, expected', [('min', -2), ('max', 18)])
def test_extreme_method(methodname, expected):
a = np.array([10, -2, 18], dtype=nint32)
m = getattr(a, methodname)()
assert m.dtype == nint32
assert m == expected
@pytest.mark.parametrize('methodname', ['min', 'max'])
def test_extreme_method_with_nan(methodname):
a = np.array([10, np.nan, -2, 18], dtype=nint32)
m = getattr(a, methodname)()
assert m.dtype == nint32
assert np.isnan(m) | @pytest.mark.parametrize('typ', [np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]) |
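A plain-Python companion to the overflow tests above. The int32 bounds are standard; treating -2**31 as a reserved NaN sentinel is only an assumption suggested by `test_init_arg_too_big` rejecting it alongside 2**31.

```python
# Hypothetical helper, not part of numtypes: the range the overflow tests imply.
INT32_MIN, INT32_MAX = -2**31, 2**31 - 1

def fits_in_nint32(value):
    # assumption: -2**31 is reserved (e.g. as the nan sentinel), so it is excluded
    return INT32_MIN < value <= INT32_MAX

assert fits_in_nint32(2**31 - 1)       # largest representable value
assert not fits_in_nint32(2**31)       # raises OverflowError in the tests
assert not fits_in_nint32(-2**31)      # also rejected by the tests
```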
fun.py | """Module with functions which are supposed to be as fast as possible"""
from stat import S_ISDIR
__all__ = ('tree_to_stream', 'tree_entries_from_data', 'traverse_trees_recursive',
'traverse_tree_recursive')
def tree_to_stream(entries, write):
"""Write the give list of entries into a stream using its write method
:param entries: **sorted** list of tuples with (binsha, mode, name)
:param write: write method which takes a data string"""
ord_zero = ord('0')
bit_mask = 7 # 3 bits set
for binsha, mode, name in entries:
mode_str = ''
for i in xrange(6):
mode_str = chr(((mode >> (i*3)) & bit_mask) + ord_zero) + mode_str
# END for each 8 octal value
# git slices away the first octal if it's zero
if mode_str[0] == '0':
mode_str = mode_str[1:]
# END save a byte
# here it comes: if the name is actually unicode, the replacement below
# will not work as the binsha is not part of the ascii unicode encoding -
# hence we must convert to a utf8 string for it to work properly.
# According to my tests, this is exactly what git does, that is it just
# takes the input literally, which appears to be utf8 on linux.
if isinstance(name, unicode):
name = name.encode("utf8")
write("%s %s\0%s" % (mode_str, name, binsha))
# END for each item
def tree_entries_from_data(data):
"""Reads the binary representation of a tree and returns tuples of Tree items
:param data: data block with tree data
:return: list(tuple(binsha, mode, tree_relative_path), ...)"""
ord_zero = ord('0')
len_data = len(data)
i = 0
out = list()
while i < len_data:
mode = 0
# read mode
# Some git versions truncate the leading 0, some don't
# The type will be extracted from the mode later
while data[i] != ' ':
# move existing mode integer up one level being 3 bits
# and add the actual ordinal value of the character
mode = (mode << 3) + (ord(data[i]) - ord_zero)
i += 1
# END while reading mode
# byte is space now, skip it
i += 1
# parse name, it is NULL separated
ns = i
while data[i] != '\0':
i += 1
# END while not reached NULL
# default encoding for strings in git is utf8
# Only use the respective unicode object if the byte stream was encoded
name = data[ns:i]
name_enc = name.decode("utf-8")
if len(name) > len(name_enc):
name = name_enc
# END handle encoding
# byte is NULL, get next 20
i += 1
sha = data[i:i+20]
i = i + 20
out.append((sha, mode, name))
# END for each byte in data stream
return out
def _find_by_name(tree_data, name, is_dir, start_at):
"""return data entry matching the given name and tree mode
or None.
Before the item is returned, the respective data item is set
None in the tree_data list to mark it done"""
try:
item = tree_data[start_at]
if item and item[2] == name and S_ISDIR(item[1]) == is_dir:
tree_data[start_at] = None
return item
except IndexError:
pass
# END exception handling
for index, item in enumerate(tree_data):
if item and item[2] == name and S_ISDIR(item[1]) == is_dir:
tree_data[index] = None
return item
# END if item matches
# END for each item
return None
def _to_full_path(item, path_prefix):
"""Rebuild entry with given path prefix"""
if not item:
return item
return (item[0], item[1], path_prefix+item[2])
def traverse_trees_recursive(odb, tree_shas, path_prefix):
"""
:return: list with entries according to the given binary tree-shas.
The result is encoded in a list
of n tuple|None per blob/commit, (n == len(tree_shas)), where
* [0] == 20 byte sha
* [1] == mode as int
* [2] == path relative to working tree root
The entry tuple is None if the respective blob/commit did not
exist in the given tree.
:param tree_shas: iterable of shas pointing to trees. All trees must
be on the same level. A tree-sha may be None, in which case None entries are returned for that tree
:param path_prefix: a prefix to be added to the returned paths on this level,
set it '' for the first iteration
:note: The ordering of the returned items will be partially lost"""
trees_data = list()
nt = len(tree_shas)
for tree_sha in tree_shas:
if tree_sha is None:
data = list()
else:
data = tree_entries_from_data(odb.stream(tree_sha).read())
# END handle muted trees
trees_data.append(data)
# END for each sha to get data for
out = list()
out_append = out.append
# find all matching entries and recursively process them together if the match
# is a tree. If the match is a non-tree item, put it into the result.
# Processed items will be set None
for ti, tree_data in enumerate(trees_data):
for ii, item in enumerate(tree_data): | entries[ti] = item
sha, mode, name = item # its faster to unpack
is_dir = S_ISDIR(mode) # type mode bits
# find this item in all other tree data items
# wrap around, but stop one before our current index, hence
# ti+nt, not ti+1+nt
for tio in range(ti+1, ti+nt):
tio = tio % nt
entries[tio] = _find_by_name(trees_data[tio], name, is_dir, ii)
# END for each other item data
# if we are a directory, enter recursion
if is_dir:
out.extend(traverse_trees_recursive(odb, [((ei and ei[0]) or None) for ei in entries], path_prefix+name+'/'))
else:
out_append(tuple(_to_full_path(e, path_prefix) for e in entries))
# END handle recursion
# finally mark it done
tree_data[ii] = None
# END for each item
# we are done with one tree, set all its data empty
del(tree_data[:])
# END for each tree_data chunk
return out
def traverse_tree_recursive(odb, tree_sha, path_prefix):
"""
:return: list of entries of the tree pointed to by the binary tree_sha. An entry
has the following format:
* [0] 20 byte sha
* [1] mode as int
* [2] path relative to the repository
:param path_prefix: prefix to prepend to the front of all returned paths"""
entries = list()
data = tree_entries_from_data(odb.stream(tree_sha).read())
# unpacking/packing is faster than accessing individual items
for sha, mode, name in data:
if S_ISDIR(mode):
entries.extend(traverse_tree_recursive(odb, sha, path_prefix+name+'/'))
else:
entries.append((sha, mode, path_prefix+name))
# END for each item
return entries | if not item:
continue
# END skip already done items
entries = [ None for n in range(nt) ] |
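For readers unfamiliar with the binary layout that `tree_to_stream` writes and `tree_entries_from_data` parses, here is a small Python 3 sketch of one tree entry. It is illustrative only and not part of the module above (which is Python 2 code, hence `xrange` and `unicode`).

```python
# Illustrative sketch of the git tree entry layout used above:
# b"<octal mode> <name>\0<20-byte binary sha>", entries concatenated back to back.
import hashlib

def make_tree_entry(mode, name, binsha):
    assert len(binsha) == 20, "binsha must be a raw 20-byte SHA-1"
    mode_str = oct(mode)[2:].encode("ascii")        # 0o100644 -> b"100644"
    return mode_str + b" " + name.encode("utf8") + b"\0" + binsha

blob_sha = hashlib.sha1(b"blob 0\0").digest()       # object id of an empty blob
entry = make_tree_entry(0o100644, "hello.txt", blob_sha)
print(entry[:16])                                    # b'100644 hello.txt'
```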
compact.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::proptest::gen_main_struct;
use anyhow::Result;
use fbthrift::compact_protocol::{deserialize, serialize};
use fbthrift::ttype::TType;
use fbthrift_test_if::Un;
use proptest::prelude::*;
#[test]
fn test_unknown_union() -> Result<()> |
proptest! {
#[test]
fn test_prop_serialize_deserialize(s in gen_main_struct()) {
let processed = deserialize(serialize(&s)).unwrap();
prop_assert_eq!(s, processed);
}
}
| {
// Build the empty union
let u = Un::default();
let s = serialize(&u);
// only TType::Stop
assert_eq!(&[TType::Stop as u8], s.as_ref());
// Assert that deserialize builds the exact same struct
assert_eq!(u, deserialize(s).unwrap());
// ...
// extra weirdness
// Build an explicit unknown
let explicit_unknown = Un::UnknownField(100);
let s2 = serialize(&explicit_unknown);
// only Stop
assert_eq!(&[TType::Stop as u8], s2.as_ref());
Ok(())
} |
TextProps.js | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*
* @flow strict-local
* @format
*/
'use strict';
import type {LayoutEvent, PressEvent, TextLayoutEvent} from 'CoreEventTypes';
import type React from 'React';
import type {TextStyleProp} from 'StyleSheet';
import type {
AccessibilityRole,
AccessibilityStates,
AccessibilityTrait,
} from 'ViewAccessibility';
export type PressRetentionOffset = $ReadOnly<{|
top: number,
left: number,
bottom: number,
right: number,
|}>;
/**
* @see https://facebook.github.io/react-native/docs/text.html#reference
*/
export type TextProps = $ReadOnly<{
/**
* Indicates whether the view is an accessibility element.
*
* See https://facebook.github.io/react-native/docs/text.html#accessible
*/
accessible?: ?boolean,
accessibilityRole?: ?AccessibilityRole,
accessibilityStates?: ?AccessibilityStates,
accessibilityTraits?: ?(AccessibilityTrait | Array<AccessibilityTrait>),
/**
* Whether font should be scaled down automatically.
*
* See https://facebook.github.io/react-native/docs/text.html#adjustsfontsizetofit
*/
adjustsFontSizeToFit?: ?boolean,
/**
* Whether fonts should scale to respect Text Size accessibility settings.
*
* See https://facebook.github.io/react-native/docs/text.html#allowfontscaling
*/
allowFontScaling?: ?boolean,
children?: ?React.Node,
/**
* When `numberOfLines` is set, this prop defines how text will be
* truncated.
*
* See https://facebook.github.io/react-native/docs/text.html#ellipsizemode
*/
ellipsizeMode?: ?('clip' | 'head' | 'middle' | 'tail'),
/**
* Specifies largest possible scale a font can reach when `allowFontScaling` is enabled.
* Possible values:
* `null/undefined` (default): inherit from the parent node or the global default (0)
* `0`: no max, ignore parent/global default
* `>= 1`: sets the maxFontSizeMultiplier of this node to this value
*/
maxFontSizeMultiplier?: ?number,
/**
* Used to locate this view from native code.
*
* See https://facebook.github.io/react-native/docs/text.html#nativeid
*/
nativeID?: ?string,
/**
* Used to truncate the text with an ellipsis.
*
* See https://facebook.github.io/react-native/docs/text.html#numberoflines
*/
numberOfLines?: ?number,
/**
* Invoked on mount and layout changes.
*
* See https://facebook.github.io/react-native/docs/text.html#onlayout
*/
onLayout?: ?(event: LayoutEvent) => mixed,
/**
* This function is called on long press.
*
* See https://facebook.github.io/react-native/docs/text.html#onlongpress
*/
onLongPress?: ?(event: PressEvent) => mixed,
/**
* This function is called on press.
*
* See https://facebook.github.io/react-native/docs/text.html#onpress
*/
onPress?: ?(event: PressEvent) => mixed,
onResponderGrant?: ?(event: PressEvent, dispatchID: string) => void,
onResponderMove?: ?(event: PressEvent) => void,
onResponderRelease?: ?(event: PressEvent) => void,
onResponderTerminate?: ?(event: PressEvent) => void,
onResponderTerminationRequest?: ?() => boolean,
onStartShouldSetResponder?: ?() => boolean,
onTextLayout?: ?(event: TextLayoutEvent) => mixed,
/**
* Defines how far your touch may move off of the button, before
* deactivating the button.
*
* See https://facebook.github.io/react-native/docs/text.html#pressretentionoffset
*/
pressRetentionOffset?: ?PressRetentionOffset,
/**
* Lets the user select text.
*
* See https://facebook.github.io/react-native/docs/text.html#selectable
*/
selectable?: ?boolean,
style?: ?TextStyleProp,
/**
* Used to locate this view in end-to-end tests.
*
* See https://facebook.github.io/react-native/docs/text.html#testid
*/
testID?: ?string,
/**
* Android Only
*/
/**
* Specifies the disabled state of the text view for testing purposes.
*
* See https://facebook.github.io/react-native/docs/text.html#disabled
*/
disabled?: ?boolean,
/**
* The highlight color of the text.
*
* See https://facebook.github.io/react-native/docs/text.html#selectioncolor
*/
selectionColor?: ?string,
/**
* Set text break strategy on Android.
*
* See https://facebook.github.io/react-native/docs/text.html#textbreakstrategy
*/
textBreakStrategy?: ?('balanced' | 'highQuality' | 'simple'),
/**
* iOS Only
*/
adjustsFontSizeToFit?: ?boolean,
/**
* Smallest possible scale a font can reach.
*
* See https://facebook.github.io/react-native/docs/text.html#minimumfontscale
*/
minimumFontScale?: ?number,
/**
* When `true`, no visual change is made when text is pressed down. | * See https://facebook.github.io/react-native/docs/text.html#suppresshighlighting
*/
suppressHighlighting?: ?boolean,
}>; | * |
origin_test.go | // Go Substrate RPC Client (GSRPC) provides APIs and types around Polkadot and any Substrate-based chain RPC calls
//
// Copyright 2019 Centrifuge GmbH
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types_test
import (
"testing"
| )
// newOrigin creates a new Origin type. This function is not exported on purpose – Origin should be ignored and not be
// allowed to be constructed.
func newOrigin() Origin {
return Origin(0x00)
}
func TestOrigin_EncodeDecode(t *testing.T) {
assertRoundtrip(t, newOrigin())
}
func TestOrigin_EncodedLength(t *testing.T) {
assertEncodedLength(t, []encodedLengthAssert{
{newOrigin(), 0},
})
}
func TestOrigin_Encode(t *testing.T) {
assertEncode(t, []encodingAssert{
{newOrigin(), MustHexDecodeString("0x")},
})
}
func TestOrigin_Hash(t *testing.T) {
assertHash(t, []hashAssert{
{newOrigin(), MustHexDecodeString(
"0x0e5751c026e543b2e8ab2eb06099daa1d1e5df47778f7787faab45cdf12fe3a8")},
})
}
func TestOrigin_Hex(t *testing.T) {
assertEncodeToHex(t, []encodeToHexAssert{
{newOrigin(), ""},
})
}
func TestOrigin_String(t *testing.T) {
assertString(t, []stringAssert{
{newOrigin(), ""},
})
}
func TestOrigin_Eq(t *testing.T) {
assertEq(t, []eqAssert{
{newOrigin(), newOrigin(), true},
{newOrigin(), NewBytes([]byte{}), false},
{newOrigin(), NewBool(true), false},
{newOrigin(), NewBool(false), false},
})
} | . "github.com/kinosang/go-substrate-rpc-client/v3/types" |
runtime.py | from functools import partial
import six
from dagster import check
from dagster.core.storage.type_storage import TypeStoragePlugin
from .builtin_enum import BuiltinEnum
from .builtin_config_schemas import BuiltinSchemas
from .config import ConfigType
from .config import List as ConfigList
from .config import Nullable as ConfigNullable
from .config_schema import InputHydrationConfig, OutputMaterializationConfig
from .marshal import SerializationStrategy, PickleSerializationStrategy
from .dagster_type import check_dagster_type_param
from .wrapping import WrappingListType, WrappingNullableType
def check_opt_config_cls_param(config_cls, param_name):
if config_cls is None:
return config_cls
check.invariant(isinstance(config_cls, type))
check.param_invariant(issubclass(config_cls, ConfigType), param_name)
return config_cls
class RuntimeType(object):
'''
The class backing DagsterTypes as they are used during execution.
'''
def __init__(
self,
key,
name,
is_builtin=False,
description=None,
input_hydration_config=None,
output_materialization_config=None,
serialization_strategy=None,
auto_plugins=None,
):
type_obj = type(self)
if type_obj in RuntimeType.__cache:
check.failed(
(
'{type_obj} already in cache. You **must** use the inst() class method '
'to construct RuntimeType and not the ctor'.format(type_obj=type_obj)
)
)
self.key = check.str_param(key, 'key')
self.name = check.opt_str_param(name, 'name')
self.description = check.opt_str_param(description, 'description')
self.input_hydration_config = check.opt_inst_param(
input_hydration_config, 'input_hydration_config', InputHydrationConfig
)
self.output_materialization_config = check.opt_inst_param(
output_materialization_config,
'output_materialization_config',
OutputMaterializationConfig,
)
self.serialization_strategy = check.opt_inst_param(
serialization_strategy,
'serialization_strategy',
SerializationStrategy,
PickleSerializationStrategy(),
)
auto_plugins = check.opt_list_param(auto_plugins, 'auto_plugins', of_type=type)
check.param_invariant(
all(
issubclass(auto_plugin_type, TypeStoragePlugin) for auto_plugin_type in auto_plugins
),
'auto_plugins',
)
self.auto_plugins = auto_plugins
self.is_builtin = check.bool_param(is_builtin, 'is_builtin')
__cache = {}
@classmethod
def inst(cls):
if cls not in RuntimeType.__cache:
RuntimeType.__cache[cls] = cls() # pylint: disable=E1120
return RuntimeType.__cache[cls]
@staticmethod
def from_builtin_enum(builtin_enum):
check.invariant(BuiltinEnum.contains(builtin_enum), 'must be member of BuiltinEnum')
return _RUNTIME_MAP[builtin_enum]
@property
def display_name(self):
return self.name
def type_check(self, value):
pass
@property
def is_any(self):
return False
@property
def is_scalar(self):
return False
@property
def is_list(self):
return False
@property
def is_nullable(self):
return False
@property
def inner_types(self):
return []
@property
def is_nothing(self):
return False
class BuiltinScalarRuntimeType(RuntimeType):
def __init__(self, *args, **kwargs):
name = type(self).__name__
super(BuiltinScalarRuntimeType, self).__init__(
key=name, name=name, is_builtin=True, *args, **kwargs
)
@property
def is_scalar(self):
return True
class Int(BuiltinScalarRuntimeType):
def __init__(self):
super(Int, self).__init__(
input_hydration_config=BuiltinSchemas.INT_INPUT,
output_materialization_config=BuiltinSchemas.INT_OUTPUT,
)
def type_check(self, value):
from dagster.core.definitions.events import Failure
if not isinstance(value, six.integer_types):
raise Failure(_typemismatch_error_str(value, 'int'))
def _typemismatch_error_str(value, expected_type_desc):
return 'Value "{value}" of python type "{python_type}" must be a {type_desc}.'.format(
value=value, python_type=type(value).__name__, type_desc=expected_type_desc
)
def _throw_if_not_string(value):
from dagster.core.definitions.events import Failure
if not isinstance(value, six.string_types):
raise Failure(_typemismatch_error_str(value, 'string'))
class String(BuiltinScalarRuntimeType):
def __init__(self):
super(String, self).__init__(
input_hydration_config=BuiltinSchemas.STRING_INPUT,
output_materialization_config=BuiltinSchemas.STRING_OUTPUT,
)
def type_check(self, value):
_throw_if_not_string(value)
class Path(BuiltinScalarRuntimeType):
def __init__(self):
super(Path, self).__init__(
input_hydration_config=BuiltinSchemas.PATH_INPUT,
output_materialization_config=BuiltinSchemas.PATH_OUTPUT,
)
def type_check(self, value):
_throw_if_not_string(value)
class Float(BuiltinScalarRuntimeType):
def __init__(self):
super(Float, self).__init__(
input_hydration_config=BuiltinSchemas.FLOAT_INPUT,
output_materialization_config=BuiltinSchemas.FLOAT_OUTPUT,
)
def type_check(self, value):
from dagster.core.definitions.events import Failure
if not isinstance(value, float):
raise Failure(_typemismatch_error_str(value, 'float'))
class Bool(BuiltinScalarRuntimeType):
def __init__(self):
super(Bool, self).__init__(
input_hydration_config=BuiltinSchemas.BOOL_INPUT,
output_materialization_config=BuiltinSchemas.BOOL_OUTPUT,
)
def type_check(self, value):
from dagster.core.definitions.events import Failure
if not isinstance(value, bool):
raise Failure(_typemismatch_error_str(value, 'bool'))
class Anyish(RuntimeType):
def __init__(
self,
key,
name,
input_hydration_config=None,
output_materialization_config=None,
is_builtin=False,
description=None,
):
super(Anyish, self).__init__(
key=key,
name=name,
input_hydration_config=input_hydration_config,
output_materialization_config=output_materialization_config,
is_builtin=is_builtin,
description=description,
)
@property
def is_any(self):
return True
class Any(Anyish):
def __init__(self):
super(Any, self).__init__(
key='Any',
name='Any',
input_hydration_config=BuiltinSchemas.ANY_INPUT,
output_materialization_config=BuiltinSchemas.ANY_OUTPUT,
is_builtin=True,
)
def define_any_type(name, description=None):
class NamedAnyType(Anyish):
def __init__(self):
super(NamedAnyType, self).__init__(key=name, name=name, description=description)
return NamedAnyType
class Nothing(RuntimeType):
def __init__(self):
super(Nothing, self).__init__(
key='Nothing',
name='Nothing',
input_hydration_config=None,
output_materialization_config=None,
is_builtin=True,
)
@property
def is_nothing(self):
return True
def type_check(self, value):
from dagster.core.definitions.events import Failure
if value is not None:
raise Failure('Value {value} must be None.'.format(value=value))
class PythonObjectType(RuntimeType):
def __init__(self, python_type, key=None, name=None, typecheck_metadata_fn=None, **kwargs):
name = check.opt_str_param(name, 'name', type(self).__name__)
key = check.opt_str_param(key, 'key', name)
super(PythonObjectType, self).__init__(key=key, name=name, **kwargs)
self.python_type = check.type_param(python_type, 'python_type')
self.typecheck_metadata_fn = check.opt_callable_param(
typecheck_metadata_fn, 'typecheck_metadata_fn'
)
def type_check(self, value):
from dagster.core.definitions.events import Failure
if not isinstance(value, self.python_type):
raise Failure(
'Value {value} should be of type {type_name}.'.format(
value=value, type_name=self.python_type.__name__
)
)
if self.typecheck_metadata_fn:
return self.typecheck_metadata_fn(value)
def define_python_dagster_type(
python_type,
name=None,
description=None,
input_hydration_config=None,
output_materialization_config=None,
serialization_strategy=None,
auto_plugins=None,
typecheck_metadata_fn=None,
):
'''
The dagster typesystem is very flexible, and the body of a typecheck can be
a function that does *anything*. (For that level of flexibility one should inherit
from RuntimeType directly.) However it's very common to want to generate a dagster
type whose only typecheck is against a python type:
DateTime = define_python_dagster_type(datetime.datetime, name='DateTime')
Args:
python_type (cls)
The python type you want check against.
name (Optional[str]):
Name of the dagster type. Defaults to the name of the python_type.
description (Optional[str]):
input_hydration_config (Optional[InputHydrationConfig]):
An instance of a class that inherits from :py:class:`InputHydrationConfig` that
can map config data to a value of this type.
output_materialization_config (Optional[OutputMaterializationConfig]):
An instance of a class that inherits from :py:class:`OutputMaterializationConfig` that
can map config data to persisting values of this type.
serialization_strategy (Optional[SerializationStrategy]):
The default behavior for how to serialize this value for persisting between execution
steps.
auto_plugins (Optional[List[type]]):
types *must* subclass from TypeStoragePlugin.
This allows for types to specify serialization that depends on what storage
is being used to serialize intermediates. In these cases the serialization_strategy
is not sufficient because serialization requires specialized API calls, e.g.
to call an s3 API directly instead of using a generic file object. See
dagster_pyspark.DataFrame for an example of auto_plugins.
typecheck_metadata_fn (Callable):
It is used to emit metadata when you successfully check a type. This allows
the user to specify a function that emits that metadata object whenever the typecheck
succeeds. The passed-in function takes the value being evaluated and returns a
TypeCheck event.
See dagster_pandas.DataFrame for an example
'''
check.type_param(python_type, 'python_type') | check.opt_str_param(description, 'description')
check.opt_inst_param(input_hydration_config, 'input_hydration_config', InputHydrationConfig)
check.opt_inst_param(
output_materialization_config, 'output_materialization_config', OutputMaterializationConfig
)
check.opt_inst_param(
serialization_strategy,
'serialization_strategy',
SerializationStrategy,
default=PickleSerializationStrategy(),
)
auto_plugins = check.opt_list_param(auto_plugins, 'auto_plugins', of_type=type)
check.param_invariant(
all(issubclass(auto_plugin_type, TypeStoragePlugin) for auto_plugin_type in auto_plugins),
'auto_plugins',
)
check.opt_callable_param(typecheck_metadata_fn, 'typecheck_metadata_fn')
class _ObjectType(PythonObjectType):
def __init__(self):
super(_ObjectType, self).__init__(
python_type=python_type,
name=name,
description=description,
input_hydration_config=input_hydration_config,
output_materialization_config=output_materialization_config,
serialization_strategy=serialization_strategy,
auto_plugins=auto_plugins,
typecheck_metadata_fn=typecheck_metadata_fn,
)
return _ObjectType
def _create_nullable_input_schema(inner_type):
if not inner_type.input_hydration_config:
return None
nullable_type = ConfigNullable(inner_type.input_hydration_config.schema_type).inst()
class _NullableSchema(InputHydrationConfig):
@property
def schema_type(self):
return nullable_type
def construct_from_config_value(self, context, config_value):
if config_value is None:
return None
return inner_type.input_hydration_config.construct_from_config_value(
context, config_value
)
return _NullableSchema()
class NullableType(RuntimeType):
def __init__(self, inner_type):
key = 'Optional.' + inner_type.key
super(NullableType, self).__init__(
key=key, name=None, input_hydration_config=_create_nullable_input_schema(inner_type)
)
self.inner_type = inner_type
@property
def display_name(self):
return self.inner_type.display_name + '?'
def type_check(self, value):
return None if value is None else self.inner_type.type_check(value)
@property
def is_nullable(self):
return True
@property
def inner_types(self):
return [self.inner_type] + self.inner_type.inner_types
def _create_list_input_schema(inner_type):
if not inner_type.input_hydration_config:
return None
list_type = ConfigList(inner_type.input_hydration_config.schema_type).inst()
class _ListSchema(InputHydrationConfig):
@property
def schema_type(self):
return list_type
def construct_from_config_value(self, context, config_value):
convert_item = partial(
inner_type.input_hydration_config.construct_from_config_value, context
)
return list(map(convert_item, config_value))
return _ListSchema()
class ListType(RuntimeType):
def __init__(self, inner_type):
key = 'List.' + inner_type.key
super(ListType, self).__init__(
key=key, name=None, input_hydration_config=_create_list_input_schema(inner_type)
)
self.inner_type = inner_type
@property
def display_name(self):
return '[' + self.inner_type.display_name + ']'
def type_check(self, value):
from dagster.core.definitions.events import Failure
if not isinstance(value, list):
raise Failure('Value must be a list, got {value}'.format(value=value))
for item in value:
self.inner_type.type_check(item)
@property
def is_list(self):
return True
@property
def inner_types(self):
return [self.inner_type] + self.inner_type.inner_types
def Optional(inner_type):
check.inst_param(inner_type, 'inner_type', RuntimeType)
class _Nullable(NullableType):
def __init__(self):
super(_Nullable, self).__init__(inner_type)
return _Nullable.inst()
def List(inner_type):
check.inst_param(inner_type, 'inner_type', RuntimeType)
class _List(ListType):
def __init__(self):
super(_List, self).__init__(inner_type)
return _List.inst()
class Stringish(RuntimeType):
def __init__(self, key=None, name=None, **kwargs):
name = check.opt_str_param(name, 'name', type(self).__name__)
key = check.opt_str_param(key, 'key', name)
super(Stringish, self).__init__(key=key, name=name, **kwargs)
def is_scalar(self):
return True
def type_check(self, value):
return _throw_if_not_string(value)
_RUNTIME_MAP = {
BuiltinEnum.ANY: Any.inst(),
BuiltinEnum.BOOL: Bool.inst(),
BuiltinEnum.FLOAT: Float.inst(),
BuiltinEnum.INT: Int.inst(),
BuiltinEnum.PATH: Path.inst(),
BuiltinEnum.STRING: String.inst(),
BuiltinEnum.NOTHING: Nothing.inst(),
}
def resolve_to_runtime_type(dagster_type):
# circular dep
from .decorator import is_runtime_type_decorated_klass, get_runtime_type_on_decorated_klass
from .mapping import remap_python_type
dagster_type = remap_python_type(dagster_type)
check_dagster_type_param(dagster_type, 'dagster_type', RuntimeType)
if dagster_type is None:
return Any.inst()
if BuiltinEnum.contains(dagster_type):
return RuntimeType.from_builtin_enum(dagster_type)
if isinstance(dagster_type, WrappingListType):
return resolve_to_runtime_list(dagster_type)
if isinstance(dagster_type, WrappingNullableType):
return resolve_to_runtime_nullable(dagster_type)
if is_runtime_type_decorated_klass(dagster_type):
return get_runtime_type_on_decorated_klass(dagster_type)
if issubclass(dagster_type, RuntimeType):
return dagster_type.inst()
check.failed('should not reach')
def resolve_to_runtime_list(list_type):
check.inst_param(list_type, 'list_type', WrappingListType)
return List(resolve_to_runtime_type(list_type.inner_type))
def resolve_to_runtime_nullable(nullable_type):
check.inst_param(nullable_type, 'nullable_type', WrappingNullableType)
return Optional(resolve_to_runtime_type(nullable_type.inner_type))
ALL_RUNTIME_BUILTINS = set(_RUNTIME_MAP.values())
def construct_runtime_type_dictionary(solid_defs):
type_dict = {t.name: t for t in ALL_RUNTIME_BUILTINS}
for solid_def in solid_defs:
for runtime_type in solid_def.all_runtime_types():
type_dict[runtime_type.name] = runtime_type
return type_dict | check.opt_str_param(name, 'name') |
hid_common.rs | // This file is part of ctap, a Rust implementation of the FIDO2 protocol.
// Copyright (c) Ariën Holthuizen <[email protected]>
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::path::PathBuf;
#[derive(Debug, Clone)]
/// Storage for device related information
pub struct DeviceInfo {
pub path: PathBuf,
pub usage_page: u16,
pub usage: u16, | pub report_size: u16,
} |
|
__init__.py | from mc2p import MC2PClient as MC2PClientPython
__title__ = 'MyChoice2Pay Django'
__version__ = '0.1.3'
__author__ = 'MyChoice2Pay'
__license__ = 'BSD 2-Clause'
__copyright__ = 'Copyright 2017 MyChoice2Pay'
# Version synonym
VERSION = __version__
# Header encoding (see RFC5987)
HTTP_HEADER_ENCODING = 'iso-8859-1'
# Default datetime input and output formats
ISO_8601 = 'iso-8601'
default_app_config = 'django_mc2p.apps.DjangoMC2PConfig'
class MC2PClient(MC2PClientPython):
"""
Wrapper around the Python MC2PClient
"""
def __init__(self):
"""
Initializes a MC2PClient getting key and secret key from DB
"""
from .models import MC2PConfig
| secret_key = mc2p_config.secret_key
except:
key = ''
secret_key = ''
super(MC2PClient, self).__init__(key, secret_key) | try:
mc2p_config = MC2PConfig.objects.get()
key = mc2p_config.key |
fold.go | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package hujson
import (
"bytes"
"unicode/utf8"
)
const (
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
kelvin = '\u212a'
smallLongEss = '\u017f'
)
// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// * S maps to s and to U+017F 'ſ' Latin small letter long s
// * k maps to K and to U+212A 'K' Kelvin sign
// See https://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
nonLetter := false
special := false // special letter
for _, b := range s {
if b >= utf8.RuneSelf {
return bytes.EqualFold
}
upper := b & caseMask
if upper < 'A' || upper > 'Z' {
nonLetter = true
} else if upper == 'K' || upper == 'S' {
// See above for why these letters are special.
special = true
}
}
if special {
return equalFoldRight
}
if nonLetter {
return asciiEqualFold
}
return simpleLetterEqualFold
}
// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
for _, sb := range s {
if len(t) == 0 {
return false
}
tb := t[0]
if tb < utf8.RuneSelf {
if sb != tb {
sbUpper := sb & caseMask | if 'A' <= sbUpper && sbUpper <= 'Z' {
if sbUpper != tb&caseMask {
return false
}
} else {
return false
}
}
t = t[1:]
continue
}
// sb is ASCII and t is not. t must be either kelvin
// sign or long s; sb must be s, S, k, or K.
tr, size := utf8.DecodeRune(t)
switch sb {
case 's', 'S':
if tr != smallLongEss {
return false
}
case 'k', 'K':
if tr != kelvin {
return false
}
default:
return false
}
t = t[size:]
}
if len(t) > 0 {
return false
}
return true
}
// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, sb := range s {
tb := t[i]
if sb == tb {
continue
}
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
if sb&caseMask != tb&caseMask {
return false
}
} else {
return false
}
}
return true
}
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, b := range s {
if b&caseMask != t[i]&caseMask {
return false
}
}
return true
} | |
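The special cases in `foldFunc` above exist because Unicode case folding puts three characters in each of the K and S equivalence classes; a quick Python illustration:

```python
# Why 'k', 'K', 's', 'S' force the slower equalFoldRight path.
assert "K".casefold() == "k"
assert "\u212a".casefold() == "k"   # U+212A KELVIN SIGN
assert "S".casefold() == "s"
assert "\u017f".casefold() == "s"   # U+017F LATIN SMALL LETTER LONG S
# ASCII masking with 0x20 only equates the first pair in each class, so keys
# containing these letters need the rune-aware comparison.
```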
mod.rs | #[doc = r" Value read from the register"]
pub struct R {
bits: u16,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u16,
}
impl super::PACKET_RAM_0_161 {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct LSBYTER {
bits: u8,
}
impl LSBYTER {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct MSBYTER {
bits: u8,
}
impl MSBYTER {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _LSBYTEW<'a> {
w: &'a mut W,
}
impl<'a> _LSBYTEW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 255;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u16) << OFFSET);
self.w.bits |= ((value & MASK) as u16) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _MSBYTEW<'a> {
w: &'a mut W,
}
impl<'a> _MSBYTEW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 255;
const OFFSET: u8 = 8;
self.w.bits &= !((MASK as u16) << OFFSET);
self.w.bits |= ((value & MASK) as u16) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u16 {
self.bits
}
#[doc = "Bits 0:7 - LSBYTE"]
#[inline]
pub fn lsbyte(&self) -> LSBYTER {
let bits = {
const MASK: u8 = 255;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u16) as u8
};
LSBYTER { bits }
}
#[doc = "Bits 8:15 - MSBYTE"]
#[inline]
pub fn msbyte(&self) -> MSBYTER {
let bits = {
const MASK: u8 = 255;
const OFFSET: u8 = 8;
((self.bits >> OFFSET) & MASK as u16) as u8
};
MSBYTER { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn | (&mut self, bits: u16) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:7 - LSBYTE"]
#[inline]
pub fn lsbyte(&mut self) -> _LSBYTEW {
_LSBYTEW { w: self }
}
#[doc = "Bits 8:15 - MSBYTE"]
#[inline]
pub fn msbyte(&mut self) -> _MSBYTEW {
_MSBYTEW { w: self }
}
}
| bits |
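The LSBYTE/MSBYTE proxies above are plain mask-and-shift accessors over a 16-bit register. The same arithmetic written out in Python, with made-up helper names (not part of the generated PAC):

```python
# Mask-and-shift field access on a 16-bit register value, mirroring _LSBYTEW/_MSBYTEW.
def set_field(reg, value, mask, offset):
    reg &= ~(mask << offset) & 0xFFFF
    reg |= (value & mask) << offset
    return reg

def get_field(reg, mask, offset):
    return (reg >> offset) & mask

reg = 0x0000
reg = set_field(reg, 0xCD, mask=0xFF, offset=0)   # LSBYTE, bits 0:7
reg = set_field(reg, 0xAB, mask=0xFF, offset=8)   # MSBYTE, bits 8:15
assert reg == 0xABCD
assert get_field(reg, 0xFF, 8) == 0xAB
```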
main.go | package main
import (
"fmt"
"os"
"github.com/mikeraimondi/gurnel/internal/gurnel"
)
func main() {
if err := gurnel.Do(); err != nil |
}
| {
fmt.Fprintf(os.Stderr, "Error: %s\n", err)
os.Exit(2)
} |
tag.entity.ts | import { Column, Entity, PrimaryGeneratedColumn } from 'typeorm';
import { Event } from './event.entity';
@Entity() | @PrimaryGeneratedColumn()
id: number;
@Column({ unique: true })
name: string;
} | export class Tag { |
users.controller.ts | import { Controller, Get, Param } from '@nestjs/common';
import { UsersService } from './users.service';
@Controller('users')
export class | {
constructor(
private readonly usersService: UsersService
){}
@Get('/validation/:eMail')
async checkUserValidation(@Param('eMail') eMail) {
return await this.usersService.checkUserValidation(eMail);
}
}
| UsersController |
polysol.py | #cas
| def poly_fill_rect(x, y, w, h, c):
ti_graphics.setColor(c)
ti_graphics.fillRect(x, y + screen_y0, w, h)
def poly_set_pixel(x, y, c):
ti_graphics.setPixel(x, y + screen_y0, c)
def poly_draw_ellipse(x, y, rx, ry, c):
ti_graphics.setColor(c)
x0, y0 = x - rx, y - ry
for dy in range(1 + (y0 > int(y0))):
for dx in range(1 + (x0 > int(x0))):
ti_graphics.drawArc(x0 + dx, y0 + dy + screen_y0, 2 * rx, 2 * ry, 0, 3600)
def poly_fill_circle(x, y, r, c):
ti_graphics.setColor(c)
ti_graphics.fillCircle(x, y + screen_y0, r)
def poly_draw_string(s, x, y, cf, cb):
poly_fill_rect(x, y, font_w, font_h, cb)
ti_graphics.setColor(cf)
ti_graphics.drawString(s, x, y + screen_y0)
return screen_w, screen_h, font_h, poly_set_pixel, poly_fill_rect, poly_draw_ellipse, poly_fill_circle, poly_draw_string, poly_get_key | def get_infos():
import ti_graphics, ti_system
fnop = lambda : None
screen_w, screen_h, screen_y0, font_w, font_h, poly_set_pixel, poly_fill_rect, poly_draw_ellipse, poly_fill_circle, poly_get_key, poly_draw_string = 320, 210, 30, 10, 15, fnop, fnop, fnop, fnop, ti_system.wait_key, fnop
|
threadpoolex.py | #!/usr/bin/env python
#coding:utf-8
"""
Author: --<v1ll4n>
Purpose: ThreadPool From Twisted and Add Common Resource
Created: 05/13/17
"""
from __future__ import unicode_literals
import uuid
import threading
import traceback
try:
import queue
except:
import Queue as queue
import time
import random
class LaborQuit(Exception):
pass
_callback_chain_lock = threading.Lock()
########################################################################
class ThreadPoolXLabor(threading.Thread):
""""""
#----------------------------------------------------------------------
def __init__(self, name, debug=True, loop_interval=0.2):
"""Constructor"""
threading.Thread.__init__(self, name=name)
#
# key flag
#
self.working = False
self._debug = True
self._waiting_quit = False
#
# cache flag
#
self._cb_cached = False
self._ecb_cached = False
#
# fields
#
self.loop_interval = loop_interval
#
# private attributes
#
self._task_buffer = queue.Queue(1)
self._callback_chains_ = []
self._exception_handle_callback_chains = []
self._cb_c_buffer = self._callback_chains_
self._ehcb_c_buffer = self._exception_handle_callback_chains
self._inuse = False
#----------------------------------------------------------------------
def run(self):
""""""
try:
self._run()
except LaborQuit as e:
pass
#print(self.name, 'exited!')
#----------------------------------------------------------------------
def _run(self):
""""""
self.working = True
while self.working:
#
# if quit, before get task
#
if self._waiting_quit:
if self._task_buffer.qsize() <= 0:
self._quit()
#
# get task from task_buffer
#
_task = self._get_current_task()
#
# process _task
#
if _task:
_callabled = _task[0]
_vargs = _task[1]
_kwargs = _task[2]
#
# define exception
#
exc = None
try:
_result = _callabled(*_vargs, **_kwargs)
self._handle_result(_result)
#
# recovery callback chains
#
if self._cb_cached:
self._recovery_cb_chain()
self._cb_cached = False
except Exception as e:
self._handle_exception(e)
#
# recovery error_callback
#
if self._ecb_cached:
self._recovery_ecb_chain()
self._ecb_cached = False
self.inuse = False
else:
if self._waiting_quit:
if self._task_buffer.qsize() == 0:
self._quit()
time.sleep(self.loop_interval)
#----------------------------------------------------------------------
@property
def busy(self):
""""""
if self._task_buffer.qsize() == 0:
return False
else:
return True
@property
def inuse(self):
""""""
return self._inuse
@inuse.setter
def inuse(self, flag):
""""""
self._inuse = flag
#----------------------------------------------------------------------
def _handle_exception(self, exception_obj):
""""""
e = exception_obj
#
# exception callback
#
if self._exception_handle_callback_chains == []:
if self._debug:
raise e
else:
for i in self._exception_handle_callback_chains:
e = i(e)
@property
def cb_chain(self):
""""""
#if isinstance(self._callback_chains_, list):
return self._callback_chains_
#else:
# return self._cb_c_buffer
#----------------------------------------------------------------------
def _handle_result(self, result):
""""""
_result = result
#
# process result callback
#
_cs = self.cb_chain
for i in _cs:
_cb = i[0]
_exc_cb = i[1]
try:
_result = _cb(_result)
except Exception as e:
#
# if some exception happend
#
if _exc_cb:
_result = _exc_cb(_result)
else:
raise e
#----------------------------------------------------------------------
def _get_current_task(self):
""""""
try:
_ret = self._task_buffer.get_nowait()
except:
_ret = None
return _ret
#----------------------------------------------------------------------
def add_callback(self, callback, exception_callback=None):
"""result callback function"""
assert callable(callback), 'callback cannot be called'
self._callback_chains_.append((callback, exception_callback))
#----------------------------------------------------------------------
def add_task_exception_callback(self, callback):
""""""
assert callable(callback), 'exception callback'
self._exception_handle_callback_chains.append(callback)
#----------------------------------------------------------------------
def _execute(self, target, var_args=tuple(), keyword_args={}):
""""""
assert callable(target), 'target function cannot be called'
assert not self._waiting_quit, 'waiting for quiting, cannot execute task'
self._task_buffer.put(tuple([target, var_args, keyword_args]))
#----------------------------------------------------------------------
def execute(self, target, var_args=tuple(), keyword_args={}):
""""""
if self._cb_cached:
self._recovery_cb_chain()
if self._ecb_cached:
self._recovery_ecb_chain()
self._execute(target, var_args, keyword_args)
#----------------------------------------------------------------------
def execute_with_callback(self, target, var_args=tuple(), keyword_args={},
callback=None, callback_exc=None, error_callback=None,):
""""""
assert callable(target), 'target function cannot be called'
assert not self._waiting_quit, 'waiting for quiting, cannot execute task'
#
# process callback
#
if callback:
self._cache_cb_chain()
self.add_callback(callback, callback_exc)
if error_callback:
self._cache_ecb_chain()
self.add_task_exception_callback(error_callback)
#
# execute called
#
self._execute(target, var_args, keyword_args)
self.quit()
#----------------------------------------------------------------------
def execute_with_callback_chains(self, target, var_args=tuple(), keyword_args={},
callback_chain=None, error_callback_chains=None):
""""""
assert callable(target), 'target function cannot be called'
assert not self._waiting_quit, 'waiting for quiting, cannot execute task'
assert callback_chain == None or isinstance(callback_chain, list), \
'callback chain has to be None or list'
assert error_callback_chains == None or isinstance(error_callback_chains, list), \
'callback chain has to be None or list'
#
# proccess callback
#
if callback_chain:
self._cache_cb_chain()
self._callback_chains_ = callback_chain
if error_callback_chains:
self._cache_ecb_chain()
self._exception_handle_callback_chains = error_callback_chains
self._execute(target, var_args, keyword_args)
self.quit()
#----------------------------------------------------------------------
def _cache_cb_chain(self):
""""""
#
# cache callback
#
self._cb_c_buffer = self._callback_chains_
#
# change flag
#
self._cb_cached = True
#
# reset chains
#
self._callback_chains_ = []
#----------------------------------------------------------------------
def _recovery_cb_chain(self):
""""""
self._cb_cached = False
self._callback_chains_ = self._cb_c_buffer
#----------------------------------------------------------------------
def _recovery_ecb_chain(self):
""""""
self._ecb_cached = False
self._exception_handle_callback_chains = self._ehcb_c_buffer
#----------------------------------------------------------------------
def _cache_ecb_chain(self):
"""The same as self._cache_cb_chain"""
self._ehcb_c_buffer = self._exception_handle_callback_chains
self._ecb_cached = True
self._exception_handle_callback_chains = []
#----------------------------------------------------------------------
def quit(self):
""""""
self._waiting_quit = True
#----------------------------------------------------------------------
def _quit(self):
""""""
raise LaborQuit('normal exit labor')
########################################################################
class _LaborFactory(object):
""""""
count = 0
Labor = ThreadPoolXLabor
#----------------------------------------------------------------------
def __init__(self, debug, loop_interval):
""""""
self._loop_interval = loop_interval
self._debug = debug
self._callback_chains = []
self._exception_callback_chains = []
#----------------------------------------------------------------------
def add_callbacks(self, callback, callback_exc=None):
""""""
assert callable(callback), 'callback not callable'
assert callback_exc == None or callable(callback_exc), 'result exception callback not callable.'
self._callback_chains.append(tuple([callback, callback_exc]))
#----------------------------------------------------------------------
def add_callback_chain(self, callback_chain):
""""""
assert isinstance(callback_chain, list), 'callback chain must be list'
self._callback_chains = callback_chain
#----------------------------------------------------------------------
def add_exception_callback(self, callback):
""""""
assert callable(callback), 'exception callback cannot be called'
self._exception_callback_chains.append(callback)
#----------------------------------------------------------------------
def add_exception_callback_chain(self, callback_chain):
""""""
assert isinstance(callback_chain, list), 'callback chain must be list'
self._exception_callback_chains = callback_chain
@property
def callback_chains(self):
""""""
return self._callback_chains
@property
def exception_callback_chains(self):
""""""
return self._exception_callback_chains
#----------------------------------------------------------------------
def build_labor(self):
""""""
_lb = self.Labor(name=self.gen_labor_name(),
debug=self._debug,
loop_interval=self._loop_interval)
_lb.daemon = True
#
# add result callback
#
for i in self.callback_chains:
_lb.add_callback(*i)
#
# add exception callback
#
for i in self.exception_callback_chains:
_lb.add_task_exception_callback(i)
return _lb
#----------------------------------------------------------------------
def gen_labor_name(self):
""""""
return 'labor-starttime:{ts}'.format(index=self.count,
ts=int(time.time()*100))
########################################################################
class _ThreadTeam(object):
""""""
#----------------------------------------------------------------------
def __init__(self, factory):
"""Constructor"""
assert isinstance(factory, _LaborFactory)
self._factory = factory
self._labors = []
#----------------------------------------------------------------------
@property
def labors(self):
""""""
return self._labors
#----------------------------------------------------------------------
def add(self):
""""""
lb = self._factory.build_labor()
#
# start labor
#
lb.daemon = True
lb.start()
self.labors.append(lb)
return lb
#----------------------------------------------------------------------
def shrink(self):
""""""
#
# shrink
#
for labor in list(self.labors): self._exit_idle_labor(labor)
#----------------------------------------------------------------------
def _exit_idle_labor(self, labor):
""""""
assert isinstance(labor, ThreadPoolXLabor)
if labor.is_alive():
if labor.busy:
pass
else:
self.labors.remove(labor)
labor.quit()
else:
self.labors.remove(labor)
#----------------------------------------------------------------------
@property
def size(self):
""""""
return len([x for x in self.labors if x.is_alive()])
#----------------------------------------------------------------------
def quitall(self):
""""""
#
# shrink first
#
self.shrink()
#
# quit all labor
#
for i in self.labors:
i.quit()
try:
i.join()
except:
pass
while i.is_alive():
pass
#----------------------------------------------------------------------
def select(self):
""""""
_idle_labors = [x for x in self.labors if not x.inuse]
if not _idle_labors:
return None
else:
labor = random.choice(_idle_labors)
labor.inuse = True
#while labor in self._selected_labor:
#labor = random.choice(_idle_labors)
#self._selected_labor_lock.acquire()
#if labor in self._selected_labor:
#pass
#else:
#self._selected_labor.append(labor)
#self._selected_labor_lock.release()
return labor
#----------------------------------------------------------------------
def release_labor(self, labor):
""""""
#
# sync selected labor
#
#self._selected_labor_lock.acquire()
#self._selected_labor.remove(labor)
#self._selected_labor_lock.release()
########################################################################
class ThreadPoolX(object):
""""""
min_size = 5
max_size = 20
joined = False
started = False
workers = 0
name = 'default'
#----------------------------------------------------------------------
def __init__(self, min_threads=5, max_threads=20,
name='default', debug=True, loop_interval=0.2,
adjuest_interval=3, diviation_ms=100):
"""Create threadpool object
@param min_threads: min size of thread pool
@type min_threads: L{int}
@param max_threads: max size of thread pool
@type max_threads: L{int}
@param name: The name to give this threadpool; visible in log msg.
@type name: native L{str}"""
assert min_threads >= 0, 'minimum is negative'
assert min_threads <= max_threads, 'maximum is less than minimum'
self.min_size = min_threads
self.max_size = max_threads
self.name = name
self.diviation_ms = diviation_ms
#self.threads = []
#
# init factory
#
self._factory = _LaborFactory(debug, loop_interval)
#
# init _team
#
self._team = _ThreadTeam(self._factory)
#
# private entity
#
self._task_queue = queue.Queue()
#
# control adjust pool size interval
#
self._lp_itrvl = loop_interval
self._adj_itrvl = adjuest_interval
#
# dispatcher
#
def _dispatcher_factory():
_ret = threading.Thread(name='ThreadPoolX:{name} Dispatcher'\
.format(name=self.name),
target=self._start)
_ret.daemon = True
return _ret
self._dispatcher = _dispatcher_factory()
self._temp_factory = _LaborFactory(debug, loop_interval)
#
# set callback
#
#----------------------------------------------------------------------
def add_callbacks(self, callback, callback_exc=None):
""""""
self._factory.add_callbacks(callback, callback_exc)
#----------------------------------------------------------------------
def add_callback_chain(self, callback_chain):
""""""
self._factory.add_callback_chain(callback_chain)
#----------------------------------------------------------------------
def add_exception_callback(self, callback):
""""""
self._factory.add_exception_callback(callback)
#----------------------------------------------------------------------
def add_exception_callback_chain(self, callback_chain):
""""""
self._factory.add_exception_callback_chain(callback_chain)
#----------------------------------------------------------------------
def _start(self):
""""""
self.started = True
self.adjust_pool_size()
while self.started:
if int(int(time.time()*1000000) % (self._adj_itrvl*1000000)) <= self.diviation_ms:
#print 'adpool'
self.adjust_pool_size()
#print self.dumped_status()
#
# consume task
#
self.consume()
#
#----------------------------------------------------------------------
def start(self):
""""""
self._dispatcher.start()
#----------------------------------------------------------------------
def join(self):
""""""
self._dispatcher.join()
#----------------------------------------------------------------------
def quit(self):
""""""
self.started = False
#self.join()
while self._dispatcher.is_alive():
pass
        print('Exited ThreadPool main loop (dispatcher)!')
self._team.quitall()
        print('Quit all labors!')
#----------------------------------------------------------------------
def dumped_status(self):
""""""
state = {}
state['name'] = self.name
state['current_size'] = self._team.size
        state['idle_labor_size'] = len([x for x in self._team.labors if x.is_alive() and not x.busy])
return state
| def consume(self):
""""""
#
# check team and select a idle labor
#
_labor = self._team.select()
if _labor:
pass
else:
if self._team.size < self.max_size:
self._team.add()
_labor = self._team.select()
else:
_labor = None
#
# labor existed, execute task
#
if _labor:
try:
_task = self._task_queue.get_nowait()
except queue.Empty:
_task = None
            if _task is not None:
#print("******")
_labor.execute(*_task)
else:
_labor.inuse = False
#self._team.release_labor(_labor)
else:
pass
#----------------------------------------------------------------------
def adjust_pool_size(self):
""""""
while self._team.size < self.min_size:
self._team.add()
if self._team.size > self.max_size:
self._team.shrink()
#----------------------------------------------------------------------
def feed(self, target, vargs=tuple(), kwargs={}):
""""""
assert callable(target), 'target function cannot be executed'
self._task_queue.put(tuple([target, vargs, kwargs]))
#----------------------------------------------------------------------
def feed_with_callback(self, target, vargs=tuple(), kwargs={},
callback=None, callback_exc=None, error_callback=None):
""""""
assert callable(target), 'target function cannot be executed'
_temp_labor = self._temp_factory.build_labor()
_temp_labor.execute_with_callback(target, vargs, kwargs, callback,
callback_exc, error_callback)
_temp_labor.start()
#----------------------------------------------------------------------
def feed_with_callback_chain(self, target, vargs=tuple(), kwargs={},
callback_chain=None, error_callback_chain=None):
""""""
assert callable(target), 'target function cannot be executed'
_temp_labor = self._temp_factory.build_labor()
_temp_labor.execute_with_callback_chains(target, vargs, kwargs,
callback_chain=callback_chain,
error_callback_chains=error_callback_chain)
_temp_labor.start() | #---------------------------------------------------------------------- |
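# A minimal usage sketch for the ThreadPoolX class above. The worker function
# and the callback signature (a single result argument) are illustrative
# assumptions, not taken from the original module.
import time

def square(x):
    return x * x

def on_result(result):
    print('task finished with result:', result)

pool = ThreadPoolX(min_threads=2, max_threads=8, name='demo')
pool.add_callbacks(on_result)
pool.start()
for i in range(10):
    pool.feed(square, vargs=(i,))
time.sleep(2)          # give the dispatcher time to drain the task queue
pool.quit()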
data_structures.py | """
Skeleton data structures
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
from yt.data_objects.grid_patch import \
AMRGridPatch
from yt.geometry.grid_geometry_handler import \
GridIndex
from yt.data_objects.static_output import \
Dataset
from .fields import SkeletonFieldInfo
class SkeletonGrid(AMRGridPatch):
_id_offset = 0
def __init__(self, id, index, level, start, dimensions):
AMRGridPatch.__init__(self, id, filename=index.index_filename,
index=index)
self.Parent = []
self.Children = []
self.Level = level
self.start_index = start.copy()
self.stop_index = self.start_index + dimensions
self.ActiveDimensions = dimensions.copy()
def __repr__(self):
return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
class SkeletonHierarchy(GridIndex):
grid = SkeletonGrid
def __init__(self, ds, dataset_type='skeleton'):
self.dataset_type = dataset_type
# for now, the index file is the dataset!
self.index_filename = self.dataset.parameter_filename
self.directory = os.path.dirname(self.index_filename)
GridIndex.__init__(self, ds, dataset_type)
def _detect_output_fields(self):
# This needs to set a self.field_list that contains all the available,
# on-disk fields.
# NOTE: Each should be a tuple, where the first element is the on-disk
# fluid type or particle type. Convention suggests that the on-disk
# fluid type is usually the dataset_type and the on-disk particle type
# (for a single population of particles) is "io".
pass
def _count_grids(self):
# This needs to set self.num_grids
pass
def _parse_index(self):
# This needs to fill the following arrays, where N is self.num_grids:
# self.grid_left_edge (N, 3) <= float64
# self.grid_right_edge (N, 3) <= float64
# self.grid_dimensions (N, 3) <= int
# self.grid_particle_count (N, 1) <= int
# self.grid_levels (N, 1) <= int
# self.grids (N, 1) <= grid objects
#
pass
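    # An illustrative sketch (not part of the skeleton frontend; the
    # _read_header() helper and its fields are hypothetical) of how a concrete
    # _parse_index could fill the arrays listed above:
    #
    #     header = self._read_header()
    #     self.grid_left_edge[:] = header["left_edges"]       # (N, 3) float64
    #     self.grid_right_edge[:] = header["right_edges"]     # (N, 3) float64
    #     self.grid_dimensions[:] = header["dimensions"]      # (N, 3) int
    #     self.grid_particle_count[:, 0] = header["particle_counts"]
    #     self.grid_levels[:, 0] = header["levels"]
    #     self.grids = np.array(
    #         [self.grid(i, self, header["levels"][i],
    #                    header["start_indices"][i], header["dimensions"][i])
    #          for i in range(self.num_grids)], dtype="object")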
def | (self):
# For each grid, this must call:
# grid._prepare_grid()
# grid._setup_dx()
# This must also set:
# grid.Children <= list of child grids
# grid.Parent <= parent grid
# This is handled by the frontend because often the children must be
# identified.
pass
class SkeletonDataset(Dataset):
_index_class = SkeletonHierarchy
_field_info_class = SkeletonFieldInfo
def __init__(self, filename, dataset_type='skeleton',
storage_filename=None,
units_override=None):
self.fluid_types += ('skeleton',)
Dataset.__init__(self, filename, dataset_type,
units_override=units_override)
self.storage_filename = storage_filename
def _set_code_unit_attributes(self):
# This is where quantities are created that represent the various
# on-disk units. These are the currently available quantities which
# should be set, along with examples of how to set them to standard
# values.
#
# self.length_unit = self.quan(1.0, "cm")
# self.mass_unit = self.quan(1.0, "g")
# self.time_unit = self.quan(1.0, "s")
# self.time_unit = self.quan(1.0, "s")
#
# These can also be set:
# self.velocity_unit = self.quan(1.0, "cm/s")
# self.magnetic_unit = self.quan(1.0, "gauss")
pass
def _parse_parameter_file(self):
# This needs to set up the following items. Note that these are all
# assumed to be in code units; domain_left_edge and domain_right_edge
# will be updated to be in code units at a later time. This includes
# the cosmological parameters.
#
# self.unique_identifier
# self.parameters <= full of code-specific items of use
# self.domain_left_edge <= array of float64
# self.domain_right_edge <= array of float64
# self.dimensionality <= int
# self.domain_dimensions <= array of int64
# self.periodicity <= three-element tuple of booleans
# self.current_time <= simulation time in code units
#
# We also set up cosmological information. Set these to zero if
# non-cosmological.
#
# self.cosmological_simulation <= int, 0 or 1
# self.current_redshift <= float
# self.omega_lambda <= float
# self.omega_matter <= float
# self.hubble_constant <= float
pass
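    # A brief illustrative sketch (placeholder values, not part of the skeleton
    # frontend) of what a concrete _parse_parameter_file might set for a
    # non-cosmological, 3D uniform-grid dataset:
    #
    #     self.unique_identifier = int(os.stat(self.parameter_filename).st_ctime)
    #     self.parameters = {}
    #     self.domain_left_edge = np.zeros(3, dtype="float64")
    #     self.domain_right_edge = np.ones(3, dtype="float64")
    #     self.dimensionality = 3
    #     self.domain_dimensions = np.array([64, 64, 64], dtype="int64")
    #     self.periodicity = (True, True, True)
    #     self.current_time = 0.0
    #     self.cosmological_simulation = 0
    #     self.current_redshift = 0.0
    #     self.omega_lambda = 0.0
    #     self.omega_matter = 0.0
    #     self.hubble_constant = 0.0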
@classmethod
def _is_valid(self, *args, **kwargs):
# This accepts a filename or a set of arguments and returns True or
# False depending on if the file is of the type requested.
return False
| _populate_grid_objects |
lib.rs | // Copyright 2020 Parity Technologies (UK) Ltd.
#![cfg_attr(not(feature = "std"), no_std)]
use frame_support::{
decl_event, decl_module,
dispatch::DispatchResult,
traits::{Currency, ExistenceRequirement, WithdrawReason},
};
use frame_system::ensure_signed;
use codec::{Decode, Encode};
use cumulus_primitives::{
relay_chain::DownwardMessage,
xcmp::{XCMPMessageHandler, XCMPMessageSender},
DownwardMessageHandler, ParaId, UpwardMessageOrigin, UpwardMessageSender,
};
use cumulus_upward_message::BalancesMessage;
use polkadot_parachain::primitives::AccountIdConversion;
use orml_nft::{TokenInfoOf, TokenInfo};
#[derive(Encode, Decode)]
pub enum XCMPMessage<XAccountId, XBalance, XTokenInfo> {
/// Transfer tokens to the given account from the Parachain account.
TransferToken(XAccountId, XBalance),
TransferNft(XAccountId, XTokenInfo),
}
type BalanceOf<T> =
<<T as Trait>::Currency as Currency<<T as frame_system::Trait>::AccountId>>::Balance;
/// Configuration trait of this pallet.
pub trait Trait: frame_system::Trait + orml_nft::Trait + template::Trait {
/// Event type used by the runtime.
type Event: From<Event<Self>> + Into<<Self as frame_system::Trait>::Event>;
/// The sender of upward messages.
type UpwardMessageSender: UpwardMessageSender<Self::UpwardMessage>;
/// The upward message type used by the Parachain runtime.
type UpwardMessage: codec::Codec + BalancesMessage<Self::AccountId, BalanceOf<Self>>;
/// Currency of the runtime.
type Currency: Currency<Self::AccountId>;
/// The sender of XCMP messages.
type XCMPMessageSender: XCMPMessageSender<XCMPMessage<Self::AccountId, BalanceOf<Self>, TokenInfoOf<Self>>>;
}
decl_event! {
pub enum Event<T> where
AccountId = <T as frame_system::Trait>::AccountId,
Balance = BalanceOf<T>,
TokenInfo = TokenInfoOf<T>,
{
/// Transferred tokens to the account on the relay chain.
TransferredTokensToRelayChain(AccountId, Balance),
/// Transferred tokens to the account on request from the relay chain.
TransferredTokensFromRelayChain(AccountId, Balance),
/// Transferred tokens to the account from the given parachain account.
TransferredTokensViaXCMP(ParaId, AccountId, Balance, DispatchResult),
/// Transferred NFT to the account from the given account.
TransferredNftViaXCMP(ParaId, AccountId, TokenInfo, DispatchResult),
}
}
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
/// Transfer `amount` of tokens on the relay chain from the Parachain account to
/// the given `dest` account.
#[weight = 10]
fn transfer_tokens_to_relay_chain(origin, dest: T::AccountId, amount: BalanceOf<T>) {
let who = ensure_signed(origin)?;
let _ = T::Currency::withdraw(
&who,
amount,
WithdrawReason::Transfer.into(),
ExistenceRequirement::AllowDeath,
)?;
let msg = <T as Trait>::UpwardMessage::transfer(dest.clone(), amount.clone());
<T as Trait>::UpwardMessageSender::send_upward_message(&msg, UpwardMessageOrigin::Signed)
.expect("Should not fail; qed");
Self::deposit_event(Event::<T>::TransferredTokensToRelayChain(dest, amount));
}
/// Transfer `amount` of tokens to another parachain.
#[weight = 10]
fn transfer_tokens_to_parachain_chain(
origin,
para_id: u32,
dest: T::AccountId,
amount: BalanceOf<T>,
) {
//TODO we don't make sure that the parachain has some tokens on the other parachain.
let who = ensure_signed(origin)?;
let _ = T::Currency::withdraw(
&who,
amount,
WithdrawReason::Transfer.into(),
ExistenceRequirement::AllowDeath,
)?;
T::XCMPMessageSender::send_xcmp_message(
para_id.into(),
&XCMPMessage::TransferToken(dest, amount),
).expect("Should not fail; qed");
}
/// Transfer nft token to another parachain.
#[weight = 10]
fn transfer_nft_to_parachain_chain(
origin,
para_id: u32,
dest: T::AccountId,
token_id: T::TokenId,
) {
let who = ensure_signed(origin)?;
let token_class: T::ClassId = 0.into();
let token_info = <orml_nft::Module<T>>::tokens(token_class, &token_id);
<template::Module<T>>::burn_nft(who, token_id)?;
T::XCMPMessageSender::send_xcmp_message(
para_id.into(),
&XCMPMessage::TransferNft(dest, token_info.unwrap()),
).expect("Should not fail; qed");
}
fn deposit_event() = default;
}
}
/// This is a hack to convert from one generic type to another where we are sure that both are the
/// same type/use the same encoding.
fn | <O: Decode>(input: &impl Encode) -> O {
input.using_encoded(|e| Decode::decode(&mut &e[..]).expect("Must be compatible; qed"))
}
impl<T: Trait> DownwardMessageHandler for Module<T> {
fn handle_downward_message(msg: &DownwardMessage) {
match msg {
DownwardMessage::TransferInto(dest, amount, _) => {
let dest = convert_hack(&dest);
let amount: BalanceOf<T> = convert_hack(amount);
let _ = T::Currency::deposit_creating(&dest, amount.clone());
Self::deposit_event(Event::<T>::TransferredTokensFromRelayChain(dest, amount));
}
_ => {}
}
}
}
impl<T: Trait> XCMPMessageHandler<XCMPMessage<T::AccountId, BalanceOf<T>, TokenInfoOf<T>>> for Module<T> {
fn handle_xcmp_message(src: ParaId, msg: &XCMPMessage<T::AccountId, BalanceOf<T>, TokenInfoOf<T>>) {
match msg {
XCMPMessage::TransferToken(dest, amount) => {
let para_account = src.clone().into_account();
let res = T::Currency::transfer(
¶_account,
dest,
amount.clone(),
ExistenceRequirement::AllowDeath,
);
Self::deposit_event(Event::<T>::TransferredTokensViaXCMP(
src,
dest.clone(),
amount.clone(),
res,
));
},
XCMPMessage::TransferNft(dest, token_info) => {
let TokenInfo {metadata, owner: _, data} = token_info;
let res = <template::Module<T>>::mint_nft(
dest.clone(),
metadata.clone(),
data.clone()
);
Self::deposit_event(Event::<T>::TransferredNftViaXCMP(
src,
dest.clone(),
token_info.clone(),
res,
));
}
}
}
}
| convert_hack |
x_6_8.py | # x_6_8
#
#
class | (Exception):
pass
class NumberError(Exception):
pass
order_count = input('How many kibi dango would you like to order?: ')
card_number = input('Please enter your card number (e.g. 0000-0000-0000-0000): ')
try:
if int(order_count) > 100:
raise StockError
if card_number != '1111-1111-1111-1111':
raise NumberError
except StockError:
    print('Out of stock')
except NumberError:
    print('Card error')
else:
    print('Thank you for your purchase')
| StockError |
empty_runme.go | package main
import _ "./empty"
func main() { | } |
|
mod.rs | //! Utilities for testing.
use diff;
use lalrpop_util::ParseError;
use std::fmt::{Debug, Error, Formatter};
use util::tok::Tok;
// a simple tokenizer
pub mod tok;
pub fn test<R: Debug + Eq, F>(parse_fn: F, input: &str, expected: R)
where
F: FnOnce(Vec<Tok>) -> Result<R, ParseError<(), Tok, &'static str>>,
{
// create tokens
let tokens = tok::tokenize(input);
// filter to tokens
let tokens = tokens.into_iter().map(|(_, tok, _)| tok).collect();
// parse, expecting input to be totally consumed
let r = parse_fn(tokens).unwrap();
// expect output to be correct
assert!(
r == expected,
"parsing {:?}, got {:#?}, expected {:#?}",
input,
r,
expected
);
}
pub fn test_loc<R: Debug + Eq, F>(parse_fn: F, input: &str, expected: R)
where
F: FnOnce(Vec<(usize, Tok, usize)>) -> Result<R, ParseError<usize, Tok, &'static str>>,
{
// create tokens
let tokens = tok::tokenize(input);
// parse, expecting input to be totally consumed
let r = parse_fn(tokens).unwrap();
// expect output to be correct
assert!(
r == expected,
"parsing {:?}, got {:#?}, expected {:#?}",
input,
r,
expected
);
}
pub fn | <R, F>(parse_fn: F, input: &str) -> R
where
F: FnOnce(Vec<(usize, Tok, usize)>) -> R,
{
// create tokens
let tokens = tok::tokenize(input);
// parse, expecting input to be totally consumed
parse_fn(tokens)
}
struct ExpectedDebug<'a>(&'a str);
impl<'a> Debug for ExpectedDebug<'a> {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> {
// Ignore trailing commas in multiline Debug representation.
// Needed to work around rust-lang/rust#59076.
let s = self.0.replace(",\n", "\n");
write!(fmt, "{}", s)
}
}
pub fn expect_debug<D: Debug>(actual: D, expected: &str) {
compare(
ExpectedDebug(&format!("{:#?}", actual)),
ExpectedDebug(expected),
)
}
pub fn compare<D: Debug, E: Debug>(actual: D, expected: E) {
let actual_s = format!("{:?}", actual);
let expected_s = format!("{:?}", expected);
if actual_s != expected_s {
let actual_s = format!("{:#?}", actual);
let expected_s = format!("{:#?}", expected);
compare_str(&actual_s, &expected_s, "");
}
}
pub fn compare_str(actual: &str, expected: &str, msg: &str) {
if actual != expected {
let lines = diff::lines(actual, expected);
for diff in lines.iter().take(100) {
match diff {
diff::Result::Right(r) => println!("- {}", r),
diff::Result::Left(l) => println!("+ {}", l),
diff::Result::Both(l, _) if lines.len() < 100 => println!(" {}", l),
_ => (),
}
}
if lines.len() >= 100 {
println!("... more");
}
panic!("{}", msg);
}
}
| test_err_gen |
callee_cleanup_finder.py | from . import Analysis, register_analysis
from .. import SIM_PROCEDURES
import logging
l = logging.getLogger('angr.analyses.callee_cleanup_finder')
class CalleeCleanupFinder(Analysis):
def __init__(self, starts=None, hook_all=False):
self.results = {}
if starts is None:
starts = [imp.resolvedby.rebased_addr for imp in self.project.loader.main_object.imports.itervalues()]
for addr in starts:
with self._resilience():
size = self.analyze(addr)
if size is None:
l.info("Couldn't find return for function at %#x", addr)
else:
self.results[addr] = size
if hook_all:
for addr, size in self.results.iteritems():
if self.project.is_hooked(addr):
continue
if size % self.project.arch.bytes != 0:
l.error("Function at %#x has a misaligned return?", addr)
continue
args = size / self.project.arch.bytes
cc = self.project.factory.cc_from_arg_kinds([False]*args)
cc.CALLEE_CLEANUP = True
sym = self.project.loader.find_symbol(addr)
name = sym.name if sym is not None else None
lib = self.project.loader.find_object_containing(addr)
libname = lib.provides if lib is not None else None
self.project.hook(addr, SIM_PROCEDURES['stubs']['ReturnUnconstrained'](cc=cc, display_name=name, library_name=libname, is_stub=True))
def analyze(self, addr):
|
register_analysis(CalleeCleanupFinder, 'CalleeCleanupFinder')
| seen = set()
todo = [addr]
while todo:
addr = todo.pop(0)
seen.add(addr)
irsb = self.project.factory.block(addr, opt_level=0).vex
if irsb.jumpkind == 'Ijk_Ret':
# got it!
for stmt in reversed(irsb.statements):
if stmt.tag == 'Ist_IMark':
l.error("VERY strange return instruction at %#x...", addr)
break
if stmt.tag == 'Ist_WrTmp':
if stmt.data.tag == 'Iex_Binop':
if stmt.data.op.startswith('Iop_Add'):
return stmt.data.args[1].con.value - self.project.arch.bytes
elif irsb.jumpkind == 'Ijk_Call':
if addr + irsb.size not in seen:
todo.append(addr + irsb.size)
else:
todo.extend(irsb.constant_jump_targets - seen)
return None |
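# A minimal usage sketch for the analysis above (the binary name is a
# placeholder; assumes an x86 PE whose imports use callee-cleanup calling
# conventions such as stdcall):
import angr

proj = angr.Project('example.exe', auto_load_libs=False)
finder = proj.analyses.CalleeCleanupFinder(hook_all=True)
for addr, size in finder.results.items():
    print('function at %#x pops %d bytes of arguments' % (addr, size))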
test_framework.py | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework():
"""Base class for a simulacrum test script.
Individual simulacrum test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
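    # A minimal subclass sketch (illustrative only, not part of this module):
    #
    #     class ExampleTest(BitcoinTestFramework):
    #         def set_test_params(self):
    #             self.num_nodes = 2
    #             self.setup_clean_chain = True
    #
    #         def run_test(self):
    #             self.nodes[0].generate(10)
    #             self.sync_all()
    #
    #     if __name__ == '__main__':
    #         ExampleTest().main()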
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.supports_cli = False
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave simulacrumds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop simulacrumds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing simulacrumd/simulacrum-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
help="use bitcoin-cli instead of RPC for all commands")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
time.sleep(5)
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: simulacrumds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
"""Start a simulacrumd"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
time.sleep(10)
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple simulacrumds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
time.sleep(10)
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a simulacrumd test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple simulacrumd test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
time.sleep(5)
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
self.stop_node(i)
except Exception as e:
assert 'simulacrumd exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "simulacrumd should have exited with an error"
else:
assert_msg = "simulacrumd should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
        For backward compatibility of the python scripts with previous
versions of the cache, this helper function sets mocktime to Jan 1,
2014 + (201 * 10 * 60)"""
self.mocktime = 1454124732 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as simulacrumd's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [os.getenv("BITCOIND", "simulacrumd"), "-spendzeroconfchange=1", "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
self.enable_mocktime()
block_time = self.mocktime - (201 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
for entry in os.listdir(cache_path(i)):
if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'zerocoin', 'backups']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
class ComparisonTestFramework(BitcoinTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some simulacrumd binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser): | parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "simulacrumd"),
help="simulacrumd binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
self.start_nodes()
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message | parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "simulacrumd"),
help="simulacrumd binary to test") |
helper_test.go | // +build all utilities helper
package utilities
import (
"errors"
"math"
"reflect"
"testing"
a "github.com/LuigiAndrea/test-helper/assertions"
m "github.com/LuigiAndrea/test-helper/messages"
)
func TestPreAppend(t *testing.T) {
type testData struct {
expectedValues, newElements, testValues []interface{}
}
tests := []testData{
{testValues: []interface{}{1, 2, -6, 111},
expectedValues: []interface{}{12, 3, 1, 2, -6, 111},
newElements: []interface{}{3, 12}},
{testValues: []interface{}{"Ciao", "Hello", "Car"},
expectedValues: []interface{}{"NewCiao", "Ciao", "Hello", "Car"},
newElements: []interface{}{"NewCiao"}},
{testValues: []interface{}{"Ciao", "Hello", "Car"},
expectedValues: []interface{}{123, "Ciao", "Hello", "Car"},
newElements: []interface{}{123}},
}
for i, test := range tests {
data := PreAppend(test.testValues, test.newElements...)
if err := a.AssertSlicesEqual(a.DataSlicesMatch{Expected: test.expectedValues, Actual: data}); err != nil {
t.Error(m.ErrorMessageTestCount(i+1, err))
}
}
}
func TestReverse(t *testing.T) {
type testData struct {
in, out []byte
}
tests := []testData{
{in: []byte{125, 55, 90, 127}, out: []byte{127, 90, 55, 125}},
{in: []byte{}, out: []byte{}},
{in: nil, out: nil},
}
for i, test := range tests {
Reverse(ByteSlice(test.in), 0, len(test.in)-1)
if err := a.AssertSlicesEqual(a.ByteSlicesMatch{Expected: test.out, Actual: test.in}); err != nil {
t.Error(m.ErrorMessageTestCount(i+1, err))
}
}
}
func TestByteSliceType(t *testing.T) {
type swapLessIndex struct {
i, j int
}
type testData struct {
in, out []byte
lengthOut int
lessOut bool
swapLessIndexes swapLessIndex
}
tests := []testData{
{in: []byte{125, 55, 90, 112}, lengthOut: 4, lessOut: false, swapLessIndexes: swapLessIndex{i: 0, j: 3}},
{in: []byte{125, 55, 90}, lengthOut: 3, lessOut: true, swapLessIndexes: swapLessIndex{i: 1, j: 2}},
}
for i, test := range tests {
byteSlice := ByteSlice(test.in)
if err := a.AssertDeepEqual(test.lengthOut, byteSlice.Len()); err != nil {
t.Error(m.ErrorMessageTestCount(i+1, err))
}
if err := a.AssertDeepEqual(test.lessOut, byteSlice.Less(test.swapLessIndexes.i, test.swapLessIndexes.j)); err != nil {
t.Error(m.ErrorMessageTestCount(i+1, err))
}
byteSlice.Swap(test.swapLessIndexes.i, test.swapLessIndexes.j)
if err := a.AssertSlicesEqual(a.ByteSlicesMatch{Expected: byteSlice, Actual: test.in}); err != nil {
t.Error(m.ErrorMessageTestCount(i+1, err))
}
}
}
func | (t *testing.T) {
type testData struct {
in []float64
out float64
}
tests := []testData{
{in: []float64{18.2, 12.4, 3.5}, out: 18.2},
{in: []float64{}, out: math.Inf(-1)},
{in: nil, out: math.Inf(-1)},
{in: []float64{1.2, -12.4, 3.5}, out: 3.5},
{in: []float64{3.75}, out: 3.75},
{in: []float64{3.75, math.Inf(-1)}, out: 3.75},
{in: []float64{3.75, math.Inf(1)}, out: math.Inf(5)},
{in: []float64{-3.75, math.Inf(-1)}, out: -3.75},
{in: []float64{math.Inf(1), math.Inf(-1)}, out: math.Inf(0)},
}
for i, test := range tests {
if err := a.AssertDeepEqual(test.out, Max(test.in...)); err != nil {
t.Error(m.ErrorMessageTestCount(i+1, err))
}
}
}
func TestRound(t *testing.T) {
type testData struct {
in float64
in2 int
out float64
}
tests := []testData{
{in: 183.467, in2: 2, out: 183.47},
{in: 183.467, in2: 5, out: 183.46700},
{in: 146.7032, in2: 0, out: 147},
{in: -12.455787, in2: 2, out: -12.46},
{in: 1.79, in2: 308, out: 1.79},
}
for i, test := range tests {
num, _ := Round(test.in, test.in2)
if err := a.AssertDeepEqual(test.out, num); err != nil {
t.Error(m.ErrorMessageTestCount(i+1, err))
}
}
}
func TestRoundEdgeCases(t *testing.T) {
type testException struct {
number float64
decimalPlaces int
expected error
}
errorString := "parameters too big"
tests := []testException{
{number: 1.80, decimalPlaces: 308, expected: &RoundError{Err: errorString}},
{number: 180.43, decimalPlaces: 3108, expected: &RoundError{Err: errorString}},
{number: 1.80, decimalPlaces: -3, expected: &RoundError{Err: "utilities.Round: decimalPlace parameter must be a positive number"}},
}
for i, test := range tests {
_, e := Round(test.number, test.decimalPlaces)
if err := a.AssertDeepException(test.expected, e); err != nil {
t.Error(m.ErrorMessageTestCount(i+1, err))
}
}
}
func TestRoundWrongException(t *testing.T) {
type testException struct {
number float64
decimalPlaces int
expected error
}
errorString := "utilities.Round: decimalPlace parameter must be a positive number"
tests := []testException{
{number: 1.80, decimalPlaces: -3, expected: errors.New(errorString)},
}
for _, test := range tests {
_, e := Round(test.number, test.decimalPlaces)
if err := a.AssertDeepException(test.expected, e); err == nil { //The expected exception should be RoundError
t.Error("Expected exception!")
} else {
a.AssertDeepEqual(errorString, e.Error()) //check the message returned
}
}
}
//Just run the function and check the type of the function returned
func TestElapse(t *testing.T) {
var funcExpected func()
funcReturned := Elapse(m.GetFuncName(Elapse))
//do some work
for i := 0; i < 1000000; i++ {
math.Cos(float64(i))
}
funcReturned()
if err := a.AssertDeepEqual(reflect.TypeOf(funcExpected), reflect.TypeOf(funcReturned)); err != nil {
t.Error(err.Error())
}
}
func TestGetDistance(t *testing.T) {
type testData struct {
value1, value2, result float64
}
tests := []testData{
{value1: 12, value2: -1, result: 13},
{value1: 0, value2: -1, result: 1},
{value1: 12, value2: 12, result: 0},
{value1: 12, value2: math.Inf(0), result: math.Inf(0)},
{value1: math.Inf(-1), value2: 12, result: math.Inf(0)},
}
for i, test := range tests {
if err := a.AssertDeepEqual(GetDistance(test.value1, test.value2), test.result); err != nil {
t.Error(m.ErrorMessageTestCount(i+1, err))
}
}
}
| TestMax |
volume_host.go | /*
Copyright 2016 The Kubernetes Authors.
Copyright 2020 Authors of Arktos - file modified.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"fmt"
"net"
"runtime"
"k8s.io/klog"
authenticationv1 "k8s.io/api/authentication/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
storagelisters "k8s.io/client-go/listers/storage/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/configmap"
"k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/mountpod"
"k8s.io/kubernetes/pkg/kubelet/secret"
"k8s.io/kubernetes/pkg/kubelet/token"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
execmnt "k8s.io/kubernetes/pkg/volume/util/exec"
"k8s.io/kubernetes/pkg/volume/util/subpath"
)
// NewInitializedVolumePluginMgr returns a new instance of
// volume.VolumePluginMgr initialized with kubelets implementation of the
// volume.VolumeHost interface.
//
// kubelet - used by VolumeHost methods to expose kubelet specific parameters
// plugins - used to initialize volumePluginMgr
func NewInitializedVolumePluginMgr(
kubelet *Kubelet,
secretManager secret.Manager,
configMapManager configmap.Manager,
tokenManager *token.Manager,
plugins []volume.VolumePlugin,
prober volume.DynamicPluginProber) (*volume.VolumePluginMgr, error) |
// Compile-time check to ensure kubeletVolumeHost implements the VolumeHost interface
var _ volume.VolumeHost = &kubeletVolumeHost{}
var _ volume.KubeletVolumeHost = &kubeletVolumeHost{}
func (kvh *kubeletVolumeHost) GetPluginDir(pluginName string) string {
return kvh.kubelet.getPluginDir(pluginName)
}
type kubeletVolumeHost struct {
kubelet *Kubelet
volumePluginMgr volume.VolumePluginMgr
secretManager secret.Manager
tokenManager *token.Manager
configMapManager configmap.Manager
mountPodManager mountpod.Manager
informerFactory informers.SharedInformerFactory
csiDriverLister storagelisters.CSIDriverLister
csiDriversSynced cache.InformerSynced
}
func (kvh *kubeletVolumeHost) SetKubeletError(err error) {
kvh.kubelet.runtimeState.setStorageState(err)
}
func (kvh *kubeletVolumeHost) GetVolumeDevicePluginDir(pluginName string) string {
return kvh.kubelet.getVolumeDevicePluginDir(pluginName)
}
func (kvh *kubeletVolumeHost) GetPodsDir() string {
return kvh.kubelet.getPodsDir()
}
func (kvh *kubeletVolumeHost) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
dir := kvh.kubelet.getPodVolumeDir(podUID, pluginName, volumeName)
if runtime.GOOS == "windows" {
dir = util.GetWindowsPath(dir)
}
return dir
}
func (kvh *kubeletVolumeHost) GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string {
return kvh.kubelet.getPodVolumeDeviceDir(podUID, pluginName)
}
func (kvh *kubeletVolumeHost) GetPodPluginDir(podUID types.UID, pluginName string) string {
return kvh.kubelet.getPodPluginDir(podUID, pluginName)
}
func (kvh *kubeletVolumeHost) GetKubeClient() clientset.Interface {
return kvh.kubelet.heartbeatClient
}
func (kvh *kubeletVolumeHost) GetSubpather() subpath.Interface {
return kvh.kubelet.subpather
}
func (kvh *kubeletVolumeHost) GetInformerFactory() informers.SharedInformerFactory {
return kvh.informerFactory
}
func (kvh *kubeletVolumeHost) CSIDriverLister() storagelisters.CSIDriverLister {
return kvh.csiDriverLister
}
func (kvh *kubeletVolumeHost) CSIDriversSynced() cache.InformerSynced {
return kvh.csiDriversSynced
}
// WaitForCacheSync is a helper function that waits for cache sync for CSIDriverLister
func (kvh *kubeletVolumeHost) WaitForCacheSync() error {
if kvh.csiDriversSynced == nil {
klog.Error("csiDriversSynced not found on KubeletVolumeHost")
return fmt.Errorf("csiDriversSynced not found on KubeletVolumeHost")
}
synced := []cache.InformerSynced{kvh.csiDriversSynced}
if !cache.WaitForCacheSync(wait.NeverStop, synced...) {
klog.Warning("failed to wait for cache sync for CSIDriverLister")
return fmt.Errorf("failed to wait for cache sync for CSIDriverLister")
}
return nil
}
func (kvh *kubeletVolumeHost) NewWrapperMounter(
volName string,
spec volume.Spec,
pod *v1.Pod,
opts volume.VolumeOptions) (volume.Mounter, error) {
// The name of wrapper volume is set to "wrapped_{wrapped_volume_name}"
wrapperVolumeName := "wrapped_" + volName
if spec.Volume != nil {
spec.Volume.Name = wrapperVolumeName
}
return kvh.kubelet.newVolumeMounterFromPlugins(&spec, pod, opts)
}
func (kvh *kubeletVolumeHost) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) {
// The name of wrapper volume is set to "wrapped_{wrapped_volume_name}"
wrapperVolumeName := "wrapped_" + volName
if spec.Volume != nil {
spec.Volume.Name = wrapperVolumeName
}
plugin, err := kvh.kubelet.volumePluginMgr.FindPluginBySpec(&spec)
if err != nil {
return nil, err
}
return plugin.NewUnmounter(spec.Name(), podUID)
}
func (kvh *kubeletVolumeHost) GetCloudProvider() cloudprovider.Interface {
return kvh.kubelet.cloud
}
func (kvh *kubeletVolumeHost) GetMounter(pluginName string) mount.Interface {
exec, err := kvh.getMountExec(pluginName)
if err != nil {
klog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error())
// Use the default mounter
exec = nil
}
if exec == nil {
return kvh.kubelet.mounter
}
return execmnt.NewExecMounter(exec, kvh.kubelet.mounter)
}
func (kvh *kubeletVolumeHost) GetHostName() string {
return kvh.kubelet.hostname
}
func (kvh *kubeletVolumeHost) GetHostIP() (net.IP, error) {
return kvh.kubelet.GetHostIP()
}
func (kvh *kubeletVolumeHost) GetNodeAllocatable() (v1.ResourceList, error) {
node, err := kvh.kubelet.getNodeAnyWay()
if err != nil {
return nil, fmt.Errorf("error retrieving node: %v", err)
}
return node.Status.Allocatable, nil
}
func (kvh *kubeletVolumeHost) GetSecretFunc() func(tenant, namespace, name string) (*v1.Secret, error) {
return kvh.secretManager.GetSecret
}
func (kvh *kubeletVolumeHost) GetConfigMapFunc() func(tenant, namespace, name string) (*v1.ConfigMap, error) {
return kvh.configMapManager.GetConfigMap
}
func (kvh *kubeletVolumeHost) GetServiceAccountTokenFunc() func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return kvh.tokenManager.GetServiceAccountToken
}
func (kvh *kubeletVolumeHost) DeleteServiceAccountTokenFunc() func(podUID types.UID) {
return kvh.tokenManager.DeleteServiceAccountToken
}
func (kvh *kubeletVolumeHost) GetNodeLabels() (map[string]string, error) {
node, err := kvh.kubelet.GetNode()
if err != nil {
return nil, fmt.Errorf("error retrieving node: %v", err)
}
return node.Labels, nil
}
func (kvh *kubeletVolumeHost) GetNodeName() types.NodeName {
return kvh.kubelet.nodeName
}
func (kvh *kubeletVolumeHost) GetEventRecorder() record.EventRecorder {
return kvh.kubelet.recorder
}
func (kvh *kubeletVolumeHost) GetExec(pluginName string) mount.Exec {
exec, err := kvh.getMountExec(pluginName)
if err != nil {
klog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error())
// Use the default exec
exec = nil
}
if exec == nil {
return mount.NewOsExec()
}
return exec
}
// getMountExec returns mount.Exec implementation that leads to pod with mount
// utilities. It returns nil,nil when there is no such pod and default mounter /
// os.Exec should be used.
func (kvh *kubeletVolumeHost) getMountExec(pluginName string) (mount.Exec, error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.MountContainers) {
klog.V(5).Infof("using default mounter/exec for %s", pluginName)
return nil, nil
}
pod, container, err := kvh.mountPodManager.GetMountPod(pluginName)
if err != nil {
return nil, err
}
if pod == nil {
// Use default mounter/exec for this plugin
klog.V(5).Infof("using default mounter/exec for %s", pluginName)
return nil, nil
}
klog.V(5).Infof("using container %s/%s/%s to execute mount utilities for %s", pod.Namespace, pod.Name, container, pluginName)
return &containerExec{
pod: pod,
containerName: container,
kl: kvh.kubelet,
}, nil
}
// containerExec is implementation of mount.Exec that executes commands in given
// container in given pod.
type containerExec struct {
pod *v1.Pod
containerName string
kl *Kubelet
}
var _ mount.Exec = &containerExec{}
func (e *containerExec) Run(cmd string, args ...string) ([]byte, error) {
cmdline := append([]string{cmd}, args...)
klog.V(5).Infof("Exec mounter running in pod %s/%s/%s: %v", e.pod.Namespace, e.pod.Name, e.containerName, cmdline)
return e.kl.RunInContainer(container.GetPodFullName(e.pod), e.pod.UID, e.containerName, cmdline)
}
| {
// Initialize csiDriverLister before calling InitPlugins
var informerFactory informers.SharedInformerFactory
var csiDriverLister storagelisters.CSIDriverLister
var csiDriversSynced cache.InformerSynced
const resyncPeriod = 0
if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
if kubelet.heartbeatClient != nil {
informerFactory = informers.NewSharedInformerFactory(kubelet.heartbeatClient, resyncPeriod)
csiDriverInformer := informerFactory.Storage().V1beta1().CSIDrivers()
csiDriverLister = csiDriverInformer.Lister()
csiDriversSynced = csiDriverInformer.Informer().HasSynced
} else {
klog.Warning("kubeClient is nil. Skip initialization of CSIDriverLister")
}
}
mountPodManager, err := mountpod.NewManager(kubelet.getRootDir(), kubelet.podManager)
if err != nil {
return nil, err
}
kvh := &kubeletVolumeHost{
kubelet: kubelet,
volumePluginMgr: volume.VolumePluginMgr{},
secretManager: secretManager,
configMapManager: configMapManager,
tokenManager: tokenManager,
mountPodManager: mountPodManager,
informerFactory: informerFactory,
csiDriverLister: csiDriverLister,
csiDriversSynced: csiDriversSynced,
}
if err := kvh.volumePluginMgr.InitPlugins(plugins, prober, kvh); err != nil {
return nil, fmt.Errorf(
"Could not initialize volume plugins for KubeletVolumePluginMgr: %v",
err)
}
return &kvh.volumePluginMgr, nil
} |
refresh_tasks.py | ####
# This script demonstrates how to use the Tableau Server Client
# to query extract refresh tasks and run them as needed.
#
# To run the script, you must have installed Python 3.5 or later.
####
import argparse
import getpass
import logging
import tableauserverclient as TSC
def handle_run(server, args):
task = server.tasks.get_by_id(args.id)
print(server.tasks.run(task))
def handle_list(server, _):
tasks, pagination = server.tasks.get()
for task in tasks:
print("{}".format(task))
def handle_info(server, args):
task = server.tasks.get_by_id(args.id)
print("{}".format(task))
def main():
parser = argparse.ArgumentParser(description='Get all of the refresh tasks available on a server')
parser.add_argument('--server', '-s', required=True, help='server address')
parser.add_argument('--username', '-u', required=True, help='username to sign into server')
parser.add_argument('--site', '-S', default=None)
parser.add_argument('-p', default=None)
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
subcommands = parser.add_subparsers()
list_arguments = subcommands.add_parser('list')
list_arguments.set_defaults(func=handle_list)
run_arguments = subcommands.add_parser('run')
run_arguments.add_argument('id', default=None)
run_arguments.set_defaults(func=handle_run)
info_arguments = subcommands.add_parser('info')
info_arguments.add_argument('id', default=None)
info_arguments.set_defaults(func=handle_info)
args = parser.parse_args()
if args.p is None:
password = getpass.getpass("Password: ")
else:
password = args.p
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
| server.version = '2.6'
with server.auth.sign_in(tableau_auth):
args.func(server, args)
if __name__ == '__main__':
main() | # SIGN IN
tableau_auth = TSC.TableauAuth(args.username, password, args.site)
server = TSC.Server(args.server) |
downloads.py | """Download example datasets from https://github.com/pyansys/example-data"""
import shutil
import os
import urllib.request
EXAMPLE_REPO = "https://github.com/pyansys/example-data/raw/master/result_files/"
def delete_downloads():
"""Delete all downloaded examples to free space or update the files"""
from ansys.dpf.core import LOCAL_DOWNLOADED_EXAMPLES_PATH
shutil.rmtree(LOCAL_DOWNLOADED_EXAMPLES_PATH)
os.makedirs(LOCAL_DOWNLOADED_EXAMPLES_PATH)
def _get_file_url(directory, filename):
return EXAMPLE_REPO + "/".join([directory, filename])
def _retrieve_file(url, filename, directory):
"""Download a file from a url"""
from ansys.dpf.core import LOCAL_DOWNLOADED_EXAMPLES_PATH, path_utilities
# First check if file has already been downloaded
local_path = os.path.join(LOCAL_DOWNLOADED_EXAMPLES_PATH, directory,
os.path.basename(filename))
local_path_no_zip = local_path.replace(".zip", "")
if os.path.isfile(local_path_no_zip) or os.path.isdir(local_path_no_zip):
return path_utilities.to_server_os(local_path_no_zip.replace(
LOCAL_DOWNLOADED_EXAMPLES_PATH,
path_utilities.downloaded_example_path()))
# grab the correct url retriever
urlretrieve = urllib.request.urlretrieve
dirpath = os.path.dirname(local_path)
if not os.path.isdir(dirpath):
os.mkdir(dirpath)
# Perform download
_, resp = urlretrieve(url, local_path)
return path_utilities.to_server_os(local_path.replace(
LOCAL_DOWNLOADED_EXAMPLES_PATH,
path_utilities.downloaded_example_path()))
def _download_file(directory, filename):
url = _get_file_url(directory, filename)
local_path = _retrieve_file(url, filename, directory)
return local_path
###############################################################################
# front-facing functions
def | () -> str:
"""Download an example transient result file and return the download path.
Examples files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
str
Path to the example file.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> path = examples.transient_result
>>> path
'C:/Users/user/AppData/local/temp/transient.rst'
"""
return _download_file("transient", "transient.rst")
def download_all_kinds_of_complexity() -> str:
"""Download an example static result and return the download path.
Examples files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
str
Path to the example file.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
    >>> path = examples.download_all_kinds_of_complexity()
>>> path
'C:/Users/user/AppData/local/temp/allKindOfComplexity.rst'
"""
return _download_file("testing", "allKindOfComplexity.rst")
def download_all_kinds_of_complexity_modal() -> str:
"""Download an example result file from a static modal analysis and
return the download path.
    Example files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
str
Path to the example file.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> path = examples.download_all_kinds_of_complexity_modal()
>>> path
'C:/Users/user/AppData/local/temp/modal_allKindOfComplexity.rst'
"""
return _download_file("testing", "modal_allKindOfComplexity.rst")
def download_pontoon() -> str:
"""Download an example result file from a static modal analsys and
return the download path.
    Example files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
str
Path to the example file.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> path = examples.download_pontoon()
>>> path
'C:/Users/user/AppData/local/temp/pontoon.rst'
"""
return _download_file("docs", "pontoon.rst")
def download_multi_harmonic_result() -> str:
"""Download an example multi-harmonic result file and return the
download path.
    Example files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
str
Path to the example file.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> path = examples.download_multi_harmonic_result()
>>> path
'C:/Users/user/AppData/local/temp/file_harmonic_5rpms.rst'
"""
return _download_file("harmonic", "file_harmonic_5rpms.rst")
def download_multi_stage_cyclic_result() -> str:
"""Download an example multi stage result file and return the
download path.
    Example files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
str
Path to the example file.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> path = examples.download_multi_stage_cyclic_result()
>>> path
'C:/Users/user/AppData/local/temp/multistage.rst'
"""
return _download_file("multistage", "multistage.rst")
def download_sub_file() -> str:
"""Download an example .sub result file containing matrices and return the
download path.
    Example files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
str
Path to the example file.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> path = examples.download_sub_file()
>>> path
'C:\\Users\\user\\AppData\\Local\\ansys-dpf-core\\ansys-dpf-core\\examples\\sub\\cp56.sub'
"""
return _download_file("sub", "cp56.sub")
def download_msup_files_to_dict() -> dict:
"""Download all the files necessary for a msup expansion and return the
download paths into a dictionary extension->path.
    Example files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
dict[str:str]
Path to the example files.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> paths = examples.download_msup_files_to_dict()
>>> paths
{'rfrq': 'C:\\Users\\user\\AppData\\Local\\ansys-dpf-core\\ansys-dpf-core\\examples\\msup\\file.rfrq',
'mode': 'C:\\Users\\user\\AppData\\Local\\ansys-dpf-core\\ansys-dpf-core\\examples\\msup\\file.mode',
'rst': 'C:\\Users\\user\\AppData\\Local\\ansys-dpf-core\\ansys-dpf-core\\examples\\msup\\file.rst'} # noqa: E501
"""
return {
"rfrq": _download_file("msup", "file.rfrq"),
"mode": _download_file("msup", "file.mode"),
"rst": _download_file("msup", "file.rst"),
}
def download_distributed_files() -> dict:
"""Download distributed rst files and return the
download paths into a dictionary domain id->path.
    Example files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
dict[int:str]
Path to the example files.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> paths = examples.download_distributed_files()
>>> paths
{0: 'C:\\Users\\user\\AppData\\Local\\ansys-dpf-core\\ansys-dpf-core\\examples\\distributed\\file0.rst',
1: 'C:\\Users\\user\\AppData\\Local\\ansys-dpf-core\\ansys-dpf-core\\examples\\distributed\\file1.rst'} # noqa: E501
"""
return {
0: _download_file("distributed", "file0.rst"),
1: _download_file("distributed", "file1.rst"),
}
def download_fluent_files() -> dict:
"""Download the cas and dat file of a fluent analysis and return the
download paths into a dictionary extension->path.
    Example files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
dict[str:str]
Path to the example files.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> paths = examples.download_fluent_files()
>>> paths
{'cas': 'C:\\Users\\user\\AppData\\Local\\ansys-dpf-core\\ansys-dpf-core\\examples\\fluent\\FFF.cas.h5',
'dat': 'C:\\Users\\user\\AppData\\Local\\ansys-dpf-core\\ansys-dpf-core\\examples\\fluent\\FFF.dat.h5'} # noqa: E501
"""
return {
"cas": _download_file("fluent", "FFF.cas.h5"),
"dat": _download_file("fluent", "FFF.dat.h5"),
}
def download_extrapolation_3d_result() -> dict:
"""Download example static results of reference and integrated points
    for extrapolation of 3d-element and return the dictionary of 2 download paths.
    Example files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
dict
containing path to the example file of ref and path to the example
file of integrated points.
Examples
--------
Download 2 example result files and return the dictionary containing 2 files
>>> from ansys.dpf.core import examples
    >>> dict = examples.download_extrapolation_3d_result()
>>> dict
{
'file_ref': 'C:/Users/user/AppData/local/temp/file_ref.rst',
'file_integrated': 'C:/Users/user/AppData/local/temp/file.rst'
}
"""
dict = {
"file_ref": _download_file("extrapolate", "file_ref.rst"),
"file_integrated": _download_file("extrapolate", "file.rst"),
}
return dict
def download_extrapolation_2d_result() -> dict:
"""Download example static results of reference and integrated points
for extrapolation of 2d-element and return the dictionary of 2 download paths.
    Example files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
dict
Contains path to the example file of ref and path to the example
file of integrated points.
Examples
--------
Download 2 example result files and return the dictionary containing 2 files
>>> from ansys.dpf.core import examples
    >>> dict = examples.download_extrapolation_2d_result()
>>> dict
{
'file_ref': 'C:/Users/user/AppData/local/temp/extrapolate_2d_ref.rst',
'file_integrated': 'C:/Users/user/AppData/local/temp/extrapolate_2d.rst'
}
"""
dict = {
"file_ref": _download_file("extrapolate", "extrapolate_2d_ref.rst"),
"file_integrated": _download_file("extrapolate", "extrapolate_2d.rst"),
}
return dict
def download_hemisphere() -> str:
"""Download an example result file from a static analysis and
return the download path.
    Example files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
str
Path to the example file.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> path = examples.download_hemisphere()
>>> path
'C:/Users/user/AppData/local/temp/hemisphere.rst'
"""
return _download_file("hemisphere", "hemisphere.rst")
| download_transient_result |
mail.module.ts | import { Module } from '@nestjs/common';
import { MailService } from './mail.service';
@Module({
providers: [MailService],
exports: [MailService]
})
export class MailModule {
| } |
|
code.py | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
data = pd.read_csv(path)
data.rename(columns={"Total":"Total_Medals"}, inplace=True)
data.head(10)
#Code starts here
# --------------
#Code starts here
data['Better_Event'] = np.where(data['Total_Summer']>data['Total_Winter'],'Summer', 'Winter')
data['Better_Event'] = np.where(data['Total_Summer']==data['Total_Winter'], 'Both', data['Better_Event'])
better_event = data.Better_Event.value_counts().index[0]
print(better_event)
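#Illustrative sketch (not part of the original solution): the same nested
#np.where pattern on a tiny made-up frame, showing how ties become 'Both'
_demo = pd.DataFrame({'Total_Summer': [3, 1, 2], 'Total_Winter': [1, 4, 2]})
_demo['Better_Event'] = np.where(_demo['Total_Summer'] > _demo['Total_Winter'], 'Summer', 'Winter')
_demo['Better_Event'] = np.where(_demo['Total_Summer'] == _demo['Total_Winter'], 'Both', _demo['Better_Event'])
#_demo['Better_Event'] is now ['Summer', 'Winter', 'Both']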
# --------------
#Code starts here
top_countries = data[['Country_Name', 'Total_Summer', 'Total_Winter', 'Total_Medals']]
top_countries.drop(top_countries.tail(1).index, inplace=True, axis=0)
print(top_countries)
def | (top_countries,c_name):
country_list=[]
df = top_countries.nlargest(10, [c_name])
country_list = list(df['Country_Name'])
return(country_list)
top_10_summer = top_ten(top_countries,'Total_Summer')
top_10_winter = top_ten(top_countries,'Total_Winter')
top_10 = top_ten(top_countries,'Total_Medals')
common = list(set(top_10_summer) & set(top_10_winter) & set(top_10))
print(common)
# --------------
#Code starts here
summer_df = data[data['Country_Name'].isin(top_10_summer)]
winter_df = data[data['Country_Name'].isin(top_10_winter)]
top_df = data[data['Country_Name'].isin(top_10)]
summer_df.plot(kind='bar')
winter_df.plot(kind='bar')
top_df.plot(kind='bar')
# --------------
#Code starts here
summer_df['Golden_Ratio'] = summer_df['Gold_Summer']/summer_df['Total_Summer']
summer_max_ratio = summer_df.Golden_Ratio.max()
summer_country_gold = summer_df.loc[summer_df.Golden_Ratio==summer_df.Golden_Ratio.max(), 'Country_Name'].values[0]
winter_df['Golden_Ratio'] = winter_df['Gold_Winter']/winter_df['Total_Winter']
winter_max_ratio = winter_df.Golden_Ratio.max()
winter_country_gold = winter_df.loc[winter_df.Golden_Ratio==winter_df.Golden_Ratio.max(), 'Country_Name'].values[0]
top_df['Golden_Ratio'] = top_df['Gold_Total']/top_df['Total_Medals']
top_max_ratio = top_df.Golden_Ratio.max()
top_country_gold = top_df.loc[top_df.Golden_Ratio==top_df.Golden_Ratio.max(), 'Country_Name'].values[0]
# --------------
#Code starts here
data_1=data[:-1]
data_1['Total_Points']=data_1['Gold_Total']*3 + data_1['Silver_Total']*2 + data_1['Bronze_Total']*1
most_points = data_1.Total_Points.max()
best_country = data_1.loc[data_1.Total_Points==data_1.Total_Points.max(), 'Country_Name'].values[0]
# --------------
#Code starts here
best = data[data['Country_Name']==best_country]
best=best[['Gold_Total','Silver_Total','Bronze_Total']]
best.plot.bar(stacked=True)
plt.xlabel('United States')
plt.ylabel('Medals Tally')
plt.xticks(rotation=45)
| top_ten |
data_stream.py | import time
from threading import Event, RLock
from mltk.utils import hexdump
from .device_interface import DeviceInterface, MAX_BUFFER_SIZE
WAIT_FOREVER = 4294967.0
class JLinkDataStream(object):
"""JLink data stream"""
def __init__(
self,
name:str,
mode:str,
ifc: DeviceInterface,
stream_context: dict
):
self._name = name
self._mode = mode
self._ifc = ifc
self._context = stream_context
self._is_opened = Event()
self._buffer = bytearray()
self._buffer_lock = RLock()
self._buffer_event = Event()
self._notify_event = None
self._max_read_size = -1
self._timeout = -1
self._end_time = -1
self._requires_processing = False
self._id_mask = (1 << stream_context['id'])
self._is_opened.set()
@property
def name(self) -> str:
"""The name of the opened stream"""
return self._name
@property
def mode(self) -> str:
"""The mode the for which the stream was opened, r or w"""
return self._mode
@property
def is_opened(self) -> bool:
"""If the stream is opened to the device"""
return self._is_opened.is_set()
@property
def max_read_size(self) -> int:
"""The maximum amount of data to read
Set to -1 to disable limit
After each read, this value will decrement by the amount of data read.
        Once this value reaches zero, it must be reset, otherwise subsequent reads
will always return zero.
"""
return self._max_read_size
@max_read_size.setter
def max_read_size(self, val:int):
if val is None:
val = -1
self._max_read_size = val
@property
def timeout(self) -> float:
"""The maximum about of time in seconds to read or write data.
This is only used if the 'timeout' argument to the read() or write() APIs is None
Set to -1 to never timeout
"""
return self._timeout
@timeout.setter
def timeout(self, val: float):
if val is None:
val = -1
self._timeout = val
@property
def end_time(self) -> float:
"""The absolute time in seconds to timeout reading or writing
Set to None to disable.
        If time.time() exceeds end_time, then the read() or write() call returns
"""
return self._end_time
@end_time.setter
def end_time(self, val:float):
if val is None:
val = -1
self._end_time = val
@property
def buffer_used(self) -> int:
"""The amount of the device data buffer used
If the stream was opened for reading then this
        is the amount of data that was previously received from
the device and is waiting to be read by the python script.
If the stream was opened for writing, then this is
the amount of data that was previously written and is
pending to be sent to the device.
"""
with self._buffer_lock:
retval = len(self._buffer)
return retval
@property
def buffer_unused(self) -> int:
"""The amount of the device data buffer that is available"""
with self._buffer_lock:
retval = MAX_BUFFER_SIZE - len(self._buffer)
return retval
@property
def read_data_available(self) -> int:
"""The amount of data that is ready to be read by the python script"""
return self.buffer_used
@property
def write_data_available(self) -> int:
"""The amount of data that can immediately be written"""
return self.buffer_unused
@property
def buffer_hexdump(self, length=64) -> str:
"""Return a hexdump string"""
length = min(length, self.buffer_used)
return hexdump.hexdump(self._buffer[:length], result='return')
def close(self):
"""Close the data stream with the device"""
if self._is_opened.is_set():
self._is_opened.clear()
self._buffer_event.set()
self._ifc.close(self._name)
def read(self, max_size:int = None, timeout:float=None) -> bytes:
"""Read data from data stream opened for reading
        NOTE: This only returns the data that is immediately available.
The amount of data returned may be less than max_size.
"""
if self.mode != 'r':
raise Exception(f'Stream: {self.name} not opened for reading')
timeout = self._get_timeout(timeout)
max_size = self._get_max_size(max_size)
start_time = time.time()
while True:
self._buffer_event.clear()
if not self.is_opened:
raise Exception(f'Stream: {self.name} closed')
if max_size == 0:
return None
bufsize = min(self.read_data_available, max_size)
if bufsize > 0:
retval = self._consume_buffer(bufsize)
self._notify_event.set()
return bytes(retval)
elapsed = (time.time() - start_time)
if elapsed >= timeout:
return None
if self._end_time > 0:
time_remaining = self._end_time - time.time()
if time_remaining <= 0:
return None
else:
time_remaining = WAIT_FOREVER
self._buffer_event.wait(min(min(timeout - elapsed, time_remaining), 0.100))
def read_all(self, amount:int, timeout:float=None, initial_timeout:float=None, throw_exception=True) -> bytes:
"""The the specified amount of data"""
if initial_timeout is None:
initial_timeout = timeout
retval = bytearray()
remaining = amount
while remaining > 0:
chunk_timeout = initial_timeout if len(retval) == 0 else timeout
chunk = self.read(max_size=remaining, timeout=chunk_timeout)
if chunk is None:
break
remaining -= len(chunk)
retval.extend(chunk)
if len(retval) != amount and throw_exception:
raise Exception('Failed to read all data')
return bytes(retval)
def write(self, data:bytes, timeout:float=None, flush=False) -> int:
"""Write data to a data stream opened for writing"""
if self.mode != 'w':
raise Exception(f'Stream: {self.name} not opened for writing')
timeout = self._get_timeout(timeout)
total_write_len = 0
start_time = time.time()
while len(data) > 0:
self._buffer_event.clear()
if not self.is_opened:
raise Exception(f'Stream: {self.name} closed')
bufsize = min(self.write_data_available, len(data))
if bufsize > 0:
self._populate_buffer(data[:bufsize])
data = data[bufsize:]
total_write_len += bufsize
self._requires_processing = True
self._notify_event.set()
if len(data) == 0:
break
elapsed = (time.time() - start_time)
if elapsed >= timeout:
break
if self._end_time > 0:
time_remaining = self._end_time - time.time()
if time_remaining <= 0:
break
else:
time_remaining = WAIT_FOREVER
self._buffer_event.wait(min(min(timeout - elapsed, time_remaining), 0.100))
if flush:
self.flush(timeout=timeout)
return total_write_len
def flush(self, timeout:float=None):
"""Wait while any pending data is transferred to/from the device"""
timeout = self._get_timeout(timeout)
start_time = time.time()
while self.buffer_used > 0:
self._buffer_event.clear()
if not self.is_opened:
raise Exception(f'Stream: {self.name} closed')
elapsed = (time.time() - start_time)
if elapsed >= timeout:
raise Exception('Time-out waiting for buffer to flush')
if self._end_time > 0:
time_remaining = self._end_time - time.time()
if time_remaining <= 0:
break
else:
time_remaining = WAIT_FOREVER
self._buffer_event.wait(min(min(timeout - elapsed, time_remaining), 0.100))
def _set_notify_event(self, event):
self._notify_event = event
def _process(self, buffer_status_mask):
if not self._requires_processing and (buffer_status_mask & self._id_mask) == 0:
return
self._requires_processing = False
if self.mode == 'r':
max_read_len = self.buffer_unused
if max_read_len > 0:
data = self._ifc.read(self._context, max_read_len)
if data:
self._populate_buffer(data)
else:
self._requires_processing = True
elif self.mode == 'w':
write_len = self._ifc.write(self._context, self._buffer)
if write_len:
self._consume_buffer(write_len)
if self.buffer_used > 0:
self._requires_processing = True
|
def _consume_buffer(self, size) -> bytes:
with self._buffer_lock:
retval = self._buffer[:size]
self._buffer = self._buffer[size:]
if self._max_read_size != -1:
if size <= self._max_read_size:
self._max_read_size -= size
else:
self._max_read_size = 0
if self.mode == 'w':
self._buffer_event.set()
return retval
def _populate_buffer(self, data):
with self._buffer_lock:
if isinstance(data, str):
data = data.encode()
self._buffer.extend(data)
if self.mode == 'r':
self._buffer_event.set()
def _get_timeout(self, timeout:float) -> float:
if timeout is None:
timeout = self._timeout
if timeout == -1:
timeout = WAIT_FOREVER
return timeout
def _get_max_size(self, max_size:int) -> int:
if max_size is None:
max_size = self._max_read_size
if max_size == -1:
max_size = MAX_BUFFER_SIZE
return max_size
def __iter__(self):
return self
def __next__(self):
retval = self.read()
if retval is None:
raise StopIteration # Done iterating.
return retval
def __enter__(self):
return self
def __exit__(self, dtype, value, traceback):
self.close() | |
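# Illustrative sketch (not part of the original module): a typical read-side
# consumer. `stream` is assumed to be a JLinkDataStream opened for reading by
# whatever DeviceInterface helper creates streams (not shown here).
def _example_drain_stream(stream: "JLinkDataStream") -> bytes:
    stream.timeout = 2.0              # per-read timeout, in seconds
    collected = bytearray()
    for chunk in stream:              # __iter__/__next__ stop once read() times out
        collected.extend(chunk)
    stream.close()
    return bytes(collected)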
sync_manager.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Coordinate status between different devices in the current netemul test.
use anyhow::{format_err, Context as _, Error};
use futures::StreamExt;
use log::error;
use fidl_fuchsia_netemul_sync as nsync;
use fuchsia_component as component;
use crate::device;
const BUS_NAME: &str = "nmh::nat::bus";
/// Publishes status of the current device and waits for status updates from other devices in the
/// netemul test.
pub struct | {
device: device::Type,
proxy: nsync::BusProxy,
events: nsync::BusEventStream,
status: [device::Status; 3],
}
impl SyncManager {
/// Attach to the SyncManager, subscribing to the bus as the given device. Waits for all other
/// devices to attach as well before returning a SyncManager object.
pub async fn attach(device: device::Type) -> Result<Self, Error> {
let sync_mgr = component::client::connect_to_protocol::<nsync::SyncManagerMarker>()
.context("unable to connect to sync manager")?;
let (proxy, server_end) = fidl::endpoints::create_proxy::<nsync::BusMarker>()
.context("error creating bus endpoints")?;
sync_mgr
.bus_subscribe(BUS_NAME, &device.to_string(), server_end)
.context("error subscribing to bus")?;
let events = proxy.take_event_stream();
let mut mgr = SyncManager { device, proxy, events, status: [device::STATUS_UNKNOWN; 3] };
mgr.wait_for_status_or_error(device::STATUS_ATTACHED).await?;
Ok(mgr)
}
/// Status of the device with the given name.
fn status_by_name(&mut self, name: String) -> &mut device::Status {
&mut self.status[device::Type::from(name) as usize]
}
/// Handle the event received on the sync bus.
fn handle_event(&mut self, event: nsync::BusEvent) {
match event {
nsync::BusEvent::OnClientAttached { client } => {
self.status_by_name(client).add(device::STATUS_ATTACHED)
}
nsync::BusEvent::OnClientDetached { client } => {
self.status_by_name(client).remove(device::STATUS_ATTACHED)
}
nsync::BusEvent::OnBusData {
data: nsync::Event { code: Some(code), message: Some(message), arguments: _, .. },
} => self.status_by_name(message).add(code.into()),
event @ _ => error!("unrecognized sync bus event: {:?}", event),
}
}
/// Check that all other devices' last known status contains the given status. If any device
/// has an ERROR status, this function returns an error. Otherwise returns true if all devices
/// match the expected status, or false if any of them does not.
fn others_last_known_status_or_error(&self, expected: device::Status) -> Result<bool, Error> {
self.device.others().iter().try_fold(true, |acc, &dev| {
let status = self.status[dev as usize];
if status.contains(device::STATUS_ERROR) {
Err(format_err!(
"error on device {}, expected status {:?}",
dev.to_string(),
expected
))
} else {
Ok(acc && status.contains(expected))
}
})
}
/// Waits for all other devices to reach the expected status. If any device reaches an ERROR
/// state, returns immediately with an error.
pub async fn wait_for_status_or_error(
&mut self,
expected: device::Status,
) -> Result<(), Error> {
loop {
if self.others_last_known_status_or_error(expected)? {
return Ok(());
}
match self.events.next().await {
Some(Ok(event)) => {
self.handle_event(event);
}
err @ _ => error!("error receiving from sync event bus: {:?}", err),
}
}
}
/// Publishes the status of the current device.
pub async fn publish_status(&mut self, status: device::Status) -> Result<(), Error> {
self.status[self.device as usize] = status;
let name: String = self.device.to_string();
self.proxy
.ensure_publish(nsync::Event {
code: Some(status.into()),
message: Some(name),
arguments: None,
..nsync::Event::EMPTY
})
.await
.map_err(|e| e.into())
}
}
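// Illustrative sketch (not part of the original file): the intended call
// sequence. `device::Type::Router` and `device::STATUS_READY` are made-up
// placeholders; the real variants/constants live in the `device` module,
// which is not shown here.
#[allow(dead_code)]
async fn example_sync_flow() -> Result<(), Error> {
    // attach() already waits until every device has joined the bus.
    let mut sync = SyncManager::attach(device::Type::Router).await?;
    // Report local progress, then block until the other devices report it too.
    sync.publish_status(device::STATUS_READY).await?;
    sync.wait_for_status_or_error(device::STATUS_READY).await?;
    Ok(())
}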
| SyncManager |
tendermint.rs | //! Integration tests
/// tendermint kvstore RPC integration tests.
///
/// If you have a kvstore app running on 127.0.0.1:26657,
/// these can be run using:
///
/// cargo test
///
/// Or else, if you have docker installed, you can tell the tests to run an endpoint,
/// by running:
///
/// cargo make
///
/// (Make sure you install cargo-make using `cargo install cargo-make` first.)
///
mod rpc {
use std::cmp::min;
use tendermint_rpc::{
Client, HttpClient, Id, Order, SubscriptionClient, WebSocketClient, WebSocketClientDriver,
};
use futures::StreamExt;
use std::convert::TryFrom;
use std::sync::atomic::{AtomicU8, Ordering};
use tendermint::abci::Log;
use tendermint::abci::{Code, Transaction};
use tendermint::block::Height;
use tendermint::merkle::simple_hash_from_byte_vectors;
use tendermint_rpc::endpoint::tx::Response as ResultTx;
use tendermint_rpc::event::{Event, EventData, TxInfo};
use tendermint_rpc::query::{EventType, Query};
use tokio::time::Duration;
static LOGGING_INIT: AtomicU8 = AtomicU8::new(0);
fn init_logging() {
// Try to only initialize the logging once
if LOGGING_INIT.fetch_add(1, Ordering::SeqCst) == 0 {
tracing_subscriber::fmt::init();
tracing::info!("Test logging initialized");
}
}
pub fn localhost_http_client() -> HttpClient {
init_logging();
HttpClient::new("http://127.0.0.1:26657").unwrap()
}
pub async fn localhost_websocket_client() -> (WebSocketClient, WebSocketClientDriver) {
init_logging();
WebSocketClient::new("ws://127.0.0.1:26657/websocket")
.await
.unwrap()
}
/// `/health` endpoint
#[tokio::test]
async fn health() {
let result = localhost_http_client().health().await;
assert!(result.is_ok(), "health check failed");
}
/// `/abci_info` endpoint
#[tokio::test]
async fn abci_info() {
let abci_info = localhost_http_client().abci_info().await.unwrap();
assert_eq!(abci_info.app_version, 1u64);
assert_eq!(abci_info.data.is_empty(), false);
}
/// `/abci_query` endpoint
#[tokio::test]
async fn abci_query() {
let key = "unpopulated_key".parse().unwrap();
let abci_query = localhost_http_client()
.abci_query(Some(key), vec![], None, false)
.await
.unwrap();
assert_eq!(abci_query.code, Code::Ok);
assert_eq!(abci_query.log, Log::from("does not exist"));
assert_eq!(abci_query.info, String::new());
assert_eq!(abci_query.index, 0);
assert_eq!(&abci_query.key, &Vec::<u8>::new());
assert!(&abci_query.key.is_empty());
assert_eq!(abci_query.value, Vec::<u8>::new());
assert!(abci_query.proof.is_none());
assert!(abci_query.height.value() > 0);
assert_eq!(abci_query.codespace, String::new());
}
/// `/block` endpoint
#[tokio::test]
async fn block() {
let height = 1u64;
let block_info = localhost_http_client()
.block(Height::try_from(height).unwrap())
.await
.unwrap();
assert!(block_info.block.last_commit.is_none());
assert_eq!(block_info.block.header.height.value(), height);
// Check for empty merkle root.
// See: https://github.com/informalsystems/tendermint-rs/issues/562
let computed_data_hash = simple_hash_from_byte_vectors(
block_info
.block
.data
.iter()
.map(|t| t.to_owned().into())
.collect(),
);
assert_eq!(
computed_data_hash,
block_info
.block
.header
.data_hash
.unwrap_or_default()
.as_bytes()
);
}
/// `/block_results` endpoint
#[tokio::test]
async fn block_results() {
let height = 1u64;
let block_results = localhost_http_client()
.block_results(Height::try_from(height).unwrap())
.await
.unwrap();
assert_eq!(block_results.height.value(), height);
assert!(block_results.txs_results.is_none());
}
/// `/blockchain` endpoint
#[tokio::test]
async fn blockchain() {
let max_height = 10u64;
let blockchain_info = localhost_http_client()
.blockchain(Height::from(1u32), Height::try_from(max_height).unwrap())
.await
.unwrap();
assert_eq!(
blockchain_info.block_metas.len() as u64,
min(max_height, blockchain_info.last_height.value())
);
}
/// `/commit` endpoint
#[tokio::test]
async fn commit() {
let height = 1u64;
let commit_info = localhost_http_client()
.commit(Height::try_from(height).unwrap())
.await
.unwrap();
assert_eq!(commit_info.signed_header.header.height.value(), height);
assert_eq!(commit_info.canonical, true);
assert_eq!(
commit_info.signed_header.header.hash(),
commit_info.signed_header.commit.block_id.hash
);
}
/// `/consensus_state` endpoint
#[tokio::test]
async fn consensus_state() {
// TODO(thane): Test more than just the deserialization.
localhost_http_client().consensus_state().await.unwrap();
}
/// `/genesis` endpoint
#[tokio::test]
async fn genesis() {
let genesis = localhost_http_client().genesis().await.unwrap(); // https://github.com/tendermint/tendermint/issues/5549
assert_eq!(
genesis.consensus_params.validator.pub_key_types[0].to_string(),
"ed25519"
);
}
/// `/net_info` endpoint integration test
#[tokio::test]
async fn net_info() |
/// `/status` endpoint integration test
#[tokio::test]
async fn status_integration() {
let status = localhost_http_client().status().await.unwrap();
// For lack of better things to test
assert_eq!(status.validator_info.voting_power.value(), 10);
}
#[tokio::test]
async fn subscription_interface() {
let (client, driver) = localhost_websocket_client().await;
let driver_handle = tokio::spawn(async move { driver.run().await });
let mut subs = client.subscribe(EventType::NewBlock.into()).await.unwrap();
let mut ev_count = 5_i32;
println!("Attempting to grab {} new blocks", ev_count);
while let Some(res) = subs.next().await {
let ev = res.unwrap();
println!("Got event: {:?}", ev);
ev_count -= 1;
if ev_count < 0 {
break;
}
}
client.close().unwrap();
let _ = driver_handle.await.unwrap();
}
#[tokio::test]
async fn transaction_subscription() {
// We run these sequentially wrapped within a single test to ensure
// that Tokio doesn't execute them simultaneously. If they are executed
// simultaneously, their submitted transactions interfere with each
// other and one of them will (incorrectly) fail.
simple_transaction_subscription().await;
concurrent_subscriptions().await;
tx_search().await;
}
async fn simple_transaction_subscription() {
let (client, driver) = localhost_websocket_client().await;
let driver_handle = tokio::spawn(async move { driver.run().await });
let mut subs = client.subscribe(EventType::Tx.into()).await.unwrap();
// We use Id::uuid_v4() here as a quick hack to generate a random value.
let mut expected_tx_values = (0..10_u32)
.map(|_| Id::uuid_v4().to_string())
.collect::<Vec<String>>();
let broadcast_tx_values = expected_tx_values.clone();
// We can clone the WebSocket client, because it's just a handle to the
// driver.
let inner_client = client.clone();
tokio::spawn(async move {
for (tx_count, val) in broadcast_tx_values.into_iter().enumerate() {
let tx = format!("tx{}={}", tx_count, val);
inner_client
.broadcast_tx_async(Transaction::from(tx.into_bytes()))
.await
.unwrap();
}
});
println!(
"Attempting to grab {} transaction events",
expected_tx_values.len()
);
let mut cur_tx_id = 0_u32;
while !expected_tx_values.is_empty() {
let delay = tokio::time::sleep(Duration::from_secs(5));
tokio::pin!(delay);
tokio::select! {
Some(res) = subs.next() => {
let ev = res.unwrap();
//println!("Got event: {:?}", ev);
let next_val = expected_tx_values.remove(0);
match ev.data {
EventData::Tx { tx_result } => match String::from_utf8(tx_result.tx) {
Ok(decoded_tx_str) => {
let decoded_tx_split = decoded_tx_str
.split('=')
.map(|s| s.to_string())
.collect::<Vec<String>>();
assert_eq!(2, decoded_tx_split.len());
let key = decoded_tx_split.get(0).unwrap();
let val = decoded_tx_split.get(1).unwrap();
println!("Got tx: {}={}", key, val);
assert_eq!(format!("tx{}", cur_tx_id), *key);
assert_eq!(next_val, *val);
}
Err(e) => panic!("Failed to convert decoded tx to string: {}", e),
},
_ => panic!("Unexpected event type: {:?}", ev),
}
cur_tx_id += 1;
},
_ = &mut delay => panic!("Timed out waiting for an event"),
}
}
client.close().unwrap();
let _ = driver_handle.await.unwrap();
}
async fn concurrent_subscriptions() {
let (client, driver) = localhost_websocket_client().await;
let driver_handle = tokio::spawn(async move { driver.run().await });
let new_block_subs = client.subscribe(EventType::NewBlock.into()).await.unwrap();
let tx_subs = client.subscribe(EventType::Tx.into()).await.unwrap();
// We use Id::uuid_v4() here as a quick hack to generate a random value.
let mut expected_tx_values = (0..10_u32)
.map(|_| Id::uuid_v4().to_string())
.collect::<Vec<String>>();
let broadcast_tx_values = expected_tx_values.clone();
let mut expected_new_blocks = 5_i32;
let inner_client = client.clone();
tokio::spawn(async move {
for (tx_count, val) in broadcast_tx_values.into_iter().enumerate() {
let tx = format!("tx{}={}", tx_count, val);
inner_client
.broadcast_tx_async(Transaction::from(tx.into_bytes()))
.await
.unwrap();
tokio::time::sleep(Duration::from_millis(100)).await;
}
});
let mut combined_subs = futures::stream::select_all(vec![new_block_subs, tx_subs]);
println!(
"Attempting to receive {} transactions and {} new blocks",
expected_tx_values.len(),
expected_new_blocks
);
while expected_new_blocks > 0 && !expected_tx_values.is_empty() {
let timeout = tokio::time::sleep(Duration::from_secs(5));
tokio::pin!(timeout);
tokio::select! {
Some(res) = combined_subs.next() => {
let ev: Event = res.unwrap();
println!("Got event: {:?}", ev);
match ev.data {
EventData::NewBlock { .. } => {
println!("Got new block event");
expected_new_blocks -= 1;
},
EventData::Tx { .. } => {
println!("Got new transaction event");
let _ = expected_tx_values.pop();
},
_ => panic!("Unexpected event received: {:?}", ev),
}
},
_ = &mut timeout => panic!("Timed out waiting for an event"),
}
}
client.close().unwrap();
let _ = driver_handle.await.unwrap();
}
async fn tx_search() {
let rpc_client = localhost_http_client();
let (mut subs_client, driver) = localhost_websocket_client().await;
let driver_handle = tokio::spawn(async move { driver.run().await });
let tx = "tx_search_key=tx_search_value".to_string();
let tx_info = broadcast_tx(
&rpc_client,
&mut subs_client,
Transaction::from(tx.into_bytes()),
)
.await
.unwrap();
println!("Got tx_info: {:?}", tx_info);
// TODO(thane): Find a better way of accomplishing this. This might
// still be nondeterministic.
tokio::time::sleep(Duration::from_millis(500)).await;
let res = rpc_client
.tx_search(
Query::eq("app.key", "tx_search_key"),
true,
1,
1,
Order::Ascending,
)
.await
.unwrap();
assert!(res.total_count > 0);
// We don't have more than 1 page of results
assert_eq!(res.total_count as usize, res.txs.len());
// Find our transaction
let txs = res
.txs
.iter()
.filter(|tx| tx.height.value() == (tx_info.height as u64))
.collect::<Vec<&ResultTx>>();
assert_eq!(1, txs.len());
assert_eq!(tx_info.tx, txs[0].tx.as_bytes());
subs_client.close().unwrap();
driver_handle.await.unwrap().unwrap();
}
async fn broadcast_tx(
http_client: &HttpClient,
websocket_client: &mut WebSocketClient,
tx: Transaction,
) -> Result<TxInfo, tendermint_rpc::Error> {
let mut subs = websocket_client.subscribe(EventType::Tx.into()).await?;
let _ = http_client.broadcast_tx_async(tx.clone()).await?;
let timeout = tokio::time::sleep(Duration::from_secs(3));
tokio::pin!(timeout);
tokio::select! {
Some(res) = subs.next() => {
let ev = res?;
match ev.data {
EventData::Tx { tx_result } => {
let tx_result_bytes: &[u8] = tx_result.tx.as_ref();
// Make sure we have the right transaction here
assert_eq!(tx.as_bytes(), tx_result_bytes);
Ok(tx_result)
},
_ => panic!("Unexpected event: {:?}", ev),
}
}
_ = &mut timeout => panic!("Timed out waiting for transaction"),
}
}
}
| {
let net_info = localhost_http_client().net_info().await.unwrap();
assert!(net_info.listening);
} |
handson2_q_qi.py | import os
import math
import argparse
import gym
from agents.q_agent import Q, Agent, Trainer
RECORD_PATH = os.path.join(os.path.dirname(__file__), "./upload")
def main(episodes, render, monitor):
|
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="train & run cartpole ")
parser.add_argument("--episode", type=int, default=1000, help="episode to train")
parser.add_argument("--render", action="store_true", help="render the screen")
parser.add_argument("--monitor", action="store_true", help="monitor")
parser.add_argument("--upload", type=str, default="", help="upload key to openai gym (training is not executed)")
args = parser.parse_args()
if args.upload:
if os.path.isdir(RECORD_PATH):
gym.upload(RECORD_PATH, api_key=args.upload)
else:
main(args.episode, args.render, args.monitor)
| env = gym.make("CartPole-v0")
q = Q(
env.action_space.n,
env.observation_space,
bin_size=[7, 7, 7, 7],
low_bound=[-5, -0.5, -5, -0.5],
high_bound=[5, 0.5, 5, 0.5]
)
agent = Agent(q, epsilon=0.05)
learning_decay = lambda lr, t: 1 / (t + 1) ** 0.5
epsilon_decay = lambda eps, t: 1 / (t + 1) ** 0.5
trainer = Trainer(
agent,
gamma=0.95,
learning_rate=0.1, learning_rate_decay=learning_decay,
epsilon=1.0, epsilon_decay=epsilon_decay,
max_step=250)
if monitor:
env.monitor.start(RECORD_PATH)
trainer.train(env, episode_count=episodes, render=render)
if monitor:
env.monitor.close() |
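# Illustrative sketch (not part of the original script): the 1/sqrt(t+1)
# schedule used above for both learning-rate and epsilon decay, evaluated
# for the first few episodes.
def _show_decay(episodes=5):
    decay = lambda _, t: 1 / (t + 1) ** 0.5
    return [round(decay(None, t), 3) for t in range(episodes)]
# _show_decay() -> [1.0, 0.707, 0.577, 0.5, 0.447]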
mod.rs | //! Runner for a voice connection.
//!
//! Songbird's driver is a mixed-sync system, using:
//! * Asynchronous connection management, event-handling, and gateway integration.
//! * Synchronous audio mixing, packet generation, and encoding.
//!
//! This splits up work according to its IO/compute bound nature, preventing packet
//! generation from being slowed down past its deadline, or from affecting other
//! asynchronous tasks your bot must handle.
mod config;
pub(crate) mod connection;
mod crypto;
mod decode_mode;
pub(crate) mod tasks;
pub use config::Config;
use connection::error::Result;
pub use crypto::*;
pub use decode_mode::DecodeMode;
use crate::{
events::EventData,
input::Input,
tracks::{Track, TrackHandle},
ConnectionInfo,
Event,
EventHandler,
};
use audiopus::Bitrate;
use flume::{Receiver, SendError, Sender};
use tasks::message::CoreMessage;
use tracing::instrument;
/// The control object for a Discord voice connection, handling connection,
/// mixing, encoding, en/decryption, and event generation.
#[derive(Clone, Debug)]
pub struct Driver {
config: Config,
self_mute: bool,
sender: Sender<CoreMessage>,
}
impl Driver {
/// Creates a new voice driver.
///
/// This will create the core voice tasks in the background.
#[inline]
pub fn new(config: Config) -> Self {
let sender = Self::start_inner(config.clone());
Driver {
config,
self_mute: false,
sender,
}
}
fn start_inner(config: Config) -> Sender<CoreMessage> {
let (tx, rx) = flume::unbounded();
tasks::start(config, rx, tx.clone());
tx
}
fn restart_inner(&mut self) {
self.sender = Self::start_inner(self.config.clone());
self.mute(self.self_mute);
}
/// Connects to a voice channel using the specified server.
#[instrument(skip(self))]
pub fn connect(&mut self, info: ConnectionInfo) -> Receiver<Result<()>> {
let (tx, rx) = flume::bounded(1);
self.raw_connect(info, tx);
rx
}
/// Connects to a voice channel using the specified server.
#[instrument(skip(self))]
pub(crate) fn raw_connect(&mut self, info: ConnectionInfo, tx: Sender<Result<()>>) {
self.send(CoreMessage::ConnectWithResult(info, tx));
}
/// Leaves the current voice channel, disconnecting from it.
///
/// This does *not* forget settings, like whether to be self-deafened or
/// self-muted.
#[instrument(skip(self))]
pub fn leave(&mut self) {
self.send(CoreMessage::Disconnect);
}
/// Sets whether the current connection is to be muted.
///
/// If there is no live voice connection, then this only acts as a settings
/// update for future connections.
#[instrument(skip(self))]
pub fn mute(&mut self, mute: bool) {
self.self_mute = mute;
self.send(CoreMessage::Mute(mute));
}
/// Returns whether the driver is muted (i.e., processes audio internally
/// but submits none).
#[instrument(skip(self))]
pub fn | (&self) -> bool {
self.self_mute
}
/// Plays audio from a source, returning a handle for further control.
///
/// This can be a source created via [`ffmpeg`] or [`ytdl`].
///
/// [`ffmpeg`]: ../input/fn.ffmpeg.html
/// [`ytdl`]: ../input/fn.ytdl.html
#[instrument(skip(self))]
pub fn play_source(&mut self, source: Input) -> TrackHandle {
let (player, handle) = super::create_player(source);
self.send(CoreMessage::AddTrack(player));
handle
}
/// Plays audio from a source, returning a handle for further control.
///
/// Unlike [`play_source`], this stops all other sources attached
/// to the channel.
///
/// [`play_source`]: #method.play_source
#[instrument(skip(self))]
pub fn play_only_source(&mut self, source: Input) -> TrackHandle {
let (player, handle) = super::create_player(source);
self.send(CoreMessage::SetTrack(Some(player)));
handle
}
/// Plays audio from a [`Track`] object.
///
/// This will be one half of the return value of [`create_player`].
/// The main difference between this function and [`play_source`] is
/// that this allows for direct manipulation of the [`Track`] object
/// before it is passed over to the voice and mixing contexts.
///
/// [`create_player`]: ../tracks/fn.create_player.html
/// [`Track`]: ../tracks/struct.Track.html
/// [`play_source`]: #method.play_source
#[instrument(skip(self))]
pub fn play(&mut self, track: Track) {
self.send(CoreMessage::AddTrack(track));
}
/// Exclusively plays audio from a [`Track`] object.
///
/// This will be one half of the return value of [`create_player`].
/// As in [`play_only_source`], this stops all other sources attached to the
/// channel. Like [`play`], however, this allows for direct manipulation of the
/// [`Track`] object before it is passed over to the voice and mixing contexts.
///
/// [`create_player`]: ../tracks/fn.create_player.html
/// [`Track`]: ../tracks/struct.Track.html
/// [`play_only_source`]: #method.play_only_source
/// [`play`]: #method.play
#[instrument(skip(self))]
pub fn play_only(&mut self, track: Track) {
self.send(CoreMessage::SetTrack(Some(track)));
}
/// Sets the bitrate for encoding Opus packets sent along
/// the channel being managed.
///
/// The default rate is 128 kbps.
/// Sensible values range between `Bits(512)` and `Bits(512_000)`
/// bits per second.
/// Alternatively, `Auto` and `Max` remain available.
#[instrument(skip(self))]
pub fn set_bitrate(&mut self, bitrate: Bitrate) {
self.send(CoreMessage::SetBitrate(bitrate))
}
/// Stops playing audio from all sources, if any are set.
#[instrument(skip(self))]
pub fn stop(&mut self) {
self.send(CoreMessage::SetTrack(None))
}
/// Sets the configuration for this driver.
#[instrument(skip(self))]
pub fn set_config(&mut self, config: Config) {
self.config = config.clone();
self.send(CoreMessage::SetConfig(config))
}
/// Attach a global event handler to an audio context. Global events may receive
/// any [`EventContext`].
///
/// Global timing events will tick regardless of whether audio is playing,
/// so long as the bot is connected to a voice channel, and have no tracks.
/// [`TrackEvent`]s will respond to all relevant tracks, giving some audio elements.
///
/// Users **must** ensure that no costly work or blocking occurs
/// within the supplied function or closure. *Taking excess time could prevent
/// timely sending of packets, causing audio glitches and delays*.
///
/// [`Track`]: ../tracks/struct.Track.html
/// [`TrackEvent`]: ../events/enum.TrackEvent.html
/// [`EventContext`]: ../events/enum.EventContext.html
#[instrument(skip(self, action))]
pub fn add_global_event<F: EventHandler + 'static>(&mut self, event: Event, action: F) {
self.send(CoreMessage::AddEvent(EventData::new(event, action)));
}
/// Sends a message to the inner tasks, restarting it if necessary.
fn send(&mut self, status: CoreMessage) {
// Restart thread if it errored.
if let Err(SendError(status)) = self.sender.send(status) {
self.restart_inner();
self.sender.send(status).unwrap();
}
}
}
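// Illustrative sketch (not part of the original module): typical driver usage.
// `info` and `source` are assumed to come from the caller (e.g. an `Input`
// built with the crate's `ffmpeg`/`ytdl` helpers); waiting on the receiver
// returned by `connect` is optional.
#[allow(dead_code)]
async fn example_driver_usage(info: ConnectionInfo, source: Input) {
    let mut driver = Driver::new(Config::default());
    // The returned receiver yields the connection result once the handshake finishes.
    let _ready = driver.connect(info);
    driver.set_bitrate(Bitrate::Auto);
    let handle: TrackHandle = driver.play_source(source);
    let _ = handle; // pause/stop/etc. are controlled through the handle
}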
impl Default for Driver {
fn default() -> Self {
Self::new(Default::default())
}
}
impl Drop for Driver {
/// Leaves the current connected voice channel, if connected to one, and
/// forgets all configurations relevant to this Handler.
fn drop(&mut self) {
self.leave();
let _ = self.sender.send(CoreMessage::Poison);
}
}
| is_mute |
chain_spec.rs | ///////////////////////////////////////////////////////////////////////////////
//
// Copyright 2018-2020 Airalab <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////////////
//! Chain specification and utils.
use node_primitives::{AccountId, Balance, Block, Signature};
use robonomics_runtime::{
wasm_binary_unwrap, BabeConfig, BalancesConfig, GenesisConfig, GrandpaConfig, IndicesConfig,
SudoConfig, SystemConfig,
};
use sc_chain_spec::ChainSpecExtension;
use sc_service::ChainType;
use serde::{Deserialize, Serialize};
use sp_consensus_babe::AuthorityId as BabeId;
use sp_core::{sr25519, Pair, Public};
use sp_finality_grandpa::AuthorityId as GrandpaId;
use sp_runtime::traits::{IdentifyAccount, Verify};
/// Robonomics runtime family chains.
pub enum RobonomicsFamily {
/// Development chain (used for local tests only).
Development,
/// DAO IPCI (ipci.io) chain (https://telemetry.polkadot.io/#list/DAO%20IPCI).
DaoIpci,
/// Robonomics Network parachain (https://telemetry.polkadot.io/#list/Robonomics).
Parachain,
}
/// Identify the Robonomics family chain.
pub trait RobonomicsChain {
fn family(&self) -> RobonomicsFamily;
}
impl RobonomicsChain for Box<dyn sc_chain_spec::ChainSpec> {
fn family(&self) -> RobonomicsFamily |
}
const DAO_IPCI_ID: &str = "ipci";
/*
const IPCI_PROTOCOL_ID: &str = "mito";
const IPCI_PROPERTIES: &str = r#"
{
"ss58Format": 32,
"tokenDecimals": 12,
"tokenSymbol": "MITO"
}"#;
*/
type AccountPublic = <Signature as Verify>::Signer;
/// Node `ChainSpec` extensions.
///
/// Additional parameters for some Substrate core modules,
/// customizable from the chain spec.
#[derive(Default, Clone, Serialize, Deserialize, ChainSpecExtension)]
#[serde(rename_all = "camelCase")]
pub struct Extensions {
/// Block numbers with known hashes.
pub fork_blocks: sc_client_api::ForkBlocks<Block>,
/// Known bad block hashes.
pub bad_blocks: sc_client_api::BadBlocks<Block>,
}
/// Specialized `ChainSpec`.
pub type ChainSpec = sc_service::GenericChainSpec<GenesisConfig, Extensions>;
/// Helper function to generate a crypto pair from seed
fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
TPublic::Pair::from_string(&format!("//{}", seed), None)
.expect("static values are valid; qed")
.public()
}
/// Helper function to generate an account ID from seed
pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId
where
AccountPublic: From<<TPublic::Pair as Pair>::Public>,
{
AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
}
/// Helper function to generate stash, controller and session key from seed
fn get_authority_keys_from_seed(seed: &str) -> (AccountId, BabeId, GrandpaId) {
(
get_account_id_from_seed::<sr25519::Public>(seed),
get_from_seed::<BabeId>(seed),
get_from_seed::<GrandpaId>(seed),
)
}
fn development_genesis(
initial_authorities: Vec<(AccountId, BabeId, GrandpaId)>,
endowed_accounts: Option<Vec<AccountId>>,
sudo_key: AccountId,
) -> GenesisConfig {
const ENDOWMENT: Balance = 1_000_000_000_000_000_000;
let endowed_accounts: Vec<(AccountId, Balance)> = endowed_accounts
.unwrap_or_else(|| {
vec![
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Bob"),
get_account_id_from_seed::<sr25519::Public>("Charlie"),
get_account_id_from_seed::<sr25519::Public>("Dave"),
get_account_id_from_seed::<sr25519::Public>("Eve"),
get_account_id_from_seed::<sr25519::Public>("Ferdie"),
get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
get_account_id_from_seed::<sr25519::Public>("Charlie//stash"),
get_account_id_from_seed::<sr25519::Public>("Dave//stash"),
get_account_id_from_seed::<sr25519::Public>("Eve//stash"),
get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
]
})
.iter()
.cloned()
.map(|acc| (acc, ENDOWMENT))
.collect();
mk_genesis(
initial_authorities,
endowed_accounts,
sudo_key,
wasm_binary_unwrap().to_vec(),
)
}
/// Helper function to create GenesisConfig
fn mk_genesis(
initial_authorities: Vec<(AccountId, BabeId, GrandpaId)>,
balances: Vec<(AccountId, Balance)>,
sudo_key: AccountId,
code: Vec<u8>,
) -> GenesisConfig {
GenesisConfig {
frame_system: Some(SystemConfig {
code,
changes_trie_config: Default::default(),
}),
pallet_indices: Some(IndicesConfig { indices: vec![] }),
pallet_balances: Some(BalancesConfig { balances }),
pallet_babe: Some(BabeConfig {
authorities: initial_authorities
.iter()
.map(|x| (x.1.clone(), 1))
.collect(),
}),
pallet_grandpa: Some(GrandpaConfig {
authorities: initial_authorities
.iter()
.map(|x| (x.2.clone(), 1))
.collect(),
}),
//pallet_elections_phragmen: Some(ElectionsConfig { members: vec![] }),
//pallet_collective_Instance1: Some(CouncilConfig::default()),
//pallet_treasury: Some(Default::default()),
pallet_sudo: Some(SudoConfig { key: sudo_key }),
}
}
/// IPCI blockchain config.
pub fn ipci_config() -> ChainSpec {
ChainSpec::from_json_bytes(&include_bytes!("../res/ipci.json")[..]).unwrap()
}
/// Development config (single validator Alice)
pub fn development_config() -> ChainSpec {
let genesis = || {
development_genesis(
vec![get_authority_keys_from_seed("Alice")],
None,
get_account_id_from_seed::<sr25519::Public>("Alice"),
)
};
ChainSpec::from_genesis(
"Development",
"dev",
ChainType::Development,
genesis,
vec![],
None,
None,
None,
Default::default(),
)
}
| {
if self.id() == DAO_IPCI_ID {
return RobonomicsFamily::DaoIpci;
}
if self.id() == "dev" {
return RobonomicsFamily::Development;
}
RobonomicsFamily::Parachain
} |
base.py | import urllib.parse
from ..util import (
get_module,
LoggingMixin,
)
def get_network_module(name):
return get_module('.' + name, package='bos_consensus.network')
class BaseTransport(LoggingMixin):
blockchain = None
config = None
message_received_callback = None
def __init__(self, **config):
super(BaseTransport, self).__init__()
self.config = config
self.set_requests()
| def write(self, data):
raise NotImplementedError()
def send(self, addr, message):
raise NotImplementedError()
def set_requests(self):
raise NotImplementedError()
def start(self, blockchain, message_received_callback):
from ..blockchain.base import BaseBlockchain
assert isinstance(blockchain, BaseBlockchain)
self.set_logging('transport', node=blockchain.node_name)
self.blockchain = blockchain
self.message_received_callback = message_received_callback
self._start()
return
def _start(self):
raise NotImplementedError()
def stop(self):
raise NotImplementedError()
class BaseServer(LoggingMixin):
blockchain = None
config = None
def __init__(self, blockchain, **config):
super(BaseServer, self).__init__()
self.set_logging('network', node=blockchain.node_name)
self.blockchain = blockchain
self.config = config
def start(self):
self.blockchain.transport.start(self.blockchain, self.message_received_callback)
return
def message_received_callback(self):
raise NotImplementedError()
def stop(self):
self.blockchain.transport.stop()
return
class Endpoint:
scheme = None
host = None
port = None
extras = None
@classmethod
def from_uri(cls, uri):
parsed = urllib.parse.urlparse(uri)
assert len(parsed.scheme) > 0
assert len(parsed.hostname) > 0
assert type(parsed.port) in (int,)
return cls(
parsed.scheme,
parsed.hostname,
parsed.port,
**dict(
map(
lambda x: (x[0], x[1][0]),
urllib.parse.parse_qs(parsed.query).items()
)
)
)
def __init__(self, scheme, host, port, **extras):
assert type(host) in (str,)
assert type(port) in (int,)
self.scheme = scheme
self.host = host
self.port = port
self.extras = extras
@property
def uri_full(self):
return '%s%s' % (self.uri, self.get_extras())
def get_extras(self):
extras = ''
if self.extras:
extras = '?%s' % urllib.parse.urlencode(self.extras)
return extras
@property
def uri(self):
return '%(scheme)s://%(host)s:%(port)d' % self.__dict__
def __repr__(self):
return '<Endpoint: %s>' % self.uri_full
def __eq__(self, b):
assert isinstance(b, self.__class__)
return self.host == b.host and self.port == b.port
def update(self, **extras):
self.extras.update(extras)
return
def serialize(self):
return self.uri_full
def get(self, k, default=None):
return self.extras.get(k, default)
def join(self, u):
return urllib.parse.urljoin(self.uri, u) + self.get_extras() | def receive(self, data):
raise NotImplementedError()
|
user.ts | import { notImplementedInLocalWorkspaceRepository } from "../repository/utils"; | export default {
Query: {
users: notImplementedInLocalWorkspaceRepository,
user: notImplementedInLocalWorkspaceRepository,
},
Mutation: { updateUser: notImplementedInLocalWorkspaceRepository },
User: { memberships: notImplementedInLocalWorkspaceRepository },
}; | |
payline-utils.d.ts | import * as moment from "moment-timezone";
/**
* Payline helpers
*/
declare class PaylineUtils {
static timezone: string;
/**
* Return the payline date format for this date
*/
static getPaylineDateFormat(value: moment.Moment): string;
/**
* Return the payline date format for this date
*/
static getPaylineNowDateFormat(): string;
/**
* Check result for success
*/
static isSuccessful(response: any): boolean;
/**
* Check errors
*/
static parseErrors(error: any): Promise<any>;
} | export { PaylineUtils }; |
|
flag.rs | use std::convert::TryFrom;
use nom::combinator::map_res;
use nom::number::complete::le_i8;
use crate::ParseResult;
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
#[repr(i8)]
pub enum Flag {
Unknown = -1,
None = 0,
Green = 1,
Blue = 2,
Yellow = 3,
Red = 4,
}
#[non_exhaustive]
pub struct InvalidFlag(());
impl InvalidFlag {
fn new() -> Self {
InvalidFlag(())
}
}
impl TryFrom<i8> for Flag {
type Error = InvalidFlag;
fn try_from(item: i8) -> Result<Self, Self::Error> {
match item {
-1 => Ok(Flag::Unknown),
0 => Ok(Flag::None),
1 => Ok(Flag::Green),
2 => Ok(Flag::Blue),
3 => Ok(Flag::Yellow),
4 => Ok(Flag::Red),
_ => Err(InvalidFlag::new()),
}
}
}
impl Flag {
pub fn | (input: &[u8]) -> ParseResult<Self> {
map_res(le_i8, Flag::try_from)(input)
}
}
#[cfg(test)]
mod tests {
use super::*;
use nom::error::ErrorKind;
use nom::Err;
#[test]
fn test_parse() {
let packet = (-1i8).to_le_bytes();
let result = Flag::parse(&packet[..]);
assert_eq!(result, Ok((&[][..], Flag::Unknown)));
let packet = 0i8.to_le_bytes();
let result = Flag::parse(&packet[..]);
assert_eq!(result, Ok((&[][..], Flag::None)));
let packet = 1i8.to_le_bytes();
let result = Flag::parse(&packet[..]);
assert_eq!(result, Ok((&[][..], Flag::Green)));
let packet = 2i8.to_le_bytes();
let result = Flag::parse(&packet[..]);
assert_eq!(result, Ok((&[][..], Flag::Blue)));
let packet = 3i8.to_le_bytes();
let result = Flag::parse(&packet[..]);
assert_eq!(result, Ok((&[][..], Flag::Yellow)));
let packet = 4i8.to_le_bytes();
let result = Flag::parse(&packet[..]);
assert_eq!(result, Ok((&[][..], Flag::Red)));
let packet = (-2i8).to_le_bytes();
let result = Flag::parse(&packet[..]);
assert_eq!(result, Err(Err::Error((&packet[..], ErrorKind::MapRes))));
let packet = 5i8.to_le_bytes();
let result = Flag::parse(&packet[..]);
assert_eq!(result, Err(Err::Error((&packet[..], ErrorKind::MapRes))));
}
}
| parse |
misc.py | import re
def camel_to_snake_case(name: str) -> str:
| """
Source: https://stackoverflow.com/a/1176023
Args:
name: A CamelCase name
Returns:
A snake_case version of the input name
"""
name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
name = re.sub('__([A-Z])', r'_\1', name)
name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', name)
return name.lower() |
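# Illustrative sketch (not part of the original module): expected outputs of
# the three-step substitution above (examples from the referenced SO answer).
def _demo_camel_to_snake_case():
    assert camel_to_snake_case("CamelCase") == "camel_case"
    assert camel_to_snake_case("getHTTPResponseCode") == "get_http_response_code"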
|
version.go | /*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"io"
"github.com/GoogleCloudPlatform/skaffold/cmd/skaffold/app/flags"
"github.com/GoogleCloudPlatform/skaffold/pkg/skaffold/version"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var versionFlag = flags.NewTemplateFlag("{{.Version}}\n", version.Info{})
func | (out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "version",
Short: "print the version of skaffold",
RunE: func(cmd *cobra.Command, args []string) error {
return RunVersion(out, cmd)
},
}
cmd.Flags().VarP(versionFlag, "output", "o", versionFlag.Usage())
return cmd
}
func RunVersion(out io.Writer, cmd *cobra.Command) error {
if err := versionFlag.Template().Execute(out, version.Get()); err != nil {
return errors.Wrap(err, "executing template")
}
return nil
}
| NewCmdVersion |
test_qos.py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from heat.common import template_format
from heat.engine.clients.os import neutron
from heat.engine import rsrc_defn
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
qos_policy_template = '''
heat_template_version: 2016-04-08
description: This template defines a neutron qos policy.
resources:
my_qos_policy:
type: OS::Neutron::QoSPolicy
properties:
description: a policy for test
shared: true
tenant_id: d66c74c01d6c41b9846088c1ad9634d0
'''
bandwidth_limit_rule_template = '''
heat_template_version: 2016-04-08
description: This template defines a neutron bandwidth limit rule.
resources:
my_bandwidth_limit_rule:
type: OS::Neutron::QoSBandwidthLimitRule
properties:
policy: 477e8273-60a7-4c41-b683-fdb0bc7cd151
max_kbps: 1000
max_burst_kbps: 1000
tenant_id: d66c74c01d6c41b9846088c1ad9634d0
'''
dscp_marking_rule_template = '''
heat_template_version: 2016-04-08
description: This template defines a neutron DSCP marking rule.
resources:
my_dscp_marking_rule:
type: OS::Neutron::QoSDscpMarkingRule
properties:
policy: 477e8273-60a7-4c41-b683-fdb0bc7cd151
dscp_mark: 16
tenant_id: d66c74c01d6c41b9846088c1ad9634d0
'''
class NeutronQoSPolicyTest(common.HeatTestCase):
def setUp(self):
super(NeutronQoSPolicyTest, self).setUp()
self.ctx = utils.dummy_context()
tpl = template_format.parse(qos_policy_template)
self.stack = stack.Stack(
self.ctx,
'neutron_qos_policy_test',
template.Template(tpl)
)
self.neutronclient = mock.MagicMock()
self.patchobject(neutron.NeutronClientPlugin, 'has_extension',
return_value=True)
self.my_qos_policy = self.stack['my_qos_policy']
self.my_qos_policy.client = mock.MagicMock(
return_value=self.neutronclient)
self.patchobject(self.my_qos_policy, 'physical_resource_name',
return_value='test_policy')
def test_qos_policy_handle_create(self):
policy = {
'policy': {
'description': 'a policy for test',
'id': '9c1eb3fe-7bba-479d-bd43-1d497e53c384',
'rules': [],
'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0',
'shared': True
}
}
create_props = {'name': 'test_policy',
'description': 'a policy for test',
'shared': True,
'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0'}
self.neutronclient.create_qos_policy.return_value = policy
self.my_qos_policy.handle_create()
self.assertEqual('9c1eb3fe-7bba-479d-bd43-1d497e53c384',
self.my_qos_policy.resource_id)
self.neutronclient.create_qos_policy.assert_called_once_with(
{'policy': create_props}
)
def test_qos_policy_handle_delete(self):
policy_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
self.my_qos_policy.resource_id = policy_id
self.neutronclient.delete_qos_policy.return_value = None
self.assertIsNone(self.my_qos_policy.handle_delete())
self.neutronclient.delete_qos_policy.assert_called_once_with(
self.my_qos_policy.resource_id)
def test_qos_policy_handle_delete_not_found(self):
policy_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
self.my_qos_policy.resource_id = policy_id
not_found = self.neutronclient.NotFound
self.neutronclient.delete_qos_policy.side_effect = not_found
self.assertIsNone(self.my_qos_policy.handle_delete())
self.neutronclient.delete_qos_policy.assert_called_once_with(
self.my_qos_policy.resource_id)
def test_qos_policy_handle_delete_resource_id_is_none(self):
self.my_qos_policy.resource_id = None
self.assertIsNone(self.my_qos_policy.handle_delete())
self.assertEqual(0, self.neutronclient.delete_qos_policy.call_count)
def test_qos_policy_handle_update(self):
policy_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
self.my_qos_policy.resource_id = policy_id
props = {
'name': 'test_policy',
'description': 'test',
'shared': False
}
prop_dict = props.copy()
update_snippet = rsrc_defn.ResourceDefinition(
self.my_qos_policy.name,
self.my_qos_policy.type(),
props)
# with name
self.my_qos_policy.handle_update(json_snippet=update_snippet,
tmpl_diff={},
prop_diff=props)
# without name
props['name'] = None
self.my_qos_policy.handle_update(json_snippet=update_snippet,
tmpl_diff={},
prop_diff=props)
self.assertEqual(2, self.neutronclient.update_qos_policy.call_count)
self.neutronclient.update_qos_policy.assert_called_with(
policy_id, {'policy': prop_dict})
def test_qos_policy_get_attr(self):
self.my_qos_policy.resource_id = 'test policy'
policy = {
'policy': {
'name': 'test_policy',
'description': 'a policy for test',
'id': '9c1eb3fe-7bba-479d-bd43-1d497e53c384',
'rules': [],
'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0',
'shared': True
}
}
self.neutronclient.show_qos_policy.return_value = policy
self.assertEqual([], self.my_qos_policy.FnGetAtt('rules'))
self.assertEqual(policy['policy'],
self.my_qos_policy.FnGetAtt('show'))
self.neutronclient.show_qos_policy.assert_has_calls(
[mock.call(self.my_qos_policy.resource_id)] * 2)
class NeutronQoSBandwidthLimitRuleTest(common.HeatTestCase):
def setUp(self):
super(NeutronQoSBandwidthLimitRuleTest, self).setUp()
self.ctx = utils.dummy_context()
tpl = template_format.parse(bandwidth_limit_rule_template)
self.stack = stack.Stack(
self.ctx,
'neutron_bandwidth_limit_rule_test',
template.Template(tpl)
)
self.neutronclient = mock.MagicMock()
self.patchobject(neutron.NeutronClientPlugin, 'has_extension',
return_value=True)
self.bandwidth_limit_rule = self.stack['my_bandwidth_limit_rule']
self.bandwidth_limit_rule.client = mock.MagicMock(
return_value=self.neutronclient)
self.find_mock = self.patchobject(
neutron.neutronV20,
'find_resourceid_by_name_or_id')
self.policy_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
self.find_mock.return_value = self.policy_id
def test_rule_handle_create(self):
rule = {
'bandwidth_limit_rule': {
'id': 'cf0eab12-ef8b-4a62-98d0-70576583c17a',
'max_kbps': 1000,
'max_burst_kbps': 1000,
'direction': 'egress',
'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0'
}
}
create_props = {'max_kbps': 1000,
'max_burst_kbps': 1000,
'direction': 'egress',
'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0'}
self.neutronclient.create_bandwidth_limit_rule.return_value = rule
self.bandwidth_limit_rule.handle_create()
self.assertEqual('cf0eab12-ef8b-4a62-98d0-70576583c17a',
self.bandwidth_limit_rule.resource_id)
self.neutronclient.create_bandwidth_limit_rule.assert_called_once_with(
self.policy_id,
{'bandwidth_limit_rule': create_props})
def test_rule_handle_delete(self):
rule_id = 'cf0eab12-ef8b-4a62-98d0-70576583c17a'
self.bandwidth_limit_rule.resource_id = rule_id
self.neutronclient.delete_bandwidth_limit_rule.return_value = None
self.assertIsNone(self.bandwidth_limit_rule.handle_delete())
self.neutronclient.delete_bandwidth_limit_rule.assert_called_once_with(
rule_id, self.policy_id)
def test_rule_handle_delete_not_found(self):
rule_id = 'cf0eab12-ef8b-4a62-98d0-70576583c17a'
self.bandwidth_limit_rule.resource_id = rule_id
not_found = self.neutronclient.NotFound
self.neutronclient.delete_bandwidth_limit_rule.side_effect = not_found
self.assertIsNone(self.bandwidth_limit_rule.handle_delete())
self.neutronclient.delete_bandwidth_limit_rule.assert_called_once_with(
rule_id, self.policy_id)
def test_rule_handle_delete_resource_id_is_none(self):
self.bandwidth_limit_rule.resource_id = None
self.assertIsNone(self.bandwidth_limit_rule.handle_delete())
self.assertEqual(0,
self.neutronclient.bandwidth_limit_rule.call_count)
def test_rule_handle_update(self):
rule_id = 'cf0eab12-ef8b-4a62-98d0-70576583c17a'
self.bandwidth_limit_rule.resource_id = rule_id
prop_diff = {
'max_kbps': 500,
'max_burst_kbps': 400
}
self.bandwidth_limit_rule.handle_update(
json_snippet={},
tmpl_diff={},
prop_diff=prop_diff.copy())
self.neutronclient.update_bandwidth_limit_rule.assert_called_once_with(
rule_id,
self.policy_id,
{'bandwidth_limit_rule': prop_diff})
def test_rule_get_attr(self):
self.bandwidth_limit_rule.resource_id = 'test rule'
rule = {
'bandwidth_limit_rule': {
'id': 'cf0eab12-ef8b-4a62-98d0-70576583c17a',
'max_kbps': 1000,
'max_burst_kbps': 1000,
'direction': 'egress',
'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0'
} | self.bandwidth_limit_rule.FnGetAtt('show'))
self.neutronclient.show_bandwidth_limit_rule.assert_called_once_with(
self.bandwidth_limit_rule.resource_id, self.policy_id)
class NeutronQoSDscpMarkingRuleTest(common.HeatTestCase):
def setUp(self):
super(NeutronQoSDscpMarkingRuleTest, self).setUp()
self.ctx = utils.dummy_context()
tpl = template_format.parse(dscp_marking_rule_template)
self.stack = stack.Stack(
self.ctx,
'neutron_dscp_marking_rule_test',
template.Template(tpl)
)
self.neutronclient = mock.MagicMock()
self.patchobject(neutron.NeutronClientPlugin, 'has_extension',
return_value=True)
self.dscp_marking_rule = self.stack['my_dscp_marking_rule']
self.dscp_marking_rule.client = mock.MagicMock(
return_value=self.neutronclient)
self.find_mock = self.patchobject(
neutron.neutronV20,
'find_resourceid_by_name_or_id')
self.policy_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
self.find_mock.return_value = self.policy_id
def test_rule_handle_create(self):
rule = {
'dscp_marking_rule': {
'id': 'cf0eab12-ef8b-4a62-98d0-70576583c17a',
'dscp_mark': 16,
'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0'
}
}
create_props = {'dscp_mark': 16,
'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0'}
self.neutronclient.create_dscp_marking_rule.return_value = rule
self.dscp_marking_rule.handle_create()
self.assertEqual('cf0eab12-ef8b-4a62-98d0-70576583c17a',
self.dscp_marking_rule.resource_id)
self.neutronclient.create_dscp_marking_rule.assert_called_once_with(
self.policy_id,
{'dscp_marking_rule': create_props})
def test_rule_handle_delete(self):
rule_id = 'cf0eab12-ef8b-4a62-98d0-70576583c17a'
self.dscp_marking_rule.resource_id = rule_id
self.neutronclient.delete_dscp_marking_rule.return_value = None
self.assertIsNone(self.dscp_marking_rule.handle_delete())
self.neutronclient.delete_dscp_marking_rule.assert_called_once_with(
rule_id, self.policy_id)
def test_rule_handle_delete_not_found(self):
rule_id = 'cf0eab12-ef8b-4a62-98d0-70576583c17a'
self.dscp_marking_rule.resource_id = rule_id
not_found = self.neutronclient.NotFound
self.neutronclient.delete_dscp_marking_rule.side_effect = not_found
self.assertIsNone(self.dscp_marking_rule.handle_delete())
self.neutronclient.delete_dscp_marking_rule.assert_called_once_with(
rule_id, self.policy_id)
def test_rule_handle_delete_resource_id_is_none(self):
self.dscp_marking_rule.resource_id = None
self.assertIsNone(self.dscp_marking_rule.handle_delete())
self.assertEqual(0,
self.neutronclient.dscp_marking_rule.call_count)
def test_rule_handle_update(self):
rule_id = 'cf0eab12-ef8b-4a62-98d0-70576583c17a'
self.dscp_marking_rule.resource_id = rule_id
prop_diff = {
'dscp_mark': 8
}
self.dscp_marking_rule.handle_update(
json_snippet={},
tmpl_diff={},
prop_diff=prop_diff)
self.neutronclient.update_dscp_marking_rule.assert_called_once_with(
rule_id,
self.policy_id,
{'dscp_marking_rule': prop_diff})
def test_rule_get_attr(self):
self.dscp_marking_rule.resource_id = 'test rule'
rule = {
'dscp_marking_rule': {
'id': 'cf0eab12-ef8b-4a62-98d0-70576583c17a',
'dscp_mark': 8,
'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0'
}
}
self.neutronclient.show_dscp_marking_rule.return_value = rule
self.assertEqual(rule['dscp_marking_rule'],
self.dscp_marking_rule.FnGetAtt('show'))
self.neutronclient.show_dscp_marking_rule.assert_called_once_with(
self.dscp_marking_rule.resource_id, self.policy_id) | }
self.neutronclient.show_bandwidth_limit_rule.return_value = rule
self.assertEqual(rule['bandwidth_limit_rule'], |
flask_app.py | import random
import numpy as np
from typing import Dict, List
from flask import Flask
from flask_restx import Resource, Api
# from numpy import genfromtxt
from ubatch import ubatch_decorator
# from keras.models import load_model
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from joblib import load
ngd = fetch_20newsgroups(subset="all")
X = ngd.data
y = ngd.target
_, X_test, _, _ = train_test_split(X, y, test_size=0.33)
model = load("xgbregressor.joblib")
# X_test = genfromtxt("xgbregressor_inputs.csv", delimiter=",")
app = Flask(__name__)
api = Api(app)
@ubatch_decorator(max_size=100, timeout=0.01)
def predict(data: List[np.array]) -> List[np.float32]:
return model.predict(np.array(data)) # type: ignore
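# Hedged sketch of the batching contract assumed here (inferred from the
# decorator arguments above, not from the ubatch docs): `predict` always
# receives a list of inputs and returns outputs in the same order, while
# `predict.ubatch(x)` enqueues one input, waits until up to max_size concurrent
# requests (or the 0.01 s timeout) have been grouped, then returns only that
# caller's own result.
#   >>> predict.ubatch(X_test[0])        # single request, transparently batched
#   >>> predict([X_test[0], X_test[1]])  # explicit batch, direct call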
@api.route("/predict_ubatch")
class BatchPredict(Resource):
def post(self) -> Dict[str, float]:
output = predict.ubatch(random.choice(X_test))
return {"prediction": float(output)}
@api.route("/predict")
class Predict(Resource):
def | (self) -> Dict[str, float]:
output = predict([random.choice(X_test)])[0]
return {"prediction": float(output)}
| post |
exposure.py | import warnings
import numpy as np
from skimage import img_as_float
from skimage.util.dtype import dtype_range, dtype_limits
from skimage._shared.utils import deprecated
__all__ = ['histogram', 'cumulative_distribution', 'equalize',
'rescale_intensity', 'adjust_gamma',
'adjust_log', 'adjust_sigmoid']
def histogram(image, nbins=256):
"""Return histogram of image.
Unlike `numpy.histogram`, this function returns the centers of bins and
does not rebin integer arrays. For integer arrays, each integer value has
its own bin, which improves speed and intensity-resolution.
The histogram is computed on the flattened image: for color images, the
function should be used separately on each channel to obtain a histogram
for each color channel.
Parameters
----------
image : array
Input image.
nbins : int
Number of bins used to calculate histogram. This value is ignored for
integer arrays.
Returns
-------
hist : array
The values of the histogram.
bin_centers : array
The values at the center of the bins.
Examples
--------
>>> from skimage import data, exposure, util
>>> image = util.img_as_float(data.camera())
>>> np.histogram(image, bins=2)
(array([107432, 154712]), array([ 0. , 0.5, 1. ]))
>>> exposure.histogram(image, nbins=2)
(array([107432, 154712]), array([ 0.25, 0.75]))
"""
sh = image.shape
if len(sh) == 3 and sh[-1] < 4:
warnings.warn("This might be a color image. The histogram will be "
"computed on the flattened image. You can instead "
"apply this function to each color channel.")
# For integer types, histogramming with bincount is more efficient.
if np.issubdtype(image.dtype, np.integer):
offset = 0
if np.min(image) < 0:
offset = np.min(image)
hist = np.bincount(image.ravel() - offset)
bin_centers = np.arange(len(hist)) + offset
# clip histogram to start with a non-zero bin
idx = np.nonzero(hist)[0][0]
return hist[idx:], bin_centers[idx:]
else:
hist, bin_edges = np.histogram(image.flat, nbins)
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2.
return hist, bin_centers
def cumulative_distribution(image, nbins=256):
"""Return cumulative distribution function (cdf) for the given image.
Parameters
----------
image : array
Image array.
nbins : int
Number of bins for image histogram.
Returns
-------
img_cdf : array
Values of cumulative distribution function.
bin_centers : array
Centers of bins.
References
----------
.. [1] http://en.wikipedia.org/wiki/Cumulative_distribution_function
"""
hist, bin_centers = histogram(image, nbins)
img_cdf = hist.cumsum()
img_cdf = img_cdf / float(img_cdf[-1])
return img_cdf, bin_centers
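# Tiny worked example (integer input, so histogram() gives one bin per gray
# level): for the image [0, 0, 1, 1] the histogram is [2, 2], so the CDF is
# [0.5, 1.0] over bin centers [0, 1].
#   >>> cumulative_distribution(np.array([0, 0, 1, 1], dtype=np.uint8))
#   # -> (array([ 0.5,  1. ]), array([0, 1]))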
@deprecated('equalize_hist')
def equalize(image, nbins=256):
return equalize_hist(image, nbins)
def equalize_hist(image, nbins=256):
"""Return image after histogram equalization.
Parameters
----------
image : array
Image array.
nbins : int
Number of bins for image histogram.
Returns
-------
out : float array
Image array after histogram equalization.
Notes
-----
This function is adapted from [1]_ with the author's permission.
References
----------
.. [1] http://www.janeriksolem.net/2009/06/histogram-equalization-with-python-and.html
.. [2] http://en.wikipedia.org/wiki/Histogram_equalization
"""
image = img_as_float(image)
cdf, bin_centers = cumulative_distribution(image, nbins)
out = np.interp(image.flat, bin_centers, cdf)
return out.reshape(image.shape)
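# Hedged usage sketch (assumes skimage.data is available in the caller's
# environment): pixels are remapped through the image's own CDF, so the result
# is a float image in [0, 1] whose histogram is roughly uniform.
#   >>> from skimage import data
#   >>> out = equalize_hist(data.camera(), nbins=256)
#   >>> 0 <= out.min() and out.max() <= 1
#   True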
def rescale_intensity(image, in_range=None, out_range=None):
"""Return image after stretching or shrinking its intensity levels.
The image intensities are uniformly rescaled such that the minimum and
maximum values given by `in_range` match those given by `out_range`.
Parameters
----------
image : array
Image array.
in_range : 2-tuple (float, float)
Min and max *allowed* intensity values of input image. If None, the
*allowed* min/max values are set to the *actual* min/max values in the
input image.
out_range : 2-tuple (float, float)
Min and max intensity values of output image. If None, use the min/max
intensities of the image data type. See `skimage.util.dtype` for
details.
Returns
-------
out : array
Image array after rescaling its intensity. This image is the same dtype
as the input image.
Examples
--------
By default, intensities are stretched to the limits allowed by the dtype:
>>> image = np.array([51, 102, 153], dtype=np.uint8)
>>> rescale_intensity(image)
array([ 0, 127, 255], dtype=uint8)
It's easy to accidentally convert an image dtype from uint8 to float:
>>> 1.0 * image
array([ 51., 102., 153.])
Use `rescale_intensity` to rescale to the proper range for float dtypes:
>>> image_float = 1.0 * image
>>> rescale_intensity(image_float)
array([ 0. , 0.5, 1. ])
To maintain the low contrast of the original, use the `in_range` parameter:
>>> rescale_intensity(image_float, in_range=(0, 255))
array([ 0.2, 0.4, 0.6])
If the min/max value of `in_range` is more/less than the min/max image
intensity, then the intensity levels are clipped:
>>> rescale_intensity(image_float, in_range=(0, 102))
array([ 0.5, 1. , 1. ])
If you have an image with signed integers but want to rescale the image to
just the positive range, use the `out_range` parameter:
>>> image = np.array([-10, 0, 10], dtype=np.int8)
>>> rescale_intensity(image, out_range=(0, 127))
array([ 0, 63, 127], dtype=int8)
"""
dtype = image.dtype.type
if in_range is None:
imin = np.min(image)
imax = np.max(image)
else:
imin, imax = in_range
if out_range is None:
omin, omax = dtype_range[dtype]
if imin >= 0:
omin = 0
else:
omin, omax = out_range
image = np.clip(image, imin, imax)
image = (image - imin) / float(imax - imin)
return dtype(image * (omax - omin) + omin)
def | (image):
if np.any(image < 0):
raise ValueError('Image Correction methods work correctly only on '
'images with non-negative values. Use '
'skimage.exposure.rescale_intensity.')
def adjust_gamma(image, gamma=1, gain=1):
"""Performs Gamma Correction on the input image.
Also known as Power Law Transform.
This function transforms the input image pixelwise according to the
equation ``O = I**gamma`` after scaling each pixel to the range 0 to 1.
Parameters
----------
image : ndarray
Input image.
gamma : float
Non negative real number. Default value is 1.
gain : float
The constant multiplier. Default value is 1.
Returns
-------
out : ndarray
Gamma corrected output image.
Notes
-----
For gamma greater than 1, the histogram will shift towards left and
the output image will be darker than the input image.
For gamma less than 1, the histogram will shift towards right and
the output image will be brighter than the input image.
References
----------
.. [1] http://en.wikipedia.org/wiki/Gamma_correction
"""
_assert_non_negative(image)
dtype = image.dtype.type
if gamma < 0:
return "Gamma should be a non-negative real number"
scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])
out = ((image / scale) ** gamma) * scale * gain
return dtype(out)
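# Quick numeric sketch (float input, so the internal scale factor is 1):
# gamma > 1 darkens mid-tones and gamma < 1 brightens them, while 0 and 1
# are fixed points.
#   >>> adjust_gamma(np.array([0.25, 0.5, 1.0]), gamma=2)
#   # -> array([ 0.0625,  0.25  ,  1.    ])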
def adjust_log(image, gain=1, inv=False):
"""Performs Logarithmic correction on the input image.
This function transforms the input image pixelwise according to the
equation ``O = gain*log(1 + I)`` after scaling each pixel to the range 0 to 1.
For inverse logarithmic correction, the equation is ``O = gain*(2**I - 1)``.
Parameters
----------
image : ndarray
Input image.
gain : float
The constant multiplier. Default value is 1.
inv : float
If True, it performs inverse logarithmic correction,
else correction will be logarithmic. Defaults to False.
Returns
-------
out : ndarray
Logarithm corrected output image.
References
----------
.. [1] http://www.ece.ucsb.edu/Faculty/Manjunath/courses/ece178W03/EnhancePart1.pdf
"""
_assert_non_negative(image)
dtype = image.dtype.type
scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])
if inv:
out = (2 ** (image / scale) - 1) * scale * gain
return dtype(out)
out = np.log2(1 + image / scale) * scale * gain
return dtype(out)
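# Quick numeric sketch (float input, scale factor 1): the log mapping lifts
# mid-tones, e.g. 0.5 -> log2(1.5) ~ 0.585, while 0 and 1 stay fixed; with
# inv=True the inverse curve darkens mid-tones instead.
#   >>> adjust_log(np.array([0.0, 0.5, 1.0]))
#   # -> roughly array([ 0.   ,  0.585,  1.   ])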
def adjust_sigmoid(image, cutoff=0.5, gain=10, inv=False):
"""Performs Sigmoid Correction on the input image.
Also known as Contrast Adjustment.
This function transforms the input image pixelwise according to the
equation ``O = 1/(1 + exp*(gain*(cutoff - I)))`` after scaling each pixel
to the range 0 to 1.
Parameters
----------
image : ndarray
Input image.
cutoff : float
Cutoff of the sigmoid function that shifts the characteristic curve
in horizontal direction. Default value is 0.5.
gain : float
The constant multiplier in exponential's power of sigmoid function.
Default value is 10.
inv : bool
If True, returns the negative sigmoid correction. Defaults to False.
Returns
-------
out : ndarray
Sigmoid corrected output image.
References
----------
.. [1] Gustav J. Braun, "Image Lightness Rescaling Using Sigmoidal Contrast
Enhancement Functions",
http://www.cis.rit.edu/fairchild/PDFs/PAP07.pdf
"""
_assert_non_negative(image)
dtype = image.dtype.type
scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])
if inv:
out = (1 - 1 / (1 + np.exp(gain * (cutoff - image/scale)))) * scale
return dtype(out)
out = (1 / (1 + np.exp(gain * (cutoff - image/scale)))) * scale
return dtype(out)
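# Quick numeric sketch (float input, scale factor 1, default cutoff=0.5 and
# gain=10): the value at the cutoff maps to 0.5 and values away from it are
# pushed toward 0 or 1, which is what stretches the contrast.
#   >>> adjust_sigmoid(np.array([0.25, 0.5, 0.75]))
#   # -> roughly array([ 0.076,  0.5  ,  0.924])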
| _assert_non_negative |
getSelections.js | import invariant from 'invariant';
import { GraphQLObjectType, GraphQLInterfaceType } from 'graphql';
import {
unwrap,
isConnection,
getConnectionType,
flattenSelections,
findSelections,
getArguments,
quoteValue
} from '../utils';
function getParams (
server,
info,
selection,
typeName,
isRoot,
isCount
) {
const args = getArguments(info, selection.arguments || [])
if (args.id) {
// if we have an id we can bail early
return `(func:uid(${args.id}))`
}
let query = ''
let params = []
if (isRoot) {
// root queries in dgraph query everything, so specify has(typeTypeName)
params.push(`func:has(type${typeName})`)
}
if (!isCount) {
if (args.first) {
// overfetch by one in relay so that we can determine hasNextPage
params.push(`first: ${args.first + (server.relay ? 1 : 0)}`)
}
if (args.after) {
params.push(`after: ${args.after}`)
}
}
if (args.order) {
const order = args.order
const index = order.indexOf('_')
params.push(`order${order.substr(index + 1)}: ${order.substr(0, index)}`)
}
query += params.length ? `(${params.join(', ')})` : ''
const filter = args.filter
if (filter) {
// filters are specified as a directive, so process those last
const params = Object.keys(filter).reduce((filters, key) => {
const i = key.indexOf('_')
const value = quoteValue(filter[key])
filters.push(`${key.substr(i + 1)}(${key.substr(0, i)}, ${value})`)
return filters
}, [])
query += ` @filter(${params.join(' AND ')})`
}
return query
}
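// Illustrative sketch of the translation above (field names are hypothetical
// and quoteValue is assumed to wrap strings in double quotes): a non-root
// connection selection carrying
//   (first: 10, order: "name_asc", filter: {name_eq: "Alice"})
// is rendered as the Dgraph params
//   (first: 10, orderasc: name) @filter(eq(name, "Alice"))
// with first bumped to 11 when server.relay is set (the overfetch-by-one), and
// a root selection on e.g. type Person additionally prepends func:has(typePerson).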
function getSelection (
info,
context,
selection,
type,
field,
indent,
isRoot,
map
) {
const name = selection.name.value;
if (name === 'id' || name.indexOf('__') === 0) {
return ''
}
let fieldType = unwrap(field.type)
let selections = selection.selectionSet
? selection.selectionSet.selections
: null
const fieldName = context.server.localizePredicate(name, context.language)
const connection = isConnection(fieldType)
if (connection && selections) {
selections = flattenSelections(selections, info)
selections = findSelections(selections, 'edges')
selections = findSelections(selections, 'node')
invariant(
fieldType instanceof GraphQLObjectType,
'Field is not object type'
)
fieldType = getConnectionType(fieldType)
}
let query = ''
if (isRoot || !map.has(fieldName)) {
map.add(fieldName) | if (isRoot) {
if (selection.alias) query += indent + selection.alias.value
else query += indent + name
} else {
let alias = name === fieldName ? '' : name + ':'
query += indent + alias + fieldName
}
}
if (
fieldType instanceof GraphQLObjectType ||
fieldType instanceof GraphQLInterfaceType
) {
let args = getParams(
context.server,
info,
selection,
fieldType.name,
isRoot,
false
)
query += args
if (selections) {
query += ' {\n'
query += `${indent} uid\n`
query += `${indent} __typename\n`
query += getSelections(
info,
context,
selections,
fieldType,
indent + ' ',
false,
null
)
query += indent + '}'
}
if (!isRoot && connection) {
query += `\n${indent}count(${fieldName}${args})`
}
if (isRoot && connection) {
args = getParams(
context.server,
info,
selection,
fieldType.name,
isRoot,
true
)
query += `\n${indent}_count_${fieldName}_${args} { count(uid) }`
}
}
return query + '\n'
}
export default function getSelections (
info,
context,
selections,
type,
indent,
isRoot,
map
) {
let nextMap = map || new Set();
let query = '';
const fields = type.getFields();
selections.forEach(selection => {
if (selection.kind === 'Field') {
query += getSelection(
info,
context,
selection,
type,
fields[selection.name.value],
indent,
isRoot,
nextMap
)
} else {
let fragment = null
if (selection.kind === 'InlineFragment') {
fragment = selection
} else {
fragment = info.fragments[selection.name.value]
}
invariant(fragment.typeCondition, 'No type condition found on fragment')
const fragmentType = info.schema.getType(
fragment.typeCondition.name.value
)
invariant(fragmentType, 'Fragment type not found')
invariant(
fragmentType instanceof GraphQLObjectType,
'Fragment must be instance of GraphQLObjectType'
)
query += getSelections(
info,
context,
fragment.selectionSet.selections,
fragmentType,
indent,
false,
nextMap
)
}
})
return query
} | |
looper.go | package main
import (
"fmt"
. "github.com/Gessiux/go-common"
"github.com/Gessiux/go-db"
"github.com/Gessiux/go-merkle"
)
func main() | {
db := db.NewMemDB()
t := merkle.NewIAVLTree(0, db)
// 23000ns/op, 43000ops/s
// for i := 0; i < 10000000; i++ {
// for i := 0; i < 1000000; i++ {
for i := 0; i < 1000; i++ {
t.Set(RandBytes(12), nil)
}
t.Save()
fmt.Println("ok, starting")
for i := 0; ; i++ {
key := RandBytes(12)
t.Set(key, nil)
t.Remove(key)
if i%1000 == 0 {
t.Save()
}
}
} |
|
inputs.rs | use clap::{App, Arg, ArgMatches, SubCommand};
use crate::geometry::{SimpleMesh, ToSimpleMeshWithMaterial, ToSimpleMesh};
use crate::context::{Context};
use std::fs::OpenOptions;
use std::path::Path;
use std::error::Error;
pub fn cli_matches<'a>() -> ArgMatches<'a> {
commands_for_subcommands(App::new("Sloth")
.version("0.1")
.author("Mitchell Hynes. <[email protected]>")
.about("A toy for rendering 3D objects in the command line")
.subcommand(commands_for_subcommands(SubCommand::with_name("image")
.about("Generates a colorless terminal output as lines of text")
.author("Mitchell Hynes <[email protected]>")
.arg(
Arg::with_name("frame count")
.short("j")
.long("webify")
.help("Generates a portable JS based render of your object for the web")
.takes_value(true)
)
.arg(
Arg::with_name("width")
.short("w")
.help("Sets the width of the image to generate")
.takes_value(true)
.required(true)
)
.arg(
Arg::with_name("height")
.short("h")
.help("Sets the height of the image to generate")
.takes_value(true)
)))
.arg(
Arg::with_name("input filename(s)")
.help("Sets the input file to render")
.required(true)
.multiple(true)
.index(1)
))
.get_matches()
}
fn commands_for_subcommands<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> {
command_flag_color(command_rotates(app))
}
fn command_flag_color<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> {
app.arg(
Arg::with_name("no color")
.short("b")
.help("Flags the rasterizer to render without color")
)
}
fn command_rotates<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> {
app.arg(
Arg::with_name("x")
.short("x")
.long("yaw")
.help("Sets the object's static X rotation (in radians)")
.takes_value(true)
)
.arg(
Arg::with_name("y")
.short("y")
.long("pitch")
.help("Sets the object's static Y rotation (in radians)")
.takes_value(true)
)
.arg(
Arg::with_name("z")
.short("z")
.long("roll")
.help("Sets the object's static Z rotation (in radians)")
.takes_value(true)
)
}
pub fn to_meshes(models: Vec<tobj::Model>, materials: Vec<tobj::Material>) -> Vec<SimpleMesh> {
let mut meshes: Vec<SimpleMesh> = vec![];
for model in models {
meshes.push(model.mesh.to_simple_mesh_with_materials(&materials));
}
meshes
}
pub fn match_meshes(matches: &ArgMatches) -> Result<Vec<SimpleMesh>, Box<Error>> {
let mut mesh_queue: Vec<SimpleMesh> = vec![];
for slice in matches.value_of("input filename(s)").unwrap().split(' ') {
let error = |s: &str, e: &str| -> Result<Vec<SimpleMesh>, Box<Error>> {
Err(format!("filename: [{}] couldn't load, {}. {}", slice, s, e).into())
};
// Fill list with file inputs (Splits for spaces -> multiple files)
let path = Path::new(slice);
let meshes = match path.extension() {
None => error("couldn't determine filename extension", ""),
Some(ext) => match ext.to_str() {
None => error("couldn't parse filename extension", ""),
Some(extstr) => match &*extstr.to_lowercase() {
"obj" => match tobj::load_obj(&path) {
Err(e) => error("tobj couldnt load/parse OBJ", &e.to_string()),
Ok(present) => Ok(to_meshes(present.0, present.1)),
},
"stl" => match OpenOptions::new().read(true).open(&path) {
Err(e) => error("STL load failed", &e.to_string()),
Ok(mut file) => match stl_io::read_stl(&mut file) {
Err(e) => error("stl_io couldnt parse STL", &e.to_string()),
Ok(stlio_mesh) => Ok(vec![stlio_mesh.to_simple_mesh()]),
},
},
_ => error("unknown filename extension", ""),
},
},
};
mesh_queue.append(&mut meshes.unwrap());
}
Ok(mesh_queue)
}
pub fn match_turntable(matches: &ArgMatches) -> Result<(f32, f32, f32, f32), Box<Error>> {
let mut turntable = (0.0, 0.0, 0.0, 0.0);
if let Some(x) = matches.value_of("x") {
turntable.0 = x.parse()?;
}
if let Some(y) = matches.value_of("y") {
turntable.1 = y.parse()?;
}
if let Some(z) = matches.value_of("z") {
turntable.2 = z.parse()?;
}
if let Some(s) = matches.value_of("speed") {
turntable.3 = s.parse()?;
} else {
turntable.3 = 1.0; // No speed defined -> 1.0 rad/s
}
turntable.1 += std::f32::consts::PI; // All models are backwards for some reason; this fixes that
Ok(turntable)
}
pub fn | (matches: &ArgMatches) -> bool {
matches.is_present("image")
}
pub fn match_no_color_mode(matches: &ArgMatches) -> bool {
matches.is_present("no color")
}
pub fn match_dimensions(context: &mut Context, matches: &ArgMatches) -> Result<(), Box<Error>> {
if let Some(x) = matches.value_of("width") {
context.width = x.parse()?;
if let Some(y) = matches.value_of("height") {
context.height = y.parse()?;
} else {
context.height = context.width;
}
}
Ok(())
}
| match_image_mode |
logdb.go | // Copyright 2017-2019 Lei Ni ([email protected]) and other Dragonboat authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package logdb implements the persistent log storage used by Dragonboat.
This package is internally used by Dragonboat, applications are not expected
to import this package.
*/
package logdb
import (
"fmt"
"os"
"path/filepath"
"github.com/lni/dragonboat/v3/config"
"github.com/lni/dragonboat/v3/internal/logdb/kv"
"github.com/lni/dragonboat/v3/logger"
"github.com/lni/dragonboat/v3/raftio"
"github.com/lni/goutils/fileutil"
"github.com/lni/goutils/random"
)
var (
plog = logger.GetLogger("logdb")
)
type kvFactory func(string, string) (kv.IKVStore, error)
// NewDefaultLogDB creates a Log DB instance using the default KV store
// implementation. The created Log DB tries to store entry records in
// plain format but it switches to the batched mode if there is already
// batched entries saved in the existing DB.
func | (dirs []string, lldirs []string) (raftio.ILogDB, error) {
return NewLogDB(dirs, lldirs, false, true, newDefaultKVStore)
}
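// Illustrative sketch (directory path is hypothetical): a single regular
// directory is fanned out to every shard by NewLogDB, and the low-latency
// slice may be left empty when no separate fast disk is available.
//
//	ldb, err := NewDefaultLogDB([]string{"/data/dragonboat-wal"}, nil)
//	if err != nil {
//		// handle the error
//	}
//	defer ldb.Close()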
// NewDefaultBatchedLogDB creates a Log DB instance using the default KV store
// implementation with batched entry support.
func NewDefaultBatchedLogDB(dirs []string,
lldirs []string) (raftio.ILogDB, error) {
return NewLogDB(dirs, lldirs, true, false, newDefaultKVStore)
}
// NewLogDB creates a Log DB instance based on provided configuration
// parameters. The underlying KV store used by the Log DB instance is created
// by the provided factory function.
func NewLogDB(dirs []string, lldirs []string,
batched bool, check bool, f kvFactory) (raftio.ILogDB, error) {
checkDirs(dirs, lldirs)
llDirRequired := len(lldirs) == 1
if len(dirs) == 1 {
for i := uint64(1); i < numOfRocksDBInstance; i++ {
dirs = append(dirs, dirs[0])
if llDirRequired {
lldirs = append(lldirs, lldirs[0])
}
}
}
return OpenShardedRDB(dirs, lldirs, batched, check, f)
}
func checkDirs(dirs []string, lldirs []string) {
if len(dirs) == 1 {
if len(lldirs) != 0 && len(lldirs) != 1 {
plog.Panicf("only 1 regular dir but %d low latency dirs", len(lldirs))
}
} else if len(dirs) > 1 {
if uint64(len(dirs)) != numOfRocksDBInstance {
plog.Panicf("%d regular dirs, but expect to have %d rdb instances",
len(dirs), numOfRocksDBInstance)
}
if len(lldirs) > 0 {
if len(dirs) != len(lldirs) {
plog.Panicf("%v regular dirs, but %v low latency dirs", dirs, lldirs)
}
}
} else {
panic("no regular dir")
}
}
// GetLogDBInfo returns logdb type name.
func GetLogDBInfo(f config.LogDBFactoryFunc,
nhDirs []string) (name string, err error) {
tmpDirs := make([]string, 0)
for _, dir := range nhDirs {
tmp := fmt.Sprintf("tmp-%d", random.LockGuardedRand.Uint64())
td := filepath.Join(dir, tmp)
if err := fileutil.Mkdir(td); err != nil {
return "", err
}
tmpDirs = append(tmpDirs, td)
}
ldb, err := f(tmpDirs, tmpDirs)
if err != nil {
return "", err
}
name = ldb.Name()
defer func() {
ldb.Close()
for _, dir := range tmpDirs {
if cerr := os.RemoveAll(dir); err == nil {
err = cerr
}
}
}()
return name, nil
}
| NewDefaultLogDB |
config_app.rs | // <config>
use actix_service::ServiceFactory;
use actix_web::dev::{MessageBody, ServiceRequest, ServiceResponse};
use actix_web::{web, App, Error, HttpResponse};
fn | () -> App<
impl ServiceFactory<
Config = (),
Request = ServiceRequest,
Response = ServiceResponse<impl MessageBody>,
Error = Error,
>,
impl MessageBody,
> {
App::new().service(
web::scope("/app")
.route("/index.html", web::get().to(|| HttpResponse::Ok())),
)
}
// </config>
| create_app |
serviceWorker.ts | // This optional code is used to register a service worker.
// register() is not called by default.
| // existing tabs open on the page have been closed, since previously cached
// resources are updated in the background.
// To learn more about the benefits of this model and instructions on how to
// opt-in, read https://bit.ly/CRA-PWA
const isLocalhost = Boolean(
window.location.hostname === 'localhost' ||
// [::1] is the IPv6 localhost address.
window.location.hostname === '[::1]' ||
// 127.0.0.1/8 is considered localhost for IPv4.
window.location.hostname.match(/^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/),
);
type Config = {
onSuccess?: (registration: ServiceWorkerRegistration) => void;
onUpdate?: (registration: ServiceWorkerRegistration) => void;
};
function registerValidSW(swUrl: string, config?: Config) {
navigator.serviceWorker
.register(swUrl)
.then(registration => {
registration.onupdatefound = () => {
const installingWorker = registration.installing;
if (installingWorker == null) {
return;
}
installingWorker.onstatechange = () => {
if (installingWorker.state === 'installed') {
if (navigator.serviceWorker.controller) {
// At this point, the updated precached content has been fetched,
// but the previous service worker will still serve the older
// content until all client tabs are closed.
console.log(
'New content is available and will be used when all ' +
'tabs for this page are closed. See https://bit.ly/CRA-PWA.',
);
// Execute callback
if (config && config.onUpdate) {
config.onUpdate(registration);
}
} else {
// At this point, everything has been precached.
// It's the perfect time to display a
// "Content is cached for offline use." message.
console.log('Content is cached for offline use.');
// Execute callback
if (config && config.onSuccess) {
config.onSuccess(registration);
}
}
}
};
};
})
.catch(error => {
console.error('Error during service worker registration:', error);
});
}
function checkValidServiceWorker(swUrl: string, config?: Config) {
// Check if the service worker can be found. If it can't, reload the page.
fetch(swUrl)
.then(response => {
// Ensure service worker exists, and that we really are getting a JS file.
const contentType = response.headers.get('content-type');
if (response.status === 404 || (contentType != null && contentType.indexOf('javascript') === -1)) {
// No service worker found. Probably a different app. Reload the page.
navigator.serviceWorker.ready.then(registration => {
registration.unregister().then(() => {
window.location.reload();
});
});
} else {
// Service worker found. Proceed as normal.
registerValidSW(swUrl, config);
}
})
.catch(() => {
console.log('No internet connection found. App is running in offline mode.');
});
}
export function register(config?: Config) {
if (process.env.NODE_ENV === 'production' && 'serviceWorker' in navigator) {
// The URL constructor is available in all browsers that support SW.
const publicUrl = new URL((process as { env: { [key: string]: string } }).env.PUBLIC_URL, window.location.href);
if (publicUrl.origin !== window.location.origin) {
// Our service worker won't work if PUBLIC_URL is on a different origin
// from what our page is served on. This might happen if a CDN is used to
// serve assets; see https://github.com/facebook/create-react-app/issues/2374
return;
}
window.addEventListener('load', () => {
const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`;
if (isLocalhost) {
// This is running on localhost. Let's check if a service worker still exists or not.
checkValidServiceWorker(swUrl, config);
// Add some additional logging to localhost, pointing developers to the
// service worker/PWA documentation.
navigator.serviceWorker.ready.then(() => {
console.log(
'This web app is being served cache-first by a service ' +
'worker. To learn more, visit https://bit.ly/CRA-PWA',
);
});
} else {
// Is not localhost. Just register service worker
registerValidSW(swUrl, config);
}
});
}
}
export function unregister() {
if ('serviceWorker' in navigator) {
navigator.serviceWorker.ready.then(registration => {
registration.unregister();
});
}
} | // This lets the app load faster on subsequent visits in production, and gives
// it offline capabilities. However, it also means that developers (and users)
// will only see deployed updates on subsequent visits to a page, after all the |
application.py | from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unrecognized browser %s" % browser)
self.wd.implicitly_wait(5)
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.contact = ContactHelper(self)
self.base_url = base_url
def is_valid(self):
try:
self.wd.current_url
return True
except: | def open_home_page(self):
wd = self.wd
wd.get(self.base_url)
def destroy(self):
self.wd.quit() | return False
|
set-org-user-contact-email.js | import React from 'react';
import PropTypes from 'prop-types';
import { Modal, ModalHeader, ModalBody, ModalFooter } from 'reactstrap';
import { gettext } from '../../utils/constants';
import { seafileAPI } from '../../utils/seafile-api';
import { Utils } from '../../utils/utils';
const propTypes = {
orgID: PropTypes.string.isRequired,
email: PropTypes.string.isRequired,
contactEmail: PropTypes.string.isRequired,
updateContactEmail: PropTypes.func.isRequired,
toggleDialog: PropTypes.func.isRequired
};
class | extends React.Component {
constructor(props) {
super(props);
this.state = {
inputValue: this.props.contactEmail,
submitBtnDisabled: false
};
}
handleInputChange = (e) => {
this.setState({
inputValue: e.target.value
});
}
formSubmit = () => {
const { orgID, email } = this.props;
const contactEmail = this.state.inputValue.trim();
this.setState({
submitBtnDisabled: true
});
seafileAPI.orgAdminSetOrgUserContactEmail(orgID, email, contactEmail).then((res) => {
const newContactEmail = contactEmail ? res.data.contact_email : '';
this.props.updateContactEmail(newContactEmail);
this.props.toggleDialog();
}).catch((error) => {
let errorMsg = Utils.getErrorMsg(error);
this.setState({
formErrorMsg: errorMsg,
submitBtnDisabled: false
});
});
}
render() {
const { inputValue, formErrorMsg, submitBtnDisabled } = this.state;
return (
<Modal isOpen={true} centered={true} toggle={this.props.toggleDialog}>
<ModalHeader toggle={this.props.toggleDialog}>{gettext('Set user contact email')}</ModalHeader>
<ModalBody>
<React.Fragment>
<input type="text" className="form-control" value={inputValue} onChange={this.handleInputChange} />
{formErrorMsg && <p className="error m-0 mt-2">{formErrorMsg}</p>}
</React.Fragment>
</ModalBody>
<ModalFooter>
<button className="btn btn-secondary" onClick={this.props.toggleDialog}>{gettext('Cancel')}</button>
<button className="btn btn-primary" disabled={submitBtnDisabled} onClick={this.formSubmit}>{gettext('Submit')}</button>
</ModalFooter>
</Modal>
);
}
}
SetOrgUserContactEmail.propTypes = propTypes;
export default SetOrgUserContactEmail;
| SetOrgUserContactEmail |
Simplex1Geometry.ts | import { GraphicsProgramSymbols } from '../core/GraphicsProgramSymbols';
import { Simplex } from '../geometries/Simplex';
import { SimplexPrimitivesBuilder } from '../geometries/SimplexPrimitivesBuilder';
import { Vector3 } from '../math/Vector3';
/**
* @hidden
*/
export class Simplex1Geometry extends SimplexPrimitivesBuilder {
public head: Vector3 = new Vector3([1, 0, 0]);
public tail: Vector3 = new Vector3([0, 1, 0]);
constructor() {
super();
this.calculate();
}
public calculate(): void {
const pos: Vector3[] = [0, 1].map(function (index) { return void 0; });
pos[0] = this.tail;
pos[1] = this.head;
function | (indices: number[]): Simplex {
const simplex = new Simplex(indices.length - 1);
for (let i = 0; i < indices.length; i++) {
simplex.vertices[i].attributes[GraphicsProgramSymbols.ATTRIBUTE_POSITION] = pos[indices[i]];
}
return simplex;
}
this.data = [[0, 1]].map(function (line: number[]) { return simplex(line); });
// Compute the meta data.
this.check();
}
}
| simplex |
main.rs | extern crate reqwest;
extern crate select;
extern crate url;
use std::io::{self, Read, Write};
use std::thread::sleep;
use std::time;
use select::document::Document;
use select::predicate::{Class, Predicate, Name};
fn main() {
let site = "https://pastebin.com";
let delay = time::Duration::from_secs(1);
loop {
let mut response = reqwest::blocking::get(site).unwrap();
let mut body = String::new();
response.read_to_string(&mut body).unwrap();
let dom = Document::from(body.as_str());
let pre_node = dom.find(Class("sidebar__menu")
.descendant(Name("a")))
.next()
.unwrap();
let link = pre_node.attr("href").unwrap();
sleep(delay);
response = reqwest::blocking::get(format!("{}/raw{}", site, link).as_str()).unwrap();
body = String::new();
response.read_to_string(&mut body).unwrap();
slow_print(body);
sleep(delay);
}
}
fn | (data: String) {
let delay = time::Duration::from_millis(125); // ~120 WPM
for c in data.chars() {
print!("{}", c);
io::stdout().flush().unwrap();
sleep(delay);
}
println!();
}
| slow_print |
mod.rs | pub type sa_family_t = u16;
pub type speed_t = ::c_uint;
pub type tcflag_t = ::c_uint;
pub type clockid_t = ::c_int;
pub type key_t = ::c_int;
pub type id_t = ::c_uint;
#[cfg_attr(feature = "extra_traits", derive(Debug))]
pub enum timezone {}
impl ::Copy for timezone {}
impl ::Clone for timezone {
fn clone(&self) -> timezone {
*self
}
}
s! {
pub struct in_addr {
pub s_addr: ::in_addr_t,
}
pub struct ip_mreq {
pub imr_multiaddr: in_addr,
pub imr_interface: in_addr,
}
pub struct ip_mreq_source {
pub imr_multiaddr: in_addr,
pub imr_interface: in_addr,
pub imr_sourceaddr: in_addr,
}
pub struct sockaddr {
pub sa_family: sa_family_t,
pub sa_data: [::c_char; 14],
}
pub struct sockaddr_in {
pub sin_family: sa_family_t,
pub sin_port: ::in_port_t,
pub sin_addr: ::in_addr,
pub sin_zero: [u8; 8],
}
pub struct sockaddr_in6 {
pub sin6_family: sa_family_t,
pub sin6_port: ::in_port_t,
pub sin6_flowinfo: u32,
pub sin6_addr: ::in6_addr,
pub sin6_scope_id: u32,
}
pub struct addrinfo {
pub ai_flags: ::c_int,
pub ai_family: ::c_int,
pub ai_socktype: ::c_int,
pub ai_protocol: ::c_int,
pub ai_addrlen: socklen_t,
#[cfg(any(target_os = "linux",
target_os = "emscripten"))]
pub ai_addr: *mut ::sockaddr,
pub ai_canonname: *mut c_char,
#[cfg(target_os = "android")]
pub ai_addr: *mut ::sockaddr,
pub ai_next: *mut addrinfo,
}
pub struct sockaddr_ll {
pub sll_family: ::c_ushort,
pub sll_protocol: ::c_ushort,
pub sll_ifindex: ::c_int,
pub sll_hatype: ::c_ushort,
pub sll_pkttype: ::c_uchar,
pub sll_halen: ::c_uchar,
pub sll_addr: [::c_uchar; 8]
}
pub struct fd_set {
fds_bits: [::c_ulong; FD_SETSIZE / ULONG_SIZE],
}
pub struct tm {
pub tm_sec: ::c_int,
pub tm_min: ::c_int,
pub tm_hour: ::c_int,
pub tm_mday: ::c_int,
pub tm_mon: ::c_int,
pub tm_year: ::c_int,
pub tm_wday: ::c_int,
pub tm_yday: ::c_int,
pub tm_isdst: ::c_int,
pub tm_gmtoff: ::c_long,
pub tm_zone: *const ::c_char,
}
pub struct sched_param {
pub sched_priority: ::c_int,
#[cfg(any(target_env = "musl", target_os = "emscripten"))]
pub sched_ss_low_priority: ::c_int,
#[cfg(any(target_env = "musl", target_os = "emscripten"))]
pub sched_ss_repl_period: ::timespec,
#[cfg(any(target_env = "musl", target_os = "emscripten"))]
pub sched_ss_init_budget: ::timespec,
#[cfg(any(target_env = "musl", target_os = "emscripten"))]
pub sched_ss_max_repl: ::c_int,
}
pub struct Dl_info {
pub dli_fname: *const ::c_char,
pub dli_fbase: *mut ::c_void,
pub dli_sname: *const ::c_char,
pub dli_saddr: *mut ::c_void,
}
pub struct lconv {
pub decimal_point: *mut ::c_char,
pub thousands_sep: *mut ::c_char,
pub grouping: *mut ::c_char,
pub int_curr_symbol: *mut ::c_char,
pub currency_symbol: *mut ::c_char,
pub mon_decimal_point: *mut ::c_char,
pub mon_thousands_sep: *mut ::c_char,
pub mon_grouping: *mut ::c_char,
pub positive_sign: *mut ::c_char,
pub negative_sign: *mut ::c_char,
pub int_frac_digits: ::c_char,
pub frac_digits: ::c_char,
pub p_cs_precedes: ::c_char,
pub p_sep_by_space: ::c_char,
pub n_cs_precedes: ::c_char,
pub n_sep_by_space: ::c_char,
pub p_sign_posn: ::c_char,
pub n_sign_posn: ::c_char,
pub int_p_cs_precedes: ::c_char,
pub int_p_sep_by_space: ::c_char,
pub int_n_cs_precedes: ::c_char,
pub int_n_sep_by_space: ::c_char,
pub int_p_sign_posn: ::c_char,
pub int_n_sign_posn: ::c_char,
}
pub struct in_pktinfo {
pub ipi_ifindex: ::c_int,
pub ipi_spec_dst: ::in_addr,
pub ipi_addr: ::in_addr,
}
pub struct ifaddrs {
pub ifa_next: *mut ifaddrs,
pub ifa_name: *mut c_char,
pub ifa_flags: ::c_uint,
pub ifa_addr: *mut ::sockaddr,
pub ifa_netmask: *mut ::sockaddr,
pub ifa_ifu: *mut ::sockaddr, // FIXME This should be a union
pub ifa_data: *mut ::c_void
}
pub struct in6_rtmsg {
rtmsg_dst: ::in6_addr,
rtmsg_src: ::in6_addr,
rtmsg_gateway: ::in6_addr,
rtmsg_type: u32,
rtmsg_dst_len: u16,
rtmsg_src_len: u16,
rtmsg_metric: u32,
rtmsg_info: ::c_ulong,
rtmsg_flags: u32,
rtmsg_ifindex: ::c_int,
}
pub struct arpreq {
pub arp_pa: ::sockaddr,
pub arp_ha: ::sockaddr,
pub arp_flags: ::c_int,
pub arp_netmask: ::sockaddr,
pub arp_dev: [::c_char; 16],
}
pub struct arpreq_old {
pub arp_pa: ::sockaddr,
pub arp_ha: ::sockaddr,
pub arp_flags: ::c_int,
pub arp_netmask: ::sockaddr,
}
pub struct arphdr {
pub ar_hrd: u16,
pub ar_pro: u16,
pub ar_hln: u8,
pub ar_pln: u8,
pub ar_op: u16,
}
pub struct mmsghdr {
pub msg_hdr: ::msghdr,
pub msg_len: ::c_uint,
}
}
s_no_extra_traits! {
#[cfg_attr(
any(
all(
target_arch = "x86",
not(target_env = "musl"),
not(target_os = "android")),
target_arch = "x86_64"),
repr(packed))]
pub struct epoll_event {
pub events: u32,
pub u64: u64,
}
pub struct sockaddr_un {
pub sun_family: sa_family_t,
pub sun_path: [::c_char; 108]
}
pub struct sockaddr_storage {
pub ss_family: sa_family_t,
__ss_align: ::size_t,
#[cfg(target_pointer_width = "32")]
__ss_pad2: [u8; 128 - 2 * 4],
#[cfg(target_pointer_width = "64")]
__ss_pad2: [u8; 128 - 2 * 8],
}
pub struct utsname {
pub sysname: [::c_char; 65],
pub nodename: [::c_char; 65],
pub release: [::c_char; 65],
pub version: [::c_char; 65],
pub machine: [::c_char; 65],
pub domainname: [::c_char; 65]
}
pub struct sigevent {
pub sigev_value: ::sigval,
pub sigev_signo: ::c_int,
pub sigev_notify: ::c_int,
// Actually a union. We only expose sigev_notify_thread_id because it's
// the most useful member
pub sigev_notify_thread_id: ::c_int,
#[cfg(target_pointer_width = "64")]
__unused1: [::c_int; 11],
#[cfg(target_pointer_width = "32")]
__unused1: [::c_int; 12]
}
}
cfg_if! {
if #[cfg(feature = "extra_traits")] {
impl PartialEq for epoll_event {
fn eq(&self, other: &epoll_event) -> bool {
self.events == other.events
&& self.u64 == other.u64
}
}
impl Eq for epoll_event {}
impl ::fmt::Debug for epoll_event {
fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
let events = self.events;
let u64 = self.u64;
f.debug_struct("epoll_event")
.field("events", &events)
.field("u64", &u64)
.finish()
}
}
impl ::hash::Hash for epoll_event {
fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
let events = self.events;
let u64 = self.u64;
events.hash(state);
u64.hash(state);
}
}
impl PartialEq for sockaddr_un {
fn eq(&self, other: &sockaddr_un) -> bool {
self.sun_family == other.sun_family
&& self
.sun_path
.iter()
.zip(other.sun_path.iter())
.all(|(a, b)| a == b)
}
}
impl Eq for sockaddr_un {}
impl ::fmt::Debug for sockaddr_un {
fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
f.debug_struct("sockaddr_un")
.field("sun_family", &self.sun_family)
// FIXME: .field("sun_path", &self.sun_path)
.finish()
}
}
impl ::hash::Hash for sockaddr_un {
fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
self.sun_family.hash(state);
self.sun_path.hash(state);
}
}
impl PartialEq for sockaddr_storage {
fn eq(&self, other: &sockaddr_storage) -> bool {
self.ss_family == other.ss_family
&& self
.__ss_pad2
.iter()
.zip(other.__ss_pad2.iter())
.all(|(a, b)| a == b)
}
}
impl Eq for sockaddr_storage {}
impl ::fmt::Debug for sockaddr_storage {
fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
f.debug_struct("sockaddr_storage")
.field("ss_family", &self.ss_family)
.field("__ss_align", &self.__ss_align)
// FIXME: .field("__ss_pad2", &self.__ss_pad2)
.finish()
}
}
impl ::hash::Hash for sockaddr_storage {
fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
self.ss_family.hash(state);
self.__ss_pad2.hash(state);
}
}
impl PartialEq for utsname {
fn eq(&self, other: &utsname) -> bool {
self.sysname
.iter()
.zip(other.sysname.iter())
.all(|(a, b)| a == b)
&& self
.nodename
.iter()
.zip(other.nodename.iter())
.all(|(a, b)| a == b)
&& self
.release
.iter()
.zip(other.release.iter())
.all(|(a, b)| a == b)
&& self
.version
.iter()
.zip(other.version.iter())
.all(|(a, b)| a == b)
&& self
.machine
.iter()
.zip(other.machine.iter())
.all(|(a, b)| a == b)
&& self
.domainname
.iter()
.zip(other.domainname.iter())
.all(|(a, b)| a == b)
}
}
impl Eq for utsname {}
impl ::fmt::Debug for utsname {
fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
f.debug_struct("utsname")
// FIXME: .field("sysname", &self.sysname)
// FIXME: .field("nodename", &self.nodename)
// FIXME: .field("release", &self.release)
// FIXME: .field("version", &self.version)
// FIXME: .field("machine", &self.machine)
// FIXME: .field("domainname", &self.domainname)
.finish()
}
}
impl ::hash::Hash for utsname {
fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
self.sysname.hash(state);
self.nodename.hash(state);
self.release.hash(state);
self.version.hash(state);
self.machine.hash(state);
self.domainname.hash(state);
}
}
impl PartialEq for sigevent {
fn eq(&self, other: &sigevent) -> bool {
self.sigev_value == other.sigev_value
&& self.sigev_signo == other.sigev_signo
&& self.sigev_notify == other.sigev_notify
&& self.sigev_notify_thread_id
== other.sigev_notify_thread_id
}
}
impl Eq for sigevent {}
impl ::fmt::Debug for sigevent {
fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
f.debug_struct("sigevent")
.field("sigev_value", &self.sigev_value)
.field("sigev_signo", &self.sigev_signo)
.field("sigev_notify", &self.sigev_notify)
.field("sigev_notify_thread_id",
&self.sigev_notify_thread_id)
.finish()
}
}
impl ::hash::Hash for sigevent {
fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
self.sigev_value.hash(state);
self.sigev_signo.hash(state);
self.sigev_notify.hash(state);
self.sigev_notify_thread_id.hash(state);
}
}
}
}
// intentionally not public, only used for fd_set
cfg_if! {
if #[cfg(target_pointer_width = "32")] {
const ULONG_SIZE: usize = 32;
} else if #[cfg(target_pointer_width = "64")] {
const ULONG_SIZE: usize = 64;
} else {
// Unknown target_pointer_width
}
}
pub const EXIT_FAILURE: ::c_int = 1;
pub const EXIT_SUCCESS: ::c_int = 0;
pub const RAND_MAX: ::c_int = 2147483647;
pub const EOF: ::c_int = -1;
pub const SEEK_SET: ::c_int = 0;
pub const SEEK_CUR: ::c_int = 1;
pub const SEEK_END: ::c_int = 2;
pub const _IOFBF: ::c_int = 0;
pub const _IONBF: ::c_int = 2;
pub const _IOLBF: ::c_int = 1;
pub const F_DUPFD: ::c_int = 0;
pub const F_GETFD: ::c_int = 1;
pub const F_SETFD: ::c_int = 2;
pub const F_GETFL: ::c_int = 3;
pub const F_SETFL: ::c_int = 4;
// Linux-specific fcntls
pub const F_SETLEASE: ::c_int = 1024;
pub const F_GETLEASE: ::c_int = 1025;
pub const F_NOTIFY: ::c_int = 1026;
pub const F_CANCELLK: ::c_int = 1029;
pub const F_DUPFD_CLOEXEC: ::c_int = 1030;
pub const F_SETPIPE_SZ: ::c_int = 1031;
pub const F_GETPIPE_SZ: ::c_int = 1032;
pub const F_ADD_SEALS: ::c_int = 1033;
pub const F_GET_SEALS: ::c_int = 1034;
pub const F_SEAL_SEAL: ::c_int = 0x0001;
pub const F_SEAL_SHRINK: ::c_int = 0x0002;
pub const F_SEAL_GROW: ::c_int = 0x0004;
pub const F_SEAL_WRITE: ::c_int = 0x0008;
// TODO(#235): Include file sealing fcntls once we have a way to verify them.
pub const SIGTRAP: ::c_int = 5;
pub const PTHREAD_CREATE_JOINABLE: ::c_int = 0;
pub const PTHREAD_CREATE_DETACHED: ::c_int = 1;
pub const CLOCK_REALTIME: ::clockid_t = 0;
pub const CLOCK_MONOTONIC: ::clockid_t = 1;
pub const CLOCK_PROCESS_CPUTIME_ID: ::clockid_t = 2;
pub const CLOCK_THREAD_CPUTIME_ID: ::clockid_t = 3;
pub const CLOCK_MONOTONIC_RAW: ::clockid_t = 4;
pub const CLOCK_REALTIME_COARSE: ::clockid_t = 5;
pub const CLOCK_MONOTONIC_COARSE: ::clockid_t = 6;
pub const CLOCK_BOOTTIME: ::clockid_t = 7;
pub const CLOCK_REALTIME_ALARM: ::clockid_t = 8;
pub const CLOCK_BOOTTIME_ALARM: ::clockid_t = 9;
// TODO(#247) Someday our Travis shall have glibc 2.21 (released in Sep
// 2014.) See also musl/mod.rs
// pub const CLOCK_SGI_CYCLE: ::clockid_t = 10;
// pub const CLOCK_TAI: ::clockid_t = 11;
pub const TIMER_ABSTIME: ::c_int = 1;
pub const RUSAGE_SELF: ::c_int = 0;
pub const O_RDONLY: ::c_int = 0;
pub const O_WRONLY: ::c_int = 1;
pub const O_RDWR: ::c_int = 2;
pub const SOCK_CLOEXEC: ::c_int = O_CLOEXEC;
pub const S_IFIFO: ::mode_t = 4096;
pub const S_IFCHR: ::mode_t = 8192;
pub const S_IFBLK: ::mode_t = 24576;
pub const S_IFDIR: ::mode_t = 16384;
pub const S_IFREG: ::mode_t = 32768;
pub const S_IFLNK: ::mode_t = 40960;
pub const S_IFSOCK: ::mode_t = 49152;
pub const S_IFMT: ::mode_t = 61440;
pub const S_IRWXU: ::mode_t = 448;
pub const S_IXUSR: ::mode_t = 64;
pub const S_IWUSR: ::mode_t = 128;
pub const S_IRUSR: ::mode_t = 256;
pub const S_IRWXG: ::mode_t = 56;
pub const S_IXGRP: ::mode_t = 8;
pub const S_IWGRP: ::mode_t = 16;
pub const S_IRGRP: ::mode_t = 32;
pub const S_IRWXO: ::mode_t = 7;
pub const S_IXOTH: ::mode_t = 1;
pub const S_IWOTH: ::mode_t = 2;
pub const S_IROTH: ::mode_t = 4;
pub const F_OK: ::c_int = 0;
pub const R_OK: ::c_int = 4;
pub const W_OK: ::c_int = 2;
pub const X_OK: ::c_int = 1;
pub const STDIN_FILENO: ::c_int = 0;
pub const STDOUT_FILENO: ::c_int = 1;
pub const STDERR_FILENO: ::c_int = 2;
pub const SIGHUP: ::c_int = 1;
pub const SIGINT: ::c_int = 2;
pub const SIGQUIT: ::c_int = 3;
pub const SIGILL: ::c_int = 4;
pub const SIGABRT: ::c_int = 6;
pub const SIGFPE: ::c_int = 8;
pub const SIGKILL: ::c_int = 9;
pub const SIGSEGV: ::c_int = 11;
pub const SIGPIPE: ::c_int = 13;
pub const SIGALRM: ::c_int = 14;
pub const SIGTERM: ::c_int = 15;
pub const PROT_NONE: ::c_int = 0;
pub const PROT_READ: ::c_int = 1;
pub const PROT_WRITE: ::c_int = 2;
pub const PROT_EXEC: ::c_int = 4;
pub const LC_CTYPE: ::c_int = 0;
pub const LC_NUMERIC: ::c_int = 1;
pub const LC_TIME: ::c_int = 2;
pub const LC_COLLATE: ::c_int = 3;
pub const LC_MONETARY: ::c_int = 4;
pub const LC_MESSAGES: ::c_int = 5;
pub const LC_ALL: ::c_int = 6;
pub const LC_CTYPE_MASK: ::c_int = (1 << LC_CTYPE);
pub const LC_NUMERIC_MASK: ::c_int = (1 << LC_NUMERIC);
pub const LC_TIME_MASK: ::c_int = (1 << LC_TIME);
pub const LC_COLLATE_MASK: ::c_int = (1 << LC_COLLATE);
pub const LC_MONETARY_MASK: ::c_int = (1 << LC_MONETARY);
pub const LC_MESSAGES_MASK: ::c_int = (1 << LC_MESSAGES);
// LC_ALL_MASK defined per platform
pub const MAP_FILE: ::c_int = 0x0000;
pub const MAP_SHARED: ::c_int = 0x0001;
pub const MAP_PRIVATE: ::c_int = 0x0002;
pub const MAP_FIXED: ::c_int = 0x0010;
pub const MAP_FAILED: *mut ::c_void = !0 as *mut ::c_void;
// MS_ flags for msync(2)
pub const MS_ASYNC: ::c_int = 0x0001;
pub const MS_INVALIDATE: ::c_int = 0x0002;
pub const MS_SYNC: ::c_int = 0x0004;
// MS_ flags for mount(2)
pub const MS_RDONLY: ::c_ulong = 0x01;
pub const MS_NOSUID: ::c_ulong = 0x02;
pub const MS_NODEV: ::c_ulong = 0x04;
pub const MS_NOEXEC: ::c_ulong = 0x08;
pub const MS_SYNCHRONOUS: ::c_ulong = 0x10;
pub const MS_REMOUNT: ::c_ulong = 0x20;
pub const MS_MANDLOCK: ::c_ulong = 0x40;
pub const MS_DIRSYNC: ::c_ulong = 0x80;
pub const MS_NOATIME: ::c_ulong = 0x0400;
pub const MS_NODIRATIME: ::c_ulong = 0x0800;
pub const MS_BIND: ::c_ulong = 0x1000;
pub const MS_MOVE: ::c_ulong = 0x2000;
pub const MS_REC: ::c_ulong = 0x4000;
pub const MS_SILENT: ::c_ulong = 0x8000;
pub const MS_POSIXACL: ::c_ulong = 0x010000;
pub const MS_UNBINDABLE: ::c_ulong = 0x020000;
pub const MS_PRIVATE: ::c_ulong = 0x040000;
pub const MS_SLAVE: ::c_ulong = 0x080000;
pub const MS_SHARED: ::c_ulong = 0x100000;
pub const MS_RELATIME: ::c_ulong = 0x200000;
pub const MS_KERNMOUNT: ::c_ulong = 0x400000;
pub const MS_I_VERSION: ::c_ulong = 0x800000;
pub const MS_STRICTATIME: ::c_ulong = 0x1000000;
pub const MS_ACTIVE: ::c_ulong = 0x40000000;
pub const MS_MGC_VAL: ::c_ulong = 0xc0ed0000;
pub const MS_MGC_MSK: ::c_ulong = 0xffff0000;
pub const EPERM: ::c_int = 1;
pub const ENOENT: ::c_int = 2;
pub const ESRCH: ::c_int = 3;
pub const EINTR: ::c_int = 4;
pub const EIO: ::c_int = 5;
pub const ENXIO: ::c_int = 6;
pub const E2BIG: ::c_int = 7;
pub const ENOEXEC: ::c_int = 8;
pub const EBADF: ::c_int = 9;
pub const ECHILD: ::c_int = 10;
pub const EAGAIN: ::c_int = 11;
pub const ENOMEM: ::c_int = 12;
pub const EACCES: ::c_int = 13;
pub const EFAULT: ::c_int = 14;
pub const ENOTBLK: ::c_int = 15;
pub const EBUSY: ::c_int = 16;
pub const EEXIST: ::c_int = 17;
pub const EXDEV: ::c_int = 18;
pub const ENODEV: ::c_int = 19;
pub const ENOTDIR: ::c_int = 20;
pub const EISDIR: ::c_int = 21;
pub const EINVAL: ::c_int = 22;
pub const ENFILE: ::c_int = 23;
pub const EMFILE: ::c_int = 24;
pub const ENOTTY: ::c_int = 25;
pub const ETXTBSY: ::c_int = 26;
pub const EFBIG: ::c_int = 27;
pub const ENOSPC: ::c_int = 28;
pub const ESPIPE: ::c_int = 29;
pub const EROFS: ::c_int = 30;
pub const EMLINK: ::c_int = 31;
pub const EPIPE: ::c_int = 32;
pub const EDOM: ::c_int = 33;
pub const ERANGE: ::c_int = 34;
pub const EWOULDBLOCK: ::c_int = EAGAIN;
pub const SCM_RIGHTS: ::c_int = 0x01;
pub const SCM_CREDENTIALS: ::c_int = 0x02;
pub const PROT_GROWSDOWN: ::c_int = 0x1000000;
pub const PROT_GROWSUP: ::c_int = 0x2000000;
pub const MAP_TYPE: ::c_int = 0x000f;
pub const MADV_NORMAL: ::c_int = 0;
pub const MADV_RANDOM: ::c_int = 1;
pub const MADV_SEQUENTIAL: ::c_int = 2;
pub const MADV_WILLNEED: ::c_int = 3;
pub const MADV_DONTNEED: ::c_int = 4;
pub const MADV_FREE: ::c_int = 8;
pub const MADV_REMOVE: ::c_int = 9;
pub const MADV_DONTFORK: ::c_int = 10;
pub const MADV_DOFORK: ::c_int = 11;
pub const MADV_MERGEABLE: ::c_int = 12;
pub const MADV_UNMERGEABLE: ::c_int = 13;
pub const MADV_HUGEPAGE: ::c_int = 14;
pub const MADV_NOHUGEPAGE: ::c_int = 15;
pub const MADV_DONTDUMP: ::c_int = 16;
pub const MADV_DODUMP: ::c_int = 17;
pub const MADV_HWPOISON: ::c_int = 100;
pub const IFF_UP: ::c_int = 0x1;
pub const IFF_BROADCAST: ::c_int = 0x2;
pub const IFF_DEBUG: ::c_int = 0x4;
pub const IFF_LOOPBACK: ::c_int = 0x8;
pub const IFF_POINTOPOINT: ::c_int = 0x10;
pub const IFF_NOTRAILERS: ::c_int = 0x20;
pub const IFF_RUNNING: ::c_int = 0x40;
pub const IFF_NOARP: ::c_int = 0x80;
pub const IFF_PROMISC: ::c_int = 0x100;
pub const IFF_ALLMULTI: ::c_int = 0x200;
pub const IFF_MASTER: ::c_int = 0x400;
pub const IFF_SLAVE: ::c_int = 0x800;
pub const IFF_MULTICAST: ::c_int = 0x1000;
pub const IFF_PORTSEL: ::c_int = 0x2000;
pub const IFF_AUTOMEDIA: ::c_int = 0x4000;
pub const IFF_DYNAMIC: ::c_int = 0x8000;
pub const SOL_IP: ::c_int = 0;
pub const SOL_TCP: ::c_int = 6;
pub const SOL_UDP: ::c_int = 17;
pub const SOL_IPV6: ::c_int = 41;
pub const SOL_ICMPV6: ::c_int = 58;
pub const SOL_RAW: ::c_int = 255;
pub const SOL_DECNET: ::c_int = 261;
pub const SOL_X25: ::c_int = 262;
pub const SOL_PACKET: ::c_int = 263;
pub const SOL_ATM: ::c_int = 264;
pub const SOL_AAL: ::c_int = 265;
pub const SOL_IRDA: ::c_int = 266;
pub const SOL_NETBEUI: ::c_int = 267;
pub const SOL_LLC: ::c_int = 268;
pub const SOL_DCCP: ::c_int = 269;
pub const SOL_NETLINK: ::c_int = 270;
pub const SOL_TIPC: ::c_int = 271;
pub const SOL_BLUETOOTH: ::c_int = 274;
pub const SOL_ALG: ::c_int = 279;
pub const AF_UNSPEC: ::c_int = 0;
pub const AF_UNIX: ::c_int = 1;
pub const AF_LOCAL: ::c_int = 1;
pub const AF_INET: ::c_int = 2;
pub const AF_AX25: ::c_int = 3;
pub const AF_IPX: ::c_int = 4;
pub const AF_APPLETALK: ::c_int = 5;
pub const AF_NETROM: ::c_int = 6;
pub const AF_BRIDGE: ::c_int = 7;
pub const AF_ATMPVC: ::c_int = 8;
pub const AF_X25: ::c_int = 9;
pub const AF_INET6: ::c_int = 10;
pub const AF_ROSE: ::c_int = 11;
pub const AF_DECnet: ::c_int = 12;
pub const AF_NETBEUI: ::c_int = 13;
pub const AF_SECURITY: ::c_int = 14;
pub const AF_KEY: ::c_int = 15;
pub const AF_NETLINK: ::c_int = 16;
pub const AF_ROUTE: ::c_int = AF_NETLINK;
pub const AF_PACKET: ::c_int = 17;
pub const AF_ASH: ::c_int = 18;
pub const AF_ECONET: ::c_int = 19;
pub const AF_ATMSVC: ::c_int = 20;
pub const AF_RDS: ::c_int = 21;
pub const AF_SNA: ::c_int = 22;
pub const AF_IRDA: ::c_int = 23;
pub const AF_PPPOX: ::c_int = 24;
pub const AF_WANPIPE: ::c_int = 25;
pub const AF_LLC: ::c_int = 26;
pub const AF_CAN: ::c_int = 29;
pub const AF_TIPC: ::c_int = 30;
pub const AF_BLUETOOTH: ::c_int = 31;
pub const AF_IUCV: ::c_int = 32;
pub const AF_RXRPC: ::c_int = 33;
pub const AF_ISDN: ::c_int = 34;
pub const AF_PHONET: ::c_int = 35;
pub const AF_IEEE802154: ::c_int = 36;
pub const AF_CAIF: ::c_int = 37;
pub const AF_ALG: ::c_int = 38;
pub const PF_UNSPEC: ::c_int = AF_UNSPEC;
pub const PF_UNIX: ::c_int = AF_UNIX;
pub const PF_LOCAL: ::c_int = AF_LOCAL;
pub const PF_INET: ::c_int = AF_INET;
pub const PF_AX25: ::c_int = AF_AX25;
pub const PF_IPX: ::c_int = AF_IPX;
pub const PF_APPLETALK: ::c_int = AF_APPLETALK;
pub const PF_NETROM: ::c_int = AF_NETROM;
pub const PF_BRIDGE: ::c_int = AF_BRIDGE;
pub const PF_ATMPVC: ::c_int = AF_ATMPVC;
pub const PF_X25: ::c_int = AF_X25;
pub const PF_INET6: ::c_int = AF_INET6;
pub const PF_ROSE: ::c_int = AF_ROSE;
pub const PF_DECnet: ::c_int = AF_DECnet;
pub const PF_NETBEUI: ::c_int = AF_NETBEUI;
pub const PF_SECURITY: ::c_int = AF_SECURITY;
pub const PF_KEY: ::c_int = AF_KEY;
pub const PF_NETLINK: ::c_int = AF_NETLINK;
pub const PF_ROUTE: ::c_int = AF_ROUTE;
pub const PF_PACKET: ::c_int = AF_PACKET;
pub const PF_ASH: ::c_int = AF_ASH;
pub const PF_ECONET: ::c_int = AF_ECONET;
pub const PF_ATMSVC: ::c_int = AF_ATMSVC;
pub const PF_RDS: ::c_int = AF_RDS;
pub const PF_SNA: ::c_int = AF_SNA;
pub const PF_IRDA: ::c_int = AF_IRDA;
pub const PF_PPPOX: ::c_int = AF_PPPOX;
pub const PF_WANPIPE: ::c_int = AF_WANPIPE;
pub const PF_LLC: ::c_int = AF_LLC;
pub const PF_CAN: ::c_int = AF_CAN;
pub const PF_TIPC: ::c_int = AF_TIPC;
pub const PF_BLUETOOTH: ::c_int = AF_BLUETOOTH;
pub const PF_IUCV: ::c_int = AF_IUCV;
pub const PF_RXRPC: ::c_int = AF_RXRPC;
pub const PF_ISDN: ::c_int = AF_ISDN;
pub const PF_PHONET: ::c_int = AF_PHONET;
pub const PF_IEEE802154: ::c_int = AF_IEEE802154;
pub const PF_CAIF: ::c_int = AF_CAIF;
pub const PF_ALG: ::c_int = AF_ALG;
pub const SOMAXCONN: ::c_int = 128;
pub const MSG_OOB: ::c_int = 1;
pub const MSG_PEEK: ::c_int = 2;
pub const MSG_DONTROUTE: ::c_int = 4;
pub const MSG_CTRUNC: ::c_int = 8;
pub const MSG_TRUNC: ::c_int = 0x20;
pub const MSG_DONTWAIT: ::c_int = 0x40;
pub const MSG_EOR: ::c_int = 0x80;
pub const MSG_WAITALL: ::c_int = 0x100;
pub const MSG_FIN: ::c_int = 0x200;
pub const MSG_SYN: ::c_int = 0x400;
pub const MSG_CONFIRM: ::c_int = 0x800;
pub const MSG_RST: ::c_int = 0x1000;
pub const MSG_ERRQUEUE: ::c_int = 0x2000;
pub const MSG_NOSIGNAL: ::c_int = 0x4000;
pub const MSG_MORE: ::c_int = 0x8000;
pub const MSG_WAITFORONE: ::c_int = 0x10000;
pub const MSG_FASTOPEN: ::c_int = 0x20000000;
pub const MSG_CMSG_CLOEXEC: ::c_int = 0x40000000;
pub const SCM_TIMESTAMP: ::c_int = SO_TIMESTAMP;
pub const SOCK_RAW: ::c_int = 3;
pub const SOCK_RDM: ::c_int = 4;
pub const IP_MULTICAST_IF: ::c_int = 32;
pub const IP_MULTICAST_TTL: ::c_int = 33;
pub const IP_MULTICAST_LOOP: ::c_int = 34;
pub const IP_TOS: ::c_int = 1;
pub const IP_TTL: ::c_int = 2;
pub const IP_HDRINCL: ::c_int = 3;
pub const IP_PKTINFO: ::c_int = 8;
pub const IP_RECVTOS: ::c_int = 13;
pub const IP_RECVERR: ::c_int = 11;
pub const IP_ADD_MEMBERSHIP: ::c_int = 35;
pub const IP_DROP_MEMBERSHIP: ::c_int = 36;
pub const IP_ADD_SOURCE_MEMBERSHIP: ::c_int = 39;
pub const IP_DROP_SOURCE_MEMBERSHIP: ::c_int = 40;
pub const IP_TRANSPARENT: ::c_int = 19;
pub const IPV6_ADDRFORM: ::c_int = 1;
pub const IPV6_2292PKTINFO: ::c_int = 2;
pub const IPV6_2292HOPOPTS: ::c_int = 3;
pub const IPV6_2292DSTOPTS: ::c_int = 4;
pub const IPV6_2292RTHDR: ::c_int = 5;
pub const IPV6_2292PKTOPTIONS: ::c_int = 6;
pub const IPV6_CHECKSUM: ::c_int = 7;
pub const IPV6_2292HOPLIMIT: ::c_int = 8;
pub const IPV6_NEXTHOP: ::c_int = 9;
pub const IPV6_FLOWINFO: ::c_int = 11;
pub const IPV6_UNICAST_HOPS: ::c_int = 16;
pub const IPV6_MULTICAST_IF: ::c_int = 17;
pub const IPV6_MULTICAST_HOPS: ::c_int = 18;
pub const IPV6_MULTICAST_LOOP: ::c_int = 19;
pub const IPV6_ADD_MEMBERSHIP: ::c_int = 20;
pub const IPV6_DROP_MEMBERSHIP: ::c_int = 21;
pub const IPV6_ROUTER_ALERT: ::c_int = 22;
pub const IPV6_MTU_DISCOVER: ::c_int = 23;
pub const IPV6_MTU: ::c_int = 24;
pub const IPV6_RECVERR: ::c_int = 25;
pub const IPV6_V6ONLY: ::c_int = 26;
pub const IPV6_JOIN_ANYCAST: ::c_int = 27;
pub const IPV6_LEAVE_ANYCAST: ::c_int = 28;
pub const IPV6_RECVPKTINFO: ::c_int = 49;
pub const IPV6_PKTINFO: ::c_int = 50;
pub const IPV6_RECVTCLASS: ::c_int = 66;
pub const IPV6_TCLASS: ::c_int = 67;
pub const TCP_NODELAY: ::c_int = 1;
pub const TCP_MAXSEG: ::c_int = 2;
pub const TCP_CORK: ::c_int = 3;
pub const TCP_KEEPIDLE: ::c_int = 4;
pub const TCP_KEEPINTVL: ::c_int = 5;
pub const TCP_KEEPCNT: ::c_int = 6;
pub const TCP_SYNCNT: ::c_int = 7;
pub const TCP_LINGER2: ::c_int = 8;
pub const TCP_DEFER_ACCEPT: ::c_int = 9;
pub const TCP_WINDOW_CLAMP: ::c_int = 10;
pub const TCP_INFO: ::c_int = 11;
pub const TCP_QUICKACK: ::c_int = 12;
pub const TCP_CONGESTION: ::c_int = 13;
pub const SO_DEBUG: ::c_int = 1;
pub const SHUT_RD: ::c_int = 0;
pub const SHUT_WR: ::c_int = 1;
pub const SHUT_RDWR: ::c_int = 2;
pub const LOCK_SH: ::c_int = 1;
pub const LOCK_EX: ::c_int = 2;
pub const LOCK_NB: ::c_int = 4;
pub const LOCK_UN: ::c_int = 8;
pub const SS_ONSTACK: ::c_int = 1;
pub const SS_DISABLE: ::c_int = 2;
pub const PATH_MAX: ::c_int = 4096;
pub const FD_SETSIZE: usize = 1024;
pub const EPOLLIN: ::c_int = 0x1;
pub const EPOLLPRI: ::c_int = 0x2;
pub const EPOLLOUT: ::c_int = 0x4;
pub const EPOLLRDNORM: ::c_int = 0x40;
pub const EPOLLRDBAND: ::c_int = 0x80;
pub const EPOLLWRNORM: ::c_int = 0x100;
pub const EPOLLWRBAND: ::c_int = 0x200;
pub const EPOLLMSG: ::c_int = 0x400;
pub const EPOLLERR: ::c_int = 0x8;
pub const EPOLLHUP: ::c_int = 0x10;
pub const EPOLLET: ::c_int = 0x80000000;
pub const EPOLL_CTL_ADD: ::c_int = 1;
pub const EPOLL_CTL_MOD: ::c_int = 3;
pub const EPOLL_CTL_DEL: ::c_int = 2;
pub const MNT_DETACH: ::c_int = 0x2;
pub const MNT_EXPIRE: ::c_int = 0x4;
pub const Q_GETFMT: ::c_int = 0x800004;
pub const Q_GETINFO: ::c_int = 0x800005;
pub const Q_SETINFO: ::c_int = 0x800006;
pub const QIF_BLIMITS: u32 = 1;
pub const QIF_SPACE: u32 = 2;
pub const QIF_ILIMITS: u32 = 4;
pub const QIF_INODES: u32 = 8;
pub const QIF_BTIME: u32 = 16;
pub const QIF_ITIME: u32 = 32;
pub const QIF_LIMITS: u32 = 5;
pub const QIF_USAGE: u32 = 10;
pub const QIF_TIMES: u32 = 48;
pub const QIF_ALL: u32 = 63;
pub const MNT_FORCE: ::c_int = 0x1;
pub const Q_SYNC: ::c_int = 0x800001;
pub const Q_QUOTAON: ::c_int = 0x800002;
pub const Q_QUOTAOFF: ::c_int = 0x800003;
pub const Q_GETQUOTA: ::c_int = 0x800007;
pub const Q_SETQUOTA: ::c_int = 0x800008;
pub const TCIOFF: ::c_int = 2;
pub const TCION: ::c_int = 3;
pub const TCOOFF: ::c_int = 0;
pub const TCOON: ::c_int = 1;
pub const TCIFLUSH: ::c_int = 0;
pub const TCOFLUSH: ::c_int = 1;
pub const TCIOFLUSH: ::c_int = 2;
pub const NL0: ::tcflag_t = 0x00000000;
pub const NL1: ::tcflag_t = 0x00000100;
pub const TAB0: ::tcflag_t = 0x00000000;
pub const CR0: ::tcflag_t = 0x00000000;
pub const FF0: ::tcflag_t = 0x00000000;
pub const BS0: ::tcflag_t = 0x00000000;
pub const VT0: ::tcflag_t = 0x00000000;
pub const VERASE: usize = 2;
pub const VKILL: usize = 3;
pub const VINTR: usize = 0;
pub const VQUIT: usize = 1;
pub const VLNEXT: usize = 15;
pub const IGNBRK: ::tcflag_t = 0x00000001;
pub const BRKINT: ::tcflag_t = 0x00000002;
pub const IGNPAR: ::tcflag_t = 0x00000004;
pub const PARMRK: ::tcflag_t = 0x00000008;
pub const INPCK: ::tcflag_t = 0x00000010;
pub const ISTRIP: ::tcflag_t = 0x00000020;
pub const INLCR: ::tcflag_t = 0x00000040;
pub const IGNCR: ::tcflag_t = 0x00000080;
pub const ICRNL: ::tcflag_t = 0x00000100;
pub const IXANY: ::tcflag_t = 0x00000800;
pub const IMAXBEL: ::tcflag_t = 0x00002000;
pub const OPOST: ::tcflag_t = 0x1;
pub const CS5: ::tcflag_t = 0x00000000;
pub const CRTSCTS: ::tcflag_t = 0x80000000;
pub const ECHO: ::tcflag_t = 0x00000008;
pub const OCRNL: ::tcflag_t = 0o000010;
pub const ONOCR: ::tcflag_t = 0o000020;
pub const ONLRET: ::tcflag_t = 0o000040;
pub const OFILL: ::tcflag_t = 0o000100;
pub const OFDEL: ::tcflag_t = 0o000200;
pub const CLONE_VM: ::c_int = 0x100;
pub const CLONE_FS: ::c_int = 0x200;
pub const CLONE_FILES: ::c_int = 0x400;
pub const CLONE_SIGHAND: ::c_int = 0x800;
pub const CLONE_PTRACE: ::c_int = 0x2000;
pub const CLONE_VFORK: ::c_int = 0x4000;
pub const CLONE_PARENT: ::c_int = 0x8000;
pub const CLONE_THREAD: ::c_int = 0x10000;
pub const CLONE_NEWNS: ::c_int = 0x20000;
pub const CLONE_SYSVSEM: ::c_int = 0x40000;
pub const CLONE_SETTLS: ::c_int = 0x80000;
pub const CLONE_PARENT_SETTID: ::c_int = 0x100000;
pub const CLONE_CHILD_CLEARTID: ::c_int = 0x200000;
pub const CLONE_DETACHED: ::c_int = 0x400000;
pub const CLONE_UNTRACED: ::c_int = 0x800000;
pub const CLONE_CHILD_SETTID: ::c_int = 0x01000000;
pub const CLONE_NEWUTS: ::c_int = 0x04000000;
pub const CLONE_NEWIPC: ::c_int = 0x08000000;
pub const CLONE_NEWUSER: ::c_int = 0x10000000;
pub const CLONE_NEWPID: ::c_int = 0x20000000;
pub const CLONE_NEWNET: ::c_int = 0x40000000;
pub const CLONE_IO: ::c_int = 0x80000000;
pub const CLONE_NEWCGROUP: ::c_int = 0x02000000;
pub const WNOHANG: ::c_int = 0x00000001;
pub const WUNTRACED: ::c_int = 0x00000002;
pub const WSTOPPED: ::c_int = WUNTRACED;
pub const WEXITED: ::c_int = 0x00000004;
pub const WCONTINUED: ::c_int = 0x00000008;
pub const WNOWAIT: ::c_int = 0x01000000;
// Options set using PTRACE_SETOPTIONS.
pub const PTRACE_O_TRACESYSGOOD: ::c_int = 0x00000001;
pub const PTRACE_O_TRACEFORK: ::c_int = 0x00000002;
pub const PTRACE_O_TRACEVFORK: ::c_int = 0x00000004;
pub const PTRACE_O_TRACECLONE: ::c_int = 0x00000008;
pub const PTRACE_O_TRACEEXEC: ::c_int = 0x00000010;
pub const PTRACE_O_TRACEVFORKDONE: ::c_int = 0x00000020;
pub const PTRACE_O_TRACEEXIT: ::c_int = 0x00000040;
pub const PTRACE_O_TRACESECCOMP: ::c_int = 0x00000080;
pub const PTRACE_O_EXITKILL: ::c_int = 0x00100000;
pub const PTRACE_O_SUSPEND_SECCOMP: ::c_int = 0x00200000;
pub const PTRACE_O_MASK: ::c_int = 0x003000ff;
// Wait extended result codes for the above trace options.
pub const PTRACE_EVENT_FORK: ::c_int = 1;
pub const PTRACE_EVENT_VFORK: ::c_int = 2;
pub const PTRACE_EVENT_CLONE: ::c_int = 3;
pub const PTRACE_EVENT_EXEC: ::c_int = 4;
pub const PTRACE_EVENT_VFORK_DONE: ::c_int = 5;
pub const PTRACE_EVENT_EXIT: ::c_int = 6;
pub const PTRACE_EVENT_SECCOMP: ::c_int = 7;
// PTRACE_EVENT_STOP was added to glibc in 2.26
// pub const PTRACE_EVENT_STOP: ::c_int = 128;
pub const __WNOTHREAD: ::c_int = 0x20000000;
pub const __WALL: ::c_int = 0x40000000;
pub const __WCLONE: ::c_int = 0x80000000;
pub const SPLICE_F_MOVE: ::c_uint = 0x01;
pub const SPLICE_F_NONBLOCK: ::c_uint = 0x02;
pub const SPLICE_F_MORE: ::c_uint = 0x04;
pub const SPLICE_F_GIFT: ::c_uint = 0x08;
pub const RTLD_LOCAL: ::c_int = 0;
pub const RTLD_LAZY: ::c_int = 1;
pub const POSIX_FADV_NORMAL: ::c_int = 0;
pub const POSIX_FADV_RANDOM: ::c_int = 1;
pub const POSIX_FADV_SEQUENTIAL: ::c_int = 2;
pub const POSIX_FADV_WILLNEED: ::c_int = 3;
pub const AT_FDCWD: ::c_int = -100;
pub const AT_SYMLINK_NOFOLLOW: ::c_int = 0x100;
pub const AT_REMOVEDIR: ::c_int = 0x200;
pub const AT_SYMLINK_FOLLOW: ::c_int = 0x400;
pub const AT_NO_AUTOMOUNT: ::c_int = 0x800;
pub const AT_EMPTY_PATH: ::c_int = 0x1000;
pub const LOG_CRON: ::c_int = 9 << 3;
pub const LOG_AUTHPRIV: ::c_int = 10 << 3;
pub const LOG_FTP: ::c_int = 11 << 3;
pub const LOG_PERROR: ::c_int = 0x20;
pub const PIPE_BUF: usize = 4096;
pub const SI_LOAD_SHIFT: ::c_uint = 16;
pub const SIGEV_SIGNAL: ::c_int = 0;
pub const SIGEV_NONE: ::c_int = 1;
pub const SIGEV_THREAD: ::c_int = 2;
pub const P_ALL: idtype_t = 0;
pub const P_PID: idtype_t = 1;
pub const P_PGID: idtype_t = 2;
pub const UTIME_OMIT: c_long = 1073741822;
pub const UTIME_NOW: c_long = 1073741823;
pub const POLLIN: ::c_short = 0x1;
pub const POLLPRI: ::c_short = 0x2;
pub const POLLOUT: ::c_short = 0x4;
pub const POLLERR: ::c_short = 0x8;
pub const POLLHUP: ::c_short = 0x10;
pub const POLLNVAL: ::c_short = 0x20;
pub const POLLRDNORM: ::c_short = 0x040;
pub const POLLRDBAND: ::c_short = 0x080;
pub const IPTOS_LOWDELAY: u8 = 0x10;
pub const IPTOS_THROUGHPUT: u8 = 0x08;
pub const IPTOS_RELIABILITY: u8 = 0x04;
pub const IPTOS_MINCOST: u8 = 0x02;
pub const IPTOS_PREC_NETCONTROL: u8 = 0xe0;
pub const IPTOS_PREC_INTERNETCONTROL: u8 = 0xc0;
pub const IPTOS_PREC_CRITIC_ECP: u8 = 0xa0;
pub const IPTOS_PREC_FLASHOVERRIDE: u8 = 0x80;
pub const IPTOS_PREC_FLASH: u8 = 0x60;
pub const IPTOS_PREC_IMMEDIATE: u8 = 0x40;
pub const IPTOS_PREC_PRIORITY: u8 = 0x20;
pub const IPTOS_PREC_ROUTINE: u8 = 0x00;
pub const IPTOS_ECN_MASK: u8 = 0x03;
pub const IPTOS_ECN_ECT1: u8 = 0x01;
pub const IPTOS_ECN_ECT0: u8 = 0x02;
pub const IPTOS_ECN_CE: u8 = 0x03;
pub const IPOPT_COPY: u8 = 0x80;
pub const IPOPT_CLASS_MASK: u8 = 0x60;
pub const IPOPT_NUMBER_MASK: u8 = 0x1f;
pub const IPOPT_CONTROL: u8 = 0x00;
pub const IPOPT_RESERVED1: u8 = 0x20;
pub const IPOPT_MEASUREMENT: u8 = 0x40;
pub const IPOPT_RESERVED2: u8 = 0x60;
pub const IPOPT_END: u8 = (0 | IPOPT_CONTROL);
pub const IPOPT_NOOP: u8 = (1 | IPOPT_CONTROL);
pub const IPOPT_SEC: u8 = (2 | IPOPT_CONTROL | IPOPT_COPY);
pub const IPOPT_LSRR: u8 = (3 | IPOPT_CONTROL | IPOPT_COPY);
pub const IPOPT_TIMESTAMP: u8 = (4 | IPOPT_MEASUREMENT);
pub const IPOPT_RR: u8 = (7 | IPOPT_CONTROL);
pub const IPOPT_SID: u8 = (8 | IPOPT_CONTROL | IPOPT_COPY);
pub const IPOPT_SSRR: u8 = (9 | IPOPT_CONTROL | IPOPT_COPY);
pub const IPOPT_RA: u8 = (20 | IPOPT_CONTROL | IPOPT_COPY);
pub const IPVERSION: u8 = 4;
pub const MAXTTL: u8 = 255;
pub const IPDEFTTL: u8 = 64;
pub const IPOPT_OPTVAL: u8 = 0;
pub const IPOPT_OLEN: u8 = 1;
pub const IPOPT_OFFSET: u8 = 2;
pub const IPOPT_MINOFF: u8 = 4;
pub const MAX_IPOPTLEN: u8 = 40;
pub const IPOPT_NOP: u8 = IPOPT_NOOP;
pub const IPOPT_EOL: u8 = IPOPT_END;
pub const IPOPT_TS: u8 = IPOPT_TIMESTAMP;
pub const IPOPT_TS_TSONLY: u8 = 0;
pub const IPOPT_TS_TSANDADDR: u8 = 1;
pub const IPOPT_TS_PRESPEC: u8 = 3;
pub const ARPOP_RREQUEST: u16 = 3;
pub const ARPOP_RREPLY: u16 = 4;
pub const ARPOP_InREQUEST: u16 = 8;
pub const ARPOP_InREPLY: u16 = 9;
pub const ARPOP_NAK: u16 = 10;
pub const ATF_NETMASK: ::c_int = 0x20;
pub const ATF_DONTPUB: ::c_int = 0x40;
pub const ARPHRD_NETROM: u16 = 0;
pub const ARPHRD_ETHER: u16 = 1;
pub const ARPHRD_EETHER: u16 = 2;
pub const ARPHRD_AX25: u16 = 3;
pub const ARPHRD_PRONET: u16 = 4;
pub const ARPHRD_CHAOS: u16 = 5;
pub const ARPHRD_IEEE802: u16 = 6;
pub const ARPHRD_ARCNET: u16 = 7;
pub const ARPHRD_APPLETLK: u16 = 8;
pub const ARPHRD_DLCI: u16 = 15;
pub const ARPHRD_ATM: u16 = 19;
pub const ARPHRD_METRICOM: u16 = 23;
pub const ARPHRD_IEEE1394: u16 = 24;
pub const ARPHRD_EUI64: u16 = 27;
pub const ARPHRD_INFINIBAND: u16 = 32;
pub const ARPHRD_SLIP: u16 = 256;
pub const ARPHRD_CSLIP: u16 = 257;
pub const ARPHRD_SLIP6: u16 = 258;
pub const ARPHRD_CSLIP6: u16 = 259;
pub const ARPHRD_RSRVD: u16 = 260;
pub const ARPHRD_ADAPT: u16 = 264;
pub const ARPHRD_ROSE: u16 = 270;
pub const ARPHRD_X25: u16 = 271;
pub const ARPHRD_HWX25: u16 = 272;
pub const ARPHRD_PPP: u16 = 512;
pub const ARPHRD_CISCO: u16 = 513;
pub const ARPHRD_HDLC: u16 = ARPHRD_CISCO;
pub const ARPHRD_LAPB: u16 = 516;
pub const ARPHRD_DDCMP: u16 = 517;
pub const ARPHRD_RAWHDLC: u16 = 518;
pub const ARPHRD_TUNNEL: u16 = 768;
pub const ARPHRD_TUNNEL6: u16 = 769;
pub const ARPHRD_FRAD: u16 = 770;
pub const ARPHRD_SKIP: u16 = 771;
pub const ARPHRD_LOOPBACK: u16 = 772;
pub const ARPHRD_LOCALTLK: u16 = 773;
pub const ARPHRD_FDDI: u16 = 774;
pub const ARPHRD_BIF: u16 = 775;
pub const ARPHRD_SIT: u16 = 776;
pub const ARPHRD_IPDDP: u16 = 777;
pub const ARPHRD_IPGRE: u16 = 778;
pub const ARPHRD_PIMREG: u16 = 779;
pub const ARPHRD_HIPPI: u16 = 780;
pub const ARPHRD_ASH: u16 = 781;
pub const ARPHRD_ECONET: u16 = 782;
pub const ARPHRD_IRDA: u16 = 783;
pub const ARPHRD_FCPP: u16 = 784;
pub const ARPHRD_FCAL: u16 = 785;
pub const ARPHRD_FCPL: u16 = 786;
pub const ARPHRD_FCFABRIC: u16 = 787;
pub const ARPHRD_IEEE802_TR: u16 = 800;
pub const ARPHRD_IEEE80211: u16 = 801;
pub const ARPHRD_IEEE80211_PRISM: u16 = 802;
pub const ARPHRD_IEEE80211_RADIOTAP: u16 = 803;
pub const ARPHRD_IEEE802154: u16 = 804;
pub const ARPHRD_VOID: u16 = 0xFFFF;
pub const ARPHRD_NONE: u16 = 0xFFFE;
const_fn! {
{const} fn CMSG_ALIGN(len: usize) -> usize {
// Round `len` up to the nearest multiple of the pointer size.
(len + ::mem::size_of::<usize>() - 1) & !(::mem::size_of::<usize>() - 1)
}
}
f! {
pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr {
if (*mhdr).msg_controllen as usize >= ::mem::size_of::<cmsghdr>() {
(*mhdr).msg_control as *mut cmsghdr
} else {
0 as *mut cmsghdr
}
}
pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut ::c_uchar {
cmsg.offset(1) as *mut ::c_uchar
}
pub {const} fn CMSG_SPACE(length: ::c_uint) -> ::c_uint {
(CMSG_ALIGN(length as usize) + CMSG_ALIGN(::mem::size_of::<cmsghdr>()))
as ::c_uint
}
pub fn CMSG_LEN(length: ::c_uint) -> ::c_uint {
CMSG_ALIGN(::mem::size_of::<cmsghdr>()) as ::c_uint + length
}
pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () {
let fd = fd as usize;
let size = ::mem::size_of_val(&(*set).fds_bits[0]) * 8;
(*set).fds_bits[fd / size] &= !(1 << (fd % size));
return
}
pub fn FD_ISSET(fd: ::c_int, set: *mut fd_set) -> bool {
let fd = fd as usize;
let size = ::mem::size_of_val(&(*set).fds_bits[0]) * 8;
return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0
}
pub fn FD_SET(fd: ::c_int, set: *mut fd_set) -> () {
let fd = fd as usize;
let size = ::mem::size_of_val(&(*set).fds_bits[0]) * 8;
(*set).fds_bits[fd / size] |= 1 << (fd % size);
return
}
pub fn FD_ZERO(set: *mut fd_set) -> () {
for slot in (*set).fds_bits.iter_mut() {
*slot = 0;
}
}
pub fn WIFSTOPPED(status: ::c_int) -> bool {
(status & 0xff) == 0x7f
}
| }
pub fn WIFCONTINUED(status: ::c_int) -> bool {
status == 0xffff
}
pub fn WIFSIGNALED(status: ::c_int) -> bool {
((status & 0x7f) + 1) as i8 >= 2
}
pub fn WTERMSIG(status: ::c_int) -> ::c_int {
status & 0x7f
}
pub fn WIFEXITED(status: ::c_int) -> bool {
(status & 0x7f) == 0
}
pub fn WEXITSTATUS(status: ::c_int) -> ::c_int {
(status >> 8) & 0xff
}
pub fn WCOREDUMP(status: ::c_int) -> bool {
(status & 0x80) != 0
}
pub fn QCMD(cmd: ::c_int, type_: ::c_int) -> ::c_int {
(cmd << 8) | (type_ & 0x00ff)
}
pub fn IPOPT_COPIED(o: u8) -> u8 {
o & IPOPT_COPY
}
pub fn IPOPT_CLASS(o: u8) -> u8 {
o & IPOPT_CLASS_MASK
}
pub fn IPOPT_NUMBER(o: u8) -> u8 {
o & IPOPT_NUMBER_MASK
}
pub fn IPTOS_ECN(x: u8) -> u8 {
x & ::IPTOS_ECN_MASK
}
}
extern "C" {
pub fn sem_destroy(sem: *mut sem_t) -> ::c_int;
pub fn sem_init(
sem: *mut sem_t,
pshared: ::c_int,
value: ::c_uint,
) -> ::c_int;
pub fn fdatasync(fd: ::c_int) -> ::c_int;
pub fn mincore(
addr: *mut ::c_void,
len: ::size_t,
vec: *mut ::c_uchar,
) -> ::c_int;
pub fn clock_getres(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int;
pub fn clock_gettime(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int;
pub fn clock_settime(
clk_id: ::clockid_t,
tp: *const ::timespec,
) -> ::c_int;
pub fn dirfd(dirp: *mut ::DIR) -> ::c_int;
pub fn pthread_getattr_np(
native: ::pthread_t,
attr: *mut ::pthread_attr_t,
) -> ::c_int;
pub fn pthread_attr_getstack(
attr: *const ::pthread_attr_t,
stackaddr: *mut *mut ::c_void,
stacksize: *mut ::size_t,
) -> ::c_int;
pub fn memalign(align: ::size_t, size: ::size_t) -> *mut ::c_void;
pub fn setgroups(ngroups: ::size_t, ptr: *const ::gid_t) -> ::c_int;
pub fn pipe2(fds: *mut ::c_int, flags: ::c_int) -> ::c_int;
pub fn statfs(path: *const ::c_char, buf: *mut statfs) -> ::c_int;
pub fn statfs64(path: *const ::c_char, buf: *mut statfs64) -> ::c_int;
pub fn fstatfs(fd: ::c_int, buf: *mut statfs) -> ::c_int;
pub fn fstatfs64(fd: ::c_int, buf: *mut statfs64) -> ::c_int;
pub fn statvfs64(path: *const ::c_char, buf: *mut statvfs64) -> ::c_int;
pub fn fstatvfs64(fd: ::c_int, buf: *mut statvfs64) -> ::c_int;
pub fn memrchr(
cx: *const ::c_void,
c: ::c_int,
n: ::size_t,
) -> *mut ::c_void;
pub fn posix_fadvise(
fd: ::c_int,
offset: ::off_t,
len: ::off_t,
advise: ::c_int,
) -> ::c_int;
pub fn futimens(fd: ::c_int, times: *const ::timespec) -> ::c_int;
pub fn utimensat(
dirfd: ::c_int,
path: *const ::c_char,
times: *const ::timespec,
flag: ::c_int,
) -> ::c_int;
pub fn duplocale(base: ::locale_t) -> ::locale_t;
pub fn freelocale(loc: ::locale_t);
pub fn newlocale(
mask: ::c_int,
locale: *const ::c_char,
base: ::locale_t,
) -> ::locale_t;
pub fn uselocale(loc: ::locale_t) -> ::locale_t;
pub fn creat64(path: *const c_char, mode: mode_t) -> ::c_int;
pub fn fstat64(fildes: ::c_int, buf: *mut stat64) -> ::c_int;
pub fn fstatat64(
dirfd: ::c_int,
pathname: *const c_char,
buf: *mut stat64,
flags: ::c_int,
) -> ::c_int;
pub fn ftruncate64(fd: ::c_int, length: off64_t) -> ::c_int;
pub fn lseek64(fd: ::c_int, offset: off64_t, whence: ::c_int) -> off64_t;
pub fn lstat64(path: *const c_char, buf: *mut stat64) -> ::c_int;
pub fn mmap64(
addr: *mut ::c_void,
len: ::size_t,
prot: ::c_int,
flags: ::c_int,
fd: ::c_int,
offset: off64_t,
) -> *mut ::c_void;
pub fn open64(path: *const c_char, oflag: ::c_int, ...) -> ::c_int;
pub fn openat64(
fd: ::c_int,
path: *const c_char,
oflag: ::c_int,
...
) -> ::c_int;
pub fn pread64(
fd: ::c_int,
buf: *mut ::c_void,
count: ::size_t,
offset: off64_t,
) -> ::ssize_t;
pub fn preadv64(
fd: ::c_int,
iov: *const ::iovec,
iovcnt: ::c_int,
offset: ::off64_t,
) -> ::ssize_t;
pub fn pwrite64(
fd: ::c_int,
buf: *const ::c_void,
count: ::size_t,
offset: off64_t,
) -> ::ssize_t;
pub fn pwritev64(
fd: ::c_int,
iov: *const ::iovec,
iovcnt: ::c_int,
offset: ::off64_t,
) -> ::ssize_t;
pub fn readdir64(dirp: *mut ::DIR) -> *mut ::dirent64;
pub fn readdir64_r(
dirp: *mut ::DIR,
entry: *mut ::dirent64,
result: *mut *mut ::dirent64,
) -> ::c_int;
pub fn stat64(path: *const c_char, buf: *mut stat64) -> ::c_int;
pub fn truncate64(path: *const c_char, length: off64_t) -> ::c_int;
pub fn mknodat(
dirfd: ::c_int,
pathname: *const ::c_char,
mode: ::mode_t,
dev: dev_t,
) -> ::c_int;
pub fn pthread_condattr_getclock(
attr: *const pthread_condattr_t,
clock_id: *mut clockid_t,
) -> ::c_int;
pub fn pthread_condattr_setclock(
attr: *mut pthread_condattr_t,
clock_id: ::clockid_t,
) -> ::c_int;
pub fn pthread_condattr_setpshared(
attr: *mut pthread_condattr_t,
pshared: ::c_int,
) -> ::c_int;
pub fn accept4(
fd: ::c_int,
addr: *mut ::sockaddr,
len: *mut ::socklen_t,
flg: ::c_int,
) -> ::c_int;
pub fn pthread_mutexattr_setpshared(
attr: *mut pthread_mutexattr_t,
pshared: ::c_int,
) -> ::c_int;
pub fn pthread_rwlockattr_getpshared(
attr: *const pthread_rwlockattr_t,
val: *mut ::c_int,
) -> ::c_int;
pub fn pthread_rwlockattr_setpshared(
attr: *mut pthread_rwlockattr_t,
val: ::c_int,
) -> ::c_int;
pub fn ptsname_r(
fd: ::c_int,
buf: *mut ::c_char,
buflen: ::size_t,
) -> ::c_int;
pub fn clearenv() -> ::c_int;
pub fn waitid(
idtype: idtype_t,
id: id_t,
infop: *mut ::siginfo_t,
options: ::c_int,
) -> ::c_int;
pub fn setreuid(ruid: ::uid_t, euid: ::uid_t) -> ::c_int;
pub fn setregid(rgid: ::gid_t, egid: ::gid_t) -> ::c_int;
pub fn getresuid(
ruid: *mut ::uid_t,
euid: *mut ::uid_t,
suid: *mut ::uid_t,
) -> ::c_int;
pub fn getresgid(
rgid: *mut ::gid_t,
egid: *mut ::gid_t,
sgid: *mut ::gid_t,
) -> ::c_int;
pub fn acct(filename: *const ::c_char) -> ::c_int;
pub fn brk(addr: *mut ::c_void) -> ::c_int;
pub fn sbrk(increment: ::intptr_t) -> *mut ::c_void;
#[deprecated(
since = "0.2.66",
note = "causes memory corruption, see rust-lang/libc#1596"
)]
pub fn vfork() -> ::pid_t;
pub fn setresgid(rgid: ::gid_t, egid: ::gid_t, sgid: ::gid_t) -> ::c_int;
pub fn setresuid(ruid: ::uid_t, euid: ::uid_t, suid: ::uid_t) -> ::c_int;
pub fn wait4(
pid: ::pid_t,
status: *mut ::c_int,
options: ::c_int,
rusage: *mut ::rusage,
) -> ::pid_t;
pub fn openpty(
amaster: *mut ::c_int,
aslave: *mut ::c_int,
name: *mut ::c_char,
termp: *const termios,
winp: *const ::winsize,
) -> ::c_int;
pub fn forkpty(
amaster: *mut ::c_int,
name: *mut ::c_char,
termp: *const termios,
winp: *const ::winsize,
) -> ::pid_t;
pub fn login_tty(fd: ::c_int) -> ::c_int;
pub fn execvpe(
file: *const ::c_char,
argv: *const *const ::c_char,
envp: *const *const ::c_char,
) -> ::c_int;
pub fn fexecve(
fd: ::c_int,
argv: *const *const ::c_char,
envp: *const *const ::c_char,
) -> ::c_int;
pub fn getifaddrs(ifap: *mut *mut ::ifaddrs) -> ::c_int;
pub fn freeifaddrs(ifa: *mut ::ifaddrs);
pub fn bind(
socket: ::c_int,
address: *const ::sockaddr,
address_len: ::socklen_t,
) -> ::c_int;
pub fn writev(
fd: ::c_int,
iov: *const ::iovec,
iovcnt: ::c_int,
) -> ::ssize_t;
pub fn readv(
fd: ::c_int,
iov: *const ::iovec,
iovcnt: ::c_int,
) -> ::ssize_t;
pub fn sendmsg(
fd: ::c_int,
msg: *const ::msghdr,
flags: ::c_int,
) -> ::ssize_t;
pub fn recvmsg(
fd: ::c_int,
msg: *mut ::msghdr,
flags: ::c_int,
) -> ::ssize_t;
pub fn uname(buf: *mut ::utsname) -> ::c_int;
}
cfg_if! {
if #[cfg(target_os = "emscripten")] {
mod emscripten;
pub use self::emscripten::*;
} else if #[cfg(target_os = "linux")] {
mod linux;
pub use self::linux::*;
} else if #[cfg(target_os = "android")] {
mod android;
pub use self::android::*;
} else {
// Unknown target_os
}
} | pub fn WSTOPSIG(status: ::c_int) -> ::c_int {
(status >> 8) & 0xff |
mod.rs | #[cfg(feature = "UI_Xaml_Automation")]
pub mod Automation;
#[cfg(feature = "UI_Xaml_Controls")]
pub mod Controls;
#[cfg(feature = "UI_Xaml_Core")]
pub mod Core;
#[cfg(feature = "UI_Xaml_Data")]
pub mod Data;
#[cfg(feature = "UI_Xaml_Documents")]
pub mod Documents;
#[cfg(feature = "UI_Xaml_Hosting")]
pub mod Hosting;
#[cfg(feature = "UI_Xaml_Input")]
pub mod Input;
#[cfg(feature = "UI_Xaml_Interop")]
pub mod Interop;
#[cfg(feature = "UI_Xaml_Markup")]
pub mod Markup;
#[cfg(feature = "UI_Xaml_Media")]
pub mod Media;
#[cfg(feature = "UI_Xaml_Navigation")]
pub mod Navigation;
#[cfg(feature = "UI_Xaml_Printing")]
pub mod Printing;
#[cfg(feature = "UI_Xaml_Resources")]
pub mod Resources;
#[cfg(feature = "UI_Xaml_Shapes")]
pub mod Shapes;
pub type AdaptiveTrigger = *mut ::core::ffi::c_void;
pub type Application = *mut ::core::ffi::c_void;
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct ApplicationHighContrastAdjustment(pub u32);
impl ApplicationHighContrastAdjustment {
pub const None: Self = Self(0u32);
pub const Auto: Self = Self(4294967295u32);
}
impl ::core::marker::Copy for ApplicationHighContrastAdjustment {}
impl ::core::clone::Clone for ApplicationHighContrastAdjustment {
fn clone(&self) -> Self {
*self
}
}
pub type ApplicationInitializationCallback = *mut ::core::ffi::c_void;
pub type ApplicationInitializationCallbackParams = *mut ::core::ffi::c_void;
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct ApplicationRequiresPointerMode(pub i32);
impl ApplicationRequiresPointerMode {
pub const Auto: Self = Self(0i32);
pub const WhenRequested: Self = Self(1i32);
}
impl ::core::marker::Copy for ApplicationRequiresPointerMode {}
impl ::core::clone::Clone for ApplicationRequiresPointerMode {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct ApplicationTheme(pub i32);
impl ApplicationTheme {
pub const Light: Self = Self(0i32);
pub const Dark: Self = Self(1i32);
}
impl ::core::marker::Copy for ApplicationTheme {}
impl ::core::clone::Clone for ApplicationTheme {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct AutomationTextAttributesEnum(pub i32);
impl AutomationTextAttributesEnum {
pub const AnimationStyleAttribute: Self = Self(40000i32);
pub const BackgroundColorAttribute: Self = Self(40001i32);
pub const BulletStyleAttribute: Self = Self(40002i32);
pub const CapStyleAttribute: Self = Self(40003i32);
pub const CultureAttribute: Self = Self(40004i32);
pub const FontNameAttribute: Self = Self(40005i32);
pub const FontSizeAttribute: Self = Self(40006i32);
pub const FontWeightAttribute: Self = Self(40007i32);
pub const ForegroundColorAttribute: Self = Self(40008i32);
pub const HorizontalTextAlignmentAttribute: Self = Self(40009i32);
pub const IndentationFirstLineAttribute: Self = Self(40010i32);
pub const IndentationLeadingAttribute: Self = Self(40011i32);
pub const IndentationTrailingAttribute: Self = Self(40012i32);
pub const IsHiddenAttribute: Self = Self(40013i32);
pub const IsItalicAttribute: Self = Self(40014i32);
pub const IsReadOnlyAttribute: Self = Self(40015i32);
pub const IsSubscriptAttribute: Self = Self(40016i32);
pub const IsSuperscriptAttribute: Self = Self(40017i32);
pub const MarginBottomAttribute: Self = Self(40018i32);
pub const MarginLeadingAttribute: Self = Self(40019i32);
pub const MarginTopAttribute: Self = Self(40020i32);
pub const MarginTrailingAttribute: Self = Self(40021i32);
pub const OutlineStylesAttribute: Self = Self(40022i32);
pub const OverlineColorAttribute: Self = Self(40023i32);
pub const OverlineStyleAttribute: Self = Self(40024i32);
pub const StrikethroughColorAttribute: Self = Self(40025i32);
pub const StrikethroughStyleAttribute: Self = Self(40026i32);
pub const TabsAttribute: Self = Self(40027i32);
pub const TextFlowDirectionsAttribute: Self = Self(40028i32);
pub const UnderlineColorAttribute: Self = Self(40029i32);
pub const UnderlineStyleAttribute: Self = Self(40030i32);
pub const AnnotationTypesAttribute: Self = Self(40031i32);
pub const AnnotationObjectsAttribute: Self = Self(40032i32);
pub const StyleNameAttribute: Self = Self(40033i32);
pub const StyleIdAttribute: Self = Self(40034i32);
pub const LinkAttribute: Self = Self(40035i32);
pub const IsActiveAttribute: Self = Self(40036i32);
pub const SelectionActiveEndAttribute: Self = Self(40037i32);
pub const CaretPositionAttribute: Self = Self(40038i32);
pub const CaretBidiModeAttribute: Self = Self(40039i32);
}
impl ::core::marker::Copy for AutomationTextAttributesEnum {}
impl ::core::clone::Clone for AutomationTextAttributesEnum {
fn clone(&self) -> Self {
*self
}
}
pub type BindingFailedEventArgs = *mut ::core::ffi::c_void;
pub type BindingFailedEventHandler = *mut ::core::ffi::c_void;
pub type BringIntoViewOptions = *mut ::core::ffi::c_void;
pub type BringIntoViewRequestedEventArgs = *mut ::core::ffi::c_void;
pub type BrushTransition = *mut ::core::ffi::c_void;
pub type ColorPaletteResources = *mut ::core::ffi::c_void;
#[repr(C)]
#[doc = "*Required features: `\"UI_Xaml\"`*"]
pub struct CornerRadius {
pub TopLeft: f64,
pub TopRight: f64,
pub BottomRight: f64,
pub BottomLeft: f64,
}
impl ::core::marker::Copy for CornerRadius {}
impl ::core::clone::Clone for CornerRadius {
fn clone(&self) -> Self {
*self
}
}
pub type CornerRadiusHelper = *mut ::core::ffi::c_void;
pub type CreateDefaultValueCallback = *mut ::core::ffi::c_void;
pub type DataContextChangedEventArgs = *mut ::core::ffi::c_void;
pub type DataTemplate = *mut ::core::ffi::c_void;
pub type DataTemplateKey = *mut ::core::ffi::c_void;
pub type DebugSettings = *mut ::core::ffi::c_void;
pub type DependencyObject = *mut ::core::ffi::c_void;
pub type DependencyObjectCollection = *mut ::core::ffi::c_void;
pub type DependencyProperty = *mut ::core::ffi::c_void;
pub type DependencyPropertyChangedCallback = *mut ::core::ffi::c_void;
pub type DependencyPropertyChangedEventArgs = *mut ::core::ffi::c_void;
pub type DependencyPropertyChangedEventHandler = *mut ::core::ffi::c_void;
pub type DispatcherTimer = *mut ::core::ffi::c_void;
pub type DragEventArgs = *mut ::core::ffi::c_void;
pub type DragEventHandler = *mut ::core::ffi::c_void;
pub type DragOperationDeferral = *mut ::core::ffi::c_void;
pub type DragStartingEventArgs = *mut ::core::ffi::c_void;
pub type DragUI = *mut ::core::ffi::c_void;
pub type DragUIOverride = *mut ::core::ffi::c_void;
pub type DropCompletedEventArgs = *mut ::core::ffi::c_void;
#[repr(C)]
#[doc = "*Required features: `\"UI_Xaml\"`, `\"Foundation\"`*"]
#[cfg(feature = "Foundation")]
pub struct Duration {
pub TimeSpan: super::super::Foundation::TimeSpan,
pub Type: DurationType,
}
#[cfg(feature = "Foundation")]
impl ::core::marker::Copy for Duration {}
#[cfg(feature = "Foundation")]
impl ::core::clone::Clone for Duration {
fn clone(&self) -> Self {
*self
}
}
pub type DurationHelper = *mut ::core::ffi::c_void;
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct DurationType(pub i32);
impl DurationType {
pub const Automatic: Self = Self(0i32);
pub const TimeSpan: Self = Self(1i32);
pub const Forever: Self = Self(2i32);
}
impl ::core::marker::Copy for DurationType {}
impl ::core::clone::Clone for DurationType {
fn clone(&self) -> Self {
*self
}
}
pub type EffectiveViewportChangedEventArgs = *mut ::core::ffi::c_void;
pub type ElementFactoryGetArgs = *mut ::core::ffi::c_void;
pub type ElementFactoryRecycleArgs = *mut ::core::ffi::c_void;
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct ElementHighContrastAdjustment(pub u32);
impl ElementHighContrastAdjustment {
pub const None: Self = Self(0u32);
pub const Application: Self = Self(2147483648u32);
pub const Auto: Self = Self(4294967295u32);
}
impl ::core::marker::Copy for ElementHighContrastAdjustment {}
impl ::core::clone::Clone for ElementHighContrastAdjustment {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct ElementSoundKind(pub i32);
impl ElementSoundKind {
pub const Focus: Self = Self(0i32);
pub const Invoke: Self = Self(1i32);
pub const Show: Self = Self(2i32);
pub const Hide: Self = Self(3i32);
pub const MovePrevious: Self = Self(4i32);
pub const MoveNext: Self = Self(5i32);
pub const GoBack: Self = Self(6i32);
}
impl ::core::marker::Copy for ElementSoundKind {}
impl ::core::clone::Clone for ElementSoundKind {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct ElementSoundMode(pub i32);
impl ElementSoundMode {
pub const Default: Self = Self(0i32);
pub const FocusOnly: Self = Self(1i32);
pub const Off: Self = Self(2i32);
}
impl ::core::marker::Copy for ElementSoundMode {}
impl ::core::clone::Clone for ElementSoundMode {
fn clone(&self) -> Self {
*self
}
}
pub type ElementSoundPlayer = *mut ::core::ffi::c_void;
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct ElementSoundPlayerState(pub i32);
impl ElementSoundPlayerState {
pub const Auto: Self = Self(0i32);
pub const Off: Self = Self(1i32);
pub const On: Self = Self(2i32);
}
impl ::core::marker::Copy for ElementSoundPlayerState {}
impl ::core::clone::Clone for ElementSoundPlayerState {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct ElementSpatialAudioMode(pub i32);
impl ElementSpatialAudioMode {
pub const Auto: Self = Self(0i32);
pub const Off: Self = Self(1i32);
pub const On: Self = Self(2i32);
}
impl ::core::marker::Copy for ElementSpatialAudioMode {}
impl ::core::clone::Clone for ElementSpatialAudioMode {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct ElementTheme(pub i32);
impl ElementTheme {
pub const Default: Self = Self(0i32);
pub const Light: Self = Self(1i32);
pub const Dark: Self = Self(2i32);
}
impl ::core::marker::Copy for ElementTheme {}
impl ::core::clone::Clone for ElementTheme {
fn clone(&self) -> Self {
*self
}
}
pub type EnteredBackgroundEventHandler = *mut ::core::ffi::c_void;
pub type EventTrigger = *mut ::core::ffi::c_void;
pub type ExceptionRoutedEventArgs = *mut ::core::ffi::c_void;
pub type ExceptionRoutedEventHandler = *mut ::core::ffi::c_void;
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct FlowDirection(pub i32);
impl FlowDirection {
pub const LeftToRight: Self = Self(0i32);
pub const RightToLeft: Self = Self(1i32);
}
impl ::core::marker::Copy for FlowDirection {} | }
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct FocusState(pub i32);
impl FocusState {
pub const Unfocused: Self = Self(0i32);
pub const Pointer: Self = Self(1i32);
pub const Keyboard: Self = Self(2i32);
pub const Programmatic: Self = Self(3i32);
}
impl ::core::marker::Copy for FocusState {}
impl ::core::clone::Clone for FocusState {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct FocusVisualKind(pub i32);
impl FocusVisualKind {
pub const DottedLine: Self = Self(0i32);
pub const HighVisibility: Self = Self(1i32);
pub const Reveal: Self = Self(2i32);
}
impl ::core::marker::Copy for FocusVisualKind {}
impl ::core::clone::Clone for FocusVisualKind {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct FontCapitals(pub i32);
impl FontCapitals {
pub const Normal: Self = Self(0i32);
pub const AllSmallCaps: Self = Self(1i32);
pub const SmallCaps: Self = Self(2i32);
pub const AllPetiteCaps: Self = Self(3i32);
pub const PetiteCaps: Self = Self(4i32);
pub const Unicase: Self = Self(5i32);
pub const Titling: Self = Self(6i32);
}
impl ::core::marker::Copy for FontCapitals {}
impl ::core::clone::Clone for FontCapitals {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct FontEastAsianLanguage(pub i32);
impl FontEastAsianLanguage {
pub const Normal: Self = Self(0i32);
pub const HojoKanji: Self = Self(1i32);
pub const Jis04: Self = Self(2i32);
pub const Jis78: Self = Self(3i32);
pub const Jis83: Self = Self(4i32);
pub const Jis90: Self = Self(5i32);
pub const NlcKanji: Self = Self(6i32);
pub const Simplified: Self = Self(7i32);
pub const Traditional: Self = Self(8i32);
pub const TraditionalNames: Self = Self(9i32);
}
impl ::core::marker::Copy for FontEastAsianLanguage {}
impl ::core::clone::Clone for FontEastAsianLanguage {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct FontEastAsianWidths(pub i32);
impl FontEastAsianWidths {
pub const Normal: Self = Self(0i32);
pub const Full: Self = Self(1i32);
pub const Half: Self = Self(2i32);
pub const Proportional: Self = Self(3i32);
pub const Quarter: Self = Self(4i32);
pub const Third: Self = Self(5i32);
}
impl ::core::marker::Copy for FontEastAsianWidths {}
impl ::core::clone::Clone for FontEastAsianWidths {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct FontFraction(pub i32);
impl FontFraction {
pub const Normal: Self = Self(0i32);
pub const Stacked: Self = Self(1i32);
pub const Slashed: Self = Self(2i32);
}
impl ::core::marker::Copy for FontFraction {}
impl ::core::clone::Clone for FontFraction {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct FontNumeralAlignment(pub i32);
impl FontNumeralAlignment {
pub const Normal: Self = Self(0i32);
pub const Proportional: Self = Self(1i32);
pub const Tabular: Self = Self(2i32);
}
impl ::core::marker::Copy for FontNumeralAlignment {}
impl ::core::clone::Clone for FontNumeralAlignment {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct FontNumeralStyle(pub i32);
impl FontNumeralStyle {
pub const Normal: Self = Self(0i32);
pub const Lining: Self = Self(1i32);
pub const OldStyle: Self = Self(2i32);
}
impl ::core::marker::Copy for FontNumeralStyle {}
impl ::core::clone::Clone for FontNumeralStyle {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct FontVariants(pub i32);
impl FontVariants {
pub const Normal: Self = Self(0i32);
pub const Superscript: Self = Self(1i32);
pub const Subscript: Self = Self(2i32);
pub const Ordinal: Self = Self(3i32);
pub const Inferior: Self = Self(4i32);
pub const Ruby: Self = Self(5i32);
}
impl ::core::marker::Copy for FontVariants {}
impl ::core::clone::Clone for FontVariants {
fn clone(&self) -> Self {
*self
}
}
pub type FrameworkElement = *mut ::core::ffi::c_void;
pub type FrameworkTemplate = *mut ::core::ffi::c_void;
pub type FrameworkView = *mut ::core::ffi::c_void;
pub type FrameworkViewSource = *mut ::core::ffi::c_void;
#[repr(C)]
#[doc = "*Required features: `\"UI_Xaml\"`*"]
pub struct GridLength {
pub Value: f64,
pub GridUnitType: GridUnitType,
}
impl ::core::marker::Copy for GridLength {}
impl ::core::clone::Clone for GridLength {
fn clone(&self) -> Self {
*self
}
}
pub type GridLengthHelper = *mut ::core::ffi::c_void;
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct GridUnitType(pub i32);
impl GridUnitType {
pub const Auto: Self = Self(0i32);
pub const Pixel: Self = Self(1i32);
pub const Star: Self = Self(2i32);
}
impl ::core::marker::Copy for GridUnitType {}
impl ::core::clone::Clone for GridUnitType {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct HorizontalAlignment(pub i32);
impl HorizontalAlignment {
pub const Left: Self = Self(0i32);
pub const Center: Self = Self(1i32);
pub const Right: Self = Self(2i32);
pub const Stretch: Self = Self(3i32);
}
impl ::core::marker::Copy for HorizontalAlignment {}
impl ::core::clone::Clone for HorizontalAlignment {
fn clone(&self) -> Self {
*self
}
}
pub type IDataTemplateExtension = *mut ::core::ffi::c_void;
pub type IElementFactory = *mut ::core::ffi::c_void;
pub type LeavingBackgroundEventHandler = *mut ::core::ffi::c_void;
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct LineStackingStrategy(pub i32);
impl LineStackingStrategy {
pub const MaxHeight: Self = Self(0i32);
pub const BlockLineHeight: Self = Self(1i32);
pub const BaselineToBaseline: Self = Self(2i32);
}
impl ::core::marker::Copy for LineStackingStrategy {}
impl ::core::clone::Clone for LineStackingStrategy {
fn clone(&self) -> Self {
*self
}
}
pub type MediaFailedRoutedEventArgs = *mut ::core::ffi::c_void;
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct OpticalMarginAlignment(pub i32);
impl OpticalMarginAlignment {
pub const None: Self = Self(0i32);
pub const TrimSideBearings: Self = Self(1i32);
}
impl ::core::marker::Copy for OpticalMarginAlignment {}
impl ::core::clone::Clone for OpticalMarginAlignment {
fn clone(&self) -> Self {
*self
}
}
pub type PointHelper = *mut ::core::ffi::c_void;
pub type PropertyChangedCallback = *mut ::core::ffi::c_void;
pub type PropertyMetadata = *mut ::core::ffi::c_void;
pub type PropertyPath = *mut ::core::ffi::c_void;
pub type RectHelper = *mut ::core::ffi::c_void;
pub type ResourceDictionary = *mut ::core::ffi::c_void;
pub type RoutedEvent = *mut ::core::ffi::c_void;
pub type RoutedEventArgs = *mut ::core::ffi::c_void;
pub type RoutedEventHandler = *mut ::core::ffi::c_void;
pub type ScalarTransition = *mut ::core::ffi::c_void;
pub type Setter = *mut ::core::ffi::c_void;
pub type SetterBase = *mut ::core::ffi::c_void;
pub type SetterBaseCollection = *mut ::core::ffi::c_void;
pub type SizeChangedEventArgs = *mut ::core::ffi::c_void;
pub type SizeChangedEventHandler = *mut ::core::ffi::c_void;
pub type SizeHelper = *mut ::core::ffi::c_void;
pub type StateTrigger = *mut ::core::ffi::c_void;
pub type StateTriggerBase = *mut ::core::ffi::c_void;
pub type Style = *mut ::core::ffi::c_void;
pub type SuspendingEventHandler = *mut ::core::ffi::c_void;
pub type TargetPropertyPath = *mut ::core::ffi::c_void;
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct TextAlignment(pub i32);
impl TextAlignment {
pub const Center: Self = Self(0i32);
pub const Left: Self = Self(1i32);
pub const Start: Self = Self(1i32);
pub const Right: Self = Self(2i32);
pub const End: Self = Self(2i32);
pub const Justify: Self = Self(3i32);
pub const DetectFromContent: Self = Self(4i32);
}
impl ::core::marker::Copy for TextAlignment {}
impl ::core::clone::Clone for TextAlignment {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct TextLineBounds(pub i32);
impl TextLineBounds {
pub const Full: Self = Self(0i32);
pub const TrimToCapHeight: Self = Self(1i32);
pub const TrimToBaseline: Self = Self(2i32);
pub const Tight: Self = Self(3i32);
}
impl ::core::marker::Copy for TextLineBounds {}
impl ::core::clone::Clone for TextLineBounds {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct TextReadingOrder(pub i32);
impl TextReadingOrder {
pub const Default: Self = Self(0i32);
pub const UseFlowDirection: Self = Self(0i32);
pub const DetectFromContent: Self = Self(1i32);
}
impl ::core::marker::Copy for TextReadingOrder {}
impl ::core::clone::Clone for TextReadingOrder {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct TextTrimming(pub i32);
impl TextTrimming {
pub const None: Self = Self(0i32);
pub const CharacterEllipsis: Self = Self(1i32);
pub const WordEllipsis: Self = Self(2i32);
pub const Clip: Self = Self(3i32);
}
impl ::core::marker::Copy for TextTrimming {}
impl ::core::clone::Clone for TextTrimming {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct TextWrapping(pub i32);
impl TextWrapping {
pub const NoWrap: Self = Self(1i32);
pub const Wrap: Self = Self(2i32);
pub const WrapWholeWords: Self = Self(3i32);
}
impl ::core::marker::Copy for TextWrapping {}
impl ::core::clone::Clone for TextWrapping {
fn clone(&self) -> Self {
*self
}
}
#[repr(C)]
#[doc = "*Required features: `\"UI_Xaml\"`*"]
pub struct Thickness {
pub Left: f64,
pub Top: f64,
pub Right: f64,
pub Bottom: f64,
}
impl ::core::marker::Copy for Thickness {}
impl ::core::clone::Clone for Thickness {
fn clone(&self) -> Self {
*self
}
}
pub type ThicknessHelper = *mut ::core::ffi::c_void;
pub type TriggerAction = *mut ::core::ffi::c_void;
pub type TriggerActionCollection = *mut ::core::ffi::c_void;
pub type TriggerBase = *mut ::core::ffi::c_void;
pub type TriggerCollection = *mut ::core::ffi::c_void;
pub type UIElement = *mut ::core::ffi::c_void;
pub type UIElementWeakCollection = *mut ::core::ffi::c_void;
pub type UnhandledExceptionEventArgs = *mut ::core::ffi::c_void;
pub type UnhandledExceptionEventHandler = *mut ::core::ffi::c_void;
pub type Vector3Transition = *mut ::core::ffi::c_void;
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct Vector3TransitionComponents(pub u32);
impl Vector3TransitionComponents {
pub const X: Self = Self(1u32);
pub const Y: Self = Self(2u32);
pub const Z: Self = Self(4u32);
}
impl ::core::marker::Copy for Vector3TransitionComponents {}
impl ::core::clone::Clone for Vector3TransitionComponents {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct VerticalAlignment(pub i32);
impl VerticalAlignment {
pub const Top: Self = Self(0i32);
pub const Center: Self = Self(1i32);
pub const Bottom: Self = Self(2i32);
pub const Stretch: Self = Self(3i32);
}
impl ::core::marker::Copy for VerticalAlignment {}
impl ::core::clone::Clone for VerticalAlignment {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: `\"UI_Xaml\"`*"]
#[repr(transparent)]
pub struct Visibility(pub i32);
impl Visibility {
pub const Visible: Self = Self(0i32);
pub const Collapsed: Self = Self(1i32);
}
impl ::core::marker::Copy for Visibility {}
impl ::core::clone::Clone for Visibility {
fn clone(&self) -> Self {
*self
}
}
pub type VisualState = *mut ::core::ffi::c_void;
pub type VisualStateChangedEventArgs = *mut ::core::ffi::c_void;
pub type VisualStateChangedEventHandler = *mut ::core::ffi::c_void;
pub type VisualStateGroup = *mut ::core::ffi::c_void;
pub type VisualStateManager = *mut ::core::ffi::c_void;
pub type VisualTransition = *mut ::core::ffi::c_void;
pub type Window = *mut ::core::ffi::c_void;
pub type WindowActivatedEventHandler = *mut ::core::ffi::c_void;
pub type WindowClosedEventHandler = *mut ::core::ffi::c_void;
pub type WindowCreatedEventArgs = *mut ::core::ffi::c_void;
pub type WindowSizeChangedEventHandler = *mut ::core::ffi::c_void;
pub type WindowVisibilityChangedEventHandler = *mut ::core::ffi::c_void;
pub type XamlRoot = *mut ::core::ffi::c_void;
pub type XamlRootChangedEventArgs = *mut ::core::ffi::c_void; | impl ::core::clone::Clone for FlowDirection {
fn clone(&self) -> Self {
*self |
mod.rs | //! The norm module
//!
//! This module contains implementations of various linear algebra norms.
//! The implementations are contained within the `VectorNorm` and
//! `MatrixNorm` traits. This module also contains `VectorMetric` and
//! `MatrixMetric` traits which are used to compute the metric distance.
//!
//! These traits can be used directly by importing implementors from
//! this module. In most cases it will be easier to use the `norm` and
//! `metric` functions which exist for both vectors and matrices. These
//! functions take generic arguments for the norm to be used.
//!
//! In general you should use the least generic norm that fits your purpose.
//! For example, you would choose the `Euclidean` norm instead of an
//! `Lp::Float(2.0)` norm, despite the two being mathematically equivalent.
//!
//! # Defining your own norm
//!
//! Note that these traits enforce no requirements on the norm. It is up
//! to the user to ensure that they define a norm correctly.
//!
//! To define your own norm you need to implement the `MatrixNorm`
//! and/or the `VectorNorm` on your own struct. When you have defined
//! a norm you get the _induced metric_ for free. This means that any
//! object which implements the `VectorNorm` or `MatrixNorm` will
//! automatically implement the `VectorMetric` and `MatrixMetric` traits
//! respectively. This induced metric will compute the norm of the
//! difference between the vectors or matrices.
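//!
//! For example, the sketch below defines a toy maximum-absolute-value norm and
//! then uses the induced metric it gets for free. The `MaxAbs` name is purely
//! illustrative and not part of this crate, and the paths assume this module is
//! available as `rulinalg::norm`.
//!
//! ```
//! use rulinalg::norm::{VectorNorm, VectorMetric};
//! use rulinalg::vector::Vector;
//!
//! struct MaxAbs;
//!
//! impl VectorNorm<f64> for MaxAbs {
//!     fn norm(&self, v: &Vector<f64>) -> f64 {
//!         v.iter().fold(0.0, |acc, x| acc.max(x.abs()))
//!     }
//! }
//!
//! let v1 = Vector::new(vec![1.0, -2.0, 3.0]);
//! let v2 = Vector::new(vec![1.0, 1.0, 1.0]);
//! assert_eq!(MaxAbs.norm(&v1), 3.0);
//! // Induced metric: the norm of the difference, here max(|0|, |-3|, |2|) = 3.
//! assert_eq!(MaxAbs.metric(&v1, &v2), 3.0);
//! ```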
use matrix::BaseMatrix;
use vector::Vector;
use utils;
use std::ops::Sub;
use libnum::Float;
/// Trait for vector norms
pub trait VectorNorm<T> {
/// Computes the vector norm.
fn norm(&self, v: &Vector<T>) -> T;
}
/// Trait for vector metrics.
pub trait VectorMetric<T> {
/// Computes the metric distance between two vectors.
fn metric(&self, v1: &Vector<T>, v2: &Vector<T>) -> T;
}
/// Trait for matrix norms.
pub trait MatrixNorm<T, M: BaseMatrix<T>> {
/// Computes the matrix norm.
fn norm(&self, m: &M) -> T;
}
/// Trait for matrix metrics.
pub trait MatrixMetric<'a, 'b, T, M1: 'a + BaseMatrix<T>, M2: 'b + BaseMatrix<T>> {
/// Computes the metric distance between two matrices.
fn metric(&self, m1: &'a M1, m2: &'b M2) -> T;
}
/// The induced vector metric
///
/// Given a norm `N`, the induced vector metric `M` computes
/// the metric distance, `d`, between two vectors `v1` and `v2`
/// as follows:
///
/// `d = M(v1, v2) = N(v1 - v2)`
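///
/// For example, with the `Euclidean` norm (see the tests below), the distance
/// between `[3.0, 4.0]` and `[0.0, 0.0]` is `||[3.0, 4.0]|| = 5.0`, and the
/// distance between `[3.0, 4.0]` and `[4.0, 3.0]` is `||[-1.0, 1.0]|| = sqrt(2)`.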
impl<U, T> VectorMetric<T> for U
where U: VectorNorm<T>, T: Copy + Sub<T, Output=T> {
fn metric(&self, v1: &Vector<T>, v2: &Vector<T>) -> T {
self.norm(&(v1 - v2))
}
}
/// The induced matrix metric
///
/// Given a norm `N`, the induced matrix metric `M` computes
/// the metric distance, `d`, between two matrices `m1` and `m2`
/// as follows:
///
/// `d = M(m1, m2) = N(m1 - m2)`
impl<'a, 'b, U, T, M1, M2> MatrixMetric<'a, 'b, T, M1, M2> for U
where U: MatrixNorm<T, ::matrix::Matrix<T>>,
M1: 'a + BaseMatrix<T>,
M2: 'b + BaseMatrix<T>,
&'a M1: Sub<&'b M2, Output=::matrix::Matrix<T>> {
fn metric(&self, m1: &'a M1, m2: &'b M2) -> T {
self.norm(&(m1 - m2))
}
}
/// The Euclidean norm
///
/// The Euclidean norm computes the square-root
/// of the sum of squares.
///
/// `||v|| = SQRT(SUM(v_i * v_i))`
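///
/// # Example
///
/// A small sketch mirroring the tests in this module (paths omitted, so the
/// snippet is not compiled as a doc test):
///
/// ```ignore
/// let v = Vector::new(vec![3.0, 4.0]);
/// // sqrt(3^2 + 4^2) = 5
/// assert_eq!(VectorNorm::norm(&Euclidean, &v), 5.0);
/// ```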
#[derive(Debug)]
pub struct Euclidean;
impl<T: Float> VectorNorm<T> for Euclidean {
fn norm(&self, v: &Vector<T>) -> T {
utils::dot(v.data(), v.data()).sqrt()
}
} | impl<T: Float, M: BaseMatrix<T>> MatrixNorm<T, M> for Euclidean {
fn norm(&self, m: &M) -> T {
let mut s = T::zero();
for row in m.row_iter() {
s = s + utils::dot(row.raw_slice(), row.raw_slice());
}
s.sqrt()
}
}
/// The Lp norm
///
/// The
/// [Lp norm](https://en.wikipedia.org/wiki/Norm_(mathematics)#p-norm)
/// computes the `p`th root of the sum of elements
/// to the `p`th power.
///
/// The Lp norm requires `p` to be greater than
/// or equal to `1`.
///
/// We use an enum for this norm to allow us to explicitly handle
/// special cases at compile time. For example, we have an `Infinity`
/// variant which handles the special case when the `Lp` norm is a
/// supremum over absolute values. The `Integer` variant gives us a
/// performance boost when `p` is an integer.
///
/// You should avoid matching directly against this enum as it is likely
/// to grow.
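///
/// # Example
///
/// A small sketch mirroring the tests in this module (paths omitted, so the
/// snippet is not compiled as a doc test):
///
/// ```ignore
/// let v = Vector::new(vec![1.0, 2.0, -2.0]);
/// // L1 norm: sum of the absolute values.
/// assert_eq!(VectorNorm::norm(&Lp::Integer(1), &v), 5.0);
/// // L-infinity norm: largest absolute value.
/// assert_eq!(VectorNorm::norm(&Lp::Infinity, &v), 2.0);
/// ```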
#[derive(Debug)]
pub enum Lp<T: Float> {
/// The L-infinity norm (supremum)
Infinity,
/// The Lp norm where p is an integer
Integer(i32),
/// The Lp norm where p is a float
Float(T)
}
impl<T: Float> VectorNorm<T> for Lp<T> {
fn norm(&self, v: &Vector<T>) -> T {
match *self {
Lp::Infinity => {
// Compute supremum
let mut abs_sup = T::zero();
for d in v.iter().map(|d| d.abs()) {
if d > abs_sup {
abs_sup = d;
}
}
abs_sup
},
Lp::Integer(p) => {
assert!(p >= 1, "p value in Lp norm must be >= 1");
// Compute standard lp norm
let mut s = T::zero();
for x in v {
s = s + x.abs().powi(p);
}
s.powf(T::from(p).expect("Could not cast i32 to float").recip())
},
Lp::Float(p) => {
assert!(p >= T::one(), "p value in Lp norm must be >= 1");
// Compute standard lp norm
let mut s = T::zero();
for x in v {
s = s + x.abs().powf(p);
}
s.powf(p.recip())
}
}
}
}
impl<T: Float, M: BaseMatrix<T>> MatrixNorm<T, M> for Lp<T> {
fn norm(&self, m: &M) -> T {
match *self {
Lp::Infinity => {
// Compute supremum
let mut abs_sup = T::zero();
for d in m.iter().map(|d| d.abs()) {
if d > abs_sup {
abs_sup = d;
}
}
abs_sup
},
Lp::Integer(p) => {
assert!(p >= 1, "p value in Lp norm must be >= 1");
// Compute standard lp norm
let mut s = T::zero();
for x in m.iter() {
s = s + x.abs().powi(p);
}
s.powf(T::from(p).expect("Could not cast i32 to float").recip())
},
Lp::Float(p) => {
assert!(p >= T::one(), "p value in Lp norm must be >= 1");
// Compute standard lp norm
let mut s = T::zero();
for x in m.iter() {
s = s + x.abs().powf(p);
}
s.powf(p.recip())
}
}
}
}
#[cfg(test)]
mod tests {
use libnum::Float;
use std::f64;
use super::*;
use vector::Vector;
use matrix::{Matrix, MatrixSlice};
#[test]
fn test_euclidean_vector_norm() {
let v = vector![3.0, 4.0];
assert_scalar_eq!(VectorNorm::norm(&Euclidean, &v), 5.0, comp = float);
}
#[test]
fn test_euclidean_matrix_norm() {
let m = matrix![3.0, 4.0;
1.0, 3.0];
assert_scalar_eq!(MatrixNorm::norm(&Euclidean, &m), 35.0.sqrt(), comp = float);
}
#[test]
fn test_euclidean_matrix_slice_norm() {
let m = matrix![3.0, 4.0;
1.0, 3.0];
let slice = MatrixSlice::from_matrix(&m, [0,0], 1, 2);
assert_scalar_eq!(MatrixNorm::norm(&Euclidean, &slice), 5.0, comp = float);
}
#[test]
fn test_euclidean_vector_metric() {
let v = vector![3.0, 4.0];
assert_scalar_eq!(VectorMetric::metric(&Euclidean, &v, &v), 0.0, comp = float);
let v1 = vector![0.0, 0.0];
assert_scalar_eq!(VectorMetric::metric(&Euclidean, &v, &v1), 5.0, comp = float);
let v2 = vector![4.0, 3.0];
assert_scalar_eq!(VectorMetric::metric(&Euclidean, &v, &v2), 2.0.sqrt(), comp = float);
}
#[test]
#[should_panic]
fn test_euclidean_vector_metric_bad_dim() {
let v = vector![3.0, 4.0];
let v2 = vector![1.0, 2.0, 3.0];
VectorMetric::metric(&Euclidean, &v, &v2);
}
#[test]
fn test_euclidean_matrix_metric() {
let m = matrix![3.0, 4.0;
1.0, 3.0];
assert_scalar_eq!(MatrixMetric::metric(&Euclidean, &m, &m), 0.0, comp = float);
let m1 = Matrix::zeros(2, 2);
assert_scalar_eq!(MatrixMetric::metric(&Euclidean, &m, &m1), 35.0.sqrt(), comp = float);
let m2 = matrix![2.0, 3.0;
2.0, 4.0];
assert_scalar_eq!(MatrixMetric::metric(&Euclidean, &m, &m2), 2.0, comp = float);
}
#[test]
#[should_panic]
fn test_euclidean_matrix_metric_bad_dim() {
let m = matrix![3.0, 4.0];
let m2 = matrix![1.0, 2.0, 3.0];
MatrixMetric::metric(&Euclidean, &m, &m2);
}
#[test]
fn test_euclidean_matrix_slice_metric() {
let m = matrix![
1.0, 1.0, 1.0;
1.0, 1.0, 1.0;
1.0, 1.0, 1.0
];
let m2 = matrix![
0.0, 0.0, 0.0;
0.0, 0.0, 0.0;
0.0, 0.0, 0.0
];
let m_slice = MatrixSlice::from_matrix(
&m, [0; 2], 1, 2
);
let m2_slice = MatrixSlice::from_matrix(
&m2, [0; 2], 1, 2
);
assert_scalar_eq!(MatrixMetric::metric(&Euclidean, &m_slice, &m2_slice), 2.0.sqrt(), comp = exact);
}
#[test]
#[should_panic]
fn test_euclidean_matrix_slice_metric_bad_dim() {
let m = matrix![3.0, 4.0];
let m2 = matrix![1.0, 2.0, 3.0];
let m_slice = MatrixSlice::from_matrix(
&m, [0; 2], 1, 1
);
let m2_slice = MatrixSlice::from_matrix(
&m2, [0; 2], 1, 2
);
MatrixMetric::metric(&Euclidean, &m_slice, &m2_slice);
}
#[test]
fn test_lp_vector_supremum() {
let v = vector![-5.0, 3.0];
let sup = VectorNorm::norm(&Lp::Infinity, &v);
assert_eq!(sup, 5.0);
}
#[test]
fn test_lp_matrix_supremum() {
let m = matrix![0.0, -2.0;
3.5, 1.0];
let sup = MatrixNorm::norm(&Lp::Infinity, &m);
assert_eq!(sup, 3.5);
}
#[test]
fn test_lp_vector_one() {
let v = vector![1.0, 2.0, -2.0];
assert_eq!(VectorNorm::norm(&Lp::Integer(1), &v), 5.0);
}
#[test]
fn test_lp_matrix_one() {
let m = matrix![1.0, -2.0;
0.5, 1.0];
assert_eq!(MatrixNorm::norm(&Lp::Integer(1), &m), 4.5);
}
#[test]
fn test_lp_vector_float() {
let v = vector![1.0, 2.0, -2.0];
assert_eq!(VectorNorm::norm(&Lp::Float(1.0), &v), 5.0);
}
#[test]
fn test_lp_matrix_float() {
let m = matrix![1.0, -2.0;
0.5, 1.0];
assert_eq!(MatrixNorm::norm(&Lp::Float(1.0), &m), 4.5);
}
#[test]
#[should_panic]
fn test_lp_vector_bad_p() {
let v = Vector::new(vec![]);
VectorNorm::norm(&Lp::Float(0.5), &v);
}
#[test]
#[should_panic]
fn test_lp_matrix_bad_p() {
let m = matrix![];
MatrixNorm::norm(&Lp::Float(0.5), &m);
}
#[test]
#[should_panic]
fn test_lp_vector_bad_int_p() {
let v: Vector<f64> = Vector::new(vec![]);
VectorNorm::norm(&Lp::Integer(0), &v);
}
#[test]
#[should_panic]
fn test_lp_matrix_bad_int_p() {
let m: Matrix<f64> = matrix![];
MatrixNorm::norm(&Lp::Integer(0), &m);
}
} | |
Week.js | import React, { Component } from 'react';
import Day from './Day';
import uuid from 'uuid/v4';
import s from './Calendar.module.scss';
import moment from 'moment/moment';
class Week extends Component {
render() {
let days = [];
let date = this.props.previousCurrentNextView;
const { selectedMonthEvents, selected, currentMonthView } = this.props;
for (var i = 0; i < 7; i++) {
let dayHasEvents = false,
title = '',
info = '',
itemStyle = '',
link = '';
for (var j = 0; j < selectedMonthEvents.length; j++) {
if (moment(selectedMonthEvents[j].date).isSame(date, 'day')) {
dayHasEvents = true;
title = selectedMonthEvents[j].title
? selectedMonthEvents[j].title
: '';
info = selectedMonthEvents[j].info ? selectedMonthEvents[j].info : '';
itemStyle = selectedMonthEvents[j].itemStyle
? selectedMonthEvents[j].itemStyle
: '';
link = selectedMonthEvents[j].link ? selectedMonthEvents[j].link : '';
}
}
let day = {
name: date.format('dd').substring(0, 1),
number: date.date(), | isToday: date.isSame(new Date(), 'day'),
date: date,
hasEvents: dayHasEvents,
title: title,
info: info,
itemStyle: itemStyle,
link: link,
};
days.push(<Day key={uuid()} day={day} selected={selected} />);
date = date.clone();
date.add(1, 'd');
}
return <div className={`${s.calendarRow} ${s.week}`}>{days}</div>;
}
}
export default Week; | isCurrentMonth: date.month() === currentMonthView.month(), |
decode-ways.py | # https://leetcode.com/problems/decode-ways/
import string
import fileinput
from typing import Dict
class Solution:
|
if __name__ == "__main__":
s = Solution()
for line in fileinput.input():
print(s.numDecodings(line.strip()))
| MAPPING = dict(zip(map(str, range(1, 28)), string.ascii_uppercase))
def _numDecodings(self, s: str, mem: Dict[str, int]) -> int:
if s in mem:
return mem[s]
mem[s] = 0
if len(s) >= 1 and s[:1] in self.MAPPING:
mem[s] += self._numDecodings(s[1:], mem)
if len(s) >= 2 and s[:2] in self.MAPPING:
mem[s] += self._numDecodings(s[2:], mem)
return mem[s]
def numDecodings(self, s: str) -> int:
mem = {"": 1}
return self._numDecodings(s, mem) |
containers.py | # -*- coding: utf-8 -*-
"""
fysql.containers
~~~~~~~~~~~~~~~~
:copyright: (c) 2016 by Gasquez Florian
:license: MIT, see LICENSE for more details.
"""
from __future__ import unicode_literals
from functools import wraps
import copy
import hashlib
from .entities import SQLEntity, SQLJoin, SQLCondition, SQLColumn
from .columns import FKeyColumn, PKeyColumn, IntegerColumn
from .static import Tables
'''
class ContainerWalkerType(type):
_instances = {}
def __new__(cls, *args, **kwargs):
if not args[2]:
return super(ContainerWalker, cls).__new__(cls, *args, **kwargs)
key = hashlib.md5(args[0].encode('utf-8')).hexdigest()
if key not in ContainerWalkerType._instances.keys():
ContainerWalkerType._instances[key] = super(ContainerWalker, cls).__new__(cls, *args, **kwargs)
return ContainerWalkerType._instances[key]
'''
class ContainerWalker(object):
"""ContainerWalker: walk through a list of SQLEntity and EntityContainer.
Attributes:
_sql (str): description of the SQL query filled by the walker.
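    Example (illustrative sketch only; it assumes SQLEntity renders its SQL
    text via str(), as it is used throughout this module):
        walker = ContainerWalker([SQLEntity('SELECT'), SQLEntity('1')], ' ', False)
        walker.sql  # -> 'SELECT 1'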
"""
def __init__(self, entities, separator, executable, *args, **kwargs):
self._sql = False
self.entities = entities
self.separator = separator
def prepare(self):
sql = []
for entity in self.entities:
if isinstance(entity, EntityContainer):
sql.append(
entity.separator.join(
map(str, entity.walker.prepare())
)
)
else:
sql.append(str(entity))
self._sql = self.separator.join(map(str, sql)).strip()
return sql
@property
def sql(self):
if self._sql is False:
self.prepare()
return self._sql
@staticmethod
def _sql_entity(value):
        return '{0}'.format(str(value))
class ResultContainer(object):
"""Assign sql select datas to Table._data"""
def __init__(self, table, cursor):
self.table = table
self.cursor = cursor
self.sql2py = {}
self.result = []
if self.cursor.description is not None:
for i in range(len(self.cursor.description)):
desc = self.cursor.description[i][0]
if isinstance(desc, bytes):
desc = desc.decode('utf-8')
self.sql2py[i] = desc
self.parse()
def parse(self):
"""Parse rows
Todo:
* Allow cursor.fetchone()? (memory issue)
"""
rows = self.cursor.fetchall()
for row in rows:
self.parse_row(row)
self.cursor.close()
def parse_row(self, row):
item = self.table()
for k, f in self.sql2py.items():
tables = Tables.tables
id_table = f.split('_')[0]
id_column = f.split('_', 1)[1]
if id_table != self.table._db_table:
id_table = self.table._backrefs[id_table]
if '_py' in dir(tables[id_table]._columns[id_column]):
item._data[f] = tables[id_table]._columns[id_column]._py(row[k])
else:
item._data[f] = row[k]
item.__load__()
self.result.append(item)
class EntityContainer(object):
"""List of SQLEntity
Attributes:
entities (list) SQLEntity and EntityContainer
        separator  (str)  Separator for each element of entities
"""
def __init__(self, separator=' '):
self._walker = False
self.entities = []
self.separator = separator
self.executable = False
def __add__(self, entity):
self.entities.append(entity)
return self
def __len__(self):
return len(self.entities)
@property
def walker(self):
if not self._walker:
self._walker = ContainerWalker(self.entities, self.separator, self.executable)
return self._walker
class EntityExecutableContainer(EntityContainer):
"""List of SQLEntity that can be converted to an executable SQL query."""
def __init__(self, table):
super(EntityExecutableContainer, self).__init__()
self.table = table
self.executable = True
@property
def sql(self):
return self.walker.sql
def execute(self, commit=False):
return self.table._database.execute(self.sql, commit=commit)
class DropContainer(EntityExecutableContainer):
"""DROP TABLE SQL query."""
def __init__(self, table):
super(DropContainer, self).__init__(table)
self += SQLEntity('DROP TABLE IF EXISTS {0};'.format(self.table._sql_entity))
self.execute()
class CreateTableContainer(EntityExecutableContainer):
"""CREATE TABLE SQL query."""
def __init__(self, table):
super(CreateTableContainer, self).__init__(table)
self += SQLEntity('CREATE TABLE IF NOT EXISTS {0} ('.format(self.table._sql_entity))
args_create = EntityContainer(separator=', ')
indexes = EntityContainer(separator=', ')
indexes += SQLEntity('PRIMARY KEY ({0})'.format(self.table._pkey.sql_entities['name']))
for key, column in self.table._columns.items():
column_create = EntityContainer(separator=' ')
column_create += column.sql_entities['name']
if column.sql_type_size is not None:
column_create += SQLEntity('{0}({1})'.format(column.sql_type, column.sql_type_size))
else:
column_create += SQLEntity(column.sql_type)
if isinstance(column, FKeyColumn) or isinstance(column, PKeyColumn):
column_create += SQLEntity('UNSIGNED')
if column.unique and not column.index:
column_create += SQLEntity('UNIQUE')
if column.null is False:
column_create += SQLEntity('NOT NULL')
else:
column_create += SQLEntity('NULL')
# if column.default:
# column_create += SQLEntity('DEFAULT {0}'.format(column.escape(column.default)))
if column.pkey and isinstance(column, IntegerColumn):
column_create += SQLEntity('AUTO_INCREMENT')
args_create += column_create
if column.index:
unique = '' if not column.unique else 'UNIQUE'
indexes += SQLEntity('{0} INDEX {1} ({2})'.format(unique, column.sql_entities['index'], column.sql_entities['name']))
args_create += indexes
self += args_create
self += SQLEntity(') ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;')
DropContainer(self.table)
self.execute()
class InsertContainer(EntityExecutableContainer):
"""Table.insert(table_instance)"""
def __init__(self, table, instance):
super(InsertContainer, self).__init__(table)
self.filled = []
self.instance = instance
self.pkey_id = False
self += SQLEntity('INSERT INTO')
self += self.table._sql_entity |
columns_names = EntityContainer(separator=', ')
columns_values = EntityContainer(separator=', ')
for key, column in self.table._columns.items():
value = getattr(self.instance, key)
            print('{0}: {1}'.format(key, value))
if value:
if column.pkey is True:
self.pkey_id = value
columns_names += column.sql_entities['name']
columns_values += column.escape(getattr(self.instance, key))
for k, v in self.table._defaults.items():
if not value and key == k:
columns_names += self.table._columns[k].sql_entities['name']
columns_values += column.escape(v)
self += columns_names
self += SQLEntity(')')
self += SQLEntity('VALUES (')
self += columns_values
self += SQLEntity(');')
def execute(self):
cursor = self.table._database.execute(self.sql)
if self.pkey_id is False:
self.pkey_id = self.table._database.insert_id(cursor)
self.table._database.commit()
return self.table.get(self.table._pkey == self.pkey_id)
class CreateContainer(EntityExecutableContainer):
"""INSERT INTO SQL query. Used for Table.create()"""
def __init__(self, table, **kwargs):
super(CreateContainer, self).__init__(table)
self.filled = []
self.pkey_id = False
self += SQLEntity('INSERT INTO')
self += self.table._sql_entity
self += SQLEntity('(')
columns_names = EntityContainer(separator=',')
columns_values = EntityContainer(separator=',')
for attr, value in kwargs.items():
if attr in self.table._columns.keys():
columns_names += self.table._columns[attr].sql_entities['name']
columns_values += self.table._columns[attr].escape(value)
if self.table._columns[attr].pkey is True:
self.pkey_id = value
self.filled.append(attr)
for key, column in self.table._defaults.items():
if key not in self.filled:
columns_names += self.table._columns[key].sql_entities['name']
columns_values += self.table._columns[key].escape(self.table._columns[key].default)
self += columns_names
self += SQLEntity(')')
self += SQLEntity('VALUES (')
self += columns_values
self += SQLEntity(');')
def execute(self):
cursor = self.table._database.execute(self.sql)
if self.pkey_id is False:
self.pkey_id = self.table._database.insert_id(cursor)
self.table._database.commit()
return self.table.get(self.table._pkey == self.pkey_id)
class SaveContainer(EntityExecutableContainer):
"""UPDATE SQL Query. Used for TableInstance.save()"""
def __init__(self, table, instance):
super(SaveContainer, self).__init__(table)
self += SQLEntity('UPDATE')
self += self.table._sql_entity
self += SQLEntity('SET')
columns = EntityContainer(separator=',')
to_update = []
for key, column in self.table._columns.items():
columns += SQLEntity('{0}={1}'.format(
column,
column.escape(getattr(instance, key))
)
)
if isinstance(column, FKeyColumn):
to_update.append(getattr(instance, column.reference))
self += columns
self += SQLEntity('WHERE {0}={1} LIMIT 1'.format(
self.table._pkey,
self.table._pkey.escape(getattr(instance, self.table._pkey.name))
))
self.execute(commit=True)
for item in to_update:
if item:
item.save()
class RemoveContainer(EntityExecutableContainer):
"""DELETE SQL Query. Used for TableInstance.remove()"""
def __init__(self, table, instance):
super(RemoveContainer, self).__init__(table)
self += SQLEntity('DELETE FROM')
self += self.table._sql_entity
self += SQLEntity('WHERE {0}={1} LIMIT 1'.format(
self.table._pkey,
self.table._pkey.escape(getattr(instance, self.table._pkey.name))
))
self.execute(commit=True)
def _generative(func):
"""Chainable method"""
@wraps(func)
def decorator(self, *args, **kwargs):
func(self, *args, **kwargs)
return self
return decorator
class ConditionableExecutableContainer(EntityExecutableContainer):
"""Conditionable query, with where, limit, group, having..."""
def __init__(self, table, *args, **kwargs):
super(ConditionableExecutableContainer, self).__init__(table)
self._where = False
self._group = False
self._order = False
def clone(self):
return copy.deepcopy(self)
@_generative
def where(self, *conditions):
if self._where is False:
self += SQLEntity('WHERE')
self._where = True
else:
self += SQLEntity('AND')
size = len(conditions) - 1
i = 0
if size == 0:
if isinstance(conditions[0], SQLCondition):
self += conditions[0]
else:
self += SQLEntity(conditions[0])
else:
for condition in conditions:
if isinstance(condition, SQLCondition):
self += SQLEntity('(')
self += condition
self += SQLEntity(')')
if i < size:
self += SQLEntity('AND')
i += 1
@_generative
def order_by(self, column, order='DESC'):
if self._order is False:
self += SQLEntity('ORDER BY')
self._order = True
else:
self += SQLEntity(',')
if isinstance(column, str):
self += SQLEntity(column)
else:
self += column
self += SQLEntity(order)
@_generative
def group_by(self, group_by):
if self._group is False:
self += SQLEntity('GROUP BY')
self._group = True
else:
self += SQLEntity(',')
if isinstance(group_by, str):
self += SQLEntity(group_by)
def limit(self, limit, position=0):
self += SQLEntity('LIMIT {0},{1}'.format(position, limit))
if limit == 1:
return self.execute(unique=True)
return self.execute()
def one(self):
return self.limit(1)
def all(self):
return self.execute()
class SelectContainer(ConditionableExecutableContainer):
"""SELECT SQL Query."""
def __init__(self, table, *args, **kwargs):
super(SelectContainer, self).__init__(table)
self.kwargs = kwargs
self.args = args
self.is_count = kwargs.get('is_count') or False
self.selected = []
self.add_from = kwargs.get('add_from') or False
self.executable = True
# add selected columns
if self.is_count:
columns = SQLEntity('COUNT(*)')
else:
columns = EntityContainer(separator=',')
for column in self.table._columns.values() if not args else args:
columns += column.sql_entities['selection']
self.selected.append(hash(column))
# add selected tables
tables = EntityContainer(separator=',')
tables += self.table._sql_entity
if self.add_from:
tables += SQLEntity(self.add_from)
# add joins
joins = EntityContainer()
for foreign in reversed(self.table._foreigns):
if hash(foreign['column']) in self.selected or self.is_count:
join = 'INNER' if foreign['column'].required else 'LEFT'
joins += SQLJoin(join, foreign['table']._sql_entity, foreign['left_on'], foreign['right_on'])
if not self.is_count:
for key, column in foreign['table']._columns.items():
columns += SQLColumn(
column.sql_column,
column.table._db_table,
'{0}_{1}'.format(foreign['column'].reference, column.sql_column)
)
self += SQLEntity('SELECT')
self += columns
self += SQLEntity('FROM')
self += tables
if len(joins) != 0:
self += joins
def execute(self, unique=False):
cursor = self.table._database.execute(self.sql)
if self.is_count:
return cursor.fetchone()[0]
if unique:
try:
return ResultContainer(self.table, cursor).result[0]
except IndexError:
return False
return ResultContainer(self.table, cursor).result
def count(self):
self.entities[1] = SQLEntity('COUNT(*)')
self.is_count = True
return self.execute() | self += SQLEntity('(') |
1080p_081.ts | size 1474108 | version https://git-lfs.github.com/spec/v1
oid sha256:cee2da444323ac09e42d546ba30a92c7c7c161f87f75ce665803e07c42c7ebf6 |
|
models.py | import datetime
from moto.core import BaseBackend
from moto.core.utils import iso_8601_datetime
class Token(object):
def __init__(self, duration, name=None, policy=None):
now = datetime.datetime.now()
self.expiration = now + datetime.timedelta(seconds=duration)
self.name = name
        self.policy = policy
@property
def expiration_ISO8601(self):
return iso_8601_datetime(self.expiration)
class AssumedRole(object):
def __init__(self, role_session_name, role_arn, policy, duration, external_id):
self.session_name = role_session_name
self.arn = role_arn
self.policy = policy
now = datetime.datetime.now()
self.expiration = now + datetime.timedelta(seconds=duration)
self.external_id = external_id
@property
def expiration_ISO8601(self):
return iso_8601_datetime(self.expiration)
class STSBackend(BaseBackend):
def get_session_token(self, duration):
token = Token(duration=duration)
return token
def | (self, name, duration, policy):
token = Token(duration=duration, name=name, policy=policy)
return token
def assume_role(self, **kwargs):
role = AssumedRole(**kwargs)
return role
sts_backend = STSBackend()
| get_federation_token |
ListDirectoriesCommand.ts | import { CloudDirectoryClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../CloudDirectoryClient";
import { ListDirectoriesRequest, ListDirectoriesResponse } from "../models/models_0";
import {
deserializeAws_restJson1ListDirectoriesCommand,
serializeAws_restJson1ListDirectoriesCommand,
} from "../protocols/Aws_restJson1";
import { getSerdePlugin } from "@aws-sdk/middleware-serde";
import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
import { Command as $Command } from "@aws-sdk/smithy-client";
import {
FinalizeHandlerArguments,
Handler,
HandlerExecutionContext,
MiddlewareStack,
HttpHandlerOptions as __HttpHandlerOptions,
MetadataBearer as __MetadataBearer,
SerdeContext as __SerdeContext,
} from "@aws-sdk/types";
export type ListDirectoriesCommandInput = ListDirectoriesRequest;
export type ListDirectoriesCommandOutput = ListDirectoriesResponse & __MetadataBearer;
export class ListDirectoriesCommand extends $Command<
ListDirectoriesCommandInput,
ListDirectoriesCommandOutput,
CloudDirectoryClientResolvedConfig
> {
// Start section: command_properties
// End section: command_properties
constructor(readonly input: ListDirectoriesCommandInput) {
// Start section: command_constructor
super();
// End section: command_constructor
}
resolveMiddleware(
clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>,
configuration: CloudDirectoryClientResolvedConfig,
options?: __HttpHandlerOptions
): Handler<ListDirectoriesCommandInput, ListDirectoriesCommandOutput> {
this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); | const { logger } = configuration;
const clientName = "CloudDirectoryClient";
const commandName = "ListDirectoriesCommand";
const handlerExecutionContext: HandlerExecutionContext = {
logger,
clientName,
commandName,
inputFilterSensitiveLog: ListDirectoriesRequest.filterSensitiveLog,
outputFilterSensitiveLog: ListDirectoriesResponse.filterSensitiveLog,
};
if (typeof logger.info === "function") {
logger.info({
clientName,
commandName,
});
}
const { requestHandler } = configuration;
return stack.resolve(
(request: FinalizeHandlerArguments<any>) =>
requestHandler.handle(request.request as __HttpRequest, options || {}),
handlerExecutionContext
);
}
private serialize(input: ListDirectoriesCommandInput, context: __SerdeContext): Promise<__HttpRequest> {
return serializeAws_restJson1ListDirectoriesCommand(input, context);
}
private deserialize(output: __HttpResponse, context: __SerdeContext): Promise<ListDirectoriesCommandOutput> {
return deserializeAws_restJson1ListDirectoriesCommand(output, context);
}
// Start section: command_body_extra
// End section: command_body_extra
} |
const stack = clientStack.concat(this.middlewareStack);
|
request.rs | //! Handling of client requests to the server.
use std::collections::HashSet;
use std::io::{Read, Write};
use std::net::{Shutdown, TcpStream};
use std::sync::atomic::Ordering;
use chrono::{offset::Utc, Duration};
use expjobserver::{
cartesian_product, cmd_replace_machine, cmd_replace_vars, cmd_to_path,
protocol::{
self,
request::RequestType::{self, *},
response::ResponseType::{self, *},
},
serialize_ts,
};
use log::{error, info, warn};
use prost::Message;
use super::{MachineStatus, Matrix, Server, Task, TaskState, TaskType};
impl Task {
pub fn | (&self) -> protocol::Status {
use protocol::status::{Erroropt::Error, Machineopt::Machine, Outputopt::Output};
let mut status = protocol::Status::default();
// Set the state
match &self.state {
TaskState::Waiting => {
status.status = protocol::status::Status::Waiting.into();
}
TaskState::Held => {
status.status = protocol::status::Status::Held.into();
}
TaskState::Running { .. }
| TaskState::CheckingResults { .. }
| TaskState::Finalize { .. } => {
status.status = protocol::status::Status::Running.into();
}
TaskState::CopyingResults { .. } => {
status.status = protocol::status::Status::Copyresults.into();
}
TaskState::Done | TaskState::DoneWithResults { .. } => {
status.status = protocol::status::Status::Done.into();
}
TaskState::Error { .. } | TaskState::ErrorDone { .. } => {
status.status = protocol::status::Status::Failed.into();
}
TaskState::Canceled { .. } | TaskState::Killed => {
status.status = protocol::status::Status::Canceled.into();
}
TaskState::Unknown { .. } => {
status.status = protocol::status::Status::Unknown.into();
}
}
// Set the machine
match &self.state {
TaskState::Running { .. }
| TaskState::CheckingResults { .. }
| TaskState::Finalize { .. }
| TaskState::CopyingResults { .. }
| TaskState::Done
| TaskState::DoneWithResults { .. } => {
status.machineopt = Some(Machine(self.machine.as_ref().unwrap().clone()));
}
TaskState::Error { .. } | TaskState::ErrorDone { .. } => {
status.machineopt = Some(Machine(self.machine.as_ref().unwrap().clone()));
}
TaskState::Unknown { machine } => {
status.machineopt = machine.clone().map(|m| Machine(m));
}
TaskState::Waiting
| TaskState::Held
| TaskState::Canceled { .. }
| TaskState::Killed => {}
}
// Set the output
match &self.state {
TaskState::DoneWithResults { results_path } => {
status.outputopt = Some(Output(results_path.clone()));
}
TaskState::Running { .. }
| TaskState::CheckingResults
| TaskState::CopyingResults { .. }
| TaskState::Finalize { .. }
| TaskState::Done
| TaskState::Error { .. }
| TaskState::ErrorDone { .. }
| TaskState::Waiting
| TaskState::Held
| TaskState::Canceled { .. }
| TaskState::Killed
| TaskState::Unknown { .. } => {}
}
// Set the error
match &self.state {
TaskState::Error { error, .. } | TaskState::ErrorDone { error, .. } => {
status.erroropt = Some(Error(error.clone()));
}
TaskState::Running { .. }
| TaskState::CheckingResults
| TaskState::CopyingResults { .. }
| TaskState::Finalize { .. }
| TaskState::Done
| TaskState::DoneWithResults { .. }
| TaskState::Waiting
| TaskState::Held
| TaskState::Canceled { .. }
| TaskState::Killed
| TaskState::Unknown { .. } => {}
}
status
}
}
impl Server {
/// Mark the given job as canceled. This doesn't actually do anything yet. The job will be
/// killed and removed asynchronously.
fn cancel_job(&self, jid: u64, remove: bool) -> ResponseType {
// We set the `canceled` flag and let the job server handle the rest.
if let Some(job) = self.tasks.lock().unwrap().get_mut(&jid) {
info!("Cancelling task {}, {:?}", jid, job);
job.canceled = Some(remove);
Okresp(protocol::OkResp {})
} else {
error!("No such job: {}", jid);
Nsjresp(protocol::NoSuchJobResp {})
}
}
pub fn handle_client(&self, mut client: TcpStream) -> std::io::Result<()> {
// Indicate that the work thread should check for new tasks.
self.client_ping.fetch_or(true, Ordering::Relaxed);
let peer_addr = client.peer_addr()?;
info!("Handling request from {}", peer_addr);
let mut request = Vec::new();
client.read_to_end(&mut request)?;
let request = protocol::Request::decode(request.as_slice())?;
info!("(request) {}: {:?}", peer_addr, request);
client.shutdown(Shutdown::Read)?;
let request = match request.request_type {
Some(request) => request,
None => {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"Request was unexpectedly empty.",
))
}
};
let response_ty = self.handle_request(request)?;
info!("(response) {}: {:?}", peer_addr, response_ty);
let mut response = protocol::Response::default();
response.response_type = Some(response_ty);
let mut bytes = vec![];
response.encode(&mut bytes)?;
client.write_all(&bytes)?;
Ok(())
}
fn handle_request(&self, request: RequestType) -> std::io::Result<ResponseType> {
let response =
match request {
Preq(protocol::PingRequest {}) => Okresp(protocol::OkResp {}),
Mareq(protocol::MakeAvailableRequest { addr, class }) => {
let mut locked = self.machines.lock().unwrap();
// Check if the machine is already there, since it may be running a job.
let old = locked.get(&addr);
let running_job = if let Some(old_class) = old {
warn!(
"Removing {} from old class {}. New class is {}",
addr, old_class.class, class
);
old_class.running
} else {
None
};
info!(
"Add machine {}/{} with running job: {:?}",
addr, class, running_job
);
// Add machine
locked.insert(
addr.clone(),
MachineStatus {
class,
running: running_job,
},
);
// Respond
Okresp(protocol::OkResp {})
}
Rareq(protocol::RemoveAvailableRequest { addr }) => {
if let Some(old_class) = self.machines.lock().unwrap().remove(&addr) {
info!("Removed machine {}/{}", addr, old_class.class);
// Cancel any running jobs on the machine.
if let Some(running) = old_class.running {
self.cancel_job(running, false);
}
Okresp(protocol::OkResp {})
} else {
error!("No such machine: {}", addr);
Nsmresp(protocol::NoSuchMachineResp {})
}
}
Lareq(protocol::ListAvailableRequest {}) => Mresp(protocol::MachinesResp {
machine_status: self
.machines
.lock()
.unwrap()
.iter()
.map(|(addr, info)| {
(
addr.clone(),
protocol::MachineStatus {
class: info.class.clone(),
is_free: info.running.is_none(),
running_job: info.running.unwrap_or(0),
},
)
})
.collect(),
}),
Lvreq(protocol::ListVarsRequest {}) => Vresp(protocol::VarsResp {
vars: self.variables.lock().unwrap().clone(),
}),
Sumreq(protocol::SetUpMachineRequest {
addr,
classopt,
cmds,
timeout,
}) => {
let jid = self.next_jid.fetch_add(1, Ordering::Relaxed);
info!(
"Create setup task with ID {}. Machine: {}. Cmds: {:?}",
jid, addr, cmds
);
let variables = self.variables.lock().unwrap().clone();
let class = classopt
.map(|protocol::set_up_machine_request::Classopt::Class(class)| class);
self.tasks.lock().unwrap().insert(
jid,
Task {
jid,
matrix: None,
ty: TaskType::SetupTask,
cmds,
class,
machine: Some(addr),
state: TaskState::Waiting,
variables,
cp_results: None,
canceled: None,
repeat_on_fail: false,
maximum_failures: None,
attempt: 0,
timestamp: Utc::now(),
done_timestamp: None,
timeout: if timeout > 0 {
Some(Duration::minutes(timeout as i64))
} else {
None
},
timedout: None,
},
);
Jiresp(protocol::JobIdResp { jid })
}
Svreq(protocol::SetVarRequest { name, value }) => {
let old = self
.variables
.lock()
.unwrap()
.insert(name.clone(), value.clone());
info!("Set {}={}", name, value);
if let Some(old_value) = old {
warn!(
"Old value of {} was {}. New value is {}",
name, old_value, value
);
}
// Respond
Okresp(protocol::OkResp {})
}
Ajreq(protocol::AddJobRequest {
class,
cmd,
cp_resultsopt,
// prost uses a default of `false`.
repeat_on_fail,
timeout,
}) => {
let jid = self.next_jid.fetch_add(1, Ordering::Relaxed);
info!("Added job {} with class {}: {}", jid, class, cmd);
let variables = self.variables.lock().unwrap().clone();
let cp_results = cp_resultsopt
.map(|protocol::add_job_request::CpResultsopt::CpResults(s)| s);
self.tasks.lock().unwrap().insert(
jid,
Task {
jid,
matrix: None,
ty: TaskType::Job,
cmds: vec![cmd],
class: Some(class),
cp_results,
state: TaskState::Waiting,
variables,
machine: None,
canceled: None,
repeat_on_fail,
maximum_failures: None,
attempt: 0,
timestamp: Utc::now(),
done_timestamp: None,
timeout: if timeout > 0 {
Some(Duration::minutes(timeout as i64))
} else {
None
},
timedout: None,
},
);
Jiresp(protocol::JobIdResp { jid })
}
Ljreq(protocol::ListJobsRequest {}) => {
let tasks: Vec<_> = self
.tasks
.lock()
.unwrap()
.values()
.filter_map(|task| {
if task.matrix.is_none() {
Some(task.jid)
} else {
None
}
})
.collect();
let running: Vec<_> = self
.tasks
.lock()
.unwrap()
.values()
.filter_map(|task| {
if matches!(task.state, TaskState::Running{..}) {
Some(task.jid)
} else {
None
}
})
.collect();
let matrices: Vec<_> = self
.matrices
.lock()
.unwrap()
.values()
.map(|matrix| {
let cp_resultsopt = matrix.cp_results.as_ref().map(|s| {
protocol::matrix_status_resp::CpResultsopt::CpResults(s.into())
});
protocol::MatrixStatusResp {
id: matrix.id,
class: matrix.class.clone(),
cp_resultsopt,
cmd: matrix.cmd.clone(),
jobs: matrix.jids.iter().map(|j| *j).collect(),
variables: protocol::convert_map(&matrix.variables),
}
})
.collect();
Jresp(protocol::JobsResp {
jobs: tasks,
matrices,
running,
})
}
Hjreq(protocol::HoldJobRequest { jid }) => {
let mut locked_tasks = self.tasks.lock().unwrap();
let task = locked_tasks.get_mut(&jid);
match task {
Some(task) => {
let is_waiting = match task.state {
TaskState::Waiting | TaskState::Held => true,
_ => false,
};
if is_waiting {
task.state = TaskState::Held;
Okresp(protocol::OkResp {})
} else {
error!(
"Attempted to put task {} on hold, but current state is {:?}",
jid, task.state
);
Nwresp(protocol::NotWaitingResp {})
}
}
None => Nsjresp(protocol::NoSuchJobResp {}),
}
}
Ujreq(protocol::UnholdJobRequest { jid }) => {
let mut locked_tasks = self.tasks.lock().unwrap();
let task = locked_tasks.get_mut(&jid);
match task {
Some(task) => {
let is_held = match task.state {
TaskState::Held => true,
_ => false,
};
if is_held {
task.state = TaskState::Waiting;
Okresp(protocol::OkResp {})
} else {
error!(
"Attempted to unhold task {}, but current state is {:?}",
jid, task.state
);
Nwresp(protocol::NotWaitingResp {})
}
}
None => Nsjresp(protocol::NoSuchJobResp {}),
}
}
Cjreq(protocol::CancelJobRequest { jid, remove }) => self.cancel_job(jid, remove),
Jsreq(protocol::JobStatusRequest { jid }) => {
let locked_tasks = self.tasks.lock().unwrap();
let task = locked_tasks.get(&jid);
match task {
Some(Task {
jid,
ty: TaskType::Job,
class,
cmds,
variables,
machine,
..
}) => {
info!("Status of job {}, {:?}", jid, task);
let log = if let Some(machine) = machine {
let cmd = cmd_replace_machine(
&cmd_replace_vars(cmds.first().unwrap(), &variables),
&machine,
);
format!("{}", cmd_to_path(*jid, &cmd, &self.log_dir))
} else {
"/dev/null".into()
};
Jsresp(protocol::JobStatusResp {
jid: *jid,
matrixidopt: task
.unwrap()
.matrix
.map(|m| protocol::job_status_resp::Matrixidopt::Matrix(m)),
class: class.as_ref().expect("No class for clone").clone(),
cmd: cmds.first().unwrap().clone(),
status: Some(task.unwrap().status()),
variables: variables.clone(),
log,
timestamp: serialize_ts(task.unwrap().timestamp),
donetsop: task.unwrap().done_timestamp.map(serialize_ts).map(
|ts| protocol::job_status_resp::Donetsop::DoneTimestamp(ts),
),
cp_results: task
.unwrap()
.cp_results
.as_ref()
.map(Clone::clone)
.unwrap_or_else(|| "".into()),
})
}
Some(Task {
jid,
ty: TaskType::SetupTask,
class,
cmds,
state,
variables,
machine,
..
}) => {
info!("Status setup task {}, {:?}", jid, task);
let cmd = match state {
TaskState::Waiting | TaskState::Held => &cmds[0],
TaskState::Running { index } => &cmds[*index],
TaskState::Error { n, .. } | TaskState::ErrorDone { n, .. } => {
&cmds[*n]
}
TaskState::Done
| TaskState::DoneWithResults { .. }
| TaskState::CheckingResults
| TaskState::CopyingResults { .. }
| TaskState::Finalize { .. }
| TaskState::Canceled { .. }
| TaskState::Killed
| TaskState::Unknown { .. } => cmds.last().unwrap(),
}
.clone();
let log = if let Some(machine) = machine {
let cmd = cmd_replace_machine(
&cmd_replace_vars(&cmd, &variables),
&machine,
);
format!("{}", cmd_to_path(*jid, &cmd, &self.log_dir))
} else {
"/dev/null".into()
};
Jsresp(protocol::JobStatusResp {
jid: *jid,
matrixidopt: task
.unwrap()
.matrix
.map(|m| protocol::job_status_resp::Matrixidopt::Matrix(m)),
class: class.as_ref().map(Clone::clone).unwrap_or("".into()),
cmd,
status: Some(task.unwrap().status()),
variables: variables.clone(),
log,
timestamp: serialize_ts(task.unwrap().timestamp),
donetsop: task.unwrap().done_timestamp.map(serialize_ts).map(
|ts| protocol::job_status_resp::Donetsop::DoneTimestamp(ts),
),
cp_results: task
.unwrap()
.cp_results
.as_ref()
.map(Clone::clone)
.unwrap_or_else(|| "".into()),
})
}
None => {
error!("No such job: {}", jid);
Nsjresp(protocol::NoSuchJobResp {})
}
}
}
Cljreq(protocol::CloneJobRequest { jid }) => {
let mut locked_jobs = self.tasks.lock().unwrap();
let task = locked_jobs.get(&jid);
match task {
Some(
task
@
Task {
ty: TaskType::Job, ..
},
)
| Some(
task
@
Task {
ty: TaskType::SetupTask,
cp_results: None,
repeat_on_fail: false,
..
},
) => {
let new_jid = self.next_jid.fetch_add(1, Ordering::Relaxed);
let task = Self::clone_task(new_jid, task);
let maybe_matrix = task.matrix.clone();
locked_jobs.insert(new_jid, task);
if let Some(matrix) = maybe_matrix {
self.matrices
.lock()
.unwrap()
.get_mut(&matrix)
.unwrap()
.jids
.insert(new_jid);
}
Jiresp(protocol::JobIdResp { jid: new_jid })
}
None => {
error!("No such job or setup task: {}", jid);
Nsjresp(protocol::NoSuchJobResp {})
}
weird_state => {
error!(
"Unexpected task state! Ignoring clone request. {:#?}",
weird_state
);
Ierr(protocol::InternalError {})
}
}
}
Amreq(protocol::AddMatrixRequest {
vars,
cmd,
class,
cp_resultsopt,
repeat,
timeout,
}) => {
let id = self.next_jid.fetch_add(1, Ordering::Relaxed);
let mut vars = protocol::reverse_map(&vars);
let cp_results = cp_resultsopt
.map(|protocol::add_matrix_request::CpResultsopt::CpResults(s)| s);
// Get the set of base variables, some of which may be overridden by the matrix
// variables in the template.
vars.extend(
self.variables
.lock()
.unwrap()
.iter()
.map(|(k, v)| (k.to_owned(), vec![v.to_owned()])),
);
let timeout = if timeout == 0 {
None
} else {
Some(Duration::minutes(timeout as i64))
};
info!(
"Create matrix with ID {}. Cmd: {:?}, Vars: {:?}",
id, cmd, vars
);
let mut jids = HashSet::new();
// Create a new job for every element in the cartesian product of the variables.
for config in cartesian_product(&vars) {
let cmd = cmd_replace_vars(&cmd, &config);
for _ in 0..repeat {
let jid = self.next_jid.fetch_add(1, Ordering::Relaxed);
jids.insert(jid);
info!(
"[Matrix {}] Added job {} with class {}: {}",
id, jid, class, cmd
);
self.tasks.lock().unwrap().insert(
jid,
Task {
jid,
matrix: Some(id),
ty: TaskType::Job,
cmds: vec![cmd.clone()],
class: Some(class.clone()),
cp_results: cp_results.clone(),
state: TaskState::Waiting,
variables: config.clone(),
timeout,
machine: None,
canceled: None,
repeat_on_fail: true,
maximum_failures: None,
attempt: 0,
timestamp: Utc::now(),
done_timestamp: None,
timedout: None,
},
);
}
}
self.matrices.lock().unwrap().insert(
id,
Matrix {
id,
cmd,
class,
cp_results,
variables: vars,
jids,
},
);
Miresp(protocol::MatrixIdResp { id })
}
Smreq(protocol::StatMatrixRequest { id }) => {
if let Some(matrix) = self.matrices.lock().unwrap().get(&id) {
info!("Status of matrix {}, {:?}", id, matrix);
let cp_resultsopt = matrix.cp_results.as_ref().map(|s| {
protocol::matrix_status_resp::CpResultsopt::CpResults(s.into())
});
Msresp(protocol::MatrixStatusResp {
id,
class: matrix.class.clone(),
cp_resultsopt,
cmd: matrix.cmd.clone(),
jobs: matrix.jids.iter().map(|j| *j).collect(),
variables: protocol::convert_map(&matrix.variables),
})
} else {
error!("No such matrix: {}", id);
Nsmatresp(protocol::NoSuchMatrixResp {})
}
}
};
Ok(response)
}
}
| status |
main.rs | //! # foraget
//!
//! `foraget` is a simple universal package manager for Unix-like systems.
use std::process;
use ansi_term::Color;
use clap::{crate_authors, crate_description, crate_name, crate_version, App, Arg, SubCommand};
mod environment;
mod package_managers;
mod platforms;
mod tasks;
use environment::does_exist;
use package_managers::PackageManager;
use platforms::get_relevant_package_managers;
/// The entry point to foraget.
///
/// Gathers information about relevant package managers for the current environment and calls `run`
/// passing them in. If there are no known package managers for the currently detected environment,
/// ends the program with an appropriate message.
fn main() {
// Check for dependencies
check_for_dependencies();
// Get relevant package managers
if let Some(package_managers) = get_relevant_package_managers() {
// Run foraget for the relevant package managers
run(&package_managers);
} else {
// Print error message about non-implementation for the platform
println!(
"{}",
Color::Red.paint("No known package managers for this system!")
);
// Exit foraget
process::exit(0);
}
}
fn check_for_dependencies() {
let dependencies = vec![("fzf", "A command-line fuzzy finder")];
dependencies.iter().for_each(|d| {
if !does_exist(d.0) {
println!("{}", Color::Red.paint("The below dependency is required:"));
println!("{} - {}", d.0, d.1);
process::exit(0);
}
})
}
/// Runs foraget with the supplied package managers.
fn run(package_managers: &Vec<PackageManager>) | {
let matches = App::new(crate_name!())
.version(crate_version!())
.author(crate_authors!())
.about(crate_description!())
.subcommand(SubCommand::with_name("init").about("Install additional package sources"))
.subcommand(
SubCommand::with_name("search")
.about("Search for a package across sources")
.arg(
Arg::with_name("PACKAGE")
.help("The package to search")
.required(true)
.index(1),
),
)
.subcommand(
SubCommand::with_name("install")
.about("Install a package")
.arg(
Arg::with_name("PACKAGE")
.help("The package to install")
.required(true)
.index(1),
),
)
.subcommand(
SubCommand::with_name("uninstall")
.about("Uninstall a package if installed")
.arg(
Arg::with_name("PACKAGE")
.help("The package to uninstall")
.required(true)
.index(1),
),
)
.get_matches();
if let Some(_) = matches.subcommand_matches("init") {
// Init package sources
tasks::init();
} else if let Some(matches) = matches.subcommand_matches("search") {
// Search for the package across relevant package managers
tasks::search(&package_managers, matches.value_of("PACKAGE").unwrap());
} else if let Some(matches) = matches.subcommand_matches("install") {
// Prompt to install the package from one of the relevant package managers
tasks::install(&package_managers, matches.value_of("PACKAGE").unwrap());
} else if let Some(matches) = matches.subcommand_matches("uninstall") {
// Try uninstalling the package using one of the relevant package managers
tasks::uninstall(&package_managers, matches.value_of("PACKAGE").unwrap());
} else {
// Ask to be run with a command
println!("{}", Color::Red.paint("Please run foraget with a command!"));
}
} |
|
sliderbase_test.js | // Copyright 2008 The Closure Library Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
goog.provide("goog.ui.SliderBaseTest")
goog.setTestOnly("goog.ui.SliderBaseTest")
goog.require("goog.a11y.aria")
goog.require("goog.a11y.aria.State")
goog.require("goog.dom")
goog.require("goog.dom.TagName")
goog.require("goog.dom.classlist")
goog.require("goog.events")
goog.require("goog.events.EventType")
goog.require("goog.events.KeyCodes")
goog.require("goog.fx.Animation")
goog.require("goog.math.Coordinate")
goog.require("goog.style")
goog.require("goog.style.bidi")
goog.require("goog.testing.MockClock")
goog.require("goog.testing.MockControl")
goog.require("goog.testing.events")
goog.require("goog.testing.jsunit")
goog.require("goog.testing.mockmatchers")
goog.require("goog.testing.recordFunction")
goog.require("goog.ui.Component")
goog.require("goog.ui.SliderBase")
goog.require("goog.userAgent")
var oneThumbSlider
var oneThumbSliderRtl
var oneChangeEventCount
var twoThumbSlider
var twoThumbSliderRtl
var twoChangeEventCount
var mockClock
var mockAnimation
/**
* A basic class to implement the abstract goog.ui.SliderBase for testing.
* @param {boolean=} testOnlyIsRightToLeft This parameter is necessary to tell
* if the slider is rendered right-to-left when creating thumbs (before
* entering the document). Used only for test purposes.
* @constructor
* @extends {goog.ui.SliderBase}
*/
function OneThumbSlider(testOnlyIsRightToLeft = false) {
this.testOnlyIsRightToLeft_ = testOnlyIsRightToLeft
goog.ui.SliderBase.call(this, undefined /* domHelper */, function(value) {
return value > 5 ? "A big value." : "A small value."
})
}
goog.inherits(OneThumbSlider, goog.ui.SliderBase)
/** @override */
OneThumbSlider.prototype.createThumbs = function() {
var dirSuffix = this.testOnlyIsRightToLeft_ ? "Rtl" : ""
this.valueThumb = this.extentThumb = goog.dom.getElement("thumb" + dirSuffix)
}
/** @override */
OneThumbSlider.prototype.getCssClass = function(orientation) {
return goog.getCssName("test-slider", orientation)
}
/**
* A basic class to implement the abstract goog.ui.SliderBase for testing.
* @param {boolean=} testOnlyIsRightToLeft This parameter is necessary to tell
* if the slider is rendered right-to-left when creating thumbs (before
* entering the document). Used only for test purposes.
* @constructor
* @extends {goog.ui.SliderBase}
*/
function TwoThumbSlider(testOnlyIsRightToLeft = false) {
this.testOnlyIsRightToLeft_ = testOnlyIsRightToLeft
goog.ui.SliderBase.call(this)
}
goog.inherits(TwoThumbSlider, goog.ui.SliderBase)
/** @override */
TwoThumbSlider.prototype.createThumbs = function() {
var dirSuffix = this.testOnlyIsRightToLeft_ ? "Rtl" : ""
this.valueThumb = goog.dom.getElement("valueThumb" + dirSuffix)
this.extentThumb = goog.dom.getElement("extentThumb" + dirSuffix)
this.rangeHighlight = goog.dom.getElement("rangeHighlight" + dirSuffix)
}
/** @override */
TwoThumbSlider.prototype.getCssClass = function(orientation) {
return goog.getCssName("test-slider", orientation)
}
/**
* Basic class that implements the AnimationFactory interface for testing.
* @param {!goog.fx.Animation|!Array<!goog.fx.Animation>} testAnimations The
* test animations to use.
* @constructor
* @implements {goog.ui.SliderBase.AnimationFactory}
*/
function AnimationFactory(testAnimations) {
this.testAnimations = testAnimations
}
/** @override */
AnimationFactory.prototype.createAnimations = function() {
return this.testAnimations
}
function setUp() {
var sandBox = goog.dom.getElement("sandbox")
mockClock = new goog.testing.MockClock(true)
var oneThumbElem = goog.dom.createDom(
goog.dom.TagName.DIV,
{ id: "oneThumbSlider" },
goog.dom.createDom(goog.dom.TagName.SPAN, { id: "thumb" })
)
sandBox.appendChild(oneThumbElem)
oneThumbSlider = new OneThumbSlider()
oneThumbSlider.decorate(oneThumbElem)
oneChangeEventCount = 0
goog.events.listen(
oneThumbSlider,
goog.ui.Component.EventType.CHANGE,
function() {
oneChangeEventCount++
}
)
var twoThumbElem = goog.dom.createDom(
goog.dom.TagName.DIV,
{ id: "twoThumbSlider" },
goog.dom.createDom(goog.dom.TagName.DIV, { id: "rangeHighlight" }),
goog.dom.createDom(goog.dom.TagName.SPAN, { id: "valueThumb" }),
goog.dom.createDom(goog.dom.TagName.SPAN, { id: "extentThumb" })
)
sandBox.appendChild(twoThumbElem)
twoThumbSlider = new TwoThumbSlider()
twoThumbSlider.decorate(twoThumbElem)
twoChangeEventCount = 0
goog.events.listen(
twoThumbSlider,
goog.ui.Component.EventType.CHANGE,
function() {
twoChangeEventCount++
}
)
var sandBoxRtl = goog.dom.createDom(goog.dom.TagName.DIV, {
dir: "rtl",
style: "position:absolute;"
})
sandBox.appendChild(sandBoxRtl)
var oneThumbElemRtl = goog.dom.createDom(
goog.dom.TagName.DIV,
{ id: "oneThumbSliderRtl" },
goog.dom.createDom(goog.dom.TagName.SPAN, { id: "thumbRtl" })
)
sandBoxRtl.appendChild(oneThumbElemRtl)
oneThumbSliderRtl = new OneThumbSlider(true /* testOnlyIsRightToLeft */)
oneThumbSliderRtl.enableFlipForRtl(true)
oneThumbSliderRtl.decorate(oneThumbElemRtl)
goog.events.listen(
oneThumbSliderRtl,
goog.ui.Component.EventType.CHANGE,
function() {
oneChangeEventCount++
}
)
var twoThumbElemRtl = goog.dom.createDom(
goog.dom.TagName.DIV,
{ id: "twoThumbSliderRtl" },
goog.dom.createDom(goog.dom.TagName.DIV, { id: "rangeHighlightRtl" }),
goog.dom.createDom(goog.dom.TagName.SPAN, { id: "valueThumbRtl" }),
goog.dom.createDom(goog.dom.TagName.SPAN, { id: "extentThumbRtl" })
)
sandBoxRtl.appendChild(twoThumbElemRtl)
twoThumbSliderRtl = new TwoThumbSlider(true /* testOnlyIsRightToLeft */)
twoThumbSliderRtl.enableFlipForRtl(true)
twoThumbSliderRtl.decorate(twoThumbElemRtl)
twoChangeEventCount = 0
goog.events.listen(
twoThumbSliderRtl,
goog.ui.Component.EventType.CHANGE,
function() {
twoChangeEventCount++
}
)
}
function tearDown() {
oneThumbSlider.dispose()
twoThumbSlider.dispose()
oneThumbSliderRtl.dispose()
twoThumbSliderRtl.dispose()
mockClock.dispose()
goog.dom.removeChildren(goog.dom.getElement("sandbox"))
}
function testGetAndSetValue() {
oneThumbSlider.setValue(30)
assertEquals(30, oneThumbSlider.getValue())
assertEquals(
"Setting valid value must dispatch only a single change event.",
1,
oneChangeEventCount
)
oneThumbSlider.setValue(30)
assertEquals(30, oneThumbSlider.getValue())
assertEquals(
"Setting to same value must not dispatch change event.",
1,
oneChangeEventCount
)
oneThumbSlider.setValue(-30)
assertEquals(
"Setting invalid value must not change value.",
30,
oneThumbSlider.getValue()
)
assertEquals(
"Setting invalid value must not dispatch change event.",
1,
oneChangeEventCount
)
// Value thumb can't go past extent thumb, so we must move that first to
// allow setting value.
twoThumbSlider.setExtent(70)
twoChangeEventCount = 0
twoThumbSlider.setValue(60)
assertEquals(60, twoThumbSlider.getValue())
assertEquals(
"Setting valid value must dispatch only a single change event.",
1,
twoChangeEventCount
)
twoThumbSlider.setValue(60)
assertEquals(60, twoThumbSlider.getValue())
assertEquals(
"Setting to same value must not dispatch change event.",
1,
twoChangeEventCount
)
twoThumbSlider.setValue(-60)
assertEquals(
"Setting invalid value must not change value.",
60,
twoThumbSlider.getValue()
)
assertEquals(
"Setting invalid value must not dispatch change event.",
1,
twoChangeEventCount
)
}
function testGetAndSetValueRtl() {
var thumbElement = goog.dom.getElement("thumbRtl")
assertEquals(0, goog.style.bidi.getOffsetStart(thumbElement))
assertEquals("", thumbElement.style.left)
assertEquals("0px", thumbElement.style.right)
oneThumbSliderRtl.setValue(30)
assertEquals(30, oneThumbSliderRtl.getValue())
assertEquals(
"Setting valid value must dispatch only a single change event.",
1,
oneChangeEventCount
)
assertEquals("", thumbElement.style.left)
assertEquals("294px", thumbElement.style.right)
oneThumbSliderRtl.setValue(30)
assertEquals(30, oneThumbSliderRtl.getValue())
assertEquals(
"Setting to same value must not dispatch change event.",
1,
oneChangeEventCount
)
oneThumbSliderRtl.setValue(-30)
assertEquals(
"Setting invalid value must not change value.",
30,
oneThumbSliderRtl.getValue()
)
assertEquals(
"Setting invalid value must not dispatch change event.",
1,
oneChangeEventCount
)
// Value thumb can't go past extent thumb, so we must move that first to
// allow setting value.
var valueThumbElement = goog.dom.getElement("valueThumbRtl")
var extentThumbElement = goog.dom.getElement("extentThumbRtl")
assertEquals(0, goog.style.bidi.getOffsetStart(valueThumbElement))
assertEquals(0, goog.style.bidi.getOffsetStart(extentThumbElement))
assertEquals("", valueThumbElement.style.left)
assertEquals("0px", valueThumbElement.style.right)
assertEquals("", extentThumbElement.style.left)
assertEquals("0px", extentThumbElement.style.right)
twoThumbSliderRtl.setExtent(70)
twoChangeEventCount = 0
twoThumbSliderRtl.setValue(60)
assertEquals(60, twoThumbSliderRtl.getValue())
assertEquals(
"Setting valid value must dispatch only a single change event.",
1,
twoChangeEventCount
) | assertEquals(60, twoThumbSliderRtl.getValue())
assertEquals(
"Setting to same value must not dispatch change event.",
1,
twoChangeEventCount
)
assertEquals("", valueThumbElement.style.left)
assertEquals("600px", valueThumbElement.style.right)
assertEquals("", extentThumbElement.style.left)
assertEquals("700px", extentThumbElement.style.right)
twoThumbSliderRtl.setValue(-60)
assertEquals(
"Setting invalid value must not change value.",
60,
twoThumbSliderRtl.getValue()
)
assertEquals(
"Setting invalid value must not dispatch change event.",
1,
twoChangeEventCount
)
}
function testGetAndSetExtent() {
// Note(user): With a one thumb slider the API only really makes sense if you
// always use setValue since there is no extent.
twoThumbSlider.setExtent(7)
assertEquals(7, twoThumbSlider.getExtent())
assertEquals(
"Setting valid value must dispatch only a single change event.",
1,
twoChangeEventCount
)
twoThumbSlider.setExtent(7)
assertEquals(7, twoThumbSlider.getExtent())
assertEquals(
"Setting to same value must not dispatch change event.",
1,
twoChangeEventCount
)
twoThumbSlider.setExtent(-7)
assertEquals(
"Setting invalid value must not change value.",
7,
twoThumbSlider.getExtent()
)
assertEquals(
"Setting invalid value must not dispatch change event.",
1,
twoChangeEventCount
)
}
function testUpdateValueExtent() {
twoThumbSlider.setValueAndExtent(30, 50)
assertNotNull(twoThumbSlider.getElement())
assertEquals(
"Setting value results in updating aria-valuenow",
"30",
goog.a11y.aria.getState(
twoThumbSlider.getElement(),
goog.a11y.aria.State.VALUENOW
)
)
assertEquals(30, twoThumbSlider.getValue())
assertEquals(50, twoThumbSlider.getExtent())
}
function testValueText() {
oneThumbSlider.setValue(10)
assertEquals(
"Setting value results in correct aria-valuetext",
"A big value.",
goog.a11y.aria.getState(
oneThumbSlider.getElement(),
goog.a11y.aria.State.VALUETEXT
)
)
oneThumbSlider.setValue(2)
assertEquals(
"Updating value results in updated aria-valuetext",
"A small value.",
goog.a11y.aria.getState(
oneThumbSlider.getElement(),
goog.a11y.aria.State.VALUETEXT
)
)
}
function testGetValueText() {
oneThumbSlider.setValue(10)
assertEquals(
"Getting the text value gets the correct description",
"A big value.",
oneThumbSlider.getTextValue()
)
oneThumbSlider.setValue(2)
assertEquals(
"Getting the updated text value gets the correct updated description",
"A small value.",
oneThumbSlider.getTextValue()
)
}
function testRangeListener() {
var slider = new goog.ui.SliderBase()
slider.updateUi_ = slider.updateAriaStates = function() {}
slider.rangeModel.setValue(0)
var f = goog.testing.recordFunction()
goog.events.listen(slider, goog.ui.Component.EventType.CHANGE, f)
slider.rangeModel.setValue(50)
assertEquals(1, f.getCallCount())
slider.exitDocument()
slider.rangeModel.setValue(0)
assertEquals(
"The range model listener should not have been removed so we " +
"should have gotten a second event dispatch",
2,
f.getCallCount()
)
}
/**
* Verifies that rangeHighlight position and size are correct for the given
* startValue and endValue. Assumes slider has default min/max values [0, 100],
* width of 1020px, and thumb widths of 20px, with rangeHighlight drawn from
* the centers of the thumbs.
* @param {Element} rangeHighlight The range highlight element.
* @param {number} startValue The start value.
* @param {number} endValue The end value.
*/
function assertHighlightedRange(rangeHighlight, startValue, endValue) {
var rangeStr = "[" + startValue + ", " + endValue + "]"
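// Per the assumptions above (1020px slider, 20px thumbs, range [0, 100]), the
// usable track is 1000px, i.e. 10px per value unit, with thumb centers offset
// by half a thumb width (10px).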
var rangeStart = 10 + 10 * startValue
assertEquals(
"Range highlight for " +
rangeStr +
" should start at " +
rangeStart +
"px.",
rangeStart,
rangeHighlight.offsetLeft
)
var rangeSize = 10 * (endValue - startValue)
assertEquals(
"Range highlight for " +
rangeStr +
" should have size " +
rangeSize +
"px.",
rangeSize,
rangeHighlight.offsetWidth
)
}
function testKeyHandlingTests() {
twoThumbSlider.setValue(0)
twoThumbSlider.setExtent(100)
assertEquals(0, twoThumbSlider.getValue())
assertEquals(100, twoThumbSlider.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSlider.getElement(),
goog.events.KeyCodes.RIGHT
)
assertEquals(1, twoThumbSlider.getValue())
assertEquals(99, twoThumbSlider.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSlider.getElement(),
goog.events.KeyCodes.RIGHT
)
assertEquals(2, twoThumbSlider.getValue())
assertEquals(98, twoThumbSlider.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSlider.getElement(),
goog.events.KeyCodes.LEFT
)
assertEquals(1, twoThumbSlider.getValue())
assertEquals(98, twoThumbSlider.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSlider.getElement(),
goog.events.KeyCodes.LEFT
)
assertEquals(0, twoThumbSlider.getValue())
assertEquals(98, twoThumbSlider.getExtent())
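// Holding shift moves the thumbs by the larger block increment (10 here), as
// the asserts below show.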
goog.testing.events.fireKeySequence(
twoThumbSlider.getElement(),
goog.events.KeyCodes.RIGHT,
{ shiftKey: true }
)
assertEquals(10, twoThumbSlider.getValue())
assertEquals(90, twoThumbSlider.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSlider.getElement(),
goog.events.KeyCodes.RIGHT,
{ shiftKey: true }
)
assertEquals(20, twoThumbSlider.getValue())
assertEquals(80, twoThumbSlider.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSlider.getElement(),
goog.events.KeyCodes.LEFT,
{ shiftKey: true }
)
assertEquals(10, twoThumbSlider.getValue())
assertEquals(80, twoThumbSlider.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSlider.getElement(),
goog.events.KeyCodes.LEFT,
{ shiftKey: true }
)
assertEquals(0, twoThumbSlider.getValue())
assertEquals(80, twoThumbSlider.getExtent())
}
function testKeyHandlingLargeStepSize() {
twoThumbSlider.setValue(0)
twoThumbSlider.setExtent(100)
twoThumbSlider.setStep(5)
assertEquals(0, twoThumbSlider.getValue())
assertEquals(100, twoThumbSlider.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSlider.getElement(),
goog.events.KeyCodes.RIGHT
)
assertEquals(5, twoThumbSlider.getValue())
assertEquals(95, twoThumbSlider.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSlider.getElement(),
goog.events.KeyCodes.RIGHT
)
assertEquals(10, twoThumbSlider.getValue())
assertEquals(90, twoThumbSlider.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSlider.getElement(),
goog.events.KeyCodes.LEFT
)
assertEquals(5, twoThumbSlider.getValue())
assertEquals(90, twoThumbSlider.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSlider.getElement(),
goog.events.KeyCodes.LEFT
)
assertEquals(0, twoThumbSlider.getValue())
assertEquals(90, twoThumbSlider.getExtent())
}
function testKeyHandlingRtl() {
twoThumbSliderRtl.setValue(0)
twoThumbSliderRtl.setExtent(100)
assertEquals(0, twoThumbSliderRtl.getValue())
assertEquals(100, twoThumbSliderRtl.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSliderRtl.getElement(),
goog.events.KeyCodes.RIGHT
)
assertEquals(0, twoThumbSliderRtl.getValue())
assertEquals(99, twoThumbSliderRtl.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSliderRtl.getElement(),
goog.events.KeyCodes.RIGHT
)
assertEquals(0, twoThumbSliderRtl.getValue())
assertEquals(98, twoThumbSliderRtl.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSliderRtl.getElement(),
goog.events.KeyCodes.LEFT
)
assertEquals(1, twoThumbSliderRtl.getValue())
assertEquals(98, twoThumbSliderRtl.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSliderRtl.getElement(),
goog.events.KeyCodes.LEFT
)
assertEquals(2, twoThumbSliderRtl.getValue())
assertEquals(98, twoThumbSliderRtl.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSliderRtl.getElement(),
goog.events.KeyCodes.RIGHT,
{ shiftKey: true }
)
assertEquals(0, twoThumbSliderRtl.getValue())
assertEquals(90, twoThumbSliderRtl.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSliderRtl.getElement(),
goog.events.KeyCodes.RIGHT,
{ shiftKey: true }
)
assertEquals(0, twoThumbSliderRtl.getValue())
assertEquals(80, twoThumbSliderRtl.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSliderRtl.getElement(),
goog.events.KeyCodes.LEFT,
{ shiftKey: true }
)
assertEquals(10, twoThumbSliderRtl.getValue())
assertEquals(80, twoThumbSliderRtl.getExtent())
goog.testing.events.fireKeySequence(
twoThumbSliderRtl.getElement(),
goog.events.KeyCodes.LEFT,
{ shiftKey: true }
)
assertEquals(20, twoThumbSliderRtl.getValue())
assertEquals(80, twoThumbSliderRtl.getExtent())
}
function testRangeHighlight() {
var rangeHighlight = goog.dom.getElement("rangeHighlight")
// Test [0, 100]
twoThumbSlider.setValue(0)
twoThumbSlider.setExtent(100)
assertHighlightedRange(rangeHighlight, 0, 100)
// Test [25, 75]
twoThumbSlider.setValue(25)
twoThumbSlider.setExtent(50)
assertHighlightedRange(rangeHighlight, 25, 75)
// Test [50, 50]
twoThumbSlider.setValue(50)
twoThumbSlider.setExtent(0)
assertHighlightedRange(rangeHighlight, 50, 50)
}
function testRangeHighlightAnimation() {
var animationDelay = 160 // Delay in ms; a bit higher than the actual animation delay.
if (goog.userAgent.IE) {
// For some reason (probably due to how timing works), IE7 and IE8 will not
// stop the animation unless we wait for it.
animationDelay = 250
}
var rangeHighlight = goog.dom.getElement("rangeHighlight")
twoThumbSlider.setValue(0)
twoThumbSlider.setExtent(100)
// Animate right thumb, final range is [0, 75]
twoThumbSlider.animatedSetValue(75)
assertHighlightedRange(rangeHighlight, 0, 100)
mockClock.tick(animationDelay)
assertHighlightedRange(rangeHighlight, 0, 75)
// Animate left thumb, final range is [25, 75]
twoThumbSlider.animatedSetValue(25)
assertHighlightedRange(rangeHighlight, 0, 75)
mockClock.tick(animationDelay)
assertHighlightedRange(rangeHighlight, 25, 75)
}
/**
* Verifies that no error occurs and that the range highlight is sized correctly
* for a zero-size slider (i.e. doesn't attempt to set a negative size). The
* test tries to resize the slider from its original size to 0, then checks
* that the range highlight's size is correctly set to 0.
*
* The size verification is needed because Webkit/Gecko outright ignore calls
* to set negative sizes on an element, leaving it at its former size. IE
* throws an error in the same situation.
*/
function testRangeHighlightForZeroSizeSlider() {
// Make sure range highlight spans whole slider before zeroing width.
twoThumbSlider.setExtent(100)
twoThumbSlider.getElement().style.width = 0
// The setVisible call is used to force a UI update.
twoThumbSlider.setVisible(true)
assertEquals(
"Range highlight size should be 0 when slider size is 0",
0,
goog.dom.getElement("rangeHighlight").offsetWidth
)
}
function testAnimatedSetValueAnimatesFactoryCreatedAnimations() {
// Create and set the factory.
var ignore = goog.testing.mockmatchers.ignoreArgument
var mockControl = new goog.testing.MockControl()
var mockAnimation1 = mockControl.createLooseMock(goog.fx.Animation)
var mockAnimation2 = mockControl.createLooseMock(goog.fx.Animation)
var testAnimations = [mockAnimation1, mockAnimation2]
oneThumbSlider.setAdditionalAnimations(new AnimationFactory(testAnimations))
// Expect the animations to be played.
mockAnimation1.play(false)
mockAnimation2.play(false)
mockAnimation1.addEventListener(ignore, ignore, ignore)
mockAnimation2.addEventListener(ignore, ignore, ignore)
// Animate and verify.
mockControl.$replayAll()
oneThumbSlider.animatedSetValue(50)
mockControl.$verifyAll()
mockControl.$resetAll()
mockControl.$tearDown()
}
function testMouseWheelEventHandlerEnable() {
// Mouse wheel handling should be enabled by default.
assertTrue(oneThumbSlider.isHandleMouseWheel())
// Test disabling the mouse wheel handler
oneThumbSlider.setHandleMouseWheel(false)
assertFalse(oneThumbSlider.isHandleMouseWheel())
// Test that enabling again works fine.
oneThumbSlider.setHandleMouseWheel(true)
assertTrue(oneThumbSlider.isHandleMouseWheel())
// Test that mouse wheel handling can be disabled before rendering a slider.
var wheelDisabledElem = goog.dom.createDom(
goog.dom.TagName.DIV,
{},
goog.dom.createDom(goog.dom.TagName.SPAN)
)
var wheelDisabledSlider = new OneThumbSlider()
wheelDisabledSlider.setHandleMouseWheel(false)
wheelDisabledSlider.decorate(wheelDisabledElem)
assertFalse(wheelDisabledSlider.isHandleMouseWheel())
}
function testDisabledAndEnabledSlider() {
// Check that a slider is enabled by default
assertTrue(oneThumbSlider.isEnabled())
var listenerCount = oneThumbSlider.getHandler().getListenerCount()
// Disable the slider and check its state
oneThumbSlider.setEnabled(false)
assertFalse(oneThumbSlider.isEnabled())
assertTrue(
goog.dom.classlist.contains(
oneThumbSlider.getElement(),
"goog-slider-disabled"
)
)
assertEquals(0, oneThumbSlider.getHandler().getListenerCount())
// setValue should work unaffected even when the slider is disabled.
oneThumbSlider.setValue(30)
assertEquals(30, oneThumbSlider.getValue())
assertEquals(
"Setting valid value must dispatch a change event " +
"even when slider is disabled.",
1,
oneChangeEventCount
)
// Test the transition from disabled to enabled
oneThumbSlider.setEnabled(true)
assertTrue(oneThumbSlider.isEnabled())
assertFalse(
goog.dom.classlist.contains(
oneThumbSlider.getElement(),
"goog-slider-disabled"
)
)
assertTrue(listenerCount == oneThumbSlider.getHandler().getListenerCount())
}
function testBlockIncrementingWithEnableAndDisabled() {
var doc = goog.dom.getOwnerDocument(oneThumbSlider.getElement())
// Case when slider is not disabled between the mouse down and up events.
goog.testing.events.fireMouseDownEvent(oneThumbSlider.getElement())
assertEquals(
1,
goog.events.getListeners(
oneThumbSlider.getElement(),
goog.events.EventType.MOUSEMOVE,
false
).length
)
assertEquals(
1,
goog.events.getListeners(doc, goog.events.EventType.MOUSEUP, true).length
)
goog.testing.events.fireMouseUpEvent(oneThumbSlider.getElement())
assertEquals(
0,
goog.events.getListeners(
oneThumbSlider.getElement(),
goog.events.EventType.MOUSEMOVE,
false
).length
)
assertEquals(
0,
goog.events.getListeners(doc, goog.events.EventType.MOUSEUP, true).length
)
// Case when the slider is disabled between the mouse down and up events.
goog.testing.events.fireMouseDownEvent(oneThumbSlider.getElement())
assertEquals(
1,
goog.events.getListeners(
oneThumbSlider.getElement(),
goog.events.EventType.MOUSEMOVE,
false
).length
)
assertEquals(
1,
goog.events.getListeners(doc, goog.events.EventType.MOUSEUP, true).length
)
oneThumbSlider.setEnabled(false)
assertEquals(
0,
goog.events.getListeners(
oneThumbSlider.getElement(),
goog.events.EventType.MOUSEMOVE,
false
).length
)
assertEquals(
0,
goog.events.getListeners(doc, goog.events.EventType.MOUSEUP, true).length
)
assertEquals(1, oneThumbSlider.getHandler().getListenerCount())
goog.testing.events.fireMouseUpEvent(oneThumbSlider.getElement())
assertEquals(
0,
goog.events.getListeners(
oneThumbSlider.getElement(),
goog.events.EventType.MOUSEMOVE,
false
).length
)
assertEquals(
0,
goog.events.getListeners(doc, goog.events.EventType.MOUSEUP, true).length
)
}
function testMouseClickWithMoveToPointEnabled() {
var stepSize = 20
oneThumbSlider.setStep(stepSize)
oneThumbSlider.setMoveToPointEnabled(true)
var initialValue = oneThumbSlider.getValue()
// Figure out the number of pixels per step.
var numSteps = Math.round(
(oneThumbSlider.getMaximum() - oneThumbSlider.getMinimum()) / stepSize
)
var size = goog.style.getSize(oneThumbSlider.getElement())
var pixelsPerStep = Math.round(size.width / numSteps)
var coords = goog.style.getClientPosition(oneThumbSlider.getElement())
coords.x += pixelsPerStep / 2
// Case when value is increased
goog.testing.events.fireClickSequence(
oneThumbSlider.getElement(),
/* opt_button */ undefined,
coords
)
assertEquals(oneThumbSlider.getValue(), initialValue + stepSize)
// Case when value is decreased
goog.testing.events.fireClickSequence(
oneThumbSlider.getElement(),
/* opt_button */ undefined,
coords
)
assertEquals(oneThumbSlider.getValue(), initialValue)
// Case when thumb is clicked
goog.testing.events.fireClickSequence(oneThumbSlider.getElement())
assertEquals(oneThumbSlider.getValue(), initialValue)
}
function testNonIntegerStepSize() {
var stepSize = 0.02
oneThumbSlider.setStep(stepSize)
oneThumbSlider.setMinimum(-1)
oneThumbSlider.setMaximum(1)
oneThumbSlider.setValue(0.7)
assertRoughlyEquals(0.7, oneThumbSlider.getValue(), 0.000001)
oneThumbSlider.setValue(0.3)
assertRoughlyEquals(0.3, oneThumbSlider.getValue(), 0.000001)
}
function testSingleThumbSliderHasZeroExtent() {
var stepSize = 0.02
oneThumbSlider.setStep(stepSize)
oneThumbSlider.setMinimum(-1)
oneThumbSlider.setMaximum(1)
oneThumbSlider.setValue(0.7)
assertEquals(0, oneThumbSlider.getExtent())
oneThumbSlider.setValue(0.3)
assertEquals(0, oneThumbSlider.getExtent())
}
/**
* Tests getThumbCoordinateForValue method.
*/
function testThumbCoordinateForValueWithHorizontalSlider() {
// Make sure the y-coordinate stays the same for the horizontal slider.
var originalY = goog.style.getPosition(oneThumbSlider.valueThumb).y
var width =
oneThumbSlider.getElement().clientWidth -
oneThumbSlider.valueThumb.offsetWidth
var range = oneThumbSlider.getMaximum() - oneThumbSlider.getMinimum()
// Verify coordinate for a particular value.
var value = 20
var expectedX = Math.round((value / range) * width)
var expectedCoord = new goog.math.Coordinate(expectedX, originalY)
var coord = oneThumbSlider.getThumbCoordinateForValue(value)
assertObjectEquals(expectedCoord, coord)
// Verify this works regardless of current position.
oneThumbSlider.setValue(value / 2)
coord = oneThumbSlider.getThumbCoordinateForValue(value)
assertObjectEquals(expectedCoord, coord)
}
function testThumbCoordinateForValueWithVerticalSlider() {
// Make sure the x-coordinate stays the same for the vertical slider.
oneThumbSlider.setOrientation(goog.ui.SliderBase.Orientation.VERTICAL)
var originalX = goog.style.getPosition(oneThumbSlider.valueThumb).x
var height =
oneThumbSlider.getElement().clientHeight -
oneThumbSlider.valueThumb.offsetHeight
var range = oneThumbSlider.getMaximum() - oneThumbSlider.getMinimum()
// Verify coordinate for a particular value.
var value = 20
var expectedY = height - Math.round((value / range) * height)
var expectedCoord = new goog.math.Coordinate(originalX, expectedY)
var coord = oneThumbSlider.getThumbCoordinateForValue(value)
assertObjectEquals(expectedCoord, coord)
// Verify this works regardless of current position.
oneThumbSlider.setValue(value / 2)
coord = oneThumbSlider.getThumbCoordinateForValue(value)
assertObjectEquals(expectedCoord, coord)
}
/**
* Tests getValueFromMousePosition method.
*/
function testValueFromMousePosition() {
var value = 30
oneThumbSlider.setValue(value)
var offset = goog.style.getPageOffset(oneThumbSlider.valueThumb)
var size = goog.style.getSize(oneThumbSlider.valueThumb)
offset.x += size.width / 2
offset.y += size.height / 2
var e = null
goog.events.listen(oneThumbSlider, goog.events.EventType.MOUSEMOVE, function(
evt
) {
e = evt
})
goog.testing.events.fireMouseMoveEvent(oneThumbSlider, offset)
assertNotEquals(e, null)
assertRoughlyEquals(
value,
Math.round(oneThumbSlider.getValueFromMousePosition(e)),
1
)
// Verify this works regardless of current position.
oneThumbSlider.setValue(value / 2)
assertRoughlyEquals(
value,
Math.round(oneThumbSlider.getValueFromMousePosition(e)),
1
)
}
/**
* Tests ignoring click event after mousedown event.
*/
function testClickAfterMousedown() {
// Get the center of the thumb at value zero.
oneThumbSlider.setValue(0)
var offset = goog.style.getPageOffset(oneThumbSlider.valueThumb)
var size = goog.style.getSize(oneThumbSlider.valueThumb)
offset.x += size.width / 2
offset.y += size.height / 2
var sliderElement = oneThumbSlider.getElement()
var width = sliderElement.clientWidth - size.width
var range = oneThumbSlider.getMaximum() - oneThumbSlider.getMinimum()
var offsetXAtZero = offset.x
// Temporarily control time.
var theTime = goog.now()
var saveGoogNow = goog.now
goog.now = function() {
return theTime
}
// set coordinate for a particular value.
var valueOne = 10
offset.x = offsetXAtZero + Math.round((valueOne / range) * width)
goog.testing.events.fireMouseDownEvent(sliderElement, null, offset)
assertEquals(valueOne, oneThumbSlider.getValue())
// Verify a click event with another value that follows quickly is ignored.
theTime += oneThumbSlider.MOUSE_DOWN_DELAY_ / 2
var valueTwo = 20
offset.x = offsetXAtZero + Math.round((valueTwo / range) * width)
goog.testing.events.fireClickEvent(sliderElement, null, offset)
assertEquals(valueOne, oneThumbSlider.getValue())
// Verify a click later in time does move the thumb.
theTime += oneThumbSlider.MOUSE_DOWN_DELAY_
goog.testing.events.fireClickEvent(sliderElement, null, offset)
assertEquals(valueTwo, oneThumbSlider.getValue())
goog.now = saveGoogNow
}
/**
* Tests dragging events.
*/
function testDragEvents() {
var offset = goog.style.getPageOffset(oneThumbSlider.valueThumb)
var size = goog.style.getSize(oneThumbSlider.valueThumb)
offset.x += size.width / 2
offset.y += size.height / 2
var event_types = []
var handler = function(evt) {
event_types.push(evt.type)
}
goog.events.listen(
oneThumbSlider,
[
goog.ui.SliderBase.EventType.DRAG_START,
goog.ui.SliderBase.EventType.DRAG_END,
goog.ui.SliderBase.EventType.DRAG_VALUE_START,
goog.ui.SliderBase.EventType.DRAG_VALUE_END,
goog.ui.SliderBase.EventType.DRAG_EXTENT_START,
goog.ui.SliderBase.EventType.DRAG_EXTENT_END,
goog.ui.Component.EventType.CHANGE
],
handler
)
// Since the order of the value and extent events is not guaranteed across
// browsers, we need to allow for both orders here and, once we have them
// all, make sure that they were different.
function isValueOrExtentDragStart(type) {
return (
type == goog.ui.SliderBase.EventType.DRAG_VALUE_START ||
type == goog.ui.SliderBase.EventType.DRAG_EXTENT_START
)
}
function isValueOrExtentDragEnd(type) {
return (
type == goog.ui.SliderBase.EventType.DRAG_VALUE_END ||
type == goog.ui.SliderBase.EventType.DRAG_EXTENT_END
)
}
// Test that dragging the thumb calls all the correct events.
goog.testing.events.fireMouseDownEvent(oneThumbSlider.valueThumb)
offset.x += 100
goog.testing.events.fireMouseMoveEvent(oneThumbSlider.valueThumb, offset)
goog.testing.events.fireMouseUpEvent(oneThumbSlider.valueThumb)
assertEquals(9, event_types.length)
assertEquals(goog.ui.SliderBase.EventType.DRAG_START, event_types[0])
assertTrue(isValueOrExtentDragStart(event_types[1]))
assertEquals(goog.ui.SliderBase.EventType.DRAG_START, event_types[2])
assertTrue(isValueOrExtentDragStart(event_types[3]))
assertEquals(goog.ui.Component.EventType.CHANGE, event_types[4])
assertEquals(goog.ui.SliderBase.EventType.DRAG_END, event_types[5])
assertTrue(isValueOrExtentDragEnd(event_types[6]))
assertEquals(goog.ui.SliderBase.EventType.DRAG_END, event_types[7])
assertTrue(isValueOrExtentDragEnd(event_types[8]))
assertFalse(event_types[1] == event_types[3])
assertFalse(event_types[6] == event_types[8])
// Test that clicking the thumb without moving the mouse does not cause a
// CHANGE event between DRAG_START/DRAG_END.
event_types = []
goog.testing.events.fireMouseDownEvent(oneThumbSlider.valueThumb)
goog.testing.events.fireMouseUpEvent(oneThumbSlider.valueThumb)
assertEquals(8, event_types.length)
assertEquals(goog.ui.SliderBase.EventType.DRAG_START, event_types[0])
assertTrue(isValueOrExtentDragStart(event_types[1]))
assertEquals(goog.ui.SliderBase.EventType.DRAG_START, event_types[2])
assertTrue(isValueOrExtentDragStart(event_types[3]))
assertEquals(goog.ui.SliderBase.EventType.DRAG_END, event_types[4])
assertTrue(isValueOrExtentDragEnd(event_types[5]))
assertEquals(goog.ui.SliderBase.EventType.DRAG_END, event_types[6])
assertTrue(isValueOrExtentDragEnd(event_types[7]))
assertFalse(event_types[1] == event_types[3])
assertFalse(event_types[5] == event_types[7])
// Remove the listeners early rather than waiting for tearDown, to avoid
// building up arrays of events unnecessarily in further tests.
goog.events.removeAll(oneThumbSlider)
}
/**
* Tests dragging events updates the value correctly in LTR mode based on the
* amount of space remaining to the right of the thumb.
*/
function testDragEventsUpdatesValue() {
// Get the center of the thumb at minimum value.
oneThumbSlider.setMinimum(100)
oneThumbSlider.setMaximum(300)
oneThumbSlider.setValue(100)
// Need to set to (0, 0) in IE8 due to a browser bug where the
// offsetWidth/height is incorrectly calculated as 0 in test files.
var offset =
goog.userAgent.IE && !goog.userAgent.isVersionOrHigher("9")
? new goog.math.Coordinate()
: goog.style.getPageOffset(oneThumbSlider.valueThumb)
var offsetXAtZero = offset.x
var sliderElement = oneThumbSlider.getElementStrict()
var thumbSize = goog.style.getSize(oneThumbSlider.valueThumb)
var width = sliderElement.clientWidth - thumbSize.width
var range = oneThumbSlider.getMaximum() - oneThumbSlider.getMinimum()
// Test that dragging the thumb calls all the correct events.
goog.testing.events.fireMouseDownEvent(oneThumbSlider.valueThumb)
// Scroll to 30 in the range of 0-200. Given that this is LTR mode, that means
// the value will be 100 + 30 = 130.
offset.x = offsetXAtZero + Math.round((30 / range) * width)
goog.testing.events.fireMouseMoveEvent(oneThumbSlider.valueThumb, offset)
assertEquals(130, oneThumbSlider.getValue())
// Scroll to 70 in the range of 0-200. Given that this is LTR mode, that means
// the value will be 100 + 70 = 170.
offset.x = offsetXAtZero + Math.round((70 / range) * width)
goog.testing.events.fireMouseMoveEvent(oneThumbSlider.valueThumb, offset)
assertEquals(170, oneThumbSlider.getValue())
goog.testing.events.fireMouseUpEvent(oneThumbSlider.valueThumb)
}
/**
* Tests dragging events updates the value correctly in RTL mode based on the
* amount of space remaining to the left of the thumb.
*/
function testDragEventsInRtlModeUpdatesValue() {
// Get the center of the thumb at minimum value.
oneThumbSliderRtl.setMinimum(100)
oneThumbSliderRtl.setMaximum(300)
oneThumbSliderRtl.setValue(100)
var offset = goog.style.getPageOffset(oneThumbSliderRtl.valueThumb)
var offsetXAtZero = offset.x
// Add an extra half of the thumb width in IE8 due to a browser bug where the
// thumb offsetWidth is incorrectly calculated as 0 in test files.
var thumbSize = goog.style.getSize(oneThumbSliderRtl.valueThumb)
if (goog.userAgent.IE && !goog.userAgent.isVersionOrHigher("9")) {
offsetXAtZero += thumbSize.width / 2
}
var sliderElement = oneThumbSliderRtl.getElementStrict()
var width = sliderElement.clientWidth - thumbSize.width
var range = oneThumbSliderRtl.getMaximum() - oneThumbSliderRtl.getMinimum()
// Test that dragging the thumb calls all the correct events.
goog.testing.events.fireMouseDownEvent(oneThumbSliderRtl.valueThumb)
// Scroll to 30 in the range of 0-200. Given that this is RTL mode, that means
// the value will be 100 - (-30) = 130.
offset.x = offsetXAtZero - Math.round((30 / range) * width)
goog.testing.events.fireMouseMoveEvent(oneThumbSliderRtl.valueThumb, offset)
assertEquals(130, oneThumbSliderRtl.getValue())
// Scroll to 70 in the range of 0-200. Given that this is RTL mode, that means
// the value will be 100 - (-70) = 170.
offset.x = offsetXAtZero - Math.round((70 / range) * width)
goog.testing.events.fireMouseMoveEvent(oneThumbSliderRtl.valueThumb, offset)
assertEquals(170, oneThumbSliderRtl.getValue())
goog.testing.events.fireMouseUpEvent(oneThumbSliderRtl.valueThumb)
}
/**
* Tests animationend event after click.
*/
function testAnimationEndEventAfterClick() {
var offset = goog.style.getPageOffset(oneThumbSlider.valueThumb)
var size = goog.style.getSize(oneThumbSlider.valueThumb)
offset.x += size.width / 2
offset.y += size.height / 2
var event_types = []
var handler = function(evt) {
event_types.push(evt.type)
}
var animationDelay = 160 // Delay in ms; a bit higher than the actual animation delay.
if (goog.userAgent.IE) {
// For some reason (probably due to how timing works), IE7 and IE8 will not
// stop the animation unless we wait for it.
animationDelay = 250
}
oneThumbSlider.setMoveToPointEnabled(true)
goog.events.listen(
oneThumbSlider,
goog.ui.SliderBase.EventType.ANIMATION_END,
handler
)
function isAnimationEndType(type) {
return type == goog.ui.SliderBase.EventType.ANIMATION_END
}
offset.x += 100
goog.testing.events.fireClickSequence(
oneThumbSlider.getElement(),
/* opt_button */ undefined,
offset
)
mockClock.tick(animationDelay)
assertEquals(1, event_types.length)
assertTrue(isAnimationEndType(event_types[0]))
goog.events.removeAll(oneThumbSlider)
}
/**
* Tests that focus will be on the top level element when clicking the slider if
* `focusElementOnSliderDrag` is true.
*/
function testFocusOnSliderAfterClickIfFocusElementOnSliderDragTrue() {
var sliderElement = oneThumbSlider.getElement()
var coords = goog.style.getClientPosition(sliderElement)
goog.testing.events.fireClickSequence(
sliderElement,
/* opt_button */ undefined,
coords
)
var activeElement = oneThumbSlider.getDomHelper().getActiveElement()
assertEquals(sliderElement, activeElement)
}
/**
* Tests that focus will not be on the top level element when clicking the
* slider if `focusElementOnSliderDrag` is false.
*/
function testFocusNotOnSliderAfterClickIfFocusElementOnSliderDragFalse() {
oneThumbSlider.setFocusElementOnSliderDrag(false)
var sliderElement = oneThumbSlider.getElement()
var coords = goog.style.getClientPosition(sliderElement)
goog.testing.events.fireClickSequence(
sliderElement,
/* opt_button */ undefined,
coords
)
var activeElement = oneThumbSlider.getDomHelper().getActiveElement()
assertNotEquals(sliderElement, activeElement)
} |
twoThumbSliderRtl.setValue(60) |
applicants.component.ts | import { IVwUserObj, IDTextViewModel, DataServiceProxy } from 'app/_services/service-proxies';
import { AuthenticationService } from 'app/_services/authentication.service';
import { ActivatedRoute } from '@angular/router';
import { Router } from '@angular/router';
import { RecruitmentJobApplicationServiceProxy, RecruitmentJobServiceProxy, JobApplication, JobDTO, RecruitmentSettingServiceProxy } from './../../../_services/service-proxies';
import { Component, OnInit } from '@angular/core';
@Component({
selector: 'ngx-applicants',
templateUrl: './applicants.component.html',
styleUrls: ['./applicants.component.scss']
})
export class ApplicantsComponent implements OnInit {
pageTitle: string = 'Recent Listings';
allJobsApplication: JobApplication [] = [];
allJobs:JobDTO [] = [];
jobsCounter: number = 0;
loading: boolean = false;
noJobsHeader: string = 'There is no job at the moment';
noJobs: string = 'Please check back later';
applicantId:number = 0;
user: IVwUserObj;
employmentTypeData: IDTextViewModel [] = [];
jobLevelData: IDTextViewModel [] = [];
industryData: IDTextViewModel [] = [];
salaryData: IDTextViewModel [] = [];
isProfileComplete: boolean = true;
constructor(private jobService: RecruitmentJobApplicationServiceProxy, private job: RecruitmentJobServiceProxy, private dataservice: DataServiceProxy,
private router: Router, private route: ActivatedRoute, private authServ: AuthenticationService, private settings: RecruitmentSettingServiceProxy) { }
ngOnInit(): void {
this.applicantId = Number(this.route.snapshot.paramMap.get("id"));
this.fetchPostedJobs(); | myProfle() {
}
fetchPostedJobs(){
this.loading = true;
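// Note: the trailing arguments to getAllActiveJobs appear to be paging
// parameters (page 1, page size 10); the remaining filters are left undefined.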
this.job.getAllActiveJobs(undefined,undefined,undefined,undefined,undefined,undefined,1,10).subscribe( data => {
this.loading = false;
if(!data.hasError){
this.allJobs = data.result;
this.jobsCounter = data.totalRecord;
}
});
}
jobDetails(id){
this.router.navigateByUrl('/recruitment/jobdetails/'+ id);
}
gotoProfile(){
this.router.navigateByUrl('/recruitment/profile/'+ this.applicantId);
}
async getLoggedInUser(){
this.authServ.getuser().then(async (users: IVwUserObj[])=> {
if (users) {
if (users.length > 0) {
this.user = users[0];
console.log('My user is here',this.user)
}
}
})
}
} | }
|
bitcoin_da.ts | <?xml version="1.0" ?><!DOCTYPE TS><TS language="da" version="2.0">
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Mpaycoin</source>
<translation>Om Mpaycoin</translation>
</message>
<message>
<location line="+39"/>
<source><b>Mpaycoin</b> version</source>
<translation><b>Mpaycoin</b> version</translation>
</message>
<message>
<location line="+57"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation>
Dette program er eksperimentelt.
Det er gjort tilgængeligt under MIT/X11-softwarelicensen. Se den medfølgende fil "COPYING" eller http://www.opensource.org/licenses/mit-license.php.
Produktet indeholder software som er udviklet af OpenSSL Project til brug i OpenSSL Toolkit (http://www.openssl.org/), kryptografisk software skrevet af Eric Young ([email protected]) og UPnP-software skrevet af Thomas Bernard.</translation>
</message>
<message>
<location filename="../aboutdialog.cpp" line="+14"/>
<source>Copyright</source>
<translation>Copyright</translation>
</message>
<message>
<location line="+0"/>
<source>The Mpaycoin developers</source>
<translation>Mpaycoin-udviklerne</translation>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>Adressebog</translation>
</message>
<message>
<location line="+19"/>
<source>Double-click to edit address or label</source>
<translation>Dobbeltklik for at redigere adresse eller mærkat</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>Opret en ny adresse</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Kopier den valgte adresse til systemets udklipsholder</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>Ny adresse</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+63"/>
<source>These are your Mpaycoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>Dette er dine Mpaycoin-adresser til at modtage betalinger med. Du kan give en forskellig adresse til hver afsender, så du kan holde styr på, hvem der betaler dig.</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>&Copy Address</source>
<translation>Kopier adresse</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>Vis QR-kode</translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a Mpaycoin address</source>
<translation>Underskriv en besked for at bevise, at en Mpaycoin-adresse tilhører dig</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>Underskriv besked</translation>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>Slet den markerede adresse fra listen</translation>
</message>
<message>
<location line="+27"/>
<source>Export the data in the current tab to a file</source>
<translation>Eksportér den aktuelle visning til en fil</translation>
</message>
<message>
<location line="+3"/>
<source>&Export</source>
<translation>Eksporter</translation>
</message>
<message>
<location line="-44"/>
<source>Verify a message to ensure it was signed with a specified Mpaycoin address</source>
<translation>Verificér en besked for at sikre, at den er underskrevet med den angivne Mpaycoin-adresse</translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation>Verificér besked</translation>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>Slet</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="-5"/>
<source>These are your Mpaycoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation>Disse er dine Mpaycoin-adresser for at sende betalinger. Tjek altid beløb og modtageradresse, inden du sender mpaycoins.</translation>
</message>
<message>
<location line="+13"/>
<source>Copy &Label</source>
<translation>Kopier mærkat</translation>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation>Rediger</translation>
</message>
<message>
<location line="+1"/>
<source>Send &Coins</source>
<translation>Send mpaycoins</translation>
</message>
<message>
<location line="+265"/>
<source>Export Address Book Data</source>
<translation>Eksporter adressebogsdata</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Kommasepareret fil (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>Fejl under eksport</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Kunne ikke skrive til filen %1.</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>Mærkat</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(ingen mærkat)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>Adgangskodedialog</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Indtast adgangskode</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Ny adgangskode</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Gentag ny adgangskode</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>Indtast den nye adgangskode til tegnebogen.<br/>Brug venligst en adgangskode på <b>10 eller flere tilfældige tegn</b> eller <b>otte eller flere ord</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>Krypter tegnebog</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Denne funktion har brug for din tegnebogs adgangskode for at låse tegnebogen op.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Lås tegnebog op</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Denne funktion har brug for din tegnebogs adgangskode for at dekryptere tegnebogen.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Dekrypter tegnebog</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Skift adgangskode</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Indtast den gamle og den nye adgangskode til tegnebogen.</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Bekræft tegnebogskryptering</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR BITCOINS</b>!</source>
<translation>Advarsel: Hvis du krypterer din tegnebog og mister din adgangskode, vil du <b>MISTE ALLE DINE BITCOINS</b>!</translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>Er du sikker på, at du ønsker at kryptere din tegnebog?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>VIGTIGT: Enhver tidligere sikkerhedskopi, som du har lavet af tegnebogsfilen, bør blive erstattet af den nyligt genererede, krypterede tegnebogsfil. Af sikkerhedsmæssige årsager vil tidligere sikkerhedskopier af den ikke-krypterede tegnebogsfil blive ubrugelig i det øjeblik, du starter med at anvende den nye, krypterede tegnebog.</translation>
</message>
<message>
<location line="+100"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>Advarsel: Caps Lock-tasten er aktiveret!</translation>
</message>
<message>
<location line="-130"/>
<location line="+58"/>
<source>Wallet encrypted</source>
<translation>Tegnebog krypteret</translation>
</message>
<message>
<location line="-56"/>
<source>Mpaycoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your mpaycoins from being stolen by malware infecting your computer.</source>
<translation>Mpaycoin vil nu lukke for at gennemføre krypteringsprocessen. Husk på, at kryptering af din tegnebog vil ikke beskytte dine mpaycoins fuldt ud mod at blive stjålet af malware på din computer.</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Tegnebogskryptering mislykkedes</translation>
</message>
<message>
<location line="-54"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Tegnebogskryptering mislykkedes på grund af en intern fejl. Din tegnebog blev ikke krypteret.</translation>
</message>
<message>
<location line="+7"/>
<location line="+48"/>
<source>The supplied passphrases do not match.</source>
<translation>De angivne adgangskoder stemmer ikke overens.</translation>
</message>
<message>
<location line="-37"/>
<source>Wallet unlock failed</source>
<translation>Tegnebogsoplåsning mislykkedes</translation>
</message>
<message>
<location line="+1"/>
<location line="+11"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>Den angivne adgangskode for tegnebogsdekrypteringen er forkert.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Tegnebogsdekryptering mislykkedes</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>Tegnebogens adgangskode blev ændret.</translation>
</message>
</context>
<context>
<name>MpaycoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+254"/>
<source>Sign &message...</source>
<translation>Underskriv besked...</translation>
</message>
<message>
<location line="+246"/>
<source>Synchronizing with network...</source>
<translation>Synkroniserer med netværk...</translation>
</message>
<message>
<location line="-321"/>
<source>&Overview</source>
<translation>Oversigt</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>Vis generel oversigt over tegnebog</translation>
</message>
<message>
<location line="+20"/>
<source>&Transactions</source>
<translation>Transaktioner</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Gennemse transaktionshistorik</translation>
</message>
<message>
<location line="+7"/>
<source>Edit the list of stored addresses and labels</source>
<translation>Rediger listen over gemte adresser og mærkater</translation>
</message>
<message>
<location line="-14"/>
<source>Show the list of addresses for receiving payments</source>
<translation>Vis listen over adresser for at modtage betalinger</translation>
</message>
<message>
<location line="+31"/>
<source>E&xit</source>
<translation>Luk</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Afslut program</translation>
</message>
<message>
<location line="+7"/>
<source>Show information about Mpaycoin</source>
<translation>Vis informationer om Mpaycoin</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>Om Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>Vis informationer om Qt</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>Indstillinger...</translation>
</message>
<message>
<location line="+9"/>
<source>&Encrypt Wallet...</source>
<translation>Krypter tegnebog...</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>Sikkerhedskopier tegnebog...</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>Skift adgangskode...</translation>
</message>
<message>
<location line="+251"/>
<source>Importing blocks from disk...</source>
<translation>Importerer blokke fra disken...</translation>
</message>
<message>
<location line="+3"/>
<source>Reindexing blocks on disk...</source>
<translation>Genindekserer blokke på disken...</translation>
</message>
<message>
<location line="-319"/>
<source>Send coins to a Mpaycoin address</source>
<translation>Send mpaycoins til en Mpaycoin-adresse</translation>
</message>
<message>
<location line="+52"/>
<source>Modify configuration options for Mpaycoin</source>
<translation>Rediger konfigurationsindstillinger af Mpaycoin</translation>
</message>
<message>
<location line="+12"/>
<source>Backup wallet to another location</source>
<translation>Lav sikkerhedskopi af tegnebogen til et andet sted</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Skift adgangskode anvendt til tegnebogskryptering</translation>
</message>
<message>
<location line="+6"/>
<source>&Debug window</source>
<translation>Fejlsøgningsvindue</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>Åbn fejlsøgnings- og diagnosticeringskonsollen</translation>
</message>
<message>
<location line="-4"/>
<source>&Verify message...</source>
<translation>Verificér besked...</translation>
</message>
<message>
<location line="-183"/>
<location line="+6"/>
<location line="+508"/>
<source>Mpaycoin</source>
<translation>Mpaycoin</translation>
</message>
<message>
<location line="-514"/>
<location line="+6"/>
<source>Wallet</source>
<translation>Tegnebog</translation>
</message>
<message>
<location line="+107"/>
<source>&Send</source>
<translation>Send</translation>
</message>
<message>
<location line="+7"/>
<source>&Receive</source>
<translation>Modtag</translation>
</message>
<message>
<location line="+14"/>
<source>&Addresses</source>
<translation>Adresser</translation>
</message>
<message>
<location line="+23"/>
<location line="+2"/>
<source>&About Mpaycoin</source>
<translation>Om Mpaycoin</translation>
</message>
<message>
<location line="+10"/>
<location line="+2"/>
<source>&Show / Hide</source>
<translation>Vis / skjul</translation>
</message>
<message>
<location line="+1"/>
<source>Show or hide the main Window</source>
<translation>Vis eller skjul hovedvinduet</translation>
</message>
<message>
<location line="+3"/>
<source>Encrypt the private keys that belong to your wallet</source>
<translation>Krypter de private nøgler, der hører til din tegnebog</translation>
</message>
<message>
<location line="+7"/>
<source>Sign messages with your Mpaycoin addresses to prove you own them</source>
<translation>Underskriv beskeder med dine Mpaycoin-adresser for at bevise, at de tilhører dig</translation>
</message>
<message>
<location line="+2"/>
<source>Verify messages to ensure they were signed with specified Mpaycoin addresses</source>
<translation>Verificér beskeder for at sikre, at de er underskrevet med de(n) angivne Mpaycoin-adresse(r)</translation>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation>Fil</translation>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation>Indstillinger</translation>
</message>
<message>
<location line="+6"/>
<source>&Help</source>
<translation>Hjælp</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation>Faneværktøjslinje</translation>
</message>
<message>
<location line="-228"/>
<location line="+288"/>
<source>[testnet]</source>
<translation>[testnetværk]</translation>
</message>
<message>
<location line="-5"/>
<location line="+5"/>
<source>Mpaycoin client</source>
<translation>Mpaycoin-klient</translation>
</message>
<message numerus="yes">
<location line="+121"/>
<source>%n active connection(s) to Mpaycoin network</source>
<translation><numerusform>%n aktiv(e) forbindelse(r) til Mpaycoin-netværket</numerusform><numerusform>%n aktiv(e) forbindelse(r) til Mpaycoin-netværket</numerusform></translation>
</message>
<message>
<location line="+22"/>
<source>No block source available...</source>
<translation>Ingen blokkilde tilgængelig...</translation>
</message>
<message>
<location line="+12"/>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation>Behandlet %1 ud af %2 (estimeret) blokke af transaktionshistorikken.</translation>
</message>
<message>
<location line="+4"/>
<source>Processed %1 blocks of transaction history.</source>
<translation>Behandlet %1 blokke af transaktionshistorikken.</translation>
</message>
<message numerus="yes">
<location line="+20"/>
<source>%n hour(s)</source>
<translation><numerusform>%n time(r)</numerusform><numerusform>%n time(r)</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation><numerusform>%n dag(e)</numerusform><numerusform>%n dag(e)</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n week(s)</source>
<translation><numerusform>%n uge(r)</numerusform><numerusform>%n uge(r)</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>%1 behind</source>
<translation>%1 bagud</translation>
</message>
<message>
<location line="+14"/>
<source>Last received block was generated %1 ago.</source>
<translation>Senest modtagne blok blev genereret for %1 siden.</translation>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation>Transaktioner herefter vil endnu ikke være synlige.</translation>
</message>
<message>
<location line="+22"/>
<source>Error</source>
<translation>Fejl</translation>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation>Advarsel</translation>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation>Information</translation>
</message>
<message>
<location line="+70"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation>Transaktionen overskrider størrelsesgrænsen. Du kan stadig sende den for et gebyr på %1, hvilket går til de knuder, der behandler din transaktion og hjælper med at understøtte netværket. Vil du betale gebyret?</translation>
</message>
<message>
<location line="-140"/>
<source>Up to date</source>
<translation>Opdateret</translation>
</message>
<message>
<location line="+31"/>
<source>Catching up...</source>
<translation>Indhenter...</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation>Bekræft transaktionsgebyr</translation>
</message>
<message>
<location line="+8"/>
<source>Sent transaction</source>
<translation>Afsendt transaktion</translation>
</message>
<message>
<location line="+0"/>
<source>Incoming transaction</source>
<translation>Indgående transaktion</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Dato: %1
Beløb: %2
Type: %3
Adresse: %4
</translation>
</message>
<message>
<location line="+33"/>
<location line="+23"/>
<source>URI handling</source>
<translation>URI-håndtering</translation>
</message>
<message>
<location line="-23"/>
<location line="+23"/>
<source>URI can not be parsed! This can be caused by an invalid Mpaycoin address or malformed URI parameters.</source>
<translation>URI kan ikke fortolkes! Dette kan skyldes en ugyldig Mpaycoin-adresse eller misdannede URI-parametre.</translation>
</message>
<message>
<location line="+17"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>Tegnebog er <b>krypteret</b> og i øjeblikket <b>ulåst</b></translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>Tegnebog er <b>krypteret</b> og i øjeblikket <b>låst</b></translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+110"/>
<source>A fatal error occurred. Mpaycoin can no longer continue safely and will quit.</source>
<translation>Der opstod en fatal fejl. Mpaycoin kan ikke længere fortsætte sikkert og vil afslutte.</translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+105"/>
<source>Network Alert</source>
<translation>Netværksadvarsel</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Rediger adresse</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>Mærkat</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>Mærkaten forbundet med denne post i adressebogen</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation>Adressen tilknyttet til denne post i adressebogen. Dette kan kun ændres for afsendelsesadresser.</translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>Ny modtagelsesadresse</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Ny afsendelsesadresse</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>Rediger modtagelsesadresse</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>Rediger afsendelsesadresse</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>Den indtastede adresse "%1" er allerede i adressebogen.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Mpaycoin address.</source>
<translation>Den indtastede adresse "%1" er ikke en gyldig Mpaycoin-adresse.</translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>Kunne ikke låse tegnebog op.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>Ny nøglegenerering mislykkedes.</translation>
</message>
</context>
<context>
<name>FreespaceChecker</name>
<message>
<location filename="../intro.cpp" line="+61"/>
<source>A new data directory will be created.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>name</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Directory already exists. Add %1 if you intend to create a new directory here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Path already exists, and is not a directory.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Cannot create data directory here.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+517"/>
<location line="+13"/>
<source>Mpaycoin-Qt</source>
<translation>Mpaycoin-Qt</translation>
</message>
<message>
<location line="-13"/>
<source>version</source>
<translation>version</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>Anvendelse:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation>kommandolinjetilvalg</translation>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation>Brugergrænsefladeindstillinger</translation>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>Angiv sprog, f.eks "de_DE" (standard: systemlokalitet)</translation>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation>Start minimeret</translation>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation>Vis opstartsbillede ved start (standard: 1)</translation>
</message>
<message>
<location line="+1"/>
<source>Choose data directory on startup (default: 0)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>Intro</name>
<message>
<location filename="../forms/intro.ui" line="+14"/>
<source>Welcome</source>
<translation>Velkommen</translation>
</message>
<message>
<location line="+9"/>
<source>Welcome to Mpaycoin-Qt.</source>
<translation>Velkommen til Mpaycoin-Qt.</translation>
</message>
<message>
<location line="+26"/>
<source>As this is the first time the program is launched, you can choose where Mpaycoin-Qt will store its data.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Mpaycoin-Qt will download and store a copy of the Mpaycoin block chain. At least %1GB of data will be stored in this directory, and it will grow over time. The wallet will also be stored in this directory.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Use the default data directory</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Use a custom data directory:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../intro.cpp" line="+100"/>
<source>Error</source>
<translation>Fejl</translation>
</message>
<message>
<location line="+9"/>
<source>GB of free space available</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>(of %1GB needed)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Indstillinger</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>Generelt</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation>Valgfrit transaktionsgebyr pr. kB, der hjælper dine transaktioner med at blive behandlet hurtigt. De fleste transaktioner er på 1 kB.</translation>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>Betal transaktionsgebyr</translation>
</message>
<message>
<location line="+31"/>
<source>Automatically start Mpaycoin after logging in to the system.</source>
<translation>Start Mpaycoin automatisk, når der logges ind på systemet</translation>
</message>
<message>
<location line="+3"/>
<source>&Start Mpaycoin on system login</source>
<translation>Start Mpaycoin, når systemet startes</translation>
</message>
<message>
<location line="+35"/>
<source>Reset all client options to default.</source>
<translation>Nulstil alle klientindstillinger til deres standard.</translation>
</message>
<message>
<location line="+3"/>
<source>&Reset Options</source>
<translation>Nulstil indstillinger</translation>
</message>
<message>
<location line="+13"/>
<source>&Network</source>
<translation>Netværk</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Mpaycoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation>Åbn Mpaycoin-klientens port på routeren automatisk. Dette virker kun, når din router understøtter UPnP og UPnP er aktiveret.</translation>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>Konfigurer port vha. UPnP</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the Mpaycoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation>Opret forbindelse til Mpaycoin-netværket via en SOCKS-proxy (f.eks. ved tilslutning gennem Tor)</translation>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation>Forbind gennem SOCKS-proxy:</translation>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>Proxy-IP:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation>IP-adressen på proxyen (f.eks. 127.0.0.1)</translation>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>Port:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Porten på proxyen (f.eks. 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>SOCKS-version:</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>SOCKS-version af proxyen (f.eks. 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>Vindue</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>Vis kun et statusikon efter minimering af vinduet.</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>Minimer til statusfeltet i stedet for proceslinjen</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>Minimer i stedet for at afslutte programmet, når vinduet lukkes. Når denne indstilling er valgt, vil programmet kun blive lukket, når du har valgt Afslut i menuen.</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>Minimer ved lukning</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>Visning</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>Brugergrænsefladesprog:</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Mpaycoin.</source>
<translation>Brugergrænsefladesproget kan angives her. Denne indstilling træder først i kraft, når Mpaycoin genstartes.</translation>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>Enhed at vise beløb i:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Vælg den standard underopdelingsenhed, som skal vises i brugergrænsefladen og ved afsendelse af mpaycoins.</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show Mpaycoin addresses in the transaction list or not.</source>
<translation>Afgør hvorvidt Mpaycoin-adresser skal vises i transaktionslisten eller ej.</translation>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>Vis adresser i transaktionsliste</translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>OK</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>Annuller</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>Anvend</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+54"/>
<source>default</source>
<translation>standard</translation>
</message>
<message>
<location line="+130"/>
<source>Confirm options reset</source>
<translation>Bekræft nulstilling af indstillinger</translation>
</message>
<message>
<location line="+1"/>
<source>Some settings may require a client restart to take effect.</source>
<translation>Nogle indstillinger kan kræve, at klienten genstartes, før de træder i kraft.</translation>
</message>
<message>
<location line="+0"/>
<source>Do you want to proceed?</source>
<translation>Ønsker du at fortsætte?</translation>
</message>
<message>
<location line="+42"/>
<location line="+9"/>
<source>Warning</source>
<translation>Advarsel</translation>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Mpaycoin.</source>
<translation>Denne indstilling træder i kraft, efter Mpaycoin genstartes.</translation>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>Den angivne proxy-adresse er ugyldig.</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Formular</translation>
</message>
<message>
<location line="+50"/>
<location line="+202"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Mpaycoin network after a connection is established, but this process has not completed yet.</source>
<translation>Den viste information kan være forældet. Din tegnebog synkroniserer automatisk med Mpaycoin-netværket, når en forbindelse etableres, men denne proces er ikke gennemført endnu.</translation>
</message>
<message>
<location line="-131"/>
<source>Unconfirmed:</source>
<translation>Ubekræftede:</translation>
</message>
<message>
<location line="-78"/>
<source>Wallet</source>
<translation>Tegnebog</translation>
</message>
<message>
<location line="+49"/>
<source>Confirmed:</source>
<translation>Bekræftede:</translation>
</message>
<message>
<location line="+16"/>
<source>Your current spendable balance</source>
<translation>Din nuværende tilgængelige saldo</translation>
</message>
<message>
<location line="+29"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the spendable balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Immature:</source>
<translation>Umodne:</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>Udvunden saldo, som endnu ikke er modnet</translation>
</message>
<message>
<location line="+13"/>
<source>Total:</source>
<translation>Total:</translation>
</message>
<message>
<location line="+16"/>
<source>Your current total balance</source>
<translation>Din nuværende totale saldo</translation>
</message>
<message>
<location line="+53"/>
<source><b>Recent transactions</b></source>
<translation><b>Nyeste transaktioner</b></translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation>ikke synkroniseret</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+108"/>
<source>Cannot start mpaycoin: click-to-pay handler</source>
<translation>Kan ikke starte mpaycoin: click-to-pay-håndtering</translation>
</message>
</context>
<context>
<name>QObject</name>
<message>
<location filename="../bitcoin.cpp" line="+92"/>
<location filename="../intro.cpp" line="-32"/>
<source>Mpaycoin</source>
<translation>Mpaycoin</translation>
</message>
<message>
<location line="+1"/>
<source>Error: Specified data directory "%1" does not exist.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../intro.cpp" line="+1"/>
<source>Error: Specified data directory "%1" can not be created.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation>QR-kode-dialog</translation>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>Anmod om betaling</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>Beløb:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>Mærkat:</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>Besked:</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>Gem som...</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+64"/>
<source>Error encoding URI into QR Code.</source>
<translation>Fejl ved kodning fra URI til QR-kode</translation>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation>Det indtastede beløb er ugyldigt, tjek venligst.</translation>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>Resulterende URI var for lang; prøv at forkorte teksten til mærkaten/beskeden.</translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation>Gem QR-kode</translation>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation>PNG-billeder (*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>Klientnavn</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+345"/>
<source>N/A</source>
<translation>N/A</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>Klientversion</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>Information</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>Anvender OpenSSL-version</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>Opstartstid</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>Netværk</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>Antal forbindelser</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation>Tilsluttet testnetværk</translation>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>Blokkæde</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>Nuværende antal blokke</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>Estimeret antal blokke</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation>Tidsstempel for seneste blok</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>Åbn</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation>Kommandolinjetilvalg</translation>
</message>
<message>
<location line="+7"/>
<source>Show the Mpaycoin-Qt help message to get a list with possible Mpaycoin command-line options.</source>
<translation>Vis Mpaycoin-Qt-hjælpebeskeden for at få en liste over de tilgængelige Mpaycoin-kommandolinjeindstillinger.</translation>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation>Vis</translation>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>Konsol</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation>Byggedato</translation>
</message>
<message>
<location line="-104"/>
<source>Mpaycoin - Debug window</source>
<translation>Mpaycoin - Fejlsøgningsvindue</translation>
</message>
<message>
<location line="+25"/>
<source>Mpaycoin Core</source>
<translation>Mpaycoin Core</translation>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>Fejlsøgningslogfil</translation>
</message>
<message>
<location line="+7"/>
<source>Open the Mpaycoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation>Åbn Mpaycoin-fejlsøgningslogfilen fra det nuværende datakatalog. Dette kan tage nogle få sekunder for store logfiler.</translation>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>Ryd konsol</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the Mpaycoin RPC console.</source>
<translation>Velkommen til Mpaycoin RPC-konsollen</translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>Brug op og ned-piletasterne til at navigere historikken og <b>Ctrl-L</b> til at rydde skærmen.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>Tast <b>help</b> for en oversigt over de tilgængelige kommandoer.</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+128"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Send mpaycoins</translation>
</message>
<message>
<location line="+50"/>
<source>Send to multiple recipients at once</source>
<translation>Send til flere modtagere på en gang</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>Tilføj modtager</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>Fjern alle transaktionsfelter</translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>Ryd alle</translation>
</message>
<message>
<location line="+22"/>
<source>Balance:</source>
<translation>Saldo:</translation>
</message>
<message>
<location line="+10"/>
<source>123.456 ABC</source>
<translation>123,456 ABC</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>Bekræft afsendelsen</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>Afsend</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-62"/>
<location line="+2"/>
<source><b>%1</b> to %2 (%3)</source>
<translation><b>%1</b> til %2 (%3)</translation>
</message>
<message>
<location line="+6"/>
<source>Confirm send coins</source>
<translation>Bekræft afsendelse af mpaycoins</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>Er du sikker på, at du vil sende %1?</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation> og </translation>
</message>
<message>
<location line="+23"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>Modtagerens adresse er ikke gyldig. Tjek venligst adressen igen.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>Beløbet til betaling skal være større end 0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>Beløbet overstiger din saldo.</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>Totalen overstiger din saldo, når %1 transaktionsgebyr er inkluderet.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>Duplikeret adresse fundet. Du kan kun sende til hver adresse en gang pr. afsendelse.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation>Fejl: Oprettelse af transaktionen mislykkedes!</translation>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Fejl: Transaktionen blev afvist. Dette kan ske, hvis nogle af dine mpaycoins i din tegnebog allerede er brugt, som hvis du brugte en kopi af wallet.dat og dine mpaycoins er blevet brugt i kopien, men ikke er markeret som brugt her.</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>Formular</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>Beløb:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>Betal til:</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation>Mpaycoin-adressen som betalingen skal sendes til (f.eks. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Indtast en mærkat for denne adresse for at føje den til din adressebog</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>Mærkat:</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation>Vælg adresse fra adressebog</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Indsæt adresse fra udklipsholderen</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation>Fjern denne modtager</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Mpaycoin address (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation>Indtast en Mpaycoin-adresse (f.eks. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>Signaturer - Underskriv/verificér en besked</translation>
</message>
<message>
<location line="+13"/>
<source>&Sign Message</source>
<translation>Underskriv besked</translation>
</message>
<message>
<location line="+6"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>Du kan underskrive beskeder med dine Mpaycoin-adresser for at bevise, at de tilhører dig. Pas på ikke at underskrive noget vagt, da phishingangreb kan narre dig til at overdrage din identitet. Underskriv kun fuldt detaljerede udsagn, du er enig i.</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation>Mpaycoin-adressen som beskeden skal underskrives med (f.eks. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation>
</message>
<message>
<location line="+10"/>
<location line="+213"/>
<source>Choose an address from the address book</source>
<translation>Vælg adresse fra adressebog</translation>
</message>
<message>
<location line="-203"/>
<location line="+213"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-203"/>
<source>Paste address from clipboard</source>
<translation>Indsæt adresse fra udklipsholderen</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>Indtast beskeden, du ønsker at underskrive</translation>
</message>
<message>
<location line="+7"/>
<source>Signature</source>
<translation>Underskrift</translation>
</message>
<message>
<location line="+27"/>
<source>Copy the current signature to the system clipboard</source>
<translation>Kopier den nuværende underskrift til systemets udklipsholder</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Mpaycoin address</source>
<translation>Underskriv denne besked for at bevise, at Mpaycoin-adressen tilhører dig</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>Underskriv besked</translation>
</message>
<message>
<location line="+14"/>
<source>Reset all sign message fields</source>
<translation>Nulstil alle "underskriv besked"-felter</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>Ryd alle</translation>
</message>
<message>
<location line="-87"/>
<source>&Verify Message</source>
<translation>Verificér besked</translation>
</message>
<message>
<location line="+6"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>Indtast underskriftsadressen, beskeden (inkluder linjeskift, mellemrum, tabulatorer mv. nøjagtigt, som de fremgår) og underskriften nedenfor for at verificere beskeden. Vær forsigtig med ikke at lægge mere i underskriften, end hvad der står i selve beskeden, så du undgår at blive narret af et man-in-the-middle-angreb.</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation>Mpaycoin-adressen som beskeden er underskrevet med (f.eks. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Mpaycoin address</source>
<translation>Verificér beskeden for at sikre, at den er underskrevet med den angivne Mpaycoin-adresse</translation>
</message>
<message>
<location line="+3"/>
<source>Verify &Message</source>
<translation>Verificér besked</translation>
</message>
<message>
<location line="+14"/>
<source>Reset all verify message fields</source>
<translation>Nulstil alle "verificér besked"-felter</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Mpaycoin address (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation>Indtast en Mpaycoin-adresse (f.eks. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>Klik "Underskriv besked" for at generere underskriften</translation>
</message>
<message>
<location line="+3"/>
<source>Enter Mpaycoin signature</source>
<translation>Indtast Mpaycoin-underskriften</translation>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>Den indtastede adresse er ugyldig.</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>Tjek venligst adressen, og forsøg igen.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>Den indtastede adresse henviser ikke til en nøgle.</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>Tegnebogsoplåsning annulleret.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>Den private nøgle for den indtastede adresse er ikke tilgængelig.</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>Underskrivning af besked mislykkedes.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>Besked underskrevet.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>Underskriften kunne ikke afkodes.</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>Tjek venligst underskriften, og forsøg igen.</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>Underskriften matcher ikke beskedens indhold.</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>Verificering af besked mislykkedes.</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>Besked verificeret.</translation>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<location filename="../splashscreen.cpp" line="+22"/>
<source>The Mpaycoin developers</source>
<translation>Mpaycoin-udviklerne</translation>
</message>
<message>
<location line="+1"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation>Åben indtil %1</translation>
</message>
<message>
<location line="+6"/>
<source>%1/offline</source>
<translation>%1/offline</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/ubekræftet</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 bekræftelser</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation>Status</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>, transmitteret igennem %n knude(r)</numerusform><numerusform>, transmitteret igennem %n knude(r)</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Dato</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>Kilde</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>Genereret</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>Fra</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>Til</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>egen adresse</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>mærkat</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>Kredit</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>modner efter yderligere %n blok(ke)</numerusform><numerusform>modner efter yderligere %n blok(ke)</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>ikke accepteret</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>Debet</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>Transaktionsgebyr</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>Nettobeløb</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>Besked</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>Kommentar</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>Transaktionens ID</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 20 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation>Genererede mpaycoins skal modne i 20 blokke, før de kan bruges. Da du genererede denne blok, blev den transmitteret til netværket for at blive føjet til blokkæden. Hvis det mislykkes at komme ind i kæden, vil dens tilstand skifte til "ikke accepteret", og den vil ikke kunne bruges. Dette kan lejlighedsvis ske, hvis en anden knude genererer en blok inden for få sekunder af din.</translation>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>Fejlsøgningsinformation</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>Transaktion</translation>
</message>
<message>
<location line="+3"/>
<source>Inputs</source>
<translation>Input</translation>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>Beløb</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>sand</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>falsk</translation>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation>, er ikke blevet transmitteret endnu</translation>
</message>
<message numerus="yes">
<location line="-35"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Åben %n blok yderligere</numerusform><numerusform>Åben %n blokke yderligere</numerusform></translation>
</message>
<message>
<location line="+70"/>
<source>unknown</source>
<translation>ukendt</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Transaktionsdetaljer</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Denne rude viser en detaljeret beskrivelse af transaktionen</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+225"/>
<source>Date</source>
<translation>Dato</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Type</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Beløb</translation>
</message>
<message numerus="yes">
<location line="+57"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Åben %n blok(ke) yderligere</numerusform><numerusform>Åben %n blok(ke) yderligere</numerusform></translation>
</message>
<message>
<location line="+3"/>
<source>Open until %1</source>
<translation>Åben indtil %1</translation>
</message>
<message>
<location line="+3"/>
<source>Offline (%1 confirmations)</source>
<translation>Offline (%1 bekræftelser)</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation>Ubekræftet (%1 af %2 bekræftelser)</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Bekræftet (%1 bekræftelser)</translation>
</message>
<message numerus="yes">
<location line="+8"/>
<source>Mined balance will be available when it matures in %n more block(s)</source>
<translation><numerusform>Udvunden saldo, som vil være tilgængelig, når den modner efter yderligere %n blok(ke)</numerusform><numerusform>Udvunden saldo, som vil være tilgængelig, når den modner efter yderligere %n blok(ke)</numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>Denne blok blev ikke modtaget af nogen andre knuder og vil formentlig ikke blive accepteret!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Genereret, men ikke accepteret</translation>
</message>
<message>
<location line="+43"/>
<source>Received with</source>
<translation>Modtaget med</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Modtaget fra</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Sendt til</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Betaling til dig selv</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Udvundne</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(n/a)</translation>
</message>
<message>
<location line="+199"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>Transaktionsstatus. Hold musen over dette felt for at vise antallet af bekræftelser.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>Dato og klokkeslæt for modtagelse af transaktionen.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Transaktionstype.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>Destinationsadresse for transaktion.</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Beløb fjernet eller tilføjet balance.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+52"/>
<location line="+16"/>
<source>All</source>
<translation>Alle</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>I dag</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>Denne uge</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Denne måned</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>Sidste måned</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Dette år</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>Interval...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>Modtaget med</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Sendt til</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>Til dig selv</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Udvundne</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Andet</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>Indtast adresse eller mærkat for at søge</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Minimumsbeløb</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Kopier adresse</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Kopier mærkat</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Kopier beløb</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>Kopier transaktionens ID</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Rediger mærkat</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>Vis transaktionsdetaljer</translation>
</message>
<message>
<location line="+143"/>
<source>Export Transaction Data</source>
<translation>Eksporter transaktionsdata</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Kommasepareret fil (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Bekræftet</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Dato</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Type</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Mærkat</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Beløb</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>Fejl under eksport</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Kunne ikke skrive til filen %1.</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>Interval:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>til</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+193"/>
<source>Send Coins</source>
<translation>Send mpaycoins</translation>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<location filename="../walletview.cpp" line="+46"/>
<source>&Export</source>
<translation>Eksporter</translation>
</message>
<message>
<location line="+1"/>
<source>Export the data in the current tab to a file</source>
<translation>Eksportér den aktuelle visning til en fil</translation>
</message>
<message>
<location line="+197"/>
<source>Backup Wallet</source>
<translation>Sikkerhedskopier tegnebog</translation>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation>Tegnebogsdata (*.dat)</translation>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation>Sikkerhedskopiering mislykkedes</translation>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation>Der opstod en fejl i forbindelse med at gemme tegnebogsdata til det nye sted.</translation>
</message>
<message>
<location line="+4"/>
<source>Backup Successful</source>
<translation>Sikkerhedskopiering lykkedes</translation>
</message>
<message>
<location line="+0"/>
<source>The wallet data was successfully saved to the new location.</source>
<translation>Tegnebogsdata blev problemfrit gemt til det nye sted.</translation>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+98"/>
<source>Mpaycoin version</source>
<translation>Mpaycoin-version</translation>
</message>
<message>
<location line="+104"/>
<source>Usage:</source>
<translation>Anvendelse:</translation>
</message>
<message>
<location line="-30"/>
<source>Send command to -server or mpaycoind</source>
<translation>Send kommando til -server eller mpaycoind</translation>
</message>
<message>
<location line="-23"/>
<source>List commands</source>
<translation>Liste over kommandoer</translation>
</message>
<message>
<location line="-13"/>
<source>Get help for a command</source>
<translation>Få hjælp til en kommando</translation>
</message>
<message>
<location line="+25"/>
<source>Options:</source>
<translation>Indstillinger:</translation>
</message>
<message>
<location line="+24"/>
<source>Specify configuration file (default: mpaycoin.conf)</source>
<translation>Angiv konfigurationsfil (standard: mpaycoin.conf)</translation>
</message>
<message>
<location line="+3"/>
<source>Specify pid file (default: mpaycoind.pid)</source>
<translation>Angiv pid-fil (standard: mpaycoind.pid)</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>Angiv datakatalog</translation>
</message>
<message>
<location line="-9"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>Angiv databasecachestørrelse i megabytes (standard: 25)</translation>
</message>
<message>
<location line="-28"/>
<source>Listen for connections on <port> (default: 7817 or testnet: 17817)</source>
<translation>Lyt til forbindelser på <port> (standard: 7817 eller testnetværk: 17817)</translation>
</message>
<message>
<location line="+5"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>Oprethold højest <n> forbindelser til andre i netværket (standard: 125)</translation>
</message>
<message>
<location line="-49"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Forbind til en knude for at hente adresser på andre i netværket, og afbryd</translation>
</message>
<message>
<location line="+84"/>
<source>Specify your own public address</source>
<translation>Angiv din egen offentlige adresse</translation>
</message>
<message>
<location line="+3"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Grænse for afbrydelse af dårlige forbindelser (standard: 100)</translation>
</message>
<message>
<location line="-136"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Antal sekunder dårlige forbindelser skal vente før reetablering (standard: 86400)</translation>
</message>
<message>
<location line="-33"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>Der opstod en fejl ved angivelse af RPC-porten %u til at lytte på IPv4: %s</translation>
</message>
<message>
<location line="+31"/>
<source>Listen for JSON-RPC connections on <port> (default: 7818 or testnet: 17818)</source>
<translation>Lyt til JSON-RPC-forbindelser på <port> (standard: 7818 eller testnetværk: 17818)</translation>
</message>
<message>
<location line="+37"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Accepter kommandolinje- og JSON-RPC-kommandoer</translation>
</message>
<message>
<location line="+77"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>Kør i baggrunden som en service, og accepter kommandoer</translation>
</message>
<message>
<location line="+38"/>
<source>Use the test network</source>
<translation>Brug testnetværket</translation>
</message>
<message>
<location line="-114"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>Accepter forbindelser udefra (standard: 1 hvis hverken -proxy eller -connect)</translation>
</message>
<message>
<location line="-84"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=mpaycoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Mpaycoin Alert" [email protected]
</source>
<translation>%s, du skal angive en RPC-adgangskode i konfigurationsfilen:
%s
Det anbefales, at du bruger nedenstående, tilfældige adgangskode:
rpcuser=mpaycoinrpc
rpcpassword=%s
(du behøver ikke huske denne adgangskode)
Brugernavnet og adgangskode MÅ IKKE være det samme.
Hvis filen ikke eksisterer, opret den og giv ingen andre end ejeren læserettighed.
Det anbefales også at angive alertnotify, så du påmindes om problemer;
f.eks.: alertnotify=echo %%s | mail -s "Mpaycoin Alert" [email protected]
</translation>
</message>
<message>
<location line="+17"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>Der opstod en fejl ved angivelse af RPC-porten %u til at lytte på IPv6, falder tilbage til IPv4: %s</translation>
</message>
<message>
<location line="+3"/>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation>Tildel til den givne adresse og lyt altid på den. Brug [vært]:port-notation for IPv6</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. Mpaycoin is probably already running.</source>
<translation>Kan ikke opnå lås på datakatalog %s. Mpaycoin er sandsynligvis allerede startet.</translation>
</message>
<message>
<location line="+3"/>
<source>Enter regression test mode, which uses a special chain in which blocks can be solved instantly. This is intended for regression testing tools and app development.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Fejl: Transaktionen blev afvist. Dette kan ske, hvis nogle af dine mpaycoins i din tegnebog allerede er brugt, som hvis du brugte en kopi af wallet.dat og dine mpaycoins er blevet brugt i kopien, men ikke er markeret som brugt her.</translation>
</message>
<message>
<location line="+4"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation>Fejl: Denne transaktion kræver et transaktionsgebyr på minimum %s pga. dens størrelse, kompleksitet eller anvendelse af nyligt modtagne mpaycoins!</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation>Udfør kommando, når en relevant advarsel modtages (%s i kommandoen erstattes med beskeden)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>Udfør kommando, når en transaktion i tegnebogen ændres (%s i kommandoen erstattes med TxID)</translation>
</message>
<message>
<location line="+11"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation>Angiv maksimumstørrelse for høj prioritet/lavt gebyr-transaktioner i bytes (standard: 27000)</translation>
</message>
<message>
<location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation>Dette er en foreløbig testudgivelse - brug på eget ansvar - brug ikke til udvinding eller handelsprogrammer</translation>
</message>
<message>
<location line="+5"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Advarsel: -paytxfee er sat meget højt! Dette er det gebyr du vil betale, hvis du sender en transaktion.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation>Advarsel: Viste transaktioner kan være ukorrekte! Du eller andre knuder kan have behov for at opgradere.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Mpaycoin will not work properly.</source>
<translation>Advarsel: Undersøg venligst, at din computers dato og klokkeslæt er korrekt indstillet! Hvis der er fejl i disse, vil Mpaycoin ikke fungere korrekt.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation>Advarsel: fejl under læsning af wallet.dat! Alle nøgler blev læst korrekt, men transaktionsdata eller adressebogsposter kan mangle eller være forkerte.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>Advarsel: wallet.dat ødelagt, data reddet! Oprindelig wallet.dat gemt som wallet.{timestamp}.bak i %s; hvis din saldo eller dine transaktioner er forkerte, bør du genskabe fra en sikkerhedskopi.</translation>
</message>
<message>
<location line="+14"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>Forsøg at genskabe private nøgler fra ødelagt wallet.dat</translation>
</message>
<message>
<location line="+2"/>
<source>Block creation options:</source>
<translation>Blokoprettelsestilvalg:</translation>
</message>
<message>
<location line="+5"/>
<source>Connect only to the specified node(s)</source>
<translation>Tilslut kun til de(n) angivne knude(r)</translation>
</message>
<message>
<location line="+3"/>
<source>Corrupted block database detected</source>
<translation>Ødelagt blokdatabase opdaget</translation>
</message>
<message>
<location line="+1"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>Find egen IP-adresse (standard: 1 når der lyttes og ingen -externalip)</translation>
</message>
<message>
<location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation>Ønsker du at genbygge blokdatabasen nu?</translation>
</message>
<message>
<location line="+2"/>
<source>Error initializing block database</source>
<translation>Klargøring af blokdatabase mislykkedes</translation>
</message>
<message>
<location line="+1"/>
<source>Error initializing wallet database environment %s!</source>
<translation>Klargøring af tegnebogsdatabasemiljøet %s mislykkedes!</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading block database</source>
<translation>Indlæsning af blokdatabase mislykkedes</translation>
</message>
<message>
<location line="+4"/>
<source>Error opening block database</source>
<translation>Åbning af blokdatabase mislykkedes</translation>
</message>
<message>
<location line="+2"/>
<source>Error: Disk space is low!</source>
<translation>Fejl: Mangel på ledig diskplads!</translation>
</message>
<message>
<location line="+1"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation>Fejl: Tegnebog låst, kan ikke oprette transaktion!</translation>
</message>
<message>
<location line="+1"/>
<source>Error: system error: </source>
<translation>Fejl: systemfejl: </translation>
</message>
<message>
<location line="+1"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>Lytning på enhver port mislykkedes. Brug -listen=0, hvis du ønsker dette.</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block info</source>
<translation>Læsning af blokinformation mislykkedes</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block</source>
<translation>Læsning af blok mislykkedes</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to sync block index</source>
<translation>Synkronisering af blokindeks mislykkedes</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block index</source>
<translation>Skrivning af blokindeks mislykkedes</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block info</source>
<translation>Skrivning af blokinformation mislykkedes</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block</source>
<translation>Skrivning af blok mislykkedes</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write file info</source>
<translation>Skrivning af filinformation mislykkedes</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write to coin database</source>
<translation>Skrivning af mpaycoin-database mislykkedes</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write transaction index</source>
<translation>Skrivning af transaktionsindeks mislykkedes</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write undo data</source>
<translation>Skrivning af genskabelsesdata mislykkedes</translation>
</message>
<message>
<location line="+2"/>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation>Find ligeværdige ved DNS-opslag (standard: 1 hvis ikke -connect)</translation>
</message>
<message>
<location line="+1"/>
<source>Generate coins (default: 0)</source>
<translation>Generer mpaycoins (standard: 0)</translation>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation>Antal blokke som tjekkes ved opstart (0=alle, standard: 288)</translation>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-4, default: 3)</source>
<translation>Grundighed af verificéring af blokke (0-4, standard: 3)</translation>
</message>
<message>
<location line="+2"/>
<source>Incorrect or no genesis block found. Wrong datadir for network?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>Not enough file descriptors available.</source>
<translation>For få tilgængelige fildeskriptorer.</translation>
</message>
<message>
<location line="+8"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation>Genbyg blokkædeindeks fra nuværende blk000??.dat filer</translation>
</message>
<message>
<location line="+16"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation>Angiv antallet af tråde til at håndtere RPC-kald (standard: 4)</translation>
</message>
<message>
<location line="+7"/>
<source>Specify wallet file (within data directory)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Verifying blocks...</source>
<translation>Verificerer blokke...</translation>
</message>
<message>
<location line="+1"/>
<source>Verifying wallet...</source>
<translation>Verificerer tegnebog...</translation>
</message>
<message>
<location line="+1"/>
<source>Wallet %s resides outside data directory %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>You need to rebuild the database using -reindex to change -txindex</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-76"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation>Importerer blokke fra ekstern blk000??.dat fil</translation>
</message>
<message>
<location line="-76"/>
<source>Set the number of script verification threads (up to 16, 0 = auto, <0 = leave that many cores free, default: 0)</source>
<translation>Angiv nummeret af tråde til verificering af script (op til 16, 0 = automatisk, <0 = efterlad det antal kerner tilgængelige, standard: 0)</translation>
</message>
<message>
<location line="+78"/>
<source>Information</source>
<translation>Information</translation>
</message>
<message>
<location line="+3"/>
<source>Invalid -tor address: '%s'</source>
<translation>Ugyldig -tor adresse: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation>Ugyldigt beløb til -minrelaytxfee=<beløb>:'%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation>Ugyldigt beløb til -mintxfee=<beløb>:'%s'</translation>
</message>
<message>
<location line="+8"/>
<source>Maintain a full transaction index (default: 0)</source>
<translation>Vedligehold et komplet transaktionsindeks (standard: 0)</translation>
</message>
<message>
<location line="+2"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>Maksimum for modtagelsesbuffer pr. forbindelse, <n>*1000 bytes (standard: 5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>Maksimum for afsendelsesbuffer pr. forbindelse, <n>*1000 bytes (standard: 1000)</translation>
</message>
<message>
<location line="+2"/>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation>Accepter kun blokkæde, som matcher indbyggede kontrolposter (standard: 1)</translation>
</message>
<message>
<location line="+1"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>Tilslut kun til knuder i netværk <net> (IPv4, IPv6 eller Tor)</translation>
</message>
<message>
<location line="+2"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation>Skriv ekstra fejlsøgningsinformation. Indebærer alle andre -debug* tilvalg</translation>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation>Skriv ekstra netværksfejlsøgningsinformation</translation>
</message>
<message>
<location line="+2"/>
<source>Prepend debug output with timestamp</source>
<translation>Tilføj fejlsøgningsoutput med tidsstempel</translation>
</message>
<message>
<location line="+5"/>
<source>SSL options: (see the Mpaycoin Wiki for SSL setup instructions)</source>
<translation>SSL-indstillinger: (se Mpaycoin Wiki for SSL-opsætningsinstruktioner)</translation>
</message>
<message>
<location line="+1"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation>Angiv version af SOCKS-proxyen (4-5, standard: 5)</translation>
</message>
<message>
<location line="+3"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Send sporings-/fejlsøgningsinformation til konsollen i stedet for debug.log filen</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation>Send sporings-/fejlsøgningsinformation til fejlsøgningsprogrammet</translation>
</message>
<message>
<location line="+5"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation>Angiv maksimumblokstørrelse i bytes (standard: 250000)</translation>
</message>
<message>
<location line="+1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>Angiv minimumsblokstørrelse i bytes (standard: 0)</translation>
</message>
<message>
<location line="+2"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>Formindsk debug.log filen ved klientopstart (standard: 1 hvis ikke -debug)</translation>
</message>
<message>
<location line="+1"/>
<source>Signing transaction failed</source>
<translation>Underskrift af transaktion mislykkedes</translation>
</message>
<message>
<location line="+2"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>Angiv tilslutningstimeout i millisekunder (standard: 5000)</translation>
</message>
<message>
<location line="+5"/>
<source>System error: </source>
<translation>Systemfejl: </translation>
</message>
<message>
<location line="+4"/>
<source>Transaction amount too small</source>
<translation>Transaktionsbeløb er for lavt</translation>
</message>
<message>
<location line="+1"/>
<source>Transaction amounts must be positive</source>
<translation>Transaktionsbeløb skal være positive</translation>
</message>
<message>
<location line="+1"/>
<source>Transaction too large</source>
<translation>Transaktionen er for stor</translation>
</message>
<message>
<location line="+7"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>Forsøg at bruge UPnP til at konfigurere den lyttende port (standard: 0)</translation>
</message>
<message>
<location line="+1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Forsøg at bruge UPnP til at konfigurere den lyttende port (standard: 1 når lytter)</translation>
</message>
<message>
<location line="+1"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation>Brug proxy til at tilgå Tor Hidden Services (standard: som -proxy)</translation>
</message>
<message>
<location line="+2"/>
<source>Username for JSON-RPC connections</source>
<translation>Brugernavn til JSON-RPC-forbindelser</translation>
</message>
<message>
<location line="+5"/>
<source>Warning</source>
<translation>Advarsel</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>Advarsel: Denne version er forældet, opgradering påkrævet!</translation>
</message>
<message>
<location line="+2"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation>wallet.dat ødelagt, redning af data mislykkedes</translation>
</message>
<message>
<location line="-52"/>
<source>Password for JSON-RPC connections</source>
<translation>Adgangskode til JSON-RPC-forbindelser</translation>
</message>
<message>
<location line="-68"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>Tillad JSON-RPC-forbindelser fra bestemt IP-adresse</translation>
</message>
<message>
<location line="+77"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>Send kommandoer til knude, der kører på <ip> (standard: 127.0.0.1)</translation>
</message>
<message>
<location line="-121"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Udfør kommando, når den bedste blok ændres (%s i kommandoen erstattes med blokhash)</translation>
</message>
<message>
<location line="+149"/>
<source>Upgrade wallet to latest format</source>
<translation>Opgrader tegnebog til seneste format</translation>
</message>
<message>
<location line="-22"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>Angiv nøglepoolstørrelse til <n> (standard: 100)</translation>
</message>
<message>
<location line="-12"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Gennemsøg blokkæden for manglende tegnebogstransaktioner</translation>
</message>
<message>
<location line="+36"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>Brug OpenSSL (https) for JSON-RPC-forbindelser</translation>
</message>
<message>
<location line="-27"/>
<source>Server certificate file (default: server.cert)</source>
<translation>Servercertifikat-fil (standard: server.cert)</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>Serverens private nøgle (standard: server.pem)</translation>
</message>
<message>
<location line="-156"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>Acceptable ciphers (standard: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</translation>
</message>
<message>
<location line="+171"/>
<source>This help message</source>
<translation>Denne hjælpebesked</translation>
</message>
<message>
<location line="+6"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>Kunne ikke tildele %s på denne computer (bind returnerede fejl %d, %s)</translation>
</message>
<message>
<location line="-93"/>
<source>Connect through socks proxy</source>
<translation>Tilslut via SOCKS-proxy</translation>
</message>
<message>
<location line="-10"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Tillad DNS-opslag for -addnode, -seednode og -connect</translation>
</message>
<message>
<location line="+56"/>
<source>Loading addresses...</source>
<translation>Indlæser adresser...</translation>
</message>
<message>
<location line="-36"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>Fejl ved indlæsning af wallet.dat: Tegnebog ødelagt</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat: Wallet requires newer version of Mpaycoin</source>
<translation>Fejl ved indlæsning af wallet.dat: Tegnebog kræver en nyere version af Mpaycoin</translation>
</message>
<message>
<location line="+96"/>
<source>Wallet needed to be rewritten: restart Mpaycoin to complete</source>
<translation>Det var nødvendigt at genskrive tegnebogen: genstart Mpaycoin for at gennemføre</translation>
</message>
<message>
<location line="-98"/>
<source>Error loading wallet.dat</source>
<translation>Fejl ved indlæsning af wallet.dat</translation>
</message>
<message>
<location line="+29"/>
<source>Invalid -proxy address: '%s'</source>
<translation>Ugyldig -proxy adresse: '%s'</translation>
</message>
<message>
<location line="+57"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>Ukendt netværk anført i -onlynet: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>Ukendt -socks proxy-version: %i</translation>
</message>
<message>
<location line="-98"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>Kan ikke finde -bind adressen: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>Kan ikke finde -externalip adressen: '%s'</translation>
</message>
<message>
<location line="+45"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>Ugyldigt beløb for -paytxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount</source>
<translation>Ugyldigt beløb</translation>
</message>
<message>
<location line="-6"/>
<source>Insufficient funds</source>
<translation>Manglende dækning</translation>
</message>
<message>
<location line="+10"/>
<source>Loading block index...</source>
<translation>Indlæser blokindeks...</translation>
</message>
<message>
<location line="-58"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Tilføj en knude til at forbinde til og forsøg at holde forbindelsen åben</translation>
</message>
<message>
<location line="-25"/>
<source>Unable to bind to %s on this computer. Mpaycoin is probably already running.</source>
<translation>Kunne ikke tildele %s på denne computer. Mpaycoin kører sikkert allerede.</translation>
</message>
<message>
<location line="+64"/>
<source>Fee per KB to add to transactions you send</source>
<translation>Gebyr pr. kB, som skal tilføjes til transaktioner, du sender</translation>
</message>
<message>
<location line="+20"/>
<source>Loading wallet...</source>
<translation>Indlæser tegnebog...</translation>
</message>
<message>
<location line="-53"/>
<source>Cannot downgrade wallet</source>
<translation>Kan ikke nedgradere tegnebog</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot write default address</source>
<translation>Kan ikke skrive standardadresse</translation>
</message>
<message>
<location line="+65"/>
<source>Rescanning...</source>
<translation>Genindlæser...</translation>
</message>
<message>
<location line="-58"/>
<source>Done loading</source>
<translation>Indlæsning gennemført</translation>
</message>
<message>
<location line="+84"/>
<source>To use the %s option</source>
<translation>For at bruge %s mulighed</translation>
</message>
<message>
<location line="-76"/>
<source>Error</source>
<translation>Fejl</translation>
</message>
<message>
<location line="-31"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>Du skal angive rpcpassword=<password> i konfigurationsfilen:
%s
Hvis filen ikke eksisterer, opret den og giv ingen andre end ejeren læserettighed.</translation>
</message>
</context>
</TS> | <translation>SOCKS-version af proxyen (f.eks. 5)</translation> |
server_api.ts | import { Asset, AssetType } from "stellar-base";
import { Omit } from "utility-types";
import { Horizon } from "./horizon_api";
/* tslint:disable-next-line: no-namespace */
export namespace ServerApi {
export interface CollectionPage<
T extends Horizon.BaseResponse = Horizon.BaseResponse
> {
records: T[];
next: () => Promise<CollectionPage<T>>;
prev: () => Promise<CollectionPage<T>>;
}
export interface CallFunctionTemplateOptions {
cursor?: string | number;
limit?: number;
order?: "asc" | "desc";
}
export type CallFunction<
T extends Horizon.BaseResponse = Horizon.BaseResponse
> = () => Promise<T>;
export type CallCollectionFunction<
T extends Horizon.BaseResponse = Horizon.BaseResponse
> = (options?: CallFunctionTemplateOptions) => Promise<CollectionPage<T>>;
export interface AccountRecordSigners {
key: string;
weight: number;
type: string;
}
export interface AccountRecord extends Horizon.BaseResponse {
id: string;
paging_token: string;
account_id: string;
sequence: string;
subentry_count: number;
home_domain?: string;
inflation_destination?: string;
last_modified_ledger: number;
thresholds: Horizon.AccountThresholds;
flags: Horizon.Flags;
balances: Horizon.BalanceLine[];
signers: AccountRecordSigners[];
data: (options: { value: string }) => Promise<{ value: string }>;
data_attr: {
[key: string]: string;
};
sponsor?: string;
num_sponsoring: number;
num_sponsored: number;
effects: CallCollectionFunction<EffectRecord>;
offers: CallCollectionFunction<OfferRecord>; | trades: CallCollectionFunction<TradeRecord>;
}
export interface ClaimableBalanceRecord extends Horizon.BaseResponse {
id: string;
paging_token: string;
asset: string;
amount: string;
sponsor?: string;
last_modified_ledger: number;
claimants: Horizon.Claimant[];
}
export interface EffectRecord extends Horizon.BaseResponse {
account: string;
paging_token: string;
type_i: string;
type: string;
created_at: string;
id: string;
// account_debited / credited / trustline_created
amount?: any;
asset_type?: string;
asset_code?: string;
asset_issuer?: string;
// trustline_created / removed
limit?: string;
// signer_created
public_key?: string;
// trade
offer_id?: number | string;
bought_amount?: string;
bought_asset_type?: string;
bought_asset_code?: string;
bought_asset_issuer?: string;
sold_amount?: string;
sold_asset_type?: string;
sold_asset_code?: string;
sold_asset_issuer?: string;
// account_created
starting_balance?: string;
// These were retrieved from the go repo, not through direct observation
// so they could be wrong!
// account thresholds updated
low_threshold?: number;
med_threshold?: number;
high_threshold?: number;
// home domain updated
home_domain?: string;
// account flags updated
auth_required_flag?: boolean;
auth_revokable_flag?: boolean;
// seq bumped
new_seq?: number | string;
// signer created / removed / updated
weight?: number;
key?: string;
// trustline authorized / deauthorized
trustor?: string;
// claimable_balance_created
// claimable_balance_claimant_created
// claimable_balance_claimed
balance_id?: string;
asset?: string;
predicate?: Horizon.Predicate;
// account_sponsorship_created
// trustline_sponsorship_created
// claimable_balance_sponsorship_created
// signer_sponsorship_created
// data_sponsorship_created
sponsor?: string;
signer?: string;
data_name?: string;
// account_sponsorship_updated
// account_sponsorship_removed
// trustline_sponsorship_updated
// trustline_sponsorship_removed
// claimable_balance_sponsorship_updated
// claimable_balance_sponsorship_removed
// signer_sponsorship_updated
// signer_sponsorship_removed
// data_sponsorship_updated
// data_sponsorship_removed
new_sponsor?: string;
former_sponsor?: string;
operation?: CallFunction<OperationRecord>;
precedes?: CallFunction<EffectRecord>;
succeeds?: CallFunction<EffectRecord>;
}
export interface LedgerRecord extends Horizon.BaseResponse {
id: string;
paging_token: string;
hash: string;
prev_hash: string;
sequence: number;
transaction_count: number;
operation_count: number;
tx_set_operation_count: number | null;
closed_at: string;
total_coins: string;
fee_pool: string;
base_fee: number;
base_reserve: string;
max_tx_set_size: number;
protocol_version: number;
header_xdr: string;
base_fee_in_stroops: number;
base_reserve_in_stroops: number;
effects: CallCollectionFunction<EffectRecord>;
operations: CallCollectionFunction<OperationRecord>;
self: CallFunction<LedgerRecord>;
transactions: CallCollectionFunction<TransactionRecord>;
}
export interface OfferAsset {
asset_type: AssetType;
asset_code?: string;
asset_issuer?: string;
}
export interface OfferRecord extends Horizon.BaseResponse {
id: number | string;
paging_token: string;
seller: string;
selling: OfferAsset;
buying: OfferAsset;
amount: string;
price_r: Horizon.PriceRShorthand;
price: string;
last_modified_ledger: number;
last_modified_time: string;
sponsor?: string;
}
import OperationResponseType = Horizon.OperationResponseType;
import OperationResponseTypeI = Horizon.OperationResponseTypeI;
export interface BaseOperationRecord<
T extends OperationResponseType = OperationResponseType,
TI extends OperationResponseTypeI = OperationResponseTypeI
> extends Horizon.BaseOperationResponse<T, TI> {
self: CallFunction<OperationRecord>;
succeeds: CallFunction<OperationRecord>;
precedes: CallFunction<OperationRecord>;
effects: CallCollectionFunction<EffectRecord>;
transaction: CallFunction<TransactionRecord>;
}
export interface CreateAccountOperationRecord
extends BaseOperationRecord<
OperationResponseType.createAccount,
OperationResponseTypeI.createAccount
>,
Horizon.CreateAccountOperationResponse {}
export interface PaymentOperationRecord
extends BaseOperationRecord<
OperationResponseType.payment,
OperationResponseTypeI.payment
>,
Horizon.PaymentOperationResponse {
sender: CallFunction<AccountRecord>;
receiver: CallFunction<AccountRecord>;
}
export interface PathPaymentOperationRecord
extends BaseOperationRecord<
OperationResponseType.pathPayment,
OperationResponseTypeI.pathPayment
>,
Horizon.PathPaymentOperationResponse {}
export interface PathPaymentStrictSendOperationRecord
extends BaseOperationRecord<
OperationResponseType.pathPaymentStrictSend,
OperationResponseTypeI.pathPaymentStrictSend
>,
Horizon.PathPaymentStrictSendOperationResponse {}
export interface ManageOfferOperationRecord
extends BaseOperationRecord<
OperationResponseType.manageOffer,
OperationResponseTypeI.manageOffer
>,
Horizon.ManageOfferOperationResponse {}
export interface PassiveOfferOperationRecord
extends BaseOperationRecord<
OperationResponseType.createPassiveOffer,
OperationResponseTypeI.createPassiveOffer
>,
Horizon.PassiveOfferOperationResponse {}
export interface SetOptionsOperationRecord
extends BaseOperationRecord<
OperationResponseType.setOptions,
OperationResponseTypeI.setOptions
>,
Horizon.SetOptionsOperationResponse {}
export interface ChangeTrustOperationRecord
extends BaseOperationRecord<
OperationResponseType.changeTrust,
OperationResponseTypeI.changeTrust
>,
Horizon.ChangeTrustOperationResponse {}
export interface AllowTrustOperationRecord
extends BaseOperationRecord<
OperationResponseType.allowTrust,
OperationResponseTypeI.allowTrust
>,
Horizon.AllowTrustOperationResponse {}
export interface AccountMergeOperationRecord
extends BaseOperationRecord<
OperationResponseType.accountMerge,
OperationResponseTypeI.accountMerge
>,
Horizon.AccountMergeOperationResponse {}
export interface InflationOperationRecord
extends BaseOperationRecord<
OperationResponseType.inflation,
OperationResponseTypeI.inflation
>,
Horizon.InflationOperationResponse {}
export interface ManageDataOperationRecord
extends BaseOperationRecord<
OperationResponseType.manageData,
OperationResponseTypeI.manageData
>,
Horizon.ManageDataOperationResponse {}
export interface BumpSequenceOperationRecord
extends BaseOperationRecord<
OperationResponseType.bumpSequence,
OperationResponseTypeI.bumpSequence
>,
Horizon.BumpSequenceOperationResponse {}
export interface CreateClaimableBalanceOperationRecord
extends BaseOperationRecord<
OperationResponseType.createClaimableBalance,
OperationResponseTypeI.createClaimableBalance
>,
Horizon.CreateClaimableBalanceOperationResponse {}
export interface ClaimClaimableBalanceOperationRecord
extends BaseOperationRecord<
OperationResponseType.claimClaimableBalance,
OperationResponseTypeI.claimClaimableBalance
>,
Horizon.ClaimClaimableBalanceOperationResponse {}
export interface BeginSponsoringFutureReservesOperationRecord
extends BaseOperationRecord<
OperationResponseType.beginSponsoringFutureReserves,
OperationResponseTypeI.beginSponsoringFutureReserves
>,
Horizon.BeginSponsoringFutureReservesOperationResponse {}
export interface EndSponsoringFutureReservesOperationRecord
extends BaseOperationRecord<
OperationResponseType.endSponsoringFutureReserves,
OperationResponseTypeI.endSponsoringFutureReserves
>,
Horizon.EndSponsoringFutureReservesOperationResponse {}
export interface RevokeSponsorshipOperationRecord
extends BaseOperationRecord<
OperationResponseType.revokeSponsorship,
OperationResponseTypeI.revokeSponsorship
>,
Horizon.RevokeSponsorshipOperationResponse {}
export type OperationRecord =
| CreateAccountOperationRecord
| PaymentOperationRecord
| PathPaymentOperationRecord
| ManageOfferOperationRecord
| PassiveOfferOperationRecord
| SetOptionsOperationRecord
| ChangeTrustOperationRecord
| AllowTrustOperationRecord
| AccountMergeOperationRecord
| InflationOperationRecord
| ManageDataOperationRecord
| BumpSequenceOperationRecord
| PathPaymentStrictSendOperationRecord
| CreateClaimableBalanceOperationRecord
| ClaimClaimableBalanceOperationRecord
| BeginSponsoringFutureReservesOperationRecord
| EndSponsoringFutureReservesOperationRecord
| RevokeSponsorshipOperationRecord;
export interface TradeRecord extends Horizon.BaseResponse {
id: string;
paging_token: string;
ledger_close_time: string;
offer_id: string;
base_offer_id: string;
base_account: string;
base_amount: string;
base_asset_type: string;
base_asset_code?: string;
base_asset_issuer?: string;
counter_offer_id: string;
counter_account: string;
counter_amount: string;
counter_asset_type: string;
counter_asset_code?: string;
counter_asset_issuer?: string;
base_is_seller: boolean;
base: CallFunction<AccountRecord>;
counter: CallFunction<AccountRecord>;
operation: CallFunction<OperationRecord>;
}
export interface TransactionRecord
extends Omit<Horizon.TransactionResponse, "ledger"> {
ledger_attr: Horizon.TransactionResponse["ledger"];
account: CallFunction<AccountRecord>;
effects: CallCollectionFunction<EffectRecord>;
ledger: CallFunction<LedgerRecord>;
operations: CallCollectionFunction<OperationRecord>;
precedes: CallFunction<TransactionRecord>;
self: CallFunction<TransactionRecord>;
succeeds: CallFunction<TransactionRecord>;
}
export interface AssetRecord extends Horizon.BaseResponse {
asset_type: AssetType.credit4 | AssetType.credit12;
asset_code: string;
asset_issuer: string;
paging_token: string;
amount: string;
num_accounts: number;
flags: Horizon.Flags;
}
export interface OrderbookRecord extends Horizon.BaseResponse {
bids: Array<{
price_r: {
d: number;
n: number;
};
price: string;
amount: string;
}>;
asks: Array<{
price_r: {
d: number;
n: number;
};
price: string;
amount: string;
}>;
base: Asset;
counter: Asset;
}
export interface PaymentPathRecord extends Horizon.BaseResponse {
path: Array<{
asset_code: string;
asset_issuer: string;
asset_type: string;
}>;
source_amount: string;
source_asset_type: string;
source_asset_code: string;
source_asset_issuer: string;
destination_amount: string;
destination_asset_type: string;
destination_asset_code: string;
destination_asset_issuer: string;
}
} | operations: CallCollectionFunction<OperationRecord>;
payments: CallCollectionFunction<PaymentOperationRecord>; |
Session.ts | import { IsBoolean, IsObject, IsString, IsOptional, IsArray } from '@jovotech/output';
export class Session {
@IsString()
@IsOptional()
id?: string;
@IsObject()
data!: Record<string, unknown>;
@IsOptional()
@IsArray() |
@IsBoolean()
end!: boolean;
} | state?: any[]; |
pool.rs | // Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::{
collections::VecDeque,
mem,
sync::{
mpsc::{channel, Receiver, Sender},
Arc,
},
thread::{self, JoinHandle},
time::{Duration, Instant},
};
use async_task::{Runnable, Task};
use slab::Slab;
use sync::{Condvar, Mutex};
use sys_util::{error, warn};
const DEFAULT_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10);
struct State {
tasks: VecDeque<Runnable>,
num_threads: usize,
num_idle: usize,
num_notified: usize,
worker_threads: Slab<JoinHandle<()>>,
exited_threads: Option<Receiver<usize>>,
exit: Sender<usize>,
shutting_down: bool,
}
fn run_blocking_thread(idx: usize, inner: Arc<Inner>, exit: Sender<usize>) {
let mut state = inner.state.lock();
while !state.shutting_down {
if let Some(runnable) = state.tasks.pop_front() {
drop(state);
runnable.run();
state = inner.state.lock();
continue;
}
// No more tasks so wait for more work.
state.num_idle += 1;
let (guard, result) = inner
.condvar
.wait_timeout_while(state, inner.keepalive, |s| {
!s.shutting_down && s.num_notified == 0
});
state = guard;
// If `state.num_notified > 0` then this was a real wakeup.
if state.num_notified > 0 {
state.num_notified -= 1;
continue;
}
// Only decrement the idle count if we timed out. Otherwise, it was decremented when new
// work was added to `state.tasks`.
if result.timed_out() {
state.num_idle = state
.num_idle
.checked_sub(1)
.expect("`num_idle` underflow on timeout");
break;
}
}
state.num_threads -= 1;
// If we're shutting down then the BlockingPool will take care of joining all the threads.
// Otherwise, we need to join the last worker thread that exited here.
let last_exited_thread = if let Some(exited_threads) = state.exited_threads.as_mut() {
exited_threads
.try_recv()
.map(|idx| state.worker_threads.remove(idx))
.ok()
} else {
None
};
// Drop the lock before trying to join the last exited thread.
drop(state);
if let Some(handle) = last_exited_thread {
let _ = handle.join();
}
if let Err(e) = exit.send(idx) {
error!("Failed to send thread exit event on channel: {}", e);
}
}
struct Inner {
state: Mutex<State>,
condvar: Condvar,
max_threads: usize,
keepalive: Duration,
}
impl Inner {
fn schedule(self: &Arc<Inner>, runnable: Runnable) {
let mut state = self.state.lock();
// If we're shutting down then nothing is going to run this task.
if state.shutting_down {
return;
}
state.tasks.push_back(runnable);
if state.num_idle == 0 {
// There are no idle threads. Spawn a new one if possible.
if state.num_threads < self.max_threads {
state.num_threads += 1;
let exit = state.exit.clone();
let entry = state.worker_threads.vacant_entry();
let idx = entry.key();
let inner = self.clone();
entry.insert(
thread::Builder::new()
.name(format!("blockingPool{}", idx))
.spawn(move || run_blocking_thread(idx, inner, exit))
.unwrap(),
);
}
} else {
// We have idle threads, wake one up.
state.num_idle -= 1;
state.num_notified += 1;
self.condvar.notify_one();
}
}
}
#[derive(Debug, thiserror::Error)]
#[error("{0} BlockingPool threads did not exit in time and will be detached")]
pub struct ShutdownTimedOut(usize);
/// A thread pool for running work that may block.
///
/// It is generally discouraged to do any blocking work inside an async function. However, this is
/// sometimes unavoidable when dealing with interfaces that don't provide async variants. In this
/// case callers may use the `BlockingPool` to run the blocking work on a different thread and
/// `await` for its result to finish, which will prevent blocking the main thread of the
/// application.
///
/// Since the blocking work is sent to another thread, users should be careful when using the
/// `BlockingPool` for latency-sensitive operations. Additionally, the `BlockingPool` is intended to
/// be used for work that will eventually complete on its own. Users who want to spawn a thread
/// should just use `thread::spawn` directly.
///
/// There is no way to cancel work once it has been picked up by one of the worker threads in the
/// `BlockingPool`. Dropping or shutting down the pool will block up to a timeout (default 10
/// seconds) to wait for any active blocking work to finish. Any threads running tasks that have not
/// completed by that time will be detached.
///
/// # Examples
///
/// Spawn a task to run in the `BlockingPool` and await on its result.
///
/// ```edition2018
/// use cros_async::BlockingPool;
///
/// # async fn do_it() {
/// let pool = BlockingPool::default();
///
/// let res = pool.spawn(move || {
/// // Do some CPU-intensive or blocking work here.
///
/// 42
/// }).await;
///
/// assert_eq!(res, 42);
/// # }
/// # cros_async::block_on(do_it());
/// ```
pub struct BlockingPool {
inner: Arc<Inner>,
}
impl BlockingPool {
/// Create a new `BlockingPool`.
///
/// The `BlockingPool` will never spawn more than `max_threads` threads to do work, regardless
/// of the number of tasks that are added to it. This value should be set relatively low (for
/// example, the number of CPUs on the machine) if the pool is intended to run CPU intensive
/// work or it should be set relatively high (128 or more) if the pool is intended to be used
/// for various IO operations that cannot be completed asynchronously. The default value is 256.
///
/// Worker threads are spawned on demand when new work is added to the pool and will
/// automatically exit after being idle for some time so there is no overhead for setting
/// `max_threads` to a large value when there is little to no work assigned to the
/// `BlockingPool`. `keepalive` determines the idle duration after which the worker thread will
/// exit. The default value is 10 seconds.
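    ///
    /// A construction sketch (the specific values are illustrative, not defaults):
    ///
    /// ```edition2018
    /// use std::time::Duration;
    /// use cros_async::BlockingPool;
    ///
    /// // A small pool intended for CPU-bound work, with a 10 second keepalive.
    /// let pool = BlockingPool::new(4, Duration::from_secs(10));
    /// ```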
pub fn new(max_threads: usize, keepalive: Duration) -> BlockingPool {
let (exit, exited_threads) = channel();
BlockingPool {
inner: Arc::new(Inner {
state: Mutex::new(State {
tasks: VecDeque::new(),
num_threads: 0,
num_idle: 0,
num_notified: 0,
worker_threads: Slab::new(),
exited_threads: Some(exited_threads),
exit,
shutting_down: false,
}),
condvar: Condvar::new(),
max_threads,
keepalive,
}),
}
}
/// Like new but with pre-allocating capacity for up to `max_threads`. | pub fn with_capacity(max_threads: usize, keepalive: Duration) -> BlockingPool {
let (exit, exited_threads) = channel();
BlockingPool {
inner: Arc::new(Inner {
state: Mutex::new(State {
tasks: VecDeque::new(),
num_threads: 0,
num_idle: 0,
num_notified: 0,
worker_threads: Slab::with_capacity(max_threads),
exited_threads: Some(exited_threads),
exit,
shutting_down: false,
}),
condvar: Condvar::new(),
max_threads,
keepalive,
}),
}
}
/// Spawn a task to run in the `BlockingPool`.
///
/// Callers may `await` the returned `Task` to be notified when the work is completed.
///
/// # Panics
///
/// `await`ing a `Task` after dropping the `BlockingPool` or calling `BlockingPool::shutdown`
/// will panic if the work was not completed before the pool was shut down.
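    ///
    /// A usage sketch, mirroring the crate-level example above:
    ///
    /// ```edition2018
    /// use cros_async::BlockingPool;
    ///
    /// # async fn example() {
    /// let pool = BlockingPool::default();
    /// // The closure runs on a worker thread; awaiting the task yields its return value.
    /// let answer = pool.spawn(|| 40 + 2).await;
    /// assert_eq!(answer, 42);
    /// # }
    /// # cros_async::block_on(example());
    /// ```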
pub fn spawn<F, R>(&self, f: F) -> Task<R>
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
let raw = Arc::downgrade(&self.inner);
let schedule = move |runnable| {
if let Some(i) = raw.upgrade() {
i.schedule(runnable);
}
};
let (runnable, task) = async_task::spawn(async move { f() }, schedule);
runnable.schedule();
task
}
/// Shut down the `BlockingPool`.
///
/// If `deadline` is provided then this will block until either all worker threads exit or the
/// deadline is exceeded. If `deadline` is not given then this will block indefinitely until all
/// worker threads exit. Any work that was added to the `BlockingPool` but not yet picked up by
/// a worker thread will not complete and `await`ing on the `Task` for that work will panic.
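    ///
    /// A usage sketch (the 10 second deadline is illustrative):
    ///
    /// ```edition2018
    /// use std::time::{Duration, Instant};
    /// use cros_async::BlockingPool;
    ///
    /// let pool = BlockingPool::default();
    /// // With no pending work this returns Ok(()); if worker threads were still busy
    /// // past the deadline, they would be detached and an error returned instead.
    /// pool.shutdown(Some(Instant::now() + Duration::from_secs(10))).unwrap();
    /// ```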
pub fn shutdown(&self, deadline: Option<Instant>) -> Result<(), ShutdownTimedOut> {
let mut state = self.inner.state.lock();
if state.shutting_down {
// We've already shut down this BlockingPool.
return Ok(());
}
state.shutting_down = true;
let exited_threads = state.exited_threads.take().expect("exited_threads missing");
let unfinished_tasks = mem::replace(&mut state.tasks, VecDeque::new());
let mut worker_threads = mem::replace(&mut state.worker_threads, Slab::new());
drop(state);
self.inner.condvar.notify_all();
// Cancel any unfinished work after releasing the lock.
drop(unfinished_tasks);
// Now wait for all worker threads to exit.
if let Some(deadline) = deadline {
let mut now = Instant::now();
while now < deadline && !worker_threads.is_empty() {
if let Ok(idx) = exited_threads.recv_timeout(deadline - now) {
let _ = worker_threads.remove(idx).join();
}
now = Instant::now();
}
// Any threads that have not yet joined will just be detached.
if !worker_threads.is_empty() {
return Err(ShutdownTimedOut(worker_threads.len()));
}
Ok(())
} else {
// Block indefinitely until all worker threads exit.
for handle in worker_threads.drain() {
let _ = handle.join();
}
Ok(())
}
}
}
impl Default for BlockingPool {
fn default() -> BlockingPool {
BlockingPool::new(256, Duration::from_secs(10))
}
}
impl Drop for BlockingPool {
fn drop(&mut self) {
if let Err(e) = self.shutdown(Some(Instant::now() + DEFAULT_SHUTDOWN_TIMEOUT)) {
warn!("{}", e);
}
}
}
#[cfg(test)]
mod test {
use std::{
sync::{Arc, Barrier},
thread,
time::{Duration, Instant},
};
use futures::{stream::FuturesUnordered, StreamExt};
use sync::{Condvar, Mutex};
use crate::{block_on, BlockingPool};
#[test]
fn blocking_sleep() {
let pool = BlockingPool::default();
let res = block_on(pool.spawn(|| 42));
assert_eq!(res, 42);
}
#[test]
fn fast_tasks_with_short_keepalive() {
let pool = BlockingPool::new(256, Duration::from_millis(1));
let streams = FuturesUnordered::new();
for _ in 0..2 {
for _ in 0..256 {
let task = pool.spawn(|| ());
streams.push(task);
}
thread::sleep(Duration::from_millis(1));
}
block_on(streams.collect::<Vec<_>>());
// The test passes if there are no panics, which would happen if one of the worker threads
// triggered an underflow on `pool.inner.state.num_idle`.
}
#[test]
fn more_tasks_than_threads() {
let pool = BlockingPool::new(4, Duration::from_secs(10));
let stream = (0..19)
.map(|_| pool.spawn(|| thread::sleep(Duration::from_millis(5))))
.collect::<FuturesUnordered<_>>();
let results = block_on(stream.collect::<Vec<_>>());
assert_eq!(results.len(), 19);
}
#[test]
fn shutdown() {
let pool = BlockingPool::default();
let stream = (0..19)
.map(|_| pool.spawn(|| thread::sleep(Duration::from_millis(5))))
.collect::<FuturesUnordered<_>>();
let results = block_on(stream.collect::<Vec<_>>());
assert_eq!(results.len(), 19);
pool.shutdown(Some(Instant::now() + Duration::from_secs(10)))
.unwrap();
let state = pool.inner.state.lock();
assert_eq!(state.num_threads, 0);
}
#[test]
fn keepalive_timeout() {
// Set the keepalive to a very low value so that threads will exit soon after they run out
// of work.
let pool = BlockingPool::new(7, Duration::from_millis(1));
let stream = (0..19)
.map(|_| pool.spawn(|| thread::sleep(Duration::from_millis(5))))
.collect::<FuturesUnordered<_>>();
let results = block_on(stream.collect::<Vec<_>>());
assert_eq!(results.len(), 19);
// Wait for all threads to exit.
let deadline = Instant::now() + Duration::from_secs(10);
while Instant::now() < deadline {
thread::sleep(Duration::from_millis(100));
let state = pool.inner.state.lock();
if state.num_threads == 0 {
break;
}
}
{
let state = pool.inner.state.lock();
assert_eq!(state.num_threads, 0);
assert_eq!(state.num_idle, 0);
}
}
#[test]
#[should_panic]
fn shutdown_with_pending_work() {
let pool = BlockingPool::new(1, Duration::from_secs(10));
let mu = Arc::new(Mutex::new(false));
let cv = Arc::new(Condvar::new());
// First spawn a thread that blocks the pool.
let task_mu = mu.clone();
let task_cv = cv.clone();
pool.spawn(move || {
let mut ready = task_mu.lock();
while !*ready {
ready = task_cv.wait(ready);
}
})
.detach();
// This task will never finish because we will shut down the pool first.
let unfinished = pool.spawn(|| 5);
// Spawn a thread to unblock the work we started earlier once it sees that the pool is
// shutting down.
let inner = pool.inner.clone();
thread::spawn(move || {
let mut state = inner.state.lock();
while !state.shutting_down {
state = inner.condvar.wait(state);
}
*mu.lock() = true;
cv.notify_all();
});
pool.shutdown(None).unwrap();
// This should panic.
assert_eq!(block_on(unfinished), 5);
}
#[test]
fn unfinished_worker_thread() {
let pool = BlockingPool::default();
let ready = Arc::new(Mutex::new(false));
let cv = Arc::new(Condvar::new());
let barrier = Arc::new(Barrier::new(2));
let thread_ready = ready.clone();
let thread_barrier = barrier.clone();
let thread_cv = cv.clone();
let task = pool.spawn(move || {
thread_barrier.wait();
let mut ready = thread_ready.lock();
while !*ready {
ready = thread_cv.wait(ready);
}
});
// Wait to shut down the pool until after the worker thread has started.
barrier.wait();
pool.shutdown(Some(Instant::now() + Duration::from_millis(5)))
.unwrap_err();
let num_threads = pool.inner.state.lock().num_threads;
assert_eq!(num_threads, 1);
// Now wake up the blocked task so we don't leak the thread.
*ready.lock() = true;
cv.notify_all();
block_on(task);
let deadline = Instant::now() + Duration::from_secs(10);
while Instant::now() < deadline {
thread::sleep(Duration::from_millis(100));
let state = pool.inner.state.lock();
if state.num_threads == 0 {
break;
}
}
{
let state = pool.inner.state.lock();
assert_eq!(state.num_threads, 0);
assert_eq!(state.num_idle, 0);
}
}
} | |
elementary.rs | use crate::cerbere::Cerbere;
use crate::dependencies;
use std::process::Command;
const SCHEMA: &str = "io.elementary.desktop.cerbere";
const KEY: &str = "monitored-processes";
fn check_gsettings() {
if !dependencies::check("gsettings") {
eprintln!("Missing gsettings (libglib2.0-0)");
std::process::exit(1);
}
}
fn util(fix: bool) {
check_gsettings();
let current = match Command::new("gsettings").args(&["get", SCHEMA, KEY]).output() {
Ok(v) => {
match v.status.success() {
true => String::from_utf8_lossy(&v.stdout).trim().to_string(),
false => {
eprintln!("gsettings did not exix successfully");
std::process::exit(1);
}
}
},
Err(e) => {
eprintln!("{:#?}", e);
std::process::exit(1);
}
};
let mut cerbere = Cerbere::from(current);
| cerbere.remove("'plank'");
} else {
cerbere.add("'plank'");
}
match Command::new("gsettings").args(&["set", SCHEMA, KEY, &cerbere.to_string()]).output() {
Ok(v) => {
if !v.status.success() {
eprintln!("gsettings did not exix successfully");
std::process::exit(1);
}
},
Err(e) => {
eprintln!("{:#?}", e);
std::process::exit(1);
}
}
println!("Success");
std::process::exit(0);
}
pub fn fix() {
util(true);
}
pub fn restore() {
util(false);
} | if fix { |
wijmo.angular2.chart.interaction.d.ts | /*!
*
* Wijmo Library 5.20191.603
* http://wijmo.com/
*
* Copyright(c) GrapeCity, Inc. All rights reserved.
*
* Licensed under the GrapeCity Commercial License.
* [email protected]
* wijmo.com/products/wijmo-5/license/
*
*/
/**
* {@module wijmo.angular2.chart.interaction}
* Contains Angular 2 components for the <b>wijmo.chart.interaction</b> module.
*
* <b>wijmo.angular2.chart.interaction</b> is an external TypeScript module that can be imported to your code
* using its ambient module name. For example:
*
* <pre>import * as wjInteraction from 'wijmo/wijmo.angular2.chart.interaction';
* import * as wjChart from 'wijmo/wijmo.angular2.chart';
*
* @Component({
* directives: [wjChart.WjFlexChart, wjInteraction.WjFlexChartRangeSelector, wjChart.WjFlexChartSeries],
* template: `
* <wj-flex-chart [itemsSource]="data" [bindingX]="'x'">
* <wj-flex-chart-range-selector></wj-flex-chart-range-selector>
* <wj-flex-chart-series [binding]="'y'"></wj-flex-chart-series>
* </wj-flex-chart>`,
* selector: 'my-cmp',
* })
* export class MyCmp {
* data: any[];
* }</pre>
*
*/
/**
*
*/
export declare var ___keepComment: any;
import { EventEmitter, AfterViewInit, ElementRef, Injector, OnInit, OnDestroy } from '@angular/core';
import { IWjComponentMetadata, IWjComponentMeta } from 'wijmo/wijmo.angular2.directiveBase';
import * as wjcChartInteraction from 'wijmo/wijmo.chart.interaction';
export declare var wjFlexChartRangeSelectorMeta: IWjComponentMeta;
/**
* Angular 2 component for the {@link wijmo.chart.interaction.RangeSelector} control.
*
* The <b>wj-flex-chart-range-selector</b> component must be
* contained in one of the following components:
* {@link wijmo.angular2.chart.WjFlexChart}
* or {@link wijmo.angular2.chart.finance.WjFinancialChart}.
*
* Use the <b>wj-flex-chart-range-selector</b> component to add <b>RangeSelector</b> controls to your
* Angular 2 applications. For details about Angular 2 markup syntax, see
* <a href="/wijmo/docs/GettingStarted/Angular-Components">Angular 2 Markup</a>.
*
* The <b>WjFlexChartRangeSelector</b> component is derived from the <b>RangeSelector</b> control and
* inherits all its properties, events and methods.
*/
export declare class WjFlexChartRangeSelector extends wjcChartInteraction.RangeSelector implements OnInit, OnDestroy, AfterViewInit {
static readonly meta: IWjComponentMetadata;
private _wjBehaviour;
/**
* Indicates whether the component has been initialized by Angular.
* Changes its value from false to true right before triggering the <b>initialized</b> event.
*/
isInitialized: boolean;
/**
* This event is triggered after the component has been initialized by Angular, that is
* all bound properties have been assigned and child components (if any) have been initialized.
*/
initialized: EventEmitter<any>;
/**
* Gets or sets a name of a property that this component is assigned to.
* Default value is ''.
*/
wjProperty: string;
/**
* Angular (EventEmitter) version of the Wijmo <b>rangeChanged</b> event for programmatic access.
* Use this event name if you want to subscribe to the Angular version of the event in code.
* In template bindings use the conventional <b>rangeChanged</b> Wijmo event name.
*/
rangeChangedNg: EventEmitter<any>;
constructor(elRef: ElementRef, injector: Injector, parentCmp: any);
/**
* If you create a custom component inherited from a Wijmo component, you can override this
* method and perform necessary initializations that you usually do in a class constructor.
* This method is called in the last line of a Wijmo component constructor and allows you
* to not declare your custom component's constructor at all, thus preventing you from a necessity
* to maintain constructor parameters and keep them in synch with Wijmo component's constructor parameters.
*/
created(): void;
ngOnInit(): void;
ngAfterViewInit(): void;
ngOnDestroy(): void;
}
export declare var wjFlexChartGesturesMeta: IWjComponentMeta;
/**
* Angular 2 component for the {@link wijmo.chart.interaction.ChartGestures} control.
*
* The <b>wj-flex-chart-gestures</b> component must be
* contained in one of the following components:
* {@link wijmo.angular2.chart.WjFlexChart}
* or {@link wijmo.angular2.chart.finance.WjFinancialChart}.
*
* Use the <b>wj-flex-chart-gestures</b> component to add <b>ChartGestures</b> controls to your
* Angular 2 applications. For details about Angular 2 markup syntax, see
* <a href="/wijmo/docs/GettingStarted/Angular-Components">Angular 2 Markup</a>.
*
* The <b>WjFlexChartGestures</b> component is derived from the <b>ChartGestures</b> control and
* inherits all its properties, events and methods.
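 *
 * A minimal markup sketch, assuming the same chart setup as the module example above:
 *
 * <pre><wj-flex-chart [itemsSource]="data" [bindingX]="'x'">
 *     <wj-flex-chart-gestures></wj-flex-chart-gestures>
 *     <wj-flex-chart-series [binding]="'y'"></wj-flex-chart-series>
 * </wj-flex-chart></pre>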
*/
export declare class | extends wjcChartInteraction.ChartGestures implements OnInit, OnDestroy, AfterViewInit {
static readonly meta: IWjComponentMetadata;
private _wjBehaviour;
/**
* Indicates whether the component has been initialized by Angular.
* Changes its value from false to true right before triggering the <b>initialized</b> event.
*/
isInitialized: boolean;
/**
* This event is triggered after the component has been initialized by Angular, that is
* all bound properties have been assigned and child components (if any) have been initialized.
*/
initialized: EventEmitter<any>;
/**
* Gets or sets a name of a property that this component is assigned to.
* Default value is ''.
*/
wjProperty: string;
constructor(elRef: ElementRef, injector: Injector, parentCmp: any);
/**
* If you create a custom component inherited from a Wijmo component, you can override this
* method and perform necessary initializations that you usually do in a class constructor.
* This method is called in the last line of a Wijmo component constructor and allows you
* to not declare your custom component's constructor at all, thus preventing you from a necessity
* to maintain constructor parameters and keep them in synch with Wijmo component's constructor parameters.
*/
created(): void;
ngOnInit(): void;
ngAfterViewInit(): void;
ngOnDestroy(): void;
}
export declare class WjChartInteractionModule {
}
| WjFlexChartGestures |
star_wars_data.py | """This defines a basic set of data for our Star Wars Schema.
This data is hard coded for the sake of the demo, but you could imagine fetching this
data from a backend service rather than from hardcoded JSON objects in a more complex
demo.
"""
from typing import Collection, Iterator
__all__ = ["get_droid", "get_friends", "get_hero", "get_human", "get_secret_backstory"]
# These are classes which correspond to the schema.
# They represent the shape of the data visited during field resolution.
class Character:
id: str
name: str
friends: Collection[str]
appearsIn: Collection[str]
# noinspection PyPep8Naming
class Human(Character):
type = "Human"
homePlanet: str
# noinspection PyShadowingBuiltins
def __init__(self, id, name, friends, appearsIn, homePlanet):
|
# noinspection PyPep8Naming
class Droid(Character):
type = "Droid"
primaryFunction: str
# noinspection PyShadowingBuiltins
def __init__(self, id, name, friends, appearsIn, primaryFunction):
self.id, self.name = id, name
self.friends, self.appearsIn = friends, appearsIn
self.primaryFunction = primaryFunction
luke = Human(
id="1000",
name="Luke Skywalker",
friends=["1002", "1003", "2000", "2001"],
appearsIn=[4, 5, 6],
homePlanet="Tatooine",
)
vader = Human(
id="1001",
name="Darth Vader",
friends=["1004"],
appearsIn=[4, 5, 6],
homePlanet="Tatooine",
)
han = Human(
id="1002",
name="Han Solo",
friends=["1000", "1003", "2001"],
appearsIn=[4, 5, 6],
homePlanet=None,
)
leia = Human(
id="1003",
name="Leia Organa",
friends=["1000", "1002", "2000", "2001"],
appearsIn=[4, 5, 6],
homePlanet="Alderaan",
)
tarkin = Human(
id="1004", name="Wilhuff Tarkin", friends=["1001"], appearsIn=[4], homePlanet=None
)
human_data = {"1000": luke, "1001": vader, "1002": han, "1003": leia, "1004": tarkin}
threepio = Droid(
id="2000",
name="C-3PO",
friends=["1000", "1002", "1003", "2001"],
appearsIn=[4, 5, 6],
primaryFunction="Protocol",
)
artoo = Droid(
id="2001",
name="R2-D2",
friends=["1000", "1002", "1003"],
appearsIn=[4, 5, 6],
primaryFunction="Astromech",
)
droid_data = {"2000": threepio, "2001": artoo}
# noinspection PyShadowingBuiltins
def get_character(id: str) -> Character:
"""Helper function to get a character by ID."""
return human_data.get(id) or droid_data.get(id) # type: ignore
def get_friends(character: Character) -> Iterator[Character]:
"""Allows us to query for a character's friends."""
return map(get_character, character.friends)
def get_hero(episode: int) -> Character:
"""Allows us to fetch the undisputed hero of the trilogy, R2-D2."""
if episode == 5:
# Luke is the hero of Episode V.
return luke
# Artoo is the hero otherwise.
return artoo
# noinspection PyShadowingBuiltins
def get_human(id: str) -> Human:
"""Allows us to query for the human with the given id."""
return human_data.get(id) # type: ignore
# noinspection PyShadowingBuiltins
def get_droid(id: str) -> Droid:
"""Allows us to query for the droid with the given id."""
return droid_data.get(id) # type: ignore
# noinspection PyUnusedLocal
def get_secret_backstory(character: Character) -> str:
"""Raise an error when attempting to get the secret backstory."""
raise RuntimeError("secretBackstory is secret.")
| self.id, self.name = id, name
self.friends, self.appearsIn = friends, appearsIn
self.homePlanet = homePlanet |
sat_cuboid_triangle.rs | use crate::math::{Isometry, Real, Vector};
#[cfg(feature = "dim3")]
use crate::query::sat;
use crate::shape::{Cuboid, Triangle};
#[cfg(feature = "dim2")]
use crate::{query::sat::support_map_support_map_compute_separation, shape::SupportMap};
/// Finds the best separating edge between a cuboid and a triangle.
///
/// All combinations of edges from the cuboid and the triangle are taken into
/// account.
#[cfg(feature = "dim3")]
pub fn cuboid_triangle_find_local_separating_edge_twoway(
cube1: &Cuboid,
triangle2: &Triangle,
pos12: &Isometry<Real>,
) -> (Real, Vector<Real>) {
let x2 = pos12 * (triangle2.b - triangle2.a);
let y2 = pos12 * (triangle2.c - triangle2.b);
let z2 = pos12 * (triangle2.a - triangle2.c);
    // We have 3 * 3 = 9 axes to test.
let axes = [
        // Vector::{x, y, z}().cross(x2)
Vector::new(0.0, -x2.z, x2.y),
Vector::new(x2.z, 0.0, -x2.x),
Vector::new(-x2.y, x2.x, 0.0),
// Vector::{x, y ,z}().cross(y2)
Vector::new(0.0, -y2.z, y2.y),
Vector::new(y2.z, 0.0, -y2.x),
Vector::new(-y2.y, y2.x, 0.0),
        // Vector::{x, y, z}().cross(z2)
Vector::new(0.0, -z2.z, z2.y),
Vector::new(z2.z, 0.0, -z2.x),
Vector::new(-z2.y, z2.x, 0.0),
];
sat::cuboid_support_map_find_local_separating_edge_twoway(cube1, triangle2, &axes, pos12)
}
/// Finds the best separating normal between a triangle and a convex shape implementing the `SupportMap` trait.
///
/// Only the normals of `triangle1` are tested.
#[cfg(feature = "dim2")]
pub fn triangle_support_map_find_local_separating_normal_oneway(
triangle1: &Triangle,
shape2: &impl SupportMap,
pos12: &Isometry<Real>,
) -> (Real, Vector<Real>) {
let mut best_sep = -Real::MAX;
let mut best_normal = Vector::zeros();
for edge in &triangle1.edges() {
if let Some(normal) = edge.normal() {
let sep = support_map_support_map_compute_separation(triangle1, shape2, pos12, &normal);
if sep > best_sep {
best_sep = sep;
best_normal = *normal;
}
}
}
(best_sep, best_normal)
}
/// Finds the best separating normal between a triangle and a cuboid.
///
/// Only the normals of `triangle1` are tested.
#[cfg(feature = "dim2")]
pub fn triangle_cuboid_find_local_separating_normal_oneway(
triangle1: &Triangle,
shape2: &Cuboid,
pos12: &Isometry<Real>,
) -> (Real, Vector<Real>) |
/// Finds the best separating normal a triangle and a cuboid.
///
/// Only the normals of `triangle1` are tested.
#[cfg(feature = "dim3")]
pub fn triangle_cuboid_find_local_separating_normal_oneway(
triangle1: &Triangle,
shape2: &Cuboid,
pos12: &Isometry<Real>,
) -> (Real, Vector<Real>) {
sat::point_cuboid_find_local_separating_normal_oneway(
triangle1.a,
triangle1.normal(),
shape2,
pos12,
)
}
| {
triangle_support_map_find_local_separating_normal_oneway(triangle1, shape2, pos12)
} |
namespace.rs | use super::pytype::PyTypeRef;
use crate::function::KwArgs;
use crate::pyobject::{PyClassImpl, PyContext, PyRef, PyResult, PyValue};
use crate::vm::VirtualMachine;
/// A simple attribute-based namespace.
///
/// SimpleNamespace(**kwargs)
#[pyclass(module = false, name = "SimpleNamespace")]
#[derive(Debug)]
pub struct PyNamespace;
impl PyValue for PyNamespace {
fn class(vm: &VirtualMachine) -> &PyTypeRef {
&vm.ctx.types.namespace_type
}
}
#[pyimpl(flags(BASETYPE, HAS_DICT))]
impl PyNamespace {
#[pyslot]
fn tp_new(cls: PyTypeRef, kwargs: KwArgs, vm: &VirtualMachine) -> PyResult<PyRef<Self>> {
let zelf = PyNamespace.into_ref_with_type(vm, cls)?;
for (name, value) in kwargs.into_iter() {
vm.set_attr(zelf.as_object(), name, value)?;
}
Ok(zelf)
}
}
pub fn init(context: &PyContext) | {
PyNamespace::extend_class(context, &context.types.namespace_type);
} |
|
validate-url.ts | import {AbstractControl, ValidationErrors} from '@angular/forms';
export namespace TagValidators {
// tslint:disable-next-line
export const URL_PATTERN = /^([a-z]([a-z]|\d|\+|-|\.)*):(\/\/(((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:)*@)?((\[(|(v[\da-f]{1,}\.(([a-z]|\d|-|\.|_|~)|[!\$&'\(\)\*\+,;=]|:)+))\])|((\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5]))|(([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=])*)(:\d*)?)(\/(([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:|@)*)*|(\/((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:|@)+(\/(([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:|@)*)*)?)|((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:|@)+(\/(([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:|@)*)*)|((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:|@)){0})(\?((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:|@)|[\uE000-\uF8FF]|\/|\?)*)?(\#((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:|@)|\/|\?)*)?$/i;
// tslint:disable-next-line
export const HTTP_PATTERN = /((([A-Za-z]{3,9}:(?:\/\/)?)(?:[-;:&=\+\$,\w]+@)?[A-Za-z0-9.-]+|(?:www.|[-;:&=\+\$,\w]+@)[A-Za-z0-9.-]+)((?:\/[\+~%\/.\w-_]*)?\??(?:[-\+=&;%@.\w_]*)#?(?:[\w]*))?)/;
export const HTTP_SIMPLE_URL = /^https?:\/\/[0-9a-z-.]+\/?$/i;
export function | (value: string): string {
if (HTTP_PATTERN.test('http://' + value)) {
if (value.indexOf('/') === -1) {
value += '/';
}
return 'http://' + value;
}
return value;
}
export function toEndsWithSlash(value: string): string {
if (HTTP_SIMPLE_URL.test(value)) {
return value.endsWith('/') ? value : value + '/';
}
return value;
}
export function toUrl(value: string): string {
if (!URL_PATTERN.test(value)) {
value = toHttp(value);
}
return toEndsWithSlash(value);
}
export function validateUrl(control: AbstractControl): ValidationErrors | null {
if (control.value === null || control.value.length === 0) {
return null;
}
const value: string = toUrl(control.value);
return URL_PATTERN.test(value) ? null :
{'invalidUrl': true};
}
}
| toHttp |
forms.py | from django import forms
from django.utils.translation import gettext_lazy as _
from .models import Diary
class DiaryModelForm(forms.ModelForm):
class | :
widgets = {
'date': forms.DateInput(attrs={'type': 'date'}),
'daily_record': forms.Textarea(attrs={'rows': 4, 'class': 'ckeditor4'}),
'todo': forms.Textarea(attrs={'rows': 4, 'class': 'ckeditor4'}),
'remark': forms.Textarea(attrs={'rows': 4, 'class': 'ckeditor4'}),
'comment': forms.Textarea(attrs={'rows': 4, 'class': 'ckeditor4', 'readonly': ''}), # TODO: ckeditor4 or not?
}
model = Diary
exclude = ['created_by']
def full_clean(self):
super().full_clean()
try:
self.instance.validate_unique()
except forms.ValidationError:
self.add_error(field='date', error=_('A diary with this date already exists.'))
class DiaryCommentModelForm(forms.ModelForm):
class Meta:
widgets = {
'date': forms.DateInput(attrs={'type': 'date', 'readonly': ''}),
# Attrs readonly & style="pointer-events: none" make the <select> tag work like a readonly field.
'daily_check': forms.Select(attrs={'readonly': '', 'style': 'pointer-events: none'}),
'daily_record': forms.Textarea(attrs={'rows': 4, 'class': 'ckeditor4', 'readonly': ''}),
'todo': forms.Textarea(attrs={'rows': 4, 'class': 'ckeditor4', 'readonly': ''}),
'remark': forms.Textarea(attrs={'rows': 4, 'class': 'ckeditor4', 'readonly': ''}),
'comment': forms.Textarea(attrs={'rows': 4, 'class': 'ckeditor4'}), # TODO: ckeditor4 or not?
}
model = Diary
exclude = ['created_by']
def full_clean(self):
super().full_clean()
try:
self.instance.validate_unique()
except forms.ValidationError:
self.add_error(field='date', error=_('A diary with this date already exists.'))
| Meta |
accounts_db.rs | //! Persistent accounts are stored in below path location:
//! <path>/<pid>/data/
//!
//! The persistent store would allow for this mode of operation:
//! - Concurrent single thread append with many concurrent readers.
//!
//! The underlying memory is memory mapped to a file. The accounts would be
//! stored across multiple files and the mappings of file and offset of a
//! particular account would be stored in a shared index. This will allow for
//! concurrent commits without blocking reads, which will sequentially write
//! to memory, ssd, or disk, and should be as fast as the hardware allows.
//! The only required in memory data structure with a write lock is the index,
//! which should be fast to update.
//!
//! AppendVecs only store accounts for single slots. To bootstrap the
//! index from a persistent store of AppendVecs, the entries include
//! a "write_version". A single global atomic `AccountsDb::write_version`
//! tracks the number of commits to the entire data store. So the latest
//! commit for each slot entry would be indexed.
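// A minimal, self-contained sketch of the layout described above: a toy
// append-only store per slot plus a shared index mapping each key to
// (slot, offset), with a single counter standing in for the global
// `write_version`. All names below are illustrative only; the real
// AppendVec / AccountsIndex structures are far more involved.
#[allow(dead_code)]
mod layout_sketch {
    use std::collections::HashMap;

    pub struct ToyAppendVec {
        bytes: Vec<u8>, // append-only payload area
    }

    impl ToyAppendVec {
        pub fn new() -> Self {
            Self { bytes: Vec::new() }
        }

        /// Append a record and return the offset it was written at.
        pub fn append(&mut self, data: &[u8]) -> usize {
            let offset = self.bytes.len();
            self.bytes.extend_from_slice(data);
            offset
        }
    }

    #[derive(Default)]
    pub struct ToyStore {
        /// One append-only file per slot.
        pub stores: HashMap<u64, ToyAppendVec>,
        /// Shared index: key -> (slot, offset); the latest commit wins.
        pub index: HashMap<String, (u64, usize)>,
        /// Stand-in for the global atomic write version.
        pub write_version: u64,
    }

    impl ToyStore {
        pub fn store(&mut self, slot: u64, key: &str, data: &[u8]) {
            let vec = self.stores.entry(slot).or_insert_with(ToyAppendVec::new);
            let offset = vec.append(data);
            self.index.insert(key.to_string(), (slot, offset));
            self.write_version += 1;
        }
    }
}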
#[cfg(test)]
use std::{thread::sleep, time::Duration};
use {
crate::{
accounts_background_service::{DroppedSlotsSender, SendDroppedBankCallback},
accounts_cache::{AccountsCache, CachedAccount, SlotCache},
accounts_hash::{AccountsHash, CalculateHashIntermediate, HashStats, PreviousPass},
accounts_index::{
AccountIndexGetResult, AccountSecondaryIndexes, AccountsIndex, AccountsIndexConfig,
AccountsIndexRootsStats, IndexKey, IndexValue, IsCached, RefCount, ScanConfig,
ScanResult, SlotList, SlotSlice, ZeroLamport, ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS,
ACCOUNTS_INDEX_CONFIG_FOR_TESTING,
},
accounts_update_notifier_interface::AccountsUpdateNotifier,
ancestors::Ancestors,
append_vec::{AppendVec, StoredAccountMeta, StoredMeta, StoredMetaWriteVersion},
cache_hash_data::CacheHashData,
contains::Contains,
pubkey_bins::PubkeyBinCalculator24,
read_only_accounts_cache::ReadOnlyAccountsCache,
rent_collector::RentCollector,
sorted_storages::SortedStorages,
},
blake3::traits::digest::Digest,
crossbeam_channel::{unbounded, Receiver, Sender},
dashmap::{
mapref::entry::Entry::{Occupied, Vacant},
DashMap, DashSet,
},
log::*,
rand::{prelude::SliceRandom, thread_rng, Rng},
rayon::{prelude::*, ThreadPool},
serde::{Deserialize, Serialize},
solana_measure::measure::Measure,
solana_rayon_threadlimit::get_thread_count,
solana_sdk::{
account::{AccountSharedData, ReadableAccount},
clock::{BankId, Epoch, Slot, SlotCount},
epoch_schedule::EpochSchedule,
genesis_config::{ClusterType, GenesisConfig},
hash::Hash,
pubkey::Pubkey,
timing::AtomicInterval,
},
solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY,
std::{
borrow::{Borrow, Cow},
boxed::Box,
collections::{hash_map::Entry, BTreeSet, HashMap, HashSet},
convert::TryFrom,
hash::{Hash as StdHash, Hasher as StdHasher},
io::{Error as IoError, Result as IoResult},
ops::{Range, RangeBounds},
path::{Path, PathBuf},
str::FromStr,
sync::{
atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
Arc, Condvar, Mutex, MutexGuard, RwLock,
},
thread::Builder,
time::Instant,
},
tempfile::TempDir,
};
const PAGE_SIZE: u64 = 4 * 1024;
const MAX_RECYCLE_STORES: usize = 1000;
const STORE_META_OVERHEAD: usize = 256;
// when the accounts write cache exceeds this many bytes, we will flush it
// this can be specified on the command line, too (--accounts-db-cache-limit-mb)
const WRITE_CACHE_LIMIT_BYTES_DEFAULT: u64 = 15_000_000_000;
const FLUSH_CACHE_RANDOM_THRESHOLD: usize = MAX_LOCKOUT_HISTORY;
const SCAN_SLOT_PAR_ITER_THRESHOLD: usize = 4000;
pub const DEFAULT_FILE_SIZE: u64 = PAGE_SIZE * 1024;
pub const DEFAULT_NUM_THREADS: u32 = 8;
pub const DEFAULT_NUM_DIRS: u32 = 4;
// When calculating hashes, it is helpful to break the pubkeys found into bins based on the pubkey value.
// More bins means smaller vectors to sort, copy, etc.
pub const PUBKEY_BINS_FOR_CALCULATING_HASHES: usize = 65536;
pub const NUM_SCAN_PASSES_DEFAULT: usize = 2;
// Without chunks, we end up with 1 output vec for each outer snapshot storage.
// This results in too many vectors to be efficient.
// Chunk size (in slots) used when scanning storages to calculate hashes.
// If this is too big, we don't get enough parallelism of scanning storages.
// If this is too small, then we produce too many output vectors to iterate.
// Metrics indicate a sweet spot in the 2.5k-5k range for mnb.
const MAX_ITEMS_PER_CHUNK: Slot = 2_500;
// A specially reserved storage id just for entries in the cache, so that
// operations that take a storage entry can maintain a common interface
// when interacting with cached accounts. This id is "virtual" in that it
// doesn't actually refer to an actual storage entry.
const CACHE_VIRTUAL_STORAGE_ID: usize = AppendVecId::MAX;
// A specially reserved write version (identifier for ordering writes in an AppendVec)
// for entries in the cache, so that operations that take a storage entry can maintain
// a common interface when interacting with cached accounts. This version is "virtual" in
// that it doesn't actually map to an entry in an AppendVec.
const CACHE_VIRTUAL_WRITE_VERSION: StoredMetaWriteVersion = 0;
// A specially reserved offset (represents an offset into an AppendVec)
// for entries in the cache, so that operations that take a storage entry can maintain
// a common interface when interacting with cached accounts. This version is "virtual" in
// that it doesn't actually map to an entry in an AppendVec.
const CACHE_VIRTUAL_OFFSET: usize = 0;
const CACHE_VIRTUAL_STORED_SIZE: usize = 0;
pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig {
index: Some(ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
accounts_hash_cache_path: None,
filler_account_count: None,
hash_calc_num_passes: None,
write_cache_limit_bytes: None,
};
pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig {
index: Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS),
accounts_hash_cache_path: None,
filler_account_count: None,
hash_calc_num_passes: None,
write_cache_limit_bytes: None,
};
pub type BinnedHashData = Vec<Vec<CalculateHashIntermediate>>;
pub struct AccountsAddRootTiming {
pub index_us: u64,
pub cache_us: u64,
pub store_us: u64,
}
#[derive(Debug, Default, Clone)]
pub struct AccountsDbConfig {
pub index: Option<AccountsIndexConfig>,
pub accounts_hash_cache_path: Option<PathBuf>,
pub filler_account_count: Option<usize>,
pub hash_calc_num_passes: Option<usize>,
pub write_cache_limit_bytes: Option<u64>,
}
struct FoundStoredAccount<'a> {
pub account: StoredAccountMeta<'a>,
pub store_id: AppendVecId,
pub account_size: usize,
}
#[cfg(not(test))]
const ABSURD_CONSECUTIVE_FAILED_ITERATIONS: usize = 100;
type DashMapVersionHash = DashMap<Pubkey, (u64, Hash)>;
#[derive(Debug, Clone, Copy)]
pub enum AccountShrinkThreshold {
/// Measure the total space sparseness across all candidates
/// and select the candidates using the most sparse account storage entries to shrink.
/// The value is the overall shrink threshold measured as ratio of the total live bytes
/// over the total bytes.
TotalSpace { shrink_ratio: f64 },
/// Use the following option to shrink all stores whose alive ratio is below
/// the specified threshold.
IndividalStore { shrink_ratio: f64 },
}
pub const DEFAULT_ACCOUNTS_SHRINK_OPTIMIZE_TOTAL_SPACE: bool = true;
pub const DEFAULT_ACCOUNTS_SHRINK_RATIO: f64 = 0.80;
// The default extra account space in percentage from the ideal target
const DEFAULT_ACCOUNTS_SHRINK_THRESHOLD_OPTION: AccountShrinkThreshold =
AccountShrinkThreshold::TotalSpace {
shrink_ratio: DEFAULT_ACCOUNTS_SHRINK_RATIO,
};
impl Default for AccountShrinkThreshold {
fn default() -> AccountShrinkThreshold {
DEFAULT_ACCOUNTS_SHRINK_THRESHOLD_OPTION
}
}
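// A minimal sketch of how such a threshold could be applied (illustrative
// helper, not part of AccountsDb): shrinking is warranted once the ratio
// of alive bytes to total capacity drops below `shrink_ratio`. For
// example, with DEFAULT_ACCOUNTS_SHRINK_RATIO = 0.80, a store holding 700
// live bytes out of 1000 total (ratio 0.70) qualifies.
#[allow(dead_code)]
fn alive_ratio_below_threshold(
    threshold: &AccountShrinkThreshold,
    alive_bytes: u64,
    total_bytes: u64,
) -> bool {
    let shrink_ratio = match *threshold {
        AccountShrinkThreshold::TotalSpace { shrink_ratio } => shrink_ratio,
        AccountShrinkThreshold::IndividalStore { shrink_ratio } => shrink_ratio,
    };
    total_bytes > 0 && (alive_bytes as f64 / total_bytes as f64) < shrink_ratio
}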
pub enum ScanStorageResult<R, B> {
Cached(Vec<R>),
Stored(B),
}
#[derive(Debug, Default)]
pub struct ErrorCounters {
pub total: usize,
pub account_in_use: usize,
pub account_loaded_twice: usize,
pub account_not_found: usize,
pub blockhash_not_found: usize,
pub blockhash_too_old: usize,
pub call_chain_too_deep: usize,
pub already_processed: usize,
pub instruction_error: usize,
pub insufficient_funds: usize,
pub invalid_account_for_fee: usize,
pub invalid_account_index: usize,
pub invalid_program_for_execution: usize,
pub not_allowed_during_cluster_maintenance: usize,
pub invalid_writable_account: usize,
pub invalid_rent_paying_account: usize,
}
#[derive(Debug, Default, Clone, Copy)]
pub struct IndexGenerationInfo {
pub accounts_data_len: u64,
}
#[derive(Debug, Default, Clone, Copy)]
struct SlotIndexGenerationInfo {
insert_time_us: u64,
num_accounts: u64,
num_accounts_rent_exempt: u64,
accounts_data_len: u64,
}
#[derive(Default, Debug)]
struct GenerateIndexTimings {
pub index_time: u64,
pub scan_time: u64,
pub insertion_time_us: u64,
pub min_bin_size: usize,
pub max_bin_size: usize,
pub total_items: usize,
pub storage_size_accounts_map_us: u64,
pub storage_size_storages_us: u64,
pub storage_size_accounts_map_flatten_us: u64,
pub index_flush_us: u64,
pub rent_exempt: u64,
pub total_duplicates: u64,
pub accounts_data_len_dedup_time_us: u64,
}
#[derive(Default, Debug, PartialEq)]
struct StorageSizeAndCount {
pub stored_size: usize,
pub count: usize,
}
type StorageSizeAndCountMap = DashMap<AppendVecId, StorageSizeAndCount>;
impl GenerateIndexTimings {
pub fn report(&self) {
datapoint_info!(
"generate_index",
// we cannot accurately measure index insertion time because of many threads and lock contention
("total_us", self.index_time, i64),
("scan_stores_us", self.scan_time, i64),
("insertion_time_us", self.insertion_time_us, i64),
("min_bin_size", self.min_bin_size as i64, i64),
("max_bin_size", self.max_bin_size as i64, i64),
(
"storage_size_accounts_map_us",
self.storage_size_accounts_map_us as i64,
i64
),
(
"storage_size_storages_us",
self.storage_size_storages_us as i64,
i64
),
(
"storage_size_accounts_map_flatten_us",
self.storage_size_accounts_map_flatten_us as i64,
i64
),
("index_flush_us", self.index_flush_us as i64, i64),
(
"total_rent_paying_with_duplicates",
self.total_duplicates.saturating_sub(self.rent_exempt) as i64,
i64
),
(
"total_items_with_duplicates",
self.total_duplicates as i64,
i64
),
("total_items", self.total_items as i64, i64),
(
"accounts_data_len_dedup_time_us",
self.accounts_data_len_dedup_time_us as i64,
i64
),
);
}
}
#[derive(Default, Debug, PartialEq, Clone, Copy)]
pub struct AccountInfo {
/// index identifying the append storage
store_id: AppendVecId,
/// offset into the storage
offset: usize,
/// needed to track shrink candidacy in bytes. Used to update the number
/// of alive bytes in an AppendVec as newer slots purge outdated entries
stored_size: usize,
/// lamports in the account used when squashing kept for optimization
/// purposes to remove accounts with zero balance.
lamports: u64,
}
impl IsCached for AccountInfo {
fn is_cached(&self) -> bool {
self.store_id == CACHE_VIRTUAL_STORAGE_ID
}
}
impl IndexValue for AccountInfo {}
impl ZeroLamport for AccountInfo {
fn is_zero_lamport(&self) -> bool {
self.lamports == 0
}
}
impl ZeroLamport for AccountSharedData {
fn is_zero_lamport(&self) -> bool {
self.lamports() == 0
}
}
struct MultiThreadProgress<'a> {
last_update: Instant,
my_last_report_count: u64,
total_count: &'a AtomicU64,
report_delay_secs: u64,
first_caller: bool,
ultimate_count: u64,
}
impl<'a> MultiThreadProgress<'a> {
fn new(total_count: &'a AtomicU64, report_delay_secs: u64, ultimate_count: u64) -> Self {
Self {
last_update: Instant::now(),
my_last_report_count: 0,
total_count,
report_delay_secs,
first_caller: false,
ultimate_count,
}
}
fn report(&mut self, my_current_count: u64) {
let now = Instant::now();
if now.duration_since(self.last_update).as_secs() >= self.report_delay_secs {
let my_total_newly_processed_slots_since_last_report =
my_current_count - self.my_last_report_count;
self.my_last_report_count = my_current_count;
let previous_total_processed_slots_across_all_threads = self.total_count.fetch_add(
my_total_newly_processed_slots_since_last_report,
Ordering::Relaxed,
);
self.first_caller =
self.first_caller || 0 == previous_total_processed_slots_across_all_threads;
if self.first_caller {
info!(
"generating index: {}/{} slots...",
previous_total_processed_slots_across_all_threads
+ my_total_newly_processed_slots_since_last_report,
self.ultimate_count
);
}
self.last_update = now;
}
}
}
/// An offset into the AccountsDb::storage vector
pub type AppendVecId = usize;
pub type SnapshotStorage = Vec<Arc<AccountStorageEntry>>;
pub type SnapshotStorages = Vec<SnapshotStorage>;
// Each slot has a set of storage entries.
pub(crate) type SlotStores = Arc<RwLock<HashMap<usize, Arc<AccountStorageEntry>>>>;
type AccountSlots = HashMap<Pubkey, HashSet<Slot>>;
type AppendVecOffsets = HashMap<AppendVecId, HashSet<usize>>;
type ReclaimResult = (AccountSlots, AppendVecOffsets);
type StorageFinder<'a> = Box<dyn Fn(Slot, usize) -> Arc<AccountStorageEntry> + 'a>;
type ShrinkCandidates = HashMap<Slot, HashMap<AppendVecId, Arc<AccountStorageEntry>>>;
trait Versioned {
fn version(&self) -> u64;
}
impl Versioned for (u64, Hash) {
fn version(&self) -> u64 {
self.0
}
}
impl Versioned for (u64, AccountInfo) {
fn version(&self) -> u64 {
self.0
}
}
// Some hints for applicability of additional sanity checks for the do_load fast-path;
// Slower fallback code path will be taken if the fast path has failed over the retry
// threshold, regardless of these hints. Also, load must not fail non-deterministically,
// even under very rare circumstances, unlike what was previously allowed.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum LoadHint {
// Caller hints that it's loading transactions for a block which is
// descended from the current root, and at the tip of its fork.
// Further, this assumes that AccountsIndex::max_root will not increase
// during this load, meaning there should be no squash.
// Overall, this enables us to assert!() strictly while running the fast-path for
// account loading, while maintaining the determinism of account loading and resultant
// transaction execution thereof.
FixedMaxRoot,
// Caller can't provide the above safety hint. Generally RPC and miscellaneous
// other call-sites fall into this category. The likelihood of taking the slower path is
// slightly increased as well.
Unspecified,
}
#[derive(Debug)]
pub enum LoadedAccountAccessor<'a> {
// StoredAccountMeta can't be held directly here due to its lifetime dependency to
// AccountStorageEntry
Stored(Option<(Arc<AccountStorageEntry>, usize)>),
// None value in Cached variant means the cache was flushed
Cached(Option<Cow<'a, CachedAccount>>),
}
mod geyser_plugin_utils;
impl<'a> LoadedAccountAccessor<'a> {
fn check_and_get_loaded_account(&mut self) -> LoadedAccount {
// All of the following .expect() and .unwrap() calls indicate serious logic errors;
// ideally this would be represented in the Rust type system...
match self {
LoadedAccountAccessor::Cached(None) | LoadedAccountAccessor::Stored(None) => {
panic!("Should have already been taken care of when creating this LoadedAccountAccessor");
}
LoadedAccountAccessor::Cached(Some(_cached_account)) => {
// Cached(Some(x)) variant always produces `Some` for get_loaded_account() since
// it just returns the inner `x` without additional fetches
self.get_loaded_account().unwrap()
}
LoadedAccountAccessor::Stored(Some(_maybe_storage_entry)) => {
// If we do find the storage entry, we can guarantee that the storage entry is
// safe to read from because we grabbed a reference to the storage entry while it
// was still in the storage map. This means even if the storage entry is removed
// from the storage map after we grabbed the storage entry, the recycler should not
// reset the storage entry until we drop the reference to the storage entry.
self.get_loaded_account()
.expect("If a storage entry was found in the storage map, it must not have been reset yet")
}
}
}
fn get_loaded_account(&mut self) -> Option<LoadedAccount> {
match self {
LoadedAccountAccessor::Cached(cached_account) => {
let cached_account: Cow<'a, CachedAccount> = cached_account.take().expect(
"Cache flushed/purged should be handled before trying to fetch account",
);
Some(LoadedAccount::Cached(cached_account))
}
LoadedAccountAccessor::Stored(maybe_storage_entry) => {
// storage entry may not be present if slot was cleaned up in
// between reading the accounts index and calling this function to
// get account meta from the storage entry here
maybe_storage_entry
.as_ref()
.and_then(|(storage_entry, offset)| {
storage_entry
.get_stored_account_meta(*offset)
.map(LoadedAccount::Stored)
})
}
}
}
}
pub enum LoadedAccount<'a> {
Stored(StoredAccountMeta<'a>),
Cached(Cow<'a, CachedAccount>),
}
impl<'a> LoadedAccount<'a> {
pub fn owner(&self) -> &Pubkey {
match self {
LoadedAccount::Stored(stored_account_meta) => &stored_account_meta.account_meta.owner,
LoadedAccount::Cached(cached_account) => cached_account.account.owner(),
}
}
pub fn executable(&self) -> bool {
match self {
LoadedAccount::Stored(stored_account_meta) => {
stored_account_meta.account_meta.executable
}
LoadedAccount::Cached(cached_account) => cached_account.account.executable(),
}
}
pub fn loaded_hash(&self) -> Hash {
match self {
LoadedAccount::Stored(stored_account_meta) => *stored_account_meta.hash,
LoadedAccount::Cached(cached_account) => cached_account.hash(),
}
}
pub fn pubkey(&self) -> &Pubkey {
match self {
LoadedAccount::Stored(stored_account_meta) => &stored_account_meta.meta.pubkey,
LoadedAccount::Cached(cached_account) => cached_account.pubkey(),
}
}
pub fn write_version(&self) -> StoredMetaWriteVersion {
match self {
LoadedAccount::Stored(stored_account_meta) => stored_account_meta.meta.write_version,
LoadedAccount::Cached(_) => CACHE_VIRTUAL_WRITE_VERSION,
}
}
pub fn compute_hash(&self, slot: Slot, pubkey: &Pubkey) -> Hash {
match self {
LoadedAccount::Stored(stored_account_meta) => {
AccountsDb::hash_stored_account(slot, stored_account_meta)
}
LoadedAccount::Cached(cached_account) => {
AccountsDb::hash_account(slot, &cached_account.account, pubkey)
}
}
}
pub fn stored_size(&self) -> usize {
match self {
LoadedAccount::Stored(stored_account_meta) => stored_account_meta.stored_size,
LoadedAccount::Cached(_) => CACHE_VIRTUAL_STORED_SIZE,
}
}
pub fn lamports(&self) -> u64 {
match self {
LoadedAccount::Stored(stored_account_meta) => stored_account_meta.account_meta.lamports,
LoadedAccount::Cached(cached_account) => cached_account.account.lamports(),
}
}
pub fn take_account(self) -> AccountSharedData {
match self {
LoadedAccount::Stored(stored_account_meta) => stored_account_meta.clone_account(),
LoadedAccount::Cached(cached_account) => match cached_account {
Cow::Owned(cached_account) => cached_account.account.clone(),
Cow::Borrowed(cached_account) => cached_account.account.clone(),
},
}
}
pub fn is_cached(&self) -> bool {
match self {
LoadedAccount::Stored(_) => false,
LoadedAccount::Cached(_) => true,
}
}
}
#[derive(Clone, Default, Debug)]
pub struct AccountStorage(pub DashMap<Slot, SlotStores>);
impl AccountStorage {
fn get_account_storage_entry(
&self,
slot: Slot,
store_id: AppendVecId,
) -> Option<Arc<AccountStorageEntry>> {
self.get_slot_stores(slot)
.and_then(|storage_map| storage_map.read().unwrap().get(&store_id).cloned())
}
pub fn get_slot_stores(&self, slot: Slot) -> Option<SlotStores> {
self.0.get(&slot).map(|result| result.value().clone())
}
fn get_slot_storage_entries(&self, slot: Slot) -> Option<Vec<Arc<AccountStorageEntry>>> {
self.get_slot_stores(slot)
.map(|res| res.read().unwrap().values().cloned().collect())
}
fn slot_store_count(&self, slot: Slot, store_id: AppendVecId) -> Option<usize> {
self.get_account_storage_entry(slot, store_id)
.map(|store| store.count())
}
fn all_slots(&self) -> Vec<Slot> {
self.0.iter().map(|iter_item| *iter_item.key()).collect()
}
}
#[derive(Debug, Eq, PartialEq, Copy, Clone, Deserialize, Serialize, AbiExample, AbiEnumVisitor)]
pub enum AccountStorageStatus {
Available = 0,
Full = 1,
Candidate = 2,
}
impl Default for AccountStorageStatus {
fn default() -> Self {
Self::Available
}
}
#[derive(Debug)]
pub enum BankHashVerificationError {
MismatchedAccountHash,
MismatchedBankHash,
MissingBankHash,
MismatchedTotalLamports(u64, u64),
}
#[derive(Default)]
struct CleanKeyTimings {
collect_delta_keys_us: u64,
delta_insert_us: u64,
hashset_to_vec_us: u64,
dirty_store_processing_us: u64,
delta_key_count: u64,
dirty_pubkeys_count: u64,
}
/// Persistent storage structure holding the accounts
#[derive(Debug)]
pub struct AccountStorageEntry {
pub(crate) id: AtomicUsize,
pub(crate) slot: AtomicU64,
/// storage holding the accounts
pub(crate) accounts: AppendVec,
/// Keeps track of the number of accounts stored in a specific AppendVec.
/// This is periodically checked to reuse the stores that do not have
/// any accounts in them.
/// The status corresponding to the storage lets us know that
/// the append_vec, once maxed out and then emptied, can be reclaimed.
count_and_status: RwLock<(usize, AccountStorageStatus)>,
/// This is the total number of accounts stored ever since initialized to keep
/// track of lifetime count of all store operations. And this differs from
/// count_and_status in that this field won't be decremented.
///
/// This is used as a rough estimate for slot shrinking. For such a relaxed
/// use case, this value IS NOT strictly synchronized with count_and_status!
approx_store_count: AtomicUsize,
alive_bytes: AtomicUsize,
}
impl AccountStorageEntry {
pub fn new(path: &Path, slot: Slot, id: usize, file_size: u64) -> Self {
let tail = AppendVec::file_name(slot, id);
let path = Path::new(path).join(tail);
let accounts = AppendVec::new(&path, true, file_size as usize);
Self {
id: AtomicUsize::new(id),
slot: AtomicU64::new(slot),
accounts,
count_and_status: RwLock::new((0, AccountStorageStatus::Available)),
approx_store_count: AtomicUsize::new(0),
alive_bytes: AtomicUsize::new(0),
}
}
pub(crate) fn new_existing(
slot: Slot,
id: AppendVecId,
accounts: AppendVec,
num_accounts: usize,
) -> Self {
Self {
id: AtomicUsize::new(id),
slot: AtomicU64::new(slot),
accounts,
count_and_status: RwLock::new((0, AccountStorageStatus::Available)),
approx_store_count: AtomicUsize::new(num_accounts),
alive_bytes: AtomicUsize::new(0),
}
}
pub fn set_status(&self, mut status: AccountStorageStatus) {
let mut count_and_status = self.count_and_status.write().unwrap();
let count = count_and_status.0;
if status == AccountStorageStatus::Full && count == 0 {
// this case arises when the append_vec is full (store_ptrs fails),
// but all accounts have already been removed from the storage
//
// the only time it's safe to call reset() on an append_vec is when
// every account has been removed
// **and**
// the append_vec has previously been completely full
//
self.accounts.reset();
status = AccountStorageStatus::Available;
}
*count_and_status = (count, status);
}
pub fn recycle(&self, slot: Slot, id: usize) {
let mut count_and_status = self.count_and_status.write().unwrap();
self.accounts.reset();
*count_and_status = (0, AccountStorageStatus::Available);
self.slot.store(slot, Ordering::Release);
self.id.store(id, Ordering::Release);
self.approx_store_count.store(0, Ordering::Relaxed);
self.alive_bytes.store(0, Ordering::Release);
}
pub fn status(&self) -> AccountStorageStatus {
self.count_and_status.read().unwrap().1
}
pub fn count(&self) -> usize {
self.count_and_status.read().unwrap().0
}
pub fn approx_stored_count(&self) -> usize {
self.approx_store_count.load(Ordering::Relaxed)
}
pub fn alive_bytes(&self) -> usize {
self.alive_bytes.load(Ordering::SeqCst)
}
pub fn written_bytes(&self) -> u64 {
self.accounts.len() as u64
}
pub fn total_bytes(&self) -> u64 {
self.accounts.capacity()
}
pub fn has_accounts(&self) -> bool {
self.count() > 0
}
pub fn slot(&self) -> Slot {
self.slot.load(Ordering::Acquire)
}
pub fn append_vec_id(&self) -> AppendVecId {
self.id.load(Ordering::Acquire)
}
pub fn flush(&self) -> Result<(), IoError> {
self.accounts.flush()
}
fn get_stored_account_meta(&self, offset: usize) -> Option<StoredAccountMeta> {
Some(self.accounts.get_account(offset)?.0)
}
fn add_account(&self, num_bytes: usize) {
let mut count_and_status = self.count_and_status.write().unwrap();
*count_and_status = (count_and_status.0 + 1, count_and_status.1);
self.approx_store_count.fetch_add(1, Ordering::Relaxed);
self.alive_bytes.fetch_add(num_bytes, Ordering::SeqCst);
}
fn try_available(&self) -> bool {
let mut count_and_status = self.count_and_status.write().unwrap();
let (count, status) = *count_and_status;
if status == AccountStorageStatus::Available {
*count_and_status = (count, AccountStorageStatus::Candidate);
true
} else {
false
}
}
pub fn all_accounts(&self) -> Vec<StoredAccountMeta> {
self.accounts.accounts(0)
}
fn remove_account(&self, num_bytes: usize, reset_accounts: bool) -> usize {
let mut count_and_status = self.count_and_status.write().unwrap();
let (mut count, mut status) = *count_and_status;
if count == 1 && status == AccountStorageStatus::Full && reset_accounts {
// this case arises when we remove the last account from the
// storage, but we've learned from previous write attempts that
// the storage is full
//
// the only time it's safe to call reset() on an append_vec is when
// every account has been removed
// **and**
// the append_vec has previously been completely full
//
// otherwise, the storage may be in flight with a store()
// call
self.accounts.reset();
status = AccountStorageStatus::Available;
}
// Some code path is removing too many accounts; this may result in an
// unintended reveal of old state for unrelated accounts.
assert!(
count > 0,
"double remove of account in slot: {}/store: {}!!",
self.slot(),
self.append_vec_id(),
);
self.alive_bytes.fetch_sub(num_bytes, Ordering::SeqCst);
count -= 1;
*count_and_status = (count, status);
count
}
pub fn get_path(&self) -> PathBuf {
self.accounts.get_path()
}
}
pub fn get_temp_accounts_paths(count: u32) -> IoResult<(Vec<TempDir>, Vec<PathBuf>)> {
let temp_dirs: IoResult<Vec<TempDir>> = (0..count).map(|_| TempDir::new()).collect();
let temp_dirs = temp_dirs?;
let paths: Vec<PathBuf> = temp_dirs.iter().map(|t| t.path().to_path_buf()).collect();
Ok((temp_dirs, paths))
}
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, AbiExample)]
pub struct BankHashStats {
pub num_updated_accounts: u64,
pub num_removed_accounts: u64,
pub num_lamports_stored: u64,
pub total_data_len: u64,
pub num_executable_accounts: u64,
}
impl BankHashStats {
pub fn update<T: ReadableAccount + ZeroLamport>(&mut self, account: &T) {
if account.is_zero_lamport() {
self.num_removed_accounts += 1;
} else {
self.num_updated_accounts += 1;
}
self.total_data_len = self
.total_data_len
.wrapping_add(account.data().len() as u64);
if account.executable() {
self.num_executable_accounts += 1;
}
self.num_lamports_stored = self.num_lamports_stored.wrapping_add(account.lamports());
}
pub fn merge(&mut self, other: &BankHashStats) {
self.num_updated_accounts += other.num_updated_accounts;
self.num_removed_accounts += other.num_removed_accounts;
self.total_data_len = self.total_data_len.wrapping_add(other.total_data_len);
self.num_lamports_stored = self
.num_lamports_stored
.wrapping_add(other.num_lamports_stored);
self.num_executable_accounts += other.num_executable_accounts;
}
}
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, AbiExample)]
pub struct BankHashInfo {
pub hash: Hash,
pub snapshot_hash: Hash,
pub stats: BankHashStats,
}
#[derive(Default)]
pub struct StoreAccountsTiming {
store_accounts_elapsed: u64,
update_index_elapsed: u64,
handle_reclaims_elapsed: u64,
}
#[derive(Debug, Default)]
struct RecycleStores {
entries: Vec<(Instant, Arc<AccountStorageEntry>)>,
total_bytes: u64,
}
// 30 min should be enough to be certain there won't be any prospective recycle uses for a given
// store entry.
// That's because by then ~2500 slots and ~25 passes of AccountsBackgroundService have already been processed.
pub const EXPIRATION_TTL_SECONDS: u64 = 1800;
impl RecycleStores {
fn add_entry(&mut self, new_entry: Arc<AccountStorageEntry>) {
self.total_bytes += new_entry.total_bytes();
self.entries.push((Instant::now(), new_entry))
}
fn iter(&self) -> std::slice::Iter<(Instant, Arc<AccountStorageEntry>)> {
self.entries.iter()
}
fn add_entries(&mut self, new_entries: Vec<Arc<AccountStorageEntry>>) {
self.total_bytes += new_entries.iter().map(|e| e.total_bytes()).sum::<u64>();
let now = Instant::now();
for new_entry in new_entries {
self.entries.push((now, new_entry));
}
}
fn expire_old_entries(&mut self) -> Vec<Arc<AccountStorageEntry>> {
let mut expired = vec![];
let now = Instant::now();
let mut expired_bytes = 0;
self.entries.retain(|(recycled_time, entry)| {
if now.duration_since(*recycled_time).as_secs() > EXPIRATION_TTL_SECONDS {
if Arc::strong_count(entry) >= 2 {
warn!(
"Expiring still in-use recycled StorageEntry anyway...: id: {} slot: {}",
entry.append_vec_id(),
entry.slot(),
);
}
expired_bytes += entry.total_bytes();
expired.push(entry.clone());
false
} else {
true
}
});
self.total_bytes -= expired_bytes;
expired
}
fn remove_entry(&mut self, index: usize) -> Arc<AccountStorageEntry> {
let (_added_time, removed_entry) = self.entries.swap_remove(index);
self.total_bytes -= removed_entry.total_bytes();
removed_entry
}
fn entry_count(&self) -> usize {
self.entries.len()
}
fn total_bytes(&self) -> u64 {
self.total_bytes
}
}
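// A minimal, generic sketch of the TTL-expiry pattern used by
// `expire_old_entries` above: retain entries younger than the TTL and
// return the expired ones. The element type here is arbitrary; the real
// code additionally maintains `total_bytes` and warns about entries that
// are still in use.
#[allow(dead_code)]
fn expire_older_than<T>(
    entries: &mut Vec<(Instant, T)>,
    ttl: std::time::Duration,
) -> Vec<T> {
    let now = Instant::now();
    let mut expired = Vec::new();
    let mut kept = Vec::new();
    for (added, value) in entries.drain(..) {
        if now.duration_since(added) > ttl {
            expired.push(value);
        } else {
            kept.push((added, value));
        }
    }
    *entries = kept;
    expired
}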
/// Removing unrooted slots in Accounts Background Service needs to be synchronized with flushing
/// slots from the Accounts Cache. This keeps track of those slots and the Mutex + Condvar for
/// synchronization.
#[derive(Debug, Default)]
struct RemoveUnrootedSlotsSynchronization {
// slots being flushed from the cache or being purged
slots_under_contention: Mutex<HashSet<Slot>>,
signal: Condvar,
}
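// A minimal sketch of the Mutex + Condvar coordination described above:
// the removal side waits until none of the slots it wants to clear are
// under contention, while the flushing side inserts/removes slots from
// the set and notifies the condvar. This helper is illustrative only and
// is not the actual AccountsDb flow.
#[allow(dead_code)]
fn wait_until_slots_not_contended(
    sync: &RemoveUnrootedSlotsSynchronization,
    slots: &[Slot],
) {
    let mut contended = sync.slots_under_contention.lock().unwrap();
    while slots.iter().any(|slot| contended.contains(slot)) {
        // Condvar::wait atomically releases the lock and re-acquires it
        // once the other side calls notify_all().
        contended = sync.signal.wait(contended).unwrap();
    }
}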
type AccountInfoAccountsIndex = AccountsIndex<AccountInfo>;
// This structure handles the load/store of the accounts
#[derive(Debug)]
pub struct AccountsDb {
/// Keeps tracks of index into AppendVec on a per slot basis
pub accounts_index: AccountInfoAccountsIndex,
pub storage: AccountStorage,
pub accounts_cache: AccountsCache,
write_cache_limit_bytes: Option<u64>,
sender_bg_hasher: Option<Sender<CachedAccount>>,
read_only_accounts_cache: ReadOnlyAccountsCache,
recycle_stores: RwLock<RecycleStores>,
/// distribute the accounts across storage lists
pub next_id: AtomicUsize,
/// Set of shrinkable stores organized by map of slot to append_vec_id
pub shrink_candidate_slots: Mutex<ShrinkCandidates>,
/// Legacy shrink slots to support non-cached code-path.
pub shrink_candidate_slots_v1: Mutex<Vec<Slot>>,
pub(crate) write_version: AtomicU64,
/// Set of storage paths to pick from
pub(crate) paths: Vec<PathBuf>,
accounts_hash_cache_path: PathBuf,
// used by tests
// holds this until we are dropped
#[allow(dead_code)]
temp_accounts_hash_cache_path: Option<TempDir>,
pub shrink_paths: RwLock<Option<Vec<PathBuf>>>,
/// Directory of paths this accounts_db needs to hold/remove
#[allow(dead_code)]
pub(crate) temp_paths: Option<Vec<TempDir>>,
/// Starting file size of appendvecs
file_size: u64,
/// Thread pool used for par_iter
pub thread_pool: ThreadPool,
pub thread_pool_clean: ThreadPool,
/// Number of append vecs to create to maximize parallelism when scanning
/// the accounts
min_num_stores: usize,
pub bank_hashes: RwLock<HashMap<Slot, BankHashInfo>>,
stats: AccountsStats,
clean_accounts_stats: CleanAccountsStats,
// Stats for purges called outside of clean_accounts()
external_purge_slots_stats: PurgeStats,
shrink_stats: ShrinkStats,
pub cluster_type: Option<ClusterType>,
pub account_indexes: AccountSecondaryIndexes,
pub caching_enabled: bool,
/// Set of unique keys per slot which is used
/// to drive clean_accounts
/// Generated by get_accounts_delta_hash
uncleaned_pubkeys: DashMap<Slot, Vec<Pubkey>>,
#[cfg(test)]
load_delay: u64,
#[cfg(test)]
load_limit: AtomicU64,
is_bank_drop_callback_enabled: AtomicBool,
/// Set of slots currently being flushed by `flush_slot_cache()` or removed
/// by `remove_unrooted_slot()`. Used to ensure `remove_unrooted_slots(slots)`
/// can safely clear the set of unrooted slots `slots`.
remove_unrooted_slots_synchronization: RemoveUnrootedSlotsSynchronization,
shrink_ratio: AccountShrinkThreshold,
/// Set of stores which are recently rooted or had accounts removed
/// such that potentially a 0-lamport account update could be present which
/// means we can remove the account from the index entirely.
dirty_stores: DashMap<(Slot, AppendVecId), Arc<AccountStorageEntry>>,
/// Zero-lamport accounts that are *not* purged during clean because they need to stay alive
/// for incremental snapshot support.
zero_lamport_accounts_to_purge_after_full_snapshot: DashSet<(Slot, Pubkey)>,
/// GeyserPlugin accounts update notifier
accounts_update_notifier: Option<AccountsUpdateNotifier>,
filler_account_count: usize,
pub filler_account_suffix: Option<Pubkey>,
// The # of passes should be a function of the total # of accounts that are active.
// more passes = slower total time, lower dynamic memory usage
// fewer passes = faster total time, higher dynamic memory usage
// passes=2 cuts dynamic memory usage approximately in half.
pub num_hash_scan_passes: Option<usize>,
}
#[derive(Debug, Default)]
struct AccountsStats {
delta_hash_scan_time_total_us: AtomicU64,
delta_hash_accumulate_time_total_us: AtomicU64,
delta_hash_num: AtomicU64,
last_store_report: AtomicInterval,
store_hash_accounts: AtomicU64,
calc_stored_meta: AtomicU64,
store_accounts: AtomicU64,
store_update_index: AtomicU64,
store_handle_reclaims: AtomicU64,
store_append_accounts: AtomicU64,
store_find_store: AtomicU64,
store_num_accounts: AtomicU64,
store_total_data: AtomicU64,
recycle_store_count: AtomicU64,
create_store_count: AtomicU64,
store_get_slot_store: AtomicU64,
store_find_existing: AtomicU64,
dropped_stores: AtomicU64,
store_uncleaned_update: AtomicU64,
}
#[derive(Debug, Default)]
struct PurgeStats {
last_report: AtomicInterval,
safety_checks_elapsed: AtomicU64,
remove_cache_elapsed: AtomicU64,
remove_storage_entries_elapsed: AtomicU64,
drop_storage_entries_elapsed: AtomicU64,
num_cached_slots_removed: AtomicUsize,
num_stored_slots_removed: AtomicUsize,
total_removed_storage_entries: AtomicUsize,
total_removed_cached_bytes: AtomicU64,
total_removed_stored_bytes: AtomicU64,
recycle_stores_write_elapsed: AtomicU64,
scan_storages_elasped: AtomicU64,
purge_accounts_index_elapsed: AtomicU64,
handle_reclaims_elapsed: AtomicU64,
}
impl PurgeStats {
fn report(&self, metric_name: &'static str, report_interval_ms: Option<u64>) {
let should_report = report_interval_ms
.map(|report_interval_ms| self.last_report.should_update(report_interval_ms))
.unwrap_or(true);
if should_report {
datapoint_info!(
metric_name,
(
"safety_checks_elapsed",
self.safety_checks_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"remove_cache_elapsed",
self.remove_cache_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"remove_storage_entries_elapsed",
self.remove_storage_entries_elapsed
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"drop_storage_entries_elapsed",
self.drop_storage_entries_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"num_cached_slots_removed",
self.num_cached_slots_removed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"num_stored_slots_removed",
self.num_stored_slots_removed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_removed_storage_entries",
self.total_removed_storage_entries
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_removed_cached_bytes",
self.total_removed_cached_bytes.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_removed_stored_bytes",
self.total_removed_stored_bytes.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"recycle_stores_write_elapsed",
self.recycle_stores_write_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"scan_storages_elasped",
self.scan_storages_elasped.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"purge_accounts_index_elapsed",
self.purge_accounts_index_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"handle_reclaims_elapsed",
self.handle_reclaims_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
);
}
}
}
#[derive(Debug, Default)]
struct FlushStats {
#[allow(dead_code)]
slot: Slot,
#[allow(dead_code)]
num_flushed: usize,
#[allow(dead_code)]
num_purged: usize,
#[allow(dead_code)]
total_size: u64,
}
#[derive(Debug, Default)]
struct LatestAccountsIndexRootsStats {
roots_len: AtomicUsize,
uncleaned_roots_len: AtomicUsize,
previous_uncleaned_roots_len: AtomicUsize,
roots_range: AtomicU64,
rooted_cleaned_count: AtomicUsize,
unrooted_cleaned_count: AtomicUsize,
clean_unref_from_storage_us: AtomicU64,
clean_dead_slot_us: AtomicU64,
}
impl LatestAccountsIndexRootsStats {
fn update(&self, accounts_index_roots_stats: &AccountsIndexRootsStats) {
self.roots_len
.store(accounts_index_roots_stats.roots_len, Ordering::Relaxed);
self.uncleaned_roots_len.store(
accounts_index_roots_stats.uncleaned_roots_len,
Ordering::Relaxed,
);
self.previous_uncleaned_roots_len.store(
accounts_index_roots_stats.previous_uncleaned_roots_len,
Ordering::Relaxed,
);
self.roots_range
.store(accounts_index_roots_stats.roots_range, Ordering::Relaxed);
self.rooted_cleaned_count.fetch_add(
accounts_index_roots_stats.rooted_cleaned_count,
Ordering::Relaxed,
);
self.unrooted_cleaned_count.fetch_add(
accounts_index_roots_stats.unrooted_cleaned_count,
Ordering::Relaxed,
);
self.clean_unref_from_storage_us.fetch_add(
accounts_index_roots_stats.clean_unref_from_storage_us,
Ordering::Relaxed,
);
self.clean_dead_slot_us.fetch_add(
accounts_index_roots_stats.clean_dead_slot_us,
Ordering::Relaxed,
);
}
fn report(&self) {
datapoint_info!(
"accounts_index_roots_len",
(
"roots_len",
self.roots_len.load(Ordering::Relaxed) as i64,
i64
),
(
"uncleaned_roots_len",
self.uncleaned_roots_len.load(Ordering::Relaxed) as i64,
i64
),
(
"previous_uncleaned_roots_len",
self.previous_uncleaned_roots_len.load(Ordering::Relaxed) as i64,
i64
),
(
"roots_range_width",
self.roots_range.load(Ordering::Relaxed) as i64,
i64
),
(
"unrooted_cleaned_count",
self.unrooted_cleaned_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"rooted_cleaned_count",
self.rooted_cleaned_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"clean_unref_from_storage_us",
self.clean_unref_from_storage_us.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"clean_dead_slot_us",
self.clean_dead_slot_us.swap(0, Ordering::Relaxed) as i64,
i64
),
);
// Don't need to reset since this tracks the latest updates, not a cumulative total
}
}
#[derive(Debug, Default)]
struct CleanAccountsStats {
purge_stats: PurgeStats,
latest_accounts_index_roots_stats: LatestAccountsIndexRootsStats,
// stats held here and reported by clean_accounts
clean_old_root_us: AtomicU64,
clean_old_root_reclaim_us: AtomicU64,
reset_uncleaned_roots_us: AtomicU64,
remove_dead_accounts_remove_us: AtomicU64,
remove_dead_accounts_shrink_us: AtomicU64,
clean_stored_dead_slots_us: AtomicU64,
}
impl CleanAccountsStats {
fn report(&self) {
self.purge_stats.report("clean_purge_slots_stats", None);
self.latest_accounts_index_roots_stats.report();
}
}
#[derive(Debug, Default)]
struct ShrinkStats {
last_report: AtomicInterval,
num_slots_shrunk: AtomicUsize,
storage_read_elapsed: AtomicU64,
index_read_elapsed: AtomicU64,
find_alive_elapsed: AtomicU64,
create_and_insert_store_elapsed: AtomicU64,
store_accounts_elapsed: AtomicU64,
update_index_elapsed: AtomicU64,
handle_reclaims_elapsed: AtomicU64,
write_storage_elapsed: AtomicU64,
rewrite_elapsed: AtomicU64,
drop_storage_entries_elapsed: AtomicU64,
recycle_stores_write_elapsed: AtomicU64,
accounts_removed: AtomicUsize,
bytes_removed: AtomicU64,
bytes_written: AtomicU64,
skipped_shrink: AtomicU64,
dead_accounts: AtomicU64,
alive_accounts: AtomicU64,
accounts_loaded: AtomicU64,
}
impl ShrinkStats {
fn report(&self) {
if self.last_report.should_update(1000) {
datapoint_info!(
"shrink_stats",
(
"num_slots_shrunk",
self.num_slots_shrunk.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"storage_read_elapsed",
self.storage_read_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"index_read_elapsed",
self.index_read_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"find_alive_elapsed",
self.find_alive_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"create_and_insert_store_elapsed",
self.create_and_insert_store_elapsed
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"store_accounts_elapsed",
self.store_accounts_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"update_index_elapsed",
self.update_index_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"handle_reclaims_elapsed",
self.handle_reclaims_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"write_storage_elapsed",
self.write_storage_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"rewrite_elapsed",
self.rewrite_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"drop_storage_entries_elapsed",
self.drop_storage_entries_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"recycle_stores_write_time",
self.recycle_stores_write_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"accounts_removed",
self.accounts_removed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"bytes_removed",
self.bytes_removed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"bytes_written",
self.bytes_written.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"skipped_shrink",
self.skipped_shrink.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"alive_accounts",
self.alive_accounts.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"dead_accounts",
self.dead_accounts.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"accounts_loaded",
self.accounts_loaded.swap(0, Ordering::Relaxed) as i64,
i64
),
);
}
}
}
fn quarter_thread_count() -> usize {
std::cmp::max(2, num_cpus::get() / 4)
}
pub fn make_min_priority_thread_pool() -> ThreadPool {
// Use lower thread count to reduce priority.
let num_threads = quarter_thread_count();
rayon::ThreadPoolBuilder::new()
.thread_name(|i| format!("solana-cleanup-accounts-{}", i))
.num_threads(num_threads)
.build()
.unwrap()
}
#[cfg(all(test, RUSTC_WITH_SPECIALIZATION))]
impl solana_frozen_abi::abi_example::AbiExample for AccountsDb {
fn example() -> Self {
let accounts_db = AccountsDb::new_single_for_tests();
let key = Pubkey::default();
let some_data_len = 5;
let some_slot: Slot = 0;
let account = AccountSharedData::new(1, some_data_len, &key);
accounts_db.store_uncached(some_slot, &[(&key, &account)]);
accounts_db.add_root(0);
accounts_db
}
}
impl<'a> ZeroLamport for StoredAccountMeta<'a> {
fn is_zero_lamport(&self) -> bool {
self.lamports() == 0
}
}
impl<'a> ReadableAccount for StoredAccountMeta<'a> {
fn lamports(&self) -> u64 {
self.account_meta.lamports
}
fn data(&self) -> &[u8] {
self.data
}
fn owner(&self) -> &Pubkey {
&self.account_meta.owner
}
fn executable(&self) -> bool {
self.account_meta.executable
}
fn rent_epoch(&self) -> Epoch {
self.account_meta.rent_epoch
}
}
struct IndexAccountMapEntry<'a> {
pub write_version: StoredMetaWriteVersion,
pub store_id: AppendVecId,
pub stored_account: StoredAccountMeta<'a>,
}
type GenerateIndexAccountsMap<'a> = HashMap<Pubkey, IndexAccountMapEntry<'a>>;
impl AccountsDb {
pub fn default_for_tests() -> Self {
Self::default_with_accounts_index(AccountInfoAccountsIndex::default_for_tests(), None, None)
}
/// return (num_hash_scan_passes, bins_per_pass)
fn bins_per_pass(num_hash_scan_passes: Option<usize>) -> (usize, usize) {
let num_hash_scan_passes = num_hash_scan_passes.unwrap_or(NUM_SCAN_PASSES_DEFAULT);
let bins_per_pass = PUBKEY_BINS_FOR_CALCULATING_HASHES / num_hash_scan_passes;
assert!(
num_hash_scan_passes <= PUBKEY_BINS_FOR_CALCULATING_HASHES,
"num_hash_scan_passes must be <= {}",
PUBKEY_BINS_FOR_CALCULATING_HASHES
);
assert_eq!(
bins_per_pass * num_hash_scan_passes,
PUBKEY_BINS_FOR_CALCULATING_HASHES
); // evenly divisible
(num_hash_scan_passes, bins_per_pass)
}
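    // Worked example of the arithmetic above, as an illustrative helper
    // (not part of the real API): with PUBKEY_BINS_FOR_CALCULATING_HASHES
    // = 65536 and the default NUM_SCAN_PASSES_DEFAULT = 2, each pass
    // covers 65536 / 2 = 32768 bins, and 32768 * 2 == 65536 satisfies the
    // "evenly divisible" assertion; 3 passes would trip it because 65536
    // is not divisible by 3.
    #[allow(dead_code)]
    fn bins_per_pass_worked_example() -> (usize, usize) {
        let (passes, bins) = Self::bins_per_pass(None);
        debug_assert_eq!(passes, NUM_SCAN_PASSES_DEFAULT);
        debug_assert_eq!(
            bins,
            PUBKEY_BINS_FOR_CALCULATING_HASHES / NUM_SCAN_PASSES_DEFAULT
        );
        (passes, bins)
    }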
fn default_with_accounts_index(
accounts_index: AccountInfoAccountsIndex,
accounts_hash_cache_path: Option<PathBuf>,
num_hash_scan_passes: Option<usize>,
) -> Self {
let num_threads = get_thread_count();
const MAX_READ_ONLY_CACHE_DATA_SIZE: usize = 200_000_000;
let mut temp_accounts_hash_cache_path = None;
let accounts_hash_cache_path = accounts_hash_cache_path.unwrap_or_else(|| {
temp_accounts_hash_cache_path = Some(TempDir::new().unwrap());
temp_accounts_hash_cache_path
.as_ref()
.unwrap()
.path()
.to_path_buf()
});
let mut bank_hashes = HashMap::new();
bank_hashes.insert(0, BankHashInfo::default());
// validate inside here
Self::bins_per_pass(num_hash_scan_passes);
AccountsDb {
accounts_index,
storage: AccountStorage::default(),
accounts_cache: AccountsCache::default(),
sender_bg_hasher: None,
read_only_accounts_cache: ReadOnlyAccountsCache::new(MAX_READ_ONLY_CACHE_DATA_SIZE),
recycle_stores: RwLock::new(RecycleStores::default()),
uncleaned_pubkeys: DashMap::new(),
next_id: AtomicUsize::new(0),
shrink_candidate_slots_v1: Mutex::new(Vec::new()),
shrink_candidate_slots: Mutex::new(HashMap::new()),
write_cache_limit_bytes: None,
write_version: AtomicU64::new(0),
paths: vec![],
accounts_hash_cache_path,
temp_accounts_hash_cache_path,
shrink_paths: RwLock::new(None),
temp_paths: None,
file_size: DEFAULT_FILE_SIZE,
thread_pool: rayon::ThreadPoolBuilder::new()
.num_threads(num_threads)
.thread_name(|i| format!("solana-db-accounts-{}", i))
.build()
.unwrap(),
thread_pool_clean: make_min_priority_thread_pool(),
min_num_stores: num_threads,
bank_hashes: RwLock::new(bank_hashes),
external_purge_slots_stats: PurgeStats::default(),
clean_accounts_stats: CleanAccountsStats::default(),
shrink_stats: ShrinkStats::default(),
stats: AccountsStats::default(),
cluster_type: None,
account_indexes: AccountSecondaryIndexes::default(),
caching_enabled: false,
#[cfg(test)]
load_delay: u64::default(),
#[cfg(test)]
load_limit: AtomicU64::default(),
is_bank_drop_callback_enabled: AtomicBool::default(),
remove_unrooted_slots_synchronization: RemoveUnrootedSlotsSynchronization::default(),
shrink_ratio: AccountShrinkThreshold::default(),
dirty_stores: DashMap::default(),
zero_lamport_accounts_to_purge_after_full_snapshot: DashSet::default(),
accounts_update_notifier: None,
filler_account_count: 0,
filler_account_suffix: None,
num_hash_scan_passes,
}
}
pub fn new_for_tests(paths: Vec<PathBuf>, cluster_type: &ClusterType) -> Self {
AccountsDb::new_with_config(
paths,
cluster_type,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
Some(ACCOUNTS_DB_CONFIG_FOR_TESTING),
None,
)
}
pub fn new_for_tests_with_caching(paths: Vec<PathBuf>, cluster_type: &ClusterType) -> Self {
AccountsDb::new_with_config(
paths,
cluster_type,
AccountSecondaryIndexes::default(),
true,
AccountShrinkThreshold::default(),
Some(ACCOUNTS_DB_CONFIG_FOR_TESTING),
None,
)
}
pub fn new_with_config(
paths: Vec<PathBuf>,
cluster_type: &ClusterType,
account_indexes: AccountSecondaryIndexes,
caching_enabled: bool,
shrink_ratio: AccountShrinkThreshold,
accounts_db_config: Option<AccountsDbConfig>,
accounts_update_notifier: Option<AccountsUpdateNotifier>,
) -> Self {
let accounts_index =
AccountsIndex::new(accounts_db_config.as_ref().and_then(|x| x.index.clone()));
let accounts_hash_cache_path = accounts_db_config
.as_ref()
.and_then(|x| x.accounts_hash_cache_path.clone());
let filler_account_count = accounts_db_config
.as_ref()
.and_then(|cfg| cfg.filler_account_count)
.unwrap_or_default();
let filler_account_suffix = if filler_account_count > 0 {
Some(solana_sdk::pubkey::new_rand())
} else {
None
};
let paths_is_empty = paths.is_empty();
let mut new = Self {
paths,
cluster_type: Some(*cluster_type),
account_indexes,
caching_enabled,
shrink_ratio,
accounts_update_notifier,
filler_account_count,
filler_account_suffix,
write_cache_limit_bytes: accounts_db_config
.as_ref()
.and_then(|x| x.write_cache_limit_bytes),
..Self::default_with_accounts_index(
accounts_index,
accounts_hash_cache_path,
accounts_db_config
.as_ref()
.and_then(|cfg| cfg.hash_calc_num_passes),
)
};
if paths_is_empty {
// Create a temporary set of accounts directories, used primarily
// for testing
let (temp_dirs, paths) = get_temp_accounts_paths(DEFAULT_NUM_DIRS).unwrap();
new.accounts_update_notifier = None;
new.paths = paths;
new.temp_paths = Some(temp_dirs);
};
new.start_background_hasher();
{
for path in new.paths.iter() {
std::fs::create_dir_all(path).expect("Create directory failed.");
}
}
new
}
pub fn set_shrink_paths(&self, paths: Vec<PathBuf>) {
assert!(!paths.is_empty());
let mut shrink_paths = self.shrink_paths.write().unwrap();
for path in &paths {
std::fs::create_dir_all(path).expect("Create directory failed.");
}
*shrink_paths = Some(paths);
}
pub fn file_size(&self) -> u64 {
self.file_size
}
pub fn new_single_for_tests() -> Self {
AccountsDb {
min_num_stores: 0,
..AccountsDb::new_for_tests(Vec::new(), &ClusterType::Development)
}
}
pub fn new_single_for_tests_with_caching() -> Self {
AccountsDb {
min_num_stores: 0,
..AccountsDb::new_for_tests_with_caching(Vec::new(), &ClusterType::Development)
}
}
fn new_storage_entry(&self, slot: Slot, path: &Path, size: u64) -> AccountStorageEntry {
AccountStorageEntry::new(
path,
slot,
self.next_id.fetch_add(1, Ordering::AcqRel),
size,
)
}
pub fn expected_cluster_type(&self) -> ClusterType {
self.cluster_type
.expect("Cluster type must be set at initialization")
}
/// Reclaim older states of accounts older than max_clean_root for AccountsDb bloat mitigation
fn clean_accounts_older_than_root(
&self,
purges: Vec<Pubkey>,
max_clean_root: Option<Slot>,
) -> ReclaimResult {
if purges.is_empty() {
return ReclaimResult::default();
}
// This number isn't carefully chosen; it was just picked so that
// the hot loop will be on the order of ~X ms.
const INDEX_CLEAN_BULK_COUNT: usize = 4096;
let mut clean_rooted = Measure::start("clean_old_root-ms");
let reclaim_vecs = purges
.par_chunks(INDEX_CLEAN_BULK_COUNT)
.map(|pubkeys: &[Pubkey]| {
let mut reclaims = Vec::new();
for pubkey in pubkeys {
self.accounts_index
.clean_rooted_entries(pubkey, &mut reclaims, max_clean_root);
}
reclaims
});
let reclaims: Vec<_> = reclaim_vecs.flatten().collect();
clean_rooted.stop();
inc_new_counter_info!("clean-old-root-par-clean-ms", clean_rooted.as_ms() as usize);
self.clean_accounts_stats
.clean_old_root_us
.fetch_add(clean_rooted.as_us(), Ordering::Relaxed);
let mut measure = Measure::start("clean_old_root_reclaims");
// Don't reset from clean, since the pubkeys in those stores may need to be unref'ed
// and those stores may be used for background hashing.
let reset_accounts = false;
let mut reclaim_result = ReclaimResult::default();
self.handle_reclaims(
&reclaims,
None,
Some(&self.clean_accounts_stats.purge_stats),
Some(&mut reclaim_result),
reset_accounts,
);
measure.stop();
debug!("{} {}", clean_rooted, measure);
inc_new_counter_info!("clean-old-root-reclaim-ms", measure.as_ms() as usize);
self.clean_accounts_stats
.clean_old_root_reclaim_us
.fetch_add(measure.as_us(), Ordering::Relaxed);
reclaim_result
}
fn do_reset_uncleaned_roots(&self, max_clean_root: Option<Slot>) {
let mut measure = Measure::start("reset");
self.accounts_index.reset_uncleaned_roots(max_clean_root);
measure.stop();
self.clean_accounts_stats
.reset_uncleaned_roots_us
.fetch_add(measure.as_us(), Ordering::Relaxed);
}
fn calc_delete_dependencies(
purges: &HashMap<Pubkey, (SlotList<AccountInfo>, u64)>,
store_counts: &mut HashMap<AppendVecId, (usize, HashSet<Pubkey>)>,
) {
// Another pass to check whether there are some filtered accounts which
// do not match the criteria of deleting all appendvecs that contain them;
// if so, increment their storage count.
let mut already_counted = HashSet::new();
for (pubkey, (account_infos, ref_count_from_storage)) in purges.iter() {
let no_delete = if account_infos.len() as u64 != *ref_count_from_storage {
debug!(
"calc_delete_dependencies(),
pubkey: {},
account_infos: {:?},
account_infos_len: {},
ref_count_from_storage: {}",
pubkey,
account_infos,
account_infos.len(),
ref_count_from_storage,
);
true
} else {
let mut no_delete = false;
for (_slot, account_info) in account_infos {
debug!(
"calc_delete_dependencies()
storage id: {},
count len: {}",
account_info.store_id,
store_counts.get(&account_info.store_id).unwrap().0,
);
if store_counts.get(&account_info.store_id).unwrap().0 != 0 {
no_delete = true;
break;
}
}
no_delete
};
if no_delete {
let mut pending_store_ids: HashSet<usize> = HashSet::new();
for (_bank_id, account_info) in account_infos {
if !already_counted.contains(&account_info.store_id) {
pending_store_ids.insert(account_info.store_id);
}
}
while !pending_store_ids.is_empty() {
let id = pending_store_ids.iter().next().cloned().unwrap();
pending_store_ids.remove(&id);
if already_counted.contains(&id) {
continue;
}
store_counts.get_mut(&id).unwrap().0 += 1;
already_counted.insert(id);
let affected_pubkeys = &store_counts.get(&id).unwrap().1;
for key in affected_pubkeys {
for (_slot, account_info) in &purges.get(key).unwrap().0 {
if !already_counted.contains(&account_info.store_id) {
pending_store_ids.insert(account_info.store_id);
}
}
}
}
}
}
}
fn background_hasher(receiver: Receiver<CachedAccount>) {
loop {
let result = receiver.recv();
match result {
Ok(account) => {
// If we hold the only ref, then this account doesn't need to be hashed; ignore it and it will disappear
if Arc::strong_count(&account) > 1 {
// this will cause the hash to be calculated and stored inside the account, if it hasn't been already
let _ = (*account).hash();
};
}
Err(_) => {
break;
}
}
}
}
fn start_background_hasher(&mut self) {
let (sender, receiver) = unbounded();
Builder::new()
.name("solana-db-store-hasher-accounts".to_string())
.spawn(move || {
Self::background_hasher(receiver);
})
.unwrap();
self.sender_bg_hasher = Some(sender);
}
fn purge_keys_exact<'a, C: 'a>(
&'a self,
pubkey_to_slot_set: impl Iterator<Item = &'a (Pubkey, C)>,
) -> Vec<(u64, AccountInfo)>
where
C: Contains<'a, Slot>,
{
let mut reclaims = Vec::new();
let mut dead_keys = Vec::new();
for (pubkey, slots_set) in pubkey_to_slot_set {
let is_empty = self
.accounts_index
.purge_exact(pubkey, slots_set, &mut reclaims);
if is_empty {
dead_keys.push(pubkey);
}
}
self.accounts_index
.handle_dead_keys(&dead_keys, &self.account_indexes);
reclaims
}
fn max_clean_root(&self, proposed_clean_root: Option<Slot>) -> Option<Slot> {
match (
self.accounts_index.min_ongoing_scan_root(),
proposed_clean_root,
) {
(None, None) => None,
(Some(min_scan_root), None) => Some(min_scan_root),
(None, Some(proposed_clean_root)) => Some(proposed_clean_root),
(Some(min_scan_root), Some(proposed_clean_root)) => {
Some(std::cmp::min(min_scan_root, proposed_clean_root))
}
}
}
/// Collect all the uncleaned slots, up to a max slot
///
/// Search through the uncleaned Pubkeys and return all the slots, up to a maximum slot.
fn collect_uncleaned_slots_up_to_slot(&self, max_slot: Slot) -> Vec<Slot> {
self.uncleaned_pubkeys
.iter()
.filter_map(|entry| {
let slot = *entry.key();
(slot <= max_slot).then(|| slot)
})
.collect()
}
/// Remove `slots` from `uncleaned_pubkeys` and collect all pubkeys
///
/// For each slot in the list of uncleaned slots, remove it from the `uncleaned_pubkeys` Map
/// and collect all the pubkeys to return.
fn remove_uncleaned_slots_and_collect_pubkeys(
&self,
uncleaned_slots: Vec<Slot>,
) -> Vec<Vec<Pubkey>> {
uncleaned_slots
.into_iter()
.filter_map(|uncleaned_slot| {
self.uncleaned_pubkeys
.remove(&uncleaned_slot)
.map(|(_removed_slot, removed_pubkeys)| removed_pubkeys)
})
.collect()
}
/// Remove uncleaned slots, up to a maximum slot, and return the collected pubkeys
///
fn remove_uncleaned_slots_and_collect_pubkeys_up_to_slot(
&self,
max_slot: Slot,
) -> Vec<Vec<Pubkey>> {
let uncleaned_slots = self.collect_uncleaned_slots_up_to_slot(max_slot);
self.remove_uncleaned_slots_and_collect_pubkeys(uncleaned_slots)
}
// Construct a vec of pubkeys for cleaning from:
//   uncleaned_pubkeys - the delta set of pubkeys updated in rooted slots since the last clean
//   dirty_stores - the set of stores which had accounts removed or were recently rooted
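// In short, the data flow below is (a descriptive sketch, not additional behavior):
//   pubkeys from dirty_stores (accounts in recently touched storages)
//     + uncleaned_pubkeys deltas for rooted slots up to max_slot
//     + zero_lamport_accounts_to_purge_after_full_snapshot entries that became eligible
//   -> de-duplicated through a DashSet -> Vec<Pubkey> handed back to clean_accounts()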
fn construct_candidate_clean_keys(
&self,
max_clean_root: Option<Slot>,
last_full_snapshot_slot: Option<Slot>,
timings: &mut CleanKeyTimings,
) -> Vec<Pubkey> {
let mut dirty_store_processing_time = Measure::start("dirty_store_processing");
let max_slot = max_clean_root.unwrap_or_else(|| self.accounts_index.max_root());
let mut dirty_stores = Vec::with_capacity(self.dirty_stores.len());
self.dirty_stores.retain(|(slot, _store_id), store| {
if *slot > max_slot {
true
} else {
dirty_stores.push((*slot, store.clone()));
false
}
});
let dirty_stores_len = dirty_stores.len();
let pubkeys = DashSet::new();
for (_slot, store) in dirty_stores {
for account in store.accounts.accounts(0) {
pubkeys.insert(account.meta.pubkey);
}
}
trace!(
"dirty_stores.len: {} pubkeys.len: {}",
dirty_stores_len,
pubkeys.len()
);
timings.dirty_pubkeys_count = pubkeys.len() as u64;
dirty_store_processing_time.stop();
timings.dirty_store_processing_us += dirty_store_processing_time.as_us();
let mut collect_delta_keys = Measure::start("key_create");
let delta_keys = self.remove_uncleaned_slots_and_collect_pubkeys_up_to_slot(max_slot);
collect_delta_keys.stop();
timings.collect_delta_keys_us += collect_delta_keys.as_us();
let mut delta_insert = Measure::start("delta_insert");
self.thread_pool_clean.install(|| {
delta_keys.par_iter().for_each(|keys| {
for key in keys {
pubkeys.insert(*key);
}
});
});
delta_insert.stop();
timings.delta_insert_us += delta_insert.as_us();
timings.delta_key_count = pubkeys.len() as u64;
let mut hashset_to_vec = Measure::start("flat_map");
let mut pubkeys: Vec<Pubkey> = pubkeys.into_iter().collect();
hashset_to_vec.stop();
timings.hashset_to_vec_us += hashset_to_vec.as_us();
// Check if we should purge any of the zero_lamport_accounts_to_purge_after_full_snapshot,
// based on the last_full_snapshot_slot.
assert!(
last_full_snapshot_slot.is_some() || self.zero_lamport_accounts_to_purge_after_full_snapshot.is_empty(),
"if snapshots are disabled, then zero_lamport_accounts_to_purge_after_full_snapshot should always be empty"
);
if let Some(last_full_snapshot_slot) = last_full_snapshot_slot {
self.zero_lamport_accounts_to_purge_after_full_snapshot
.retain(|(slot, pubkey)| {
let is_candidate_for_clean =
max_slot >= *slot && last_full_snapshot_slot >= *slot;
if is_candidate_for_clean {
pubkeys.push(*pubkey);
}
!is_candidate_for_clean
});
}
pubkeys
}
// Purge zero lamport accounts and older rooted account states as garbage
// collection
// Only remove those accounts where the entire rooted history of the account
// can be purged because there are no live append vecs in the ancestors
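// Illustrative call site (a hedged sketch; `accounts_db` and `full_snapshot_slot` are
// assumed to be in scope and are not defined in this file):
//
//     // clean up to the latest max root, outside of startup, with the last full
//     // snapshot known to have been taken at `full_snapshot_slot`
//     accounts_db.clean_accounts(None, false, Some(full_snapshot_slot));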
pub fn clean_accounts(
&self,
max_clean_root: Option<Slot>,
is_startup: bool,
last_full_snapshot_slot: Option<Slot>,
) {
let mut measure_all = Measure::start("clean_accounts");
let max_clean_root = self.max_clean_root(max_clean_root);
// hold a lock to prevent slot shrinking from running, because it might modify some rooted
// slot storages, which cannot happen while we're cleaning accounts since we're also
// modifying the rooted slot storages!
let mut candidates_v1 = self.shrink_candidate_slots_v1.lock().unwrap();
self.report_store_stats();
let mut key_timings = CleanKeyTimings::default();
let mut pubkeys = self.construct_candidate_clean_keys(
max_clean_root,
last_full_snapshot_slot,
&mut key_timings,
);
let mut sort = Measure::start("sort");
if is_startup {
pubkeys.par_sort_unstable();
} else {
self.thread_pool_clean
.install(|| pubkeys.par_sort_unstable());
}
sort.stop();
let total_keys_count = pubkeys.len();
let mut accounts_scan = Measure::start("accounts_scan");
let uncleaned_roots = self.accounts_index.clone_uncleaned_roots();
let uncleaned_roots_len = self.accounts_index.uncleaned_roots_len();
let found_not_zero_accum = AtomicU64::new(0);
let not_found_on_fork_accum = AtomicU64::new(0);
let missing_accum = AtomicU64::new(0);
let useful_accum = AtomicU64::new(0);
// parallel scan the index.
let (mut purges_zero_lamports, purges_old_accounts) = {
let do_clean_scan = || {
pubkeys
.par_chunks(4096)
.map(|pubkeys: &[Pubkey]| {
let mut purges_zero_lamports = HashMap::new();
let mut purges_old_accounts = Vec::new();
let mut found_not_zero = 0;
let mut not_found_on_fork = 0;
let mut missing = 0;
let mut useful = 0;
self.accounts_index.scan(
pubkeys,
max_clean_root,
// return true if we want this item to remain in the cache
|exists, slot_list, index_in_slot_list, pubkey, ref_count| {
let mut useless = true;
if !exists {
missing += 1;
} else {
match index_in_slot_list {
Some(index_in_slot_list) => {
// found info relative to max_clean_root
let (slot, account_info) =
&slot_list[index_in_slot_list];
if account_info.lamports == 0 {
useless = false;
purges_zero_lamports.insert(
*pubkey,
(
self.accounts_index.get_rooted_entries(
slot_list,
max_clean_root,
),
ref_count,
),
);
} else {
found_not_zero += 1;
}
let slot = *slot;
if uncleaned_roots.contains(&slot) {
// Assertion enforced by `accounts_index.get()`, the latest slot
// will not be greater than the given `max_clean_root`
if let Some(max_clean_root) = max_clean_root {
assert!(slot <= max_clean_root);
}
purges_old_accounts.push(*pubkey);
useless = false;
}
}
None => {
// This pubkey is in the index but not in a root slot, so clean
// it up by adding it to the to-be-purged list.
//
// Also, this pubkey must have been touched by some slot since
// it was in the dirty list, so we assume that the slot it was
// touched in must be unrooted.
not_found_on_fork += 1;
useless = false;
purges_old_accounts.push(*pubkey);
}
}
}
if !useless {
useful += 1;
}
!useless
},
);
found_not_zero_accum.fetch_add(found_not_zero, Ordering::Relaxed);
not_found_on_fork_accum.fetch_add(not_found_on_fork, Ordering::Relaxed);
missing_accum.fetch_add(missing, Ordering::Relaxed);
useful_accum.fetch_add(useful, Ordering::Relaxed);
(purges_zero_lamports, purges_old_accounts)
})
.reduce(
|| (HashMap::new(), Vec::new()),
|mut m1, m2| {
// Collapse down the hashmaps/vecs into one.
m1.0.extend(m2.0);
m1.1.extend(m2.1);
m1
},
)
};
if is_startup {
do_clean_scan()
} else {
self.thread_pool_clean.install(do_clean_scan)
}
};
accounts_scan.stop();
let mut clean_old_rooted = Measure::start("clean_old_roots");
let (purged_account_slots, removed_accounts) =
self.clean_accounts_older_than_root(purges_old_accounts, max_clean_root);
if self.caching_enabled {
self.do_reset_uncleaned_roots(max_clean_root);
} else {
self.do_reset_uncleaned_roots_v1(&mut candidates_v1, max_clean_root);
}
clean_old_rooted.stop();
let mut store_counts_time = Measure::start("store_counts");
// Calculate store counts as if everything was purged
// Then purge if we can
let mut store_counts: HashMap<AppendVecId, (usize, HashSet<Pubkey>)> = HashMap::new();
for (key, (account_infos, ref_count)) in purges_zero_lamports.iter_mut() {
if purged_account_slots.contains_key(key) {
*ref_count = self.accounts_index.ref_count_from_storage(key);
}
account_infos.retain(|(slot, account_info)| {
let was_slot_purged = purged_account_slots
.get(key)
.map(|slots_removed| slots_removed.contains(slot))
.unwrap_or(false);
if was_slot_purged {
// No need to look up the slot storage below if the entire
// slot was purged
return false;
}
// Check if this update in `slot` to the account with `key` was reclaimed earlier by
// `clean_accounts_older_than_root()`
let was_reclaimed = removed_accounts
.get(&account_info.store_id)
.map(|store_removed| store_removed.contains(&account_info.offset))
.unwrap_or(false);
if was_reclaimed {
return false;
}
if let Some(store_count) = store_counts.get_mut(&account_info.store_id) {
store_count.0 -= 1;
store_count.1.insert(*key);
} else {
let mut key_set = HashSet::new();
key_set.insert(*key);
assert!(
!account_info.is_cached(),
"The Accounts Cache must be flushed first for this account info. pubkey: {}, slot: {}",
*key,
*slot
);
let count = self
.storage
.slot_store_count(*slot, account_info.store_id)
.unwrap()
- 1;
debug!(
"store_counts, inserting slot: {}, store id: {}, count: {}",
slot, account_info.store_id, count
);
store_counts.insert(account_info.store_id, (count, key_set));
}
true
});
}
store_counts_time.stop();
let mut calc_deps_time = Measure::start("calc_deps");
Self::calc_delete_dependencies(&purges_zero_lamports, &mut store_counts);
calc_deps_time.stop();
let mut purge_filter = Measure::start("purge_filter");
self.filter_zero_lamport_clean_for_incremental_snapshots(
max_clean_root,
last_full_snapshot_slot,
&store_counts,
&mut purges_zero_lamports,
);
purge_filter.stop();
let mut reclaims_time = Measure::start("reclaims");
// Recalculate reclaims with new purge set
let pubkey_to_slot_set: Vec<_> = purges_zero_lamports
.into_iter()
.map(|(key, (slots_list, _ref_count))| {
(
key,
slots_list
.into_iter()
.map(|(slot, _)| slot)
.collect::<HashSet<Slot>>(),
)
})
.collect();
let reclaims = self.purge_keys_exact(pubkey_to_slot_set.iter());
// Don't reset from clean, since the pubkeys in those stores may need to be unref'ed
// and those stores may be used for background hashing.
let reset_accounts = false;
let mut reclaim_result = ReclaimResult::default();
let reclaim_result = Some(&mut reclaim_result);
self.handle_reclaims(
&reclaims,
None,
Some(&self.clean_accounts_stats.purge_stats),
reclaim_result,
reset_accounts,
);
reclaims_time.stop();
measure_all.stop();
self.clean_accounts_stats.report();
datapoint_info!(
"clean_accounts",
("total_us", measure_all.as_us(), i64),
(
"collect_delta_keys_us",
key_timings.collect_delta_keys_us,
i64
),
(
"dirty_store_processing_us",
key_timings.dirty_store_processing_us,
i64
),
("accounts_scan", accounts_scan.as_us() as i64, i64),
("clean_old_rooted", clean_old_rooted.as_us() as i64, i64),
("store_counts", store_counts_time.as_us() as i64, i64),
("purge_filter", purge_filter.as_us() as i64, i64),
("calc_deps", calc_deps_time.as_us() as i64, i64),
("reclaims", reclaims_time.as_us() as i64, i64),
("delta_insert_us", key_timings.delta_insert_us, i64),
("delta_key_count", key_timings.delta_key_count, i64),
("dirty_pubkeys_count", key_timings.dirty_pubkeys_count, i64),
("sort_us", sort.as_us(), i64),
("useful_keys", useful_accum.load(Ordering::Relaxed), i64),
("total_keys_count", total_keys_count, i64),
(
"scan_found_not_zero",
found_not_zero_accum.load(Ordering::Relaxed),
i64
),
(
"scan_not_found_on_fork",
not_found_on_fork_accum.load(Ordering::Relaxed),
i64
),
("scan_missing", missing_accum.load(Ordering::Relaxed), i64),
("uncleaned_roots_len", uncleaned_roots_len, i64),
(
"clean_old_root_us",
self.clean_accounts_stats
.clean_old_root_us
.swap(0, Ordering::Relaxed),
i64
),
(
"clean_old_root_reclaim_us",
self.clean_accounts_stats
.clean_old_root_reclaim_us
.swap(0, Ordering::Relaxed),
i64
),
(
"reset_uncleaned_roots_us",
self.clean_accounts_stats
.reset_uncleaned_roots_us
.swap(0, Ordering::Relaxed),
i64
),
(
"remove_dead_accounts_remove_us",
self.clean_accounts_stats
.remove_dead_accounts_remove_us
.swap(0, Ordering::Relaxed),
i64
),
(
"remove_dead_accounts_shrink_us",
self.clean_accounts_stats
.remove_dead_accounts_shrink_us
.swap(0, Ordering::Relaxed),
i64
),
(
"clean_stored_dead_slots_us",
self.clean_accounts_stats
.clean_stored_dead_slots_us
.swap(0, Ordering::Relaxed),
i64
),
);
}
/// Removes the accounts in the input `reclaims` from the tracked "count" of
/// their corresponding storage entries. Note this does not actually free
/// the memory from the storage entries until all the storage entries for
/// a given slot `S` are empty, at which point `process_dead_slots` will
/// remove all the storage entries for `S`.
///
/// # Arguments
/// * `reclaims` - The accounts to remove from storage entries' "count". Note here
/// that we should not remove cache entries, only entries for accounts actually
/// stored in a storage entry.
///
/// * `expected_single_dead_slot` - A correctness assertion. If this is equal to `Some(S)`,
/// then the function will check that the only slot being cleaned up in `reclaims`
/// is the slot == `S`. This is true for instance when `handle_reclaims` is called
/// from store or slot shrinking, as those should only touch the slot they are
/// currently storing to or shrinking.
///
/// * `purge_stats` - The stats used to track performance of purging dead slots. This
/// also serves as a correctness assertion. If `purge_stats.is_none()`, this implies
/// there can be no dead slots that happen as a result of this call, and the function
/// will check that no slots are cleaned up/removed via `process_dead_slots`. For instance,
/// on store, no slots should be cleaned up, but the background clean-accounts pass
/// purges accounts from old rooted slots, so outdated slots may be removed there.
///
/// * `reclaim_result` - Information about accounts that were removed from storage; this does
/// not include accounts that were removed from the cache.
///
/// * `reset_accounts` - Reset the append_vec store when the store is dead (count==0).
/// From the clean and shrink paths it should be false, since there may be an in-progress
/// hash operation and the stores may hold accounts that need to be unref'ed.
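///
/// A minimal sketch of the clean-path invocation (mirroring the call in `clean_accounts()`
/// above; the argument values shown are illustrative, not prescriptive):
///
/// ```text
/// let mut reclaim_result = ReclaimResult::default();
/// self.handle_reclaims(
///     &reclaims,
///     None,                                         // reclaims may span many slots
///     Some(&self.clean_accounts_stats.purge_stats), // dead slots are possible here
///     Some(&mut reclaim_result),
///     false,                                        // don't reset accounts from clean
/// );
/// ```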
fn handle_reclaims(
&self,
reclaims: SlotSlice<AccountInfo>,
expected_single_dead_slot: Option<Slot>,
// TODO: coalesce `purge_stats` and `reclaim_result` together into one option, as they
// are both either Some or None
purge_stats: Option<&PurgeStats>,
reclaim_result: Option<&mut ReclaimResult>,
reset_accounts: bool,
) {
if reclaims.is_empty() {
return;
}
let (purged_account_slots, reclaimed_offsets) =
if let Some((ref mut x, ref mut y)) = reclaim_result {
(Some(x), Some(y))
} else {
(None, None)
};
let dead_slots = self.remove_dead_accounts(
reclaims,
expected_single_dead_slot,
reclaimed_offsets,
reset_accounts,
);
if purge_stats.is_none() {
assert!(dead_slots.is_empty());
} else if let Some(expected_single_dead_slot) = expected_single_dead_slot {
assert!(dead_slots.len() <= 1);
if dead_slots.len() == 1 {
assert!(dead_slots.contains(&expected_single_dead_slot));
}
}
if let Some(purge_stats) = purge_stats {
self.process_dead_slots(&dead_slots, purged_account_slots, purge_stats);
}
}
/// During clean, some zero-lamport accounts that are marked for purge should *not* actually
/// get purged. Filter out those accounts here.
///
/// When using incremental snapshots, do not purge zero-lamport accounts if the slot is higher
/// than the last full snapshot slot. This is to protect against the following scenario:
///
/// ```text
/// A full snapshot is taken, and it contains an account with a non-zero balance. Later,
/// that account's balance goes to zero. Eventually cleaning runs; without this filtering, the
/// account would be cleaned up. Finally, an incremental snapshot is taken.
///
/// Later, the incremental (and full) snapshot is used to rebuild the bank and accounts
/// database (e.g. if the node restarts). The full snapshot _does_ contain the account (from
/// above) and its balance is non-zero, however, since the account was cleaned up in a later
/// slot, the incremental snapshot does not contain any info about this account, thus, the
/// accounts database will contain the old info from this account, which has its old non-zero
/// balance. Very bad!
/// ```
///
/// This filtering step can be skipped if there is no `last_full_snapshot_slot`, or if the
/// `max_clean_root` is less-than-or-equal-to the `last_full_snapshot_slot`.
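///
/// A small numeric sketch (slot numbers are made up): with `last_full_snapshot_slot = 100`
/// and `max_clean_root = 150`, the filtering applies. A zero-lamport account whose newest
/// rooted update is at slot 120 is *not* purged now; `(120, pubkey)` is instead remembered
/// in `zero_lamport_accounts_to_purge_after_full_snapshot` and reconsidered after the next
/// full snapshot. The same account with its newest update at slot 90 is purged as usual.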
fn filter_zero_lamport_clean_for_incremental_snapshots(
&self,
max_clean_root: Option<Slot>,
last_full_snapshot_slot: Option<Slot>,
store_counts: &HashMap<AppendVecId, (usize, HashSet<Pubkey>)>,
purges_zero_lamports: &mut HashMap<Pubkey, (SlotList<AccountInfo>, RefCount)>,
) {
let should_filter_for_incremental_snapshots =
max_clean_root.unwrap_or(Slot::MAX) > last_full_snapshot_slot.unwrap_or(Slot::MAX);
assert!(
last_full_snapshot_slot.is_some() || !should_filter_for_incremental_snapshots,
"if filtering for incremental snapshots, then snapshots should be enabled",
);
purges_zero_lamports.retain(|pubkey, (slot_account_infos, _ref_count)| {
// Only keep purges_zero_lamports where the entire history of the account in the root set
// can be purged. All AppendVecs for those updates are dead.
for (_slot, account_info) in slot_account_infos.iter() {
if store_counts.get(&account_info.store_id).unwrap().0 != 0 {
return false;
}
}
// Exit early if not filtering more for incremental snapshots
if !should_filter_for_incremental_snapshots {
return true;
}
let slot_account_info_at_highest_slot = slot_account_infos
.iter()
.max_by_key(|(slot, _account_info)| slot);
slot_account_info_at_highest_slot.map_or(true, |(slot, account_info)| {
// Do *not* purge zero-lamport accounts if the slot is greater than the last full
// snapshot slot. Since we're `retain`ing the accounts-to-purge, I felt creating
// the `cannot_purge` variable made this easier to understand. Accounts that do
// not get purged here are added to a list so they can be considered for purging later
// (i.e. after the next full snapshot).
assert!(account_info.is_zero_lamport());
let cannot_purge = *slot > last_full_snapshot_slot.unwrap();
if cannot_purge {
self.zero_lamport_accounts_to_purge_after_full_snapshot
.insert((*slot, *pubkey));
}
!cannot_purge
})
});
}
// Must be kept private! This does sensitive cleanup that should only be called from
// supported pipelines in AccountsDb.
fn process_dead_slots(
&self,
dead_slots: &HashSet<Slot>,
purged_account_slots: Option<&mut AccountSlots>,
purge_stats: &PurgeStats,
) {
if dead_slots.is_empty() {
return;
}
let mut clean_dead_slots = Measure::start("reclaims::clean_dead_slots");
self.clean_stored_dead_slots(dead_slots, purged_account_slots);
clean_dead_slots.stop();
let mut purge_removed_slots = Measure::start("reclaims::purge_removed_slots");
self.purge_dead_slots_from_storage(dead_slots.iter(), purge_stats);
purge_removed_slots.stop();
// If the slot is dead, remove the need to shrink the storages as
// the storage entries will be purged.
{
let mut list = self.shrink_candidate_slots.lock().unwrap();
for slot in dead_slots {
list.remove(slot);
}
}
debug!(
"process_dead_slots({}): {} {} {:?}",
dead_slots.len(),
clean_dead_slots,
purge_removed_slots,
dead_slots,
);
}
fn load_accounts_index_for_shrink<'a, I>(
&'a self,
iter: I,
alive_accounts: &mut Vec<(&'a Pubkey, &'a FoundStoredAccount<'a>)>,
unrefed_pubkeys: &mut Vec<&'a Pubkey>,
) -> usize
where
I: Iterator<Item = &'a (Pubkey, FoundStoredAccount<'a>)>,
{
let mut alive_total = 0;
let mut alive = 0;
let mut dead = 0;
iter.for_each(|(pubkey, stored_account)| {
let lookup = self.accounts_index.get_account_read_entry(pubkey);
if let Some(locked_entry) = lookup {
let is_alive = locked_entry.slot_list().iter().any(|(_slot, i)| {
i.store_id == stored_account.store_id
&& i.offset == stored_account.account.offset
});
if !is_alive {
// This pubkey was found in the storage, but no longer exists in the index.
// It would have had a ref to the storage from the initial store, but it will
// not exist in the re-written slot. Unref it to keep the index consistent with
// rewriting the storage entries.
unrefed_pubkeys.push(pubkey);
locked_entry.unref();
dead += 1;
} else {
alive_accounts.push((pubkey, stored_account));
alive_total += stored_account.account_size;
alive += 1;
}
}
});
self.shrink_stats
.alive_accounts
.fetch_add(alive, Ordering::Relaxed);
self.shrink_stats
.dead_accounts
.fetch_add(dead, Ordering::Relaxed);
alive_total
}
fn do_shrink_slot_stores<'a, I>(&'a self, slot: Slot, stores: I) -> usize
where
I: Iterator<Item = &'a Arc<AccountStorageEntry>>,
{
debug!("do_shrink_slot_stores: slot: {}", slot);
let mut stored_accounts: HashMap<Pubkey, FoundStoredAccount> = HashMap::new();
let mut original_bytes = 0;
let mut num_stores = 0;
for store in stores {
let mut start = 0;
original_bytes += store.total_bytes();
let store_id = store.append_vec_id();
while let Some((account, next)) = store.accounts.get_account(start) {
let new_entry = FoundStoredAccount {
account,
store_id,
account_size: next - start,
};
match stored_accounts.entry(new_entry.account.meta.pubkey) {
Entry::Occupied(mut occupied_entry) => {
if new_entry.account.meta.write_version
> occupied_entry.get().account.meta.write_version
{
occupied_entry.insert(new_entry);
}
}
Entry::Vacant(vacant_entry) => {
vacant_entry.insert(new_entry);
}
}
start = next;
}
num_stores += 1;
}
// sort by pubkey to keep account index lookups close
let mut stored_accounts = stored_accounts.into_iter().collect::<Vec<_>>();
stored_accounts.sort_unstable_by(|a, b| a.0.cmp(&b.0));
let mut index_read_elapsed = Measure::start("index_read_elapsed");
let alive_total_collect = AtomicUsize::new(0);
let len = stored_accounts.len();
let alive_accounts_collect = Mutex::new(Vec::with_capacity(len));
let unrefed_pubkeys_collect = Mutex::new(Vec::with_capacity(len));
self.shrink_stats
.accounts_loaded
.fetch_add(len as u64, Ordering::Relaxed);
self.thread_pool.install(|| {
let chunk_size = 50; // # accounts/thread
let chunks = len / chunk_size + 1;
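// e.g. (illustrative numbers) len = 120 and chunk_size = 50 gives chunks = 3, covering
// index ranges 0..50, 50..100, and 100..120 via the skip/take below; the last chunk is
// simply short.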
(0..chunks).into_par_iter().for_each(|chunk| {
let skip = chunk * chunk_size;
let mut alive_accounts = Vec::with_capacity(chunk_size);
let mut unrefed_pubkeys = Vec::with_capacity(chunk_size);
let alive_total = self.load_accounts_index_for_shrink(
stored_accounts.iter().skip(skip).take(chunk_size),
&mut alive_accounts,
&mut unrefed_pubkeys,
);
// collect
alive_accounts_collect
.lock()
.unwrap()
.append(&mut alive_accounts);
unrefed_pubkeys_collect
.lock()
.unwrap()
.append(&mut unrefed_pubkeys);
alive_total_collect.fetch_add(alive_total, Ordering::Relaxed);
});
});
let alive_accounts = alive_accounts_collect.into_inner().unwrap();
let unrefed_pubkeys = unrefed_pubkeys_collect.into_inner().unwrap();
let alive_total = alive_total_collect.load(Ordering::Relaxed);
index_read_elapsed.stop();
let aligned_total: u64 = Self::page_align(alive_total as u64);
// This shouldn't happen if alive_bytes/approx_stored_count are accurate
if Self::should_not_shrink(aligned_total, original_bytes, num_stores) {
self.shrink_stats
.skipped_shrink
.fetch_add(1, Ordering::Relaxed);
for pubkey in unrefed_pubkeys {
if let Some(locked_entry) = self.accounts_index.get_account_read_entry(pubkey) {
locked_entry.addref();
}
}
return 0;
}
let total_starting_accounts = stored_accounts.len();
let total_accounts_after_shrink = alive_accounts.len();
debug!(
"shrinking: slot: {}, accounts: ({} => {}) bytes: ({} ; aligned to: {}) original: {}",
slot,
total_starting_accounts,
total_accounts_after_shrink,
alive_total,
aligned_total,
original_bytes,
);
let mut rewrite_elapsed = Measure::start("rewrite_elapsed");
let mut dead_storages = vec![];
let mut find_alive_elapsed = 0;
let mut create_and_insert_store_elapsed = 0;
let mut write_storage_elapsed = 0;
let mut store_accounts_timing = StoreAccountsTiming::default();
if aligned_total > 0 {
let mut start = Measure::start("find_alive_elapsed");
let mut accounts = Vec::with_capacity(alive_accounts.len());
let mut hashes = Vec::with_capacity(alive_accounts.len());
let mut write_versions = Vec::with_capacity(alive_accounts.len());
for (pubkey, alive_account) in alive_accounts {
accounts.push((pubkey, &alive_account.account));
hashes.push(alive_account.account.hash);
write_versions.push(alive_account.account.meta.write_version);
}
start.stop();
find_alive_elapsed = start.as_us();
let mut start = Measure::start("create_and_insert_store_elapsed");
let shrunken_store = if let Some(new_store) =
self.try_recycle_and_insert_store(slot, aligned_total, aligned_total + 1024)
{
new_store
} else {
let maybe_shrink_paths = self.shrink_paths.read().unwrap();
if let Some(ref shrink_paths) = *maybe_shrink_paths {
self.create_and_insert_store_with_paths(
slot,
aligned_total,
"shrink-w-path",
shrink_paths,
)
} else {
self.create_and_insert_store(slot, aligned_total, "shrink")
}
};
start.stop();
create_and_insert_store_elapsed = start.as_us();
// here, we're writing back alive_accounts. That should be an atomic operation
// without use of rather wide locks in this whole function, because we're
// mutating rooted slots; there should be no writers to them.
store_accounts_timing = self.store_accounts_frozen(
slot,
&accounts,
Some(&hashes),
Some(Box::new(move |_, _| shrunken_store.clone())),
Some(Box::new(write_versions.into_iter())),
);
// `store_accounts_frozen()` above may have purged accounts from some
// other storage entries (the ones that were just overwritten by this
// new storage entry). This means some of those stores might have caused
// this slot to be added to `self.shrink_candidate_slots`, so delete
// those here
self.shrink_candidate_slots.lock().unwrap().remove(&slot);
// Purge old, overwritten storage entries
let mut start = Measure::start("write_storage_elapsed");
if let Some(slot_stores) = self.storage.get_slot_stores(slot) {
slot_stores.write().unwrap().retain(|_key, store| {
if store.count() == 0 {
self.dirty_stores
.insert((slot, store.append_vec_id()), store.clone());
dead_storages.push(store.clone());
false
} else {
true
}
});
}
start.stop();
write_storage_elapsed = start.as_us();
}
rewrite_elapsed.stop();
let mut recycle_stores_write_elapsed = Measure::start("recycle_stores_write_time");
let mut recycle_stores = self.recycle_stores.write().unwrap();
recycle_stores_write_elapsed.stop();
let mut drop_storage_entries_elapsed = Measure::start("drop_storage_entries_elapsed");
if recycle_stores.entry_count() < MAX_RECYCLE_STORES {
recycle_stores.add_entries(dead_storages);
drop(recycle_stores);
} else {
self.stats
.dropped_stores
.fetch_add(dead_storages.len() as u64, Ordering::Relaxed);
drop(recycle_stores);
drop(dead_storages);
}
drop_storage_entries_elapsed.stop();
self.shrink_stats
.num_slots_shrunk
.fetch_add(1, Ordering::Relaxed);
self.shrink_stats
.index_read_elapsed
.fetch_add(index_read_elapsed.as_us(), Ordering::Relaxed);
self.shrink_stats
.find_alive_elapsed
.fetch_add(find_alive_elapsed, Ordering::Relaxed);
self.shrink_stats
.create_and_insert_store_elapsed
.fetch_add(create_and_insert_store_elapsed, Ordering::Relaxed);
self.shrink_stats.store_accounts_elapsed.fetch_add(
store_accounts_timing.store_accounts_elapsed,
Ordering::Relaxed,
);
self.shrink_stats.update_index_elapsed.fetch_add(
store_accounts_timing.update_index_elapsed,
Ordering::Relaxed,
);
self.shrink_stats.handle_reclaims_elapsed.fetch_add(
store_accounts_timing.handle_reclaims_elapsed,
Ordering::Relaxed,
);
self.shrink_stats
.write_storage_elapsed
.fetch_add(write_storage_elapsed, Ordering::Relaxed);
self.shrink_stats
.rewrite_elapsed
.fetch_add(rewrite_elapsed.as_us(), Ordering::Relaxed);
self.shrink_stats
.drop_storage_entries_elapsed
.fetch_add(drop_storage_entries_elapsed.as_us(), Ordering::Relaxed);
self.shrink_stats
.recycle_stores_write_elapsed
.fetch_add(recycle_stores_write_elapsed.as_us(), Ordering::Relaxed);
self.shrink_stats.accounts_removed.fetch_add(
total_starting_accounts - total_accounts_after_shrink,
Ordering::Relaxed,
);
self.shrink_stats.bytes_removed.fetch_add(
original_bytes.saturating_sub(aligned_total),
Ordering::Relaxed,
);
self.shrink_stats
.bytes_written
.fetch_add(aligned_total, Ordering::Relaxed);
self.shrink_stats.report();
total_accounts_after_shrink
}
// Reads all accounts in the given slot's AppendVecs, filters down to only the alive ones,
// then creates a minimal AppendVec filled with those alive accounts.
fn shrink_slot_forced(&self, slot: Slot) -> usize {
debug!("shrink_slot_forced: slot: {}", slot);
if let Some(stores_lock) = self.storage.get_slot_stores(slot) {
let stores: Vec<Arc<AccountStorageEntry>> =
stores_lock.read().unwrap().values().cloned().collect();
if !Self::is_shrinking_productive(slot, &stores) {
return 0;
}
self.do_shrink_slot_stores(slot, stores.iter())
} else {
0
}
}
fn all_slots_in_storage(&self) -> Vec<Slot> {
self.storage.all_slots()
}
fn all_root_slots_in_index(&self) -> Vec<Slot> {
self.accounts_index.all_roots()
}
/// Given the input `ShrinkCandidates`, this function sorts the stores by their alive ratio
/// in increasing order, with the most sparse entries in front. It then simulates the
/// shrinking by working on the most sparse entries first; once the overall alive ratio is
/// achieved, it stops and returns both the filtered-down candidates and the candidates that
/// were skipped in this round and might be eligible for a future shrink.
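///
/// A worked sketch with hypothetical numbers: say there are two candidate stores, A with
/// 1.0 MiB total / 0.1 MiB alive and B with 1.0 MiB total / 0.9 MiB alive, and
/// `shrink_ratio` is 0.8. Sorted ascending, A is visited first: the overall alive ratio is
/// (0.1 + 0.9) / 2.0 = 0.5, below the target, so A is selected and `total_bytes` drops by
/// roughly 0.9 MiB. When B is visited the ratio is about 1.0 / 1.1 ≈ 0.91, above the
/// target; since B's own alive ratio (0.9) is not below `shrink_ratio`, the loop stops and
/// B is left alone (page alignment makes the real numbers slightly coarser).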
fn select_candidates_by_total_usage(
&self,
shrink_slots: &ShrinkCandidates,
shrink_ratio: f64,
) -> (ShrinkCandidates, ShrinkCandidates) {
struct StoreUsageInfo {
slot: Slot,
alive_ratio: f64,
store: Arc<AccountStorageEntry>,
}
let mut measure = Measure::start("select_top_sparse_storage_entries-ms");
let mut store_usage: Vec<StoreUsageInfo> = Vec::with_capacity(shrink_slots.len());
let mut total_alive_bytes: u64 = 0;
let mut candidates_count: usize = 0;
let mut total_bytes: u64 = 0;
let mut total_candidate_stores: usize = 0;
for (slot, slot_shrink_candidates) in shrink_slots {
candidates_count += slot_shrink_candidates.len();
for store in slot_shrink_candidates.values() {
total_alive_bytes += Self::page_align(store.alive_bytes() as u64);
total_bytes += store.total_bytes();
let alive_ratio = Self::page_align(store.alive_bytes() as u64) as f64
/ store.total_bytes() as f64;
store_usage.push(StoreUsageInfo {
slot: *slot,
alive_ratio,
store: store.clone(),
});
total_candidate_stores += 1;
}
}
store_usage.sort_by(|a, b| {
a.alive_ratio
.partial_cmp(&b.alive_ratio)
.unwrap_or(std::cmp::Ordering::Equal)
});
// Work from the beginning of store_usage (the most sparse entries) and see when we can stop
// shrinking while still achieving the overall goals.
let mut shrink_slots: ShrinkCandidates = HashMap::new();
let mut shrink_slots_next_batch: ShrinkCandidates = HashMap::new();
for usage in &store_usage {
let store = &usage.store;
let alive_ratio = (total_alive_bytes as f64) / (total_bytes as f64);
debug!("alive_ratio: {:?} store_id: {:?}, store_ratio: {:?} requirment: {:?}, total_bytes: {:?} total_alive_bytes: {:?}",
alive_ratio, usage.store.append_vec_id(), usage.alive_ratio, shrink_ratio, total_bytes, total_alive_bytes);
if alive_ratio > shrink_ratio {
// we have reached our goal, stop
debug!(
"Shrinking goal can be achieved at slot {:?}, total_alive_bytes: {:?} \
total_bytes: {:?}, alive_ratio: {:}, shrink_ratio: {:?}",
usage.slot, total_alive_bytes, total_bytes, alive_ratio, shrink_ratio
);
if usage.alive_ratio < shrink_ratio {
shrink_slots_next_batch
.entry(usage.slot)
.or_default()
.insert(store.append_vec_id(), store.clone());
} else {
break;
}
} else {
let current_store_size = store.total_bytes();
let after_shrink_size = Self::page_align(store.alive_bytes() as u64);
let bytes_saved = current_store_size.saturating_sub(after_shrink_size);
total_bytes -= bytes_saved;
shrink_slots
.entry(usage.slot)
.or_default()
.insert(store.append_vec_id(), store.clone());
}
}
measure.stop();
inc_new_counter_info!(
"shrink_select_top_sparse_storage_entries-ms",
measure.as_ms() as usize
);
inc_new_counter_info!(
"shrink_select_top_sparse_storage_entries-seeds",
candidates_count
);
inc_new_counter_info!(
"shrink_total_preliminary_candidate_stores",
total_candidate_stores
);
(shrink_slots, shrink_slots_next_batch)
}
pub fn shrink_candidate_slots(&self) -> usize {
let shrink_candidates_slots =
std::mem::take(&mut *self.shrink_candidate_slots.lock().unwrap());
let (shrink_slots, shrink_slots_next_batch) = {
if let AccountShrinkThreshold::TotalSpace { shrink_ratio } = self.shrink_ratio {
let (shrink_slots, shrink_slots_next_batch) =
self.select_candidates_by_total_usage(&shrink_candidates_slots, shrink_ratio);
(shrink_slots, Some(shrink_slots_next_batch))
} else {
(shrink_candidates_slots, None)
}
};
let mut measure_shrink_all_candidates = Measure::start("shrink_all_candidate_slots-ms");
let num_candidates = shrink_slots.len();
let shrink_candidates_count: usize = self.thread_pool.install(|| {
shrink_slots
.into_par_iter()
.map(|(slot, slot_shrink_candidates)| {
let mut measure = Measure::start("shrink_candidate_slots-ms");
self.do_shrink_slot_stores(slot, slot_shrink_candidates.values());
measure.stop();
inc_new_counter_info!("shrink_candidate_slots-ms", measure.as_ms() as usize);
slot_shrink_candidates.len()
})
.sum()
});
measure_shrink_all_candidates.stop();
inc_new_counter_info!(
"shrink_all_candidate_slots-ms",
measure_shrink_all_candidates.as_ms() as usize
);
inc_new_counter_info!("shrink_all_candidate_slots-count", shrink_candidates_count);
let mut pended_counts: usize = 0;
if let Some(shrink_slots_next_batch) = shrink_slots_next_batch {
let mut shrink_slots = self.shrink_candidate_slots.lock().unwrap();
for (slot, stores) in shrink_slots_next_batch {
pended_counts += stores.len();
shrink_slots.entry(slot).or_default().extend(stores);
}
}
inc_new_counter_info!("shrink_pended_stores-count", pended_counts);
num_candidates
}
pub fn shrink_all_slots(&self, is_startup: bool, last_full_snapshot_slot: Option<Slot>) {
const DIRTY_STORES_CLEANING_THRESHOLD: usize = 10_000;
const OUTER_CHUNK_SIZE: usize = 2000;
if is_startup && self.caching_enabled {
let slots = self.all_slots_in_storage();
let threads = num_cpus::get();
let inner_chunk_size = std::cmp::max(OUTER_CHUNK_SIZE / threads, 1);
slots.chunks(OUTER_CHUNK_SIZE).for_each(|chunk| {
chunk.par_chunks(inner_chunk_size).for_each(|slots| {
for slot in slots {
self.shrink_slot_forced(*slot);
}
});
if self.dirty_stores.len() > DIRTY_STORES_CLEANING_THRESHOLD {
self.clean_accounts(None, is_startup, last_full_snapshot_slot);
}
});
} else {
for slot in self.all_slots_in_storage() {
if self.caching_enabled {
self.shrink_slot_forced(slot);
} else {
self.do_shrink_slot_forced_v1(slot);
}
if self.dirty_stores.len() > DIRTY_STORES_CLEANING_THRESHOLD {
self.clean_accounts(None, is_startup, last_full_snapshot_slot);
}
}
}
}
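// Illustrative use of the scan API below (a hedged sketch; `accounts_db`, `ancestors`, and
// `bank_id` are assumed to be in scope, `.lamports()` assumes the `ReadableAccount` trait
// is imported, and `ScanConfig` is assumed to provide a `Default` implementation):
//
//     let total: u64 = accounts_db
//         .scan_accounts(
//             &ancestors,
//             bank_id,
//             |sum: &mut u64, entry: Option<(&Pubkey, AccountSharedData, Slot)>| {
//                 if let Some((_pubkey, account, _slot)) = entry {
//                     *sum += account.lamports();
//                 }
//             },
//             &ScanConfig::default(),
//         )
//         .expect("scan aborted");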
pub fn scan_accounts<F, A>(
&self,
ancestors: &Ancestors,
bank_id: BankId,
scan_func: F,
config: &ScanConfig,
) -> ScanResult<A>
where
F: Fn(&mut A, Option<(&Pubkey, AccountSharedData, Slot)>),
A: Default,
{
let mut collector = A::default();
// This can error out if the slots being scanned over are aborted
self.accounts_index.scan_accounts(
ancestors,
bank_id,
|pubkey, (account_info, slot)| {
let account_slot = self
.get_account_accessor(slot, pubkey, account_info.store_id, account_info.offset)
.get_loaded_account()
.map(|loaded_account| (pubkey, loaded_account.take_account(), slot));
scan_func(&mut collector, account_slot)
},
config,
)?;
Ok(collector)
}
pub fn unchecked_scan_accounts<F, A>(
&self,
metric_name: &'static str,
ancestors: &Ancestors,
scan_func: F,
config: &ScanConfig,
) -> A
where
F: Fn(&mut A, (&Pubkey, LoadedAccount, Slot)),
A: Default,
{
let mut collector = A::default();
self.accounts_index.unchecked_scan_accounts(
metric_name,
ancestors,
|pubkey, (account_info, slot)| {
if let Some(loaded_account) = self
.get_account_accessor(slot, pubkey, account_info.store_id, account_info.offset)
.get_loaded_account()
{
scan_func(&mut collector, (pubkey, loaded_account, slot));
}
},
config,
);
collector
}
pub fn range_scan_accounts<F, A, R>(
&self,
metric_name: &'static str,
ancestors: &Ancestors,
range: R,
config: &ScanConfig,
scan_func: F,
) -> A
where
F: Fn(&mut A, Option<(&Pubkey, AccountSharedData, Slot)>),
A: Default,
R: RangeBounds<Pubkey> + std::fmt::Debug,
{
let mut collector = A::default();
self.accounts_index.range_scan_accounts(
metric_name,
ancestors,
range,
config,
|pubkey, (account_info, slot)| {
// unlike other scan fns, this is called from Bank::collect_rent_eagerly(),
// which is on-consensus processing in the banking/replaying stage.
// This requires infallible and consistent account loading.
// So, we unwrap Option<LoadedAccount> from get_loaded_account() here.
// This is safe because this closure is invoked with the account_info,
// while we lock the index entry at AccountsIndex::do_scan_accounts() ultimately,
// meaning no other subsystems can invalidate the account_info before making their
// changes to the index entry.
// For details, see the comment in retry_to_get_account_accessor()
let account_slot = self
.get_account_accessor(slot, pubkey, account_info.store_id, account_info.offset)
.get_loaded_account()
.map(|loaded_account| (pubkey, loaded_account.take_account(), slot))
.unwrap();
scan_func(&mut collector, Some(account_slot))
},
);
collector
}
pub fn index_scan_accounts<F, A>(
&self,
ancestors: &Ancestors,
bank_id: BankId,
index_key: IndexKey,
scan_func: F,
config: &ScanConfig,
) -> ScanResult<(A, bool)>
where
F: Fn(&mut A, Option<(&Pubkey, AccountSharedData, Slot)>),
A: Default,
{
let key = match &index_key {
IndexKey::ProgramId(key) => key,
IndexKey::SplTokenMint(key) => key,
IndexKey::SplTokenOwner(key) => key,
};
if !self.account_indexes.include_key(key) {
// the requested key was not indexed in the secondary index, so do a normal scan
let used_index = false;
let scan_result = self.scan_accounts(ancestors, bank_id, scan_func, config)?;
return Ok((scan_result, used_index));
}
let mut collector = A::default();
self.accounts_index.index_scan_accounts(
ancestors,
bank_id,
index_key,
|pubkey, (account_info, slot)| {
let account_slot = self
.get_account_accessor(slot, pubkey, account_info.store_id, account_info.offset)
.get_loaded_account()
.map(|loaded_account| (pubkey, loaded_account.take_account(), slot));
scan_func(&mut collector, account_slot)
},
config,
)?;
let used_index = true;
Ok((collector, used_index))
}
/// Scan a specific slot through all the account storage in parallel
pub fn scan_account_storage<R, B>(
&self,
slot: Slot,
cache_map_func: impl Fn(LoadedAccount) -> Option<R> + Sync,
storage_scan_func: impl Fn(&B, LoadedAccount) + Sync,
) -> ScanStorageResult<R, B>
where
R: Send,
B: Send + Default + Sync,
{
if let Some(slot_cache) = self.accounts_cache.slot_cache(slot) {
// If we see the slot in the cache, then all the account information
// is in this cached slot
if slot_cache.len() > SCAN_SLOT_PAR_ITER_THRESHOLD {
ScanStorageResult::Cached(self.thread_pool.install(|| {
slot_cache
.par_iter()
.filter_map(|cached_account| {
cache_map_func(LoadedAccount::Cached(Cow::Borrowed(
cached_account.value(),
)))
})
.collect()
}))
} else {
ScanStorageResult::Cached(
slot_cache
.iter()
.filter_map(|cached_account| {
cache_map_func(LoadedAccount::Cached(Cow::Borrowed(
cached_account.value(),
)))
})
.collect(),
)
}
} else {
let retval = B::default();
// If the slot is not in the cache, then all the account information must have
// been flushed. This is guaranteed because we only remove the rooted slot from
// the cache *after* we've finished flushing in `flush_slot_cache`.
let storage_maps: Vec<Arc<AccountStorageEntry>> = self
.storage
.get_slot_storage_entries(slot)
.unwrap_or_default();
self.thread_pool.install(|| {
storage_maps
.par_iter()
.flat_map(|storage| storage.all_accounts())
.for_each(|account| storage_scan_func(&retval, LoadedAccount::Stored(account)));
});
ScanStorageResult::Stored(retval)
}
}
pub fn set_hash(&self, slot: Slot, parent_slot: Slot) {
let mut bank_hashes = self.bank_hashes.write().unwrap();
if bank_hashes.get(&slot).is_some() {
error!(
"set_hash: already exists; multiple forks with shared slot {} as child (parent: {})!?",
slot, parent_slot,
);
return;
}
let new_hash_info = BankHashInfo {
hash: Hash::default(),
snapshot_hash: Hash::default(),
stats: BankHashStats::default(),
};
bank_hashes.insert(slot, new_hash_info);
}
pub fn load(
&self,
ancestors: &Ancestors,
pubkey: &Pubkey,
load_hint: LoadHint,
) -> Option<(AccountSharedData, Slot)> {
self.do_load(ancestors, pubkey, None, load_hint)
}
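// Illustrative load (a hedged sketch; `accounts_db`, `ancestors`, and `pubkey` are assumed
// to be in scope):
//
//     if let Some((account, slot)) = accounts_db.load_with_fixed_root(&ancestors, &pubkey) {
//         // `account` is the newest visible version given `ancestors` and the roots,
//         // and `slot` is the slot that version was stored in.
//     }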
pub fn load_with_fixed_root(
&self,
ancestors: &Ancestors,
pubkey: &Pubkey,
) -> Option<(AccountSharedData, Slot)> {
self.load(ancestors, pubkey, LoadHint::FixedMaxRoot)
}
pub fn load_without_fixed_root(
&self,
ancestors: &Ancestors,
pubkey: &Pubkey,
) -> Option<(AccountSharedData, Slot)> {
self.load(ancestors, pubkey, LoadHint::Unspecified)
}
fn read_index_for_accessor_or_load_slow<'a>(
&'a self,
ancestors: &Ancestors,
pubkey: &'a Pubkey,
max_root: Option<Slot>,
clone_in_lock: bool,
) -> Option<(Slot, AppendVecId, usize, Option<LoadedAccountAccessor<'a>>)> {
let (lock, index) = match self.accounts_index.get(pubkey, Some(ancestors), max_root) {
AccountIndexGetResult::Found(lock, index) => (lock, index),
// we bail out pretty early for missing.
AccountIndexGetResult::NotFoundOnFork => {
return None;
}
AccountIndexGetResult::Missing(_) => {
return None;
}
};
let slot_list = lock.slot_list();
let (
slot,
AccountInfo {
store_id, offset, ..
},
) = slot_list[index];
let some_from_slow_path = if clone_in_lock {
// the fast path must have failed.... so take the slower approach
// of copying potentially large Account::data inside the lock.
// calling check_and_get_loaded_account is safe as long as we're guaranteed to hold
// the lock during that time, and there should be no purge thanks to the alive ancestors
// held by our caller.
Some(self.get_account_accessor(slot, pubkey, store_id, offset))
} else {
None
};
Some((slot, store_id, offset, some_from_slow_path))
// `lock` is dropped here rather quickly with clone_in_lock = false,
// so the entry could be raced for mutation by other subsystems,
// before we actually provision the account data for the caller's use from now on.
// This is traded for less contention and the resultant performance, introducing a fair amount of
// delicate handling in retry_to_get_account_accessor() below ;)
// you're warned!
}
fn retry_to_get_account_accessor<'a>(
&'a self,
mut slot: Slot,
mut store_id: usize,
mut offset: usize,
ancestors: &'a Ancestors,
pubkey: &'a Pubkey,
max_root: Option<Slot>,
load_hint: LoadHint,
) -> Option<(LoadedAccountAccessor<'a>, Slot)> {
// Happy drawing time! :)
//
// Reader | Accessed data source for cached/stored
// -------------------------------------+----------------------------------
// R1 read_index_for_accessor_or_load_slow()| cached/stored: index
// | |
// <(store_id, offset, ..)> |
// V |
// R2 retry_to_get_account_accessor()/ | cached: map of caches & entry for (slot, pubkey)
// get_account_accessor() | stored: map of stores
// | |
// <Accessor> |
// V |
// R3 check_and_get_loaded_account()/ | cached: N/A (note: basically noop unwrap)
// get_loaded_account() | stored: store's entry for slot
// | |
// <LoadedAccount> |
// V |
// R4 take_account() | cached/stored: entry of cache/storage for (slot, pubkey)
// | |
// <AccountSharedData> |
// V |
// Account!! V
//
// Flusher | Accessed data source for cached/stored
// -------------------------------------+----------------------------------
// F1 flush_slot_cache() | N/A
// | |
// V |
// F2 store_accounts_frozen()/ | map of stores (creates new entry)
// write_accounts_to_storage() |
// | |
// V |
// F3 store_accounts_frozen()/ | index
// update_index() | (replaces existing store_id, offset in caches)
// | |
// V |
// F4 accounts_cache.remove_slot() | map of caches (removes old entry)
// V
//
// Remarks for flusher: So, for any reading operation, there is a race condition where F4 happens
// between R1 and R2. In that case, retrying from R1 is safe because F3 must already have
// occurred.
//
// Shrinker | Accessed data source for stored
// -------------------------------------+----------------------------------
// S1 do_shrink_slot_stores() | N/A
// | |
// V |
// S2 store_accounts_frozen()/ | map of stores (creates new entry)
// write_accounts_to_storage() |
// | |
// V |
// S3 store_accounts_frozen()/ | index
// update_index() | (replaces existing store_id, offset in stores)
// | |
// V |
// S4 do_shrink_slot_stores()/ | map of stores (removes old entry)
// dead_storages
//
// Remarks for shrinker: So, for any reading operation, there is a race condition
// where S4 happens between R1 and R2. In that case, retrying from R1 is safe because S3 must already have
// occurred, and S3 atomically replaced the index accordingly.
//
// Cleaner | Accessed data source for stored
// -------------------------------------+----------------------------------
// C1 clean_accounts() | N/A
// | |
// V |
// C2 clean_accounts()/ | index
// purge_keys_exact() | (removes existing store_id, offset for stores)
// | |
// V |
// C3 clean_accounts()/ | map of stores (removes old entry)
// handle_reclaims() |
//
// Remarks for cleaner: So, for any reading operation, there is a race condition
// where C3 happens between R1 and R2. In that case, retrying from R1 is safe:
// None would simply be returned while bailing out at R1.
//
// Purger | Accessed data source for cached/stored
// ---------------------------------------+----------------------------------
// P1 purge_slot() | N/A
// | |
// V |
// P2 purge_slots_from_cache_and_store() | map of caches/stores (removes old entry)
// | |
// V |
// P3 purge_slots_from_cache_and_store()/ | index
// purge_slot_cache()/ |
// purge_slot_cache_pubkeys() | (removes existing store_id, offset for cache)
// purge_slot_storage()/ |
// purge_keys_exact() | (removes accounts index entries)
// handle_reclaims() | (removes storage entries)
// OR |
// clean_accounts()/ |
// clean_accounts_older_than_root()| (removes existing store_id, offset for stores)
// V
//
// Remarks for purger: So, for any reading operation, there is a race condition
// where P2 happens between R1 and R2. In that case, retrying from R1 is safe;
// we may bail at the index read retry when P3 hasn't been run yet.
#[cfg(test)]
{
// Give some time for cache flushing to occur here for unit tests
sleep(Duration::from_millis(self.load_delay));
}
// Failsafe for potential race conditions with other subsystems
let mut num_acceptable_failed_iterations = 0;
loop {
let account_accessor = self.get_account_accessor(slot, pubkey, store_id, offset);
match account_accessor {
LoadedAccountAccessor::Cached(Some(_)) | LoadedAccountAccessor::Stored(Some(_)) => {
// Great! There was no race, just return :) This is the most usual situation
return Some((account_accessor, slot));
}
LoadedAccountAccessor::Cached(None) => {
num_acceptable_failed_iterations += 1;
// Cache was flushed in between checking the index and retrieving from the cache,
// so retry. This works because in accounts cache flush, an account is written to
// storage *before* it is removed from the cache
match load_hint {
LoadHint::FixedMaxRoot => {
// it's impossible for this to fail for transaction loads from
// replaying/banking more than once.
// This is because:
// 1) For a slot `X` that's being replayed, there is only one
// latest ancestor containing the latest update for the account, and this
// ancestor can only be flushed once.
// 2) The root cannot move while replaying, so the index cannot continually
// find more up to date entries than the current `slot`
assert!(num_acceptable_failed_iterations <= 1);
}
LoadHint::Unspecified => {
// Because a newer root can be added to the index (= not fixed),
// multiple flush race conditions can be observed under very rare
// conditions, at least theoretically.
}
}
}
LoadedAccountAccessor::Stored(None) => {
match load_hint {
LoadHint::FixedMaxRoot => {
// When running replay on the validator, or banking stage on the leader,
// it should be very rare that the storage entry doesn't exist if the
// entry in the accounts index is the latest version of this account.
//
// There are only a few places where the storage entry may not exist
// after reading the index:
// 1) Shrink has removed the old storage entry and rewritten to
// a newer storage entry
// 2) The `pubkey` asked for in this function is a zero-lamport account,
// and the storage entry holding this account qualified for zero-lamport clean.
//
// In both these cases, it should be safe to retry and recheck the accounts
// index indefinitely, without incrementing num_acceptable_failed_iterations.
// That's because if the root is fixed, there should be a bounded number
// of pending cleans/shrinks (depending on how far behind the AccountsBackgroundService
// is), so termination at the desired condition is guaranteed.
//
// Also note that in both cases, if we do find the storage entry,
// we can guarantee that the storage entry is safe to read from because
// we grabbed a reference to the storage entry while it was still in the
// storage map. This means even if the storage entry is removed from the storage
// map after we grabbed the storage entry, the recycler should not reset the
// storage entry until we drop the reference to the storage entry.
//
// eh, no code in this arm? yes!
}
LoadHint::Unspecified => {
// RPC get_account() may have fetched an old root from the index that was
// either:
// 1) Cleaned up by clean_accounts(), so the accounts index has been updated
// and the storage entries have been removed.
// 2) Dropped by purge_slots() because the slot was on a minor fork, which
// removes the slots' storage entries but doesn't purge from the accounts index
// (account index cleanup is left to clean for stored slots). Note that
// this generally is impossible to occur in the wild because the RPC
// should hold the slot's bank, preventing it from being purged() to
// begin with.
num_acceptable_failed_iterations += 1;
}
}
}
}
#[cfg(not(test))]
let load_limit = ABSURD_CONSECUTIVE_FAILED_ITERATIONS;
#[cfg(test)]
let load_limit = self.load_limit.load(Ordering::Relaxed);
let fallback_to_slow_path = if num_acceptable_failed_iterations >= load_limit {
// The latest version of the account existed in the index, but could not be
// fetched from storage. This means a race occurred between this function and clean
// accounts/purge_slots
let message = format!(
"do_load() failed to get key: {} from storage, latest attempt was for \
slot: {}, storage_entry: {} offset: {}, load_hint: {:?}",
pubkey, slot, store_id, offset, load_hint,
);
datapoint_warn!("accounts_db-do_load_warn", ("warn", message, String));
true
} else {
false
};
// Because reading from the cache/storage failed, retry from the index read
let (new_slot, new_store_id, new_offset, maybe_account_accessor) = self
.read_index_for_accessor_or_load_slow(
ancestors,
pubkey,
max_root,
fallback_to_slow_path,
)?;
// Notice the subtle `?` on the previous line; we bail out pretty early if missing.
if new_slot == slot && new_store_id == store_id {
// Considering that we failed to get the accessor above, and further that
// the index still returned the same (slot, store_id) tuple, the offset must be the
// same too.
assert!(new_offset == offset);
// If the entry was missing from the cache, that means it must have been flushed,
// and the accounts index is always updated before cache flush, so store_id must
// not indicate being cached at this point.
assert!(new_store_id != CACHE_VIRTUAL_STORAGE_ID);
// If this is not a cache entry, then this was a minor fork slot
// that had its storage entries cleaned up by purge_slots() but hasn't been
// cleaned yet. That means this must be RPC access and not replay/banking, at the
// very least. Note that purge shouldn't occur even for RPC, as the caller must hold all
// of the ancestor slots.
assert!(load_hint == LoadHint::Unspecified);
// Everything being assert!()-ed, let's panic!() here as it's an error condition
// after all....
// That reasoning is based on the fact that all code paths reaching this fn
// retry_to_get_account_accessor() must outlive the Arc<Bank> (and all of its
// ancestors) over this fn invocation, guaranteeing the prevention of being purged,
// first of all.
// For details, see the comment in AccountIndex::do_checked_scan_accounts(),
// which is referring back here.
panic!(
"Bad index entry detected ({}, {}, {}, {}, {:?})",
pubkey, slot, store_id, offset, load_hint
);
} else if fallback_to_slow_path {
// the above bad-index-entry check must have been performed first to retain the same
// behavior
return Some((
maybe_account_accessor.expect("must be some if clone_in_lock=true"),
new_slot,
));
}
slot = new_slot;
store_id = new_store_id;
offset = new_offset;
}
}
fn do_load(
&self,
ancestors: &Ancestors,
pubkey: &Pubkey,
max_root: Option<Slot>,
load_hint: LoadHint,
) -> Option<(AccountSharedData, Slot)> {
#[cfg(not(test))]
assert!(max_root.is_none());
let (slot, store_id, offset, _maybe_account_accesor) =
self.read_index_for_accessor_or_load_slow(ancestors, pubkey, max_root, false)?;
// Notice the subtle `?` on the previous line; we bail out pretty early if missing.
if self.caching_enabled && store_id != CACHE_VIRTUAL_STORAGE_ID {
let result = self.read_only_accounts_cache.load(*pubkey, slot);
if let Some(account) = result {
return Some((account, slot));
}
}
let (mut account_accessor, slot) = self.retry_to_get_account_accessor(
slot, store_id, offset, ancestors, pubkey, max_root, load_hint,
)?;
let loaded_account = account_accessor.check_and_get_loaded_account();
let is_cached = loaded_account.is_cached();
let account = loaded_account.take_account();
if self.caching_enabled && !is_cached {
/*
We show that this store into the read-only cache for account 'A' is safe: future loads of 'A' from the read-only
cache reflect 'A''s latest state on this fork.
This safety holds as long as, during replay of slot 'S', we only read 'A' from the write cache,
not the read-only cache, after it's been updated in replay of slot 'S'.
Assume for contradiction this is not true, and we read 'A' from the read-only cache *after* it had been updated in 'S'.
This means an entry '(S, A)' was added to the read-only cache after 'A' had been updated in 'S'.
Now when '(S, A)' was being added to the read-only cache, it must have been true that 'is_cached == false',
which means '(S, A)' does not exist in the write cache yet.
However, by the assumption for contradiction above, 'A' has already been updated in 'S', which means '(S, A)'
must exist in the write cache, which is a contradiction.
*/
self.read_only_accounts_cache
.store(*pubkey, slot, account.clone());
}
Some((account, slot))
}
pub fn load_account_hash(
&self,
ancestors: &Ancestors,
pubkey: &Pubkey,
max_root: Option<Slot>,
load_hint: LoadHint,
) -> Option<Hash> {
let (slot, store_id, offset, _maybe_account_accesor) =
self.read_index_for_accessor_or_load_slow(ancestors, pubkey, max_root, false)?;
// Notice the subtle `?` on the previous line; we bail out pretty early if missing.
let (mut account_accessor, _) = self.retry_to_get_account_accessor(
slot, store_id, offset, ancestors, pubkey, max_root, load_hint,
)?;
let loaded_account = account_accessor.check_and_get_loaded_account();
Some(loaded_account.loaded_hash())
}
fn get_account_accessor<'a>(
&'a self,
slot: Slot,
pubkey: &'a Pubkey,
store_id: usize,
offset: usize,
) -> LoadedAccountAccessor<'a> {
if store_id == CACHE_VIRTUAL_STORAGE_ID {
let maybe_cached_account = self.accounts_cache.load(slot, pubkey).map(Cow::Owned);
LoadedAccountAccessor::Cached(maybe_cached_account)
} else {
let maybe_storage_entry = self
.storage
.get_account_storage_entry(slot, store_id)
.map(|account_storage_entry| (account_storage_entry, offset));
LoadedAccountAccessor::Stored(maybe_storage_entry)
}
}
fn try_recycle_and_insert_store(
&self,
slot: Slot,
min_size: u64,
max_size: u64,
) -> Option<Arc<AccountStorageEntry>> {
let store = self.try_recycle_store(slot, min_size, max_size)?;
self.insert_store(slot, store.clone());
Some(store)
}
fn try_recycle_store(
&self,
slot: Slot,
min_size: u64,
max_size: u64,
) -> Option<Arc<AccountStorageEntry>> {
let mut max = 0;
let mut min = std::u64::MAX;
let mut avail = 0;
let mut recycle_stores = self.recycle_stores.write().unwrap();
for (i, (_recycled_time, store)) in recycle_stores.iter().enumerate() {
if Arc::strong_count(store) == 1 {
max = std::cmp::max(store.accounts.capacity(), max);
min = std::cmp::min(store.accounts.capacity(), min);
avail += 1;
if store.accounts.capacity() >= min_size && store.accounts.capacity() < max_size {
let ret = recycle_stores.remove_entry(i);
drop(recycle_stores);
let old_id = ret.append_vec_id();
ret.recycle(slot, self.next_id.fetch_add(1, Ordering::AcqRel));
debug!(
"recycling store: {} {:?} old_id: {}",
ret.append_vec_id(),
ret.get_path(),
old_id
);
return Some(ret);
}
}
}
debug!(
"no recycle stores max: {} min: {} len: {} looking: {}, {} avail: {}",
max,
min,
recycle_stores.entry_count(),
min_size,
max_size,
avail,
);
None
}
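/// Find or create a storage entry for `slot` that can hold `size` bytes: prefer an
/// available existing store in the slot, then a recycled store, and finally a newly
/// created one (recycled/new stores are inserted into the slot's storage map).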
fn find_storage_candidate(&self, slot: Slot, size: usize) -> Arc<AccountStorageEntry> {
let mut create_extra = false;
let mut get_slot_stores = Measure::start("get_slot_stores");
let slot_stores_lock = self.storage.get_slot_stores(slot);
get_slot_stores.stop();
self.stats
.store_get_slot_store
.fetch_add(get_slot_stores.as_us(), Ordering::Relaxed);
let mut find_existing = Measure::start("find_existing");
if let Some(slot_stores_lock) = slot_stores_lock {
let slot_stores = slot_stores_lock.read().unwrap();
if !slot_stores.is_empty() {
if slot_stores.len() <= self.min_num_stores {
let mut total_accounts = 0;
for store in slot_stores.values() {
total_accounts += store.count();
}
// Create more stores so that when scanning the storage all CPUs have work
if (total_accounts / 16) >= slot_stores.len() {
create_extra = true;
}
}
// pick an available store at random by iterating from a random point
let to_skip = thread_rng().gen_range(0, slot_stores.len());
for (i, store) in slot_stores.values().cycle().skip(to_skip).enumerate() {
if store.try_available() {
let ret = store.clone();
drop(slot_stores);
if create_extra {
if self
.try_recycle_and_insert_store(slot, size as u64, std::u64::MAX)
.is_none()
{
self.stats
.create_store_count
.fetch_add(1, Ordering::Relaxed);
self.create_and_insert_store(slot, self.file_size, "store extra");
} else {
self.stats
.recycle_store_count
.fetch_add(1, Ordering::Relaxed);
}
}
find_existing.stop();
self.stats
.store_find_existing
.fetch_add(find_existing.as_us(), Ordering::Relaxed);
return ret;
}
// looked at every store, bail...
if i == slot_stores.len() {
break;
}
}
}
}
find_existing.stop();
self.stats
.store_find_existing
.fetch_add(find_existing.as_us(), Ordering::Relaxed);
let store = if let Some(store) = self.try_recycle_store(slot, size as u64, std::u64::MAX) {
self.stats
.recycle_store_count
.fetch_add(1, Ordering::Relaxed);
store
} else {
self.stats
.create_store_count
.fetch_add(1, Ordering::Relaxed);
self.create_store(slot, self.file_size, "store", &self.paths)
};
// try_available is like taking a lock on the store,
// preventing other threads from using it.
// It must succeed here and happen before insert,
// otherwise another thread could also grab it from the index.
assert!(store.try_available());
self.insert_store(slot, store.clone());
store
}
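/// Round `size` up to the next multiple of `PAGE_SIZE`.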
fn page_align(size: u64) -> u64 {
(size + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1)
}
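/// Returns true if any `Available` store for `slot` has more than `size` bytes of spare
/// capacity. Panics if the slot has no storage map at all.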
fn has_space_available(&self, slot: Slot, size: u64) -> bool {
let slot_storage = self.storage.get_slot_stores(slot).unwrap();
let slot_storage_r = slot_storage.read().unwrap();
for (_id, store) in slot_storage_r.iter() {
if store.status() == AccountStorageStatus::Available
&& (store.accounts.capacity() - store.accounts.len() as u64) > size
{
return true;
}
}
false
}
fn create_store(
&self,
slot: Slot,
size: u64,
from: &str,
paths: &[PathBuf],
) -> Arc<AccountStorageEntry> {
let path_index = thread_rng().gen_range(0, paths.len());
let store = Arc::new(self.new_storage_entry(
slot,
Path::new(&paths[path_index]),
Self::page_align(size),
));
assert!(
store.append_vec_id() != CACHE_VIRTUAL_STORAGE_ID,
"We've run out of storage ids!"
);
debug!(
"creating store: {} slot: {} len: {} size: {} from: {} path: {:?}",
store.append_vec_id(),
slot,
store.accounts.len(),
store.accounts.capacity(),
from,
store.accounts.get_path()
);
store
}
fn create_and_insert_store(
&self,
slot: Slot,
size: u64,
from: &str,
) -> Arc<AccountStorageEntry> {
self.create_and_insert_store_with_paths(slot, size, from, &self.paths)
}
fn create_and_insert_store_with_paths(
&self,
slot: Slot,
size: u64,
from: &str,
paths: &[PathBuf],
) -> Arc<AccountStorageEntry> {
let store = self.create_store(slot, size, from, paths);
let store_for_index = store.clone();
self.insert_store(slot, store_for_index);
store
}
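/// Register `store` under `slot` in `self.storage`, creating the slot's store map if it
/// does not exist yet. Asserts that no store with the same append vec id was already
/// present for the slot.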
fn insert_store(&self, slot: Slot, store: Arc<AccountStorageEntry>) {
let slot_storages: SlotStores = self.storage.get_slot_stores(slot).unwrap_or_else(||
// DashMap entry.or_insert() returns a RefMut, essentially a write lock,
// which is dropped after this block ends, minimizing time held by the lock.
// However, we still want to persist the reference to the `SlotStores` behind
// the lock, hence we clone it out (`SlotStores` is an Arc, so it is cheap to clone).
self.storage
.0
.entry(slot)
.or_insert(Arc::new(RwLock::new(HashMap::new())))
.clone());
assert!(slot_storages
.write()
.unwrap()
.insert(store.append_vec_id(), store)
.is_none());
}
pub fn create_drop_bank_callback(
&self,
pruned_banks_sender: DroppedSlotsSender,
) -> SendDroppedBankCallback {
self.is_bank_drop_callback_enabled
.store(true, Ordering::SeqCst);
SendDroppedBankCallback::new(pruned_banks_sender)
}
/// This should only be called after the `Bank::drop()` runs in bank.rs, See BANK_DROP_SAFETY
/// comment below for more explanation.
/// `is_from_abs` is true if the caller is the AccountsBackgroundService
pub fn purge_slot(&self, slot: Slot, bank_id: BankId, is_from_abs: bool) {
if self.is_bank_drop_callback_enabled.load(Ordering::SeqCst) && !is_from_abs {
panic!("bad drop callpath detected; Bank::drop() must run serially with other logic in ABS like clean_accounts()")
}
// BANK_DROP_SAFETY: Because this function only runs once the bank is dropped,
// we know that there are no longer any ongoing scans on this bank, because scans require
// and hold a reference to the bank at the tip of the fork they're scanning. Hence it's
// safe to remove this bank_id from the `removed_bank_ids` list at this point.
if self
.accounts_index
.removed_bank_ids
.lock()
.unwrap()
.remove(&bank_id)
{
// If this slot was already cleaned up, no need to do any further cleans
return;
}
self.purge_slots(std::iter::once(&slot));
}
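/// Move the storage entries of removed slots into `recycle_stores`, up to
/// `MAX_RECYCLE_STORES`; anything beyond that is counted as dropped. Returns the time in
/// microseconds spent acquiring the `recycle_stores` write lock.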
fn recycle_slot_stores(
&self,
total_removed_storage_entries: usize,
slot_stores: &[SlotStores],
) -> u64 {
let mut recycled_count = 0;
let mut recycle_stores_write_elapsed = Measure::start("recycle_stores_write_elapsed");
let mut recycle_stores = self.recycle_stores.write().unwrap();
recycle_stores_write_elapsed.stop();
for slot_entries in slot_stores {
let entry = slot_entries.read().unwrap();
for (_store_id, stores) in entry.iter() {
if recycle_stores.entry_count() > MAX_RECYCLE_STORES {
let dropped_count = total_removed_storage_entries - recycled_count;
self.stats
.dropped_stores
.fetch_add(dropped_count as u64, Ordering::Relaxed);
return recycle_stores_write_elapsed.as_us();
}
recycle_stores.add_entry(stores.clone());
recycled_count += 1;
}
}
recycle_stores_write_elapsed.as_us()
}
/// Purges every slot in `removed_slots` from both the cache and storage. This includes
/// entries in the accounts index, cache entries, and any backing storage entries.
fn purge_slots_from_cache_and_store<'a>(
&self,
removed_slots: impl Iterator<Item = &'a Slot>,
purge_stats: &PurgeStats,
) {
let mut remove_cache_elapsed_across_slots = 0;
let mut num_cached_slots_removed = 0;
let mut total_removed_cached_bytes = 0;
for remove_slot in removed_slots {
// This function is only currently safe with respect to `flush_slot_cache()` because
// both functions run serially in AccountsBackgroundService.
let mut remove_cache_elapsed = Measure::start("remove_cache_elapsed");
// Note: we cannot remove this slot from the slot cache until we've removed its
// entries from the accounts index first. This is because `scan_accounts()` relies on
// holding the index lock, finding the index entry, and then looking up the entry
// in the cache. If it fails to find that entry, it will panic in `get_loaded_account()`
if let Some(slot_cache) = self.accounts_cache.slot_cache(*remove_slot) {
// If the slot is still in the cache, remove the backing storages for
// the slot and from the Accounts Index
num_cached_slots_removed += 1;
total_removed_cached_bytes += slot_cache.total_bytes();
self.purge_slot_cache(*remove_slot, slot_cache);
remove_cache_elapsed.stop();
remove_cache_elapsed_across_slots += remove_cache_elapsed.as_us();
// Nobody else should have removed the slot cache entry yet
assert!(self.accounts_cache.remove_slot(*remove_slot).is_some());
} else {
self.purge_slot_storage(*remove_slot, purge_stats);
}
// It should not be possible that a slot is neither in the cache nor in storage. Even in
// a slot with all ticks, `Bank::new_from_parent()` immediately stores some sysvars
// on bank creation.
}
purge_stats
.remove_cache_elapsed
.fetch_add(remove_cache_elapsed_across_slots, Ordering::Relaxed);
purge_stats
.num_cached_slots_removed
.fetch_add(num_cached_slots_removed, Ordering::Relaxed);
purge_stats
.total_removed_cached_bytes
.fetch_add(total_removed_cached_bytes, Ordering::Relaxed);
}
/// Purge the backing storage entries for the given slot, does not purge from
/// the cache!
fn purge_dead_slots_from_storage<'a>(
&'a self,
removed_slots: impl Iterator<Item = &'a Slot> + Clone,
purge_stats: &PurgeStats,
) {
// Check all slots `removed_slots` are no longer "relevant" roots.
// Note that the slots here could have been rooted slots, but if they're passed here
// for removal it means:
// 1) All updates in that old root have been outdated by updates in newer roots
// 2) Those slots/roots should have already been purged from the accounts index root
// tracking metadata via `accounts_index.clean_dead_slot()`.
let mut safety_checks_elapsed = Measure::start("safety_checks_elapsed");
assert!(self
.accounts_index
.get_rooted_from_list(removed_slots.clone())
.is_empty());
safety_checks_elapsed.stop();
purge_stats
.safety_checks_elapsed
.fetch_add(safety_checks_elapsed.as_us(), Ordering::Relaxed);
let mut total_removed_storage_entries = 0;
let mut total_removed_stored_bytes = 0;
let mut all_removed_slot_storages = vec![];
let mut remove_storage_entries_elapsed = Measure::start("remove_storage_entries_elapsed");
for remove_slot in removed_slots {
// Remove the storage entries and collect some metrics
if let Some((_, slot_storages_to_be_removed)) = self.storage.0.remove(remove_slot) {
{
let r_slot_removed_storages = slot_storages_to_be_removed.read().unwrap();
total_removed_storage_entries += r_slot_removed_storages.len();
total_removed_stored_bytes += r_slot_removed_storages
.values()
.map(|i| i.accounts.capacity())
.sum::<u64>();
}
all_removed_slot_storages.push(slot_storages_to_be_removed.clone());
}
}
remove_storage_entries_elapsed.stop();
let num_stored_slots_removed = all_removed_slot_storages.len();
let recycle_stores_write_elapsed =
self.recycle_slot_stores(total_removed_storage_entries, &all_removed_slot_storages);
let mut drop_storage_entries_elapsed = Measure::start("drop_storage_entries_elapsed");
// Backing mmaps for removed storages entries explicitly dropped here outside
// of any locks
drop(all_removed_slot_storages);
drop_storage_entries_elapsed.stop();
purge_stats
.remove_storage_entries_elapsed
.fetch_add(remove_storage_entries_elapsed.as_us(), Ordering::Relaxed);
purge_stats
.drop_storage_entries_elapsed
.fetch_add(drop_storage_entries_elapsed.as_us(), Ordering::Relaxed);
purge_stats
.num_stored_slots_removed
.fetch_add(num_stored_slots_removed, Ordering::Relaxed);
purge_stats
.total_removed_storage_entries
.fetch_add(total_removed_storage_entries, Ordering::Relaxed);
purge_stats
.total_removed_stored_bytes
.fetch_add(total_removed_stored_bytes, Ordering::Relaxed);
purge_stats
.recycle_stores_write_elapsed
.fetch_add(recycle_stores_write_elapsed, Ordering::Relaxed);
}
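/// Purge a slot that still lives only in the cache: collect its (pubkey, slot) pairs and
/// remove the exact entries from the accounts index, treating the slot as dead.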
fn purge_slot_cache(&self, purged_slot: Slot, slot_cache: SlotCache) {
let mut purged_slot_pubkeys: HashSet<(Slot, Pubkey)> = HashSet::new();
let pubkey_to_slot_set: Vec<(Pubkey, Slot)> = slot_cache
.iter()
.map(|account| {
purged_slot_pubkeys.insert((purged_slot, *account.key()));
(*account.key(), purged_slot)
})
.collect();
self.purge_slot_cache_pubkeys(purged_slot, purged_slot_pubkeys, pubkey_to_slot_set, true);
}
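/// Remove the exact (pubkey, slot) index entries of a slot purged from the cache; when
/// `is_dead` is set, also remove the dead-slot metadata. The slot must have no backing
/// storage.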
fn purge_slot_cache_pubkeys(
&self,
purged_slot: Slot,
purged_slot_pubkeys: HashSet<(Slot, Pubkey)>,
pubkey_to_slot_set: Vec<(Pubkey, Slot)>,
is_dead: bool,
) {
// Slot purged from cache should not exist in the backing store
assert!(self.storage.get_slot_stores(purged_slot).is_none());
let num_purged_keys = pubkey_to_slot_set.len();
let reclaims = self.purge_keys_exact(pubkey_to_slot_set.iter());
assert_eq!(reclaims.len(), num_purged_keys);
if is_dead {
self.remove_dead_slots_metadata(
std::iter::once(&purged_slot),
purged_slot_pubkeys,
None,
);
}
}
fn purge_slot_storage(&self, remove_slot: Slot, purge_stats: &PurgeStats) {
// Because AccountsBackgroundService synchronously flushes from the accounts cache
// and handles all Bank::drop() (the cleanup function that leads to this
// function call), then we don't need to worry about an overlapping cache flush
// with this function call. This means, if we get into this case, we can be
// confident that the entire state for this slot has been flushed to the storage
// already.
let mut scan_storages_elasped = Measure::start("scan_storages_elasped");
type ScanResult = ScanStorageResult<Pubkey, Arc<Mutex<HashSet<(Pubkey, Slot)>>>>;
let scan_result: ScanResult = self.scan_account_storage(
remove_slot,
|loaded_account: LoadedAccount| Some(*loaded_account.pubkey()),
|accum: &Arc<Mutex<HashSet<(Pubkey, Slot)>>>, loaded_account: LoadedAccount| {
accum
.lock()
.unwrap()
.insert((*loaded_account.pubkey(), remove_slot));
},
);
scan_storages_elasped.stop();
purge_stats
.scan_storages_elasped
.fetch_add(scan_storages_elasped.as_us(), Ordering::Relaxed);
let mut purge_accounts_index_elapsed = Measure::start("purge_accounts_index_elapsed");
let reclaims;
match scan_result {
ScanStorageResult::Cached(_) => {
panic!("Should not see cached keys in this `else` branch, since we checked this slot did not exist in the cache above");
}
ScanStorageResult::Stored(stored_keys) => {
// Purge this slot from the accounts index
reclaims = self.purge_keys_exact(stored_keys.lock().unwrap().iter());
}
}
purge_accounts_index_elapsed.stop();
purge_stats
.purge_accounts_index_elapsed
.fetch_add(purge_accounts_index_elapsed.as_us(), Ordering::Relaxed);
// `handle_reclaims()` should remove all the account index entries and
// storage entries
let mut handle_reclaims_elapsed = Measure::start("handle_reclaims_elapsed");
// Slot should be dead after removing all its account entries
let expected_dead_slot = Some(remove_slot);
self.handle_reclaims(
&reclaims,
expected_dead_slot,
Some(purge_stats),
Some(&mut ReclaimResult::default()),
false,
);
handle_reclaims_elapsed.stop();
purge_stats
.handle_reclaims_elapsed
.fetch_add(handle_reclaims_elapsed.as_us(), Ordering::Relaxed);
// After handling the reclaimed entries, this slot's
// storage entries should be purged from self.storage
assert!(self.storage.get_slot_stores(remove_slot).is_none());
}
#[allow(clippy::needless_collect)]
fn purge_slots<'a>(&self, slots: impl Iterator<Item = &'a Slot>) {
// `add_root()` should be called first
let mut safety_checks_elapsed = Measure::start("safety_checks_elapsed");
let non_roots = slots
// Only safe to check when there are duplicate versions of a slot
// because ReplayStage will not make new roots before dumping the
// duplicate slots first. Thus we will not be in a case where we
// root slot `S`, then try to dump some other version of slot `S`, the
// dumping has to finish first
//
// Also note roots are never removed via `remove_unrooted_slot()`, so
// it's safe to filter them out here as they won't need deletion from
// self.accounts_index.removed_bank_ids in `purge_slots_from_cache_and_store()`.
.filter(|slot| !self.accounts_index.is_root(**slot));
safety_checks_elapsed.stop();
self.external_purge_slots_stats
.safety_checks_elapsed
.fetch_add(safety_checks_elapsed.as_us(), Ordering::Relaxed);
self.purge_slots_from_cache_and_store(non_roots, &self.external_purge_slots_stats);
self.external_purge_slots_stats
.report("external_purge_slots_stats", Some(1000));
}
pub fn remove_unrooted_slots(&self, remove_slots: &[(Slot, BankId)]) {
let rooted_slots = self
.accounts_index
.get_rooted_from_list(remove_slots.iter().map(|(slot, _)| slot));
assert!(
rooted_slots.is_empty(),
"Trying to remove accounts for rooted slots {:?}",
rooted_slots
);
let RemoveUnrootedSlotsSynchronization {
slots_under_contention,
signal,
} = &self.remove_unrooted_slots_synchronization;
{
// Slots that are currently being flushed by flush_slot_cache()
let mut currently_contended_slots = slots_under_contention.lock().unwrap();
// Slots that are currently being flushed by flush_slot_cache() AND
// we want to remove in this function
let mut remaining_contended_flush_slots: Vec<Slot> = remove_slots
.iter()
.filter_map(|(remove_slot, _)| {
let is_being_flushed = currently_contended_slots.contains(remove_slot);
if !is_being_flushed {
// Reserve the slots that we want to purge that aren't currently
// being flushed to prevent cache from flushing those slots in
// the future.
//
// Note that the single replay thread has to remove a specific slot `N`
// before another version of the same slot can be replayed. This means
// multiple threads should not call `remove_unrooted_slots()` simultaneously
// with the same slot.
currently_contended_slots.insert(*remove_slot);
}
// If the cache is currently flushing this slot, add it to the list
Some(remove_slot).filter(|_| is_being_flushed)
})
.cloned()
.collect();
// Wait for cache flushes to finish
loop {
if !remaining_contended_flush_slots.is_empty() {
// Wait for the signal that the cache has finished flushing a slot
//
// Don't wait if the remaining_contended_flush_slots is empty, otherwise
// we may never get a signal since there's no cache flush thread to
// do the signaling
currently_contended_slots = signal.wait(currently_contended_slots).unwrap();
} else {
// There are no slots being flushed to wait on, so it's safe to continue
// to purging the slots we want to purge!
break;
}
// For each slot the cache flush has finished, mark that we're about to start
// purging these slots by reserving it in `currently_contended_slots`.
remaining_contended_flush_slots.retain(|flush_slot| {
let is_being_flushed = currently_contended_slots.contains(flush_slot);
if !is_being_flushed {
// Mark that we're about to delete this slot now
currently_contended_slots.insert(*flush_slot);
}
is_being_flushed
});
}
}
// Mark these slots as about to be purged so that new attempts to scan these
// banks fail, and any ongoing scans over these slots will detect that they should abort
// their results
{
let mut locked_removed_bank_ids = self.accounts_index.removed_bank_ids.lock().unwrap();
for (_slot, remove_bank_id) in remove_slots.iter() {
locked_removed_bank_ids.insert(*remove_bank_id);
}
}
let remove_unrooted_purge_stats = PurgeStats::default();
self.purge_slots_from_cache_and_store(
remove_slots.iter().map(|(slot, _)| slot),
&remove_unrooted_purge_stats,
);
remove_unrooted_purge_stats.report("remove_unrooted_slots_purge_slots_stats", Some(0));
let mut currently_contended_slots = slots_under_contention.lock().unwrap();
for (remove_slot, _) in remove_slots {
assert!(currently_contended_slots.remove(remove_slot));
}
}
pub fn hash_stored_account(slot: Slot, account: &StoredAccountMeta) -> Hash {
Self::hash_account_data(
slot,
account.account_meta.lamports,
&account.account_meta.owner,
account.account_meta.executable,
account.account_meta.rent_epoch,
account.data,
&account.meta.pubkey,
)
}
pub fn hash_account<T: ReadableAccount>(slot: Slot, account: &T, pubkey: &Pubkey) -> Hash {
Self::hash_account_data(
slot,
account.lamports(),
account.owner(),
account.executable(),
account.rent_epoch(),
account.data(),
pubkey,
)
}
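/// Compute the blake3 hash over an account's fields (lamports, slot, rent epoch, data,
/// executable flag, owner, pubkey). Accounts with zero lamports hash to `Hash::default()`.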
fn hash_account_data(
slot: Slot,
lamports: u64,
owner: &Pubkey,
executable: bool,
rent_epoch: Epoch,
data: &[u8],
pubkey: &Pubkey,
) -> Hash {
if lamports == 0 {
return Hash::default();
}
let mut hasher = blake3::Hasher::new();
hasher.update(&lamports.to_le_bytes());
hasher.update(&slot.to_le_bytes());
hasher.update(&rent_epoch.to_le_bytes());
hasher.update(data);
if executable {
hasher.update(&[1u8; 1]);
} else {
hasher.update(&[0u8; 1]);
}
hasher.update(owner.as_ref());
hasher.update(pubkey.as_ref());
Hash::new_from_array(
<[u8; solana_sdk::hash::HASH_BYTES]>::try_from(hasher.finalize().as_slice()).unwrap(),
)
}
fn bulk_assign_write_version(&self, count: usize) -> StoredMetaWriteVersion {
self.write_version
.fetch_add(count as StoredMetaWriteVersion, Ordering::AcqRel)
}
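/// Append the accounts (and their `hashes`) to append vecs obtained from `storage_finder`,
/// creating or recycling a larger store whenever an account does not fit, and return one
/// `AccountInfo` per stored account.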
fn write_accounts_to_storage<F: FnMut(Slot, usize) -> Arc<AccountStorageEntry>>(
&self,
slot: Slot,
hashes: &[impl Borrow<Hash>],
mut storage_finder: F,
accounts_and_meta_to_store: &[(StoredMeta, Option<&impl ReadableAccount>)],
) -> Vec<AccountInfo> {
assert_eq!(hashes.len(), accounts_and_meta_to_store.len());
let mut infos: Vec<AccountInfo> = Vec::with_capacity(accounts_and_meta_to_store.len());
let mut total_append_accounts_us = 0;
let mut total_storage_find_us = 0;
while infos.len() < accounts_and_meta_to_store.len() {
let mut storage_find = Measure::start("storage_finder");
let data_len = accounts_and_meta_to_store[infos.len()]
.1
.map(|account| account.data().len())
.unwrap_or_default();
let storage = storage_finder(slot, data_len + STORE_META_OVERHEAD);
storage_find.stop();
total_storage_find_us += storage_find.as_us();
let mut append_accounts = Measure::start("append_accounts");
let rvs = storage.accounts.append_accounts(
&accounts_and_meta_to_store[infos.len()..],
&hashes[infos.len()..],
);
assert!(!rvs.is_empty());
append_accounts.stop();
total_append_accounts_us += append_accounts.as_us();
if rvs.len() == 1 {
storage.set_status(AccountStorageStatus::Full);
// See if an account overflows the append vecs in the slot.
let data_len = (data_len + STORE_META_OVERHEAD) as u64;
if !self.has_space_available(slot, data_len) {
let special_store_size = std::cmp::max(data_len * 2, self.file_size);
if self
.try_recycle_and_insert_store(slot, special_store_size, std::u64::MAX)
.is_none()
{
self.stats
.create_store_count
.fetch_add(1, Ordering::Relaxed);
self.create_and_insert_store(slot, special_store_size, "large create");
} else {
self.stats
.recycle_store_count
.fetch_add(1, Ordering::Relaxed);
}
}
continue;
}
for (offsets, (_, account)) in rvs
.windows(2)
.zip(&accounts_and_meta_to_store[infos.len()..])
{
let stored_size = offsets[1] - offsets[0];
storage.add_account(stored_size);
infos.push(AccountInfo {
store_id: storage.append_vec_id(),
offset: offsets[0],
stored_size,
lamports: account
.map(|account| account.lamports())
.unwrap_or_default(),
});
}
// restore the state to available
storage.set_status(AccountStorageStatus::Available);
}
self.stats
.store_append_accounts
.fetch_add(total_append_accounts_us, Ordering::Relaxed);
self.stats
.store_find_store
.fetch_add(total_storage_find_us, Ordering::Relaxed);
infos
}
pub fn mark_slot_frozen(&self, slot: Slot) {
if let Some(slot_cache) = self.accounts_cache.slot_cache(slot) {
slot_cache.mark_slot_frozen();
slot_cache.report_slot_store_metrics();
}
self.accounts_cache.report_size();
}
pub fn expire_old_recycle_stores(&self) {
let mut recycle_stores_write_elapsed = Measure::start("recycle_stores_write_time");
let recycle_stores = self.recycle_stores.write().unwrap().expire_old_entries();
recycle_stores_write_elapsed.stop();
let mut drop_storage_entries_elapsed = Measure::start("drop_storage_entries_elapsed");
drop(recycle_stores);
drop_storage_entries_elapsed.stop();
self.clean_accounts_stats
.purge_stats
.drop_storage_entries_elapsed
.fetch_add(drop_storage_entries_elapsed.as_us(), Ordering::Relaxed);
self.clean_accounts_stats
.purge_stats
.recycle_stores_write_elapsed
.fetch_add(recycle_stores_write_elapsed.as_us(), Ordering::Relaxed);
}
pub fn flush_accounts_cache_slot(&self, slot: Slot) {
self.flush_slot_cache(slot, None::<&mut fn(&_, &_) -> bool>);
}
/// true if write cache is too big
fn should_aggressively_flush_cache(&self) -> bool {
self.write_cache_limit_bytes
.unwrap_or(WRITE_CACHE_LIMIT_BYTES_DEFAULT)
< self.accounts_cache.size()
}
// `force_flush` flushes all the cached roots `<= requested_flush_root`. It also then
// flushes:
// 1) excess remaining roots or unrooted slots while 'should_aggressively_flush_cache' is true
pub fn flush_accounts_cache(&self, force_flush: bool, requested_flush_root: Option<Slot>) {
#[cfg(not(test))]
assert!(requested_flush_root.is_some());
if !force_flush && !self.should_aggressively_flush_cache() {
return;
}
// Flush only the roots <= requested_flush_root, so that snapshotting has all
// the relevant roots in storage.
let mut flush_roots_elapsed = Measure::start("flush_roots_elapsed");
let mut account_bytes_saved = 0;
let mut num_accounts_saved = 0;
// Note even if force_flush is false, we will still flush all roots <= the
// given `requested_flush_root`, even if some of the later roots cannot be used for
// cleaning due to an ongoing scan
let (total_new_cleaned_roots, num_cleaned_roots_flushed) = self
.flush_rooted_accounts_cache(
requested_flush_root,
Some((&mut account_bytes_saved, &mut num_accounts_saved)),
);
flush_roots_elapsed.stop();
// Note we don't purge unrooted slots here because there may be ongoing scans/references
// for those slots; let the Bank::drop() implementation do cleanup instead on dead
// banks
// If 'should_aggressively_flush_cache', then flush the excess ones to storage
let (total_new_excess_roots, num_excess_roots_flushed) =
if self.should_aggressively_flush_cache() {
// Start by flushing the roots
//
// Cannot do any cleaning on roots past `requested_flush_root` because future
// snapshots may need updates from those later slots, hence we pass `None`
// for `should_clean`.
self.flush_rooted_accounts_cache(None, None)
} else {
(0, 0)
};
let mut excess_slot_count = 0;
let mut unflushable_unrooted_slot_count = 0;
let max_flushed_root = self.accounts_cache.fetch_max_flush_root();
if self.should_aggressively_flush_cache() {
let old_slots = self.accounts_cache.cached_frozen_slots();
excess_slot_count = old_slots.len();
let mut flush_stats = FlushStats::default();
old_slots.into_iter().for_each(|old_slot| {
// Don't flush slots that are known to be unrooted
if old_slot > max_flushed_root {
if self.should_aggressively_flush_cache() {
if let Some(stats) =
self.flush_slot_cache(old_slot, None::<&mut fn(&_, &_) -> bool>)
{
flush_stats.num_flushed += stats.num_flushed;
flush_stats.num_purged += stats.num_purged;
flush_stats.total_size += stats.total_size;
}
}
} else {
unflushable_unrooted_slot_count += 1;
}
});
datapoint_info!(
"accounts_db-flush_accounts_cache_aggressively",
("num_flushed", flush_stats.num_flushed, i64),
("num_purged", flush_stats.num_purged, i64),
("total_flush_size", flush_stats.total_size, i64),
("total_cache_size", self.accounts_cache.size(), i64),
("total_frozen_slots", excess_slot_count, i64),
("total_slots", self.accounts_cache.num_slots(), i64),
);
}
datapoint_info!(
"accounts_db-flush_accounts_cache",
("total_new_cleaned_roots", total_new_cleaned_roots, i64),
("num_cleaned_roots_flushed", num_cleaned_roots_flushed, i64),
("total_new_excess_roots", total_new_excess_roots, i64),
("num_excess_roots_flushed", num_excess_roots_flushed, i64),
("excess_slot_count", excess_slot_count, i64),
(
"unflushable_unrooted_slot_count",
unflushable_unrooted_slot_count,
i64
),
(
"flush_roots_elapsed",
flush_roots_elapsed.as_us() as i64,
i64
),
("account_bytes_saved", account_bytes_saved, i64),
("num_accounts_saved", num_accounts_saved, i64),
);
// Flush a random slot out after every force flush to catch any inconsistencies
// between cache and written state (i.e. should cause a hash mismatch between validators
// that flush and don't flush if such a bug exists).
let num_slots_remaining = self.accounts_cache.num_slots();
if force_flush && num_slots_remaining >= FLUSH_CACHE_RANDOM_THRESHOLD {
// Don't flush slots that are known to be unrooted
let mut frozen_slots = self.accounts_cache.cached_frozen_slots();
frozen_slots.retain(|s| *s > max_flushed_root);
// Pick a random slot (index 0 <= i < `frozen_slots.len()`)
let rand_slot = frozen_slots.choose(&mut thread_rng());
if let Some(rand_slot) = rand_slot {
let random_flush_stats =
self.flush_slot_cache(*rand_slot, None::<&mut fn(&_, &_) -> bool>);
info!(
"Flushed random slot: num_remaining: {} {:?}",
num_slots_remaining, random_flush_stats,
);
}
}
}
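/// Flush all cached roots `<= requested_flush_root` to storage, from highest to lowest;
/// when `should_clean` is provided, accounts already written by a later root are skipped
/// and counted as saved. Returns `(num_new_roots, num_roots_flushed)`.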
fn flush_rooted_accounts_cache(
&self,
requested_flush_root: Option<Slot>,
should_clean: Option<(&mut usize, &mut usize)>,
) -> (usize, usize) {
let max_clean_root = should_clean.as_ref().and_then(|_| {
// If there is a long running scan going on, this could prevent any cleaning
// based on updates from slots > `max_clean_root`.
self.max_clean_root(requested_flush_root)
});
// Use HashMap because HashSet doesn't provide Entry api
let mut written_accounts = HashMap::new();
// If `should_clean` is None, then `should_flush_f` is also None, which will cause
// `flush_slot_cache` to flush all accounts to storage without cleaning any accounts.
let mut should_flush_f = should_clean.map(|(account_bytes_saved, num_accounts_saved)| {
move |&pubkey: &Pubkey, account: &AccountSharedData| {
use std::collections::hash_map::Entry::{Occupied, Vacant};
let should_flush = match written_accounts.entry(pubkey) {
Vacant(vacant_entry) => {
vacant_entry.insert(());
true
}
Occupied(_occupied_entry) => {
*account_bytes_saved += account.data().len();
*num_accounts_saved += 1;
// If a later root already wrote this account, no point
// in flushing it
false
}
};
should_flush
}
});
// Always flush up to `requested_flush_root`, which is necessary for things like snapshotting.
let cached_roots: BTreeSet<Slot> = self.accounts_cache.clear_roots(requested_flush_root);
// Iterate from highest to lowest so that we don't need to flush earlier
// outdated updates in earlier roots
let mut num_roots_flushed = 0;
for &root in cached_roots.iter().rev() {
let should_flush_f = if let Some(max_clean_root) = max_clean_root {
if root > max_clean_root {
// Only if the root is greater than the `max_clean_root` do we
// have to prevent cleaning, otherwise, just default to `should_flush_f`
// for any slots <= `max_clean_root`
None
} else {
should_flush_f.as_mut()
}
} else {
should_flush_f.as_mut()
};
if self.flush_slot_cache(root, should_flush_f).is_some() {
num_roots_flushed += 1;
}
// Regardless of whether this slot was *just* flushed from the cache by the above
// `flush_slot_cache()`, we should update the `max_flush_root`.
// This is because some rooted slots may be flushed to storage *before* they are marked as root.
// This can occur for instance when:
// 1) The cache is overwhelmed, so we flushed some yet-to-be-rooted frozen slots
// 2) Random evictions
// These slots may then *later* be marked as root, so we still need to handle updating the
// `max_flush_root` in the accounts cache.
self.accounts_cache.set_max_flush_root(root);
}
// Only add to the uncleaned roots set *after* we've flushed the previous roots,
// so that clean will actually be able to clean the slots.
let num_new_roots = cached_roots.len();
self.accounts_index.add_uncleaned_roots(cached_roots);
(num_new_roots, num_roots_flushed)
}
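/// Write the contents of `slot_cache` into a freshly created append vec for `slot`,
/// purging (rather than flushing) any account rejected by `should_flush_f`, then remove
/// the slot from the cache.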
fn do_flush_slot_cache(
&self,
slot: Slot,
slot_cache: &SlotCache,
mut should_flush_f: Option<&mut impl FnMut(&Pubkey, &AccountSharedData) -> bool>,
) -> FlushStats {
let mut num_purged = 0;
let mut total_size = 0;
let mut num_flushed = 0;
let iter_items: Vec<_> = slot_cache.iter().collect();
let mut purged_slot_pubkeys: HashSet<(Slot, Pubkey)> = HashSet::new();
let mut pubkey_to_slot_set: Vec<(Pubkey, Slot)> = vec![];
let (accounts, hashes): (Vec<(&Pubkey, &AccountSharedData)>, Vec<Hash>) = iter_items
.iter()
.filter_map(|iter_item| {
let key = iter_item.key();
let account = &iter_item.value().account;
let should_flush = should_flush_f
.as_mut()
.map(|should_flush_f| should_flush_f(key, account))
.unwrap_or(true);
if should_flush {
let hash = iter_item.value().hash();
total_size += (account.data().len() + STORE_META_OVERHEAD) as u64;
num_flushed += 1;
Some(((key, account), hash))
} else {
// If we don't flush, we have to remove the entry from the
// index, since it's equivalent to purging
purged_slot_pubkeys.insert((slot, *key));
pubkey_to_slot_set.push((*key, slot));
num_purged += 1;
None
}
})
.unzip();
let is_dead_slot = accounts.is_empty();
// Remove the account index entries from earlier roots that are outdated by later roots.
// Safe because queries to the index will be reading updates from later roots.
self.purge_slot_cache_pubkeys(slot, purged_slot_pubkeys, pubkey_to_slot_set, is_dead_slot);
if !is_dead_slot {
let aligned_total_size = Self::page_align(total_size);
// This ensures that all updates are written to an AppendVec, before any
// updates to the index happen, so anybody that sees a real entry in the index,
// will be able to find the account in storage
let flushed_store =
self.create_and_insert_store(slot, aligned_total_size, "flush_slot_cache");
self.store_accounts_frozen(
slot,
&accounts,
Some(&hashes),
Some(Box::new(move |_, _| flushed_store.clone())),
None,
);
// If the above sizing function is correct, just one AppendVec is enough to hold
// all the data for the slot
assert_eq!(
self.storage
.get_slot_stores(slot)
.unwrap()
.read()
.unwrap()
.len(),
1
);
}
// Remove this slot from the cache, which, to AccountsDb's new readers, should look like an
// atomic switch from the cache to storage.
// There is a race condition for existing readers who happen to read exactly while we are
// flushing. That case is handled by retry_to_get_account_accessor().
assert!(self.accounts_cache.remove_slot(slot).is_some());
FlushStats {
slot,
num_flushed,
num_purged,
total_size,
}
}
/// `should_flush_f` is an optional closure that determines whether a given
/// account should be flushed. Passing `None` will by default flush all
/// accounts
fn flush_slot_cache(
&self,
slot: Slot,
should_flush_f: Option<&mut impl FnMut(&Pubkey, &AccountSharedData) -> bool>,
) -> Option<FlushStats> {
let is_being_purged = {
let mut slots_under_contention = self
.remove_unrooted_slots_synchronization
.slots_under_contention
.lock()
.unwrap();
// If we're purging this slot, don't flush it here
if slots_under_contention.contains(&slot) {
true
} else {
slots_under_contention.insert(slot);
false
}
};
if !is_being_purged {
let flush_stats = self.accounts_cache.slot_cache(slot).map(|slot_cache| {
#[cfg(test)]
{
// Give some time for cache flushing to occur here for unit tests
sleep(Duration::from_millis(self.load_delay));
}
// Since we added the slot to `slots_under_contention` AND this slot
// still exists in the cache, we know the slot cannot be removed
// by any other threads past this point. We are now responsible for
// flushing this slot.
self.do_flush_slot_cache(slot, &slot_cache, should_flush_f)
});
// Nobody else should have been purging this slot, so should not have been removed
// from `self.remove_unrooted_slots_synchronization`.
assert!(self
.remove_unrooted_slots_synchronization
.slots_under_contention
.lock()
.unwrap()
.remove(&slot));
// Signal to any threads blocked on `remove_unrooted_slots(slot)` that we have finished
// flushing
self.remove_unrooted_slots_synchronization
.signal
.notify_all();
flush_stats
} else {
None
}
}
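/// Store the given accounts into the write cache for `slot`, returning cache-virtual
/// `AccountInfo`s; each cached account is also handed to the background hasher if one is
/// configured.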
fn write_accounts_to_cache(
&self,
slot: Slot,
hashes: Option<&[impl Borrow<Hash>]>,
accounts_and_meta_to_store: &[(StoredMeta, Option<&impl ReadableAccount>)],
) -> Vec<AccountInfo> {
let len = accounts_and_meta_to_store.len();
let hashes = hashes.map(|hashes| {
assert_eq!(hashes.len(), len);
hashes
});
accounts_and_meta_to_store
.iter()
.enumerate()
.map(|(i, (meta, account))| {
let hash = hashes.map(|hashes| hashes[i].borrow());
let account = account
.map(|account| account.to_account_shared_data())
.unwrap_or_default();
let account_info = AccountInfo {
store_id: CACHE_VIRTUAL_STORAGE_ID,
offset: CACHE_VIRTUAL_OFFSET,
stored_size: CACHE_VIRTUAL_STORED_SIZE,
lamports: account.lamports(),
};
self.notify_account_at_accounts_update(slot, meta, &account);
let cached_account = self.accounts_cache.store(slot, &meta.pubkey, account, hash);
// hash this account in the bg
match &self.sender_bg_hasher {
Some(ref sender) => {
let _ = sender.send(cached_account);
}
None => (),
};
account_info
})
.collect()
}
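/// Build `StoredMeta` for each account (zero-lamport accounts are stored as empty/default
/// accounts) and write the batch either to the write cache (when caching is enabled and
/// `is_cached_store` is set) or to storage, computing hashes on the fly when none are
/// provided.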
fn store_accounts_to<
F: FnMut(Slot, usize) -> Arc<AccountStorageEntry>,
P: Iterator<Item = u64>,
>(
&self,
slot: Slot,
accounts: &[(&Pubkey, &(impl ReadableAccount + ZeroLamport))],
hashes: Option<&[impl Borrow<Hash>]>,
storage_finder: F,
mut write_version_producer: P,
is_cached_store: bool,
) -> Vec<AccountInfo> {
let mut calc_stored_meta_time = Measure::start("calc_stored_meta");
let accounts_and_meta_to_store: Vec<_> = accounts
.iter()
.map(|(pubkey, account)| {
self.read_only_accounts_cache.remove(**pubkey, slot);
// this is the source of Some(Account) or None.
// Some(Account) = store 'Account'
// None = store a default/empty account with 0 lamports
let (account, data_len) = if account.is_zero_lamport() {
(None, 0)
} else {
(Some(*account), account.data().len() as u64)
};
let meta = StoredMeta {
write_version: write_version_producer.next().unwrap(),
pubkey: **pubkey,
data_len,
};
(meta, account)
})
.collect();
calc_stored_meta_time.stop();
self.stats
.calc_stored_meta
.fetch_add(calc_stored_meta_time.as_us(), Ordering::Relaxed);
if self.caching_enabled && is_cached_store {
self.write_accounts_to_cache(slot, hashes, &accounts_and_meta_to_store)
} else {
match hashes {
Some(hashes) => self.write_accounts_to_storage(
slot,
hashes,
storage_finder,
&accounts_and_meta_to_store,
),
None => {
// hash any accounts where we were lazy in calculating the hash
let mut hash_time = Measure::start("hash_accounts");
let mut stats = BankHashStats::default();
let len = accounts_and_meta_to_store.len();
let mut hashes = Vec::with_capacity(len);
for account in accounts {
stats.update(account.1);
let hash = Self::hash_account(slot, account.1, account.0);
hashes.push(hash);
}
hash_time.stop();
self.stats
.store_hash_accounts
.fetch_add(hash_time.as_us(), Ordering::Relaxed);
self.write_accounts_to_storage(
slot,
&hashes,
storage_finder,
&accounts_and_meta_to_store,
)
}
}
}
}
fn report_store_stats(&self) {
let mut total_count = 0;
let mut min = std::usize::MAX;
let mut min_slot = 0;
let mut max = 0;
let mut max_slot = 0;
let mut newest_slot = 0;
let mut oldest_slot = std::u64::MAX;
let mut total_bytes = 0;
let mut total_alive_bytes = 0;
for iter_item in self.storage.0.iter() {
let slot = iter_item.key();
let slot_stores = iter_item.value().read().unwrap();
total_count += slot_stores.len();
if slot_stores.len() < min {
min = slot_stores.len();
min_slot = *slot;
}
if slot_stores.len() > max {
max = slot_stores.len();
max_slot = *slot;
}
if *slot > newest_slot {
newest_slot = *slot;
}
if *slot < oldest_slot {
oldest_slot = *slot;
}
for store in slot_stores.values() {
total_alive_bytes += Self::page_align(store.alive_bytes() as u64);
total_bytes += store.total_bytes();
}
}
info!("total_stores: {}, newest_slot: {}, oldest_slot: {}, max_slot: {} (num={}), min_slot: {} (num={})",
total_count, newest_slot, oldest_slot, max_slot, max, min_slot, min);
let total_alive_ratio = if total_bytes > 0 {
total_alive_bytes as f64 / total_bytes as f64
} else {
0.
};
datapoint_info!(
"accounts_db-stores",
("total_count", total_count, i64),
(
"recycle_count",
self.recycle_stores.read().unwrap().entry_count() as u64,
i64
),
("total_bytes", total_bytes, i64),
("total_alive_bytes", total_alive_bytes, i64),
("total_alive_ratio", total_alive_ratio, f64),
);
datapoint_info!(
"accounts_db-perf-stats",
(
"delta_hash_num",
self.stats.delta_hash_num.swap(0, Ordering::Relaxed),
i64
),
(
"delta_hash_scan_us",
self.stats
.delta_hash_scan_time_total_us
.swap(0, Ordering::Relaxed),
i64
),
(
"delta_hash_accumulate_us",
self.stats
.delta_hash_accumulate_time_total_us
.swap(0, Ordering::Relaxed),
i64
),
);
}
pub fn checked_iterative_sum_for_capitalization(total_cap: u64, new_cap: u64) -> u64 {
let new_total = total_cap as u128 + new_cap as u128;
AccountsHash::checked_cast_for_capitalization(new_total)
}
pub fn checked_sum_for_capitalization<T: Iterator<Item = u64>>(balances: T) -> u64 {
AccountsHash::checked_cast_for_capitalization(balances.map(|b| b as u128).sum::<u128>())
}
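/// Index-based accounts hash: walk every pubkey in the accounts index, hash the newest
/// version of each account visible from `ancestors`/`slot`, and return the accumulated
/// hash together with the total lamports.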
fn calculate_accounts_hash(
&self,
slot: Slot,
ancestors: &Ancestors,
check_hash: bool,
) -> Result<(Hash, u64), BankHashVerificationError> {
use BankHashVerificationError::*;
let mut collect = Measure::start("collect");
let keys: Vec<_> = self
.accounts_index
.account_maps
.iter()
.map(|map| {
let mut keys = map.read().unwrap().keys();
keys.sort_unstable(); // hashmap is not ordered, but bins are relative to each other
keys
})
.flatten()
.collect();
collect.stop();
let mut scan = Measure::start("scan");
let mismatch_found = AtomicU64::new(0);
// Pick a chunk size big enough to allow us to produce output vectors that are smaller than the overall size.
// We'll also accumulate the lamports within each chunk and fewer chunks results in less contention to accumulate the sum.
let chunks = crate::accounts_hash::MERKLE_FANOUT.pow(4);
let total_lamports = Mutex::<u64>::new(0);
let get_hashes = || {
keys.par_chunks(chunks)
.map(|pubkeys| {
let mut sum = 0u128;
let result: Vec<Hash> = pubkeys
.iter()
.filter_map(|pubkey| {
if self.is_filler_account(pubkey) {
return None;
}
if let AccountIndexGetResult::Found(lock, index) =
self.accounts_index.get(pubkey, Some(ancestors), Some(slot))
{
let (slot, account_info) = &lock.slot_list()[index];
if account_info.lamports != 0 {
// Because we're keeping the `lock` here, there is no need
// to use retry_to_get_account_accessor().
// In other words, flusher/shrinker/cleaner is blocked from
// causing any Accessor(None) situation.
// Anyway, this race condition concern is currently a moot
// point because calculate_accounts_hash() should not
// currently race with clean/shrink, because the full hash
// is synchronous with clean/shrink in
// AccountsBackgroundService.
self.get_account_accessor(
*slot,
pubkey,
account_info.store_id,
account_info.offset,
)
.get_loaded_account()
.and_then(
|loaded_account| {
let loaded_hash = loaded_account.loaded_hash();
let balance = account_info.lamports;
if check_hash && !self.is_filler_account(pubkey) {
let computed_hash =
loaded_account.compute_hash(*slot, pubkey);
if computed_hash != loaded_hash {
info!("hash mismatch found: computed: {}, loaded: {}, pubkey: {}", computed_hash, loaded_hash, pubkey);
mismatch_found
.fetch_add(1, Ordering::Relaxed);
return None;
}
}
sum += balance as u128;
Some(loaded_hash)
},
)
} else {
None
}
} else {
None
}
})
.collect();
let mut total = total_lamports.lock().unwrap();
*total =
AccountsHash::checked_cast_for_capitalization(*total as u128 + sum);
result
}).collect()
};
let hashes: Vec<Vec<Hash>> = if check_hash {
get_hashes()
} else {
self.thread_pool_clean.install(get_hashes)
};
if mismatch_found.load(Ordering::Relaxed) > 0 {
warn!(
"{} mismatched account hash(es) found",
mismatch_found.load(Ordering::Relaxed)
);
return Err(MismatchedAccountHash);
}
scan.stop();
let total_lamports = *total_lamports.lock().unwrap();
let mut hash_time = Measure::start("hash");
let (accumulated_hash, hash_total) = AccountsHash::calculate_hash(hashes);
hash_time.stop();
datapoint_info!(
"update_accounts_hash",
("accounts_scan", scan.as_us(), i64),
("hash", hash_time.as_us(), i64),
("hash_total", hash_total, i64),
("collect", collect.as_us(), i64),
);
Ok((accumulated_hash, total_lamports))
}
pub fn get_accounts_hash(&self, slot: Slot) -> Hash {
let bank_hashes = self.bank_hashes.read().unwrap();
let bank_hash_info = bank_hashes.get(&slot).unwrap();
bank_hash_info.snapshot_hash
}
pub fn update_accounts_hash(&self, slot: Slot, ancestors: &Ancestors) -> (Hash, u64) {
self.update_accounts_hash_with_index_option(
true, false, slot, ancestors, None, false, None, false,
)
}
pub fn update_accounts_hash_test(&self, slot: Slot, ancestors: &Ancestors) -> (Hash, u64) {
self.update_accounts_hash_with_index_option(
true, true, slot, ancestors, None, false, None, false,
)
}
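/// Merge-scan several storages belonging to one slot, invoking `scan_func` on accounts in
/// ascending `write_version` order across the storages.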
fn scan_multiple_account_storages_one_slot<F, B>(
storages: &[Arc<AccountStorageEntry>],
scan_func: &F,
slot: Slot,
retval: &mut B,
) where
F: Fn(LoadedAccount, &mut B, Slot) + Send + Sync,
B: Send + Default,
{
// we have to call the scan_func in order of write_version within a slot if there are multiple storages per slot
let mut len = storages.len();
let mut progress = Vec::with_capacity(len);
let mut current = Vec::with_capacity(len);
for storage in storages {
let accounts = storage.accounts.accounts(0);
let mut iterator: std::vec::IntoIter<StoredAccountMeta<'_>> = accounts.into_iter();
if let Some(item) = iterator
.next()
.map(|stored_account| (stored_account.meta.write_version, Some(stored_account)))
{
current.push(item);
progress.push(iterator);
}
}
while !progress.is_empty() {
let mut min = current[0].0;
let mut min_index = 0;
for (i, (item, _)) in current.iter().enumerate().take(len).skip(1) {
if item < &min {
min_index = i;
min = *item;
}
}
let mut account = (0, None);
std::mem::swap(&mut account, &mut current[min_index]);
scan_func(LoadedAccount::Stored(account.1.unwrap()), retval, slot);
let next = progress[min_index]
.next()
.map(|stored_account| (stored_account.meta.write_version, Some(stored_account)));
match next {
Some(item) => {
current[min_index] = item;
}
None => {
current.remove(min_index);
progress.remove(min_index);
len -= 1;
}
}
}
}
/// Scan through all the account storage in parallel
fn scan_account_storage_no_bank<F, F2>(
cache_hash_data: &CacheHashData,
accounts_cache_and_ancestors: Option<(
&AccountsCache,
&Ancestors,
&AccountInfoAccountsIndex,
)>,
snapshot_storages: &SortedStorages,
scan_func: F,
after_func: F2,
bin_range: &Range<usize>,
bin_calculator: &PubkeyBinCalculator24,
) -> Vec<BinnedHashData>
where
F: Fn(LoadedAccount, &mut BinnedHashData, Slot) + Send + Sync,
F2: Fn(BinnedHashData) -> BinnedHashData + Send + Sync,
{
let start_bin_index = bin_range.start;
let width = snapshot_storages.range_width();
// 2 is for 2 special chunks - unaligned slots at the beginning and end
let chunks = 2 + (width as Slot / MAX_ITEMS_PER_CHUNK);
let range = snapshot_storages.range();
let slot0 = range.start;
let first_boundary =
((slot0 + MAX_ITEMS_PER_CHUNK) / MAX_ITEMS_PER_CHUNK) * MAX_ITEMS_PER_CHUNK;
(0..chunks)
.into_par_iter()
.map(|chunk| {
let mut retval = vec![];
// calculate start, end
let (start, mut end) = if chunk == 0 {
if slot0 == first_boundary {
return after_func(retval); // if we evenly divide, nothing for special chunk 0 to do
}
// otherwise first chunk is not 'full'
(slot0, first_boundary)
} else {
// normal chunk in the middle or at the end
let start = first_boundary + MAX_ITEMS_PER_CHUNK * (chunk - 1);
let end = start + MAX_ITEMS_PER_CHUNK;
(start, end)
};
end = std::cmp::min(end, range.end);
if start == end {
return after_func(retval);
}
let mut file_name = String::default();
if accounts_cache_and_ancestors.is_none()
&& end.saturating_sub(start) == MAX_ITEMS_PER_CHUNK
{
let mut load_from_cache = true;
let mut hasher = std::collections::hash_map::DefaultHasher::new(); // wrong one?
for slot in start..end {
let sub_storages = snapshot_storages.get(slot);
bin_range.start.hash(&mut hasher);
bin_range.end.hash(&mut hasher);
if let Some(sub_storages) = sub_storages {
if sub_storages.len() > 1 {
load_from_cache = false;
break;
}
let storage_file = sub_storages.first().unwrap().accounts.get_path();
slot.hash(&mut hasher);
storage_file.hash(&mut hasher);
// check alive_bytes, etc. here?
let amod = std::fs::metadata(storage_file);
if amod.is_err() {
load_from_cache = false;
break;
}
let amod = amod.unwrap().modified();
if amod.is_err() {
load_from_cache = false;
break;
}
let amod = amod
.unwrap()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_secs();
amod.hash(&mut hasher);
}
}
if load_from_cache {
// we have a hash value for all the storages in this slot
// so, build a file name:
let hash = hasher.finish();
file_name = format!(
"{}.{}.{}.{}.{}",
start, end, bin_range.start, bin_range.end, hash
);
if retval.is_empty() {
let range = bin_range.end - bin_range.start;
retval.append(&mut vec![Vec::new(); range]);
}
if cache_hash_data
.load(
&Path::new(&file_name),
&mut retval,
start_bin_index,
bin_calculator,
)
.is_ok()
{
return retval;
}
// fall through and load normally - we failed to load
}
}
for slot in start..end {
let sub_storages = snapshot_storages.get(slot);
let valid_slot = sub_storages.is_some();
if let Some((cache, ancestors, accounts_index)) = accounts_cache_and_ancestors {
if let Some(slot_cache) = cache.slot_cache(slot) {
if valid_slot
|| ancestors.contains_key(&slot)
|| accounts_index.is_root(slot)
{
let keys = slot_cache.get_all_pubkeys();
for key in keys {
if let Some(cached_account) = slot_cache.get_cloned(&key) {
let mut accessor = LoadedAccountAccessor::Cached(Some(
Cow::Owned(cached_account),
));
let account = accessor.get_loaded_account().unwrap();
scan_func(account, &mut retval, slot);
};
}
}
}
}
if let Some(sub_storages) = sub_storages {
Self::scan_multiple_account_storages_one_slot(
sub_storages,
&scan_func,
slot,
&mut retval,
);
}
}
let r = after_func(retval);
if !file_name.is_empty() {
let result = cache_hash_data.save(Path::new(&file_name), &r);
if result.is_err() {
info!(
"FAILED_TO_SAVE: {}-{}, {}, first_boundary: {}, {:?}",
range.start, range.end, width, first_boundary, file_name,
);
}
}
r
})
.filter(|x| !x.is_empty())
.collect()
}
// storages are sorted by slot and have range info.
// if we know slots_per_epoch, then add all stores older than slots_per_epoch to dirty_stores so clean visits these slots
fn mark_old_slots_as_dirty(&self, storages: &SortedStorages, slots_per_epoch: Option<Slot>) {
if let Some(slots_per_epoch) = slots_per_epoch {
let max = storages.range().end;
let acceptable_straggler_slot_count = 100; // do nothing special for these old stores which will likely get cleaned up shortly
let sub = slots_per_epoch + acceptable_straggler_slot_count;
let in_epoch_range_start = max.saturating_sub(sub);
for slot in storages.range().start..in_epoch_range_start {
if let Some(storages) = storages.get(slot) {
storages.iter().for_each(|store| {
self.dirty_stores
.insert((slot, store.append_vec_id()), store.clone());
});
}
}
}
}
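/// Dispatch between the index-based hash calculation (`calculate_accounts_hash`) and the
/// storage-scan based calculation (`calculate_accounts_hash_without_index`), depending on
/// `use_index`.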
fn calculate_accounts_hash_helper(
&self,
use_index: bool,
slot: Slot,
ancestors: &Ancestors,
check_hash: bool,
can_cached_slot_be_unflushed: bool,
slots_per_epoch: Option<Slot>,
is_startup: bool,
) -> Result<(Hash, u64), BankHashVerificationError> {
if !use_index {
let accounts_cache_and_ancestors = if can_cached_slot_be_unflushed {
Some((&self.accounts_cache, ancestors, &self.accounts_index))
} else {
None
};
let mut collect_time = Measure::start("collect");
let (combined_maps, slots) = self.get_snapshot_storages(slot, None, Some(ancestors));
collect_time.stop();
let mut sort_time = Measure::start("sort_storages");
let min_root = self.accounts_index.min_root();
let storages = SortedStorages::new_with_slots(
combined_maps.iter().zip(slots.iter()),
min_root,
Some(slot),
);
self.mark_old_slots_as_dirty(&storages, slots_per_epoch);
sort_time.stop();
let timings = HashStats {
collect_snapshots_us: collect_time.as_us(),
storage_sort_us: sort_time.as_us(),
..HashStats::default()
};
let thread_pool = if is_startup {
None
} else {
Some(&self.thread_pool_clean)
};
Self::calculate_accounts_hash_without_index(
&self.accounts_hash_cache_path,
&storages,
thread_pool,
timings,
check_hash,
accounts_cache_and_ancestors,
if self.filler_account_count > 0 {
self.filler_account_suffix.as_ref()
} else {
None
},
self.num_hash_scan_passes,
)
} else {
self.calculate_accounts_hash(slot, ancestors, check_hash)
}
}
#[allow(clippy::too_many_arguments)]
fn calculate_accounts_hash_helper_with_verify(
&self,
use_index: bool,
debug_verify: bool,
slot: Slot,
ancestors: &Ancestors,
expected_capitalization: Option<u64>,
can_cached_slot_be_unflushed: bool,
check_hash: bool,
slots_per_epoch: Option<Slot>,
is_startup: bool,
) -> Result<(Hash, u64), BankHashVerificationError> {
let (hash, total_lamports) = self.calculate_accounts_hash_helper(
use_index,
slot,
ancestors,
check_hash,
can_cached_slot_be_unflushed,
slots_per_epoch,
is_startup,
)?;
if debug_verify {
// calculate the other way (store or non-store) and verify results match.
let (hash_other, total_lamports_other) = self.calculate_accounts_hash_helper(
!use_index,
slot,
ancestors,
check_hash,
can_cached_slot_be_unflushed,
None,
is_startup,
)?;
let success = hash == hash_other
&& total_lamports == total_lamports_other
&& total_lamports == expected_capitalization.unwrap_or(total_lamports);
assert!(success, "update_accounts_hash_with_index_option mismatch. hashes: {}, {}; lamports: {}, {}; expected lamports: {:?}, using index: {}, slot: {}", hash, hash_other, total_lamports, total_lamports_other, expected_capitalization, use_index, slot);
}
Ok((hash, total_lamports))
}
pub fn update_accounts_hash_with_index_option(
&self,
use_index: bool,
debug_verify: bool,
slot: Slot,
ancestors: &Ancestors,
expected_capitalization: Option<u64>,
can_cached_slot_be_unflushed: bool,
slots_per_epoch: Option<Slot>,
is_startup: bool,
) -> (Hash, u64) {
let check_hash = false;
let (hash, total_lamports) = self
.calculate_accounts_hash_helper_with_verify(
use_index,
debug_verify,
slot,
ancestors,
expected_capitalization,
can_cached_slot_be_unflushed,
check_hash,
slots_per_epoch,
is_startup,
)
.unwrap(); // unwrap here will never fail since check_hash = false
let mut bank_hashes = self.bank_hashes.write().unwrap();
let mut bank_hash_info = bank_hashes.get_mut(&slot).unwrap();
bank_hash_info.snapshot_hash = hash;
(hash, total_lamports)
}
fn scan_snapshot_stores_with_cache(
cache_hash_data: &CacheHashData,
storage: &SortedStorages,
mut stats: &mut crate::accounts_hash::HashStats,
bins: usize,
bin_range: &Range<usize>,
check_hash: bool,
accounts_cache_and_ancestors: Option<(
&AccountsCache,
&Ancestors,
&AccountInfoAccountsIndex,
)>,
filler_account_suffix: Option<&Pubkey>,
) -> Result<Vec<BinnedHashData>, BankHashVerificationError> {
let bin_calculator = PubkeyBinCalculator24::new(bins);
assert!(bin_range.start < bins && bin_range.end <= bins && bin_range.start < bin_range.end);
let mut time = Measure::start("scan all accounts");
stats.num_snapshot_storage = storage.storage_count();
stats.num_slots = storage.slot_count();
let mismatch_found = AtomicU64::new(0);
let range = bin_range.end - bin_range.start;
let sort_time = AtomicU64::new(0);
let result: Vec<BinnedHashData> = Self::scan_account_storage_no_bank(
cache_hash_data,
accounts_cache_and_ancestors,
storage,
|loaded_account: LoadedAccount, accum: &mut BinnedHashData, slot: Slot| {
let pubkey = loaded_account.pubkey();
let mut pubkey_to_bin_index = bin_calculator.bin_from_pubkey(pubkey);
if !bin_range.contains(&pubkey_to_bin_index) {
return;
}
// when we are scanning with bin ranges, we don't need to use exact bin numbers. Subtract so that the first bin we care about is at index 0.
pubkey_to_bin_index -= bin_range.start;
let raw_lamports = loaded_account.lamports();
let zero_raw_lamports = raw_lamports == 0;
let balance = if zero_raw_lamports {
crate::accounts_hash::ZERO_RAW_LAMPORTS_SENTINEL
} else {
raw_lamports
};
let source_item =
CalculateHashIntermediate::new(loaded_account.loaded_hash(), balance, *pubkey);
if check_hash && !Self::is_filler_account_helper(pubkey, filler_account_suffix) {
let computed_hash = loaded_account.compute_hash(slot, pubkey);
if computed_hash != source_item.hash {
info!(
"hash mismatch found: computed: {}, loaded: {}, pubkey: {}",
computed_hash, source_item.hash, pubkey
);
mismatch_found.fetch_add(1, Ordering::Relaxed);
}
}
if accum.is_empty() {
accum.append(&mut vec![Vec::new(); range]);
}
accum[pubkey_to_bin_index].push(source_item);
},
|x| {
let (result, timing) = Self::sort_slot_storage_scan(x);
sort_time.fetch_add(timing, Ordering::Relaxed);
result
},
bin_range,
&bin_calculator,
);
stats.sort_time_total_us += sort_time.load(Ordering::Relaxed);
if check_hash && mismatch_found.load(Ordering::Relaxed) > 0 {
warn!(
"{} mismatched account hash(es) found",
mismatch_found.load(Ordering::Relaxed)
);
return Err(BankHashVerificationError::MismatchedAccountHash);
}
time.stop();
stats.scan_time_total_us += time.as_us();
Ok(result)
}
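/// Sort each bin of the scan result with `AccountsHash::compare_two_hash_entries` and
/// return it along with the accumulated sort time in microseconds.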
fn sort_slot_storage_scan(accum: BinnedHashData) -> (BinnedHashData, u64) {
let time = AtomicU64::new(0);
(
accum
.into_iter()
.map(|mut items| {
let mut sort_time = Measure::start("sort");
{
// use stable sort_by (rather than sort_unstable_by) because slot and write_version are already in order
items.sort_by(AccountsHash::compare_two_hash_entries);
}
sort_time.stop();
time.fetch_add(sort_time.as_us(), Ordering::Relaxed);
items
})
.collect(),
time.load(Ordering::Relaxed),
)
}
// modeled after get_accounts_delta_hash
// intended to be faster than calculate_accounts_hash
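// Each pass covers `bins_per_pass` pubkey bins; partial results are carried in `previous_pass`
// so the final pass yields the full accounts hash and total capitalization.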
pub fn calculate_accounts_hash_without_index(
accounts_hash_cache_path: &Path,
storages: &SortedStorages,
thread_pool: Option<&ThreadPool>,
mut stats: HashStats,
check_hash: bool,
accounts_cache_and_ancestors: Option<(
&AccountsCache,
&Ancestors,
&AccountInfoAccountsIndex,
)>,
filler_account_suffix: Option<&Pubkey>,
num_hash_scan_passes: Option<usize>,
) -> Result<(Hash, u64), BankHashVerificationError> {
let (num_hash_scan_passes, bins_per_pass) = Self::bins_per_pass(num_hash_scan_passes);
let mut scan_and_hash = move || {
let mut previous_pass = PreviousPass::default();
let mut final_result = (Hash::default(), 0);
let cache_hash_data = CacheHashData::new(&accounts_hash_cache_path);
for pass in 0..num_hash_scan_passes {
let bounds = Range {
start: pass * bins_per_pass,
end: (pass + 1) * bins_per_pass,
};
let result = Self::scan_snapshot_stores_with_cache(
&cache_hash_data,
storages,
&mut stats,
PUBKEY_BINS_FOR_CALCULATING_HASHES,
&bounds,
check_hash,
accounts_cache_and_ancestors,
filler_account_suffix,
)?;
let hash = AccountsHash {
filler_account_suffix: filler_account_suffix.cloned(),
};
let (hash, lamports, for_next_pass) = hash.rest_of_hash_calculation(
result,
&mut stats,
pass == num_hash_scan_passes - 1,
previous_pass,
bins_per_pass,
);
previous_pass = for_next_pass;
final_result = (hash, lamports);
}
info!(
"calculate_accounts_hash_without_index: slot (exclusive): {} {:?}",
storages.range().end,
final_result
);
Ok(final_result)
};
if let Some(thread_pool) = thread_pool {
thread_pool.install(scan_and_hash)
} else {
scan_and_hash()
}
}
/// Only called from startup or test code.
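///
/// A minimal sketch (marked `ignore`, not a doctest), assuming `db` is an `AccountsDb` whose
/// bank hash for `slot` has already been computed and `expected_capitalization` is the
/// expected total lamports:
/// ```ignore
/// let ancestors = vec![(slot, 0)].into_iter().collect::<Ancestors>();
/// db.verify_bank_hash_and_lamports(slot, &ancestors, expected_capitalization, false)?;
/// ```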
pub fn verify_bank_hash_and_lamports(
&self,
slot: Slot,
ancestors: &Ancestors,
total_lamports: u64,
test_hash_calculation: bool,
) -> Result<(), BankHashVerificationError> {
use BankHashVerificationError::*;
let use_index = false;
let check_hash = true;
let is_startup = true;
let can_cached_slot_be_unflushed = false;
let (calculated_hash, calculated_lamports) = self
.calculate_accounts_hash_helper_with_verify(
use_index,
test_hash_calculation,
slot,
ancestors,
None,
can_cached_slot_be_unflushed,
check_hash,
None,
is_startup,
)?;
if calculated_lamports != total_lamports {
warn!(
"Mismatched total lamports: {} calculated: {}",
total_lamports, calculated_lamports
);
return Err(MismatchedTotalLamports(calculated_lamports, total_lamports));
}
let bank_hashes = self.bank_hashes.read().unwrap();
if let Some(found_hash_info) = bank_hashes.get(&slot) {
if calculated_hash == found_hash_info.snapshot_hash {
Ok(())
} else {
warn!(
"mismatched bank hash for slot {}: {} (calculated) != {} (expected)",
slot, calculated_hash, found_hash_info.snapshot_hash
);
Err(MismatchedBankHash)
}
} else {
Err(MissingBankHash)
}
}
/// Perform the scan for pubkeys that were written to in a slot
fn do_scan_slot_for_dirty_pubkeys(
&self,
slot: Slot,
) -> ScanStorageResult<Pubkey, DashSet<Pubkey>> {
self.scan_account_storage(
slot,
|loaded_account: LoadedAccount| Some(*loaded_account.pubkey()),
|accum: &DashSet<Pubkey>, loaded_account: LoadedAccount| {
accum.insert(*loaded_account.pubkey());
},
)
}
/// Reduce the scan result of dirty pubkeys after calling `scan_account_storage()` into a
/// single vec of Pubkeys.
fn do_reduce_scan_slot_for_dirty_pubkeys(
scan_result: ScanStorageResult<Pubkey, DashSet<Pubkey>>,
) -> Vec<Pubkey> {
match scan_result {
ScanStorageResult::Cached(cached_result) => cached_result,
ScanStorageResult::Stored(stored_result) => {
stored_result.into_iter().collect::<Vec<_>>()
}
}
}
/// Scan a slot for dirty pubkeys
fn scan_slot_for_dirty_pubkeys(&self, slot: Slot) -> Vec<Pubkey> {
let dirty_pubkeys = self.do_scan_slot_for_dirty_pubkeys(slot);
Self::do_reduce_scan_slot_for_dirty_pubkeys(dirty_pubkeys)
}
/// Scan a slot in the account storage for dirty pubkeys and insert them into the list of
/// uncleaned pubkeys
///
/// This function is called in Bank::drop() when the bank is _not_ frozen, so that its pubkeys
/// are considered for cleanup.
pub fn scan_slot_and_insert_dirty_pubkeys_into_uncleaned_pubkeys(&self, slot: Slot) {
let dirty_pubkeys = self.scan_slot_for_dirty_pubkeys(slot);
self.uncleaned_pubkeys.insert(slot, dirty_pubkeys);
}
pub fn get_accounts_delta_hash(&self, slot: Slot) -> Hash {
let mut scan = Measure::start("scan");
let scan_result: ScanStorageResult<(Pubkey, Hash), DashMapVersionHash> = self
.scan_account_storage(
slot,
|loaded_account: LoadedAccount| {
// Cache only has one version per key, don't need to worry about versioning
Some((*loaded_account.pubkey(), loaded_account.loaded_hash()))
},
|accum: &DashMap<Pubkey, (u64, Hash)>, loaded_account: LoadedAccount| {
let loaded_write_version = loaded_account.write_version();
let loaded_hash = loaded_account.loaded_hash();
// keep the latest write version for each pubkey
match accum.entry(*loaded_account.pubkey()) {
Occupied(mut occupied_entry) => {
if loaded_write_version > occupied_entry.get().version() {
occupied_entry.insert((loaded_write_version, loaded_hash));
}
}
Vacant(vacant_entry) => {
vacant_entry.insert((loaded_write_version, loaded_hash));
}
}
},
);
scan.stop();
let mut accumulate = Measure::start("accumulate");
let mut hashes: Vec<_> = match scan_result {
ScanStorageResult::Cached(cached_result) => cached_result,
ScanStorageResult::Stored(stored_result) => stored_result
.into_iter()
.map(|(pubkey, (_latest_write_version, hash))| (pubkey, hash))
.collect(),
};
let dirty_keys = hashes.iter().map(|(pubkey, _hash)| *pubkey).collect();
if self.filler_accounts_enabled() {
// filler accounts must be added to 'dirty_keys' above but cannot be used to calculate hash
hashes.retain(|(pubkey, _hash)| !self.is_filler_account(pubkey));
}
let ret = AccountsHash::accumulate_account_hashes(hashes);
accumulate.stop();
let mut uncleaned_time = Measure::start("uncleaned_index");
self.uncleaned_pubkeys.insert(slot, dirty_keys);
uncleaned_time.stop();
self.stats
.store_uncleaned_update
.fetch_add(uncleaned_time.as_us(), Ordering::Relaxed);
self.stats
.delta_hash_scan_time_total_us
.fetch_add(scan.as_us(), Ordering::Relaxed);
self.stats
.delta_hash_accumulate_time_total_us
.fetch_add(accumulate.as_us(), Ordering::Relaxed);
self.stats.delta_hash_num.fetch_add(1, Ordering::Relaxed);
ret
}
// previous_slot_entry_was_cached = true means we just need to assert that, after this update
// is complete, there are no items we would have put in reclaims that are not cached
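// Returns the reclaimed (slot, AccountInfo) entries, i.e. the previous index entries replaced
// by this update that may now need to be cleaned from their backing storage.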
fn update_index<T: ReadableAccount + Sync>(
&self,
slot: Slot,
infos: Vec<AccountInfo>,
accounts: &[(&Pubkey, &T)],
previous_slot_entry_was_cached: bool,
) -> SlotList<AccountInfo> {
// using a thread pool here results in deadlock panics from bank_hashes.write()
// so, instead we limit how many threads will be created to the same size as the bg thread pool
let chunk_size = std::cmp::max(1, accounts.len() / quarter_thread_count()); // # pubkeys/thread
infos
.par_chunks(chunk_size)
.zip(accounts.par_chunks(chunk_size))
.map(|(infos_chunk, accounts_chunk)| {
let mut reclaims = Vec::with_capacity(infos_chunk.len() / 2);
for (info, pubkey_account) in infos_chunk.iter().zip(accounts_chunk.iter()) {
let pubkey = pubkey_account.0;
self.accounts_index.upsert(
slot,
pubkey,
pubkey_account.1.owner(),
pubkey_account.1.data(),
&self.account_indexes,
*info,
&mut reclaims,
previous_slot_entry_was_cached,
);
}
reclaims
})
.flatten()
.collect::<Vec<_>>()
}
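// Shrinking a slot with a single store is not worthwhile when its page-aligned alive bytes
// already cover the store's total bytes to within one page, i.e. rewriting would save less than a page.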
fn should_not_shrink(aligned_bytes: u64, total_bytes: u64, num_stores: usize) -> bool {
aligned_bytes + PAGE_SIZE > total_bytes && num_stores == 1
}
fn is_shrinking_productive(slot: Slot, stores: &[Arc<AccountStorageEntry>]) -> bool {
let mut alive_count = 0;
let mut stored_count = 0;
let mut alive_bytes = 0;
let mut total_bytes = 0;
for store in stores {
alive_count += store.count();
stored_count += store.approx_stored_count();
alive_bytes += store.alive_bytes();
total_bytes += store.total_bytes();
}
let aligned_bytes = Self::page_align(alive_bytes as u64);
if Self::should_not_shrink(aligned_bytes, total_bytes, stores.len()) {
trace!(
"shrink_slot_forced ({}, {}): not able to shrink at all: alive/stored: ({} / {}) ({}b / {}b) save: {}",
slot,
stores.len(),
alive_count,
stored_count,
aligned_bytes,
total_bytes,
total_bytes.saturating_sub(aligned_bytes),
);
return false;
}
true
}
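// A store is a shrink candidate either when its page-aligned alive bytes are smaller than its
// total bytes (`TotalSpace`) or when the alive/total ratio falls below the configured
// `shrink_ratio` (`IndividalStore`).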
fn is_candidate_for_shrink(&self, store: &Arc<AccountStorageEntry>) -> bool {
match self.shrink_ratio {
AccountShrinkThreshold::TotalSpace { shrink_ratio: _ } => {
Self::page_align(store.alive_bytes() as u64) < store.total_bytes()
}
AccountShrinkThreshold::IndividalStore { shrink_ratio } => {
(Self::page_align(store.alive_bytes() as u64) as f64 / store.total_bytes() as f64)
< shrink_ratio
}
}
}
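/// Remove the reclaimed accounts from their storage entries and return the set of slots that
/// no longer hold any alive accounts. With the write cache enabled, stores that become sparse
/// enough are also queued as shrink candidates.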
fn remove_dead_accounts(
&self,
reclaims: SlotSlice<AccountInfo>,
expected_slot: Option<Slot>,
mut reclaimed_offsets: Option<&mut AppendVecOffsets>,
reset_accounts: bool,
) -> HashSet<Slot> {
let mut dead_slots = HashSet::new();
let mut new_shrink_candidates: ShrinkCandidates = HashMap::new();
let mut measure = Measure::start("remove");
for (slot, account_info) in reclaims {
// No cached accounts should make it here
assert_ne!(account_info.store_id, CACHE_VIRTUAL_STORAGE_ID);
if let Some(ref mut reclaimed_offsets) = reclaimed_offsets {
reclaimed_offsets
.entry(account_info.store_id)
.or_default()
.insert(account_info.offset);
}
if let Some(expected_slot) = expected_slot {
assert_eq!(*slot, expected_slot);
}
if let Some(store) = self
.storage
.get_account_storage_entry(*slot, account_info.store_id)
{
assert_eq!(
*slot, store.slot(),
"AccountDB::accounts_index corrupted. Storage pointed to: {}, expected: {}, should only point to one slot",
store.slot(), *slot
);
let count = store.remove_account(account_info.stored_size, reset_accounts);
if count == 0 {
self.dirty_stores
.insert((*slot, store.append_vec_id()), store.clone());
dead_slots.insert(*slot);
} else if self.caching_enabled
&& Self::is_shrinking_productive(*slot, &[store.clone()])
&& self.is_candidate_for_shrink(&store)
{
// Checking that this single storage entry is ready for shrinking
// should be a sufficient indication that the slot is ready to be shrunk
// because slots should only have one storage entry, namely the one that was
// created by `flush_slot_cache()`.
{
new_shrink_candidates
.entry(*slot)
.or_default()
.insert(store.append_vec_id(), store);
}
}
}
}
measure.stop();
self.clean_accounts_stats
.remove_dead_accounts_remove_us
.fetch_add(measure.as_us(), Ordering::Relaxed);
if self.caching_enabled {
let mut measure = Measure::start("shrink");
let mut shrink_candidate_slots = self.shrink_candidate_slots.lock().unwrap();
for (slot, slot_shrink_candidates) in new_shrink_candidates {
for (store_id, store) in slot_shrink_candidates {
// count could be == 0 if multiple accounts are removed
// at once
if store.count() != 0 {
debug!(
"adding: {} {} to shrink candidates: count: {}/{} bytes: {}/{}",
store_id,
slot,
store.approx_stored_count(),
store.count(),
store.alive_bytes(),
store.total_bytes()
);
shrink_candidate_slots
.entry(slot)
.or_default()
.insert(store_id, store);
}
}
}
measure.stop();
self.clean_accounts_stats
.remove_dead_accounts_shrink_us
.fetch_add(measure.as_us(), Ordering::Relaxed);
}
dead_slots.retain(|slot| {
if let Some(slot_stores) = self.storage.get_slot_stores(*slot) {
for x in slot_stores.read().unwrap().values() {
if x.count() != 0 {
return false;
}
}
}
true
});
dead_slots
}
fn remove_dead_slots_metadata<'a>(
&'a self,
dead_slots_iter: impl Iterator<Item = &'a Slot> + Clone,
purged_slot_pubkeys: HashSet<(Slot, Pubkey)>,
// Should only be `Some` for non-cached slots
purged_stored_account_slots: Option<&mut AccountSlots>,
) {
let mut measure = Measure::start("remove_dead_slots_metadata-ms");
self.clean_dead_slots_from_accounts_index(
dead_slots_iter.clone(),
purged_slot_pubkeys,
purged_stored_account_slots,
);
{
let mut bank_hashes = self.bank_hashes.write().unwrap();
for slot in dead_slots_iter {
bank_hashes.remove(slot);
}
}
measure.stop();
inc_new_counter_info!("remove_dead_slots_metadata-ms", measure.as_ms() as usize);
}
fn clean_dead_slots_from_accounts_index<'a>(
&'a self,
dead_slots_iter: impl Iterator<Item = &'a Slot> + Clone,
purged_slot_pubkeys: HashSet<(Slot, Pubkey)>,
// Should only be `Some` for non-cached slots
purged_stored_account_slots: Option<&mut AccountSlots>,
) {
let mut accounts_index_root_stats = AccountsIndexRootsStats::default();
let mut measure = Measure::start("unref_from_storage");
if let Some(purged_stored_account_slots) = purged_stored_account_slots {
let len = purged_stored_account_slots.len();
// we could build a higher level function in accounts_index to group by bin
const BATCH_SIZE: usize = 10_000;
let batches = 1 + (len / BATCH_SIZE);
self.thread_pool_clean.install(|| {
(0..batches).into_par_iter().for_each(|batch| {
let skip = batch * BATCH_SIZE;
for (_slot, pubkey) in purged_slot_pubkeys.iter().skip(skip).take(BATCH_SIZE) {
self.accounts_index.unref_from_storage(pubkey);
}
})
});
for (slot, pubkey) in purged_slot_pubkeys {
purged_stored_account_slots
.entry(pubkey)
.or_default()
.insert(slot);
}
}
measure.stop();
accounts_index_root_stats.clean_unref_from_storage_us += measure.as_us();
let mut measure = Measure::start("clean_dead_slot");
let mut rooted_cleaned_count = 0;
let mut unrooted_cleaned_count = 0;
let dead_slots: Vec<_> = dead_slots_iter
.map(|slot| {
if self
.accounts_index
.clean_dead_slot(*slot, &mut accounts_index_root_stats)
{
rooted_cleaned_count += 1;
} else {
unrooted_cleaned_count += 1;
}
*slot
})
.collect();
measure.stop();
accounts_index_root_stats.clean_dead_slot_us += measure.as_us();
info!("remove_dead_slots_metadata: slots {:?}", dead_slots);
accounts_index_root_stats.rooted_cleaned_count += rooted_cleaned_count;
accounts_index_root_stats.unrooted_cleaned_count += unrooted_cleaned_count;
self.clean_accounts_stats
.latest_accounts_index_roots_stats
.update(&accounts_index_root_stats);
}
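/// Collect every (slot, pubkey) pair stored in the dead slots, then purge the corresponding
/// accounts-index and bank-hash metadata for those slots.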
fn clean_stored_dead_slots(
&self,
dead_slots: &HashSet<Slot>,
purged_account_slots: Option<&mut AccountSlots>,
) {
let mut measure = Measure::start("clean_stored_dead_slots-ms");
let mut stores: Vec<Arc<AccountStorageEntry>> = vec![];
for slot in dead_slots.iter() {
if let Some(slot_storage) = self.storage.get_slot_stores(*slot) {
for store in slot_storage.read().unwrap().values() {
stores.push(store.clone());
}
}
}
let purged_slot_pubkeys: HashSet<(Slot, Pubkey)> = {
self.thread_pool_clean.install(|| {
stores
.into_par_iter()
.map(|store| {
let accounts = store.all_accounts();
let slot = store.slot();
accounts
.into_iter()
.map(|account| (slot, account.meta.pubkey))
.collect::<HashSet<(Slot, Pubkey)>>()
})
.reduce(HashSet::new, |mut reduced, store_pubkeys| {
reduced.extend(store_pubkeys);
reduced
})
})
};
self.remove_dead_slots_metadata(
dead_slots.iter(),
purged_slot_pubkeys,
purged_account_slots,
);
measure.stop();
inc_new_counter_info!("clean_stored_dead_slots-ms", measure.as_ms() as usize);
self.clean_accounts_stats
.clean_stored_dead_slots_us
.fetch_add(measure.as_us(), Ordering::Relaxed);
}
pub fn store_cached(&self, slot: Slot, accounts: &[(&Pubkey, &AccountSharedData)]) {
self.store(slot, accounts, self.caching_enabled);
}
/// Store the account update.
/// only called by tests
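///
/// A minimal sketch (marked `ignore`, not a doctest), mirroring how the tests below use it;
/// `lamports`, `space`, `owner`, `pubkey`, and `slot` are placeholders:
/// ```ignore
/// let account = AccountSharedData::new(lamports, space, &owner);
/// db.store_uncached(slot, &[(&pubkey, &account)]);
/// ```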
pub fn store_uncached(&self, slot: Slot, accounts: &[(&Pubkey, &AccountSharedData)]) {
self.store(slot, accounts, false);
}
fn store(&self, slot: Slot, accounts: &[(&Pubkey, &AccountSharedData)], is_cached_store: bool) {
// If all transactions in a batch error out,
// it's possible to get a store with no accounts.
if accounts.is_empty() {
return;
}
let mut stats = BankHashStats::default();
let mut total_data = 0;
accounts.iter().for_each(|(_pubkey, account)| {
total_data += account.data().len();
stats.update(*account);
});
self.stats
.store_total_data
.fetch_add(total_data as u64, Ordering::Relaxed);
{
// we need to drop bank_hashes to prevent deadlocks
let mut bank_hashes = self.bank_hashes.write().unwrap();
let slot_info = bank_hashes
.entry(slot)
.or_insert_with(BankHashInfo::default);
slot_info.stats.merge(&stats);
}
// we use default hashes for now since the same account may be stored to the cache multiple times
self.store_accounts_unfrozen(slot, accounts, None, is_cached_store);
self.report_store_timings();
}
fn report_store_timings(&self) {
if self.stats.last_store_report.should_update(1000) {
let (read_only_cache_hits, read_only_cache_misses) =
self.read_only_accounts_cache.get_and_reset_stats();
datapoint_info!(
"accounts_db_store_timings",
(
"hash_accounts",
self.stats.store_hash_accounts.swap(0, Ordering::Relaxed),
i64
),
(
"store_accounts",
self.stats.store_accounts.swap(0, Ordering::Relaxed),
i64
),
(
"update_index",
self.stats.store_update_index.swap(0, Ordering::Relaxed),
i64
),
(
"handle_reclaims",
self.stats.store_handle_reclaims.swap(0, Ordering::Relaxed),
i64
),
(
"append_accounts",
self.stats.store_append_accounts.swap(0, Ordering::Relaxed),
i64
),
(
"find_storage",
self.stats.store_find_store.swap(0, Ordering::Relaxed),
i64
),
(
"num_accounts",
self.stats.store_num_accounts.swap(0, Ordering::Relaxed),
i64
),
(
"total_data",
self.stats.store_total_data.swap(0, Ordering::Relaxed),
i64
),
(
"read_only_accounts_cache_entries",
self.read_only_accounts_cache.cache_len(),
i64
),
(
"read_only_accounts_cache_data_size",
self.read_only_accounts_cache.data_size(),
i64
),
("read_only_accounts_cache_hits", read_only_cache_hits, i64),
(
"read_only_accounts_cache_misses",
read_only_cache_misses,
i64
),
(
"calc_stored_meta_us",
self.stats.calc_stored_meta.swap(0, Ordering::Relaxed),
i64
),
);
let recycle_stores = self.recycle_stores.read().unwrap();
datapoint_info!(
"accounts_db_store_timings2",
(
"recycle_store_count",
self.stats.recycle_store_count.swap(0, Ordering::Relaxed),
i64
),
(
"current_recycle_store_count",
recycle_stores.entry_count(),
i64
),
(
"current_recycle_store_bytes",
recycle_stores.total_bytes(),
i64
),
(
"create_store_count",
self.stats.create_store_count.swap(0, Ordering::Relaxed),
i64
),
(
"store_get_slot_store",
self.stats.store_get_slot_store.swap(0, Ordering::Relaxed),
i64
),
(
"store_find_existing",
self.stats.store_find_existing.swap(0, Ordering::Relaxed),
i64
),
(
"dropped_stores",
self.stats.dropped_stores.swap(0, Ordering::Relaxed),
i64
),
);
}
}
fn store_accounts_unfrozen(
&self,
slot: Slot,
accounts: &[(&Pubkey, &AccountSharedData)],
hashes: Option<&[&Hash]>,
is_cached_store: bool,
) {
// This path comes from a store to a non-frozen slot.
// If a store is dead here, then a newer update for
// each pubkey in the store must exist in another
// store in the slot. Thus it is safe to reset the store and
// re-use it for a future store op. The pubkey ref counts should still
// hold just 1 ref from this slot.
let reset_accounts = true;
self.store_accounts_custom(
slot,
accounts,
hashes,
None::<StorageFinder>,
None::<Box<dyn Iterator<Item = u64>>>,
is_cached_store,
reset_accounts,
);
}
fn store_accounts_frozen<'a, T: ReadableAccount + Sync + ZeroLamport>(
&'a self,
slot: Slot,
accounts: &[(&Pubkey, &T)],
hashes: Option<&[impl Borrow<Hash>]>,
storage_finder: Option<StorageFinder<'a>>,
write_version_producer: Option<Box<dyn Iterator<Item = StoredMetaWriteVersion>>>,
) -> StoreAccountsTiming {
// stores on a frozen slot should not reset
// the append vec so that hashing could happen on the store
// and accounts in the append_vec can be unrefed correctly
let reset_accounts = false;
let is_cached_store = false;
self.store_accounts_custom(
slot,
accounts,
hashes,
storage_finder,
write_version_producer,
is_cached_store,
reset_accounts,
)
}
fn store_accounts_custom<'a, T: ReadableAccount + Sync + ZeroLamport>(
&'a self,
slot: Slot,
accounts: &[(&Pubkey, &T)],
hashes: Option<&[impl Borrow<Hash>]>,
storage_finder: Option<StorageFinder<'a>>,
write_version_producer: Option<Box<dyn Iterator<Item = u64>>>,
is_cached_store: bool,
reset_accounts: bool,
) -> StoreAccountsTiming {
let storage_finder: StorageFinder<'a> = storage_finder
.unwrap_or_else(|| Box::new(move |slot, size| self.find_storage_candidate(slot, size)));
let write_version_producer: Box<dyn Iterator<Item = u64>> = write_version_producer
.unwrap_or_else(|| {
let mut current_version = self.bulk_assign_write_version(accounts.len());
Box::new(std::iter::from_fn(move || {
let ret = current_version;
current_version += 1;
Some(ret)
}))
});
self.stats
.store_num_accounts
.fetch_add(accounts.len() as u64, Ordering::Relaxed);
let mut store_accounts_time = Measure::start("store_accounts");
let infos = self.store_accounts_to(
slot,
accounts,
hashes,
storage_finder,
write_version_producer,
is_cached_store,
);
store_accounts_time.stop();
self.stats
.store_accounts
.fetch_add(store_accounts_time.as_us(), Ordering::Relaxed);
let mut update_index_time = Measure::start("update_index");
let previous_slot_entry_was_cached = self.caching_enabled && is_cached_store;
// If the cache was flushed, then because `update_index` occurs
// after the accounts are stored by the above `store_accounts_to`
// call, all reads after this point
// will know not to check the cache anymore
let mut reclaims = self.update_index(slot, infos, accounts, previous_slot_entry_was_cached);
// For each updated account, `reclaims` should have at most one
// item (if the account was previously updated in this slot).
// Filter out the cached reclaims as those don't actually map
// to anything that needs to be cleaned in the backing storage
// entries.
if self.caching_enabled {
reclaims.retain(|(_, r)| !r.is_cached());
if is_cached_store {
assert!(reclaims.is_empty());
}
}
update_index_time.stop();
self.stats
.store_update_index
.fetch_add(update_index_time.as_us(), Ordering::Relaxed);
// A store for a single slot should:
// 1) Only make "reclaims" for the same slot
// 2) Should not cause any slots to be removed from the storage
// database because
// a) this slot has at least one account (the one being stored),
//    b) From 1) we know no other slots are included in the "reclaims"
//
// From 1) and 2), it is safe to pass `no_purge_stats` == None, which is
// equivalent to asserting there will be no dead slots.
let no_purge_stats = None;
let mut handle_reclaims_time = Measure::start("handle_reclaims");
self.handle_reclaims(&reclaims, Some(slot), no_purge_stats, None, reset_accounts);
handle_reclaims_time.stop();
self.stats
.store_handle_reclaims
.fetch_add(handle_reclaims_time.as_us(), Ordering::Relaxed);
StoreAccountsTiming {
store_accounts_elapsed: store_accounts_time.as_us(),
update_index_elapsed: update_index_time.as_us(),
handle_reclaims_elapsed: handle_reclaims_time.as_us(),
}
}
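/// Mark `slot` as a root in the accounts index (and in the write cache, if caching is enabled)
/// and flag the slot's stores as dirty so they are considered by clean.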
pub fn add_root(&self, slot: Slot) -> AccountsAddRootTiming {
let mut index_time = Measure::start("index_add_root");
self.accounts_index.add_root(slot, self.caching_enabled);
index_time.stop();
let mut cache_time = Measure::start("cache_add_root");
if self.caching_enabled {
self.accounts_cache.add_root(slot);
}
cache_time.stop();
let mut store_time = Measure::start("store_add_root");
if let Some(slot_stores) = self.storage.get_slot_stores(slot) {
for (store_id, store) in slot_stores.read().unwrap().iter() {
self.dirty_stores.insert((slot, *store_id), store.clone());
}
}
store_time.stop();
AccountsAddRootTiming {
index_us: index_time.as_us(),
cache_us: cache_time.as_us(),
store_us: store_time.as_us(),
}
}
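/// Gather the storage entries eligible for a snapshot at `snapshot_slot`, optionally restricted
/// to slots above `snapshot_base_slot`. Returns the storages along with a parallel vector of
/// the slots they belong to.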
pub fn get_snapshot_storages(
&self,
snapshot_slot: Slot,
snapshot_base_slot: Option<Slot>,
ancestors: Option<&Ancestors>,
) -> (SnapshotStorages, Vec<Slot>) {
let mut m = Measure::start("get slots");
let slots = self
.storage
.0
.iter()
.map(|k| *k.key() as Slot)
.collect::<Vec<_>>();
m.stop();
let mut m2 = Measure::start("filter");
let chunk_size = 5_000;
let wide = self.thread_pool_clean.install(|| {
slots
.par_chunks(chunk_size)
.map(|slots| {
slots
.iter()
.filter_map(|slot| {
if *slot <= snapshot_slot
&& snapshot_base_slot
.map_or(true, |snapshot_base_slot| *slot > snapshot_base_slot)
&& (self.accounts_index.is_root(*slot)
|| ancestors
.map(|ancestors| ancestors.contains_key(slot))
.unwrap_or_default())
{
self.storage.0.get(slot).map_or_else(
|| None,
|item| {
let storages = item
.value()
.read()
.unwrap()
.values()
.filter(|x| x.has_accounts())
.cloned()
.collect::<Vec<_>>();
if !storages.is_empty() {
Some((storages, *slot))
} else {
None
}
},
)
} else {
None
}
})
.collect::<Vec<(SnapshotStorage, Slot)>>()
})
.collect::<Vec<_>>()
});
m2.stop();
let mut m3 = Measure::start("flatten");
// some slots we found above may not have been a root or met the slot # constraint.
// So the resulting 'slots' vector we return will be a subset of the raw keys we got initially.
let mut slots = Vec::with_capacity(slots.len());
let result = wide
.into_iter()
.flatten()
.map(|(storage, slot)| {
slots.push(slot);
storage
})
.collect::<Vec<_>>();
m3.stop();
debug!(
"hash_total: get slots: {}, filter: {}, flatten: {}",
m.as_us(),
m2.as_us(),
m3.as_us()
);
(result, slots)
}
fn process_storage_slot<'a>(
&self,
storage_maps: &'a [Arc<AccountStorageEntry>],
) -> GenerateIndexAccountsMap<'a> {
let num_accounts = storage_maps
.iter()
.map(|storage| storage.approx_stored_count())
.sum();
let mut accounts_map = GenerateIndexAccountsMap::with_capacity(num_accounts);
storage_maps.iter().for_each(|storage| {
let accounts = storage.all_accounts();
accounts.into_iter().for_each(|stored_account| {
let this_version = stored_account.meta.write_version;
let pubkey = stored_account.meta.pubkey;
assert!(!self.is_filler_account(&pubkey));
match accounts_map.entry(pubkey) {
std::collections::hash_map::Entry::Vacant(entry) => {
entry.insert(IndexAccountMapEntry {
write_version: this_version,
store_id: storage.append_vec_id(),
stored_account,
});
}
std::collections::hash_map::Entry::Occupied(mut entry) => {
let occupied_version = entry.get().write_version;
if occupied_version < this_version {
entry.insert(IndexAccountMapEntry {
write_version: this_version,
store_id: storage.append_vec_id(),
stored_account,
});
} else {
assert_ne!(occupied_version, this_version);
}
}
}
})
});
accounts_map
}
fn generate_index_for_slot<'a>(
&self,
accounts_map: GenerateIndexAccountsMap<'a>,
slot: &Slot,
rent_collector: &RentCollector,
) -> SlotIndexGenerationInfo {
if accounts_map.is_empty() {
return SlotIndexGenerationInfo::default();
}
let secondary = !self.account_indexes.is_empty();
let mut accounts_data_len = 0;
let mut num_accounts_rent_exempt = 0;
let num_accounts = accounts_map.len();
let items = accounts_map.into_iter().map(
|(
pubkey,
IndexAccountMapEntry {
write_version: _write_version,
store_id,
stored_account,
},
)| {
if secondary {
self.accounts_index.update_secondary_indexes(
&pubkey,
&stored_account.account_meta.owner,
stored_account.data,
&self.account_indexes,
);
}
if !stored_account.is_zero_lamport() {
accounts_data_len += stored_account.data().len() as u64;
}
if !rent_collector.should_collect_rent(&pubkey, &stored_account, false)
|| rent_collector.get_rent_due(&stored_account).is_exempt()
{
num_accounts_rent_exempt += 1;
}
(
pubkey,
AccountInfo {
store_id,
offset: stored_account.offset,
stored_size: stored_account.stored_size,
lamports: stored_account.account_meta.lamports,
},
)
},
);
let (dirty_pubkeys, insert_time_us) = self
.accounts_index
.insert_new_if_missing_into_primary_index(*slot, num_accounts, items);
// dirty_pubkeys will contain a pubkey if an item has multiple rooted entries for
// a given pubkey. If there is just a single item, there is no cleaning to
// be done on that pubkey. Use only those pubkeys with multiple updates.
if !dirty_pubkeys.is_empty() {
self.uncleaned_pubkeys.insert(*slot, dirty_pubkeys);
}
SlotIndexGenerationInfo {
insert_time_us,
num_accounts: num_accounts as u64,
num_accounts_rent_exempt,
accounts_data_len,
}
}
fn filler_unique_id_bytes() -> usize {
std::mem::size_of::<u32>()
}
fn filler_rent_partition_prefix_bytes() -> usize {
std::mem::size_of::<u64>()
}
fn filler_prefix_bytes() -> usize {
Self::filler_unique_id_bytes() + Self::filler_rent_partition_prefix_bytes()
}
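// Filler pubkeys are laid out as [rent partition prefix (u64) | unique id (u32) | shared suffix],
// so only the bytes after this prefix are compared against the configured suffix below.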
pub fn is_filler_account_helper(
pubkey: &Pubkey,
filler_account_suffix: Option<&Pubkey>,
) -> bool {
let offset = Self::filler_prefix_bytes();
filler_account_suffix
.as_ref()
.map(|filler_account_suffix| {
pubkey.as_ref()[offset..] == filler_account_suffix.as_ref()[offset..]
})
.unwrap_or_default()
}
/// true if 'pubkey' is a filler account
pub fn is_filler_account(&self, pubkey: &Pubkey) -> bool {
Self::is_filler_account_helper(pubkey, self.filler_account_suffix.as_ref())
}
/// true if it is possible that there are filler accounts present
pub fn filler_accounts_enabled(&self) -> bool {
self.filler_account_suffix.is_some()
}
/// retain slots in 'roots' that are > (max(roots) - slots_per_epoch)
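/// e.g. with `roots = [0, 1, 2]` and `slots_per_epoch = 2`, only `[1, 2]` are retained
/// (see `test_retain_roots_within_one_epoch_range` below).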
fn retain_roots_within_one_epoch_range(roots: &mut Vec<Slot>, slots_per_epoch: SlotCount) {
if let Some(max) = roots.iter().max() {
let min = max - slots_per_epoch;
roots.retain(|slot| slot > &min);
}
}
/// filler accounts are space-holding accounts which are ignored by hash calculations and rent.
/// They are designed to allow a validator to run against a network successfully while simulating having many more accounts present.
/// All filler accounts share a common pubkey suffix. The suffix is randomly generated per validator on startup.
/// The filler accounts are added to each slot in the snapshot after index generation.
/// The accounts added in a slot are set up to have pubkeys such that rent will be collected from them before (or when?) their slot becomes an epoch old.
/// Thus, the filler accounts are rewritten by rent and the old slot can be thrown away successfully.
pub fn maybe_add_filler_accounts(&self, epoch_schedule: &EpochSchedule) {
if self.filler_account_count == 0 {
return;
}
info!("adding {} filler accounts", self.filler_account_count);
// break this up to force the accounts out of memory after each pass
let passes = 100;
let mut roots = self.storage.all_slots();
Self::retain_roots_within_one_epoch_range(&mut roots, epoch_schedule.slots_per_epoch);
let root_count = roots.len();
let per_pass = std::cmp::max(1, root_count / passes);
let overall_index = AtomicUsize::new(0);
let string = "FiLLERACCoUNTooooooooooooooooooooooooooooooo";
let hash = Hash::from_str(string).unwrap();
let owner = Pubkey::from_str(string).unwrap();
let lamports = 100_000_000;
let space = 0;
let account = AccountSharedData::new(lamports, space, &owner);
let added = AtomicUsize::default();
for pass in 0..=passes {
self.accounts_index.set_startup(true);
let roots_in_this_pass = roots
.iter()
.skip(pass * per_pass)
.take(per_pass)
.collect::<Vec<_>>();
self.thread_pool.install(|| {
roots_in_this_pass.into_par_iter().for_each(|slot| {
let storage_maps: Vec<Arc<AccountStorageEntry>> = self
.storage
.get_slot_storage_entries(*slot)
.unwrap_or_default();
if storage_maps.is_empty() {
return;
}
let partition = crate::bank::Bank::variable_cycle_partition_from_previous_slot(
epoch_schedule,
*slot,
);
let subrange = crate::bank::Bank::pubkey_range_from_partition(partition);
let idx = overall_index.fetch_add(1, Ordering::Relaxed);
let filler_entries = (idx + 1) * self.filler_account_count / root_count
- idx * self.filler_account_count / root_count;
let accounts = (0..filler_entries)
.map(|_| {
let my_id = added.fetch_add(1, Ordering::Relaxed);
let my_id_bytes = u32::to_be_bytes(my_id as u32);
// pubkey begins life as entire filler 'suffix' pubkey
let mut key = self.filler_account_suffix.unwrap();
let rent_prefix_bytes = Self::filler_rent_partition_prefix_bytes();
// first bytes are replaced with rent partition range: filler_rent_partition_prefix_bytes
key.as_mut()[0..rent_prefix_bytes]
.copy_from_slice(&subrange.start().as_ref()[0..rent_prefix_bytes]);
// next bytes are replaced with my_id: filler_unique_id_bytes
key.as_mut()[rent_prefix_bytes
..(rent_prefix_bytes + Self::filler_unique_id_bytes())]
.copy_from_slice(&my_id_bytes);
assert!(subrange.contains(&key));
key
})
.collect::<Vec<_>>();
let add = accounts
.iter()
.map(|key| (key, &account))
.collect::<Vec<_>>();
let hashes = (0..filler_entries).map(|_| hash).collect::<Vec<_>>();
self.store_accounts_frozen(*slot, &add[..], Some(&hashes[..]), None, None);
})
});
self.accounts_index.set_startup(false);
}
info!("added {} filler accounts", added.load(Ordering::Relaxed));
}
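/// Build the accounts index from every storage entry found in the snapshot. When `verify` is
/// true, a second pass checks that each stored account is present in the index. Returns the
/// aggregate accounts data length discovered while indexing.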
#[allow(clippy::needless_collect)]
pub fn generate_index(
&self,
limit_load_slot_count_from_snapshot: Option<usize>,
verify: bool,
genesis_config: &GenesisConfig,
) -> IndexGenerationInfo {
let mut slots = self.storage.all_slots();
#[allow(clippy::stable_sort_primitive)]
slots.sort();
if let Some(limit) = limit_load_slot_count_from_snapshot {
slots.truncate(limit); // get rid of the newer slots and keep just the older
}
let max_slot = slots.last().cloned().unwrap_or_default();
let schedule = genesis_config.epoch_schedule;
let rent_collector = RentCollector::new(
schedule.get_epoch(max_slot),
&schedule,
genesis_config.slots_per_year(),
&genesis_config.rent,
);
let accounts_data_len = AtomicU64::new(0);
// pass == 0 always runs and generates the index
// pass == 1 only runs if verify == true.
// verify checks that all the expected items are in the accounts index and measures how long it takes to look them all up
let passes = if verify { 2 } else { 1 };
for pass in 0..passes {
if pass == 0 {
self.accounts_index.set_startup(true);
}
let storage_info = StorageSizeAndCountMap::default();
let total_processed_slots_across_all_threads = AtomicU64::new(0);
let outer_slots_len = slots.len();
let chunk_size = (outer_slots_len / 7) + 1; // approximately 400k slots in a snapshot
let mut index_time = Measure::start("index");
let insertion_time_us = AtomicU64::new(0);
let rent_exempt = AtomicU64::new(0);
let total_duplicates = AtomicU64::new(0);
let storage_info_timings = Mutex::new(GenerateIndexTimings::default());
let scan_time: u64 = slots
.par_chunks(chunk_size)
.map(|slots| {
let mut log_status = MultiThreadProgress::new(
&total_processed_slots_across_all_threads,
2,
outer_slots_len as u64,
);
let mut scan_time_sum = 0;
for (index, slot) in slots.iter().enumerate() {
let mut scan_time = Measure::start("scan");
log_status.report(index as u64);
let storage_maps: Vec<Arc<AccountStorageEntry>> = self
.storage
.get_slot_storage_entries(*slot)
.unwrap_or_default();
let accounts_map = self.process_storage_slot(&storage_maps);
scan_time.stop();
scan_time_sum += scan_time.as_us();
Self::update_storage_info(
&storage_info,
&accounts_map,
&storage_info_timings,
);
let insert_us = if pass == 0 {
// generate index
let SlotIndexGenerationInfo {
insert_time_us: insert_us,
num_accounts: total_this_slot,
num_accounts_rent_exempt: rent_exempt_this_slot,
accounts_data_len: accounts_data_len_this_slot,
} = self.generate_index_for_slot(accounts_map, slot, &rent_collector);
rent_exempt.fetch_add(rent_exempt_this_slot, Ordering::Relaxed);
total_duplicates.fetch_add(total_this_slot, Ordering::Relaxed);
accounts_data_len
.fetch_add(accounts_data_len_this_slot, Ordering::Relaxed);
insert_us
} else {
// verify index matches expected and measure the time to get all items
assert!(verify);
let mut lookup_time = Measure::start("lookup_time");
for account in accounts_map.into_iter() {
let (key, account_info) = account;
let lock = self.accounts_index.get_account_maps_read_lock(&key);
let x = lock.get(&key).unwrap();
let sl = x.slot_list.read().unwrap();
let mut count = 0;
for (slot2, account_info2) in sl.iter() {
if slot2 == slot {
count += 1;
let ai = AccountInfo {
store_id: account_info.store_id,
offset: account_info.stored_account.offset,
stored_size: account_info.stored_account.stored_size,
lamports: account_info
.stored_account
.account_meta
.lamports,
};
assert_eq!(&ai, account_info2);
}
}
assert_eq!(1, count);
}
lookup_time.stop();
lookup_time.as_us()
};
insertion_time_us.fetch_add(insert_us, Ordering::Relaxed);
}
scan_time_sum
})
.sum();
index_time.stop();
info!("rent_collector: {:?}", rent_collector);
let mut min_bin_size = usize::MAX;
let mut max_bin_size = usize::MIN;
let total_items = self
.accounts_index
.account_maps
.iter()
.map(|map_bin| {
let len = map_bin.read().unwrap().len_for_stats();
min_bin_size = std::cmp::min(min_bin_size, len);
max_bin_size = std::cmp::max(max_bin_size, len);
len as usize
})
.sum();
// subtract data.len() from accounts_data_len for all old accounts that are in the index twice
let mut accounts_data_len_dedup_timer =
Measure::start("handle accounts data len duplicates");
if pass == 0 {
let mut unique_pubkeys = HashSet::<Pubkey>::default();
self.uncleaned_pubkeys.iter().for_each(|entry| {
entry.value().iter().for_each(|pubkey| {
unique_pubkeys.insert(*pubkey);
})
});
let accounts_data_len_from_duplicates = unique_pubkeys
.into_iter()
.collect::<Vec<_>>()
.par_chunks(4096)
.map(|pubkeys| self.pubkeys_to_duplicate_accounts_data_len(pubkeys))
.sum();
accounts_data_len.fetch_sub(accounts_data_len_from_duplicates, Ordering::Relaxed);
info!(
"accounts data len: {}",
accounts_data_len.load(Ordering::Relaxed)
);
}
accounts_data_len_dedup_timer.stop();
let storage_info_timings = storage_info_timings.into_inner().unwrap();
let mut index_flush_us = 0;
if pass == 0 {
// tell accounts index we are done adding the initial accounts at startup
let mut m = Measure::start("accounts_index_idle_us");
self.accounts_index.set_startup(false);
m.stop();
index_flush_us = m.as_us();
}
let mut timings = GenerateIndexTimings {
index_flush_us,
scan_time,
index_time: index_time.as_us(),
insertion_time_us: insertion_time_us.load(Ordering::Relaxed),
min_bin_size,
max_bin_size,
total_items,
rent_exempt: rent_exempt.load(Ordering::Relaxed),
total_duplicates: total_duplicates.load(Ordering::Relaxed),
storage_size_accounts_map_us: storage_info_timings.storage_size_accounts_map_us,
storage_size_accounts_map_flatten_us: storage_info_timings
.storage_size_accounts_map_flatten_us,
accounts_data_len_dedup_time_us: accounts_data_len_dedup_timer.as_us(),
..GenerateIndexTimings::default()
};
if pass == 0 {
// Need to add these last, otherwise older updates will be cleaned
for slot in &slots {
self.accounts_index.add_root(*slot, false);
}
self.set_storage_count_and_alive_bytes(storage_info, &mut timings);
}
timings.report();
}
IndexGenerationInfo {
accounts_data_len: accounts_data_len.load(Ordering::Relaxed),
}
}
/// Used during generate_index() to get the _duplicate_ accounts data len from the given pubkeys
fn pubkeys_to_duplicate_accounts_data_len(&self, pubkeys: &[Pubkey]) -> u64 {
let mut accounts_data_len_from_duplicates = 0;
pubkeys.iter().for_each(|pubkey| {
if let Some(entry) = self.accounts_index.get_account_read_entry(pubkey) {
let slot_list = entry.slot_list();
if slot_list.len() < 2 {
return;
}
// Only the account data len in the highest slot should be used, and the rest are
// duplicates. So sort the slot list in descending slot order, skip the first
// item, then sum up the remaining data len, which are the duplicates.
let mut slot_list = slot_list.clone();
slot_list
.select_nth_unstable_by(0, |a, b| b.0.cmp(&a.0))
.2
.iter()
.for_each(|(slot, account_info)| {
let maybe_storage_entry = self
.storage
.get_account_storage_entry(*slot, account_info.store_id);
let mut accessor = LoadedAccountAccessor::Stored(
maybe_storage_entry.map(|entry| (entry, account_info.offset)),
);
let loaded_account = accessor.check_and_get_loaded_account();
let account = loaded_account.take_account();
accounts_data_len_from_duplicates += account.data().len();
});
}
});
accounts_data_len_from_duplicates as u64
}
fn update_storage_info(
storage_info: &StorageSizeAndCountMap,
accounts_map: &GenerateIndexAccountsMap<'_>,
timings: &Mutex<GenerateIndexTimings>,
) {
let mut storage_size_accounts_map_time = Measure::start("storage_size_accounts_map");
let mut storage_info_local = HashMap::<AppendVecId, StorageSizeAndCount>::default();
// first collect into a local HashMap with no lock contention
for (_, v) in accounts_map.iter() {
let mut info = storage_info_local
.entry(v.store_id)
.or_insert_with(StorageSizeAndCount::default);
info.stored_size += v.stored_account.stored_size;
info.count += 1;
}
storage_size_accounts_map_time.stop();
// second, collect into the shared DashMap once we've figured out all the info per store_id
let mut storage_size_accounts_map_flatten_time =
Measure::start("storage_size_accounts_map_flatten_time");
for (store_id, v) in storage_info_local.into_iter() {
let mut info = storage_info
.entry(store_id)
.or_insert_with(StorageSizeAndCount::default);
info.stored_size += v.stored_size;
info.count += v.count;
}
storage_size_accounts_map_flatten_time.stop();
let mut timings = timings.lock().unwrap();
timings.storage_size_accounts_map_us += storage_size_accounts_map_time.as_us();
timings.storage_size_accounts_map_flatten_us +=
storage_size_accounts_map_flatten_time.as_us();
}
fn set_storage_count_and_alive_bytes(
&self,
stored_sizes_and_counts: StorageSizeAndCountMap,
timings: &mut GenerateIndexTimings,
) {
// store count and size for each storage
let mut storage_size_storages_time = Measure::start("storage_size_storages");
for slot_stores in self.storage.0.iter() {
for (id, store) in slot_stores.value().read().unwrap().iter() {
// Should be default at this point
assert_eq!(store.alive_bytes(), 0);
if let Some(entry) = stored_sizes_and_counts.get(id) {
trace!(
"id: {} setting count: {} cur: {}",
id,
entry.count,
store.count(),
);
store.count_and_status.write().unwrap().0 = entry.count;
store.alive_bytes.store(entry.stored_size, Ordering::SeqCst);
} else {
trace!("id: {} clearing count", id);
store.count_and_status.write().unwrap().0 = 0;
}
}
}
storage_size_storages_time.stop();
timings.storage_size_storages_us = storage_size_storages_time.as_us();
}
pub(crate) fn print_accounts_stats(&self, label: &str) {
self.print_index(label);
self.print_count_and_status(label);
info!("recycle_stores:");
let recycle_stores = self.recycle_stores.read().unwrap();
for (recycled_time, entry) in recycle_stores.iter() {
info!(
" slot: {} id: {} count_and_status: {:?} approx_store_count: {} len: {} capacity: {} (recycled: {:?})",
entry.slot(),
entry.append_vec_id(),
*entry.count_and_status.read().unwrap(),
entry.approx_store_count.load(Ordering::Relaxed),
entry.accounts.len(),
entry.accounts.capacity(),
recycled_time,
);
}
}
fn print_index(&self, label: &str) {
let mut roots: Vec<_> = self.accounts_index.all_roots();
#[allow(clippy::stable_sort_primitive)]
roots.sort();
info!("{}: accounts_index roots: {:?}", label, roots,);
self.accounts_index.account_maps.iter().for_each(|map| {
for (pubkey, account_entry) in
map.read().unwrap().items(&None::<&std::ops::Range<Pubkey>>)
{
info!(" key: {} ref_count: {}", pubkey, account_entry.ref_count(),);
info!(
" slots: {:?}",
*account_entry.slot_list.read().unwrap()
);
}
});
}
fn print_count_and_status(&self, label: &str) {
let mut slots: Vec<_> = self.storage.all_slots();
#[allow(clippy::stable_sort_primitive)]
slots.sort();
info!("{}: count_and status for {} slots:", label, slots.len());
for slot in &slots {
let slot_stores = self.storage.get_slot_stores(*slot).unwrap();
let r_slot_stores = slot_stores.read().unwrap();
let mut ids: Vec<_> = r_slot_stores.keys().cloned().collect();
#[allow(clippy::stable_sort_primitive)]
ids.sort();
for id in &ids {
let entry = r_slot_stores.get(id).unwrap();
info!(
" slot: {} id: {} count_and_status: {:?} approx_store_count: {} len: {} capacity: {}",
slot,
id,
*entry.count_and_status.read().unwrap(),
entry.approx_store_count.load(Ordering::Relaxed),
entry.accounts.len(),
entry.accounts.capacity(),
);
}
}
}
}
#[cfg(test)]
impl AccountsDb {
pub fn new(paths: Vec<PathBuf>, cluster_type: &ClusterType) -> Self {
Self::new_for_tests(paths, cluster_type)
}
pub fn new_with_config_for_tests(
paths: Vec<PathBuf>,
cluster_type: &ClusterType,
account_indexes: AccountSecondaryIndexes,
caching_enabled: bool,
shrink_ratio: AccountShrinkThreshold,
) -> Self {
Self::new_with_config(
paths,
cluster_type,
account_indexes,
caching_enabled,
shrink_ratio,
Some(ACCOUNTS_DB_CONFIG_FOR_TESTING),
None,
)
}
pub fn new_sized(paths: Vec<PathBuf>, file_size: u64) -> Self {
AccountsDb {
file_size,
..AccountsDb::new(paths, &ClusterType::Development)
}
}
pub fn new_sized_no_extra_stores(paths: Vec<PathBuf>, file_size: u64) -> Self {
AccountsDb {
file_size,
min_num_stores: 0,
..AccountsDb::new(paths, &ClusterType::Development)
}
}
pub fn get_append_vec_id(&self, pubkey: &Pubkey, slot: Slot) -> Option<AppendVecId> {
let ancestors = vec![(slot, 1)].into_iter().collect();
let result = self.accounts_index.get(pubkey, Some(&ancestors), None);
result.map(|(list, index)| list.slot_list()[index].1.store_id)
}
pub fn alive_account_count_in_slot(&self, slot: Slot) -> usize {
self.storage
.get_slot_stores(slot)
.map(|storages| storages.read().unwrap().values().map(|s| s.count()).sum())
.unwrap_or(0)
}
}
/// Legacy shrink functions to support non-cached path.
/// Should be able to be deleted after cache path is the only path.
impl AccountsDb {
// Reads all accounts in the given slot's AppendVecs, filters to only the alive ones,
// then creates a minimal AppendVec filled with those alive accounts.
// v1 path shrinks all stores in the slot
//
// Requires all stores in the slot to be re-written otherwise the accounts_index
// store ref count could become incorrect.
fn do_shrink_slot_v1(&self, slot: Slot, forced: bool) -> usize {
trace!("shrink_stale_slot: slot: {}", slot);
if let Some(stores_lock) = self.storage.get_slot_stores(slot) {
let stores: Vec<_> = stores_lock.read().unwrap().values().cloned().collect();
let mut alive_count = 0;
let mut stored_count = 0;
let mut written_bytes = 0;
let mut total_bytes = 0;
for store in &stores {
alive_count += store.count();
stored_count += store.approx_stored_count();
written_bytes += store.written_bytes();
total_bytes += store.total_bytes();
}
if alive_count == stored_count && stores.len() == 1 {
trace!(
"shrink_stale_slot ({}): not able to shrink at all: alive/stored: {} / {} {}",
slot,
alive_count,
stored_count,
if forced { " (forced)" } else { "" },
);
return 0;
} else if !forced {
let sparse_by_count = (alive_count as f32 / stored_count as f32) <= 0.8;
let sparse_by_bytes = (written_bytes as f32 / total_bytes as f32) <= 0.8;
let not_sparse = !sparse_by_count && !sparse_by_bytes;
let too_small_to_shrink = total_bytes <= PAGE_SIZE;
if not_sparse || too_small_to_shrink {
return 0;
}
info!(
"shrink_stale_slot ({}): not_sparse: {} count: {}/{} byte: {}/{}",
slot, not_sparse, alive_count, stored_count, written_bytes, total_bytes,
);
}
self.do_shrink_slot_stores(slot, stores.iter())
} else {
0
}
}
fn do_reset_uncleaned_roots_v1(
&self,
candidates: &mut MutexGuard<Vec<Slot>>,
max_clean_root: Option<Slot>,
) {
let previous_roots = self.accounts_index.reset_uncleaned_roots(max_clean_root);
candidates.extend(previous_roots);
}
#[cfg(test)]
fn reset_uncleaned_roots_v1(&self) {
self.do_reset_uncleaned_roots_v1(&mut self.shrink_candidate_slots_v1.lock().unwrap(), None);
}
fn do_shrink_stale_slot_v1(&self, slot: Slot) -> usize {
self.do_shrink_slot_v1(slot, false)
}
fn do_shrink_slot_forced_v1(&self, slot: Slot) {
self.do_shrink_slot_v1(slot, true);
}
fn shrink_stale_slot_v1(&self, candidates: &mut MutexGuard<Vec<Slot>>) -> usize {
let mut shrunken_account_total = 0;
let mut shrunk_slot_count = 0;
let start = Instant::now();
let num_roots = self.accounts_index.num_roots();
loop {
if let Some(slot) = self.do_next_shrink_slot_v1(candidates) {
shrunken_account_total += self.do_shrink_stale_slot_v1(slot);
} else {
return 0;
}
if start.elapsed().as_millis() > 100 || shrunk_slot_count > num_roots / 10 {
debug!(
"do_shrink_stale_slot_v1: {} {} {}us",
shrunk_slot_count,
candidates.len(),
start.elapsed().as_micros()
);
break;
}
shrunk_slot_count += 1;
}
shrunken_account_total
}
// Indefinitely returns rooted slots in cyclic order
fn do_next_shrink_slot_v1(&self, candidates: &mut MutexGuard<Vec<Slot>>) -> Option<Slot> {
// At this point, the lock (= candidates) is guaranteed to be held, keeping
// do_reset_uncleaned_roots() (in clean_accounts()) from updating candidates.
// Also, the candidates behind the lock may be swapped out here if the list is empty.
let next = candidates.pop();
if next.is_some() {
next
} else {
let mut new_all_slots = self.all_root_slots_in_index();
let next = new_all_slots.pop();
// refresh candidates for later calls!
**candidates = new_all_slots;
next
}
}
#[cfg(test)]
fn next_shrink_slot_v1(&self) -> Option<Slot> {
let mut candidates = self.shrink_candidate_slots_v1.lock().unwrap();
self.do_next_shrink_slot_v1(&mut candidates)
}
pub fn process_stale_slot_v1(&self) -> usize {
let mut measure = Measure::start("stale_slot_shrink-ms");
let candidates = self.shrink_candidate_slots_v1.try_lock();
if candidates.is_err() {
// skip and return immediately if locked by clean_accounts()
// the calling background thread will just retry later.
return 0;
}
// hold this lock as long as this shrinking process is running to avoid conflicts
// with clean_accounts().
let mut candidates = candidates.unwrap();
let count = self.shrink_stale_slot_v1(&mut candidates);
measure.stop();
inc_new_counter_info!("stale_slot_shrink-ms", measure.as_ms() as usize);
count
}
#[cfg(test)]
fn shrink_all_stale_slots_v1(&self) {
for slot in self.all_slots_in_storage() {
self.do_shrink_stale_slot_v1(slot);
}
}
}
#[cfg(test)]
pub mod tests {
use {
super::*,
crate::{
accounts_hash::MERKLE_FANOUT,
accounts_index::{tests::*, AccountSecondaryIndexesIncludeExclude, RefCount},
append_vec::{test_utils::TempFile, AccountMeta},
inline_spl_token,
},
assert_matches::assert_matches,
rand::{thread_rng, Rng},
solana_sdk::{
account::{
accounts_equal, Account, AccountSharedData, ReadableAccount, WritableAccount,
},
hash::HASH_BYTES,
pubkey::PUBKEY_BYTES,
},
std::{
iter::FromIterator,
str::FromStr,
thread::{self, sleep, Builder, JoinHandle},
time::Duration,
},
};
fn linear_ancestors(end_slot: u64) -> Ancestors {
let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
for i in 1..end_slot {
ancestors.insert(i, (i - 1) as usize);
}
ancestors
}
fn empty_storages<'a>() -> SortedStorages<'a> {
SortedStorages::new(&[])
}
impl AccountsDb {
fn scan_snapshot_stores(
storage: &SortedStorages,
stats: &mut crate::accounts_hash::HashStats,
bins: usize,
bin_range: &Range<usize>,
check_hash: bool,
) -> Result<Vec<BinnedHashData>, BankHashVerificationError> {
let temp_dir = TempDir::new().unwrap();
let accounts_hash_cache_path = temp_dir.path();
Self::scan_snapshot_stores_with_cache(
&CacheHashData::new(&accounts_hash_cache_path),
storage,
stats,
bins,
bin_range,
check_hash,
None,
None,
)
}
}
#[test]
fn test_retain_roots_within_one_epoch_range() {
let mut roots = vec![0, 1, 2];
let slots_per_epoch = 2;
AccountsDb::retain_roots_within_one_epoch_range(&mut roots, slots_per_epoch);
assert_eq!(&vec![1, 2], &roots);
}
#[test]
#[should_panic(
expected = "bin_range.start < bins && bin_range.end <= bins &&\\n bin_range.start < bin_range.end"
)]
fn test_accountsdb_scan_snapshot_stores_illegal_range_start() {
let mut stats = HashStats::default();
let bounds = Range { start: 2, end: 2 };
AccountsDb::scan_snapshot_stores(&empty_storages(), &mut stats, 2, &bounds, false).unwrap();
}
#[test]
#[should_panic(
expected = "bin_range.start < bins && bin_range.end <= bins &&\\n bin_range.start < bin_range.end"
)]
fn test_accountsdb_scan_snapshot_stores_illegal_range_end() {
let mut stats = HashStats::default();
let bounds = Range { start: 1, end: 3 };
AccountsDb::scan_snapshot_stores(&empty_storages(), &mut stats, 2, &bounds, false).unwrap();
}
#[test]
#[should_panic(
expected = "bin_range.start < bins && bin_range.end <= bins &&\\n bin_range.start < bin_range.end"
)]
fn test_accountsdb_scan_snapshot_stores_illegal_range_inverse() {
let mut stats = HashStats::default();
let bounds = Range { start: 1, end: 0 };
AccountsDb::scan_snapshot_stores(&empty_storages(), &mut stats, 2, &bounds, false).unwrap();
}
fn sample_storages_and_account_in_slot(
slot: Slot,
) -> (SnapshotStorages, Vec<CalculateHashIntermediate>) {
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey0 = Pubkey::new(&[0u8; 32]);
let pubkey127 = Pubkey::new(&[0x7fu8; 32]);
let pubkey128 = Pubkey::new(&[0x80u8; 32]);
let pubkey255 = Pubkey::new(&[0xffu8; 32]);
let mut raw_expected = vec![
CalculateHashIntermediate::new(Hash::default(), 1, pubkey0),
CalculateHashIntermediate::new(Hash::default(), 128, pubkey127),
CalculateHashIntermediate::new(Hash::default(), 129, pubkey128),
CalculateHashIntermediate::new(Hash::default(), 256, pubkey255),
];
let expected_hashes = vec![
Hash::from_str("5K3NW73xFHwgTWVe4LyCg4QfQda8f88uZj2ypDx2kmmH").unwrap(),
Hash::from_str("84ozw83MZ8oeSF4hRAg7SeW1Tqs9LMXagX1BrDRjtZEx").unwrap(),
Hash::from_str("5XqtnEJ41CG2JWNp7MAg9nxkRUAnyjLxfsKsdrLxQUbC").unwrap(),
Hash::from_str("DpvwJcznzwULYh19Zu5CuAA4AT6WTBe4H6n15prATmqj").unwrap(),
];
let mut raw_accounts = Vec::default();
for i in 0..raw_expected.len() {
raw_accounts.push(AccountSharedData::new(
raw_expected[i].lamports,
1,
AccountSharedData::default().owner(),
));
let hash = AccountsDb::hash_account(slot, &raw_accounts[i], &raw_expected[i].pubkey);
if slot == 1 {
assert_eq!(hash, expected_hashes[i]);
}
raw_expected[i].hash = hash;
}
let to_store = raw_accounts
.iter()
.zip(raw_expected.iter())
.map(|(account, intermediate)| (&intermediate.pubkey, account))
.collect::<Vec<_>>();
accounts.store_uncached(slot, &to_store[..]);
accounts.add_root(slot);
let (storages, slots) = accounts.get_snapshot_storages(slot, None, None);
assert_eq!(storages.len(), slots.len());
storages
.iter()
.zip(slots.iter())
.for_each(|(storages, slot)| {
for storage in storages {
assert_eq!(&storage.slot(), slot);
}
});
(storages, raw_expected)
}
fn sample_storages_and_accounts() -> (SnapshotStorages, Vec<CalculateHashIntermediate>) {
sample_storages_and_account_in_slot(1)
}
fn get_storage_refs(input: &[SnapshotStorage]) -> SortedStorages {
SortedStorages::new(input)
}
#[test]
fn test_accountsdb_scan_snapshot_stores() {
solana_logger::setup();
let (storages, raw_expected) = sample_storages_and_accounts();
let bins = 1;
let mut stats = HashStats::default();
let result = AccountsDb::scan_snapshot_stores(
&get_storage_refs(&storages),
&mut stats,
bins,
&Range {
start: 0,
end: bins,
},
false,
)
.unwrap();
assert_eq!(result, vec![vec![raw_expected.clone()]]);
let bins = 2;
let result = AccountsDb::scan_snapshot_stores(
&get_storage_refs(&storages),
&mut stats,
bins,
&Range {
start: 0,
end: bins,
},
false,
)
.unwrap();
let mut expected = vec![Vec::new(); bins];
expected[0].push(raw_expected[0].clone());
expected[0].push(raw_expected[1].clone());
expected[bins - 1].push(raw_expected[2].clone());
expected[bins - 1].push(raw_expected[3].clone());
assert_eq!(result, vec![expected]);
let bins = 4;
let result = AccountsDb::scan_snapshot_stores(
&get_storage_refs(&storages),
&mut stats,
bins,
&Range {
start: 0,
end: bins,
},
false,
)
.unwrap();
let mut expected = vec![Vec::new(); bins];
expected[0].push(raw_expected[0].clone());
expected[1].push(raw_expected[1].clone());
expected[2].push(raw_expected[2].clone());
expected[bins - 1].push(raw_expected[3].clone());
assert_eq!(result, vec![expected]);
let bins = 256;
let result = AccountsDb::scan_snapshot_stores(
&get_storage_refs(&storages),
&mut stats,
bins,
&Range {
start: 0,
end: bins,
},
false,
)
.unwrap();
let mut expected = vec![Vec::new(); bins];
expected[0].push(raw_expected[0].clone());
expected[127].push(raw_expected[1].clone());
expected[128].push(raw_expected[2].clone());
expected[bins - 1].push(raw_expected.last().unwrap().clone());
assert_eq!(result, vec![expected]);
}
#[test]
fn test_accountsdb_scan_snapshot_stores_2nd_chunk() {
// enough stores to get to 2nd chunk
let bins = 1;
let slot = MAX_ITEMS_PER_CHUNK as Slot;
let (storages, raw_expected) = sample_storages_and_account_in_slot(slot);
let storage_data = vec![(&storages[0], slot)];
let sorted_storages =
SortedStorages::new_debug(&storage_data[..], 0, MAX_ITEMS_PER_CHUNK as usize + 1);
let mut stats = HashStats::default();
let result = AccountsDb::scan_snapshot_stores(
&sorted_storages,
&mut stats,
bins,
&Range {
start: 0,
end: bins,
},
false,
)
.unwrap();
assert_eq!(result.len(), 2); // 2 chunks
assert_eq!(result[0].len(), bins);
assert_eq!(0, result[0].iter().map(|x| x.len()).sum::<usize>()); // nothing found in bin 0
assert_eq!(result[1].len(), bins);
assert_eq!(result[1], vec![raw_expected]);
}
#[test]
fn test_accountsdb_scan_snapshot_stores_binning() {
let mut stats = HashStats::default();
let (storages, raw_expected) = sample_storages_and_accounts();
// just the first bin of 2
let bins = 2;
let half_bins = bins / 2;
let result = AccountsDb::scan_snapshot_stores(
&get_storage_refs(&storages),
&mut stats,
bins,
&Range {
start: 0,
end: half_bins,
},
false,
)
.unwrap();
let mut expected = vec![Vec::new(); half_bins];
expected[0].push(raw_expected[0].clone());
expected[0].push(raw_expected[1].clone());
assert_eq!(result, vec![expected]);
// just the second bin of 2
let result = AccountsDb::scan_snapshot_stores(
&get_storage_refs(&storages),
&mut stats,
bins,
&Range {
start: 1,
end: bins,
},
false,
)
.unwrap();
let mut expected = vec![Vec::new(); half_bins];
let starting_bin_index = 0;
expected[starting_bin_index].push(raw_expected[2].clone());
expected[starting_bin_index].push(raw_expected[3].clone());
assert_eq!(result, vec![expected]);
// 1 bin at a time of 4
let bins = 4;
for (bin, expected_item) in raw_expected.iter().enumerate().take(bins) {
let result = AccountsDb::scan_snapshot_stores(
&get_storage_refs(&storages),
&mut stats,
bins,
&Range {
start: bin,
end: bin + 1,
},
false,
)
.unwrap();
let mut expected = vec![Vec::new(); 1];
expected[0].push(expected_item.clone());
assert_eq!(result, vec![expected]);
}
let bins = 256;
let bin_locations = vec![0, 127, 128, 255];
let range = 1;
for bin in 0..bins {
let result = AccountsDb::scan_snapshot_stores(
&get_storage_refs(&storages),
&mut stats,
bins,
&Range {
start: bin,
end: bin + range,
},
false,
)
.unwrap();
let mut expected = vec![];
if let Some(index) = bin_locations.iter().position(|&r| r == bin) {
expected = vec![vec![Vec::new(); range]];
expected[0][0].push(raw_expected[index].clone());
}
assert_eq!(result, expected);
}
}
#[test]
fn test_accountsdb_scan_snapshot_stores_binning_2nd_chunk() {
// enough stores to get to 2nd chunk
// range is for only 1 bin out of 256.
let bins = 256;
let slot = MAX_ITEMS_PER_CHUNK as Slot;
let (storages, raw_expected) = sample_storages_and_account_in_slot(slot);
let storage_data = vec![(&storages[0], slot)];
let sorted_storages =
SortedStorages::new_debug(&storage_data[..], 0, MAX_ITEMS_PER_CHUNK as usize + 1);
let mut stats = HashStats::default();
let range = 1;
let start = 127;
let result = AccountsDb::scan_snapshot_stores(
&sorted_storages,
&mut stats,
bins,
&Range {
start,
end: start + range,
},
false,
)
.unwrap();
assert_eq!(result.len(), 2); // 2 chunks
assert_eq!(result[0].len(), range);
assert_eq!(0, result[0].iter().map(|x| x.len()).sum::<usize>()); // nothing found in the first chunk
let mut expected = vec![Vec::new(); range];
expected[0].push(raw_expected[1].clone());
assert_eq!(result[1].len(), 1);
assert_eq!(result[1], expected);
}
#[test]
fn test_accountsdb_calculate_accounts_hash_without_index_simple() {
solana_logger::setup();
let (storages, _size, _slot_expected) = sample_storage();
let result = AccountsDb::calculate_accounts_hash_without_index(
TempDir::new().unwrap().path(),
&get_storage_refs(&storages),
None,
HashStats::default(),
false,
None,
None,
None,
)
.unwrap();
let expected_hash = Hash::from_str("GKot5hBsd81kMupNCXHaqbhv3huEbxAFMLnpcX2hniwn").unwrap();
assert_eq!(result, (expected_hash, 0));
}
#[test]
fn test_accountsdb_calculate_accounts_hash_without_index() {
solana_logger::setup();
let (storages, raw_expected) = sample_storages_and_accounts();
let expected_hash =
AccountsHash::compute_merkle_root_loop(raw_expected.clone(), MERKLE_FANOUT, |item| {
item.hash
});
let sum = raw_expected.iter().map(|item| item.lamports).sum();
let result = AccountsDb::calculate_accounts_hash_without_index(
TempDir::new().unwrap().path(),
&get_storage_refs(&storages),
None,
HashStats::default(),
false,
None,
None,
None,
)
.unwrap();
assert_eq!(result, (expected_hash, sum));
}
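// Builds a single empty storage entry at slot 0 and returns it together with its size and slot.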
fn sample_storage() -> (SnapshotStorages, usize, Slot) {
let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let slot_expected: Slot = 0;
let size: usize = 123;
let data = AccountStorageEntry::new(&paths[0], slot_expected, 0, size as u64);
let arc = Arc::new(data);
let storages = vec![vec![arc]];
(storages, size, slot_expected)
}
#[test]
fn test_accountsdb_scan_account_storage_no_bank() {
solana_logger::setup();
let expected = 1;
let tf = crate::append_vec::test_utils::get_append_vec_path(
"test_accountsdb_scan_account_storage_no_bank",
);
let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let slot_expected: Slot = 0;
let size: usize = 123;
let mut data = AccountStorageEntry::new(&paths[0], slot_expected, 0, size as u64);
let av = AppendVec::new(&tf.path, true, 1024 * 1024);
data.accounts = av;
let arc = Arc::new(data);
let storages = vec![vec![arc]];
let pubkey = solana_sdk::pubkey::new_rand();
let acc = AccountSharedData::new(1, 48, AccountSharedData::default().owner());
let sm = StoredMeta {
data_len: 1,
pubkey,
write_version: 1,
};
storages[0][0]
.accounts
.append_accounts(&[(sm, Some(&acc))], &[&Hash::default()]);
let calls = AtomicU64::new(0);
let temp_dir = TempDir::new().unwrap();
let accounts_hash_cache_path = temp_dir.path();
let result = AccountsDb::scan_account_storage_no_bank(
&CacheHashData::new(&accounts_hash_cache_path),
None,
&get_storage_refs(&storages),
|loaded_account: LoadedAccount, accum: &mut BinnedHashData, slot: Slot| {
calls.fetch_add(1, Ordering::Relaxed);
assert_eq!(loaded_account.pubkey(), &pubkey);
assert_eq!(slot_expected, slot);
accum.push(vec![CalculateHashIntermediate::new(
Hash::default(),
expected,
pubkey,
)]);
},
|a| a,
&Range { start: 0, end: 1 },
&PubkeyBinCalculator24::new(1),
);
assert_eq!(calls.load(Ordering::Relaxed), 1);
assert_eq!(
result,
vec![vec![vec![CalculateHashIntermediate::new(
Hash::default(),
expected,
pubkey
)]]]
);
}
#[test]
fn test_accountsdb_scan_account_storage_no_bank_one_slot() {
solana_logger::setup();
let expected = 1;
let tf = crate::append_vec::test_utils::get_append_vec_path(
"test_accountsdb_scan_account_storage_no_bank",
);
let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let slot_expected: Slot = 0;
let size: usize = 123;
let mut data = AccountStorageEntry::new(&paths[0], slot_expected, 0, size as u64);
let av = AppendVec::new(&tf.path, true, 1024 * 1024);
data.accounts = av;
let arc = Arc::new(data);
let storages = vec![vec![arc]];
let pubkey = solana_sdk::pubkey::new_rand();
let acc = AccountSharedData::new(1, 48, AccountSharedData::default().owner());
let sm = StoredMeta {
data_len: 1,
pubkey,
write_version: 1,
};
storages[0][0]
.accounts
.append_accounts(&[(sm, Some(&acc))], &[&Hash::default()]);
let calls = AtomicU64::new(0);
let mut accum = Vec::new();
let scan_func = |loaded_account: LoadedAccount, accum: &mut Vec<u64>, slot: Slot| {
calls.fetch_add(1, Ordering::Relaxed);
assert_eq!(loaded_account.pubkey(), &pubkey);
assert_eq!(slot_expected, slot);
accum.push(expected);
};
AccountsDb::scan_multiple_account_storages_one_slot(
&storages[0],
&scan_func,
slot_expected,
&mut accum,
);
assert_eq!(calls.load(Ordering::Relaxed), 1);
assert_eq!(accum, vec![expected]);
}
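// Builds a storage entry backed by an AppendVec containing a single account for `pubkey`,
// stored at the given slot with the given write version.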
fn sample_storage_with_entries(
tf: &TempFile,
write_version: StoredMetaWriteVersion,
slot: Slot,
pubkey: &Pubkey,
) -> SnapshotStorages {
let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let size: usize = 123;
let mut data = AccountStorageEntry::new(&paths[0], slot, 0, size as u64);
let av = AppendVec::new(&tf.path, true, 1024 * 1024);
data.accounts = av;
let arc = Arc::new(data);
let storages = vec![vec![arc]];
let acc = AccountSharedData::new(1, 48, AccountSharedData::default().owner());
let sm = StoredMeta {
data_len: 1,
pubkey: *pubkey,
write_version,
};
storages[0][0]
.accounts
.append_accounts(&[(sm, Some(&acc))], &[&Hash::default()]);
storages
}
#[test]
fn test_accountsdb_scan_multiple_account_storage_no_bank_one_slot() {
solana_logger::setup();
let slot_expected: Slot = 0;
let tf = crate::append_vec::test_utils::get_append_vec_path(
"test_accountsdb_scan_account_storage_no_bank",
);
let write_version1 = 0;
let write_version2 = 1;
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
for swap in [false, true].iter() {
let mut storages = [
sample_storage_with_entries(&tf, write_version1, slot_expected, &pubkey1)
.remove(0)
.remove(0),
sample_storage_with_entries(&tf, write_version2, slot_expected, &pubkey2)
.remove(0)
.remove(0),
];
if *swap {
storages[..].swap(0, 1);
}
let calls = AtomicU64::new(0);
let scan_func = |loaded_account: LoadedAccount, accum: &mut Vec<u64>, slot: Slot| {
calls.fetch_add(1, Ordering::Relaxed);
let write_version = loaded_account.write_version();
let first = loaded_account.pubkey() == &pubkey1 && write_version == write_version1;
assert!(
first || loaded_account.pubkey() == &pubkey2 && write_version == write_version2
);
assert_eq!(slot_expected, slot);
if first {
assert!(accum.is_empty());
} else {
assert!(accum.len() == 1);
}
accum.push(write_version);
};
let mut accum = Vec::new();
AccountsDb::scan_multiple_account_storages_one_slot(
&storages,
&scan_func,
slot_expected,
&mut accum,
);
assert_eq!(calls.load(Ordering::Relaxed), storages.len() as u64);
assert_eq!(accum, vec![write_version1, write_version2]);
}
}
#[test]
fn test_accountsdb_add_root() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
db.store_uncached(0, &[(&key, &account0)]);
db.add_root(0);
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(
db.load_without_fixed_root(&ancestors, &key),
Some((account0, 0))
);
}
#[test]
fn test_accountsdb_latest_ancestor() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
db.store_uncached(0, &[(&key, &account0)]);
let account1 = AccountSharedData::new(0, 0, &key);
db.store_uncached(1, &[(&key, &account1)]);
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(
&db.load_without_fixed_root(&ancestors, &key).unwrap().0,
&account1
);
let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
assert_eq!(
&db.load_without_fixed_root(&ancestors, &key).unwrap().0,
&account1
);
let accounts: Vec<AccountSharedData> = db.unchecked_scan_accounts(
"",
&ancestors,
|accounts: &mut Vec<AccountSharedData>, option| {
accounts.push(option.1.take_account());
},
&ScanConfig::default(),
);
assert_eq!(accounts, vec![account1]);
}
#[test]
fn test_accountsdb_latest_ancestor_with_root() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
db.store_uncached(0, &[(&key, &account0)]);
let account1 = AccountSharedData::new(0, 0, &key);
db.store_uncached(1, &[(&key, &account1)]);
db.add_root(0);
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(
&db.load_without_fixed_root(&ancestors, &key).unwrap().0,
&account1
);
let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
assert_eq!(
&db.load_without_fixed_root(&ancestors, &key).unwrap().0,
&account1
);
}
#[test]
fn test_accountsdb_root_one_slot() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
// store value 1 in the "root", i.e. db zero
db.store_uncached(0, &[(&key, &account0)]);
// now we have:
//
//                  root0 -> key.lamports==1
//                   /   \
//                  /     \
//  key.lamports==0 <- slot1   \
//                              slot2 -> key.lamports==1
//                                        (via root0)
// store value 0 in one child
let account1 = AccountSharedData::new(0, 0, &key);
db.store_uncached(1, &[(&key, &account1)]);
// Masking accounts is done at the Accounts level; at the AccountsDb level we see the
// original account (returning "None" instead would also be acceptable, but that is
// implemented at the Accounts level)
let ancestors = vec![(0, 0), (1, 1)].into_iter().collect();
assert_eq!(
&db.load_without_fixed_root(&ancestors, &key).unwrap().0,
&account1
);
// we should see 1 lamport in slot 2
let ancestors = vec![(0, 0), (2, 2)].into_iter().collect();
assert_eq!(
&db.load_without_fixed_root(&ancestors, &key).unwrap().0,
&account0
);
db.add_root(0);
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(
db.load_without_fixed_root(&ancestors, &key),
Some((account1, 1))
);
let ancestors = vec![(2, 2)].into_iter().collect();
assert_eq!(
db.load_without_fixed_root(&ancestors, &key),
Some((account0, 0))
); // original value
}
#[test]
fn test_accountsdb_add_root_many() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 100, 0, 0);
for _ in 1..100 {
let idx = thread_rng().gen_range(0, 99);
let ancestors = vec![(0, 0)].into_iter().collect();
let account = db
.load_without_fixed_root(&ancestors, &pubkeys[idx])
.unwrap();
let default_account = AccountSharedData::from(Account {
lamports: (idx + 1) as u64,
..Account::default()
});
assert_eq!((default_account, 0), account);
}
db.add_root(0);
// check that all the accounts appear with a new root
for _ in 1..100 {
let idx = thread_rng().gen_range(0, 99);
let ancestors = vec![(0, 0)].into_iter().collect();
let account0 = db
.load_without_fixed_root(&ancestors, &pubkeys[idx])
.unwrap();
let ancestors = vec![(1, 1)].into_iter().collect();
let account1 = db
.load_without_fixed_root(&ancestors, &pubkeys[idx])
.unwrap();
let default_account = AccountSharedData::from(Account {
lamports: (idx + 1) as u64,
..Account::default()
});
assert_eq!(&default_account, &account0.0);
assert_eq!(&default_account, &account1.0);
}
}
#[test]
fn test_accountsdb_count_stores() {
solana_logger::setup();
let db = AccountsDb::new_single_for_tests();
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 2, DEFAULT_FILE_SIZE as usize / 3, 0);
assert!(check_storage(&db, 0, 2));
let pubkey = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 3, &pubkey);
db.store_uncached(1, &[(&pubkey, &account)]);
db.store_uncached(1, &[(&pubkeys[0], &account)]);
{
let slot_0_stores = &db.storage.get_slot_stores(0).unwrap();
let slot_1_stores = &db.storage.get_slot_stores(1).unwrap();
let r_slot_0_stores = slot_0_stores.read().unwrap();
let r_slot_1_stores = slot_1_stores.read().unwrap();
assert_eq!(r_slot_0_stores.len(), 1);
assert_eq!(r_slot_1_stores.len(), 1);
assert_eq!(r_slot_0_stores.get(&0).unwrap().count(), 2);
assert_eq!(r_slot_1_stores[&1].count(), 2);
assert_eq!(r_slot_0_stores.get(&0).unwrap().approx_stored_count(), 2);
assert_eq!(r_slot_1_stores[&1].approx_stored_count(), 2);
}
// adding root doesn't change anything
db.get_accounts_delta_hash(1);
db.add_root(1);
{
let slot_0_stores = &db.storage.get_slot_stores(0).unwrap();
let slot_1_stores = &db.storage.get_slot_stores(1).unwrap();
let r_slot_0_stores = slot_0_stores.read().unwrap();
let r_slot_1_stores = slot_1_stores.read().unwrap();
assert_eq!(r_slot_0_stores.len(), 1);
assert_eq!(r_slot_1_stores.len(), 1);
assert_eq!(r_slot_0_stores.get(&0).unwrap().count(), 2);
assert_eq!(r_slot_1_stores[&1].count(), 2);
assert_eq!(r_slot_0_stores.get(&0).unwrap().approx_stored_count(), 2);
assert_eq!(r_slot_1_stores[&1].approx_stored_count(), 2);
}
// overwrite old rooted account version; only the r_slot_0_stores.count() should be
// decremented
db.store_uncached(2, &[(&pubkeys[0], &account)]);
db.clean_accounts(None, false, None);
{
let slot_0_stores = &db.storage.get_slot_stores(0).unwrap();
let slot_1_stores = &db.storage.get_slot_stores(1).unwrap();
let r_slot_0_stores = slot_0_stores.read().unwrap();
let r_slot_1_stores = slot_1_stores.read().unwrap();
assert_eq!(r_slot_0_stores.len(), 1);
assert_eq!(r_slot_1_stores.len(), 1);
assert_eq!(r_slot_0_stores.get(&0).unwrap().count(), 1);
assert_eq!(r_slot_1_stores[&1].count(), 2);
assert_eq!(r_slot_0_stores.get(&0).unwrap().approx_stored_count(), 2);
assert_eq!(r_slot_1_stores[&1].approx_stored_count(), 2);
}
}
#[test]
fn test_accounts_unsquashed() {
let key = Pubkey::default();
// 1 lamport in the "root", i.e. db zero
let db0 = AccountsDb::new(Vec::new(), &ClusterType::Development);
let account0 = AccountSharedData::new(1, 0, &key);
db0.store_uncached(0, &[(&key, &account0)]);
// 0 lamports in the child
let account1 = AccountSharedData::new(0, 0, &key);
db0.store_uncached(1, &[(&key, &account1)]);
// Masking accounts is done at the Accounts level; at the AccountsDb level we see the
// original account
let ancestors = vec![(0, 0), (1, 1)].into_iter().collect();
assert_eq!(
db0.load_without_fixed_root(&ancestors, &key),
Some((account1, 1))
);
let ancestors = vec![(0, 0)].into_iter().collect();
assert_eq!(
db0.load_without_fixed_root(&ancestors, &key),
Some((account0, 0))
);
}
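// Shared body for the cached and uncached variants: stores an account in an unrooted
// slot, purges the slot via remove_unrooted_slots, verifies every trace of it is gone,
// and then stores to the same slot again.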
fn run_test_remove_unrooted_slot(is_cached: bool) {
let unrooted_slot = 9;
let unrooted_bank_id = 9;
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
db.caching_enabled = true;
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
let ancestors = vec![(unrooted_slot, 1)].into_iter().collect();
if is_cached {
db.store_cached(unrooted_slot, &[(&key, &account0)]);
} else {
db.store_uncached(unrooted_slot, &[(&key, &account0)]);
}
db.bank_hashes
.write()
.unwrap()
.insert(unrooted_slot, BankHashInfo::default());
assert!(db
.accounts_index
.get(&key, Some(&ancestors), None)
.is_some());
assert_load_account(&db, unrooted_slot, key, 1);
// Purge the slot
db.remove_unrooted_slots(&[(unrooted_slot, unrooted_bank_id)]);
assert!(db.load_without_fixed_root(&ancestors, &key).is_none());
assert!(db.bank_hashes.read().unwrap().get(&unrooted_slot).is_none());
assert!(db.accounts_cache.slot_cache(unrooted_slot).is_none());
assert!(db.storage.0.get(&unrooted_slot).is_none());
assert!(db.accounts_index.get_account_read_entry(&key).is_none());
assert!(db
.accounts_index
.get(&key, Some(&ancestors), None)
.is_none());
// Test we can store for the same slot again and get the right information
let account0 = AccountSharedData::new(2, 0, &key);
db.store_uncached(unrooted_slot, &[(&key, &account0)]);
assert_load_account(&db, unrooted_slot, key, 2);
}
#[test]
fn test_remove_unrooted_slot_cached() {
run_test_remove_unrooted_slot(true);
}
#[test]
fn test_remove_unrooted_slot_storage() {
run_test_remove_unrooted_slot(false);
}
#[test]
fn test_remove_unrooted_slot_snapshot() {
solana_logger::setup();
let unrooted_slot = 9;
let unrooted_bank_id = 9;
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = solana_sdk::pubkey::new_rand();
let account0 = AccountSharedData::new(1, 0, &key);
db.store_uncached(unrooted_slot, &[(&key, &account0)]);
// Purge the slot
db.remove_unrooted_slots(&[(unrooted_slot, unrooted_bank_id)]);
// Add a new root
let key2 = solana_sdk::pubkey::new_rand();
let new_root = unrooted_slot + 1;
db.store_uncached(new_root, &[(&key2, &account0)]);
db.add_root(new_root);
// Simulate reconstruction from snapshot
let db = reconstruct_accounts_db_via_serialization(&db, new_root);
// Check root account exists
assert_load_account(&db, new_root, key2, 1);
// Check purged account stays gone
let unrooted_slot_ancestors = vec![(unrooted_slot, 1)].into_iter().collect();
assert!(db
.load_without_fixed_root(&unrooted_slot_ancestors, &key)
.is_none());
}
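// Creates `num` accounts with lamports = index + 1 plus `num_vote` vote-program-owned
// accounts at `slot`, appending all of their pubkeys to `pubkeys`.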
fn create_account(
accounts: &AccountsDb,
pubkeys: &mut Vec<Pubkey>,
slot: Slot,
num: usize,
space: usize,
num_vote: usize,
) {
let ancestors = vec![(slot, 0)].into_iter().collect();
for t in 0..num {
let pubkey = solana_sdk::pubkey::new_rand();
let account =
AccountSharedData::new((t + 1) as u64, space, AccountSharedData::default().owner());
pubkeys.push(pubkey);
assert!(accounts
.load_without_fixed_root(&ancestors, &pubkey)
.is_none());
accounts.store_uncached(slot, &[(&pubkey, &account)]);
}
for t in 0..num_vote {
let pubkey = solana_sdk::pubkey::new_rand();
let account =
AccountSharedData::new((num + t + 1) as u64, space, &solana_vote_program::id());
pubkeys.push(pubkey);
let ancestors = vec![(slot, 0)].into_iter().collect();
assert!(accounts
.load_without_fixed_root(&ancestors, &pubkey)
.is_none());
accounts.store_uncached(slot, &[(&pubkey, &account)]);
}
}
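// Performs ~1000 random single-lamport increments on accounts in `pubkeys[..range]`,
// re-storing each at `slot` and sanity-checking the reloaded value.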
fn update_accounts(accounts: &AccountsDb, pubkeys: &[Pubkey], slot: Slot, range: usize) {
for _ in 1..1000 {
let idx = thread_rng().gen_range(0, range);
let ancestors = vec![(slot, 0)].into_iter().collect();
if let Some((mut account, _)) =
accounts.load_without_fixed_root(&ancestors, &pubkeys[idx])
{
account.checked_add_lamports(1).unwrap();
accounts.store_uncached(slot, &[(&pubkeys[idx], &account)]);
if account.is_zero_lamport() {
let ancestors = vec![(slot, 0)].into_iter().collect();
assert!(accounts
.load_without_fixed_root(&ancestors, &pubkeys[idx])
.is_none());
} else {
let default_account = AccountSharedData::from(Account {
lamports: account.lamports(),
..Account::default()
});
assert_eq!(default_account, account);
}
}
}
}
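// Asserts that `slot` has exactly one storage entry, that its alive-account count equals
// `count`, and that the stored counts agree with the raw append-vec contents.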
fn check_storage(accounts: &AccountsDb, slot: Slot, count: usize) -> bool {
assert_eq!(
accounts
.storage
.get_slot_stores(slot)
.unwrap()
.read()
.unwrap()
.len(),
1
);
let slot_storages = accounts.storage.get_slot_stores(slot).unwrap();
let mut total_count: usize = 0;
let r_slot_storages = slot_storages.read().unwrap();
for store in r_slot_storages.values() {
assert_eq!(store.status(), AccountStorageStatus::Available);
total_count += store.count();
}
assert_eq!(total_count, count);
let (expected_store_count, actual_store_count): (usize, usize) = (
r_slot_storages
.values()
.map(|s| s.approx_stored_count())
.sum(),
r_slot_storages
.values()
.map(|s| s.all_accounts().len())
.sum(),
);
assert_eq!(expected_store_count, actual_store_count);
total_count == count
}
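// Randomly samples the first `num` pubkeys and asserts each loads from `slot` with
// lamports = index + `count`.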
fn check_accounts(
accounts: &AccountsDb,
pubkeys: &[Pubkey],
slot: Slot,
num: usize,
count: usize,
) {
let ancestors = vec![(slot, 0)].into_iter().collect();
for _ in 0..num {
let idx = thread_rng().gen_range(0, num);
let account = accounts.load_without_fixed_root(&ancestors, &pubkeys[idx]);
let account1 = Some((
AccountSharedData::new(
(idx + count) as u64,
0,
AccountSharedData::default().owner(),
),
slot,
));
assert_eq!(account, account1);
}
}
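// Overwrites the first `num` accounts at `slot` with lamports = index + `count`.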
#[allow(clippy::needless_range_loop)]
fn modify_accounts(
accounts: &AccountsDb,
pubkeys: &[Pubkey],
slot: Slot,
num: usize,
count: usize,
) {
for idx in 0..num {
let account = AccountSharedData::new(
(idx + count) as u64,
0,
AccountSharedData::default().owner(),
);
accounts.store_uncached(slot, &[(&pubkeys[idx], &account)]);
}
}
#[test]
fn test_account_one() {
let (_accounts_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let db = AccountsDb::new(paths, &ClusterType::Development);
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 1, 0, 0);
let ancestors = vec![(0, 0)].into_iter().collect();
let account = db.load_without_fixed_root(&ancestors, &pubkeys[0]).unwrap();
let default_account = AccountSharedData::from(Account {
lamports: 1,
..Account::default()
});
assert_eq!((default_account, 0), account);
}
#[test]
fn test_account_many() {
let (_accounts_dirs, paths) = get_temp_accounts_paths(2).unwrap();
let db = AccountsDb::new(paths, &ClusterType::Development);
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 100, 0, 0);
check_accounts(&db, &pubkeys, 0, 100, 1);
}
#[test]
fn test_account_update() {
let accounts = AccountsDb::new_single_for_tests();
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&accounts, &mut pubkeys, 0, 100, 0, 0);
update_accounts(&accounts, &pubkeys, 0, 99);
assert!(check_storage(&accounts, 0, 100));
}
#[test]
fn test_account_grow_many() {
let (_accounts_dir, paths) = get_temp_accounts_paths(2).unwrap();
let size = 4096;
let accounts = AccountsDb::new_sized(paths, size);
let mut keys = vec![];
for i in 0..9 {
let key = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(i + 1, size as usize / 4, &key);
accounts.store_uncached(0, &[(&key, &account)]);
keys.push(key);
}
let ancestors = vec![(0, 0)].into_iter().collect();
for (i, key) in keys.iter().enumerate() {
assert_eq!(
accounts
.load_without_fixed_root(&ancestors, key)
.unwrap()
.0
.lamports(),
(i as u64) + 1
);
}
let mut append_vec_histogram = HashMap::new();
let mut all_storages = vec![];
for slot_storage in accounts.storage.0.iter() {
all_storages.extend(slot_storage.read().unwrap().values().cloned())
}
for storage in all_storages {
*append_vec_histogram.entry(storage.slot()).or_insert(0) += 1;
}
for count in append_vec_histogram.values() {
assert!(*count >= 2);
}
}
#[test]
fn test_account_grow() {
let accounts = AccountsDb::new_single_for_tests();
let status = [AccountStorageStatus::Available, AccountStorageStatus::Full];
let pubkey1 = solana_sdk::pubkey::new_rand();
let account1 = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey1);
accounts.store_uncached(0, &[(&pubkey1, &account1)]);
{
let stores = &accounts.storage.get_slot_stores(0).unwrap();
let r_stores = stores.read().unwrap();
assert_eq!(r_stores.len(), 1);
assert_eq!(r_stores[&0].count(), 1);
assert_eq!(r_stores[&0].status(), AccountStorageStatus::Available);
}
let pubkey2 = solana_sdk::pubkey::new_rand();
let account2 = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey2);
accounts.store_uncached(0, &[(&pubkey2, &account2)]);
{
assert_eq!(accounts.storage.0.len(), 1);
let stores = &accounts.storage.get_slot_stores(0).unwrap();
let r_stores = stores.read().unwrap();
assert_eq!(r_stores.len(), 2);
assert_eq!(r_stores[&0].count(), 1);
assert_eq!(r_stores[&0].status(), AccountStorageStatus::Full);
assert_eq!(r_stores[&1].count(), 1);
assert_eq!(r_stores[&1].status(), AccountStorageStatus::Available);
}
let ancestors = vec![(0, 0)].into_iter().collect();
assert_eq!(
accounts
.load_without_fixed_root(&ancestors, &pubkey1)
.unwrap()
.0,
account1
);
assert_eq!(
accounts
.load_without_fixed_root(&ancestors, &pubkey2)
.unwrap()
.0,
account2
);
// lots of stores, but 7 storages should be enough for everything
for _ in 0..25 {
accounts.store_uncached(0, &[(&pubkey1, &account1)]);
{
assert_eq!(accounts.storage.0.len(), 1);
let stores = &accounts.storage.get_slot_stores(0).unwrap();
let r_stores = stores.read().unwrap();
assert!(r_stores.len() <= 7);
assert_eq!(r_stores[&0].status(), status[0]);
}
let ancestors = vec![(0, 0)].into_iter().collect();
assert_eq!(
accounts
.load_without_fixed_root(&ancestors, &pubkey1)
.unwrap()
.0,
account1
);
assert_eq!(
accounts
.load_without_fixed_root(&ancestors, &pubkey2)
.unwrap()
.0,
account2
);
}
}
#[test]
fn test_lazy_gc_slot() {
solana_logger::setup();
// This test is pedantic.
// A slot is purged when a non-root bank is cleaned up. If a slot is behind the latest
// root but is not itself a root, it means we are retaining dead banks.
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
//store an account
accounts.store_uncached(0, &[(&pubkey, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
let id = {
let (lock, idx) = accounts
.accounts_index
.get(&pubkey, Some(&ancestors), None)
.unwrap();
lock.slot_list()[idx].1.store_id
};
accounts.get_accounts_delta_hash(0);
accounts.add_root(1);
//slot is still there, since gc is lazy
assert!(accounts
.storage
.get_slot_stores(0)
.unwrap()
.read()
.unwrap()
.get(&id)
.is_some());
//store causes clean
accounts.store_uncached(1, &[(&pubkey, &account)]);
// generate delta state for slot 1, so clean operates on it.
accounts.get_accounts_delta_hash(1);
//slot is gone
accounts.print_accounts_stats("pre-clean");
accounts.clean_accounts(None, false, None);
assert!(accounts.storage.0.get(&0).is_none());
//new value is there
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(
accounts.load_without_fixed_root(&ancestors, &pubkey),
Some((account, 1))
);
}
impl AccountsDb {
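// Counts every account entry (alive or dead) in the slot's append vecs and asserts the
// total matches the storages' approximate stored counts.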
fn all_account_count_in_append_vec(&self, slot: Slot) -> usize {
let slot_storage = self.storage.get_slot_stores(slot);
if let Some(slot_storage) = slot_storage {
let r_slot_storage = slot_storage.read().unwrap();
let count = r_slot_storage
.values()
.map(|store| store.all_accounts().len())
.sum();
let stored_count: usize = r_slot_storage
.values()
.map(|store| store.approx_stored_count())
.sum();
assert_eq!(stored_count, count);
count
} else {
0
}
}
pub fn ref_count_for_pubkey(&self, pubkey: &Pubkey) -> RefCount {
self.accounts_index.ref_count_from_storage(pubkey)
}
}
#[test]
fn test_clean_zero_lamport_and_dead_slot() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// Store two accounts
accounts.store_uncached(0, &[(&pubkey1, &account)]);
accounts.store_uncached(0, &[(&pubkey2, &account)]);
// Make sure both accounts are in the same AppendVec in slot 0, which
// will prevent pubkey1 from being cleaned up later even when it's a
// zero-lamport account
let ancestors = vec![(0, 1)].into_iter().collect();
let (slot1, account_info1) = accounts
.accounts_index
.get(&pubkey1, Some(&ancestors), None)
.map(|(account_list1, index1)| account_list1.slot_list()[index1])
.unwrap();
let (slot2, account_info2) = accounts
.accounts_index
.get(&pubkey2, Some(&ancestors), None)
.map(|(account_list2, index2)| account_list2.slot_list()[index2])
.unwrap();
assert_eq!(slot1, 0);
assert_eq!(slot1, slot2);
assert_eq!(account_info1.store_id, account_info2.store_id);
// Update account 1 in slot 1
accounts.store_uncached(1, &[(&pubkey1, &account)]);
// Update account 1 as zero lamports account
accounts.store_uncached(2, &[(&pubkey1, &zero_lamport_account)]);
// Pubkey 1 was the only account in slot 1, and it was updated in slot 2, so
// slot 1 should be purged
accounts.add_root(0);
accounts.add_root(1);
accounts.add_root(2);
// Slot 1 should be removed, slot 0 cannot be removed because it still has
// the latest update for pubkey 2
accounts.clean_accounts(None, false, None);
assert!(accounts.storage.get_slot_stores(0).is_some());
assert!(accounts.storage.get_slot_stores(1).is_none());
// Slot 1 should be cleaned because all of its accounts are
// zero lamports, and are not present in any other slot's
// storage entries
assert_eq!(accounts.alive_account_count_in_slot(1), 0);
}
#[test]
fn test_clean_multiple_zero_lamport_decrements_index_ref_count() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// Store 2 accounts in slot 0, then update account 1 in two more slots
accounts.store_uncached(0, &[(&pubkey1, &zero_lamport_account)]);
accounts.store_uncached(0, &[(&pubkey2, &zero_lamport_account)]);
accounts.store_uncached(1, &[(&pubkey1, &zero_lamport_account)]);
accounts.store_uncached(2, &[(&pubkey1, &zero_lamport_account)]);
// Root all slots
accounts.add_root(0);
accounts.add_root(1);
accounts.add_root(2);
// Account ref counts should match how many slots they were stored in
// Account 1 = 3 slots; account 2 = 1 slot
assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 3);
assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey2), 1);
accounts.clean_accounts(None, false, None);
// Slots 0 and 1 should each have been cleaned because all of their
// accounts are zero lamports
assert!(accounts.storage.get_slot_stores(0).is_none());
assert!(accounts.storage.get_slot_stores(1).is_none());
// Slot 2 only has a zero lamport account as well. But, calc_delete_dependencies()
// should exclude slot 2 from the clean due to changes in other slots
assert!(accounts.storage.get_slot_stores(2).is_some());
// Index ref counts should be consistent with the slot stores. Account 1 ref count
// should be 1 since slot 2 is the only alive slot; account 2 should have a ref
// count of 0 due to slot 0 being dead
assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 1);
assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey2), 0);
accounts.clean_accounts(None, false, None);
// Slot 2 will now be cleaned, which will leave account 1 with a ref count of 0
assert!(accounts.storage.get_slot_stores(2).is_none());
assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 0);
}
#[test]
fn test_clean_zero_lamport_and_old_roots() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// Store a zero-lamport account
accounts.store_uncached(0, &[(&pubkey, &account)]);
accounts.store_uncached(1, &[(&pubkey, &zero_lamport_account)]);
// Simulate rooting the zero-lamport account, should be a
// candidate for cleaning
accounts.add_root(0);
accounts.add_root(1);
// Slot 0 should be removed, and
// zero-lamport account should be cleaned
accounts.clean_accounts(None, false, None);
assert!(accounts.storage.get_slot_stores(0).is_none());
assert!(accounts.storage.get_slot_stores(1).is_none());
// Slot 0 should be cleaned because all of its accounts have been
// updated in the rooted slot 1
assert_eq!(accounts.alive_account_count_in_slot(0), 0);
// Slot 1 should be cleaned because all of its accounts are
// zero lamports, and are not present in any other slot's
// storage entries
assert_eq!(accounts.alive_account_count_in_slot(1), 0);
// zero lamport account, should no longer exist in accounts index
// because it has been removed
assert!(accounts.accounts_index.get(&pubkey, None, None).is_none());
}
#[test]
fn test_clean_old_with_normal_account() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
//store an account
accounts.store_uncached(0, &[(&pubkey, &account)]);
accounts.store_uncached(1, &[(&pubkey, &account)]);
// simulate slots are rooted after while
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
accounts.get_accounts_delta_hash(1);
accounts.add_root(1);
//even if rooted, old state isn't cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 1);
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
accounts.clean_accounts(None, false, None);
//now old state is cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 0);
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
}
#[test]
fn test_clean_old_with_zero_lamport_account() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
let normal_account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
//store an account
accounts.store_uncached(0, &[(&pubkey1, &normal_account)]);
accounts.store_uncached(1, &[(&pubkey1, &zero_account)]);
accounts.store_uncached(0, &[(&pubkey2, &normal_account)]);
accounts.store_uncached(1, &[(&pubkey2, &normal_account)]);
//simulate slots are rooted after while
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
accounts.get_accounts_delta_hash(1);
accounts.add_root(1);
//even if rooted, old state isn't cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 2);
assert_eq!(accounts.alive_account_count_in_slot(1), 2);
accounts.print_accounts_stats("");
accounts.clean_accounts(None, false, None);
//Old state behind zero-lamport account is cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 0);
assert_eq!(accounts.alive_account_count_in_slot(1), 2);
}
#[test]
fn test_clean_old_with_both_normal_and_zero_lamport_accounts() {
solana_logger::setup();
let mut accounts = AccountsDb::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
spl_token_mint_index_enabled(),
false,
AccountShrinkThreshold::default(),
);
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
// Set up account to be added to secondary index
let mint_key = Pubkey::new_unique();
let mut account_data_with_mint = vec![0; inline_spl_token::Account::get_packed_len()];
account_data_with_mint[..PUBKEY_BYTES].clone_from_slice(&(mint_key.to_bytes()));
let mut normal_account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
normal_account.set_owner(inline_spl_token::id());
normal_account.set_data(account_data_with_mint.clone());
let mut zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
zero_account.set_owner(inline_spl_token::id());
zero_account.set_data(account_data_with_mint);
//store an account
accounts.store_uncached(0, &[(&pubkey1, &normal_account)]);
accounts.store_uncached(0, &[(&pubkey1, &normal_account)]);
accounts.store_uncached(1, &[(&pubkey1, &zero_account)]);
accounts.store_uncached(0, &[(&pubkey2, &normal_account)]);
accounts.store_uncached(2, &[(&pubkey2, &normal_account)]);
//simulate slots are rooted after while
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
accounts.get_accounts_delta_hash(1);
accounts.add_root(1);
accounts.get_accounts_delta_hash(2);
accounts.add_root(2);
//even if rooted, old state isn't cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 2);
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
assert_eq!(accounts.alive_account_count_in_slot(2), 1);
// Secondary index should still find both pubkeys
let mut found_accounts = HashSet::new();
let index_key = IndexKey::SplTokenMint(mint_key);
let bank_id = 0;
accounts
.accounts_index
.index_scan_accounts(
&Ancestors::default(),
bank_id,
index_key,
|key, _| {
found_accounts.insert(*key);
},
&ScanConfig::default(),
)
.unwrap();
assert_eq!(found_accounts.len(), 2);
assert!(found_accounts.contains(&pubkey1));
assert!(found_accounts.contains(&pubkey2));
{
accounts.account_indexes.keys = Some(AccountSecondaryIndexesIncludeExclude {
exclude: true,
keys: [mint_key].iter().cloned().collect::<HashSet<Pubkey>>(),
});
// Secondary index can't be used - do normal scan: should still find both pubkeys
let found_accounts = accounts
.index_scan_accounts(
&Ancestors::default(),
bank_id,
index_key,
|collection: &mut HashSet<Pubkey>, account| {
collection.insert(*account.unwrap().0);
},
&ScanConfig::default(),
)
.unwrap();
assert!(!found_accounts.1);
assert_eq!(found_accounts.0.len(), 2);
assert!(found_accounts.0.contains(&pubkey1));
assert!(found_accounts.0.contains(&pubkey2));
accounts.account_indexes.keys = None;
// Secondary index can now be used since it isn't marked as excluded
let found_accounts = accounts
.index_scan_accounts(
&Ancestors::default(),
bank_id,
index_key,
|collection: &mut HashSet<Pubkey>, account| {
collection.insert(*account.unwrap().0);
},
&ScanConfig::default(),
)
.unwrap();
assert!(found_accounts.1);
assert_eq!(found_accounts.0.len(), 2);
assert!(found_accounts.0.contains(&pubkey1));
assert!(found_accounts.0.contains(&pubkey2));
accounts.account_indexes.keys = None;
}
accounts.clean_accounts(None, false, None);
//both zero lamport and normal accounts are cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 0);
// The only store to slot 1 was a zero lamport account, should
// be purged by zero-lamport cleaning logic because slot 1 is
// rooted
assert_eq!(accounts.alive_account_count_in_slot(1), 0);
assert_eq!(accounts.alive_account_count_in_slot(2), 1);
// `pubkey1`, a zero lamport account, should no longer exist in accounts index
// because it has been removed by the clean
assert!(accounts.accounts_index.get(&pubkey1, None, None).is_none());
// Secondary index should have purged `pubkey1` as well
let mut found_accounts = vec![];
accounts
.accounts_index
.index_scan_accounts(
&Ancestors::default(),
bank_id,
IndexKey::SplTokenMint(mint_key),
|key, _| found_accounts.push(*key),
&ScanConfig::default(),
)
.unwrap();
assert_eq!(found_accounts, vec![pubkey2]);
}
#[test]
fn test_clean_max_slot_zero_lamport_account() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// store an account, make it a zero lamport account
// in slot 1
accounts.store_uncached(0, &[(&pubkey, &account)]);
accounts.store_uncached(1, &[(&pubkey, &zero_account)]);
// simulate slots are rooted after while
accounts.add_root(0);
accounts.add_root(1);
// Only clean up to slot 0; slot 0 should not be purged based on the
// zero-lamport update in the later slot 1
assert_eq!(accounts.alive_account_count_in_slot(0), 1);
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
accounts.clean_accounts(Some(0), false, None);
assert_eq!(accounts.alive_account_count_in_slot(0), 1);
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
assert!(accounts.accounts_index.get(&pubkey, None, None).is_some());
// Now the account can be cleaned up
accounts.clean_accounts(Some(1), false, None);
assert_eq!(accounts.alive_account_count_in_slot(0), 0);
assert_eq!(accounts.alive_account_count_in_slot(1), 0);
// The zero lamport account, should no longer exist in accounts index
// because it has been removed
assert!(accounts.accounts_index.get(&pubkey, None, None).is_none());
}
#[test]
fn test_uncleaned_roots_with_account() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
//store an account
accounts.store_uncached(0, &[(&pubkey, &account)]);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
// simulate slots are rooted after while
accounts.add_root(0);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1);
//now uncleaned roots are cleaned up
accounts.clean_accounts(None, false, None);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
}
#[test]
fn test_uncleaned_roots_with_no_account() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
// simulate slots are rooted after while
accounts.add_root(0);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1);
//now uncleaned roots are cleaned up
accounts.clean_accounts(None, false, None);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
}
#[test]
fn test_accounts_db_serialize1() {
solana_logger::setup();
let accounts = AccountsDb::new_single_for_tests();
let mut pubkeys: Vec<Pubkey> = vec![];
// Create 100 accounts in slot 0
create_account(&accounts, &mut pubkeys, 0, 100, 0, 0);
accounts.clean_accounts(None, false, None);
check_accounts(&accounts, &pubkeys, 0, 100, 1);
// do some updates to those accounts and re-check
modify_accounts(&accounts, &pubkeys, 0, 100, 2);
assert!(check_storage(&accounts, 0, 100));
check_accounts(&accounts, &pubkeys, 0, 100, 2);
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
let mut pubkeys1: Vec<Pubkey> = vec![];
// CREATE SLOT 1
let latest_slot = 1;
// Modify the first 10 of the accounts from slot 0 in slot 1
modify_accounts(&accounts, &pubkeys, latest_slot, 10, 3);
// Overwrite account 30 from slot 0 with lamports=0 into slot 1.
// Slot 1 should now have 10 + 1 = 11 accounts
let account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
accounts.store_uncached(latest_slot, &[(&pubkeys[30], &account)]);
// Create 10 new accounts in slot 1, should now have 11 + 10 = 21
// accounts
create_account(&accounts, &mut pubkeys1, latest_slot, 10, 0, 0);
accounts.get_accounts_delta_hash(latest_slot);
accounts.add_root(latest_slot);
assert!(check_storage(&accounts, 1, 21));
// CREATE SLOT 2
let latest_slot = 2;
let mut pubkeys2: Vec<Pubkey> = vec![];
// Modify first 20 of the accounts from slot 0 in slot 2
modify_accounts(&accounts, &pubkeys, latest_slot, 20, 4);
accounts.clean_accounts(None, false, None);
// Overwrite account 31 from slot 0 with lamports=0 into slot 2.
// Slot 2 should now have 20 + 1 = 21 accounts
let account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
accounts.store_uncached(latest_slot, &[(&pubkeys[31], &account)]);
// Create 10 new accounts in slot 2. Slot 2 should now have
// 21 + 10 = 31 accounts
create_account(&accounts, &mut pubkeys2, latest_slot, 10, 0, 0);
accounts.get_accounts_delta_hash(latest_slot);
accounts.add_root(latest_slot);
assert!(check_storage(&accounts, 2, 31));
accounts.clean_accounts(None, false, None);
// The first 20 accounts of slot 0 have been updated in slot 2, as well as
// accounts 30 and 31 (overwritten with zero-lamport accounts in slot 1 and
// slot 2 respectively), so only 78 accounts are left in slot 0's storage entries.
assert!(check_storage(&accounts, 0, 78));
// 10 of the 21 accounts have been modified in slot 2, so only 11
// accounts left in slot 1.
assert!(check_storage(&accounts, 1, 11));
assert!(check_storage(&accounts, 2, 31));
let daccounts = reconstruct_accounts_db_via_serialization(&accounts, latest_slot);
assert_eq!(
daccounts.write_version.load(Ordering::Acquire),
accounts.write_version.load(Ordering::Acquire)
);
// Get the hash for the latest slot; the deserialized AccountsDb's bank_hashes entry for
// that slot should match the one in the original AccountsDb
assert_eq!(daccounts.bank_hashes.read().unwrap().len(), 2);
assert_eq!(
daccounts.bank_hashes.read().unwrap().get(&latest_slot),
accounts.bank_hashes.read().unwrap().get(&latest_slot)
);
daccounts.print_count_and_status("daccounts");
// Only check pubkeys[35..]; the first 35 pubkeys include the accounts that were
// modified or zeroed out in slots 1 and 2, so their slot-0 values no longer apply
check_accounts(&daccounts, &pubkeys[35..], 0, 65, 37);
check_accounts(&daccounts, &pubkeys1, 1, 10, 1);
assert!(check_storage(&daccounts, 0, 100));
assert!(check_storage(&daccounts, 1, 21));
assert!(check_storage(&daccounts, 2, 31));
let ancestors = linear_ancestors(latest_slot);
assert_eq!(
daccounts.update_accounts_hash(latest_slot, &ancestors),
accounts.update_accounts_hash(latest_slot, &ancestors)
);
}
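// Loads `pubkey` with `slot` as an ancestor and asserts the expected lamport balance.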
fn assert_load_account(
accounts: &AccountsDb,
slot: Slot,
pubkey: Pubkey,
expected_lamports: u64,
) {
let ancestors = vec![(slot, 0)].into_iter().collect();
let (account, slot) = accounts
.load_without_fixed_root(&ancestors, &pubkey)
.unwrap();
assert_eq!((account.lamports(), slot), (expected_lamports, slot));
}
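// Asserts that `pubkey` cannot be loaded with `slot` as an ancestor.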
fn assert_not_load_account(accounts: &AccountsDb, slot: Slot, pubkey: Pubkey) {
let ancestors = vec![(slot, 0)].into_iter().collect();
assert!(accounts
.load_without_fixed_root(&ancestors, &pubkey)
.is_none());
}
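// Serializes the db at `slot` and reconstructs a fresh AccountsDb from the snapshot data.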
fn reconstruct_accounts_db_via_serialization(accounts: &AccountsDb, slot: Slot) -> AccountsDb {
let daccounts =
crate::serde_snapshot::reconstruct_accounts_db_via_serialization(accounts, slot);
daccounts.print_count_and_status("daccounts");
daccounts
}
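// Asserts that `slot` has either no storage map at all or only an empty one.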
fn assert_no_stores(accounts: &AccountsDb, slot: Slot) {
let slot_stores = accounts.storage.get_slot_stores(slot);
let r_slot_stores = slot_stores.as_ref().map(|slot_stores| {
let r_slot_stores = slot_stores.read().unwrap();
info!("{:?}", *r_slot_stores);
r_slot_stores
});
assert!(r_slot_stores.is_none() || r_slot_stores.unwrap().is_empty());
}
#[test]
fn test_accounts_db_purge_keep_live() {
solana_logger::setup();
let some_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(some_lamport, no_data, &owner);
let pubkey = solana_sdk::pubkey::new_rand();
let account2 = AccountSharedData::new(some_lamport, no_data, &owner);
let pubkey2 = solana_sdk::pubkey::new_rand();
let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner);
let accounts = AccountsDb::new_single_for_tests();
accounts.add_root(0);
// Step A
let mut current_slot = 1;
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
// Store another live account to slot 1 which will prevent any purge
// since the store count will not be zero
accounts.store_uncached(current_slot, &[(&pubkey2, &account2)]);
accounts.add_root(current_slot);
let (slot1, account_info1) = accounts
.accounts_index
.get(&pubkey, None, None)
.map(|(account_list1, index1)| account_list1.slot_list()[index1])
.unwrap();
let (slot2, account_info2) = accounts
.accounts_index
.get(&pubkey2, None, None)
.map(|(account_list2, index2)| account_list2.slot_list()[index2])
.unwrap();
assert_eq!(slot1, current_slot);
assert_eq!(slot1, slot2);
assert_eq!(account_info1.store_id, account_info2.store_id);
// Step B
current_slot += 1;
let zero_lamport_slot = current_slot;
accounts.store_uncached(current_slot, &[(&pubkey, &zero_lamport_account)]);
accounts.add_root(current_slot);
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
current_slot += 1;
accounts.add_root(current_slot);
accounts.print_accounts_stats("pre_purge");
accounts.clean_accounts(None, false, None);
accounts.print_accounts_stats("post_purge");
// The earlier entry for pubkey in the account index is purged,
let (slot_list_len, index_slot) = {
let account_entry = accounts
.accounts_index
.get_account_read_entry(&pubkey)
.unwrap();
let slot_list = account_entry.slot_list();
(slot_list.len(), slot_list[0].0)
};
assert_eq!(slot_list_len, 1);
// Zero lamport entry was not the one purged
assert_eq!(index_slot, zero_lamport_slot);
// The ref count should still be 2 because no slots were purged
assert_eq!(accounts.ref_count_for_pubkey(&pubkey), 2);
// storage for slot 1 had 2 accounts, now has 1 after the entry for `pubkey`
// was reclaimed
check_storage(&accounts, 1, 1);
// storage for slot 2 had 1 account, still has 1
check_storage(&accounts, 2, 1);
}
#[test]
fn test_accounts_db_purge1() {
solana_logger::setup();
let some_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(some_lamport, no_data, &owner);
let pubkey = solana_sdk::pubkey::new_rand();
let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner);
let accounts = AccountsDb::new_single_for_tests();
accounts.add_root(0);
let mut current_slot = 1;
accounts.set_hash(current_slot, current_slot - 1);
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.set_hash(current_slot, current_slot - 1);
accounts.store_uncached(current_slot, &[(&pubkey, &zero_lamport_account)]);
accounts.add_root(current_slot);
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
// Otherwise slot 2 will not be removed
current_slot += 1;
accounts.set_hash(current_slot, current_slot - 1);
accounts.add_root(current_slot);
accounts.print_accounts_stats("pre_purge");
let ancestors = linear_ancestors(current_slot);
info!("ancestors: {:?}", ancestors);
let hash = accounts.update_accounts_hash_test(current_slot, &ancestors);
accounts.clean_accounts(None, false, None);
assert_eq!(
accounts.update_accounts_hash_test(current_slot, &ancestors),
hash
);
accounts.print_accounts_stats("post_purge");
// Make sure the index is for pubkey cleared
assert!(accounts
.accounts_index
.get_account_read_entry(&pubkey)
.is_none());
// slot 1 & 2 should not have any stores
assert_no_stores(&accounts, 1);
assert_no_stores(&accounts, 2);
}
#[test]
fn test_accounts_db_serialize_zero_and_free() {
solana_logger::setup();
let some_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(some_lamport, no_data, &owner);
let pubkey = solana_sdk::pubkey::new_rand();
let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner);
let account2 = AccountSharedData::new(some_lamport + 1, no_data, &owner);
let pubkey2 = solana_sdk::pubkey::new_rand();
let filler_account = AccountSharedData::new(some_lamport, no_data, &owner);
let filler_account_pubkey = solana_sdk::pubkey::new_rand();
let accounts = AccountsDb::new_single_for_tests();
let mut current_slot = 1;
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&pubkey, &zero_lamport_account)]);
accounts.store_uncached(current_slot, &[(&pubkey2, &account2)]);
// Store enough accounts such that an additional store for slot 2 is created.
while accounts
.storage
.get_slot_stores(current_slot)
.unwrap()
.read()
.unwrap()
.len()
< 2
{
accounts.store_uncached(current_slot, &[(&filler_account_pubkey, &filler_account)]);
}
accounts.add_root(current_slot);
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
accounts.print_accounts_stats("accounts");
accounts.clean_accounts(None, false, None);
accounts.print_accounts_stats("accounts_post_purge");
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts.print_accounts_stats("reconstructed");
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
}
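// Builds a chain of accounts that are zeroed out across consecutive slots, applies the
// caller-supplied transform `f` (e.g. clean and/or snapshot round-trip), and then
// verifies the surviving balances and the bank hash.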
fn with_chained_zero_lamport_accounts<F>(f: F)
where
F: Fn(AccountsDb, Slot) -> AccountsDb,
{
let some_lamport = 223;
let zero_lamport = 0;
let dummy_lamport = 999;
let no_data = 0;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(some_lamport, no_data, &owner);
let account2 = AccountSharedData::new(some_lamport + 100_001, no_data, &owner);
let account3 = AccountSharedData::new(some_lamport + 100_002, no_data, &owner);
let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner);
let pubkey = solana_sdk::pubkey::new_rand();
let purged_pubkey1 = solana_sdk::pubkey::new_rand();
let purged_pubkey2 = solana_sdk::pubkey::new_rand();
let dummy_account = AccountSharedData::new(dummy_lamport, no_data, &owner);
let dummy_pubkey = Pubkey::default();
let accounts = AccountsDb::new_single_for_tests();
let mut current_slot = 1;
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &account2)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &zero_lamport_account)]);
accounts.store_uncached(current_slot, &[(&purged_pubkey2, &account3)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey2, &zero_lamport_account)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&dummy_pubkey, &dummy_account)]);
accounts.add_root(current_slot);
accounts.print_accounts_stats("pre_f");
accounts.update_accounts_hash(4, &Ancestors::default());
let accounts = f(accounts, current_slot);
accounts.print_accounts_stats("post_f");
assert_load_account(&accounts, current_slot, pubkey, some_lamport);
assert_load_account(&accounts, current_slot, purged_pubkey1, 0);
assert_load_account(&accounts, current_slot, purged_pubkey2, 0);
assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport);
accounts
.verify_bank_hash_and_lamports(4, &Ancestors::default(), 1222, true)
.unwrap();
}
#[test]
fn test_accounts_purge_chained_purge_before_snapshot_restore() {
solana_logger::setup();
with_chained_zero_lamport_accounts(|accounts, current_slot| {
accounts.clean_accounts(None, false, None);
reconstruct_accounts_db_via_serialization(&accounts, current_slot)
});
}
#[test]
fn test_accounts_purge_chained_purge_after_snapshot_restore() {
solana_logger::setup();
with_chained_zero_lamport_accounts(|accounts, current_slot| {
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts.print_accounts_stats("after_reconstruct");
accounts.clean_accounts(None, false, None);
reconstruct_accounts_db_via_serialization(&accounts, current_slot)
});
}
#[test]
#[ignore]
fn test_store_account_stress() {
let slot = 42;
let num_threads = 2;
let min_file_bytes = std::mem::size_of::<StoredMeta>()
+ std::mem::size_of::<crate::append_vec::AccountMeta>();
let db = Arc::new(AccountsDb::new_sized(Vec::new(), min_file_bytes as u64));
db.add_root(slot);
let thread_hdls: Vec<_> = (0..num_threads)
.map(|_| {
let db = db.clone();
std::thread::Builder::new()
.name("account-writers".to_string())
.spawn(move || {
let pubkey = solana_sdk::pubkey::new_rand();
let mut account = AccountSharedData::new(1, 0, &pubkey);
let mut i = 0;
loop {
let account_bal = thread_rng().gen_range(1, 99);
account.set_lamports(account_bal);
db.store_uncached(slot, &[(&pubkey, &account)]);
let (account, loaded_slot) = db
    .load_without_fixed_root(&Ancestors::default(), &pubkey)
    .unwrap_or_else(|| {
        panic!("Could not fetch stored account {}, iter {}", pubkey, i)
    });
// the account was stored in the rooted `slot`, so the load must resolve to it
assert_eq!(loaded_slot, slot);
assert_eq!(account.lamports(), account_bal);
i += 1;
}
})
.unwrap()
})
.collect();
for t in thread_hdls {
t.join().unwrap();
}
}
#[test]
fn test_accountsdb_scan_accounts() {
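// Scan visibility: with only slot 0 in the ancestors the scan returns just
// account0; adding unrooted slot 1 makes both stored accounts visible.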
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let key0 = solana_sdk::pubkey::new_rand();
let account0 = AccountSharedData::new(1, 0, &key);
db.store_uncached(0, &[(&key0, &account0)]);
let key1 = solana_sdk::pubkey::new_rand();
let account1 = AccountSharedData::new(2, 0, &key);
db.store_uncached(1, &[(&key1, &account1)]);
let ancestors = vec![(0, 0)].into_iter().collect();
let accounts: Vec<AccountSharedData> = db.unchecked_scan_accounts(
"",
&ancestors,
|accounts: &mut Vec<AccountSharedData>, option| {
accounts.push(option.1.take_account());
},
&ScanConfig::default(),
);
assert_eq!(accounts, vec![account0]);
let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
let accounts: Vec<AccountSharedData> = db.unchecked_scan_accounts(
"",
&ancestors,
|accounts: &mut Vec<AccountSharedData>, option| {
accounts.push(option.1.take_account());
},
&ScanConfig::default(),
);
assert_eq!(accounts.len(), 2);
}
#[test]
fn test_cleanup_key_not_removed() {
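// Purging key1's slot 1 entry exactly from the index must not prevent a later
// store of key1 (slot 2, lamports 3) from being indexed and loadable.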
solana_logger::setup();
let db = AccountsDb::new_single_for_tests();
let key = Pubkey::default();
let key0 = solana_sdk::pubkey::new_rand();
let account0 = AccountSharedData::new(1, 0, &key);
db.store_uncached(0, &[(&key0, &account0)]);
let key1 = solana_sdk::pubkey::new_rand();
let account1 = AccountSharedData::new(2, 0, &key);
db.store_uncached(1, &[(&key1, &account1)]);
db.print_accounts_stats("pre");
let slots: HashSet<Slot> = vec![1].into_iter().collect();
let purge_keys = vec![(key1, slots)];
db.purge_keys_exact(purge_keys.iter());
let account2 = AccountSharedData::new(3, 0, &key);
db.store_uncached(2, &[(&key1, &account2)]);
db.print_accounts_stats("post");
let ancestors = vec![(2, 0)].into_iter().collect();
assert_eq!(
db.load_without_fixed_root(&ancestors, &key1)
.unwrap()
.0
.lamports(),
3
);
}
#[test]
fn test_store_large_account() {
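// An account whose data is larger than DEFAULT_FILE_SIZE must still be
// storable and must load back with the full data length.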
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let data_len = DEFAULT_FILE_SIZE as usize + 7;
let account = AccountSharedData::new(1, data_len, &key);
db.store_uncached(0, &[(&key, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
let ret = db.load_without_fixed_root(&ancestors, &key).unwrap();
assert_eq!(ret.0.data().len(), data_len);
}
#[test]
fn test_stored_readable_account() {
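// A hand-built StoredAccountMeta must compare equal (via accounts_equal) to an
// Account constructed from the same fields.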
let lamports = 1;
let owner = Pubkey::new_unique();
let executable = true;
let rent_epoch = 2;
let meta = StoredMeta {
write_version: 5,
pubkey: Pubkey::new_unique(),
data_len: 7,
};
let account_meta = AccountMeta {
lamports,
owner,
executable,
rent_epoch,
};
let data = Vec::new();
let account = Account {
lamports,
owner,
executable,
rent_epoch,
data: data.clone(),
};
let offset = 99;
let stored_size = 101;
let hash = Hash::new_unique();
let stored_account = StoredAccountMeta {
meta: &meta,
account_meta: &account_meta,
data: &data,
offset,
stored_size,
hash: &hash,
};
assert!(accounts_equal(&account, &stored_account));
}
#[test]
fn test_hash_stored_account() {
// This test uses an UNSAFE trick to detect most cases where fields are added
// to or removed from the account structs without the hash code being updated.
const ACCOUNT_DATA_LEN: usize = 3;
// the type of InputTuple elements must not contain references;
// they should be simple scalars or data blobs
type InputTuple = (
Slot,
StoredMeta,
AccountMeta,
[u8; ACCOUNT_DATA_LEN],
usize, // for StoredAccountMeta::offset
Hash,
);
const INPUT_LEN: usize = std::mem::size_of::<InputTuple>();
type InputBlob = [u8; INPUT_LEN];
let mut blob: InputBlob = [0u8; INPUT_LEN];
// spray memory with decreasing counts so that the data layout can be detected.
for (i, byte) in blob.iter_mut().enumerate() {
*byte = (INPUT_LEN - i) as u8;
}
// UNSAFE: forcibly cast the special byte pattern to actual account fields.
let (slot, meta, account_meta, data, offset, hash): InputTuple =
unsafe { std::mem::transmute::<InputBlob, InputTuple>(blob) };
let stored_account = StoredAccountMeta {
meta: &meta,
account_meta: &account_meta,
data: &data,
offset,
stored_size: CACHE_VIRTUAL_STORED_SIZE,
hash: &hash,
};
let account = stored_account.clone_account();
let expected_account_hash = if cfg!(debug_assertions) {
Hash::from_str("4StuvYHFd7xuShVXB94uHHvpqGMCaacdZnYB74QQkPA1").unwrap()
} else {
Hash::from_str("33ruy7m3Xto7irYfsBSN74aAzQwCQxsfoZxXuZy2Rra3").unwrap()
};
assert_eq!(
AccountsDb::hash_stored_account(slot, &stored_account),
expected_account_hash,
"StoredAccountMeta's data layout might be changed; update hashing if needed."
);
assert_eq!(
AccountsDb::hash_account(slot, &account, &stored_account.meta.pubkey),
expected_account_hash,
"Account-based hashing must be consistent with StoredAccountMeta-based one."
);
}
#[test]
fn test_bank_hash_stats() {
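// Store an account, then re-store it as an executable, zero-lamport account,
// and check the BankHashStats counters recorded for the slot.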
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let some_data_len = 5;
let some_slot: Slot = 0;
let account = AccountSharedData::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
db.store_uncached(some_slot, &[(&key, &account)]);
let mut account = db.load_without_fixed_root(&ancestors, &key).unwrap().0;
account.checked_sub_lamports(1).unwrap();
account.set_executable(true);
db.store_uncached(some_slot, &[(&key, &account)]);
db.add_root(some_slot);
let bank_hashes = db.bank_hashes.read().unwrap();
let bank_hash = bank_hashes.get(&some_slot).unwrap();
assert_eq!(bank_hash.stats.num_updated_accounts, 1);
assert_eq!(bank_hash.stats.num_removed_accounts, 1);
assert_eq!(bank_hash.stats.num_lamports_stored, 1);
assert_eq!(bank_hash.stats.total_data_len, 2 * some_data_len as u64);
assert_eq!(bank_hash.stats.num_executable_accounts, 1);
}
#[test]
fn test_calculate_accounts_hash_check_hash_mismatch() {
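// A store made with a bogus account hash must cause the check-hash pass of
// calculate_accounts_hash_helper to fail on both the index and storage paths.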
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = solana_sdk::pubkey::new_rand();
let some_data_len = 0;
let some_slot: Slot = 0;
let account = AccountSharedData::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
// put wrong hash value in store so we get a mismatch
db.store_accounts_unfrozen(
some_slot,
&[(&key, &account)],
Some(&[&Hash::default()]),
false,
);
db.add_root(some_slot);
let check_hash = true;
for use_index in [true, false] {
assert!(db
.calculate_accounts_hash_helper(
use_index, some_slot, &ancestors, check_hash, false, None, false,
)
.is_err());
}
}
#[test]
fn test_calculate_accounts_hash_check_hash() {
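// With check_hash enabled, the index-based and storage-based accounts-hash
// calculations must produce the same result.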
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = solana_sdk::pubkey::new_rand();
let some_data_len = 0;
let some_slot: Slot = 0;
let account = AccountSharedData::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
db.store_uncached(some_slot, &[(&key, &account)]);
db.add_root(some_slot);
let check_hash = true;
assert_eq!(
db.calculate_accounts_hash_helper(
false, some_slot, &ancestors, check_hash, false, None, false,
)
.unwrap(),
db.calculate_accounts_hash_helper(
true, some_slot, &ancestors, check_hash, false, None, false,
)
.unwrap(),
);
}
#[test]
fn test_verify_bank_hash() {
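// verify_bank_hash_and_lamports must return Ok for a freshly updated hash,
// MissingBankHash once the slot's entry is removed, and MismatchedBankHash
// after a bogus BankHashInfo is inserted.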
use BankHashVerificationError::*;
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = solana_sdk::pubkey::new_rand();
let some_data_len = 0;
let some_slot: Slot = 0;
let account = AccountSharedData::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
db.store_uncached(some_slot, &[(&key, &account)]);
db.add_root(some_slot);
db.update_accounts_hash_test(some_slot, &ancestors);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1, true),
Ok(_)
);
db.bank_hashes.write().unwrap().remove(&some_slot).unwrap();
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1, true),
Err(MissingBankHash)
);
let some_bank_hash = Hash::new(&[0xca; HASH_BYTES]);
let bank_hash_info = BankHashInfo {
hash: some_bank_hash,
snapshot_hash: Hash::new(&[0xca; HASH_BYTES]),
stats: BankHashStats::default(),
};
db.bank_hashes
.write()
.unwrap()
.insert(some_slot, bank_hash_info);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1, true),
Err(MismatchedBankHash)
);
}
#[test]
fn test_verify_bank_capitalization() {
use BankHashVerificationError::*;
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = solana_sdk::pubkey::new_rand();
let some_data_len = 0;
let some_slot: Slot = 0;
let account = AccountSharedData::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
db.store_uncached(some_slot, &[(&key, &account)]);
db.add_root(some_slot);
db.update_accounts_hash_test(some_slot, &ancestors);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1, true),
Ok(_)
);
let native_account_pubkey = solana_sdk::pubkey::new_rand();
db.store_uncached(
some_slot,
&[(
&native_account_pubkey,
&solana_sdk::native_loader::create_loadable_account_for_test("foo"),
)],
);
db.update_accounts_hash_test(some_slot, &ancestors);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 2, true),
Ok(_)
);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 10, true),
Err(MismatchedTotalLamports(expected, actual)) if expected == 2 && actual == 10
);
}
#[test]
fn test_verify_bank_hash_no_account() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let some_slot: Slot = 0;
let ancestors = vec![(some_slot, 0)].into_iter().collect();
db.bank_hashes
.write()
.unwrap()
.insert(some_slot, BankHashInfo::default());
db.add_root(some_slot);
db.update_accounts_hash_test(some_slot, &ancestors);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 0, true),
Ok(_)
);
}
#[test]
fn test_verify_bank_hash_bad_account_hash() {
use BankHashVerificationError::*;
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let some_data_len = 0;
let some_slot: Slot = 0;
let account = AccountSharedData::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
let accounts = &[(&key, &account)];
// update AccountsDb's bank hash
{
let mut bank_hashes = db.bank_hashes.write().unwrap();
bank_hashes
.entry(some_slot)
.or_insert_with(BankHashInfo::default);
}
// provide bogus account hashes
let some_hash = Hash::new(&[0xca; HASH_BYTES]);
db.store_accounts_unfrozen(some_slot, accounts, Some(&[&some_hash]), false);
db.add_root(some_slot);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1, true),
Err(MismatchedAccountHash)
);
}
#[test]
fn test_storage_finder() {
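// The 8190-byte account cannot fit in the pre-created 8192-byte store once
// per-account metadata is included, so the store call must succeed by placing
// it elsewhere.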
solana_logger::setup();
let db = AccountsDb::new_sized(Vec::new(), 16 * 1024);
let key = solana_sdk::pubkey::new_rand();
let lamports = 100;
let data_len = 8190;
let account = AccountSharedData::new(lamports, data_len, &solana_sdk::pubkey::new_rand());
// pre-populate with a smaller empty store
db.create_and_insert_store(1, 8192, "test_storage_finder");
db.store_uncached(1, &[(&key, &account)]);
}
#[test]
fn test_get_snapshot_storages_empty() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
assert!(db.get_snapshot_storages(0, None, None).0.is_empty());
}
#[test]
fn test_get_snapshot_storages_only_older_than_or_equal_to_snapshot_slot() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account = AccountSharedData::new(1, 0, &key);
let before_slot = 0;
let base_slot = before_slot + 1;
let after_slot = base_slot + 1;
db.add_root(base_slot);
db.store_uncached(base_slot, &[(&key, &account)]);
assert!(db
.get_snapshot_storages(before_slot, None, None)
.0
.is_empty());
assert_eq!(1, db.get_snapshot_storages(base_slot, None, None).0.len());
assert_eq!(1, db.get_snapshot_storages(after_slot, None, None).0.len());
}
#[test]
fn test_get_snapshot_storages_only_non_empty() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account = AccountSharedData::new(1, 0, &key);
let base_slot = 0;
let after_slot = base_slot + 1;
db.store_uncached(base_slot, &[(&key, &account)]);
db.storage
.get_slot_stores(base_slot)
.unwrap()
.write()
.unwrap()
.clear();
db.add_root(base_slot);
assert!(db
.get_snapshot_storages(after_slot, None, None)
.0
.is_empty());
db.store_uncached(base_slot, &[(&key, &account)]);
assert_eq!(1, db.get_snapshot_storages(after_slot, None, None).0.len());
}
#[test]
fn test_get_snapshot_storages_only_roots() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account = AccountSharedData::new(1, 0, &key);
let base_slot = 0;
let after_slot = base_slot + 1;
db.store_uncached(base_slot, &[(&key, &account)]);
assert!(db
.get_snapshot_storages(after_slot, None, None)
.0
.is_empty());
db.add_root(base_slot);
assert_eq!(1, db.get_snapshot_storages(after_slot, None, None).0.len());
}
#[test]
fn test_get_snapshot_storages_exclude_empty() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account = AccountSharedData::new(1, 0, &key);
let base_slot = 0;
let after_slot = base_slot + 1;
db.store_uncached(base_slot, &[(&key, &account)]);
db.add_root(base_slot);
assert_eq!(1, db.get_snapshot_storages(after_slot, None, None).0.len());
db.storage
.get_slot_stores(0)
.unwrap()
.read()
.unwrap()
.values()
.next()
.unwrap()
.remove_account(0, true);
assert!(db
.get_snapshot_storages(after_slot, None, None)
.0
.is_empty());
}
#[test]
fn test_get_snapshot_storages_with_base_slot() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account = AccountSharedData::new(1, 0, &key);
let slot = 10;
db.store_uncached(slot, &[(&key, &account)]);
db.add_root(slot);
assert_eq!(
0,
db.get_snapshot_storages(slot + 1, Some(slot), None).0.len()
);
assert_eq!(
1,
db.get_snapshot_storages(slot + 1, Some(slot - 1), None)
.0
.len()
);
}
#[test]
#[should_panic(expected = "double remove of account in slot: 0/store: 0!!")]
fn test_storage_remove_account_double_remove() {
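// Removing the same account from a storage entry twice must hit the
// `should_panic` message above.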
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
accounts.store_uncached(0, &[(&pubkey, &account)]);
let storage_entry = accounts
.storage
.get_slot_stores(0)
.unwrap()
.read()
.unwrap()
.values()
.next()
.unwrap()
.clone();
storage_entry.remove_account(0, true);
storage_entry.remove_account(0, true);
}
#[test]
fn test_accounts_purge_long_chained_after_snapshot_restore() {
solana_logger::setup();
let old_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(old_lamport, no_data, &owner);
let account2 = AccountSharedData::new(old_lamport + 100_001, no_data, &owner);
let account3 = AccountSharedData::new(old_lamport + 100_002, no_data, &owner);
let dummy_account = AccountSharedData::new(99_999_999, no_data, &owner);
let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner);
let pubkey = solana_sdk::pubkey::new_rand();
let dummy_pubkey = solana_sdk::pubkey::new_rand();
let purged_pubkey1 = solana_sdk::pubkey::new_rand();
let purged_pubkey2 = solana_sdk::pubkey::new_rand();
let mut current_slot = 0;
let accounts = AccountsDb::new_single_for_tests();
// create intermediate updates to purged_pubkey1 so that generate_index
// must add the slots as roots all at once at the end
current_slot += 1;
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &account2)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &account2)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &account2)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &zero_lamport_account)]);
accounts.store_uncached(current_slot, &[(&purged_pubkey2, &account3)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey2, &zero_lamport_account)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&dummy_pubkey, &dummy_account)]);
accounts.add_root(current_slot);
accounts.print_count_and_status("before reconstruct");
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts.print_count_and_status("before purge zero");
accounts.clean_accounts(None, false, None);
accounts.print_count_and_status("after purge zero");
assert_load_account(&accounts, current_slot, pubkey, old_lamport);
assert_load_account(&accounts, current_slot, purged_pubkey1, 0);
assert_load_account(&accounts, current_slot, purged_pubkey2, 0);
}
fn do_full_clean_refcount(store1_first: bool, store_size: u64) {
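// Shared body for test_full_clean_refcount: stores, updates, and finally
// zero-lamports pubkey1..3 across several rooted slots, then asserts that
// clean drops every ref count to zero regardless of store size and order.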
let pubkey1 = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
let pubkey2 = Pubkey::from_str("My22211111111111111111111111111111111111111").unwrap();
let pubkey3 = Pubkey::from_str("My33311111111111111111111111111111111111111").unwrap();
let old_lamport = 223;
let zero_lamport = 0;
let dummy_lamport = 999_999;
// size data so only 1 fits in a 4k store
let data_size = 2200;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(old_lamport, data_size, &owner);
let account2 = AccountSharedData::new(old_lamport + 100_001, data_size, &owner);
let account3 = AccountSharedData::new(old_lamport + 100_002, data_size, &owner);
let account4 = AccountSharedData::new(dummy_lamport, data_size, &owner);
let zero_lamport_account = AccountSharedData::new(zero_lamport, data_size, &owner);
let mut current_slot = 0;
let accounts = AccountsDb::new_sized_no_extra_stores(Vec::new(), store_size);
// A: Initialize AccountsDb with pubkey1 and pubkey2
current_slot += 1;
if store1_first {
accounts.store_uncached(current_slot, &[(&pubkey1, &account)]);
accounts.store_uncached(current_slot, &[(&pubkey2, &account)]);
} else {
accounts.store_uncached(current_slot, &[(&pubkey2, &account)]);
accounts.store_uncached(current_slot, &[(&pubkey1, &account)]);
}
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
info!("post A");
accounts.print_accounts_stats("Post-A");
// B: Test multiple updates to pubkey1 in a single slot/storage
current_slot += 1;
assert_eq!(0, accounts.alive_account_count_in_slot(current_slot));
assert_eq!(1, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &account2)]);
accounts.store_uncached(current_slot, &[(&pubkey1, &account2)]);
assert_eq!(1, accounts.alive_account_count_in_slot(current_slot));
// Stores to the same pubkey in the same slot only count once towards the
// ref count
assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1));
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
accounts.print_accounts_stats("Post-B pre-clean");
accounts.clean_accounts(None, false, None);
info!("post B");
accounts.print_accounts_stats("Post-B");
// C: more updates to trigger clean of previous updates
current_slot += 1;
assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &account3)]);
accounts.store_uncached(current_slot, &[(&pubkey2, &account3)]);
accounts.store_uncached(current_slot, &[(&pubkey3, &account4)]);
assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
info!("post C");
accounts.print_accounts_stats("Post-C");
// D: Make all keys 0-lamport, cleans all keys
current_slot += 1;
assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &zero_lamport_account)]);
accounts.store_uncached(current_slot, &[(&pubkey2, &zero_lamport_account)]);
accounts.store_uncached(current_slot, &[(&pubkey3, &zero_lamport_account)]);
let snapshot_stores = accounts.get_snapshot_storages(current_slot, None, None).0;
let total_accounts: usize = snapshot_stores
.iter()
.flatten()
.map(|s| s.all_accounts().len())
.sum();
assert!(!snapshot_stores.is_empty());
assert!(total_accounts > 0);
info!("post D");
accounts.print_accounts_stats("Post-D");
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
accounts.clean_accounts(None, false, None);
accounts.print_accounts_stats("Post-D clean");
let total_accounts_post_clean: usize = snapshot_stores
.iter()
.flatten()
.map(|s| s.all_accounts().len())
.sum();
assert_eq!(total_accounts, total_accounts_post_clean);
// should clean all 3 pubkeys
assert_eq!(accounts.ref_count_for_pubkey(&pubkey1), 0);
assert_eq!(accounts.ref_count_for_pubkey(&pubkey2), 0);
assert_eq!(accounts.ref_count_for_pubkey(&pubkey3), 0);
}
#[test]
fn test_full_clean_refcount() {
solana_logger::setup();
// Set up 3 scenarios which try to differentiate between pubkey1 being in an
// available slot or a full slot, which would cause a different reset behavior
// when pubkey1 is cleaned and therefore cause the ref count to be incorrect,
// preventing removal of that key.
//
// do stores with a 4mb size so only 1 store is created per slot
do_full_clean_refcount(false, 4 * 1024 * 1024);
// do stores with a 4k size and store pubkey1 first
do_full_clean_refcount(false, 4096);
// do stores with a 4k size and store pubkey1 2nd
do_full_clean_refcount(true, 4096);
}
#[test]
fn test_accounts_clean_after_snapshot_restore_then_old_revives() {
solana_logger::setup();
let old_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let dummy_lamport = 999_999;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(old_lamport, no_data, &owner);
let account2 = AccountSharedData::new(old_lamport + 100_001, no_data, &owner);
let account3 = AccountSharedData::new(old_lamport + 100_002, no_data, &owner);
let dummy_account = AccountSharedData::new(dummy_lamport, no_data, &owner);
let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner);
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
let dummy_pubkey = solana_sdk::pubkey::new_rand();
let mut current_slot = 0;
let accounts = AccountsDb::new_single_for_tests();
// A: Initialize AccountsDb with pubkey1 and pubkey2
current_slot += 1;
accounts.store_uncached(current_slot, &[(&pubkey1, &account)]);
accounts.store_uncached(current_slot, &[(&pubkey2, &account)]);
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
// B: Test multiple updates to pubkey1 in a single slot/storage
current_slot += 1;
assert_eq!(0, accounts.alive_account_count_in_slot(current_slot));
assert_eq!(1, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &account2)]);
accounts.store_uncached(current_slot, &[(&pubkey1, &account2)]);
assert_eq!(1, accounts.alive_account_count_in_slot(current_slot));
// Stores to the same pubkey in the same slot only count once towards the
// ref count
assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1));
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
// C: Yet another update to trigger lazy clean of step A
current_slot += 1;
assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &account3)]);
assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
// D: Make pubkey1 0-lamport; also triggers clean of step B
current_slot += 1;
assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &zero_lamport_account)]);
accounts.clean_accounts(None, false, None);
assert_eq!(
// Removed one reference from the dead slot (reference only counted once
// even though there were two stores to the pubkey in that slot)
3, /* == 3 - 1 + 1 */
accounts.ref_count_for_pubkey(&pubkey1)
);
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
// E: Avoid missing bank hash error
current_slot += 1;
accounts.store_uncached(current_slot, &[(&dummy_pubkey, &dummy_account)]);
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
assert_load_account(&accounts, current_slot, pubkey1, zero_lamport);
assert_load_account(&accounts, current_slot, pubkey2, old_lamport);
assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport);
// At this point, there are no index entries for steps A and B.
// If steps C and D were purged, a snapshot restore would revive pubkey1
// with the state it had at step A.
// The refcount is what prevents that from happening.
accounts.clean_accounts(None, false, None);
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts.clean_accounts(None, false, None);
info!("pubkey: {}", pubkey1);
accounts.print_accounts_stats("pre_clean");
assert_load_account(&accounts, current_slot, pubkey1, zero_lamport);
assert_load_account(&accounts, current_slot, pubkey2, old_lamport);
assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport);
// F: Finally, make Step A cleanable
current_slot += 1;
accounts.store_uncached(current_slot, &[(&pubkey2, &account)]);
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
// Do clean
accounts.clean_accounts(None, false, None);
// 2nd clean needed to clean-up pubkey1
accounts.clean_accounts(None, false, None);
// Ensure pubkey1 is finally cleaned from the index (its step-A store
// became dead once pubkey2 was updated again in step F)
assert_not_load_account(&accounts, current_slot, pubkey1);
assert_load_account(&accounts, current_slot, pubkey2, old_lamport);
assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport);
}
#[test]
fn test_clean_stored_dead_slots_empty() {
let accounts = AccountsDb::new_single_for_tests();
let mut dead_slots = HashSet::new();
dead_slots.insert(10);
accounts.clean_stored_dead_slots(&dead_slots, None);
}
#[test]
fn test_shrink_all_slots_none() {
for startup in &[false, true] {
let accounts = AccountsDb::new_single_for_tests();
for _ in 0..10 {
accounts.shrink_candidate_slots();
}
accounts.shrink_all_slots(*startup, None);
}
}
#[test]
fn test_shrink_next_slots() {
let mut accounts = AccountsDb::new_single_for_tests();
accounts.caching_enabled = false;
let mut current_slot = 7;
assert_eq!(
vec![None, None, None],
(0..3)
.map(|_| accounts.next_shrink_slot_v1())
.collect::<Vec<_>>()
);
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
assert_eq!(
vec![Some(7), Some(7), Some(7)],
(0..3)
.map(|_| accounts.next_shrink_slot_v1())
.collect::<Vec<_>>()
);
current_slot += 1;
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
let slots = (0..6)
.map(|_| accounts.next_shrink_slot_v1())
.collect::<Vec<_>>();
// Because the origin of this data is HashMap (not BTreeMap), key order is arbitrary per cycle.
assert!(
vec![Some(7), Some(8), Some(7), Some(8), Some(7), Some(8)] == slots
|| vec![Some(8), Some(7), Some(8), Some(7), Some(8), Some(7)] == slots
);
}
#[test]
fn test_shrink_reset_uncleaned_roots() {
let mut accounts = AccountsDb::new_single_for_tests();
accounts.caching_enabled = false;
accounts.reset_uncleaned_roots_v1();
assert_eq!(
*accounts.shrink_candidate_slots_v1.lock().unwrap(),
vec![] as Vec<Slot>
);
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
accounts.get_accounts_delta_hash(1);
accounts.add_root(1);
accounts.get_accounts_delta_hash(2);
accounts.add_root(2);
accounts.reset_uncleaned_roots_v1();
let actual_slots = accounts.shrink_candidate_slots_v1.lock().unwrap().clone();
assert_eq!(actual_slots, vec![] as Vec<Slot>);
accounts.reset_uncleaned_roots_v1();
let mut actual_slots = accounts.shrink_candidate_slots_v1.lock().unwrap().clone();
actual_slots.sort_unstable();
assert_eq!(actual_slots, vec![0, 1, 2]);
accounts.accounts_index.clear_roots();
let mut actual_slots = (0..5)
.map(|_| accounts.next_shrink_slot_v1())
.collect::<Vec<_>>();
actual_slots.sort();
assert_eq!(actual_slots, vec![None, None, Some(0), Some(1), Some(2)],);
}
#[test]
fn test_shrink_stale_slots_processed() {
solana_logger::setup();
for startup in &[false, true] {
let accounts = AccountsDb::new_single_for_tests();
let pubkey_count = 100;
let pubkeys: Vec<_> = (0..pubkey_count)
.map(|_| solana_sdk::pubkey::new_rand())
.collect();
let some_lamport = 223;
let no_data = 0;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(some_lamport, no_data, &owner);
let mut current_slot = 0;
current_slot += 1;
for pubkey in &pubkeys {
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
}
let shrink_slot = current_slot;
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
current_slot += 1;
let pubkey_count_after_shrink = 10;
let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink];
for pubkey in updated_pubkeys {
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
}
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
accounts.clean_accounts(None, false, None);
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
accounts.shrink_all_slots(*startup, None);
assert_eq!(
pubkey_count_after_shrink,
accounts.all_account_count_in_append_vec(shrink_slot)
);
let no_ancestors = Ancestors::default();
accounts.update_accounts_hash(current_slot, &no_ancestors);
accounts
.verify_bank_hash_and_lamports(current_slot, &no_ancestors, 22300, true)
.unwrap();
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts
.verify_bank_hash_and_lamports(current_slot, &no_ancestors, 22300, true)
.unwrap();
// repeating should be no-op
accounts.shrink_all_slots(*startup, None);
assert_eq!(
pubkey_count_after_shrink,
accounts.all_account_count_in_append_vec(shrink_slot)
);
}
}
#[test]
fn test_shrink_candidate_slots() {
solana_logger::setup();
let accounts = AccountsDb::new_single_for_tests();
let pubkey_count = 30000;
let pubkeys: Vec<_> = (0..pubkey_count)
.map(|_| solana_sdk::pubkey::new_rand())
.collect();
let some_lamport = 223;
let no_data = 0;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(some_lamport, no_data, &owner);
let mut current_slot = 0;
current_slot += 1;
for pubkey in &pubkeys {
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
}
let shrink_slot = current_slot;
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
current_slot += 1;
let pubkey_count_after_shrink = 25000;
let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink];
for pubkey in updated_pubkeys {
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
}
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
accounts.clean_accounts(None, false, None);
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
// Only try to shrink candidate slots; nothing happens because the alive
// ratio (25000 / 30000) is not small enough to trigger a shrink
accounts.shrink_candidate_slots();
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
// Now, do full-shrink.
accounts.shrink_all_slots(false, None);
assert_eq!(
pubkey_count_after_shrink,
accounts.all_account_count_in_append_vec(shrink_slot)
);
}
#[test]
fn test_select_candidates_by_total_usage_no_candidates() {
// no input candidates -- none should be selected
solana_logger::setup();
let accounts = AccountsDb::new_single_for_tests();
let candidates: ShrinkCandidates = HashMap::new();
let (selected_candidates, next_candidates) =
accounts.select_candidates_by_total_usage(&candidates, DEFAULT_ACCOUNTS_SHRINK_RATIO);
assert_eq!(0, selected_candidates.len());
assert_eq!(0, next_candidates.len());
}
#[test]
fn test_select_candidates_by_total_usage_3_way_split_condition() {
// three candidates, one selected for shrink, one is put back to the candidate list and one is ignored
solana_logger::setup();
let accounts = AccountsDb::new_single_for_tests();
let mut candidates: ShrinkCandidates = HashMap::new();
let common_store_path = Path::new("");
let common_slot_id = 12;
let store_file_size = 2 * PAGE_SIZE;
let store1_id = 22;
let store1 = Arc::new(AccountStorageEntry::new(
common_store_path,
common_slot_id,
store1_id,
store_file_size,
));
store1.alive_bytes.store(0, Ordering::Release);
candidates
.entry(common_slot_id)
.or_default()
.insert(store1.append_vec_id(), store1.clone());
let store2_id = 44;
let store2 = Arc::new(AccountStorageEntry::new(
common_store_path,
common_slot_id,
store2_id,
store_file_size,
));
// store2's alive_ratio is 0.5, as its page-aligned alive size is 1 page.
let store2_alive_bytes = (PAGE_SIZE - 1) as usize;
store2
.alive_bytes
.store(store2_alive_bytes, Ordering::Release);
candidates
.entry(common_slot_id)
.or_default()
.insert(store2.append_vec_id(), store2.clone());
let store3_id = 55;
let entry3 = Arc::new(AccountStorageEntry::new(
common_store_path,
common_slot_id,
store3_id,
store_file_size,
));
// store3's alive ratio is 1.0, as its page-aligned alive size is 2 pages.
let store3_alive_bytes = (PAGE_SIZE + 1) as usize;
entry3
.alive_bytes
.store(store3_alive_bytes, Ordering::Release);
candidates
.entry(common_slot_id)
.or_default()
.insert(entry3.append_vec_id(), entry3.clone());
// Set the target alive ratio to 0.6 so that only store1 is selected for shrink:
// the remaining two stores have a combined alive ratio of 0.75 (3 alive pages /
// 4 total pages), which is above the target. The target is also larger than
// store2's own alive ratio (0.5), so store2 is added to the candidate list for
// the next round.
let target_alive_ratio = 0.6;
let (selected_candidates, next_candidates) =
accounts.select_candidates_by_total_usage(&candidates, target_alive_ratio);
assert_eq!(1, selected_candidates.len());
assert_eq!(1, selected_candidates[&common_slot_id].len());
assert!(selected_candidates[&common_slot_id].contains(&store1.append_vec_id()));
assert_eq!(1, next_candidates.len());
assert!(next_candidates[&common_slot_id].contains(&store2.append_vec_id()));
}
#[test]
fn test_select_candidates_by_total_usage_2_way_split_condition() {
// three candidates, 2 are selected for shrink, one is ignored
solana_logger::setup();
let accounts = AccountsDb::new_single_for_tests();
let mut candidates: ShrinkCandidates = HashMap::new();
let common_store_path = Path::new("");
let common_slot_id = 12;
let store_file_size = 2 * PAGE_SIZE;
let store1_id = 22;
let store1 = Arc::new(AccountStorageEntry::new(
common_store_path,
common_slot_id,
store1_id,
store_file_size,
));
store1.alive_bytes.store(0, Ordering::Release);
candidates
.entry(common_slot_id)
.or_default()
.insert(store1.append_vec_id(), store1.clone());
let store2_id = 44;
let store2 = Arc::new(AccountStorageEntry::new(
common_store_path,
common_slot_id,
store2_id,
store_file_size,
));
// store2's alive_ratio is 0.5, as its page-aligned alive size is 1 page.
let store2_alive_bytes = (PAGE_SIZE - 1) as usize;
store2
.alive_bytes
.store(store2_alive_bytes, Ordering::Release);
candidates
.entry(common_slot_id)
.or_default()
.insert(store2.append_vec_id(), store2.clone());
let store3_id = 55;
let entry3 = Arc::new(AccountStorageEntry::new(
common_store_path,
common_slot_id,
store3_id,
store_file_size,
));
// store3's alive ratio is 1.0, as its page-aligned alive size is 2 pages.
let store3_alive_bytes = (PAGE_SIZE + 1) as usize;
entry3
.alive_bytes
.store(store3_alive_bytes, Ordering::Release);
candidates
.entry(common_slot_id)
.or_default()
.insert(entry3.append_vec_id(), entry3.clone());
// Set the target ratio to the default (0.8); both store1 and store2 must be selected and store3 is ignored.
let target_alive_ratio = DEFAULT_ACCOUNTS_SHRINK_RATIO;
let (selected_candidates, next_candidates) =
accounts.select_candidates_by_total_usage(&candidates, target_alive_ratio);
assert_eq!(1, selected_candidates.len());
assert_eq!(2, selected_candidates[&common_slot_id].len());
assert!(selected_candidates[&common_slot_id].contains(&store1.append_vec_id()));
assert!(selected_candidates[&common_slot_id].contains(&store2.append_vec_id()));
assert_eq!(0, next_candidates.len());
}
#[test]
fn test_select_candidates_by_total_usage_all_clean() {
// 2 candidates, they must be selected to achieve the target alive ratio
solana_logger::setup();
let accounts = AccountsDb::new_single_for_tests();
let mut candidates: ShrinkCandidates = HashMap::new();
let slot1 = 12;
let common_store_path = Path::new("");
let store_file_size = 4 * PAGE_SIZE;
let store1_id = 22;
let store1 = Arc::new(AccountStorageEntry::new(
common_store_path,
slot1,
store1_id,
store_file_size,
));
// store1 has 1 page of page-aligned alive bytes; its alive ratio is 1/4 = 0.25
let store1_alive_bytes = (PAGE_SIZE - 1) as usize;
store1
.alive_bytes
.store(store1_alive_bytes, Ordering::Release);
candidates
.entry(slot1)
.or_default()
.insert(store1.append_vec_id(), store1.clone());
let store2_id = 44;
let slot2 = 44;
let store2 = Arc::new(AccountStorageEntry::new(
common_store_path,
slot2,
store2_id,
store_file_size,
));
// store2 has 2 pages of page-aligned alive bytes; its alive ratio is 2/4 = 0.5
let store2_alive_bytes = (PAGE_SIZE + 1) as usize;
store2
.alive_bytes
.store(store2_alive_bytes, Ordering::Release);
candidates
.entry(slot2)
.or_default()
.insert(store2.append_vec_id(), store2.clone());
// Set the target ratio to the default (0.8); both stores, from the two different slots, must be selected.
let target_alive_ratio = DEFAULT_ACCOUNTS_SHRINK_RATIO;
let (selected_candidates, next_candidates) =
accounts.select_candidates_by_total_usage(&candidates, target_alive_ratio);
assert_eq!(2, selected_candidates.len());
assert_eq!(1, selected_candidates[&slot1].len());
assert_eq!(1, selected_candidates[&slot2].len());
assert!(selected_candidates[&slot1].contains(&store1.append_vec_id()));
assert!(selected_candidates[&slot2].contains(&store2.append_vec_id()));
assert_eq!(0, next_candidates.len());
}
#[test]
fn test_shrink_stale_slots_skipped() {
solana_logger::setup();
let mut accounts = AccountsDb::new_single_for_tests();
accounts.caching_enabled = false;
let pubkey_count = 30000;
let pubkeys: Vec<_> = (0..pubkey_count)
.map(|_| solana_sdk::pubkey::new_rand())
.collect();
let some_lamport = 223;
let no_data = 0;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(some_lamport, no_data, &owner);
let mut current_slot = 0;
current_slot += 1;
for pubkey in &pubkeys {
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
}
let shrink_slot = current_slot;
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
current_slot += 1;
let pubkey_count_after_shrink = 25000;
let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink];
for pubkey in updated_pubkeys {
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
}
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
accounts.clean_accounts(None, false, None);
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
// Only try to shrink stale slots.
accounts.shrink_all_stale_slots_v1();
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
// Now, do full-shrink.
accounts.shrink_all_slots(false, None);
assert_eq!(
pubkey_count_after_shrink,
accounts.all_account_count_in_append_vec(shrink_slot)
);
}
const UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE: bool = false;
#[test]
fn test_delete_dependencies() {
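// Exercises calc_delete_dependencies: key0..key2 chain stores 0..=3 together,
// so after the calculation stores 0, 1, and 2 must still have non-zero counts.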
solana_logger::setup();
let accounts_index = AccountsIndex::default_for_tests();
let key0 = Pubkey::new_from_array([0u8; 32]);
let key1 = Pubkey::new_from_array([1u8; 32]);
let key2 = Pubkey::new_from_array([2u8; 32]);
let info0 = AccountInfo {
store_id: 0,
offset: 0,
stored_size: 0,
lamports: 0,
};
let info1 = AccountInfo {
store_id: 1,
offset: 0,
stored_size: 0,
lamports: 0,
};
let info2 = AccountInfo {
store_id: 2,
offset: 0,
stored_size: 0,
lamports: 0,
};
let info3 = AccountInfo {
store_id: 3,
offset: 0,
stored_size: 0,
lamports: 0,
};
let mut reclaims = vec![];
accounts_index.upsert(
0,
&key0,
&Pubkey::default(),
&[],
&AccountSecondaryIndexes::default(),
info0,
&mut reclaims,
UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
);
accounts_index.upsert(
1,
&key0,
&Pubkey::default(),
&[],
&AccountSecondaryIndexes::default(),
info1,
&mut reclaims,
UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
);
accounts_index.upsert(
1,
&key1,
&Pubkey::default(),
&[],
&AccountSecondaryIndexes::default(),
info1,
&mut reclaims,
UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
);
accounts_index.upsert(
2,
&key1,
&Pubkey::default(),
&[],
&AccountSecondaryIndexes::default(),
info2,
&mut reclaims,
UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
);
accounts_index.upsert(
2,
&key2,
&Pubkey::default(),
&[],
&AccountSecondaryIndexes::default(),
info2,
&mut reclaims,
UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
);
accounts_index.upsert(
3,
&key2,
&Pubkey::default(),
&[],
&AccountSecondaryIndexes::default(),
info3,
&mut reclaims,
UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
);
accounts_index.add_root(0, false);
accounts_index.add_root(1, false);
accounts_index.add_root(2, false);
accounts_index.add_root(3, false);
let mut purges = HashMap::new();
let (key0_entry, _) = accounts_index.get(&key0, None, None).unwrap();
purges.insert(key0, accounts_index.roots_and_ref_count(&key0_entry, None));
let (key1_entry, _) = accounts_index.get(&key1, None, None).unwrap();
purges.insert(key1, accounts_index.roots_and_ref_count(&key1_entry, None));
let (key2_entry, _) = accounts_index.get(&key2, None, None).unwrap();
purges.insert(key2, accounts_index.roots_and_ref_count(&key2_entry, None));
for (key, (list, ref_count)) in &purges {
info!(" purge {} ref_count {} =>", key, ref_count);
for x in list {
info!(" {:?}", x);
}
}
let mut store_counts = HashMap::new();
store_counts.insert(0, (0, HashSet::from_iter(vec![key0])));
store_counts.insert(1, (0, HashSet::from_iter(vec![key0, key1])));
store_counts.insert(2, (0, HashSet::from_iter(vec![key1, key2])));
store_counts.insert(3, (1, HashSet::from_iter(vec![key2])));
AccountsDb::calc_delete_dependencies(&purges, &mut store_counts);
let mut stores: Vec<_> = store_counts.keys().cloned().collect();
stores.sort_unstable();
for store in &stores {
info!(
"store: {:?} : {:?}",
store,
store_counts.get(store).unwrap()
);
}
for x in 0..3 {
assert!(store_counts[&x].0 >= 1);
}
}
#[test]
fn test_account_balance_for_capitalization_sysvar() {
let normal_sysvar = solana_sdk::account::create_account_for_test(
&solana_sdk::slot_history::SlotHistory::default(),
);
assert_eq!(normal_sysvar.lamports(), 1);
}
#[test]
fn test_account_balance_for_capitalization_native_program() {
let normal_native_program =
solana_sdk::native_loader::create_loadable_account_for_test("foo");
assert_eq!(normal_native_program.lamports(), 1);
}
#[test]
fn test_checked_sum_for_capitalization_normal() {
assert_eq!(
AccountsDb::checked_sum_for_capitalization(vec![1, 2].into_iter()),
3
);
}
#[test]
#[should_panic(expected = "overflow is detected while summing capitalization")]
fn test_checked_sum_for_capitalization_overflow() {
assert_eq!(
AccountsDb::checked_sum_for_capitalization(vec![1, u64::max_value()].into_iter()),
3
);
}
#[test]
fn test_store_overhead() {
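// A default (empty) account stored in an append vec must take less space than
// STORE_META_OVERHEAD.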
solana_logger::setup();
let accounts = AccountsDb::new_single_for_tests();
let account = AccountSharedData::default();
let pubkey = solana_sdk::pubkey::new_rand();
accounts.store_uncached(0, &[(&pubkey, &account)]);
let slot_stores = accounts.storage.get_slot_stores(0).unwrap();
let mut total_len = 0;
for (_id, store) in slot_stores.read().unwrap().iter() {
total_len += store.accounts.len();
}
info!("total: {}", total_len);
assert!(total_len < STORE_META_OVERHEAD);
}
#[test]
fn test_store_clean_after_shrink() {
solana_logger::setup();
let accounts = AccountsDb::new_with_config_for_tests(
vec![],
&ClusterType::Development,
AccountSecondaryIndexes::default(),
true,
AccountShrinkThreshold::default(),
);
let account = AccountSharedData::new(1, 16 * 4096, &Pubkey::default());
let pubkey1 = solana_sdk::pubkey::new_rand();
accounts.store_cached(0, &[(&pubkey1, &account)]);
let pubkey2 = solana_sdk::pubkey::new_rand();
accounts.store_cached(0, &[(&pubkey2, &account)]);
let zero_account = AccountSharedData::new(0, 1, &Pubkey::default());
accounts.store_cached(1, &[(&pubkey1, &zero_account)]);
// Add root 0 and flush separately
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
accounts.flush_accounts_cache(true, None);
// clear out the dirty keys
accounts.clean_accounts(None, false, None);
// flush 1
accounts.get_accounts_delta_hash(1);
accounts.add_root(1);
accounts.flush_accounts_cache(true, None);
accounts.print_accounts_stats("pre-clean");
// clean to remove pubkey1 from 0,
// shrink to shrink pubkey1 from 0
// then another clean to remove pubkey1 from slot 1
accounts.clean_accounts(None, false, None);
accounts.shrink_candidate_slots();
accounts.clean_accounts(None, false, None);
accounts.print_accounts_stats("post-clean");
assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 0);
}
#[test]
fn test_store_reuse() {
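// After clean + shrink, the freed stores land in recycle_stores; later stores
// should draw from that pool and the accounts must still load correctly.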
solana_logger::setup();
let accounts = AccountsDb::new_sized(vec![], 4096);
let size = 100;
let num_accounts: usize = 100;
let mut keys = Vec::new();
for i in 0..num_accounts {
let account = AccountSharedData::new((i + 1) as u64, size, &Pubkey::default());
let pubkey = solana_sdk::pubkey::new_rand();
accounts.store_uncached(0, &[(&pubkey, &account)]);
keys.push(pubkey);
}
accounts.add_root(0);
for (i, key) in keys[1..].iter().enumerate() {
let account =
AccountSharedData::new((1 + i + num_accounts) as u64, size, &Pubkey::default());
accounts.store_uncached(1, &[(key, &account)]);
}
accounts.add_root(1);
accounts.clean_accounts(None, false, None);
accounts.shrink_all_slots(false, None);
// Clean again to flush the dirty stores
// and allow them to be recycled in the next step
accounts.clean_accounts(None, false, None);
accounts.print_accounts_stats("post-shrink");
let num_stores = accounts.recycle_stores.read().unwrap().entry_count();
assert!(num_stores > 0);
let mut account_refs = Vec::new();
let num_to_store = 20;
for (i, key) in keys[..num_to_store].iter().enumerate() {
let account = AccountSharedData::new(
(1 + i + 2 * num_accounts) as u64,
i + 20,
&Pubkey::default(),
);
accounts.store_uncached(2, &[(key, &account)]);
account_refs.push(account);
}
assert!(accounts.recycle_stores.read().unwrap().entry_count() < num_stores);
accounts.print_accounts_stats("post-store");
let mut ancestors = Ancestors::default();
ancestors.insert(1, 0);
ancestors.insert(2, 1);
for (key, account_ref) in keys[..num_to_store].iter().zip(account_refs) {
assert_eq!(
accounts.load_without_fixed_root(&ancestors, key).unwrap().0,
account_ref
);
}
}
#[test]
fn test_zero_lamport_new_root_not_cleaned() {
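// Cleaning with max_clean_root = Some(0) must leave the zero-lamport entry
// rooted at slot 1 loadable.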
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let account_key = Pubkey::new_unique();
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// Store zero lamport account into slots 0 and 1, root both slots
db.store_uncached(0, &[(&account_key, &zero_lamport_account)]);
db.store_uncached(1, &[(&account_key, &zero_lamport_account)]);
db.get_accounts_delta_hash(0);
db.add_root(0);
db.get_accounts_delta_hash(1);
db.add_root(1);
// Only clean zero lamport accounts up to slot 0
db.clean_accounts(Some(0), false, None);
// Should still be able to find zero lamport account in slot 1
assert_eq!(
db.load_without_fixed_root(&Ancestors::default(), &account_key),
Some((zero_lamport_account, 1))
);
}
#[test]
fn test_store_load_cached() {
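// A cached (unflushed) store is only visible via ancestors that contain its
// slot, or after that slot becomes a root.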
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
db.caching_enabled = true;
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
let slot = 0;
db.store_cached(slot, &[(&key, &account0)]);
// Load with no ancestors and no root will return nothing
assert!(db
.load_without_fixed_root(&Ancestors::default(), &key)
.is_none());
// Load with ancestors not equal to `slot` will return nothing
let ancestors = vec![(slot + 1, 1)].into_iter().collect();
assert!(db.load_without_fixed_root(&ancestors, &key).is_none());
// Load with ancestors equal to `slot` will return the account
let ancestors = vec![(slot, 1)].into_iter().collect();
assert_eq!(
db.load_without_fixed_root(&ancestors, &key),
Some((account0.clone(), slot))
);
// Adding root will return the account even without ancestors
db.add_root(slot);
assert_eq!(
db.load_without_fixed_root(&Ancestors::default(), &key),
Some((account0, slot))
);
}
#[test]
fn test_store_flush_load_cached() {
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
db.caching_enabled = true;
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
let slot = 0;
db.store_cached(slot, &[(&key, &account0)]);
db.mark_slot_frozen(slot);
// No root was added yet, so an ancestor is required to find
// the account
db.flush_accounts_cache(true, None);
let ancestors = vec![(slot, 1)].into_iter().collect();
assert_eq!(
db.load_without_fixed_root(&ancestors, &key),
Some((account0.clone(), slot))
);
// Add root then flush
db.add_root(slot);
db.flush_accounts_cache(true, None);
assert_eq!(
db.load_without_fixed_root(&Ancestors::default(), &key),
Some((account0, slot))
);
}
#[test]
fn test_flush_accounts_cache() {
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
db.caching_enabled = true;
let account0 = AccountSharedData::new(1, 0, &Pubkey::default());
let unrooted_slot = 4;
let root5 = 5;
let root6 = 6;
let unrooted_key = solana_sdk::pubkey::new_rand();
let key5 = solana_sdk::pubkey::new_rand();
let key6 = solana_sdk::pubkey::new_rand();
db.store_cached(unrooted_slot, &[(&unrooted_key, &account0)]);
db.store_cached(root5, &[(&key5, &account0)]);
db.store_cached(root6, &[(&key6, &account0)]);
for slot in &[unrooted_slot, root5, root6] {
db.mark_slot_frozen(*slot);
}
db.add_root(root5);
db.add_root(root6);
// Unrooted slot should be able to be fetched before the flush
let ancestors = vec![(unrooted_slot, 1)].into_iter().collect();
assert_eq!(
db.load_without_fixed_root(&ancestors, &unrooted_key),
Some((account0.clone(), unrooted_slot))
);
db.flush_accounts_cache(true, None);
// After the flush, the unrooted slot is still in the cache
assert!(db
.load_without_fixed_root(&ancestors, &unrooted_key)
.is_some());
assert!(db
.accounts_index
.get_account_read_entry(&unrooted_key)
.is_some());
assert_eq!(db.accounts_cache.num_slots(), 1);
assert!(db.accounts_cache.slot_cache(unrooted_slot).is_some());
assert_eq!(
db.load_without_fixed_root(&Ancestors::default(), &key5),
Some((account0.clone(), root5))
);
assert_eq!(
db.load_without_fixed_root(&Ancestors::default(), &key6),
Some((account0, root6))
);
}
fn max_cache_slots() -> usize {
// this used to be the limiting factor - used here to facilitate tests.
200
}
#[test]
fn test_flush_accounts_cache_if_needed() {
run_test_flush_accounts_cache_if_needed(0, 2 * max_cache_slots());
run_test_flush_accounts_cache_if_needed(2 * max_cache_slots(), 0);
run_test_flush_accounts_cache_if_needed(max_cache_slots() - 1, 0);
run_test_flush_accounts_cache_if_needed(0, max_cache_slots() - 1);
run_test_flush_accounts_cache_if_needed(max_cache_slots(), 0);
run_test_flush_accounts_cache_if_needed(0, max_cache_slots());
run_test_flush_accounts_cache_if_needed(2 * max_cache_slots(), 2 * max_cache_slots());
run_test_flush_accounts_cache_if_needed(max_cache_slots() - 1, max_cache_slots() - 1);
run_test_flush_accounts_cache_if_needed(max_cache_slots(), max_cache_slots());
}
fn run_test_flush_accounts_cache_if_needed(num_roots: usize, num_unrooted: usize) {
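// Store `num_roots` rooted and `num_unrooted` unrooted cached slots, then
// verify which slots survive a non-forced flush given the cache size limit and
// that every account remains loadable afterwards.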
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
db.write_cache_limit_bytes = Some(max_cache_slots() as u64);
db.caching_enabled = true;
let space = 1; // # data bytes per account. write cache counts data len
let account0 = AccountSharedData::new(1, space, &Pubkey::default());
let mut keys = vec![];
let num_slots = 2 * max_cache_slots();
for i in 0..num_roots + num_unrooted {
let key = Pubkey::new_unique();
db.store_cached(i as Slot, &[(&key, &account0)]);
keys.push(key);
db.mark_slot_frozen(i as Slot);
if i < num_roots {
db.add_root(i as Slot);
}
}
db.flush_accounts_cache(false, None);
let total_slots = num_roots + num_unrooted;
// If there are <= max_cache_slots() slots, then nothing will be flushed from the cache
if total_slots <= max_cache_slots() {
assert_eq!(db.accounts_cache.num_slots(), total_slots);
} else {
// Otherwise, all the roots are flushed, and only at most max_cache_slots()
// of the unrooted slots are kept in the cache
let expected_size = std::cmp::min(num_unrooted, max_cache_slots());
if expected_size > 0 {
// +1: slot is 1-based. slot 1 has 1 byte of data
for unrooted_slot in (total_slots - expected_size + 1)..total_slots {
assert!(
db.accounts_cache
.slot_cache(unrooted_slot as Slot)
.is_some(),
"unrooted_slot: {}, total_slots: {}, expected_size: {}",
unrooted_slot,
total_slots,
expected_size
);
}
}
}
// Should still be able to fetch all the accounts after flush
for (slot, key) in (0..num_slots as Slot).zip(keys) {
let ancestors = if slot < num_roots as Slot {
Ancestors::default()
} else {
vec![(slot, 1)].into_iter().collect()
};
assert_eq!(
db.load_without_fixed_root(&ancestors, &key),
Some((account0.clone(), slot))
);
}
}
fn slot_stores(db: &AccountsDb, slot: Slot) -> Vec<Arc<AccountStorageEntry>> {
db.storage
.get_slot_storage_entries(slot)
.unwrap_or_default()
}
#[test]
fn test_read_only_accounts_cache() {
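// The first load after flushing populates the read-only accounts cache,
// repeated loads keep a single entry, and a newer cached store of the key is
// reflected by subsequent loads.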
let caching_enabled = true;
let db = Arc::new(AccountsDb::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
));
let account_key = Pubkey::new_unique();
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
let slot1_account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
db.store_cached(0, &[(&account_key, &zero_lamport_account)]);
db.store_cached(1, &[(&account_key, &slot1_account)]);
db.add_root(0);
db.add_root(1);
db.clean_accounts(None, false, None);
db.flush_accounts_cache(true, None);
db.clean_accounts(None, false, None);
db.add_root(2);
assert_eq!(db.read_only_accounts_cache.cache_len(), 0);
let account = db
.load_with_fixed_root(&Ancestors::default(), &account_key)
.map(|(account, _)| account)
.unwrap();
assert_eq!(account.lamports(), 1);
assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
let account = db
.load_with_fixed_root(&Ancestors::default(), &account_key)
.map(|(account, _)| account)
.unwrap();
assert_eq!(account.lamports(), 1);
assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
db.store_cached(2, &[(&account_key, &zero_lamport_account)]);
assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
let account = db
.load_with_fixed_root(&Ancestors::default(), &account_key)
.map(|(account, _)| account)
.unwrap();
assert_eq!(account.lamports(), 0);
assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
}
#[test]
fn test_flush_cache_clean() {
let caching_enabled = true;
let db = Arc::new(AccountsDb::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
));
let account_key = Pubkey::new_unique();
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
let slot1_account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
db.store_cached(0, &[(&account_key, &zero_lamport_account)]);
db.store_cached(1, &[(&account_key, &slot1_account)]);
db.add_root(0);
db.add_root(1);
// Clean should not remove anything yet as nothing has been flushed
db.clean_accounts(None, false, None);
let account = db
.do_load(
&Ancestors::default(),
&account_key,
Some(0),
LoadHint::Unspecified,
)
.unwrap();
assert_eq!(account.0.lamports(), 0);
// since this item is in the cache, it should not be in the read only cache
assert_eq!(db.read_only_accounts_cache.cache_len(), 0);
// Flush, then clean again. Should not need another root to initiate the cleaning
// because `accounts_index.uncleaned_roots` should be correct
db.flush_accounts_cache(true, None);
db.clean_accounts(None, false, None);
assert!(db
.do_load(
&Ancestors::default(),
&account_key,
Some(0),
LoadHint::Unspecified
)
.is_none());
}
#[test]
fn test_flush_cache_dont_clean_zero_lamport_account() {
let caching_enabled = true;
let db = Arc::new(AccountsDb::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
));
let zero_lamport_account_key = Pubkey::new_unique();
let other_account_key = Pubkey::new_unique();
let original_lamports = 1;
let slot0_account =
AccountSharedData::new(original_lamports, 1, AccountSharedData::default().owner());
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// Store into slot 0, and then flush the slot to storage
db.store_cached(0, &[(&zero_lamport_account_key, &slot0_account)]);
// Second key keeps other lamport account entry for slot 0 alive,
// preventing clean of the zero_lamport_account in slot 1.
db.store_cached(0, &[(&other_account_key, &slot0_account)]);
db.add_root(0);
db.flush_accounts_cache(true, None);
assert!(!db.storage.get_slot_storage_entries(0).unwrap().is_empty());
// Store into slot 1, a dummy slot that will be dead and purged before flush
db.store_cached(1, &[(&zero_lamport_account_key, &zero_lamport_account)]);
// Store into slot 2, which makes all updates from slot 1 outdated.
// This means slot 1 is a dead slot. Later, slot 1 will be cleaned/purged
        // before it even reaches storage, but this purge of slot 1 should not affect
// the refcount of `zero_lamport_account_key` because cached keys do not bump
// the refcount in the index. This means clean should *not* remove
// `zero_lamport_account_key` from slot 2
db.store_cached(2, &[(&zero_lamport_account_key, &zero_lamport_account)]);
db.add_root(1);
db.add_root(2);
// Flush, then clean. Should not need another root to initiate the cleaning
// because `accounts_index.uncleaned_roots` should be correct
db.flush_accounts_cache(true, None);
db.clean_accounts(None, false, None);
// The `zero_lamport_account_key` is still alive in slot 1, so refcount for the
// pubkey should be 2
assert_eq!(
db.accounts_index
.ref_count_from_storage(&zero_lamport_account_key),
2
);
assert_eq!(
db.accounts_index.ref_count_from_storage(&other_account_key),
1
);
// The zero-lamport account in slot 2 should not be purged yet, because the
// entry in slot 1 is blocking cleanup of the zero-lamport account.
let max_root = None;
// Fine to simulate a transaction load since we are not doing any out of band
// removals, only using clean_accounts
let load_hint = LoadHint::FixedMaxRoot;
assert_eq!(
db.do_load(
&Ancestors::default(),
&zero_lamport_account_key,
max_root,
load_hint
)
.unwrap()
.0
.lamports(),
0
);
}
struct ScanTracker {
t_scan: JoinHandle<()>,
exit: Arc<AtomicBool>,
}
impl ScanTracker {
fn exit(self) -> thread::Result<()> {
self.exit.store(true, Ordering::Relaxed);
self.t_scan.join()
}
}
fn setup_scan(
db: Arc<AccountsDb>,
scan_ancestors: Arc<Ancestors>,
bank_id: BankId,
stall_key: Pubkey,
) -> ScanTracker {
let exit = Arc::new(AtomicBool::new(false));
let exit_ = exit.clone();
let ready = Arc::new(AtomicBool::new(false));
let ready_ = ready.clone();
let t_scan = Builder::new()
.name("scan".to_string())
.spawn(move || {
db.scan_accounts(
&scan_ancestors,
bank_id,
|_collector: &mut Vec<(Pubkey, AccountSharedData)>, maybe_account| {
ready_.store(true, Ordering::Relaxed);
if let Some((pubkey, _, _)) = maybe_account {
if *pubkey == stall_key {
loop {
if exit_.load(Ordering::Relaxed) {
break;
} else {
sleep(Duration::from_millis(10));
}
}
}
}
},
&ScanConfig::default(),
)
.unwrap();
})
.unwrap();
// Wait for scan to start
while !ready.load(Ordering::Relaxed) {
sleep(Duration::from_millis(10));
}
ScanTracker { t_scan, exit }
}
#[test]
fn test_scan_flush_accounts_cache_then_clean_drop() {
let caching_enabled = true;
let db = Arc::new(AccountsDb::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
));
let account_key = Pubkey::new_unique();
let account_key2 = Pubkey::new_unique();
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
let slot1_account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
let slot2_account = AccountSharedData::new(2, 1, AccountSharedData::default().owner());
/*
Store zero lamport account into slots 0, 1, 2 where
root slots are 0, 2, and slot 1 is unrooted.
0 (root)
/ \
1 2 (root)
*/
db.store_cached(0, &[(&account_key, &zero_lamport_account)]);
db.store_cached(1, &[(&account_key, &slot1_account)]);
// Fodder for the scan so that the lock on `account_key` is not held
db.store_cached(1, &[(&account_key2, &slot1_account)]);
db.store_cached(2, &[(&account_key, &slot2_account)]);
db.get_accounts_delta_hash(0);
let max_scan_root = 0;
db.add_root(max_scan_root);
let scan_ancestors: Arc<Ancestors> = Arc::new(vec![(0, 1), (1, 1)].into_iter().collect());
let bank_id = 0;
let scan_tracker = setup_scan(db.clone(), scan_ancestors.clone(), bank_id, account_key2);
// Add a new root 2
let new_root = 2;
db.get_accounts_delta_hash(new_root);
db.add_root(new_root);
// Check that the scan is properly set up
assert_eq!(
db.accounts_index.min_ongoing_scan_root().unwrap(),
max_scan_root
);
// If we specify a requested_flush_root == 2, then `slot 2 <= max_flush_slot` will
// be flushed even though `slot 2 > max_scan_root`. The unrooted slot 1 should
// remain in the cache
db.flush_accounts_cache(true, Some(new_root));
assert_eq!(db.accounts_cache.num_slots(), 1);
assert!(db.accounts_cache.slot_cache(1).is_some());
// Intra cache cleaning should not clean the entry for `account_key` from slot 0,
// even though it was updated in slot `2` because of the ongoing scan
let account = db
.do_load(
&Ancestors::default(),
&account_key,
Some(0),
LoadHint::Unspecified,
)
.unwrap();
assert_eq!(account.0.lamports(), zero_lamport_account.lamports());
// Run clean, unrooted slot 1 should not be purged, and still readable from the cache,
// because we're still doing a scan on it.
db.clean_accounts(None, false, None);
let account = db
.do_load(
&scan_ancestors,
&account_key,
Some(max_scan_root),
LoadHint::Unspecified,
)
.unwrap();
assert_eq!(account.0.lamports(), slot1_account.lamports());
// When the scan is over, clean should not panic and should not purge something
// still in the cache.
scan_tracker.exit().unwrap();
db.clean_accounts(None, false, None);
let account = db
.do_load(
&scan_ancestors,
&account_key,
Some(max_scan_root),
LoadHint::Unspecified,
)
.unwrap();
assert_eq!(account.0.lamports(), slot1_account.lamports());
// Simulate dropping the bank, which finally removes the slot from the cache
let bank_id = 1;
db.purge_slot(1, bank_id, false);
assert!(db
.do_load(
&scan_ancestors,
&account_key,
Some(max_scan_root),
LoadHint::Unspecified
)
.is_none());
}
#[test]
fn test_alive_bytes() {
let caching_enabled = true;
let accounts_db = AccountsDb::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
);
let slot: Slot = 0;
let num_keys = 10;
for data_size in 0..num_keys {
let account = AccountSharedData::new(1, data_size, &Pubkey::default());
accounts_db.store_cached(slot, &[(&Pubkey::new_unique(), &account)]);
}
accounts_db.add_root(slot);
accounts_db.flush_accounts_cache(true, None);
let mut storage_maps: Vec<Arc<AccountStorageEntry>> = accounts_db
.storage
.get_slot_storage_entries(slot)
.unwrap_or_default();
// Flushing cache should only create one storage entry
assert_eq!(storage_maps.len(), 1);
let storage0 = storage_maps.pop().unwrap();
let accounts = storage0.all_accounts();
for account in accounts {
let before_size = storage0.alive_bytes.load(Ordering::Acquire);
let account_info = accounts_db
.accounts_index
.get_account_read_entry(&account.meta.pubkey)
.map(|locked_entry| {
// Should only be one entry per key, since every key was only stored to slot 0
locked_entry.slot_list()[0]
})
.unwrap();
let removed_data_size = account_info.1.stored_size;
// Fetching the account from storage should return the same
// stored size as in the index.
assert_eq!(removed_data_size, account.stored_size);
assert_eq!(account_info.0, slot);
let reclaims = vec![account_info];
accounts_db.remove_dead_accounts(&reclaims, None, None, true);
let after_size = storage0.alive_bytes.load(Ordering::Acquire);
assert_eq!(before_size, after_size + account.stored_size);
}
}
fn setup_accounts_db_cache_clean(
num_slots: usize,
scan_slot: Option<Slot>,
write_cache_limit_bytes: Option<u64>,
) -> (Arc<AccountsDb>, Vec<Pubkey>, Vec<Slot>, Option<ScanTracker>) |
#[test]
fn test_accounts_db_cache_clean_dead_slots() {
let num_slots = 10;
let (accounts_db, keys, mut slots, _) =
setup_accounts_db_cache_clean(num_slots, None, None);
let last_dead_slot = (num_slots - 1) as Slot;
assert_eq!(*slots.last().unwrap(), last_dead_slot);
let alive_slot = last_dead_slot as Slot + 1;
slots.push(alive_slot);
for key in &keys {
// Store a slot that overwrites all previous keys, rendering all previous keys dead
accounts_db.store_cached(
alive_slot,
&[(key, &AccountSharedData::new(1, 0, &Pubkey::default()))],
);
accounts_db.add_root(alive_slot);
}
// Before the flush, we can find entries in the database for slots < alive_slot if we specify
// a smaller max root
for key in &keys {
assert!(accounts_db
.do_load(
&Ancestors::default(),
key,
Some(last_dead_slot),
LoadHint::Unspecified
)
.is_some());
}
// If no `max_clean_root` is specified, cleaning should purge all flushed slots
accounts_db.flush_accounts_cache(true, None);
assert_eq!(accounts_db.accounts_cache.num_slots(), 0);
let mut uncleaned_roots = accounts_db
.accounts_index
.clear_uncleaned_roots(None)
.into_iter()
.collect::<Vec<_>>();
uncleaned_roots.sort_unstable();
assert_eq!(uncleaned_roots, slots);
assert_eq!(
accounts_db.accounts_cache.fetch_max_flush_root(),
alive_slot,
);
// Specifying a max_root < alive_slot, should not return any more entries,
// as those have been purged from the accounts index for the dead slots.
for key in &keys {
assert!(accounts_db
.do_load(
&Ancestors::default(),
key,
Some(last_dead_slot),
LoadHint::Unspecified
)
.is_none());
}
// Each slot should only have one entry in the storage, since all other accounts were
// cleaned due to later updates
for slot in &slots {
if let ScanStorageResult::Stored(slot_accounts) = accounts_db.scan_account_storage(
*slot as Slot,
|_| Some(0),
|slot_accounts: &DashSet<Pubkey>, loaded_account: LoadedAccount| {
slot_accounts.insert(*loaded_account.pubkey());
},
) {
if *slot == alive_slot {
assert_eq!(slot_accounts.len(), keys.len());
} else {
assert!(slot_accounts.is_empty());
}
} else {
panic!("Expected slot to be in storage, not cache");
}
}
}
#[test]
fn test_accounts_db_cache_clean() {
let (accounts_db, keys, slots, _) = setup_accounts_db_cache_clean(10, None, None);
// If no `max_clean_root` is specified, cleaning should purge all flushed slots
accounts_db.flush_accounts_cache(true, None);
assert_eq!(accounts_db.accounts_cache.num_slots(), 0);
let mut uncleaned_roots = accounts_db
.accounts_index
.clear_uncleaned_roots(None)
.into_iter()
.collect::<Vec<_>>();
uncleaned_roots.sort_unstable();
assert_eq!(uncleaned_roots, slots);
assert_eq!(
accounts_db.accounts_cache.fetch_max_flush_root(),
*slots.last().unwrap()
);
// Each slot should only have one entry in the storage, since all other accounts were
// cleaned due to later updates
for slot in &slots {
if let ScanStorageResult::Stored(slot_account) = accounts_db.scan_account_storage(
*slot as Slot,
|_| Some(0),
|slot_account: &Arc<RwLock<Pubkey>>, loaded_account: LoadedAccount| {
*slot_account.write().unwrap() = *loaded_account.pubkey();
},
) {
assert_eq!(*slot_account.read().unwrap(), keys[*slot as usize]);
} else {
panic!("Everything should have been flushed")
}
}
}
fn run_test_accounts_db_cache_clean_max_root(
num_slots: usize,
requested_flush_root: Slot,
scan_root: Option<Slot>,
) {
assert!(requested_flush_root < (num_slots as Slot));
let (accounts_db, keys, slots, scan_tracker) =
setup_accounts_db_cache_clean(num_slots, scan_root, Some(max_cache_slots() as u64));
let is_cache_at_limit = num_slots - requested_flush_root as usize - 1 > max_cache_slots();
// If:
// 1) `requested_flush_root` is specified,
// 2) not at the cache limit, i.e. `is_cache_at_limit == false`, then
// `flush_accounts_cache()` should clean and flush only slots <= requested_flush_root,
accounts_db.flush_accounts_cache(true, Some(requested_flush_root));
if !is_cache_at_limit {
// Should flush all slots between 0..=requested_flush_root
assert_eq!(
accounts_db.accounts_cache.num_slots(),
slots.len() - requested_flush_root as usize - 1
);
} else {
// Otherwise, if we are at the cache limit, all roots will be flushed
            assert_eq!(accounts_db.accounts_cache.num_slots(), 0);
}
let mut uncleaned_roots = accounts_db
.accounts_index
.clear_uncleaned_roots(None)
.into_iter()
.collect::<Vec<_>>();
uncleaned_roots.sort_unstable();
let expected_max_flushed_root = if !is_cache_at_limit {
// Should flush all slots between 0..=requested_flush_root
requested_flush_root
} else {
// Otherwise, if we are at the cache limit, all roots will be flushed
num_slots as Slot - 1
};
assert_eq!(
uncleaned_roots,
slots[0..=expected_max_flushed_root as usize].to_vec()
);
assert_eq!(
accounts_db.accounts_cache.fetch_max_flush_root(),
expected_max_flushed_root,
);
for slot in &slots {
let slot_accounts = accounts_db.scan_account_storage(
*slot as Slot,
|loaded_account: LoadedAccount| {
assert!(
!is_cache_at_limit,
"When cache is at limit, all roots should have been flushed to storage"
);
// All slots <= requested_flush_root should have been flushed, regardless
// of ongoing scans
assert!(*slot > requested_flush_root);
Some(*loaded_account.pubkey())
},
|slot_accounts: &DashSet<Pubkey>, loaded_account: LoadedAccount| {
slot_accounts.insert(*loaded_account.pubkey());
if !is_cache_at_limit {
// Only true when the limit hasn't been reached and there are still
// slots left in the cache
assert!(*slot <= requested_flush_root);
}
},
);
let slot_accounts = match slot_accounts {
ScanStorageResult::Cached(slot_accounts) => {
slot_accounts.into_iter().collect::<HashSet<Pubkey>>()
}
ScanStorageResult::Stored(slot_accounts) => {
slot_accounts.into_iter().collect::<HashSet<Pubkey>>()
}
};
let expected_accounts =
if *slot >= requested_flush_root || *slot >= scan_root.unwrap_or(Slot::MAX) {
// 1) If slot > `requested_flush_root`, then either:
// a) If `is_cache_at_limit == false`, still in the cache
// b) if `is_cache_at_limit == true`, were not cleaned before being flushed to storage.
//
// In both cases all the *original* updates at index `slot` were uncleaned and thus
// should be discoverable by this scan.
//
// 2) If slot == `requested_flush_root`, the slot was not cleaned before being flushed to storage,
// so it also contains all the original updates.
//
// 3) If *slot >= scan_root, then we should not clean it either
keys[*slot as usize..]
.iter()
.cloned()
.collect::<HashSet<Pubkey>>()
} else {
// Slots less than `requested_flush_root` and `scan_root` were cleaned in the cache before being flushed
// to storage, should only contain one account
                    std::iter::once(keys[*slot as usize]).collect::<HashSet<Pubkey>>()
};
assert_eq!(slot_accounts, expected_accounts);
}
if let Some(scan_tracker) = scan_tracker {
scan_tracker.exit().unwrap();
}
}
#[test]
fn test_accounts_db_cache_clean_max_root() {
let requested_flush_root = 5;
run_test_accounts_db_cache_clean_max_root(10, requested_flush_root, None);
}
#[test]
fn test_accounts_db_cache_clean_max_root_with_scan() {
let requested_flush_root = 5;
run_test_accounts_db_cache_clean_max_root(
10,
requested_flush_root,
Some(requested_flush_root - 1),
);
run_test_accounts_db_cache_clean_max_root(
10,
requested_flush_root,
Some(requested_flush_root + 1),
);
}
#[test]
fn test_accounts_db_cache_clean_max_root_with_cache_limit_hit() {
let requested_flush_root = 5;
// Test that if there are > max_cache_slots() in the cache after flush, then more roots
// will be flushed
run_test_accounts_db_cache_clean_max_root(
max_cache_slots() + requested_flush_root as usize + 2,
requested_flush_root,
None,
);
}
#[test]
fn test_accounts_db_cache_clean_max_root_with_cache_limit_hit_and_scan() {
let requested_flush_root = 5;
// Test that if there are > max_cache_slots() in the cache after flush, then more roots
// will be flushed
run_test_accounts_db_cache_clean_max_root(
max_cache_slots() + requested_flush_root as usize + 2,
requested_flush_root,
Some(requested_flush_root - 1),
);
run_test_accounts_db_cache_clean_max_root(
max_cache_slots() + requested_flush_root as usize + 2,
requested_flush_root,
Some(requested_flush_root + 1),
);
}
fn run_flush_rooted_accounts_cache(should_clean: bool) {
let num_slots = 10;
let (accounts_db, keys, slots, _) = setup_accounts_db_cache_clean(num_slots, None, None);
let mut cleaned_bytes = 0;
let mut cleaned_accounts = 0;
let should_clean_tracker = if should_clean {
Some((&mut cleaned_bytes, &mut cleaned_accounts))
} else {
None
};
// If no cleaning is specified, then flush everything
accounts_db.flush_rooted_accounts_cache(None, should_clean_tracker);
for slot in &slots {
let slot_accounts = if let ScanStorageResult::Stored(slot_accounts) = accounts_db
.scan_account_storage(
*slot as Slot,
|_| Some(0),
|slot_account: &DashSet<Pubkey>, loaded_account: LoadedAccount| {
slot_account.insert(*loaded_account.pubkey());
},
) {
slot_accounts.into_iter().collect::<HashSet<Pubkey>>()
} else {
panic!("All roots should have been flushed to storage");
};
if !should_clean || slot == slots.last().unwrap() {
// The slot was not cleaned before being flushed to storage,
// so it also contains all the original updates.
assert_eq!(
slot_accounts,
keys[*slot as usize..]
.iter()
.cloned()
.collect::<HashSet<Pubkey>>()
);
} else {
// If clean was specified, only the latest slot should have all the updates.
// All these other slots have been cleaned before flush
assert_eq!(
slot_accounts,
                    std::iter::once(keys[*slot as usize]).collect::<HashSet<Pubkey>>()
);
}
}
}
#[test]
fn test_flush_rooted_accounts_cache_with_clean() {
run_flush_rooted_accounts_cache(true);
}
#[test]
fn test_flush_rooted_accounts_cache_without_clean() {
run_flush_rooted_accounts_cache(false);
}
fn run_test_shrink_unref(do_intra_cache_clean: bool) {
// Enable caching so that we use the straightforward implementation
// of shrink that will shrink all candidate slots
let caching_enabled = true;
let db = AccountsDb::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
);
let account_key1 = Pubkey::new_unique();
let account_key2 = Pubkey::new_unique();
let account1 = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
// Store into slot 0
db.store_cached(0, &[(&account_key1, &account1)]);
db.store_cached(0, &[(&account_key2, &account1)]);
db.add_root(0);
if !do_intra_cache_clean {
// If we don't want the cache doing purges before flush,
// then we cannot flush multiple roots at once, otherwise the later
// roots will clean the earlier roots before they are stored.
// Thus flush the roots individually
db.flush_accounts_cache(true, None);
// Add an additional ref within the same slot to pubkey 1
db.store_uncached(0, &[(&account_key1, &account1)]);
}
// Make account_key1 in slot 0 outdated by updating in rooted slot 1
db.store_cached(1, &[(&account_key1, &account1)]);
db.add_root(1);
// Flushes all roots
db.flush_accounts_cache(true, None);
db.get_accounts_delta_hash(0);
db.get_accounts_delta_hash(1);
// Clean to remove outdated entry from slot 0
db.clean_accounts(Some(1), false, None);
// Shrink Slot 0
let mut slot0_stores = db.storage.get_slot_storage_entries(0).unwrap();
assert_eq!(slot0_stores.len(), 1);
let slot0_store = slot0_stores.pop().unwrap();
{
let mut shrink_candidate_slots = db.shrink_candidate_slots.lock().unwrap();
shrink_candidate_slots
.entry(0)
.or_default()
.insert(slot0_store.append_vec_id(), slot0_store);
}
db.shrink_candidate_slots();
// Make slot 0 dead by updating the remaining key
db.store_cached(2, &[(&account_key2, &account1)]);
db.add_root(2);
// Flushes all roots
db.flush_accounts_cache(true, None);
// Should be one store before clean for slot 0
assert_eq!(db.storage.get_slot_storage_entries(0).unwrap().len(), 1);
db.get_accounts_delta_hash(2);
db.clean_accounts(Some(2), false, None);
// No stores should exist for slot 0 after clean
assert!(db.storage.get_slot_storage_entries(0).is_none());
        // Ref count for `account_key1` (account removed earlier by shrink)
        // should be 1, since it was only stored in slots 0 and 1, and slot 0
        // is now dead
assert_eq!(db.accounts_index.ref_count_from_storage(&account_key1), 1);
}
#[test]
fn test_shrink_unref() {
run_test_shrink_unref(false)
}
#[test]
fn test_shrink_unref_with_intra_slot_cleaning() {
run_test_shrink_unref(true)
}
#[test]
fn test_partial_clean() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let account_key1 = Pubkey::new_unique();
let account_key2 = Pubkey::new_unique();
let account1 = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
let account2 = AccountSharedData::new(2, 0, AccountSharedData::default().owner());
let account3 = AccountSharedData::new(3, 0, AccountSharedData::default().owner());
let account4 = AccountSharedData::new(4, 0, AccountSharedData::default().owner());
// Store accounts into slots 0 and 1
db.store_uncached(0, &[(&account_key1, &account1)]);
db.store_uncached(0, &[(&account_key2, &account1)]);
db.store_uncached(1, &[(&account_key1, &account2)]);
db.get_accounts_delta_hash(0);
db.get_accounts_delta_hash(1);
db.print_accounts_stats("pre-clean1");
// clean accounts - no accounts should be cleaned, since no rooted slots
//
        // Check that the uncleaned_pubkeys are not prematurely removed, so that when
        // the slots are rooted and can actually be cleaned, the delta keys are still
        // there.
db.clean_accounts(None, false, None);
db.print_accounts_stats("post-clean1");
// Check stores > 0
assert!(!slot_stores(&db, 0).is_empty());
assert!(!slot_stores(&db, 1).is_empty());
// root slot 0
db.add_root(0);
// store into slot 2
db.store_uncached(2, &[(&account_key2, &account3)]);
db.store_uncached(2, &[(&account_key1, &account3)]);
db.get_accounts_delta_hash(2);
db.clean_accounts(None, false, None);
db.print_accounts_stats("post-clean2");
// root slots 1
db.add_root(1);
db.clean_accounts(None, false, None);
db.print_accounts_stats("post-clean3");
db.store_uncached(3, &[(&account_key2, &account4)]);
db.get_accounts_delta_hash(3);
db.add_root(3);
// Check that we can clean where max_root=3 and slot=2 is not rooted
db.clean_accounts(None, false, None);
assert!(db.uncleaned_pubkeys.is_empty());
db.print_accounts_stats("post-clean4");
assert!(slot_stores(&db, 0).is_empty());
assert!(!slot_stores(&db, 1).is_empty());
}
#[test]
fn test_recycle_stores_expiration() {
solana_logger::setup();
let common_store_path = Path::new("");
let common_slot_id = 12;
let store_file_size = 1000;
let store1_id = 22;
let entry1 = Arc::new(AccountStorageEntry::new(
common_store_path,
common_slot_id,
store1_id,
store_file_size,
));
let store2_id = 44;
let entry2 = Arc::new(AccountStorageEntry::new(
common_store_path,
common_slot_id,
store2_id,
store_file_size,
));
let mut recycle_stores = RecycleStores::default();
recycle_stores.add_entry(entry1);
recycle_stores.add_entry(entry2);
assert_eq!(recycle_stores.entry_count(), 2);
// no expiration for newly added entries
let expired = recycle_stores.expire_old_entries();
assert_eq!(
expired
.iter()
.map(|e| e.append_vec_id())
.collect::<Vec<_>>(),
Vec::<AppendVecId>::new()
);
assert_eq!(
recycle_stores
.iter()
.map(|(_, e)| e.append_vec_id())
.collect::<Vec<_>>(),
vec![store1_id, store2_id]
);
assert_eq!(recycle_stores.entry_count(), 2);
assert_eq!(recycle_stores.total_bytes(), store_file_size * 2);
// expiration for only too old entries
recycle_stores.entries[0].0 =
Instant::now() - Duration::from_secs(EXPIRATION_TTL_SECONDS + 1);
let expired = recycle_stores.expire_old_entries();
assert_eq!(
expired
.iter()
.map(|e| e.append_vec_id())
.collect::<Vec<_>>(),
vec![store1_id]
);
assert_eq!(
recycle_stores
.iter()
.map(|(_, e)| e.append_vec_id())
.collect::<Vec<_>>(),
vec![store2_id]
);
assert_eq!(recycle_stores.entry_count(), 1);
assert_eq!(recycle_stores.total_bytes(), store_file_size);
}
const RACY_SLEEP_MS: u64 = 10;
const RACE_TIME: u64 = 5;
fn start_load_thread(
with_retry: bool,
ancestors: Ancestors,
db: Arc<AccountsDb>,
exit: Arc<AtomicBool>,
pubkey: Arc<Pubkey>,
expected_lamports: impl Fn(&(AccountSharedData, Slot)) -> u64 + Send + 'static,
) -> JoinHandle<()> {
let load_hint = if with_retry {
LoadHint::FixedMaxRoot
} else {
LoadHint::Unspecified
};
std::thread::Builder::new()
.name("account-do-load".to_string())
.spawn(move || {
loop {
if exit.load(Ordering::Relaxed) {
return;
}
                    // Meddle with load_limit to cover all branches of the implementation.
                    // There should be absolutely no behavioral difference; the load_limit-triggered
                    // slow branch should only affect performance.
                    // Ordering::Relaxed is ok because there are no data dependencies; the modified
                    // field is a completely free-standing cfg(test) control-flow knob.
db.load_limit
.store(thread_rng().gen_range(0, 10) as u64, Ordering::Relaxed);
// Load should never be unable to find this key
let loaded_account = db.do_load(&ancestors, &pubkey, None, load_hint).unwrap();
// slot + 1 == account.lamports because of the account-cache-flush thread
assert_eq!(
loaded_account.0.lamports(),
expected_lamports(&loaded_account)
);
}
})
.unwrap()
}
fn do_test_load_account_and_cache_flush_race(with_retry: bool) {
solana_logger::setup();
let caching_enabled = true;
let mut db = AccountsDb::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
);
db.load_delay = RACY_SLEEP_MS;
let db = Arc::new(db);
let pubkey = Arc::new(Pubkey::new_unique());
let exit = Arc::new(AtomicBool::new(false));
db.store_cached(
0,
&[(
&pubkey,
&AccountSharedData::new(1, 0, AccountSharedData::default().owner()),
)],
);
db.add_root(0);
db.flush_accounts_cache(true, None);
let t_flush_accounts_cache = {
let db = db.clone();
let exit = exit.clone();
let pubkey = pubkey.clone();
let mut account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
std::thread::Builder::new()
.name("account-cache-flush".to_string())
.spawn(move || {
let mut slot = 1;
loop {
if exit.load(Ordering::Relaxed) {
return;
}
account.set_lamports(slot + 1);
db.store_cached(slot, &[(&pubkey, &account)]);
db.add_root(slot);
sleep(Duration::from_millis(RACY_SLEEP_MS));
db.flush_accounts_cache(true, None);
slot += 1;
}
})
.unwrap()
};
let t_do_load = start_load_thread(
with_retry,
Ancestors::default(),
db,
exit.clone(),
pubkey,
|(_, slot)| slot + 1,
);
sleep(Duration::from_secs(RACE_TIME));
exit.store(true, Ordering::Relaxed);
t_flush_accounts_cache.join().unwrap();
t_do_load.join().map_err(std::panic::resume_unwind).unwrap()
}
#[test]
fn test_load_account_and_cache_flush_race_with_retry() {
do_test_load_account_and_cache_flush_race(true);
}
#[test]
fn test_load_account_and_cache_flush_race_without_retry() {
do_test_load_account_and_cache_flush_race(false);
}
fn do_test_load_account_and_shrink_race(with_retry: bool) {
let caching_enabled = true;
let mut db = AccountsDb::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
);
db.load_delay = RACY_SLEEP_MS;
let db = Arc::new(db);
let pubkey = Arc::new(Pubkey::new_unique());
let exit = Arc::new(AtomicBool::new(false));
let slot = 1;
// Store an account
let lamports = 42;
let mut account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
account.set_lamports(lamports);
db.store_uncached(slot, &[(&pubkey, &account)]);
// Set the slot as a root so account loads will see the contents of this slot
db.add_root(slot);
let t_shrink_accounts = {
let db = db.clone();
let exit = exit.clone();
std::thread::Builder::new()
.name("account-shrink".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
// Simulate adding shrink candidates from clean_accounts()
let stores = db.storage.get_slot_storage_entries(slot).unwrap();
assert_eq!(stores.len(), 1);
let store = &stores[0];
let store_id = store.append_vec_id();
db.shrink_candidate_slots
.lock()
.unwrap()
.entry(slot)
.or_default()
.insert(store_id, store.clone());
db.shrink_candidate_slots();
})
.unwrap()
};
let t_do_load = start_load_thread(
with_retry,
Ancestors::default(),
db,
exit.clone(),
pubkey,
move |_| lamports,
);
sleep(Duration::from_secs(RACE_TIME));
exit.store(true, Ordering::Relaxed);
t_shrink_accounts.join().unwrap();
t_do_load.join().map_err(std::panic::resume_unwind).unwrap()
}
#[test]
fn test_load_account_and_shrink_race_with_retry() {
do_test_load_account_and_shrink_race(true);
}
#[test]
fn test_load_account_and_shrink_race_without_retry() {
do_test_load_account_and_shrink_race(false);
}
#[test]
fn test_cache_flush_delayed_remove_unrooted_race() {
let caching_enabled = true;
let mut db = AccountsDb::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
);
db.load_delay = RACY_SLEEP_MS;
let db = Arc::new(db);
let slot = 10;
let bank_id = 10;
let lamports = 42;
let mut account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
account.set_lamports(lamports);
// Start up a thread to flush the accounts cache
let (flush_trial_start_sender, flush_trial_start_receiver) = unbounded();
let (flush_done_sender, flush_done_receiver) = unbounded();
let t_flush_cache = {
let db = db.clone();
std::thread::Builder::new()
.name("account-cache-flush".to_string())
.spawn(move || loop {
// Wait for the signal to start a trial
if flush_trial_start_receiver.recv().is_err() {
return;
}
db.flush_slot_cache(10, None::<&mut fn(&_, &_) -> bool>);
flush_done_sender.send(()).unwrap();
})
.unwrap()
};
        // Start up a thread to remove the slot
let (remove_trial_start_sender, remove_trial_start_receiver) = unbounded();
let (remove_done_sender, remove_done_receiver) = unbounded();
let t_remove = {
let db = db.clone();
std::thread::Builder::new()
.name("account-remove".to_string())
.spawn(move || loop {
// Wait for the signal to start a trial
if remove_trial_start_receiver.recv().is_err() {
return;
}
db.remove_unrooted_slots(&[(slot, bank_id)]);
remove_done_sender.send(()).unwrap();
})
.unwrap()
};
let num_trials = 10;
for _ in 0..num_trials {
let pubkey = Pubkey::new_unique();
db.store_cached(slot, &[(&pubkey, &account)]);
// Wait for both threads to finish
flush_trial_start_sender.send(()).unwrap();
remove_trial_start_sender.send(()).unwrap();
let _ = flush_done_receiver.recv();
let _ = remove_done_receiver.recv();
}
drop(flush_trial_start_sender);
drop(remove_trial_start_sender);
t_flush_cache.join().unwrap();
t_remove.join().unwrap();
}
#[test]
fn test_cache_flush_remove_unrooted_race_multiple_slots() {
let caching_enabled = true;
let db = AccountsDb::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
);
let db = Arc::new(db);
let num_cached_slots = 100;
let num_trials = 100;
let (new_trial_start_sender, new_trial_start_receiver) = unbounded();
let (flush_done_sender, flush_done_receiver) = unbounded();
// Start up a thread to flush the accounts cache
let t_flush_cache = {
let db = db.clone();
std::thread::Builder::new()
.name("account-cache-flush".to_string())
.spawn(move || loop {
// Wait for the signal to start a trial
if new_trial_start_receiver.recv().is_err() {
return;
}
for slot in 0..num_cached_slots {
db.flush_slot_cache(slot, None::<&mut fn(&_, &_) -> bool>);
}
flush_done_sender.send(()).unwrap();
})
.unwrap()
};
let exit = Arc::new(AtomicBool::new(false));
let t_spurious_signal = {
let db = db.clone();
let exit = exit.clone();
std::thread::Builder::new()
.name("account-cache-flush".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
// Simulate spurious wake-up that can happen, but is too rare to
// otherwise depend on in tests.
db.remove_unrooted_slots_synchronization.signal.notify_all();
})
.unwrap()
};
// Run multiple trials. Has the added benefit of rewriting the same slots after we've
// dumped them in previous trials.
for _ in 0..num_trials {
// Store an account
let lamports = 42;
let mut account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
account.set_lamports(lamports);
// Pick random 50% of the slots to pass to `remove_unrooted_slots()`
let mut all_slots: Vec<(Slot, BankId)> = (0..num_cached_slots)
.map(|slot| {
let bank_id = slot + 1;
(slot, bank_id)
})
.collect();
all_slots.shuffle(&mut rand::thread_rng());
let slots_to_dump = &all_slots[0..num_cached_slots as usize / 2];
let slots_to_keep = &all_slots[num_cached_slots as usize / 2..];
            // Set up one account per slot across many different slots; track which
            // pubkey was stored in each slot.
let slot_to_pubkey_map: HashMap<Slot, Pubkey> = (0..num_cached_slots)
.map(|slot| {
let pubkey = Pubkey::new_unique();
db.store_cached(slot, &[(&pubkey, &account)]);
(slot, pubkey)
})
.collect();
            // Signal the flushing thread to start flushing
new_trial_start_sender.send(()).unwrap();
// Here we want to test both:
// 1) Flush thread starts flushing a slot before we try dumping it.
// 2) Flushing thread trying to flush while/after we're trying to dump the slot,
// in which case flush should ignore/move past the slot to be dumped
//
// Hence, we split into chunks to get the dumping of each chunk to race with the
// flushes. If we were to dump the entire chunk at once, then this reduces the possibility
// of the flush occurring first since the dumping logic reserves all the slots it's about
// to dump immediately.
for chunks in slots_to_dump.chunks(slots_to_dump.len() / 2) {
db.remove_unrooted_slots(chunks);
}
// Check that all the slots in `slots_to_dump` were completely removed from the
// cache, storage, and index
for (slot, _) in slots_to_dump {
assert!(db.storage.get_slot_storage_entries(*slot).is_none());
assert!(db.accounts_cache.slot_cache(*slot).is_none());
let account_in_slot = slot_to_pubkey_map[slot];
let item = db.accounts_index.get_account_read_entry(&account_in_slot);
assert!(item.is_none(), "item: {:?}", item);
}
// Wait for flush to finish before starting next trial
flush_done_receiver.recv().unwrap();
for (slot, bank_id) in slots_to_keep {
let account_in_slot = slot_to_pubkey_map[slot];
assert!(db
.load(
&Ancestors::from(vec![(*slot, 0)]),
&account_in_slot,
LoadHint::FixedMaxRoot
)
.is_some());
// Clear for next iteration so that `assert!(self.storage.get_slot_stores(purged_slot).is_none());`
// in `purge_slot_pubkeys()` doesn't trigger
db.remove_unrooted_slots(&[(*slot, *bank_id)]);
}
}
exit.store(true, Ordering::Relaxed);
drop(new_trial_start_sender);
t_flush_cache.join().unwrap();
t_spurious_signal.join().unwrap();
}
#[test]
fn test_collect_uncleaned_slots_up_to_slot() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let slot1 = 11;
let slot2 = 222;
let slot3 = 3333;
let pubkey1 = Pubkey::new_unique();
let pubkey2 = Pubkey::new_unique();
let pubkey3 = Pubkey::new_unique();
db.uncleaned_pubkeys.insert(slot1, vec![pubkey1]);
db.uncleaned_pubkeys.insert(slot2, vec![pubkey2]);
db.uncleaned_pubkeys.insert(slot3, vec![pubkey3]);
let mut uncleaned_slots1 = db.collect_uncleaned_slots_up_to_slot(slot1);
let mut uncleaned_slots2 = db.collect_uncleaned_slots_up_to_slot(slot2);
let mut uncleaned_slots3 = db.collect_uncleaned_slots_up_to_slot(slot3);
uncleaned_slots1.sort_unstable();
uncleaned_slots2.sort_unstable();
uncleaned_slots3.sort_unstable();
assert_eq!(uncleaned_slots1, [slot1]);
assert_eq!(uncleaned_slots2, [slot1, slot2]);
assert_eq!(uncleaned_slots3, [slot1, slot2, slot3]);
}
#[test]
fn test_remove_uncleaned_slots_and_collect_pubkeys() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let slot1 = 11;
let slot2 = 222;
let slot3 = 3333;
let pubkey1 = Pubkey::new_unique();
let pubkey2 = Pubkey::new_unique();
let pubkey3 = Pubkey::new_unique();
let account1 = AccountSharedData::new(0, 0, &pubkey1);
let account2 = AccountSharedData::new(0, 0, &pubkey2);
let account3 = AccountSharedData::new(0, 0, &pubkey3);
db.store_uncached(slot1, &[(&pubkey1, &account1)]);
db.store_uncached(slot2, &[(&pubkey2, &account2)]);
db.store_uncached(slot3, &[(&pubkey3, &account3)]);
db.add_root(slot1);
// slot 2 is _not_ a root on purpose
db.add_root(slot3);
db.uncleaned_pubkeys.insert(slot1, vec![pubkey1]);
db.uncleaned_pubkeys.insert(slot2, vec![pubkey2]);
db.uncleaned_pubkeys.insert(slot3, vec![pubkey3]);
let uncleaned_pubkeys1 = db
.remove_uncleaned_slots_and_collect_pubkeys(vec![slot1])
.into_iter()
.flatten()
.collect::<Vec<_>>();
let uncleaned_pubkeys2 = db
.remove_uncleaned_slots_and_collect_pubkeys(vec![slot2])
.into_iter()
.flatten()
.collect::<Vec<_>>();
let uncleaned_pubkeys3 = db
.remove_uncleaned_slots_and_collect_pubkeys(vec![slot3])
.into_iter()
.flatten()
.collect::<Vec<_>>();
assert!(uncleaned_pubkeys1.contains(&pubkey1));
assert!(!uncleaned_pubkeys1.contains(&pubkey2));
assert!(!uncleaned_pubkeys1.contains(&pubkey3));
assert!(!uncleaned_pubkeys2.contains(&pubkey1));
assert!(uncleaned_pubkeys2.contains(&pubkey2));
assert!(!uncleaned_pubkeys2.contains(&pubkey3));
assert!(!uncleaned_pubkeys3.contains(&pubkey1));
assert!(!uncleaned_pubkeys3.contains(&pubkey2));
assert!(uncleaned_pubkeys3.contains(&pubkey3));
}
#[test]
fn test_remove_uncleaned_slots_and_collect_pubkeys_up_to_slot() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let slot1 = 11;
let slot2 = 222;
let slot3 = 3333;
let pubkey1 = Pubkey::new_unique();
let pubkey2 = Pubkey::new_unique();
let pubkey3 = Pubkey::new_unique();
let account1 = AccountSharedData::new(0, 0, &pubkey1);
let account2 = AccountSharedData::new(0, 0, &pubkey2);
let account3 = AccountSharedData::new(0, 0, &pubkey3);
db.store_uncached(slot1, &[(&pubkey1, &account1)]);
db.store_uncached(slot2, &[(&pubkey2, &account2)]);
db.store_uncached(slot3, &[(&pubkey3, &account3)]);
// slot 1 is _not_ a root on purpose
db.add_root(slot2);
db.add_root(slot3);
db.uncleaned_pubkeys.insert(slot1, vec![pubkey1]);
db.uncleaned_pubkeys.insert(slot2, vec![pubkey2]);
db.uncleaned_pubkeys.insert(slot3, vec![pubkey3]);
let uncleaned_pubkeys = db
.remove_uncleaned_slots_and_collect_pubkeys_up_to_slot(slot3)
.into_iter()
.flatten()
.collect::<Vec<_>>();
assert!(uncleaned_pubkeys.contains(&pubkey1));
assert!(uncleaned_pubkeys.contains(&pubkey2));
assert!(uncleaned_pubkeys.contains(&pubkey3));
}
#[test]
fn test_shrink_productive() {
solana_logger::setup();
let s1 = AccountStorageEntry::new(Path::new("."), 0, 0, 1024);
let stores = vec![Arc::new(s1)];
assert!(!AccountsDb::is_shrinking_productive(0, &stores));
let s1 = AccountStorageEntry::new(Path::new("."), 0, 0, PAGE_SIZE * 4);
let stores = vec![Arc::new(s1)];
stores[0].add_account((3 * PAGE_SIZE as usize) - 1);
stores[0].add_account(10);
stores[0].remove_account(10, false);
assert!(AccountsDb::is_shrinking_productive(0, &stores));
stores[0].add_account(PAGE_SIZE as usize);
assert!(!AccountsDb::is_shrinking_productive(0, &stores));
let s1 = AccountStorageEntry::new(Path::new("."), 0, 0, PAGE_SIZE + 1);
s1.add_account(PAGE_SIZE as usize);
let s2 = AccountStorageEntry::new(Path::new("."), 0, 1, PAGE_SIZE + 1);
s2.add_account(PAGE_SIZE as usize);
let stores = vec![Arc::new(s1), Arc::new(s2)];
assert!(AccountsDb::is_shrinking_productive(0, &stores));
}
#[test]
fn test_is_candidate_for_shrink() {
solana_logger::setup();
let mut accounts = AccountsDb::new_single_for_tests();
let common_store_path = Path::new("");
let store_file_size = 2 * PAGE_SIZE;
let entry = Arc::new(AccountStorageEntry::new(
common_store_path,
0,
1,
store_file_size,
));
match accounts.shrink_ratio {
AccountShrinkThreshold::TotalSpace { shrink_ratio } => {
assert_eq!(
(DEFAULT_ACCOUNTS_SHRINK_RATIO * 100.) as u64,
(shrink_ratio * 100.) as u64
)
}
AccountShrinkThreshold::IndividalStore { shrink_ratio: _ } => {
panic!("Expect the default to be TotalSpace")
}
}
entry.alive_bytes.store(3000, Ordering::Release);
assert!(accounts.is_candidate_for_shrink(&entry));
entry.alive_bytes.store(5000, Ordering::Release);
assert!(!accounts.is_candidate_for_shrink(&entry));
accounts.shrink_ratio = AccountShrinkThreshold::TotalSpace { shrink_ratio: 0.3 };
entry.alive_bytes.store(3000, Ordering::Release);
assert!(accounts.is_candidate_for_shrink(&entry));
accounts.shrink_ratio = AccountShrinkThreshold::IndividalStore { shrink_ratio: 0.3 };
assert!(!accounts.is_candidate_for_shrink(&entry));
}
#[test]
fn test_calculate_storage_count_and_alive_bytes() {
let accounts = AccountsDb::new_single_for_tests();
let shared_key = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
let slot0 = 0;
accounts.store_uncached(slot0, &[(&shared_key, &account)]);
let storage_maps = accounts
.storage
.get_slot_storage_entries(slot0)
.unwrap_or_default();
let storage_info = StorageSizeAndCountMap::default();
let accounts_map = accounts.process_storage_slot(&storage_maps[..]);
AccountsDb::update_storage_info(&storage_info, &accounts_map, &Mutex::default());
assert_eq!(storage_info.len(), 1);
for entry in storage_info.iter() {
assert_eq!(
(entry.key(), entry.value().count, entry.value().stored_size),
(&0, 1, 144)
);
}
}
#[test]
fn test_calculate_storage_count_and_alive_bytes_0_accounts() {
let accounts = AccountsDb::new_single_for_tests();
let storage_maps = vec![];
let storage_info = StorageSizeAndCountMap::default();
let accounts_map = accounts.process_storage_slot(&storage_maps[..]);
AccountsDb::update_storage_info(&storage_info, &accounts_map, &Mutex::default());
assert!(storage_info.is_empty());
}
#[test]
fn test_calculate_storage_count_and_alive_bytes_2_accounts() {
let accounts = AccountsDb::new_single_for_tests();
let keys = [
solana_sdk::pubkey::Pubkey::new(&[0; 32]),
solana_sdk::pubkey::Pubkey::new(&[255; 32]),
];
// make sure accounts are in 2 different bins
assert!(
(accounts.accounts_index.bins() == 1)
^ (accounts
.accounts_index
.bin_calculator
.bin_from_pubkey(&keys[0])
!= accounts
.accounts_index
.bin_calculator
.bin_from_pubkey(&keys[1]))
);
let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
let account_big = AccountSharedData::new(1, 1000, AccountSharedData::default().owner());
let slot0 = 0;
accounts.store_uncached(slot0, &[(&keys[0], &account)]);
accounts.store_uncached(slot0, &[(&keys[1], &account_big)]);
let storage_maps = accounts
.storage
.get_slot_storage_entries(slot0)
.unwrap_or_default();
let storage_info = StorageSizeAndCountMap::default();
let accounts_map = accounts.process_storage_slot(&storage_maps[..]);
AccountsDb::update_storage_info(&storage_info, &accounts_map, &Mutex::default());
assert_eq!(storage_info.len(), 1);
for entry in storage_info.iter() {
assert_eq!(
(entry.key(), entry.value().count, entry.value().stored_size),
(&0, 2, 1280)
);
}
}
#[test]
fn test_set_storage_count_and_alive_bytes() {
let accounts = AccountsDb::new_single_for_tests();
// make sure we have storage 0
let shared_key = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
let slot0 = 0;
accounts.store_uncached(slot0, &[(&shared_key, &account)]);
// fake out the store count to avoid the assert
for slot_stores in accounts.storage.0.iter() {
for (_id, store) in slot_stores.value().read().unwrap().iter() {
store.alive_bytes.store(0, Ordering::Release);
}
}
// populate based on made up hash data
let dashmap = DashMap::default();
dashmap.insert(
0,
StorageSizeAndCount {
stored_size: 2,
count: 3,
},
);
accounts.set_storage_count_and_alive_bytes(dashmap, &mut GenerateIndexTimings::default());
assert_eq!(accounts.storage.0.len(), 1);
for slot_stores in accounts.storage.0.iter() {
for (id, store) in slot_stores.value().read().unwrap().iter() {
assert_eq!(id, &0);
assert_eq!(store.count_and_status.read().unwrap().0, 3);
assert_eq!(store.alive_bytes.load(Ordering::Acquire), 2);
}
}
}
#[test]
fn test_purge_alive_unrooted_slots_after_clean() {
let accounts = AccountsDb::new_single_for_tests();
// Key shared between rooted and nonrooted slot
let shared_key = solana_sdk::pubkey::new_rand();
// Key to keep the storage entry for the unrooted slot alive
let unrooted_key = solana_sdk::pubkey::new_rand();
let slot0 = 0;
let slot1 = 1;
// Store accounts with greater than 0 lamports
let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
accounts.store_uncached(slot0, &[(&shared_key, &account)]);
accounts.store_uncached(slot0, &[(&unrooted_key, &account)]);
// Simulate adding dirty pubkeys on bank freeze. Note this is
// not a rooted slot
accounts.get_accounts_delta_hash(slot0);
// On the next *rooted* slot, update the `shared_key` account to zero lamports
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
accounts.store_uncached(slot1, &[(&shared_key, &zero_lamport_account)]);
// Simulate adding dirty pubkeys on bank freeze, set root
accounts.get_accounts_delta_hash(slot1);
accounts.add_root(slot1);
// The later rooted zero-lamport update to `shared_key` cannot be cleaned
// because it is kept alive by the unrooted slot.
accounts.clean_accounts(None, false, None);
assert!(accounts
.accounts_index
.get_account_read_entry(&shared_key)
.is_some());
        // Simulate purge_slot() being called from AccountsBackgroundService
let is_from_abs = true;
accounts.purge_slot(slot0, 0, is_from_abs);
// Now clean should clean up the remaining key
accounts.clean_accounts(None, false, None);
assert!(accounts
.accounts_index
.get_account_read_entry(&shared_key)
.is_none());
assert!(accounts.storage.get_slot_storage_entries(slot0).is_none());
}
/// Test to make sure `clean_accounts()` works properly with the `last_full_snapshot_slot`
/// parameter. Basically:
///
/// - slot 1: set Account1's balance to non-zero
/// - slot 2: set Account1's balance to a different non-zero amount
/// - slot 3: set Account1's balance to zero
/// - call `clean_accounts()` with `max_clean_root` set to 2
/// - ensure Account1 has *not* been purged
/// - ensure the store from slot 1 is cleaned up
/// - call `clean_accounts()` with `last_full_snapshot_slot` set to 2
/// - ensure Account1 has *not* been purged
/// - call `clean_accounts()` with `last_full_snapshot_slot` set to 3
/// - ensure Account1 *has* been purged
#[test]
fn test_clean_accounts_with_last_full_snapshot_slot() {
solana_logger::setup();
let accounts_db = AccountsDb::new_single_for_tests();
let pubkey = solana_sdk::pubkey::new_rand();
let owner = solana_sdk::pubkey::new_rand();
let space = 0;
let slot1 = 1;
let account = AccountSharedData::new(111, space, &owner);
accounts_db.store_cached(slot1, &[(&pubkey, &account)]);
accounts_db.get_accounts_delta_hash(slot1);
accounts_db.add_root(slot1);
let slot2 = 2;
let account = AccountSharedData::new(222, space, &owner);
accounts_db.store_cached(slot2, &[(&pubkey, &account)]);
accounts_db.get_accounts_delta_hash(slot2);
accounts_db.add_root(slot2);
let slot3 = 3;
let account = AccountSharedData::new(0, space, &owner);
accounts_db.store_cached(slot3, &[(&pubkey, &account)]);
accounts_db.get_accounts_delta_hash(slot3);
accounts_db.add_root(slot3);
assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 3);
accounts_db.clean_accounts(Some(slot2), false, Some(slot2));
assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 2);
accounts_db.clean_accounts(None, false, Some(slot2));
assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 1);
accounts_db.clean_accounts(None, false, Some(slot3));
assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 0);
}
#[test]
fn test_filter_zero_lamport_clean_for_incremental_snapshots() {
solana_logger::setup();
let slot = 10;
struct TestParameters {
last_full_snapshot_slot: Option<Slot>,
max_clean_root: Option<Slot>,
should_contain: bool,
}
let do_test = |test_params: TestParameters| {
let account_info = AccountInfo {
store_id: 42,
offset: 123,
stored_size: 234,
lamports: 0,
};
let pubkey = solana_sdk::pubkey::new_rand();
let mut key_set = HashSet::default();
key_set.insert(pubkey);
let store_count = 0;
let mut store_counts = HashMap::default();
store_counts.insert(account_info.store_id, (store_count, key_set));
let mut purges_zero_lamports = HashMap::default();
purges_zero_lamports.insert(pubkey, (vec![(slot, account_info)], 1));
let accounts_db = AccountsDb::new_single_for_tests();
accounts_db.filter_zero_lamport_clean_for_incremental_snapshots(
test_params.max_clean_root,
test_params.last_full_snapshot_slot,
&store_counts,
&mut purges_zero_lamports,
);
assert_eq!(
purges_zero_lamports.contains_key(&pubkey),
test_params.should_contain
);
};
// Scenario 1: last full snapshot is NONE
// In this scenario incremental snapshots are OFF, so always purge
{
let last_full_snapshot_slot = None;
do_test(TestParameters {
last_full_snapshot_slot,
max_clean_root: Some(slot),
should_contain: true,
});
do_test(TestParameters {
last_full_snapshot_slot,
max_clean_root: None,
should_contain: true,
});
}
// Scenario 2: last full snapshot is GREATER THAN zero lamport account slot
// In this scenario always purge, and just test the various permutations of
// `should_filter_for_incremental_snapshots` based on `max_clean_root`.
{
let last_full_snapshot_slot = Some(slot + 1);
do_test(TestParameters {
last_full_snapshot_slot,
max_clean_root: last_full_snapshot_slot,
should_contain: true,
});
do_test(TestParameters {
last_full_snapshot_slot,
max_clean_root: last_full_snapshot_slot.map(|s| s + 1),
should_contain: true,
});
do_test(TestParameters {
last_full_snapshot_slot,
max_clean_root: None,
should_contain: true,
});
}
// Scenario 3: last full snapshot is EQUAL TO zero lamport account slot
// In this scenario always purge, as it's the same as Scenario 2.
{
let last_full_snapshot_slot = Some(slot);
do_test(TestParameters {
last_full_snapshot_slot,
max_clean_root: last_full_snapshot_slot,
should_contain: true,
});
do_test(TestParameters {
last_full_snapshot_slot,
max_clean_root: last_full_snapshot_slot.map(|s| s + 1),
should_contain: true,
});
do_test(TestParameters {
last_full_snapshot_slot,
max_clean_root: None,
should_contain: true,
});
}
// Scenario 4: last full snapshot is LESS THAN zero lamport account slot
// In this scenario do *not* purge, except when `should_filter_for_incremental_snapshots`
// is false
{
let last_full_snapshot_slot = Some(slot - 1);
do_test(TestParameters {
last_full_snapshot_slot,
max_clean_root: last_full_snapshot_slot,
should_contain: true,
});
do_test(TestParameters {
last_full_snapshot_slot,
max_clean_root: last_full_snapshot_slot.map(|s| s + 1),
should_contain: false,
});
do_test(TestParameters {
last_full_snapshot_slot,
max_clean_root: None,
should_contain: false,
});
}
}
}
| {
let caching_enabled = true;
let mut accounts_db = AccountsDb::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
);
accounts_db.write_cache_limit_bytes = write_cache_limit_bytes;
let accounts_db = Arc::new(accounts_db);
let slots: Vec<_> = (0..num_slots as Slot).into_iter().collect();
let stall_slot = num_slots as Slot;
let scan_stall_key = Pubkey::new_unique();
let keys: Vec<Pubkey> = std::iter::repeat_with(Pubkey::new_unique)
.take(num_slots)
.collect();
if scan_slot.is_some() {
accounts_db.store_cached(
// Store it in a slot that isn't returned in `slots`
stall_slot,
&[(
&scan_stall_key,
&AccountSharedData::new(1, 0, &Pubkey::default()),
)],
);
}
// Store some subset of the keys in slots 0..num_slots
let mut scan_tracker = None;
for slot in &slots {
for key in &keys[*slot as usize..] {
let space = 1; // 1 byte allows us to track by size
accounts_db.store_cached(
*slot,
&[(key, &AccountSharedData::new(1, space, &Pubkey::default()))],
);
}
accounts_db.add_root(*slot as Slot);
if Some(*slot) == scan_slot {
let ancestors = Arc::new(vec![(stall_slot, 1), (*slot, 1)].into_iter().collect());
let bank_id = 0;
scan_tracker = Some(setup_scan(
accounts_db.clone(),
ancestors,
bank_id,
scan_stall_key,
));
assert_eq!(
accounts_db.accounts_index.min_ongoing_scan_root().unwrap(),
*slot
);
}
}
accounts_db.accounts_cache.remove_slot(stall_slot);
// If there's <= max_cache_slots(), no slots should be flushed
if accounts_db.accounts_cache.num_slots() <= max_cache_slots() {
accounts_db.flush_accounts_cache(false, None);
assert_eq!(accounts_db.accounts_cache.num_slots(), num_slots);
}
(accounts_db, keys, slots, scan_tracker)
} |
search.ts | import { Locale, AvailableState } from "../../common"
import { normalizeLocaleKey } from "./normalize"
const citySuffixes = (suffixes: string[], city: string): string[] => {
if (suffixes.some(suffix => city.endsWith(suffix))) {
return [city]
}
return [city, ...suffixes.map(suffix => city + suffix)]
}
/* add in possible prefixes, assuming larger entities are most likely to have prefix dropped */
const possibleBeginnings = [
'city of ',
'town of ',
'village of ',
]
const wisconsinCities = (city: string): string[] => {
if (possibleBeginnings.some(beginning => city.startsWith(beginning))) {
return [city]
}
return [city, ...possibleBeginnings.map(beginning => beginning + city)]
}
/* cities that span multiple counties are listed without a county */
const wisconsinCounties = (county: string): string[] => {
return [county, '']
}
/** States that have cities independent of counties. These are usually large cities,
 * so just try matching on the city first and then fall back to the county if the city doesn't match
*/
const keysCityState = (locale: Locale<AvailableState>): string[] => {
const cityLocales = citySuffixes([' city'], locale.city)
.map(city => normalizeLocaleKey({...locale, city}))
const countyLocales = normalizeLocaleKey({...locale, city: ''})
return cityLocales.concat([countyLocales])
}
const michiganCitySuffixes = (city: string): string[] => {
  // Township / Charter Township are relatively interchangeable
if (city.endsWith(' charter township')) {
return [city, city.replace(' charter township', ' township')]
} else if (city.endsWith(' township')) {
return [city, city.replace(' township', ' charter township')]
} else if (city.endsWith(' city')) { // we sometimes drop the name city
return [city]
} else {
return [city, city + ' city', city + ' township', city + ' charter township']
}
}
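// Illustrative sketch of the behaviour above (added for clarity; 'ann arbor' and
// 'canton' are made-up inputs, not taken from the original data):
//   michiganCitySuffixes('ann arbor')
//     -> ['ann arbor', 'ann arbor city', 'ann arbor township', 'ann arbor charter township']
//   michiganCitySuffixes('canton charter township')
//     -> ['canton charter township', 'canton township']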
/* list of keys to try (in order) */
export const keys = (
_locale: Locale<AvailableState>,
): string[] => { | county: _locale.county ? _locale.county.toLocaleLowerCase() : _locale.county,
otherCities: _locale.otherCities ? _locale.otherCities.map(c => c.toLocaleLowerCase()) : _locale.otherCities,
}
switch(locale.state) {
case 'Arizona':
case 'Florida':
case 'Georgia':
case 'Maine':
case 'Minnesota':
case 'Nebraska':
case 'New York':
case 'Wyoming': {
return [normalizeLocaleKey(locale)]
}
case 'Michigan': {
      // In Michigan, first try 'administrative_area_level_3' (otherCities)
      // before 'locality' (city) and vary each of them
const orderedCities = [
...(locale?.otherCities ?? []),
locale.city
].flatMap(michiganCitySuffixes)
return orderedCities.map(city => normalizeLocaleKey({...locale, city}))
}
case 'Wisconsin': {
if (!locale.county) return []
return wisconsinCounties(locale.county).flatMap(county =>
wisconsinCities(locale.city).map(city =>
normalizeLocaleKey({...locale, city, county})
)
)
}
case 'Maryland':
case 'Virginia':
case 'Nevada': {
return keysCityState(locale)
}
}
} | const locale = {
..._locale,
city: _locale.city.toLocaleLowerCase(), |
gulpfile.js | // Include gulp
var gulp = require('gulp');
// Include Our Plugins
var concat = require('gulp-concat'),
uglify = require('gulp-uglify'),
rename = require('gulp-rename'),
compass = require('gulp-compass'),
data = require('gulp-data'),
gutil = require('gulp-util'),
browserSync = require('browser-sync').create(),
del = require('del'),
plumber = require('gulp-plumber'),
coffee = require('gulp-coffee'),
jade = require('gulp-jade'),
fs = require('fs');
var getJsonData = function(file){
return JSON.parse(fs.readFileSync('./Development/jade/data/pages.json', 'utf8'));
};
gulp.task('jade', function() {
return gulp.src('Development/jade/*.jade')
.pipe(data(getJsonData))
.pipe(plumber({
errorHandler: function (error) {
console.log(error.message);
this.emit('end');
}}))
.pipe(jade({
pretty: true
}))
.pipe(gulp.dest("Production/"));
});
// Compile Our Sass with Compass
gulp.task('sass', function() {
return gulp.src('Development/scss/compile/*.scss')
.pipe(plumber({
errorHandler: function (error) {
console.log(error.message);
this.emit('end');
}}))
.pipe(compass({
'sass': 'Development/scss/compile', | }))
.on('error', function(err) {})
.pipe(gulp.dest('Production/css'));
});
gulp.task('css', ['sass'], function () {
del(['Production/css/**/*', '!Production/css/main.css']);
});
// JS - concat and min
gulp.task('js', function() {
return gulp.src('Development/scripts/*.js')
.pipe(concat('app.min.js'))
.pipe(uglify())
.pipe(gulp.dest('Production/js'));
});
// COFFEE SCRIPT
gulp.task('coffee', function() {
gulp.src('Development/coffee/*.coffee')
.pipe(coffee({bare: true}).on('error', gutil.log))
.pipe(uglify())
.pipe(gulp.dest('Development/scripts/'))
});
// JS PLUGINS - concat and min
gulp.task('plugins', function() {
return gulp.src('Development/requirements/*.js')
.pipe(concat('plugins.min.js'))
.pipe(uglify())
.pipe(gulp.dest('Production/js'));
});
// Watch Files For Changes
gulp.task('watch', function() {
gulp.watch('Development/coffee/**/*', ['coffee']);
gulp.watch('Development/scripts/**/*', ['js']);
gulp.watch('Development/requirements/**/*', ['plugins']);
gulp.watch('Development/scss/**/*', ['sass']);
gulp.watch('Development/jade/**/*', ['jade']);
gulp.watch(['Production/**/*']).on('change', browserSync.reload);
});
// BROWSER SYNC
gulp.task('sync', function() {
browserSync.init({
server: {
baseDir: "Production/"
}
});
});
// Default Task
gulp.task('default', ['jade', 'sass', 'coffee', 'js', 'plugins', 'watch', 'sync']); | 'css': 'Production/css',
'images': 'Production/images',
'style': 'nested' |
solution2.py | # Map Approach [O(n)] :
# Idea: build a hashmap of element frequencies, then while traversing the array look up how many elements equal (sum - current element)
# GeeksforGeeks Explanation: https://youtu.be/bvKMZXc0jQU
def getPairsCount(arr, n, sum):
m = [0] * 1000
# Store counts of all elements in map m
for i in range(0, n):
        m[arr[i]] += 1  # store the frequency of each number in the array
twice_count = 0
# Iterate through each element and increment the count (Every pair is counted twice)
    for i in range(0, n):
        complement = sum - arr[i]
        # guard against complements that fall outside the frequency table
        if 0 <= complement < len(m):
            twice_count += m[complement]
# if (arr[i], arr[i]) pair satisfies the condition, then we need to ensure that the count is decreased by one such that the (arr[i], arr[i]) pair is not considered
if (sum - arr[i] == arr[i]):
twice_count -= 1
# return the half of twice_count
return int(twice_count / 2)
n = int(input())
arr = list(map(int,input().split()))
sum = int(input()) |
print(getPairsCount(arr, n, sum)) |
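# A quick worked example (hypothetical input, not part of the original exercise):
# n = 5, arr = "1 5 7 1 5", sum = 6 -> frequency table {1: 2, 5: 2, 7: 1},
# twice_count accumulates 2 + 2 + 0 + 2 + 2 = 8, so getPairsCount returns 8 // 2 = 4
# (the four (1, 5) pairs).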
|
uuid.rs | use std::borrow::Cow;
use poem::{http::HeaderValue, web::Field};
use serde_json::Value;
use uuid::Uuid;
use crate::{
registry::{MetaSchema, MetaSchemaRef},
types::{
ParseError, ParseFromJSON, ParseFromMultipartField, ParseFromParameter, ParseResult,
ToHeader, ToJSON, Type,
},
};
impl Type for Uuid {
const IS_REQUIRED: bool = true;
type RawValueType = Self;
type RawElementValueType = Self;
fn name() -> Cow<'static, str> {
"string(uuid)".into()
}
fn schema_ref() -> MetaSchemaRef {
MetaSchemaRef::Inline(Box::new(MetaSchema::new_with_format("string", "uuid")))
}
fn as_raw_value(&self) -> Option<&Self::RawValueType> {
Some(self)
}
fn raw_element_iter<'a>(
&'a self,
) -> Box<dyn Iterator<Item = &'a Self::RawElementValueType> + 'a> {
Box::new(self.as_raw_value().into_iter())
}
}
impl ParseFromJSON for Uuid {
fn parse_from_json(value: Option<Value>) -> ParseResult<Self> |
}
impl ParseFromParameter for Uuid {
fn parse_from_parameter(value: &str) -> ParseResult<Self> {
value.parse().map_err(ParseError::custom)
}
}
#[poem::async_trait]
impl ParseFromMultipartField for Uuid {
async fn parse_from_multipart(field: Option<Field>) -> ParseResult<Self> {
match field {
Some(field) => Ok(field.text().await?.parse()?),
None => Err(ParseError::expected_input()),
}
}
}
impl ToJSON for Uuid {
fn to_json(&self) -> Option<Value> {
Some(Value::String(self.to_string()))
}
}
impl ToHeader for Uuid {
fn to_header(&self) -> Option<HeaderValue> {
HeaderValue::from_str(&self.to_string()).ok()
}
}
| {
let value = value.unwrap_or_default();
if let Value::String(value) = value {
Ok(value.parse()?)
} else {
Err(ParseError::expected_type(value))
}
} |
scripts.js | function Cart (){
this.orders = [];
  // this.ordersType = ["Delivery", "Carry Out"];
this.shippingType = "";
this.shippingCost=0;
this.total =0;
}
Cart.prototype.addOrder = function(order){
//this.orders.push(order);
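  // Copy the fields into a fresh Pizza instead of pushing `order` itself, so resetting the
  // shared `newOrder` object later does not wipe out items already in the cart.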
this.orders.push(new Pizza());
this.orders[this.orders.length - 1].cost = order.cost;
this.orders[this.orders.length - 1].sizes = order.sizes;
this.orders[this.orders.length - 1].crust = order.crust;
this.orders[this.orders.length - 1].sauce = order.sauce;
this.orders[this.orders.length - 1].cheese = order.cheese;
this.orders[this.orders.length - 1].meatToppings = order.meatToppings;
this.orders[this.orders.length - 1].nonMeatToppings = order.nonMeatToppings;
}
Cart.prototype.countTotal = function(orders){
var total = this.shippingCost;
this.orders.forEach(function(order){
total += order.price();
});
return total;
}
function Pizza(){
this.cost=0;
this.sizes = ["small", "mediam", "large"];
this.crust = ["hand tossed","crunchy thin crust"];
this.sauce = ["BBQ Saucce","Tomato Sauce","Alfredo Sauce","Garlic Parmesan White Sauce"];
this.cheese = ["none","light","normal","extra"];
this.meatToppings = ["Chicken","Pepporoni","Beef","Sausage"];
this.nonMeatToppings = ["Hot Sauce","Garlic","Jalapeno Peppers","Onions",
"Banana Peppers",
"Diced Tomatos","Tomatos","Black Olives",
"Red Peppers","Spinach",
"Green Peppers","Mushrooms","Green Olives",
"Pineapple","Shredded Provolone Cheese",
"Shredded Parmesan Asiago","Feta Cheese",
"Cheddar Cheese"];
}
Pizza.prototype.price=function(){
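  // Price = base price by size ($4 small / $4.50 medium / $5 large)
  // + $1.50 per meat topping + $0.50 per non-meat topping;
  // e.g. a large with two meat toppings and one veggie topping is 5 + 3 + 0.5 = 8.5.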
// this.sizes.forEach(function(){
var cost = 0;
switch(this.sizes){
case "small":
cost += 4;
break;
case "medium":
cost += 4.5;
break;
case "large":
cost += 5;
break;
default:
}
this.meatToppings.forEach(function(){
cost += 1.5;
});
this.nonMeatToppings.forEach(function(topping){
cost += 0.5;
});
this.cost = cost;
return this.cost;
}
//
// function Address (street, city, state){
// this.street = street;
// this.city = city;
// this.state = state;
// }
// Contact.prototype.fullName= function(){
// return this.firstName + " " +this.lastName;
// }
function resetForm(){
$("#order")[0].reset();
}
$(document).ready(function(){
var newCart = new Cart();
var newOrder = new Pizza();
$(".photo-index").text("0");
$("img.delivery").click(function(event){
event.preventDefault();
$(".name").show();
$("#next").show();
$(".address").show();
$("#add-to-cart").hide();
newCart.shippingCost = 5;
newCart.shippingType = "Delivery";
// newOrder.orderType="delivery";
// $("#result").append("Your shipping fee is $5"+"<br>"+"Then Your price is "+ newOrder.price() + ");
});
$("img.to-go").click(function(){
$(".name").show();
$(".address").hide();
$("#next").show();
$("#add-to-cart").hide();
newCart.shippingCost = 0;
newCart.shippingType = "Carry Out";
});
$(".next").click(function(event){
event.preventDefault();
$("#build-your-own").show();
$(".size-crust").show();
$(".order-type").hide();
$(".name").hide();
$(".address").hide();
$("#next").hide();
$("#add-to-cart").hide();
$("button#submit").hide();
});
$("#back-to-start").click(function(event){
event.preventDefault();
$(".order-type").show();
$("#next").hide();
$(".size-crust").hide();
});
$("#next-cheese-sauce").click(function(event){
event.preventDefault();
$(".order-type").hide();
$(".name").hide();
$(".address").hide();
$("#next").hide();
$(".size-crust").hide();
$(".choose-cheese-sauce").show();
$("#back-to-size").show();
$("#submit").hide();
$("#add-to-cart").hide();
});
$("#back-to-size").click(function(event){
event.preventDefault();
$(".order-type").hide();
$(".name").hide();
$(".address").hide();
$("#next").hide();
$(".size-crust").show();
$(".choose-cheese-sauce").hide();
$("#back-to-size").hide();
$("#next-cheese-sauce").show();
$("#submit").hide();
$("#add-to-cart").hide();
});
$("#next-toppings").click(function(event){
event.preventDefault();
$(".order-type").hide();
$(".name").hide();
$(".address").hide();
$("#next").hide();
$("#submit").hide();
$(".size-crust").hide();
$(".choose-cheese-sauce").hide();
$("#next-toppings").hide();
$(".meats-non-meats").show();
$("#next-toppings").show();
$("#back-to-sauce").show();
$("#add-to-cart").show();
});
$("#back-to-sauce").click(function(event){
event.preventDefault();
$(".choose-cheese-sauce").show();
$(".meats-non-meats").hide();
$("#back-to-sauce").hide();
$("#add-to-cart").hide();
});
$(".delivery").click(function(){
$(".name").show();
$("#new-address").show();
$("#add-to-cart").hide();
});
$(".to-go").click(function(event){
event.preventDefault();
$(".name").show();
$(".new-address").hide();
});
$("#enter-address").click(function(event){
event.preventDefault();
$("#new-address").hide();
$(".main-photo").hide();
$("#build-your-own").show();
});
var pizzaCounter=0;
$("button.new-pizza").click(function(event){
event.preventDefault();
$("#add-to-cart").hide();
$("#back-to-sauce").hide();
$("button#submit").hide();
$(".meats-non-meats").hide();
$("#result").hide();
$("button.new-pizza").hide();
$(".size-crust").show();
$("#back-to-start").show();
$("#next-cheese-sauce").show();
});
// var newCart = new Cart();
// var newOrder = new Pizza();
$("#add-to-cart").click(function(event){
event.preventDefault();
newOrder.sizes= $("input:radio[name=size]:checked").val();
newOrder.crust = $("input:radio[name=crust]:checked").val();
newOrder.cheese = $("#cheese-amount").val();
newOrder.sauce= $("input:radio[name=sauce]:checked").val();
newOrder.meatToppings=[];
$("input:checkbox[name=meats]:checked").each(function(){
var selectedMeats = $(this).val();
newOrder.meatToppings.push(selectedMeats);
});
newOrder.nonMeatToppings = [];
$("input:checkbox[name=non-meats]:checked").each(function(){
var selectedNonMeats = $(this).val();
newOrder.nonMeatToppings.push(selectedNonMeats);
});
newCart.addOrder(newOrder);
$("button.new-pizza").show();
$("#submit").show();
$("#cart").show(); |
$(".meats-non-meats").hide();
pizzaCounter++;
$(".number").text(" "+pizzaCounter+" ");
$(".toppings").text("");
newCart.orders.forEach(function(order, index){
$(".toppings").append("<span class='labels'>"+ "Pizza " + (index + 1)+"</span>" + ": "+order.sizes+", "+order.crust+", "
+ order.cheese+" Cheese, "+order.sauce+", "
+ order.meatToppings+", "+ order.nonMeatToppings+", "
+ "price is: $" + order.price() + "<br><hr>");
});
$(".photo-index").text(pizzaCounter);
$(".total").text(newCart.countTotal());
// $(".toppings").text(" "+newCart.orders[0].sizes+", "+newCart.orders[0].crust+", "+
// newCart.orders[0].cheese+" cheese, "+newCart.orders[0].sauce+", "
// + newCart.orders[0].meatToppings+", "+ newCart.orders[0].nonMeatToppings+", "+
// "price is: "+newCart.orders[0].price());
newOrder.sizes="";
newOrder.crust="";
newOrder.cheese="";
newOrder.sauce="";
newOrder.meatToppings="";
newOrder.nonMeatToppings="";
resetForm();
// we need to hide every thing to display the cart
});
$("#order").submit(function(event){
event.preventDefault();
// $(".number").text(" "+(pizzaCounter+1)+" ");
$("#cart").hide();
$("#result").hide();
$(".new-pizza").hide();
$("#submit").hide();
$("#finish").prepend("YOUR ORDER HAS BEEN SUBMITTED.<br></br>");
$("#finish").prepend("THANKS FOR CHOOSING US!");
$(".flex-img1").attr("src","images/main.jpg");
$(".photo-index").text("0");
// $("#result").show();
});
}); | $("#add-to-cart").hide();
$("#result").show(); |
sys.rs | // Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Low level implementation of buffers.
//!
//! Wraps directly around Vulkan buffers, with the exceptions of a few safety checks.
//!
//! The `UnsafeBuffer` type is the lowest-level buffer object provided by this library. It is used
//! internally by the higher-level buffer types. You are strongly encouraged to have excellent
//! knowledge of the Vulkan specs if you want to use an `UnsafeBuffer`.
//!
//! Here is what you must take care of when you use an `UnsafeBuffer`:
//!
//! - Synchronization, i.e. avoid reading and writing simultaneously to the same buffer.
//! - Memory aliasing considerations. If you use the same memory to back multiple resources, you
//! must ensure that they are not used together and must enable some additional flags.
//! - Binding memory correctly and only once. If you use sparse binding, respect the rules of
//! sparse binding.
//! - Type safety.
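//!
//! A minimal usage sketch (not part of the original docs; it mirrors the unit tests at the
//! bottom of this file and assumes a `device: Arc<Device>` is already available):
//!
//! ```ignore
//! let buffer = UnsafeBuffer::new(
//!     device.clone(),
//!     UnsafeBufferCreateInfo {
//!         size: 128,
//!         usage: BufferUsage::all(),
//!         ..Default::default()
//!     },
//! )
//! .unwrap();
//! let reqs = buffer.memory_requirements();
//! ```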
use crate::check_errors;
use crate::device::Device;
use crate::device::DeviceOwned;
use crate::memory::DeviceMemory;
use crate::memory::DeviceMemoryAllocationError;
use crate::memory::MemoryRequirements;
use crate::sync::Sharing;
use crate::DeviceSize;
use crate::Error;
use crate::OomError;
use crate::VulkanObject;
use crate::{buffer::BufferUsage, Version};
use ash::vk::Handle;
use smallvec::SmallVec;
use std::error;
use std::fmt;
use std::hash::Hash;
use std::hash::Hasher;
use std::mem::MaybeUninit;
use std::ptr;
use std::sync::Arc;
/// Data storage in a GPU-accessible location.
#[derive(Debug)]
pub struct UnsafeBuffer {
handle: ash::vk::Buffer,
device: Arc<Device>,
size: DeviceSize,
usage: BufferUsage,
}
impl UnsafeBuffer {
/// Creates a new `UnsafeBuffer`.
///
/// # Panics
///
/// - Panics if `create_info.sharing` is [`Concurrent`](Sharing::Concurrent) with less than 2
/// items.
/// - Panics if `create_info.size` is zero.
/// - Panics if `create_info.usage` is empty.
pub fn new(
device: Arc<Device>,
create_info: UnsafeBufferCreateInfo,
) -> Result<UnsafeBuffer, BufferCreationError> {
let UnsafeBufferCreateInfo {
mut sharing,
size,
sparse,
usage,
_ne: _,
} = create_info;
// VUID-VkBufferCreateInfo-size-00912
assert!(size != 0);
// VUID-VkBufferCreateInfo-usage-requiredbitmask
assert!(usage != BufferUsage::none());
let mut flags = ash::vk::BufferCreateFlags::empty();
// Check sparse features
if let Some(sparse_level) = sparse {
// VUID-VkBufferCreateInfo-flags-00915
if !device.enabled_features().sparse_binding {
return Err(BufferCreationError::FeatureNotEnabled {
feature: "sparse_binding",
reason: "sparse was `Some`",
});
}
// VUID-VkBufferCreateInfo-flags-00916
if sparse_level.sparse_residency && !device.enabled_features().sparse_residency_buffer {
return Err(BufferCreationError::FeatureNotEnabled {
feature: "sparse_residency_buffer",
reason: "sparse was `Some` and `sparse_residency` was set",
});
}
// VUID-VkBufferCreateInfo-flags-00917
if sparse_level.sparse_aliased && !device.enabled_features().sparse_residency_aliased {
return Err(BufferCreationError::FeatureNotEnabled {
feature: "sparse_residency_aliased",
reason: "sparse was `Some` and `sparse_aliased` was set",
});
}
// VUID-VkBufferCreateInfo-flags-00918
flags |= sparse_level.into();
}
// Check sharing mode and queue families
let (sharing_mode, queue_family_indices) = match &mut sharing {
Sharing::Exclusive => (ash::vk::SharingMode::EXCLUSIVE, &[] as _),
Sharing::Concurrent(ids) => {
// VUID-VkBufferCreateInfo-sharingMode-00914
ids.sort_unstable();
ids.dedup();
assert!(ids.len() >= 2);
for &id in ids.iter() {
// VUID-VkBufferCreateInfo-sharingMode-01419
if device.physical_device().queue_family_by_id(id).is_none() {
return Err(BufferCreationError::SharingInvalidQueueFamilyId { id });
}
}
(ash::vk::SharingMode::CONCURRENT, ids.as_slice())
}
};
if let Some(max_buffer_size) = device.physical_device().properties().max_buffer_size {
// VUID-VkBufferCreateInfo-size-06409
if size > max_buffer_size {
return Err(BufferCreationError::MaxBufferSizeExceeded {
size,
max: max_buffer_size,
});
}
}
// Everything now ok. Creating the buffer.
let create_info = ash::vk::BufferCreateInfo::builder()
.flags(flags)
.size(size)
.usage(usage.into())
.sharing_mode(sharing_mode)
.queue_family_indices(queue_family_indices);
let handle = unsafe {
let fns = device.fns();
let mut output = MaybeUninit::uninit();
check_errors(fns.v1_0.create_buffer(
device.internal_object(),
&create_info.build(),
ptr::null(),
output.as_mut_ptr(),
))?;
output.assume_init()
};
let buffer = UnsafeBuffer {
handle,
device,
size,
usage,
};
Ok(buffer)
}
/// Returns the memory requirements for this buffer.
pub fn memory_requirements(&self) -> MemoryRequirements {
#[inline]
fn align(val: DeviceSize, al: DeviceSize) -> DeviceSize {
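            // Rounds `val` up to the next multiple of `al`, e.g. align(5, 4) == 8 and align(8, 4) == 8.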
al * (1 + (val - 1) / al)
}
let buffer_memory_requirements_info2 = ash::vk::BufferMemoryRequirementsInfo2 {
buffer: self.handle,
..Default::default()
};
let mut memory_requirements2 = ash::vk::MemoryRequirements2::default();
let mut memory_dedicated_requirements = if self.device.api_version() >= Version::V1_1
|| self.device.enabled_extensions().khr_dedicated_allocation
{
Some(ash::vk::MemoryDedicatedRequirementsKHR::default())
} else {
None
};
if let Some(next) = memory_dedicated_requirements.as_mut() {
next.p_next = memory_requirements2.p_next;
memory_requirements2.p_next = next as *mut _ as *mut _;
}
unsafe {
let fns = self.device.fns();
if self.device.api_version() >= Version::V1_1
|| self
.device
.enabled_extensions()
.khr_get_memory_requirements2
{
if self.device.api_version() >= Version::V1_1 {
fns.v1_1.get_buffer_memory_requirements2(
self.device.internal_object(),
&buffer_memory_requirements_info2,
&mut memory_requirements2,
);
} else {
fns.khr_get_memory_requirements2
.get_buffer_memory_requirements2_khr(
self.device.internal_object(),
&buffer_memory_requirements_info2,
&mut memory_requirements2,
);
}
} else {
fns.v1_0.get_buffer_memory_requirements(
self.device.internal_object(),
self.handle,
&mut memory_requirements2.memory_requirements,
);
}
}
debug_assert!(memory_requirements2.memory_requirements.size >= self.size);
debug_assert!(memory_requirements2.memory_requirements.memory_type_bits != 0);
let mut memory_requirements = MemoryRequirements {
prefer_dedicated: memory_dedicated_requirements
.map_or(false, |dreqs| dreqs.prefers_dedicated_allocation != 0),
..MemoryRequirements::from(memory_requirements2.memory_requirements)
};
// We have to manually enforce some additional requirements for some buffer types.
let properties = self.device.physical_device().properties();
if self.usage.uniform_texel_buffer || self.usage.storage_texel_buffer {
memory_requirements.alignment = align(
memory_requirements.alignment,
properties.min_texel_buffer_offset_alignment,
);
}
if self.usage.storage_buffer {
memory_requirements.alignment = align(
memory_requirements.alignment,
properties.min_storage_buffer_offset_alignment,
);
}
if self.usage.uniform_buffer {
memory_requirements.alignment = align(
memory_requirements.alignment,
properties.min_uniform_buffer_offset_alignment,
);
}
memory_requirements
}
/// Binds device memory to this buffer.
pub unsafe fn bind_memory(
&self,
memory: &DeviceMemory,
offset: DeviceSize,
) -> Result<(), OomError> {
let fns = self.device.fns();
// We check for correctness in debug mode.
debug_assert!({
let mut mem_reqs = MaybeUninit::uninit();
fns.v1_0.get_buffer_memory_requirements(
self.device.internal_object(),
self.handle,
mem_reqs.as_mut_ptr(),
);
let mem_reqs = mem_reqs.assume_init();
mem_reqs.size <= (memory.size() - offset)
&& (offset % mem_reqs.alignment) == 0
&& mem_reqs.memory_type_bits & (1 << memory.memory_type().id()) != 0
});
// Check for alignment correctness.
{
let properties = self.device().physical_device().properties();
if self.usage().uniform_texel_buffer || self.usage().storage_texel_buffer {
debug_assert!(offset % properties.min_texel_buffer_offset_alignment == 0);
}
if self.usage().storage_buffer {
debug_assert!(offset % properties.min_storage_buffer_offset_alignment == 0);
}
if self.usage().uniform_buffer {
debug_assert!(offset % properties.min_uniform_buffer_offset_alignment == 0);
}
}
check_errors(fns.v1_0.bind_buffer_memory(
self.device.internal_object(),
self.handle,
memory.internal_object(),
offset,
))?;
Ok(())
}
/// Returns the size of the buffer in bytes.
#[inline]
pub fn size(&self) -> DeviceSize {
self.size
}
    /// Returns the usage the buffer was created with.
#[inline]
pub fn usage(&self) -> BufferUsage {
self.usage
}
/// Returns a key unique to each `UnsafeBuffer`. Can be used for the `conflicts_key` method.
#[inline]
pub fn key(&self) -> u64 {
self.handle.as_raw()
}
}
impl Drop for UnsafeBuffer {
#[inline]
fn drop(&mut self) {
unsafe {
let fns = self.device.fns();
fns.v1_0
.destroy_buffer(self.device.internal_object(), self.handle, ptr::null());
}
}
}
unsafe impl VulkanObject for UnsafeBuffer {
type Object = ash::vk::Buffer;
#[inline]
fn internal_object(&self) -> ash::vk::Buffer {
self.handle
}
}
unsafe impl DeviceOwned for UnsafeBuffer {
#[inline]
fn device(&self) -> &Arc<Device> {
&self.device
}
}
impl PartialEq for UnsafeBuffer {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.handle == other.handle && self.device == other.device
}
}
impl Eq for UnsafeBuffer {}
impl Hash for UnsafeBuffer {
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
self.handle.hash(state);
self.device.hash(state);
}
}
/// Parameters to create a new `UnsafeBuffer`.
#[derive(Clone, Debug)]
pub struct UnsafeBufferCreateInfo {
/// Whether the buffer can be shared across multiple queues, or is limited to a single queue.
///
/// The default value is [`Sharing::Exclusive`].
pub sharing: Sharing<SmallVec<[u32; 4]>>,
/// The size in bytes of the buffer.
///
/// The default value is `0`, which must be overridden.
pub size: DeviceSize,
/// Create a buffer with sparsely bound memory.
///
/// The default value is `None`.
pub sparse: Option<SparseLevel>,
/// How the buffer is going to be used.
///
/// The default value is [`BufferUsage::none()`], which must be overridden.
pub usage: BufferUsage,
pub _ne: crate::NonExhaustive,
}
impl Default for UnsafeBufferCreateInfo {
#[inline]
fn default() -> Self {
Self {
sharing: Sharing::Exclusive,
size: 0,
sparse: None,
usage: BufferUsage::none(),
_ne: crate::NonExhaustive(()),
}
}
}
/// Error that can happen when creating a buffer.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BufferCreationError {
/// Allocating memory failed.
AllocError(DeviceMemoryAllocationError),
ExtensionNotEnabled {
extension: &'static str,
reason: &'static str,
},
FeatureNotEnabled {
feature: &'static str,
reason: &'static str,
},
/// The specified size exceeded the value of the `max_buffer_size` limit.
MaxBufferSizeExceeded { size: DeviceSize, max: DeviceSize },
/// The sharing mode was set to `Concurrent`, but one of the specified queue family ids was not
/// valid.
SharingInvalidQueueFamilyId { id: u32 },
}
impl error::Error for BufferCreationError {
#[inline]
fn source(&self) -> Option<&(dyn error::Error + 'static)> |
}
impl fmt::Display for BufferCreationError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
Self::AllocError(_) => write!(fmt, "allocating memory failed"),
Self::ExtensionNotEnabled { extension, reason } => write!(
fmt,
"the extension {} must be enabled: {}",
extension, reason
),
Self::FeatureNotEnabled { feature, reason } => {
write!(fmt, "the feature {} must be enabled: {}", feature, reason)
}
Self::MaxBufferSizeExceeded { .. } => write!(
fmt,
"the specified size exceeded the value of the `max_buffer_size` limit"
),
Self::SharingInvalidQueueFamilyId { id } => {
write!(fmt, "the sharing mode was set to `Concurrent`, but one of the specified queue family ids was not valid")
}
}
}
}
impl From<OomError> for BufferCreationError {
#[inline]
fn from(err: OomError) -> BufferCreationError {
BufferCreationError::AllocError(err.into())
}
}
impl From<Error> for BufferCreationError {
#[inline]
fn from(err: Error) -> BufferCreationError {
match err {
err @ Error::OutOfHostMemory => {
BufferCreationError::AllocError(DeviceMemoryAllocationError::from(err))
}
err @ Error::OutOfDeviceMemory => {
BufferCreationError::AllocError(DeviceMemoryAllocationError::from(err))
}
_ => panic!("unexpected error: {:?}", err),
}
}
}
/// The level of sparse binding that a buffer should be created with.
#[derive(Clone, Copy, Debug, Default)]
pub struct SparseLevel {
pub sparse_residency: bool,
pub sparse_aliased: bool,
pub _ne: crate::NonExhaustive,
}
impl SparseLevel {
#[inline]
pub fn none() -> SparseLevel {
SparseLevel {
sparse_residency: false,
sparse_aliased: false,
_ne: crate::NonExhaustive(()),
}
}
}
impl From<SparseLevel> for ash::vk::BufferCreateFlags {
#[inline]
fn from(val: SparseLevel) -> Self {
let mut result = ash::vk::BufferCreateFlags::SPARSE_BINDING;
if val.sparse_residency {
result |= ash::vk::BufferCreateFlags::SPARSE_RESIDENCY;
}
if val.sparse_aliased {
result |= ash::vk::BufferCreateFlags::SPARSE_ALIASED;
}
result
}
}
#[cfg(test)]
mod tests {
use super::BufferCreationError;
use super::BufferUsage;
use super::SparseLevel;
use super::UnsafeBuffer;
use super::UnsafeBufferCreateInfo;
use crate::device::Device;
use crate::device::DeviceOwned;
#[test]
fn create() {
let (device, _) = gfx_dev_and_queue!();
let buf = UnsafeBuffer::new(
device.clone(),
UnsafeBufferCreateInfo {
size: 128,
usage: BufferUsage::all(),
..Default::default()
},
)
.unwrap();
let reqs = buf.memory_requirements();
assert!(reqs.size >= 128);
assert_eq!(buf.size(), 128);
assert_eq!(&**buf.device() as *const Device, &*device as *const Device);
}
#[test]
fn missing_feature_sparse_binding() {
let (device, _) = gfx_dev_and_queue!();
match UnsafeBuffer::new(
device,
UnsafeBufferCreateInfo {
size: 128,
sparse: Some(SparseLevel::none()),
usage: BufferUsage::all(),
..Default::default()
},
) {
Err(BufferCreationError::FeatureNotEnabled {
feature: "sparse_binding",
..
}) => (),
_ => panic!(),
}
}
#[test]
fn missing_feature_sparse_residency() {
let (device, _) = gfx_dev_and_queue!(sparse_binding);
match UnsafeBuffer::new(
device,
UnsafeBufferCreateInfo {
size: 128,
sparse: Some(SparseLevel {
sparse_residency: true,
sparse_aliased: false,
..Default::default()
}),
usage: BufferUsage::all(),
..Default::default()
},
) {
Err(BufferCreationError::FeatureNotEnabled {
feature: "sparse_residency_buffer",
..
}) => (),
_ => panic!(),
}
}
#[test]
fn missing_feature_sparse_aliased() {
let (device, _) = gfx_dev_and_queue!(sparse_binding);
match UnsafeBuffer::new(
device,
UnsafeBufferCreateInfo {
size: 128,
sparse: Some(SparseLevel {
sparse_residency: false,
sparse_aliased: true,
..Default::default()
}),
usage: BufferUsage::all(),
..Default::default()
},
) {
Err(BufferCreationError::FeatureNotEnabled {
feature: "sparse_residency_aliased",
..
}) => (),
_ => panic!(),
}
}
#[test]
fn create_empty_buffer() {
let (device, _) = gfx_dev_and_queue!();
assert_should_panic!({
UnsafeBuffer::new(
device,
UnsafeBufferCreateInfo {
size: 0,
usage: BufferUsage::all(),
..Default::default()
},
)
});
}
}
| {
match *self {
BufferCreationError::AllocError(ref err) => Some(err),
_ => None,
}
} |
polyfill.rs | // Copyright 2015-2016 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//! Polyfills for functionality that will (hopefully) be added to Rust's
//! standard library soon.
#[inline(always)]
pub const fn | (x: usize) -> u64 {
x as u64
}
pub fn usize_from_u32(x: u32) -> usize {
x as usize
}
#[macro_use]
mod chunks_fixed;
mod array_flat_map;
pub(crate) mod array_map;
mod leading_zeros_skipped;
#[cfg(test)]
mod test;
pub use self::{
array_flat_map::ArrayFlatMap, chunks_fixed::*, leading_zeros_skipped::LeadingZerosStripped,
};
| u64_from_usize |
repack.rs | use parity_wasm::builder;
use parity_wasm::elements::Module;
use super::{ChiselModule, ModuleError, ModuleKind, ModuleTranslator};
pub struct Repack;
impl Repack {
pub fn new() -> Self {
Repack {}
}
}
impl<'a> ChiselModule<'a> for Repack {
type ObjectReference = &'a dyn ModuleTranslator;
fn id(&'a self) -> String {
"repack".to_string()
}
fn kind(&'a self) -> ModuleKind {
ModuleKind::Translator
}
fn as_abstract(&'a self) -> Self::ObjectReference {
self as Self::ObjectReference
}
}
impl ModuleTranslator for Repack {
fn translate_inplace(&self, _module: &mut Module) -> Result<bool, ModuleError> {
Err(ModuleError::NotSupported)
}
fn translate(&self, module: &Module) -> Result<Option<Module>, ModuleError> {
        // TODO: check if the names section is carried over.
let module = module.clone();
let module = builder::from_module(module).build();
Ok(Some(module))
}
}
#[cfg(test)]
mod tests {
use parity_wasm::builder;
use parity_wasm::elements::CustomSection;
use rustc_hex::FromHex;
use super::*;
#[test]
fn smoke_test() {
let module = Module::default();
let repack = Repack::new();
assert_eq!(module, repack.translate(&module).unwrap().unwrap());
}
#[test]
fn basic_sections_only() {
let module = builder::module()
.function()
.signature()
.build()
.body()
.build()
.build()
.export()
.field("main")
.internal()
.func(0)
.build()
.export()
.field("memory")
.internal()
.memory(0)
.build()
.build();
let repack = Repack::new();
assert_eq!(module, repack.translate(&module).unwrap().unwrap());
}
#[test]
fn custom_section() {
let mut module = builder::module()
.function()
.signature()
.build()
.body()
.build()
.build()
.export()
.field("main")
.internal()
.func(0)
.build()
.export()
.field("memory")
.internal()
.memory(0)
.build()
.build();
let custom = CustomSection::new("test".to_string(), vec![42u8; 16]);
module
.sections_mut()
.push(parity_wasm::elements::Section::Custom(custom));
let repack = Repack::new();
assert_ne!(module, repack.translate(&module).unwrap().unwrap());
} | #[test]
fn names_section() {
let input = FromHex::from_hex(
"0061736d010000000104016000000303020000070801046d61696e00010a
0a020300010b040010000b0014046e616d65010d0200047465737401046d
61696e",
)
.unwrap();
let module = Module::from_bytes(&input).unwrap();
// Forcefully parse names section here.
let module = module
.parse_names()
.expect("parsing the names section failed");
assert_eq!(module.names_section().is_some(), true);
let repack = Repack::new();
// Repack drops names section too.
let output = repack.translate(&module).unwrap().unwrap();
assert_eq!(output.has_names_section(), false);
}
} | |
new.rs | //! # New operations
//!
//! The new module contains functionality relating to the new command,
//! independent of the CLI.
use std::fs::OpenOptions;
use chrono::prelude::*;
use crate::{
errors::DiaryError,
utils::{editing, file_system},
Diary, EntryContent,
};
/// The options available to the new command.
pub struct NewOptions {
/// Whether or not to open the new entry for an initial entry.
pub open: bool,
}
/// Creates a new diary entry.
///
/// # Arguments
///
/// * `opts` - The options passed by the user at runtime.
/// * `diary` - Struct representing the diary.
/// * `date` - The date for which to create the new entry.
/// * `string_getter` - The function that obtains the string to add to the file.
///
/// # Returns
///
/// The unit upon successful creation of the entry.
/// DiaryError if the entry already exists.
/// DiaryError on any other IO issues.
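///
/// A rough usage sketch (hypothetical caller, mirroring the tests below; `diary`, `date` and
/// `string_getter` are assumed to already exist):
///
/// ```ignore
/// let opts = NewOptions { open: false };
/// let date = Local.ymd(2021, 11, 6);
/// new(&opts, &diary, &date, string_getter)?;
/// ```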
pub fn new(
opts: &NewOptions,
diary: &Diary,
date: &Date<Local>,
string_getter: editing::StringGetter,
) -> Result<(), DiaryError> {
let mut new_entry_path = file_system::month_folder(diary.diary_path(), date);
file_system::create_month_folder(&new_entry_path)?;
let entry_name = diary.file_name(date);
new_entry_path.push(entry_name);
let result = OpenOptions::new()
.write(true)
.create_new(true)
.open(new_entry_path);
let mut file = match result {
Ok(mut file) => {
editing::add_user_content_to_file(&mut file, diary.file_type().title(date))?;
file
}
Err(e) => return Err(e.into()),
};
if opts.open {
let contents = string_getter("".to_owned())?;
editing::add_user_content_to_file(&mut file, contents)?;
};
Ok(())
}
#[cfg(test)]
mod test {
use std::fs;
use chrono::prelude::*;
use super::{new, NewOptions};
use crate::{config::Config, ops::testing, utils::editing::test::test_string_getter, Diary};
#[test]
fn new_success() {
let config = testing::temp_config();
testing::default_init(config.diary_path());
let diary = Diary::from_config(&config).unwrap();
let new_opts = NewOptions { open: false };
let date = Local.ymd(2021, 11, 6);
new(&new_opts, &diary, &date, test_string_getter).unwrap();
let test_path = diary.get_entry_path(&date);
assert!(test_path.exists());
}
#[test]
#[should_panic(expected = "value: UnInitialised")]
fn new_not_init() {
let config = testing::temp_config();
let diary = Diary::from_config(&config).unwrap();
let date = Local.ymd(2021, 11, 6);
let new_opts = NewOptions { open: false };
new(&new_opts, &diary, &date, test_string_getter).unwrap();
}
#[test]
#[should_panic(expected = "kind: AlreadyExists")]
fn new_fail_second_time() {
let config = testing::temp_config();
testing::default_init(config.diary_path());
let diary = Diary::from_config(&config).unwrap();
let new_opts = NewOptions { open: false }; | new(&new_opts, &diary, &date, test_string_getter).unwrap();
}
#[test]
#[should_panic(expected = "value: UnInitialised")]
fn new_not_init_default_config() {
let config = Config::default();
let diary = Diary::from_config(&config).unwrap();
let new_opts = NewOptions { open: false };
let date = Local.ymd(2021, 11, 6);
new(&new_opts, &diary, &date, test_string_getter).unwrap();
}
#[test]
fn new_open_file_success() {
let config = testing::temp_config();
testing::default_init(config.diary_path());
let diary = Diary::from_config(&config).unwrap();
let new_opts = NewOptions { open: true };
let date = Local.ymd(2021, 11, 6);
new(&new_opts, &diary, &date, test_string_getter).unwrap();
let diary_file = Diary::from_config(&config).unwrap();
let test_path = diary_file.get_entry_path(&date);
let content = fs::read_to_string(test_path).unwrap();
assert!(content.contains("Test content"));
}
} | let date = Local.ymd(2021, 11, 6);
new(&new_opts, &diary, &date, test_string_getter).unwrap(); |
project.py | import pypro.core
import os
class CreateConfig(pypro.core.Recipe):
def __init__(self, source, destination):
self.source = source
self.destination = destination
def run(self, runner, arguments=None):
# Read the template file
| content = ''
with open(self.source, 'r') as f:
content = f.read(os.path.getsize(self.source))
# Replace notations with actual values
content = pypro.core.Variables.replace(content)
# Write the config file
with open(self.destination, 'w') as f:
f.write(content) |
|
main.rs | use crate::interpreter::Interpreter;
use parser::Parser;
use std::path::PathBuf;
#[macro_use]
mod macros;
mod ast;
mod class;
mod dynamic;
mod expr_parser;
mod expression;
mod formatter;
mod function;
mod interpreter;
mod lexer;
mod method;
mod module;
mod operator;
mod parser;
mod runtime_error;
mod scope;
mod token;
pub fn | () {
let root_dir: PathBuf = [
std::env::current_dir().unwrap().to_str().unwrap(),
"interpreter",
"nog",
]
.iter()
.collect();
let mut root_path = root_dir.clone();
root_path.push("main.ns");
let mut parser = Parser::new();
let mut interpreter = Interpreter::new();
interpreter.source_locations.push(root_dir);
let content = std::fs::read_to_string(&root_path).unwrap();
parser.set_source(root_path, &content, 0);
match parser.parse() {
Ok(program) => {
program.print();
dbg!(&program.stmts);
if let Err(e) = interpreter.execute(&program) {
println!("RUNTIME ERROR: {}", e);
};
}
Err(e) => {
println!("PARSE ERROR: {}", e);
}
};
}
| main |
config.py | import copy
import os
from configobj import ConfigObj
from collections import OrderedDict
class CogctlConfig():
def __init__(self, filename):
self.filename = filename
if os.path.isfile(filename):
# If the file exists it should be valid, so just try to
# get the default profile name and the default profile
self._config = ConfigObj(filename)
self.default()
else:
self._config = ConfigObj()
def profile(self, profile):
"""
Raises KeyError if no such profile exists
"""
        # Without copying, we would be modifying the in-memory
        # representation of the config file
p = copy.deepcopy(self._config[profile])
return CogctlConfig._normalize_entry(p)
def default_profile_name(self):
return self._config['defaults']['profile']
def default(self):
return self.profile(self.default_profile_name())
def add(self, profile_name, profile):
# NOTE: Doesn't do any kind of normalization or converting
# back to our legacy format... absent any other work, this
# will result in a mixture of old and new formats for each
# entry.
if 'defaults' not in self._config:
self._config['defaults'] = {'profile': profile_name}
# Controlling the ordering of keys in the new profile makes
# for deterministic testing when we write out new entries.
ordered = OrderedDict()
for k in sorted(profile.keys()):
ordered[k] = profile[k]
self._config[profile_name] = ordered
def set_default(self, profile_name):
""" Update the default profile. Raise KeyError if no such profile exists
"""
if profile_name not in self.profiles():
raise KeyError("Profile does not exist")
self._config['defaults']['profile'] = profile_name
def write(self):
# We manage the writing ourselves, because the object may have
# been initialized with a file that does not exist. Using
# ConfigObj's create_empty=True keyword makes things
# complicated because it creates the empty file at object
# creation time, not write time, which means we could be
# creating empty (and thus invalid) configuration files.
with open(self.filename, "wb") as f:
self._config.write(f)
def | (self):
""" Return a sorted list of profiles present."""
return sorted([p for p in self._config.keys()
if p != "defaults"])
def update_profile(self, profile_name):
"""Updates an old secure/host/port profile to a modern url-based one.
"""
p = self.profile(profile_name)
ordered = OrderedDict()
for k in sorted(p.keys()):
ordered[k] = p[k]
self._config[profile_name] = ordered
@staticmethod
def _normalize_entry(entry):
"""Consolidates url information into a single value.
Our old (Elixir implementation) INI-based configuration
sections split up the Cog API root URL information across
three different options:
* "secure": a Boolean indicating whether or not to use HTTPS
* "host"
* "port"
Here, we consolidate all these values into a single "url" value,
place it into the entry, and remove the now-unneeded options that
comprise it.
"""
if entry.get("url"):
# Consider it already normalized
return entry
if entry.pop("secure") == "true":
protocol = "https"
else:
protocol = "http"
host = entry.pop("host")
port = entry.pop("port")
entry["url"] = "%s://%s:%s" % (protocol, host, port)
return entry
| profiles |
triggerrun.go | package artifacts
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// TriggerRunClient is the client for the TriggerRun methods of the Artifacts service.
type TriggerRunClient struct {
BaseClient
}
// NewTriggerRunClient creates an instance of the TriggerRunClient client.
func NewTriggerRunClient(endpoint string) TriggerRunClient {
return TriggerRunClient{New(endpoint)}
}
// CancelTriggerInstance cancel single trigger instance by runId.
// Parameters:
// triggerName - the trigger name.
// runID - the pipeline run identifier.
func (client TriggerRunClient) CancelTriggerInstance(ctx context.Context, triggerName string, runID string) (result autorest.Response, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/TriggerRunClient.CancelTriggerInstance")
defer func() {
sc := -1
if result.Response != nil {
sc = result.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: triggerName,
Constraints: []validation.Constraint{{Target: "triggerName", Name: validation.MaxLength, Rule: 260, Chain: nil},
{Target: "triggerName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "triggerName", Name: validation.Pattern, Rule: `^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("artifacts.TriggerRunClient", "CancelTriggerInstance", err.Error())
}
req, err := client.CancelTriggerInstancePreparer(ctx, triggerName, runID)
if err != nil {
err = autorest.NewErrorWithError(err, "artifacts.TriggerRunClient", "CancelTriggerInstance", nil, "Failure preparing request")
return
}
resp, err := client.CancelTriggerInstanceSender(req)
if err != nil {
result.Response = resp
err = autorest.NewErrorWithError(err, "artifacts.TriggerRunClient", "CancelTriggerInstance", resp, "Failure sending request")
return
}
result, err = client.CancelTriggerInstanceResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "artifacts.TriggerRunClient", "CancelTriggerInstance", resp, "Failure responding to request")
return
}
return
}
// CancelTriggerInstancePreparer prepares the CancelTriggerInstance request.
func (client TriggerRunClient) CancelTriggerInstancePreparer(ctx context.Context, triggerName string, runID string) (*http.Request, error) {
urlParameters := map[string]interface{}{
"endpoint": client.Endpoint,
}
pathParameters := map[string]interface{}{
"runId": autorest.Encode("path", runID),
"triggerName": autorest.Encode("path", triggerName),
}
const APIVersion = "2019-06-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithCustomBaseURL("{endpoint}", urlParameters),
autorest.WithPathParameters("/triggers/{triggerName}/triggerRuns/{runId}/cancel", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CancelTriggerInstanceSender sends the CancelTriggerInstance request. The method will close the
// http.Response Body if it receives an error.
func (client TriggerRunClient) CancelTriggerInstanceSender(req *http.Request) (*http.Response, error) {
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// CancelTriggerInstanceResponder handles the response to the CancelTriggerInstance request. The method always
// closes the http.Response Body.
func (client TriggerRunClient) CancelTriggerInstanceResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByClosing())
result.Response = resp
return
}
// QueryTriggerRunsByWorkspace query trigger runs.
// Parameters:
// filterParameters - parameters to filter the pipeline run.
func (client TriggerRunClient) QueryTriggerRunsByWorkspace(ctx context.Context, filterParameters RunFilterParameters) (result TriggerRunsQueryResponse, err error) {
if tracing.IsEnabled() |
if err := validation.Validate([]validation.Validation{
{TargetValue: filterParameters,
Constraints: []validation.Constraint{{Target: "filterParameters.LastUpdatedAfter", Name: validation.Null, Rule: true, Chain: nil},
{Target: "filterParameters.LastUpdatedBefore", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
return result, validation.NewError("artifacts.TriggerRunClient", "QueryTriggerRunsByWorkspace", err.Error())
}
req, err := client.QueryTriggerRunsByWorkspacePreparer(ctx, filterParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "artifacts.TriggerRunClient", "QueryTriggerRunsByWorkspace", nil, "Failure preparing request")
return
}
resp, err := client.QueryTriggerRunsByWorkspaceSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "artifacts.TriggerRunClient", "QueryTriggerRunsByWorkspace", resp, "Failure sending request")
return
}
result, err = client.QueryTriggerRunsByWorkspaceResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "artifacts.TriggerRunClient", "QueryTriggerRunsByWorkspace", resp, "Failure responding to request")
return
}
return
}
// QueryTriggerRunsByWorkspacePreparer prepares the QueryTriggerRunsByWorkspace request.
func (client TriggerRunClient) QueryTriggerRunsByWorkspacePreparer(ctx context.Context, filterParameters RunFilterParameters) (*http.Request, error) {
urlParameters := map[string]interface{}{
"endpoint": client.Endpoint,
}
const APIVersion = "2019-06-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithCustomBaseURL("{endpoint}", urlParameters),
autorest.WithPath("/queryTriggerRuns"),
autorest.WithJSON(filterParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// QueryTriggerRunsByWorkspaceSender sends the QueryTriggerRunsByWorkspace request. The method will close the
// http.Response Body if it receives an error.
func (client TriggerRunClient) QueryTriggerRunsByWorkspaceSender(req *http.Request) (*http.Response, error) {
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// QueryTriggerRunsByWorkspaceResponder handles the response to the QueryTriggerRunsByWorkspace request. The method always
// closes the http.Response Body.
func (client TriggerRunClient) QueryTriggerRunsByWorkspaceResponder(resp *http.Response) (result TriggerRunsQueryResponse, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// RerunTriggerInstance rerun single trigger instance by runId.
// Parameters:
// triggerName - the trigger name.
// runID - the pipeline run identifier.
func (client TriggerRunClient) RerunTriggerInstance(ctx context.Context, triggerName string, runID string) (result autorest.Response, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/TriggerRunClient.RerunTriggerInstance")
defer func() {
sc := -1
if result.Response != nil {
sc = result.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: triggerName,
Constraints: []validation.Constraint{{Target: "triggerName", Name: validation.MaxLength, Rule: 260, Chain: nil},
{Target: "triggerName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "triggerName", Name: validation.Pattern, Rule: `^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("artifacts.TriggerRunClient", "RerunTriggerInstance", err.Error())
}
req, err := client.RerunTriggerInstancePreparer(ctx, triggerName, runID)
if err != nil {
err = autorest.NewErrorWithError(err, "artifacts.TriggerRunClient", "RerunTriggerInstance", nil, "Failure preparing request")
return
}
resp, err := client.RerunTriggerInstanceSender(req)
if err != nil {
result.Response = resp
err = autorest.NewErrorWithError(err, "artifacts.TriggerRunClient", "RerunTriggerInstance", resp, "Failure sending request")
return
}
result, err = client.RerunTriggerInstanceResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "artifacts.TriggerRunClient", "RerunTriggerInstance", resp, "Failure responding to request")
return
}
return
}
// RerunTriggerInstancePreparer prepares the RerunTriggerInstance request.
func (client TriggerRunClient) RerunTriggerInstancePreparer(ctx context.Context, triggerName string, runID string) (*http.Request, error) {
urlParameters := map[string]interface{}{
"endpoint": client.Endpoint,
}
pathParameters := map[string]interface{}{
"runId": autorest.Encode("path", runID),
"triggerName": autorest.Encode("path", triggerName),
}
const APIVersion = "2019-06-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithCustomBaseURL("{endpoint}", urlParameters),
autorest.WithPathParameters("/triggers/{triggerName}/triggerRuns/{runId}/rerun", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// RerunTriggerInstanceSender sends the RerunTriggerInstance request. The method will close the
// http.Response Body if it receives an error.
func (client TriggerRunClient) RerunTriggerInstanceSender(req *http.Request) (*http.Response, error) {
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// RerunTriggerInstanceResponder handles the response to the RerunTriggerInstance request. The method always
// closes the http.Response Body.
func (client TriggerRunClient) RerunTriggerInstanceResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByClosing())
result.Response = resp
return
}
| {
ctx = tracing.StartSpan(ctx, fqdn+"/TriggerRunClient.QueryTriggerRunsByWorkspace")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
} |
canonicalizer.go | /*
* Copyright 2020 National Library of Norway.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package canonicalizer
import (
"strings"
"github.com/nlnwa/whatwg-url/errors"
"github.com/nlnwa/whatwg-url/url"
)
func New(opts ...url.ParserOption) url.Parser {
p := &profile{
Parser: url.NewParser(opts...),
sortQuery: NoSort,
}
for _, opt := range opts {
if o, ok := opt.(canonParserOption); ok {
o.applyProfile(p)
}
}
return p
}
type profile struct {
url.Parser
removeUserInfo bool
removePort bool
removeFragment bool
sortQuery querySort
repeatedPercentDecoding bool
defaultScheme string
}
func (p *profile) Parse(rawUrl string) (*url.Url, error) {
u, err := p.Parser.Parse(rawUrl)
if err != nil {
if errors.Code(err) == errors.FailRelativeUrlWithNoBase && p.defaultScheme != "" {
rawUrl = p.defaultScheme + "://" + rawUrl
u, err = p.Parser.Parse(rawUrl)
}
if err != nil {
return nil, err
}
}
return p.Canonicalize(u)
}
func (p *profile) ParseRef(rawUrl, ref string) (*url.Url, error) {
b, err := p.Parser.Parse(rawUrl)
if err != nil {
if errors.Code(err) == errors.FailRelativeUrlWithNoBase && p.defaultScheme != "" {
rawUrl = p.defaultScheme + "://" + rawUrl
b, err = p.Parser.Parse(rawUrl)
}
if err != nil {
return nil, err
}
}
u, err := b.Parse(ref)
if err != nil {
return nil, err
}
return p.Canonicalize(u)
}
func (p *profile) Canonicalize(u *url.Url) (*url.Url, error) {
if p.repeatedPercentDecoding {
if u.Hostname() != "" {
u.SetHostname(decodeEncode(u.Hostname(), url.HostPercentEncodeSet))
}
if u.Pathname() != "" {
u.SetPathname(decodeEncode(u.Pathname(), LaxPathPercentEncodeSet))
}
if u.Search() != "" {
u.SearchParams().Iterate(func(pair *url.NameValuePair) {
pair.Name = decodeEncode(pair.Name, RepeatedQueryPercentDecodeSet)
pair.Value = decodeEncode(pair.Value, RepeatedQueryPercentDecodeSet)
})
}
if u.Hash() != "" {
u.SetHash(decodeEncode(strings.TrimPrefix(u.Hash(), "#"), url.HostPercentEncodeSet))
}
}
if p.removePort {
u.SetPort("")
}
if p.removeUserInfo {
u.SetUsername("")
u.SetPassword("")
}
if p.removeFragment {
u.SetHash("")
}
switch p.sortQuery {
case SortKeys:
u.SearchParams().Sort()
case SortParameter:
u.SearchParams().SortAbsolute()
}
return u, nil
}
func decodeEncode(s string, tr *url.PercentEncodeSet) string {
r := percentEncode(repeatedDecode(s), tr)
return r
}
// repeatedDecode repeatedly percent-unescapes a string until it has no more percent-escapes
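// (e.g. the double-encoded space "%2520" decodes first to "%20" and then to " ")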
func repeatedDecode(s string) string {
var r string
for {
r = decodePercentEncoded(s)
if s == r {
break
}
s = r
}
return s
}
func percentEncode(s string, tr *url.PercentEncodeSet) string {
sb := strings.Builder{}
for _, b := range []byte(s) {
sb.WriteString(percentEncodeByte(b, tr.Set('%')))
}
return sb.String()
}
func percentEncodeByte(b byte, tr *url.PercentEncodeSet) string {
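	// Bytes outside the encode set pass through unchanged; a space (0x20) in the set becomes "%20".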
if tr != nil && !tr.ByteShouldBeEncoded(b) {
return string(b)
}
percentEncoded := make([]byte, 3)
percentEncoded[0] = '%'
percentEncoded[1] = "0123456789ABCDEF"[b>>4]
percentEncoded[2] = "0123456789ABCDEF"[b&15]
return string(percentEncoded)
}
func decodePercentEncoded(s string) string {
sb := strings.Builder{}
bytes := []byte(s)
for i := 0; i < len(bytes); i++ {
if bytes[i] != '%' {
sb.WriteByte(bytes[i])
} else if len(bytes) < (i+3) ||
(!url.ASCIIHexDigit.Test(uint(bytes[i+1])) || !url.ASCIIHexDigit.Test(uint(bytes[i+2]))) {
sb.WriteByte(bytes[i])
} else {
b := unhex(bytes[i+1])<<4 | unhex(bytes[i+2])
sb.WriteByte(b)
i += 2
}
}
return sb.String()
}
func unhex(c byte) byte {
switch {
case '0' <= c && c <= '9':
return c - '0' | case 'A' <= c && c <= 'F':
return c - 'A' + 10
}
return 0
} | case 'a' <= c && c <= 'f':
return c - 'a' + 10 |
validation.py | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.general import exceptions as ex
from sahara.plugins.general import utils as u
from sahara.plugins.vanilla.hadoop2 import config_helper as cu
from sahara.plugins.vanilla import utils as vu
from sahara.utils import general as gu
def validate_cluster_creating(pctx, cluster):
|
def validate_additional_ng_scaling(cluster, additional):
rm = vu.get_resourcemanager(cluster)
scalable_processes = _get_scalable_processes()
for ng_id in additional:
ng = gu.get_by_id(cluster.node_groups, ng_id)
if not set(ng.node_processes).issubset(scalable_processes):
msg = "Vanilla plugin cannot scale nodegroup with processes: %s"
raise ex.NodeGroupCannotBeScaled(ng.name,
msg % ' '.join(ng.node_processes))
if not rm and 'nodemanager' in ng.node_processes:
msg = ("Vanilla plugin cannot scale node group with processes "
"which have no master-processes run in cluster")
raise ex.NodeGroupCannotBeScaled(ng.name, msg)
def validate_existing_ng_scaling(pctx, cluster, existing):
scalable_processes = _get_scalable_processes()
dn_to_delete = 0
for ng in cluster.node_groups:
if ng.id in existing:
if ng.count > existing[ng.id] and "datanode" in ng.node_processes:
dn_to_delete += ng.count - existing[ng.id]
if not set(ng.node_processes).issubset(scalable_processes):
msg = ("Vanilla plugin cannot scale nodegroup "
"with processes: %s")
raise ex.NodeGroupCannotBeScaled(
ng.name, msg % ' '.join(ng.node_processes))
dn_amount = len(vu.get_datanodes(cluster))
rep_factor = cu.get_config_value(pctx, 'HDFS', 'dfs.replication', cluster)
if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
msg = ("Vanilla plugin cannot shrink cluster because it would be not "
"enough nodes for replicas (replication factor is %s)")
raise ex.ClusterCannotBeScaled(
cluster.name, msg % rep_factor)
def _get_scalable_processes():
return ['datanode', 'nodemanager']
def _get_inst_count(cluster, process):
return sum([ng.count for ng in u.get_node_groups(cluster, process)])
| nn_count = _get_inst_count(cluster, 'namenode')
if nn_count != 1:
raise ex.InvalidComponentCountException('namenode', 1, nn_count)
snn_count = _get_inst_count(cluster, 'secondarynamenode')
if snn_count not in [0, 1]:
raise ex.InvalidComponentCountException('secondarynamenode', '0 or 1',
snn_count)
rm_count = _get_inst_count(cluster, 'resourcemanager')
if rm_count not in [0, 1]:
raise ex.InvalidComponentCountException('resourcemanager', '0 or 1',
rm_count)
hs_count = _get_inst_count(cluster, 'historyserver')
if hs_count not in [0, 1]:
raise ex.InvalidComponentCountException('historyserver', '0 or 1',
hs_count)
nm_count = _get_inst_count(cluster, 'nodemanager')
if rm_count == 0:
if nm_count > 0:
raise ex.RequiredServiceMissingException('resourcemanager',
required_by='nodemanager')
oo_count = _get_inst_count(cluster, 'oozie')
dn_count = _get_inst_count(cluster, 'datanode')
if oo_count not in [0, 1]:
raise ex.InvalidComponentCountException('oozie', '0 or 1', oo_count)
if oo_count == 1:
if dn_count < 1:
raise ex.RequiredServiceMissingException('datanode',
required_by='oozie')
if nm_count < 1:
raise ex.RequiredServiceMissingException('nodemanager',
required_by='oozie')
if hs_count != 1:
raise ex.RequiredServiceMissingException('historyserver',
required_by='oozie')
rep_factor = cu.get_config_value(pctx, 'HDFS', 'dfs.replication', cluster)
if dn_count < rep_factor:
raise ex.InvalidComponentCountException(
'datanode', rep_factor, dn_count, 'Number of datanodes must be not'
' less than dfs.replication.') |
get.go | package cost
import (
"fmt"
"log"
"strconv"
"time"
outputflag "github.com/openshift/osdctl/cmd/getoutput"
"github.com/openshift/osdctl/internal/utils/globalflags"
awsprovider "github.com/openshift/osdctl/pkg/provider/aws"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/costexplorer"
"github.com/aws/aws-sdk-go/service/organizations"
)
// getCmd represents the get command
func newCmdGet(streams genericclioptions.IOStreams, globalOpts *globalflags.GlobalOptions) *cobra.Command {
ops := newGetOptions(streams, globalOpts)
getCmd := &cobra.Command{
Use: "get",
Short: "Get total cost of a given OU",
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(ops.checkArgs(cmd, args))
cmdutil.CheckErr(ops.run())
},
}
getCmd.Flags().StringVar(&ops.ou, "ou", "", "get OU ID")
getCmd.Flags().BoolVarP(&ops.recursive, "recursive", "r", false, "recurse through OUs")
	getCmd.Flags().StringVarP(&ops.time, "time", "t", "", "set time. One of 'LM', 'MTD', 'YTD', '3M', '6M', '1Y'")
getCmd.Flags().StringVar(&ops.start, "start", "", "set start date range")
getCmd.Flags().StringVar(&ops.end, "end", "", "set end date range")
getCmd.Flags().BoolVar(&ops.csv, "csv", false, "output result as csv")
return getCmd
}
func (o *getOptions) checkArgs(cmd *cobra.Command, _ []string) error {
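	// The flags are mutually exclusive: callers must supply either a complete
	// --start/--end range or a predefined --time window, plus an OU ID.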
	// If neither a date range nor a predefined time is defined, error out
if o.start == "" && o.end == "" && o.time == "" {
return cmdutil.UsageErrorf(cmd, "Please provide a date range or a predefined time")
}
	// If both a date range and a predefined time are defined, error out
if o.start != "" && o.end != "" && o.time != "" {
return cmdutil.UsageErrorf(cmd, "Please provide either a date range or a predefined time")
}
	// If either the start or the end of the range is missing, error out
if o.start != "" && o.end == "" {
return cmdutil.UsageErrorf(cmd, "Please provide end of date range")
}
if o.start == "" && o.end != "" {
return cmdutil.UsageErrorf(cmd, "Please provide start of date range")
}
if o.ou == "" {
return cmdutil.UsageErrorf(cmd, "Please provide OU")
}
o.output = o.GlobalOptions.Output
return nil
}
//Store flag options for get command
type getOptions struct {
ou string
recursive bool
time string
start string
end string
csv bool
output string
genericclioptions.IOStreams
GlobalOptions *globalflags.GlobalOptions
}
type getCostResponse struct {
OuId string `json:"ouid" yaml:"ouid"`
OuName string `json:"ouname" yaml:"ouname"`
CostUSD float64 `json:"costUSD" yaml:"costUSD"`
}
func (f getCostResponse) String() string {
return fmt.Sprintf(" OuId: %s\n OuName: %s\n Cost: %f\n", f.OuId, f.OuName, f.CostUSD)
}
func | (streams genericclioptions.IOStreams, globalOpts *globalflags.GlobalOptions) *getOptions {
return &getOptions{
IOStreams: streams,
GlobalOptions: globalOpts,
}
}
func (o *getOptions) run() error {
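	// Resolve the target OU once, then aggregate its cost either over the immediate
	// child accounts or over the entire subtree when --recursive is set.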
awsClient, err := opsCost.initAWSClients()
if err != nil {
return err
}
//Get information regarding Organizational Unit
OU := getOU(awsClient, o.ou)
var cost float64
var unit string
if o.recursive { //Get cost of given OU by aggregating costs of all (including immediate) accounts under OU
if err := o.getOUCostRecursive(&cost, &unit, OU, awsClient); err != nil {
log.Fatalln("Error getting cost of OU recursively:", err)
}
} else { //Get cost of given OU by aggregating costs of only immediate accounts under given OU
if err := o.getOUCost(&cost, &unit, OU, awsClient); err != nil {
log.Fatalln("Error getting cost of OU:", err)
}
}
o.printCostGet(cost, unit, o, OU)
return nil
}
//Get account IDs of immediate accounts under given OU
func getAccounts(OU *organizations.OrganizationalUnit, awsClient awsprovider.Client) ([]*string, error) {
var accountSlice []*string
var nextToken *string
//Populate accountSlice with accounts by looping until accounts.NextToken is null
for {
accounts, err := awsClient.ListAccountsForParent(&organizations.ListAccountsForParentInput{
ParentId: OU.Id,
NextToken: nextToken,
})
if err != nil {
return nil, err
}
for i := 0; i < len(accounts.Accounts); i++ {
accountSlice = append(accountSlice, accounts.Accounts[i].Id)
}
if accounts.NextToken == nil {
break
}
nextToken = accounts.NextToken //If NextToken != nil, keep looping
}
return accountSlice, nil
}
//Get the account IDs of all (not only immediate) accounts under OU
func getAccountsRecursive(OU *organizations.OrganizationalUnit, awsClient awsprovider.Client) ([]*string, error) {
var accountsIDs []*string
//Populate OUs
OUs, err := getOUs(OU, awsClient)
if err != nil {
return nil, err
}
//Loop through all child OUs to get account IDs from the accounts that comprise the OU
for _, childOU := range OUs {
accountsIDsOU, _ := getAccountsRecursive(childOU, awsClient)
accountsIDs = append(accountsIDs, accountsIDsOU...)
}
	//Get accounts directly under the given OU
accountsIDsOU, err := getAccounts(OU, awsClient)
if err != nil {
return nil, err
}
return append(accountsIDs, accountsIDsOU...), nil
}
//Get immediate OUs (child nodes) directly under given OU
func getOUs(OU *organizations.OrganizationalUnit, awsClient awsprovider.Client) ([]*organizations.OrganizationalUnit, error) {
var OUSlice []*organizations.OrganizationalUnit
var nextToken *string
//Populate OUSlice with OUs by looping until OUs.NextToken is null
for {
OUs, err := awsClient.ListOrganizationalUnitsForParent(&organizations.ListOrganizationalUnitsForParentInput{
ParentId: OU.Id,
NextToken: nextToken,
})
if err != nil {
return nil, err
}
//Add OUs to slice
for childOU := 0; childOU < len(OUs.OrganizationalUnits); childOU++ {
OUSlice = append(OUSlice, OUs.OrganizationalUnits[childOU])
}
if OUs.NextToken == nil {
break
}
nextToken = OUs.NextToken //If NextToken != nil, keep looping
}
return OUSlice, nil
}
//Get all (not only immediate) OUs under the given OU
func getOUsRecursive(OU *organizations.OrganizationalUnit, awsClient awsprovider.Client) ([]*organizations.OrganizationalUnit, error) {
var OUs []*organizations.OrganizationalUnit
//Populate OUs by getting immediate OUs (direct nodes)
currentOUs, err := getOUs(OU, awsClient)
if err != nil {
return nil, err
}
//Loop through all child OUs. Append the child OU, then append the OUs of the child OU
for _, currentOU := range currentOUs {
OUs = append(OUs, currentOU)
OUsRecursive, _ := getOUsRecursive(currentOU, awsClient)
OUs = append(OUs, OUsRecursive...)
}
return OUs, nil
}
//Get cost of given account
func (o *getOptions) getAccountCost(accountID *string, unit *string, awsClient awsprovider.Client, cost *float64) error {
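	// Predefined windows are queried at monthly granularity, explicit date ranges at
	// daily granularity; every NetUnblendedCost bucket is summed into *cost.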
var start, end, granularity string
if o.time != "" {
start, end = getTimePeriod(&o.time)
granularity = "MONTHLY"
}
if o.start != "" && o.end != "" {
start = o.start
end = o.end
granularity = "DAILY"
}
metrics := []string{
"NetUnblendedCost",
}
//Get cost information for chosen account
costs, err := awsClient.GetCostAndUsage(&costexplorer.GetCostAndUsageInput{
Filter: &costexplorer.Expression{
Dimensions: &costexplorer.DimensionValues{
Key: aws.String("LINKED_ACCOUNT"),
Values: []*string{
accountID,
},
},
},
TimePeriod: &costexplorer.DateInterval{
Start: aws.String(start),
End: aws.String(end),
},
Granularity: aws.String(granularity),
Metrics: aws.StringSlice(metrics),
})
if err != nil {
return err
}
//Loop through month-by-month cost and increment to get total cost
for month := 0; month < len(costs.ResultsByTime); month++ {
monthCost, err := strconv.ParseFloat(*costs.ResultsByTime[month].Total["NetUnblendedCost"].Amount, 64)
if err != nil {
return err
}
*cost += monthCost
}
//Save unit
*unit = *costs.ResultsByTime[0].Total["NetUnblendedCost"].Unit
return nil
}
//Get cost of given OU by aggregating costs of only immediate accounts under given OU
func (o *getOptions) getOUCost(cost *float64, unit *string, OU *organizations.OrganizationalUnit, awsClient awsprovider.Client) error {
//Populate accounts
accounts, err := getAccounts(OU, awsClient)
if err != nil {
return err
}
//Increment costs of accounts
for _, account := range accounts {
if err := o.getAccountCost(account, unit, awsClient, cost); err != nil {
return err
}
}
return nil
}
//Get cost of given OU by aggregating costs of all (including immediate) accounts under OU
func (o *getOptions) getOUCostRecursive(cost *float64, unit *string, OU *organizations.OrganizationalUnit, awsClient awsprovider.Client) error {
//Populate OUs
OUs, err := getOUs(OU, awsClient)
if err != nil {
return err
}
//Loop through all child OUs, get their costs, and store it to cost of current OU
for _, childOU := range OUs {
if err := o.getOUCostRecursive(cost, unit, childOU, awsClient); err != nil {
return err
}
}
//Return cost of child OUs + cost of immediate accounts under current OU
if err := o.getOUCost(cost, unit, OU, awsClient); err != nil {
return err
}
return nil
}
//Get time period based on time flag
func getTimePeriod(timePtr *string) (string, string) {
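	// The default window runs from the first of the current month one year ago to
	// today; each case below overrides it for the requested shortcut.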
t := time.Now()
//Starting from the 1st of the current month last year i.e. if today is 2020-06-29, then start date is 2019-06-01
start := fmt.Sprintf("%d-%02d-%02d", t.Year()-1, t.Month(), 01)
end := fmt.Sprintf("%d-%02d-%02d", t.Year(), t.Month(), t.Day())
switch *timePtr {
case "LM": //Last Month
start = fmt.Sprintf("%d-%02d-%02d", t.Year(), t.Month()-1, 01)
end = fmt.Sprintf("%d-%02d-%02d", t.Year(), t.Month(), 01)
case "MTD":
start = fmt.Sprintf("%d-%02d-%02d", t.Year(), t.Month(), 01)
case "YTD":
start = fmt.Sprintf("%d-%02d-%02d", t.Year(), 01, 01)
case "3M":
if month := t.Month(); month > 3 {
start = t.AddDate(0, -3, 0).Format("2006-01-02")
} else {
start = t.AddDate(-1, 9, 0).Format("2006-01-02")
}
case "6M":
		if month := t.Month(); month > 6 {
start = t.AddDate(0, -6, 0).Format("2006-01-02")
} else {
start = t.AddDate(-1, 6, 0).Format("2006-01-02")
}
case "1Y":
start = t.AddDate(-1, 0, 0).Format("2006-01-02")
}
return start, end
}
func (o *getOptions) printCostGet(cost float64, unit string, ops *getOptions, OU *organizations.OrganizationalUnit) error {
resp := getCostResponse{
OuId: *OU.Id,
OuName: *OU.Name,
CostUSD: cost,
}
if ops.csv { //If csv option specified, print result in csv
fmt.Printf("\n%s,%f (%s)\n\n", *OU.Name, cost, unit)
return nil
}
if ops.recursive {
fmt.Println("Cost of all accounts under OU:")
}
outputflag.PrintResponse(o.output, resp)
return nil
}
| newGetOptions |
imux-server.rs | use ark_std::rand::Rng;
use async_std::{io::BufWriter, net::TcpListener, prelude::*, task};
use bench_utils::*;
use clap::{App, Arg, ArgMatches};
extern crate io_utils;
use io_utils::{counting::CountingIO, imux::IMuxAsync};
fn get_random_buf(log_len: u32) -> Vec<u8> {
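    // Build a 2^log_len-byte payload from ark_std's deterministic test RNG so that
    // repeated benchmark runs send identical data.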
let mut buf = vec![0u8; 2usize.pow(log_len)];
let mut rng = ark_std::test_rng();
rng.fill(&mut buf[..]);
buf
}
fn | () -> ArgMatches<'static> {
App::new("imux-server-example")
.arg(
Arg::with_name("port")
.short("p")
.long("port")
.takes_value(true)
.help("Server port (default 8000)")
.required(false),
)
.arg(
Arg::with_name("num")
.short("n")
.long("num")
.takes_value(true)
.help("Log of the number of bytes to send (default 25)")
.required(false),
)
.get_matches()
}
fn main() {
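    // Parse the CLI flags, generate the payload, then time sending it once over a
    // single buffered TCP stream and once multiplexed across 16 streams.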
let args = get_args();
let port = args.value_of("port").unwrap_or("8000");
let server_addr = format!("0.0.0.0:{}", port);
let num = if args.is_present("num") {
clap::value_t!(args.value_of("num"), u32).unwrap()
} else {
25
};
let test_buf = get_random_buf(num);
task::block_on(async move {
// Form connections
let listener = TcpListener::bind(server_addr).await.unwrap();
let mut incoming = listener.incoming();
let mut writers = Vec::with_capacity(16);
for _ in 0..16 {
let stream = incoming.next().await.unwrap().unwrap();
writers.push(CountingIO::new(BufWriter::new(stream)));
}
let write_time = start_timer!(|| "Sending buffer across 1 connection");
// Send the message length
writers[0]
.write_all(&(test_buf.len() as u64).to_le_bytes())
.await
.unwrap();
// Send the rest of the message
writers[0].write_all(&test_buf).await.unwrap();
writers[0].flush().await.unwrap();
end_timer!(write_time);
add_to_trace!(|| "Communication", || format!(
"Wrote {} bytes",
writers[0].count()
));
// Reset counts
writers[0].reset();
let write_time = start_timer!(|| "Sending buffer across 16 connections");
let mut writer = IMuxAsync::new(writers);
writer.write(&test_buf).await.unwrap();
writer.flush().await.unwrap();
end_timer!(write_time);
add_to_trace!(|| "Communication", || format!(
"Wrote {} bytes",
writer.count()
));
});
}
| get_args |
util.py | import argparse
from collections import defaultdict
from typing import Dict, List
from sacrerouge import build_argument_parser
from sacrerouge.data import Metrics, MetricsDict
from sacrerouge.data.types import ReferenceType, SummaryType
from sacrerouge.io import JsonlReader
def load_summaries(file_path: str) -> List[SummaryType]:
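    """Load the 'summary' field from every line of a JSONL file."""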
fields = []
for data in JsonlReader(file_path).read():
fields.append(data['summary'])
return fields
def load_references(file_path: str) -> List[ReferenceType]:
fields = []
for data in JsonlReader(file_path).read():
if 'summary' in data:
fields.append([data['summary']['text']]) | elif 'summaries' in data:
fields.append([summary['text'] for summary in data['summaries']])
elif 'reference' in data:
fields.append([data['reference']['text']])
elif 'references' in data:
fields.append([reference['text'] for reference in data['references']])
return fields
def load_metrics_dicts(file_path: str) -> Dict[str, Dict[str, MetricsDict]]:
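    """Group metrics by instance_id and then by summarizer_id."""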
metrics_dicts = defaultdict(dict)
with JsonlReader(file_path, Metrics) as f:
for instance in f:
metrics_dicts[instance.instance_id][instance.summarizer_id] = instance.metrics
return metrics_dicts
def command_exists(parser: argparse.ArgumentParser, command: List[str]) -> bool:
"""
Checks to see if a specific command exists in the `parser`. The `parser` should
be the root `ArgumentParser` for the command. The method will traverse through
the `parser` to see if the `command` exists. This method does not work for checking
arguments of a specific command.
"""
# _subparsers is none when no subcommands exist
if parser._subparsers is None:
return False
for action in parser._subparsers._group_actions:
for choice, subparser in action.choices.items():
if choice == command[0]:
if len(command) == 1:
# The whole command has been matched
return True
else:
return command_exists(subparser, command[1:])
# We didn't find the first command, so it doesn't exist
return False
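# Illustrative usage sketch (the subcommand names below are placeholders, not
# necessarily commands that exist in the real CLI):
#   parser = build_argument_parser()
#   command_exists(parser, ['some-command'])            # True if the subcommand is registered
#   command_exists(parser, ['some-command', 'missing']) # False otherwise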
def sacrerouge_command_exists(command: List[str]) -> bool:
"""Verifies if the command exists for the 'sacrerouge' command."""
parser = build_argument_parser()
return command_exists(parser, command) | |
problem4.go | package chapter3
import (
"errors"
)
type QueueUsingStacks struct {
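	// older serves reads in FIFO order (oldest element on top after a transfer);
	// newer simply collects incoming pushes.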
older *Stack
newer *Stack
}
func | () *QueueUsingStacks {
return &QueueUsingStacks{&Stack{}, &Stack{}}
}
func (s *QueueUsingStacks) Add(value int) {
s.newer.Push(value)
}
func (s *QueueUsingStacks) newToOld() {
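	// Move every element from the incoming stack to the outgoing stack; popping from
	// one and pushing onto the other reverses the order, exposing the oldest element.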
for {
val, err := s.newer.Pop()
if err != nil {
break
}
s.older.Push(val)
}
}
func (s *QueueUsingStacks) Remove() (int, error) {
	if s.older.IsEmpty() {
		if s.newer.IsEmpty() {
			return -1, errors.New("Cannot pop. QueueUsingStacks is empty.")
		}
		s.newToOld()
	}
	val, err := s.older.Pop()
	if err != nil {
		return -1, err
	}
	return val, nil
}
func (s *QueueUsingStacks) Peek() (int, error) {
	if s.older.IsEmpty() {
		if s.newer.IsEmpty() {
			return -1, errors.New("Cannot peek. QueueUsingStacks is empty.")
		}
		s.newToOld()
	}
	val, err := s.older.Peek()
	if err != nil {
		return -1, err
	}
	return val, nil
}
func (s *QueueUsingStacks) IsEmpty() bool {
return s.newer.IsEmpty() && s.older.IsEmpty()
}
| GetQueueUsingStacks |
types.go | package versioned
type (
// configuration just loads a version field
// from a configuration
configuration struct {
Version string `yaml:"version" json:"version"`
}
// VersionReader is a bucket for config processing
VersionReader struct {
YAML YAML | YAML struct { }
// JSON is a bucket to process JSON files
JSON struct { }
)
// NewVersionReader creates a new version reader object
func NewVersionReader() *VersionReader {
return &VersionReader{
YAML: YAML{},
JSON: JSON{},
}
} | JSON JSON
}
// YAML is a bucket to process YAML files |