//======---------------------------------------------------------------======//
// //
// Copyright 2021 Evan Cox //
// //
// Licensed under the Apache License, Version 2.0 (the "License"); //
// you may not use this file except in compliance with the License. //
// You may obtain a copy of the License at //
// //
// http://www.apache.org/licenses/LICENSE-2.0 //
// //
// Unless required by applicable law or agreed to in writing, software //
// distributed under the License is distributed on an "AS IS" BASIS, //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and //
// limitations under the License. //
// //
//======---------------------------------------------------------------======//
use crate::data::*;
use std::str::FromStr;
use toml::value::{Array, Table, Value};
#[cfg(test)]
use strum::EnumDiscriminants;
// I know the duplication is terrible, but I'm trying to avoid carrying
// dependencies in non-test mode as much as I can and I don't feel like
// implementing a proc-macro for it.
//
// Sue me.
#[cfg(test)]
#[derive(Debug, EnumDiscriminants)]
pub enum ParseError {
NotATable,
NoMetadata,
InvalidLibType,
InvalidSpdxLicense,
InvalidSemVerVersion(String),
WrongType(String),
InvalidLanguage(String),
InvalidLanguageVersion(String),
UnknownKey(String),
MissingKey(String),
InvalidTOML(String),
}
#[cfg(not(test))]
#[derive(Debug)]
pub enum ParseError {
NotATable,
NoMetadata,
InvalidLibType,
InvalidSpdxLicense,
InvalidSemVerVersion(String),
WrongType(String),
InvalidLanguage(String),
InvalidLanguageVersion(String),
UnknownKey(String),
MissingKey(String),
InvalidTOML(String),
}
#[cfg(test)]
pub type ParseErrorTypes = ParseErrorDiscriminants;
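// Roughly, `EnumDiscriminants` derives a fieldless mirror of `ParseError`
// plus `From` impls (a sketch of strum's output, not the exact expansion):
//
//     pub enum ParseErrorDiscriminants { NotATable, NoMetadata, /* ... */ }
//     impl From<ParseError> for ParseErrorDiscriminants { /* ... */ }
//
// which is what lets the tests compare error variants without matching on
// their payloads.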
/// Represents a result from trying to parse a TOML config file.
pub type ParseResult<T> = Result<T, ParseError>;
use ParseError::*;
/// Tries to parse an input string as a TOML file.
///
/// # Parameters
/// - `input`: The string to try to parse
fn from_toml(input: &str) -> ParseResult<Table> {
use Value::*;
let value = match input.parse::<Value>() {
Ok(val) => val,
Err(e) => return Err(InvalidTOML(format!("TOML parse error: '{:?}'", e))),
};
match value {
Table(table) => Ok(table),
_ => Err(NotATable),
}
}
/// Tries to get a key from a table; if it doesn't exist, returns an error.
///
/// # Parameters
/// - `table`: The table to search in
/// - `key`: The key to search for
/// - `name`: Name of the table, used for errors
fn required_key<'a>(table: &'a Table, key: &str, name: &str) -> ParseResult<&'a Value> {
match table.get(key) {
Some(val) => Ok(val),
None => Err(MissingKey(format!(
"key '{}' is missing from table '{}'!",
key, name
))),
}
}
/// Attempts to read a key of a specific type. If the key doesn't exist
/// or is of the wrong type, an error is returned. Otherwise, the value is
/// returned.
///
/// # Parameters
/// - `table`: The table to search in for `key`
/// - `key`: The key to look for
/// - `name`: Name of the table, for use in error messages
/// - `apply`: The function to apply to the key's mapped value, something
/// like `Value::as_str` or `Value::as_bool`
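///
/// # Example
/// ```ignore
/// // Illustrative only; assumes a `table` with a string `name` key.
/// let name: &str = required_type(&table, "name", "package", Value::as_str)?;
/// ```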
fn required_type<'a, F, T>(table: &'a Table, key: &str, name: &str, apply: F) -> ParseResult<T>
where
F: Fn(&'a Value) -> Option<T>,
{
let key = required_key(table, key, name)?;
match apply(key) {
Some(s) => Ok(s),
None => Err(WrongType(format!(
"Expected key '{}' in table '{}' to be a {}!",
key,
name,
std::any::type_name::<T>()
))),
}
}
/// Attempts to read a key of a specific type. If the key doesn't exist,
/// `Ok(None)` is returned. If the key does exist but is of the wrong type,
/// an error is returned. Otherwise, the value is returned.
///
/// # Parameters
/// - `table`: The table to search in for `key`
/// - `key`: The key to look for
/// - `name`: Name of the table, for use in error messages
/// - `apply`: The function to apply to the key's mapped value, something
/// like `Value::as_str` or `Value::as_bool`
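///
/// # Example
/// ```ignore
/// // Illustrative only: yields `Ok(None)` when `individual` is absent.
/// let individual: Option<bool> =
///     optional_type(&table, "individual", "tests", Value::as_bool)?;
/// ```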
fn optional_type<'a, F, T>(
table: &'a Table,
key: &str,
name: &str,
apply: F,
) -> ParseResult<Option<T>>
where
F: Fn(&'a Value) -> Option<T>,
{
let value = table.get(key);
match value {
Some(value) => match apply(value) {
Some(s) => Ok(Some(s)),
None => Err(WrongType(format!(
"Expected key '{}' in table '{}' to be a {}!",
key,
name,
std::any::type_name::<T>()
))),
},
None => Ok(None),
}
}
/// Tries to get a string key from a table, converting the `&str` into a
/// `String` automatically.
///
/// Key must both exist and map to a string, or else an error is returned.
fn required_string(table: &Table, key: &str, name: &str) -> ParseResult<String> {
match required_type(table, key, name, Value::as_str) {
Ok(s) => Ok(s.to_owned()),
Err(e) => Err(e),
}
}
fn optional_string(table: &Table, key: &str, name: &str) -> ParseResult<Option<String>> {
let result = optional_type(table, key, name, Value::as_str)?;
match result {
Some(s) => Ok(Some(s.to_owned())),
None => Ok(None),
}
}
/// Tries to get a boolean key from a table.
///
/// Key must both exist and map to a bool, or else an error is returned.
fn required_bool(table: &Table, key: &str, name: &str) -> ParseResult<bool> {
required_type(table, key, name, Value::as_bool)
}
fn optional_bool(table: &Table, key: &str, name: &str) -> ParseResult<Option<bool>> {
optional_type(table, key, name, Value::as_bool)
}
fn string_array_from_array(array: &Array, name: &str) -> ParseResult<Vec<String>> {
let mut strings = Vec::<String>::with_capacity(array.len());
for entry in array {
if !entry.is_str() {
return Err(WrongType(format!(
"'{}' array must only contain strings!",
name
)));
}
strings.push(entry.as_str().unwrap().to_owned());
}
Ok(strings)
}
/// Reads an array of strings from a table.
fn required_string_array(table: &Table, key: &str, name: &str) -> ParseResult<Vec<String>> {
let array = required_type(table, key, name, Value::as_array)?;
string_array_from_array(array, format!("{}.{}", name, key).as_str())
}
/// Gets an array of strings at `key` if the key exists,
/// else returns `Ok(None)`.
fn optional_string_array(table: &Table, key: &str, name: &str) -> ParseResult<Option<Vec<String>>> {
if !table.contains_key(key) {
return Ok(None);
}
match required_string_array(table, key, name) {
Ok(vec) => {
if !vec.is_empty() {
Ok(Some(vec))
} else {
Ok(None)
}
}
Err(e) => Err(e),
}
}
/// Tries to parse includes from a table.
///
/// If the key doesn't exist, `None` is returned. If the key does
/// exist but is an empty array, `None` is returned. Otherwise,
/// the list of includes is returned.
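///
/// An illustrative config fragment (paths invented for the example):
/// ```toml
/// include = ['include', 'third_party/include']
/// ```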
fn parse_include(table: &Table, name: &str) -> ParseResult<Option<Vec<String>>> {
match optional_string_array(table, "include", name) {
Ok(Some(vec)) => {
if vec.is_empty() {
Ok(None)
} else {
Ok(Some(vec))
}
}
Ok(None) => Ok(None),
Err(e) => Err(e),
}
}
/// Converts a language in the `main` key of a lang table into a real language
/// or errors out if it isn't valid.
fn parse_lang_main(table: &Table, name: &str) -> ParseResult<Language> {
let main_raw = required_string(table, "main", name)?;
match Language::from_name(&main_raw) {
Some(lang) => Ok(lang),
None => Err(InvalidLanguage(format!(
"in table '{}', language '{}' is unknown",
name, main_raw
))),
}
}
/// Tries to parse the `version` key in a `lang` table,
/// based on the result of the previous `lang` entry.
///
/// # Parameters
/// - `table`: The table to look in
/// - `lang`: A language resolved from the `lang` key
/// - `name`: Name of the table being read
fn parse_lang_version(table: &Table, lang: Language, name: &str) -> ParseResult<Language> {
let version = required_string(table, "version", name)?;
match lang.into_version(version.as_str()) {
Some(new) => Ok(new),
None => Err(InvalidLanguageVersion(format!(
"in '{}', version '{}' is unknown for language '{}'!",
name, version, lang
))),
}
}
/// Reads a `lang` key from a table.
///
/// Handles parsing both the string variation and the inline table
/// variation, and handles dispatching version/lang/whatever parsing
/// to the correct places when necessary.
///
/// # Parameters
/// - `table`: The table to read
/// - `name`: Name of the table being read, for error messages
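///
/// The two accepted shapes, illustrated in TOML (versions taken from the
/// tests below):
/// ```toml
/// lang = 'c++'
/// lang = { main = 'c++', version = 'c++11', extensions = false }
/// ```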
fn parse_lang(table: &Table, name: &str) -> ParseResult<LanguageInfo> {
let lang = required_key(table, "lang", name)?;
let formatted_name = format!("{}.lang", name);
// `lang = "c++"` or similar, need to get default
// version for language and whatnot
if let Value::String(s) = lang {
return match LanguageInfo::from_name(s) {
Some(lang) => Ok(lang),
None => Err(InvalidLanguage(format!(
"in table '{}', language '{}' is unknown",
name, s,
))),
};
}
// `lang = { ... }`, need to extract specific info and check for errors
if let Value::Table(table) = lang {
let lang = parse_lang_main(table, formatted_name.as_str())?;
let full_lang = parse_lang_version(table, lang, formatted_name.as_str())?;
let extensions = required_bool(table, "extensions", formatted_name.as_str())?;
return Ok(LanguageInfo::new(full_lang, extensions));
}
// `lang` isn't one of the two valid forms
Err(InvalidLanguage(format!(
"expected '{}' to be a string or inline table!",
formatted_name
)))
}
/// Reads the common data for a target's table
///
/// # Parameters
/// - `table`: The table to read
/// - `name`: Name of the table being read, for error messages
fn parse_common(table: &Table, name: &str) -> ParseResult<DeserializedCommon> {
let language = parse_lang(table, name)?;
let include = parse_include(table, name)?;
Ok(DeserializedCommon::new(language, include))
}
/// Reads a single binary target from a table.
///
/// # Parameters
/// - `table`: The table to read
fn parse_binary(table: &Table) -> ParseResult<DeserializedBinary> {
let common = parse_common(table, "bin")?;
let name = required_string(table, "name", "bin")?;
let files = required_string_array(table, "files", "bin")?;
Ok(DeserializedBinary::new(common, name, files))
}
/// Gets a table that's expected to exist inside the given table.
fn required_table<'a>(table: &'a Table, table_name: &str) -> ParseResult<&'a Table> {
let entry = table.get(table_name);
if entry.is_none() {
return Err(MissingKey(format!(
"expected table '{}' to exist!",
table_name
)));
}
match entry.unwrap().as_table() {
Some(table) => Ok(table),
None => {
return Err(WrongType(format!(
"global key '{}' must be a table!",
table_name
)));
}
}
}
/// Parses a list of binaries from a table. Can handle no binaries,
/// one binary, etc.
fn parse_binaries(table: &Table) -> ParseResult<Vec<DeserializedBinary>> {
if !table.contains_key("bin") {
// no parse error occurred, having no binaries is a
// perfectly legitimate case
return Ok(vec![]);
}
let bin = {
let entry = table.get("bin").expect("literally how");
match entry.as_array() {
Some(arr) => arr,
None => {
return Err(WrongType("`bin` table must be array!".to_owned()));
}
}
};
let mut binaries = Vec::<DeserializedBinary>::with_capacity(bin.len());
// could be done with iterators, but that would have done a **lot** of
// loops over the data for no real reason other than error checking
// and converting `toml::Value`s into `toml::Table`s or whatever
for table in bin {
if !table.is_table() {
return Err(WrongType(
"all bin entries must be inline tables! 'array of table' syntax works the best."
.to_owned(),
));
}
match parse_binary(&table.as_table().unwrap()) {
Ok(bin) => binaries.push(bin),
Err(e) => return Err(e),
}
}
Ok(binaries)
}
/// Converts a toml::Value into an **individual** group
///
/// # Parameters
/// - `value`: The value to try to convert into a group
fn group_from_string(value: &Value, name: &str) -> ParseResult<DeserializedGroup> {
let file = match value {
Value::String(s) => s.to_owned(),
_ => {
return Err(WrongType(format!(
"`{}.files` array must only contain strings!",
name
)));
}
};
Ok(DeserializedGroup::new(file.clone(), vec![file], None))
}
/// Converts a toml::Value into a **non-individual** group
///
/// # Parameters
/// - `value`: The value to try to convert
fn group_from_table(value: &Value, name: &str) -> ParseResult<DeserializedGroup> {
let table = match value {
Value::Table(table) => table,
_ => return Err(NotATable),
};
let group_name = required_string(table, "name", name)?;
let files = required_string_array(table, "files", name)?;
let lang = if table.contains_key("lang") {
Some(parse_lang(table, name)?)
} else {
None
};
Ok(DeserializedGroup::new(group_name, files, lang))
}
/// Parses a list of groups when the target's configuration says that they're
/// individual groups, instead of real groups.
///
/// # Parameters
/// - `table`: The table to try to read `files` from
/// - `name`: The name of the target, for pretty errors
fn parse_individual(table: &Table, name: &str) -> ParseResult<DeserializedGroupList> {
let exes = required_type(table, "files", name, Value::as_array)?;
// convert each entry into a group; any non-string entry causes an
// early-returned error
let groups: ParseResult<Vec<DeserializedGroup>> = exes
.iter()
.map(|element| group_from_string(element, name))
.collect();
match groups {
Ok(groups) => Ok(DeserializedGroupList::new(groups, true)),
Err(e) => Err(e),
}
}
/// Parses a list of groups when the target's configuration says that they're
/// real groups, and thus they're parsed as tables.
///
/// # Parameters
/// - `table`: The table to try to read `exes` from
/// - `name`: The name of the target, for pretty errors
fn parse_groups(table: &Table, name: &str) -> ParseResult<DeserializedGroupList> {
let exes = required_type(table, "exes", name, Value::as_array)?;
// for each value in the list, try to 1. convert it into a table and 2. convert it
// into a group. If one or both of those can't be done, an error is propagated
let groups: ParseResult<Vec<DeserializedGroup>> = exes
.iter()
.map(|element| group_from_table(element, name))
.collect();
match groups {
Ok(groups) => Ok(DeserializedGroupList::new(groups, false)),
Err(e) => Err(e),
}
}
/// Parses the exe/files keys on tests/benchmarks, using the `individual` key.
///
/// Since the logic is identical, it's pulled out into a function.
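///
/// Illustrated in TOML (shapes taken from the tests below):
/// ```toml
/// # `individual = true`: each file becomes its own group.
/// individual = true
/// files = ['one.cc', 'two.cc']
///
/// # `individual` false or absent: explicit named groups.
/// exes = [{ name = 'one', files = ['one.cc'] }]
/// ```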
fn parse_exes_or_files(table: &Table, name: &str) -> ParseResult<DeserializedGroupList> {
match optional_bool(table, "individual", name)? {
Some(true) => parse_individual(table, name),
_ => parse_groups(table, name),
}
}
fn parse_benches(table: &Table) -> ParseResult<Option<DeserializedBenchmarks>> {
if !table.contains_key("benches") {
return Ok(None);
}
let benches = required_table(table, "benches")?;
let common = parse_common(&benches, "benches")?;
let benches = parse_exes_or_files(&benches, "benches")?;
Ok(Some(DeserializedBenchmarks::new(common, benches)))
}
fn parse_tests(table: &Table) -> ParseResult<Option<DeserializedTests>> {
if !table.contains_key("tests") {
return Ok(None);
}
let tests = required_table(table, "tests")?;
let common = parse_common(&tests, "tests")?;
let benches = parse_exes_or_files(&tests, "tests")?;
Ok(Some(DeserializedTests::new(common, benches)))
}
fn parse_lib(table: &Table) -> ParseResult<Option<DeserializedLibrary>> {
if !table.contains_key("lib") {
return Ok(None);
}
let lib = required_table(table, "lib")?;
let common = parse_common(&lib, "lib")?;
let files = required_string_array(&lib, "files", "lib.files")?;
let lib_type = match required_string(&lib, "type", "lib")?.as_str() {
"static" => LibraryType::Static,
"dynamic" => LibraryType::Dynamic,
_ => return Err(InvalidLibType),
};
Ok(Some(DeserializedLibrary::new(common, lib_type, files)))
}
fn parse_meta(table: &Table) -> ParseResult<Metadata> {
let meta = required_table(table, "package")?;
let name = required_string(&meta, "name", "package")?;
let version = required_string(&meta, "version", "package")?;
let license = optional_string(&meta, "license", "package")?;
let description = optional_string(&meta, "description", "package")?;
let authors: Vec<Author> = required_string_array(&meta, "authors", "package")?
.drain(..)
.map(|string| Author::from_string(string))
.collect();
let version = match semver::Version::from_str(version.as_str()) {
Ok(version) => version,
Err(e) => {
return Err(InvalidSemVerVersion(format!(
"package.version must be a valid SemVer 2.0.0 string! error: '{}'",
e
)));
}
};
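// `license = 'NONE'` is an explicit opt-out and maps to `None`;
// anything else must parse as a valid SPDX licensee.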
let license = match license {
None => None,
Some(s) => {
if s == "NONE" {
None
} else {
match spdx::Licensee::parse(s.as_str()) {
Ok(license) => Some(license),
Err(_) => return Err(InvalidSpdxLicense),
}
}
}
};
Ok(Metadata::new(name, version, authors, license, description))
}
fn into_package(table: Table) -> ParseResult<DeserializedPackage> {
let meta = parse_meta(&table)?;
let lib = parse_lib(&table)?;
let tests = parse_tests(&table)?;
let benches = parse_benches(&table)?;
let binaries = parse_binaries(&table)?;
Ok(DeserializedPackage::new(
meta, lib, tests, benches, binaries,
))
}
pub fn parse_package(input: &str) -> ParseResult<DeserializedPackage> {
match from_toml(input) {
Ok(table) => into_package(table),
Err(e) => Err(e),
}
}
pub fn parse_sourceless(_input: &str) {
unimplemented!()
}
#[cfg(test)]
mod tests {
use super::*;
use crate::core::reader::ParseErrorTypes;
fn into_toml(data: &str) -> Table {
from_toml(data).unwrap()
}
#[test]
fn meta_minimum() {
let data = into_toml(
"[package]
name = 'test'
version = '0.0.1'
authors = []",
);
let meta = parse_meta(&data).unwrap();
assert_eq!(meta.authors, []);
assert_eq!(meta.version, semver::Version::new(0, 0, 1));
assert_eq!(meta.name, "test");
assert_eq!(meta.description, Metadata::default_description(None));
}
#[test]
fn meta_missing() {
let input_data = [
"[package]
version = '0.0.1'
authors = []",
"[package]
name = 'hello'
authors = []",
"[package]
name = 'hello'
version = '0.0.1'",
];
for input in input_data.iter() {
let data = into_toml(input);
match parse_meta(&data) {
Ok(_) => assert!(false),
Err(e) => assert_eq!(ParseErrorTypes::MissingKey, e.into()),
}
}
}
#[test]
fn meta_empty() {
let data = into_toml("");
match parse_meta(&data) {
Ok(_) => assert!(false),
Err(e) => assert_eq!(ParseErrorTypes::MissingKey, e.into()),
}
}
#[test]
fn meta_wrong_type() {
let data = into_toml("package = []");
match parse_meta(&data) {
Ok(_) => assert!(false),
Err(e) => assert_eq!(ParseErrorTypes::WrongType, e.into()),
}
}
#[test]
fn meta_all() {
let data = into_toml(
"[package]
name = 'test'
version = '13.6.2-alpha.6+82590'
authors = ['Andrew Donavin <[email protected]>']
description = 'this is a description'",
);
let meta = parse_meta(&data).unwrap();
assert_eq!(
meta.authors,
[Author::new(
"Andrew Donavin".to_owned(),
Some("[email protected]".to_owned()),
)]
);
assert_eq!(
meta.version,
semver::Version::parse("13.6.2-alpha.6+82590").unwrap()
);
assert_eq!(meta.name, "test");
assert_eq!(meta.description, "this is a description");
}
#[test]
fn lang() {
let data = [
"lang = { main = 'c++', version = 'c++11', extensions = false }",
"lang = { main = 'rust', version = '2018', extensions = true }",
"lang = { main = 'c', version = 'c2x', extensions = true }",
"lang = { main = 'c++', version = 'latest', extensions = true }",
"lang = { main = 'rust', version = 'default', extensions = true }",
"lang = 'c++'",
"lang = 'c+++'",
"lang = { main = 'c', version = 'c25', extensions = true }",
];
let expected: Vec<Result<LanguageInfo, ParseErrorTypes>> = vec![
Ok(LanguageInfo::new(Language::Cpp(CppVersion::Cpp11), false)),
Ok(LanguageInfo::new(
Language::Rust(RustEdition::Rust2018),
true,
)),
Ok(LanguageInfo::new(Language::C(CVersion::C2x), true)),
Ok(LanguageInfo::new(Language::Cpp(CppVersion::latest()), true)),
Ok(LanguageInfo::new(
Language::Rust(RustEdition::default()),
true,
)),
Ok(LanguageInfo::new(
Language::Cpp(CppVersion::default()),
false,
)),
Err(ParseErrorTypes::InvalidLanguage),
Err(ParseErrorTypes::InvalidLanguageVersion),
];
for (i, input_string) in data.iter().enumerate() {
let data = into_toml(input_string);
let result = parse_lang(&data, "<TEST>");
match expected[i] {
Ok(info) => assert_eq!(info, result.unwrap()),
// why does Rust not just let you not compare the held value?
// why do I need a whole ~10 dependencies just to check if
// two enums are of the same variant without writing a case for each?
Err(e) => assert_eq!(e, result.unwrap_err().into()),
}
}
}
#[test]
fn include() {
let data = [
"include = []",
"include = ['one', 'two', 'three']",
"",
"include = [[], 'one', 'two']",
];
let expected: Vec<Result<Option<Vec<String>>, ParseErrorTypes>> = vec![
Ok(None),
Ok(Some(vec!["one".into(), "two".into(), "three".into()])),
Ok(None),
Err(ParseErrorTypes::WrongType),
];
for (i, input_data) in data.iter().enumerate() {
let data = into_toml(input_data);
let result = parse_include(&data, "<TEST>");
match &expected[i] {
Ok(Some(vec)) => assert_eq!(vec, &result.unwrap().unwrap()),
Ok(None) => assert!(result.unwrap().is_none()),
Err(e) => assert_eq!(*e, result.unwrap_err().into()),
}
}
}
#[test]
fn binary_all_options() {
let data = into_toml(
"[[bin]]
name = 'hello'
files = ['src/main.cc']
include = ['/opt/why/would/you/do/this']
lang = 'c++'",
);
let bins = parse_binaries(&data).unwrap();
assert_eq!(bins[0].name(), "hello");
assert_eq!(bins[0].files(), ["src/main.cc"]);
assert_eq!(bins[0].lang(), LanguageInfo::from_name("c++").unwrap());
assert_eq!(bins[0].include(), ["/opt/why/would/you/do/this"]);
}
#[test]
fn binary_multiple() {
let data = into_toml(
"[[bin]]
name = ''
files = []
lang = 'rust'
[[bin]]
name = 'one'
files = ['src/**/*.c', 'src/main.c']
lang = 'c'
",
);
let bins = parse_binaries(&data);
assert!(bins.is_ok());
assert!(bins.unwrap().len() == 2)
}
#[test]
fn binary_missing_keys() {
let inputs = [
"[[bin]]
files = ['src/main.cc']
lang = 'c++'",
"[[bin]]
name = 'hello'
lang = 'c++'",
"[[bin]]
name = 'hello'
files = []",
];
for input in inputs.iter() {
let data = into_toml(input);
let result = parse_binaries(&data);
assert!(result.is_err());
// why does literally everything have to fulfill fmt::Debug?
// why is there not an option to just not pretty-print on `panic!`?
match result {
Ok(_) => assert!(false),
Err(e) => assert_eq!(ParseErrorTypes::MissingKey, e.into()),
}
}
}
#[test]
fn binary_none() {
let data = into_toml("");
let result = parse_binaries(&data);
// no binaries is expected
assert!(result.is_ok());
}
#[test]
fn tests_individual() {
let data = into_toml(
"
[tests]
lang = 'c++'
individual = true
files = [
'one.cc',
'two.cc',
'tests/should_compile/*.cc'
]",
);
let result = parse_tests(&data).unwrap().unwrap();
assert_eq!(result.lang(), LanguageInfo::from_name("c++").unwrap());
assert_eq!(result.include(), &[] as &[String]);
assert_eq!(
*result.group_list(),
DeserializedGroupList::new(
vec![
DeserializedGroup::new("one.cc".to_owned(), vec!["one.cc".to_string()], None),
DeserializedGroup::new("two.cc".to_owned(), vec!["two.cc".to_string()], None),
DeserializedGroup::new(
"tests/should_compile/*.cc".to_owned(),
vec!["tests/should_compile/*.cc".to_string()],
None,
)
],
true,
)
);
}
#[test]
fn tests_grouped() {
let input_data = [
"[tests]
lang = 'c++'
individual = false
exes = [
{ name = 'one', files = ['one.cc'] },
{ name = 'two', files = ['two.cc', 'tests/should_compile/*.cc'] },
]",
"[tests]
lang = 'c++'
exes = [
{ name = 'one', files = ['one.cc'] },
{ name = 'two', files = ['two.cc', 'tests/should_compile/*.cc'] },
]",
];
for input in input_data.iter() {
let data = into_toml(input);
let result = parse_tests(&data).unwrap().unwrap();
assert_eq!(result.lang(), LanguageInfo::from_name("c++").unwrap());
assert_eq!(result.include(), &[] as &[String]);
assert_eq!(
*result.group_list(),
DeserializedGroupList::new(
vec![
DeserializedGroup::new("one".to_owned(), vec!["one.cc".to_string()], None),
DeserializedGroup::new(
"two".to_owned(),
vec![
"two.cc".to_string(),
"tests/should_compile/*.cc".to_string()
],
None,
)
],
false,
)
);
}
}
#[test]
fn tests_none() {
let data = into_toml("");
let result = parse_tests(&data).unwrap();
assert!(result.is_none());
}
#[test]
fn tests_broken() {
let input_data = [
"[tests]
lang = 'c++'
individual = true
exes = [{ name = 'one', files = ['tests/one.cc'] }]",
"[tests]
lang = 'c++'
individual = false
files = ['tests/one.cc']",
];
for input in input_data.iter() {
let data = into_toml(input);
let result = parse_tests(&data);
// i am not adding fmt::Debug to literally everything, i refuse
match result {
Ok(_) => assert!(false),
Err(e) => assert_eq!(ParseErrorTypes::MissingKey, e.into()),
}
}
}
#[test]
fn benches_individual() {
let data = into_toml(
"
[benches]
lang = 'c++'
individual = true
files = [
'one.cc',
'two.cc',
'benches/should_compile/*.cc'
]",
);
let result = parse_benches(&data).unwrap().unwrap();
assert_eq!(result.lang(), LanguageInfo::from_name("c++").unwrap());
assert_eq!(result.include(), &[] as &[String]);
assert_eq!(
*result.group_list(),
DeserializedGroupList::new(
vec![
DeserializedGroup::new("one.cc".to_owned(), vec!["one.cc".to_owned()], None),
DeserializedGroup::new("two.cc".to_owned(), vec!["two.cc".to_owned()], None),
DeserializedGroup::new(
"benches/should_compile/*.cc".to_owned(),
vec!["benches/should_compile/*.cc".to_owned()],
None,
),
],
true,
)
);
}
#[test]
fn benches_grouped() {
let input_data = [
"[benches]
lang = 'c++'
individual = false
exes = [
{ name = 'one', files = ['one.cc'], lang = 'rust' },
{ name = 'two', files = ['two.cc', 'tests/should_compile/*.cc'] },
]",
"[benches]
lang = 'c++'
exes = [
{ name = 'one', files = ['one.cc'], lang = 'rust' },
{ name = 'two', files = ['two.cc', 'tests/should_compile/*.cc'] },
]",
];
for input in input_data.iter() {
let data = into_toml(input);
let result = parse_benches(&data).unwrap().unwrap();
assert_eq!(result.lang(), LanguageInfo::from_name("c++").unwrap());
assert_eq!(result.include(), &[] as &[String]);
assert_eq!(
*result.group_list(),
DeserializedGroupList::new(
vec![
DeserializedGroup::new(
"one".to_owned(),
vec!["one.cc".to_string()],
Some(LanguageInfo::from_name("rust").unwrap()),
),
DeserializedGroup::new(
"two".to_owned(),
vec![
"two.cc".to_string(),
"tests/should_compile/*.cc".to_string()
],
None,
)
],
false,
)
);
}
}
#[test]
fn benches_none() {
let data = into_toml("");
let result = parse_benches(&data).unwrap();
assert!(result.is_none());
}
#[test]
fn benches_broken() {
let input_data = [
"[benches]
lang = 'c++'
individual = true
exes = [['benches/one.cc']]",
"[benches]
lang = 'c++'
individual = false
files = ['benches/one.cc']",
];
for input in input_data.iter() {
let data = into_toml(input);
let result = parse_benches(&data);
// i am not adding fmt::Debug to literally everything, i refuse
match result {
Ok(_) => assert!(false),
Err(e) => assert_eq!(ParseErrorTypes::MissingKey, e.into()),
}
}
}
#[test]
fn lib_minimum() {
let input_data = [
"[lib]
files = []
lang = 'c++'
type = 'static'",
"[lib]
lang = 'c++'
type = 'dynamic'
files = []",
];
let expected = [LibraryType::Static, LibraryType::Dynamic];
for (i, input) in input_data.iter().enumerate() {
let data = into_toml(input);
let lib = parse_lib(&data).unwrap();
match lib {
Some(lib) => {
assert_eq!(lib.lib_type(), expected[i]);
assert_eq!(lib.lang(), LanguageInfo::from_name("c++").unwrap());
}
None => assert!(false),
}
}
}
#[test]
fn lib_broken() {
let input_data = [
"[lib]
lang = 'c++'",
"[lib]
lang = 'rust'
files = []",
];
for input in input_data.iter() {
let data = into_toml(input);
let result = parse_lib(&data);
// i am not adding fmt::Debug to literally everything, i refuse
match result {
Ok(_) => assert!(false),
Err(e) => assert_eq!(ParseErrorTypes::MissingKey, e.into()),
}
}
{
let invalid_type = "[lib]
type = 'not static or dynamic'
files = []
lang = 'c++'";
let data = into_toml(invalid_type);
match parse_lib(&data) {
Ok(_) => assert!(false),
Err(e) => assert_eq!(ParseErrorTypes::InvalidLibType, e.into()),
}
}
}
#[test]
fn lib_none() {
let data = into_toml("");
assert!(parse_lib(&data).unwrap().is_none());
}
#[test]
fn lib_all() {
let data = into_toml(
"[lib]
lang = 'c++'
include = ['my/include/folder']
type = 'dynamic'
files = ['lib/foo.cc', 'lib/**/*.cc']",
);
let result = parse_lib(&data).unwrap().unwrap();
assert_eq!(result.lang(), LanguageInfo::from_name("c++").unwrap());
assert_eq!(result.include(), ["my/include/folder"]);
assert_eq!(result.lib_type(), LibraryType::Dynamic);
assert_eq!(result.files(), ["lib/foo.cc", "lib/**/*.cc"]);
}
#[test]
fn none_license() {
let input_data = [
"[package]
name = 'test'
authors = []
version = '0.0.1'",
"[package]
name = 'test'
authors = []
version = '0.0.1'
license = 'NONE'",
];
for input in input_data.iter() {
let data = into_toml(input);
let result = parse_meta(&data).unwrap();
assert!(result.license.is_none());
}
}
#[test]
fn entire_config() {
let input_data = "
[package]
name = 'hello'
license = 'Apache-2.0'
version = '4.6.3-beta'
authors = ['Evan Cox']
[lib]
lang = { main = 'c++', version = 'latest', extensions = false }
include = ['include']
files = ['lib/*.cc']
type = 'dynamic'
[benches]
lang = 'c++'
individual = true
files = ['benches/one.cc', 'benches/sub/*.cc']
[tests]
lang = { main = 'c', version = 'c90', extensions = true }
include = ['tests/support']
exes = [
{ name = 'lib-tests', files = ['lib/**/*.test.cc'] },
{ name = 'src-tests', files = ['src/**/*.test.cc'], lang = 'c' },
]
[[bin]]
name = 'not-epic'
lang = 'rust'
files = ['src/not-epic/*.cc']
[[bin]]
name = 'epic'
lang = 'c'
files = ['src/epic/*.cc']";
let result = parse_package(input_data).unwrap();
assert!(result.benches().is_some());
assert!(result.tests().is_some());
assert!(result.lib().is_some());
assert_eq!(result.bins().len(), 2);
{
let meta = result.meta();
assert_eq!(meta.name, "hello");
assert_eq!(meta.version, semver::Version::parse("4.6.3-beta").unwrap());
assert_eq!(
meta.license,
Some(spdx::Licensee::parse("Apache-2.0").unwrap())
);
assert_eq!(meta.authors, [Author::new("Evan Cox".to_owned(), None)]);
}
{
let benches = result.benches().unwrap();
assert_eq!(benches.lang(), LanguageInfo::from_name("c++").unwrap());
assert_eq!(benches.include(), &[] as &[String]);
assert_eq!(
*benches.group_list(),
DeserializedGroupList::new(
vec![
DeserializedGroup::new(
"benches/one.cc".to_owned(),
vec!["benches/one.cc".to_owned()],
None,
),
DeserializedGroup::new(
"benches/sub/*.cc".to_owned(),
vec!["benches/sub/*.cc".to_owned()],
None,
),
],
true,
)
);
}
{
let tests = result.tests().unwrap();
assert_eq!(
tests.lang(),
LanguageInfo::new(Language::C(CVersion::C90), true)
);
assert_eq!(tests.include(), ["tests/support"]);
assert_eq!(
*tests.group_list(),
DeserializedGroupList::new(
vec![
DeserializedGroup::new(
"lib-tests".to_owned(),
vec!["lib/**/*.test.cc".to_owned()],
None,
),
DeserializedGroup::new(
"src-tests".to_owned(),
vec!["src/**/*.test.cc".to_owned()],
Some(LanguageInfo::from_name("c").unwrap()),
)
],
false,
),
);
}
{
let bins = result.bins();
{
assert_eq!(bins[0].name(), "not-epic");
assert_eq!(bins[0].lang(), LanguageInfo::from_name("rust").unwrap());
assert_eq!(bins[0].include(), &[] as &[String]);
assert_eq!(bins[0].files(), vec!["src/not-epic/*.cc"]);
}
{
assert_eq!(bins[1].name(), "epic");
assert_eq!(bins[1].lang(), LanguageInfo::from_name("c").unwrap());
assert_eq!(bins[1].include(), &[] as &[String]);
assert_eq!(bins[1].files(), vec!["src/epic/*.cc"]);
}
}
{
let lib = result.lib().unwrap();
assert_eq!(
lib.lang(),
LanguageInfo::new(Language::Cpp(CppVersion::latest()), false)
);
assert_eq!(lib.include(), ["include"]);
assert_eq!(lib.files(), ["lib/*.cc"]);
}
}
}
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Implements a DHCPv6 client.
use {
anyhow::{Context as _, Result},
async_utils::futures::{FutureExt as _, ReplaceValue},
dhcpv6_core,
dns_server_watcher::DEFAULT_DNS_PORT,
fidl::endpoints::ServerEnd,
fidl_fuchsia_net as fnet,
fidl_fuchsia_net_dhcpv6::{
ClientConfig, ClientMarker, ClientRequest, ClientRequestStream,
ClientWatchServersResponder, InformationConfig, NewClientParams,
RELAY_AGENT_AND_SERVER_LINK_LOCAL_MULTICAST_ADDRESS, RELAY_AGENT_AND_SERVER_PORT,
},
fidl_fuchsia_net_ext as fnetext, fidl_fuchsia_net_name as fnetname, fuchsia_async as fasync,
fuchsia_zircon as zx,
futures::{
future::{AbortHandle, Abortable, Aborted},
select, stream,
stream::futures_unordered::FuturesUnordered,
Future, FutureExt as _, StreamExt as _, TryStreamExt as _,
},
packet::ParsablePacket,
packet_formats_dhcp::v6,
rand::{rngs::StdRng, thread_rng, FromEntropy, Rng},
std::{
collections::{
hash_map::{DefaultHasher, Entry},
HashMap,
},
convert::TryFrom,
hash::{Hash, Hasher},
net::{IpAddr, Ipv6Addr, SocketAddr},
num::TryFromIntError,
str::FromStr as _,
time::Duration,
},
};
#[derive(Debug, thiserror::Error)]
pub enum ClientError {
#[error("no timer scheduled for {:?}", _0)]
MissingTimer(dhcpv6_core::client::ClientTimerType),
#[error("a timer is already scheduled for {:?}", _0)]
TimerAlreadyExist(dhcpv6_core::client::ClientTimerType),
#[error("fidl error: {}", _0)]
Fidl(fidl::Error),
#[error("got watch request while the previous one is pending")]
DoubleWatch(),
#[error("unsupported DHCPv6 config: {:?}, no addresses or configurations", _0)]
UnsupportedConfigs(ClientConfig),
#[error("socket receive error: {:?}", _0)]
SocketRecv(std::io::Error),
#[error("unimplemented DHCPv6 functionality: {:?}()", _0)]
Unimplemented(String),
}
/// Theoretical size limit for UDP datagrams.
///
/// NOTE: This does not take [jumbograms](https://tools.ietf.org/html/rfc2675) into account.
const MAX_UDP_DATAGRAM_SIZE: usize = 65_535;
type TimerFut = ReplaceValue<fasync::Timer, dhcpv6_core::client::ClientTimerType>;
/// A DHCPv6 client.
pub(crate) struct Client<S: for<'a> AsyncSocket<'a>> {
/// The interface the client is running on.
interface_id: u64,
/// Stores the hash of the last observed version of DNS servers by a watcher.
///
/// The client uses this hash to determine whether new changes in DNS servers are observed and
/// updates should be replied to the watcher.
last_observed_dns_hash: u64,
/// Stores a responder to send DNS server updates.
dns_responder: Option<ClientWatchServersResponder>,
/// Maintains the state for the client.
state_machine: dhcpv6_core::client::ClientStateMachine<StdRng>,
/// The socket used to communicate with DHCPv6 servers.
socket: S,
/// The address to send outgoing messages to.
server_addr: SocketAddr,
/// A collection of abort handles to all currently scheduled timers.
timer_abort_handles: HashMap<dhcpv6_core::client::ClientTimerType, AbortHandle>,
/// A set of all scheduled timers.
timer_futs: FuturesUnordered<Abortable<TimerFut>>,
/// A stream of FIDL requests to this client.
request_stream: ClientRequestStream,
}
/// A trait that allows stubbing [`fuchsia_async::net::UdpSocket`] in tests.
pub(crate) trait AsyncSocket<'a> {
type RecvFromFut: Future<Output = Result<(usize, SocketAddr), std::io::Error>> + 'a;
type SendToFut: Future<Output = Result<usize, std::io::Error>> + 'a;
fn recv_from(&'a self, buf: &'a mut [u8]) -> Self::RecvFromFut;
fn send_to(&'a self, buf: &'a [u8], addr: SocketAddr) -> Self::SendToFut;
}
impl<'a> AsyncSocket<'a> for fasync::net::UdpSocket {
type RecvFromFut = fasync::net::RecvFrom<'a>;
type SendToFut = fasync::net::SendTo<'a>;
fn recv_from(&'a self, buf: &'a mut [u8]) -> Self::RecvFromFut {
self.recv_from(buf)
}
fn send_to(&'a self, buf: &'a [u8], addr: SocketAddr) -> Self::SendToFut {
self.send_to(buf, addr)
}
}
/// Converts `InformationConfig` to a collection of `v6::OptionCode`.
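///
/// For example, `InformationConfig { dns_servers: Some(true), .. }` maps to
/// `vec![v6::OptionCode::DnsServers]`; any other config yields an empty list.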
fn to_dhcpv6_option_codes(information_config: InformationConfig) -> Vec<v6::OptionCode> {
let InformationConfig { dns_servers, .. } = information_config;
let mut codes = Vec::new();
if dns_servers.unwrap_or(false) {
let () = codes.push(v6::OptionCode::DnsServers);
}
codes
}
/// Creates a state machine for the input client config.
fn create_state_machine(
transaction_id: [u8; 3],
config: ClientConfig,
) -> Result<
(dhcpv6_core::client::ClientStateMachine<StdRng>, dhcpv6_core::client::Actions),
ClientError,
> {
if let ClientConfig {
address_assignment_config: None,
information_config: Some(information_config),
..
} = config
{
return Ok(dhcpv6_core::client::ClientStateMachine::start_information_request(
transaction_id,
to_dhcpv6_option_codes(information_config),
StdRng::from_entropy(),
));
};
// TODO(https://fxbug.dev/69696) Implement address assignment state machine.
Err(ClientError::UnsupportedConfigs(config))
}
/// Calculates a hash for the input.
fn hash<H: Hash>(h: &H) -> u64 {
let mut dh = DefaultHasher::new();
let () = h.hash(&mut dh);
dh.finish()
}
impl<S: for<'a> AsyncSocket<'a>> Client<S> {
/// Starts the client in `config`.
///
/// Input `transaction_id` is used to label outgoing messages and match incoming ones.
pub(crate) async fn start(
transaction_id: [u8; 3],
config: ClientConfig,
interface_id: u64,
socket: S,
server_addr: SocketAddr,
request_stream: ClientRequestStream,
) -> Result<Self, ClientError> {
let (state_machine, actions) = create_state_machine(transaction_id, config)?;
let mut client = Self {
state_machine,
interface_id,
socket,
server_addr,
request_stream,
timer_abort_handles: HashMap::new(),
timer_futs: FuturesUnordered::new(),
// Server watcher's API requires blocking if the first call would return an empty list,
// so initialize this field with a hash of an empty list.
last_observed_dns_hash: hash(&Vec::<Ipv6Addr>::new()),
dns_responder: None,
};
let () = client.run_actions(actions).await?;
Ok(client)
}
/// Runs a list of actions sequentially.
async fn run_actions(
&mut self,
actions: dhcpv6_core::client::Actions,
) -> Result<(), ClientError> {
stream::iter(actions)
.map(Ok)
.try_fold(self, |client, action| async move {
match action {
dhcpv6_core::client::Action::SendMessage(buf) => {
let () = match client.socket.send_to(&buf, client.server_addr).await {
Ok(_size) => (),
Err(e) => {
let () = log::warn!(
"failed to send message to {}: {}; will retransmit later",
client.server_addr,
e
);
}
};
}
dhcpv6_core::client::Action::ScheduleTimer(timer_type, timeout) => {
let () = client.schedule_timer(timer_type, timeout)?;
}
dhcpv6_core::client::Action::CancelTimer(timer_type) => {
let () = client.cancel_timer(timer_type)?;
}
dhcpv6_core::client::Action::UpdateDnsServers(servers) => {
let () = client.maybe_send_dns_server_updates(servers)?;
}
};
Ok(client)
})
.await
.map(|_: &mut Client<S>| ())
}
/// Sends the latest DNS servers iff a watcher is watching, and the latest set of servers are
/// different from what the watcher has observed last time.
fn maybe_send_dns_server_updates(&mut self, servers: Vec<Ipv6Addr>) -> Result<(), ClientError> {
let servers_hash = hash(&servers);
if servers_hash == self.last_observed_dns_hash {
Ok(())
} else {
Ok(match self.dns_responder.take() {
Some(responder) => {
self.send_dns_server_updates(responder, servers, servers_hash)?
}
None => (),
})
}
}
/// Sends a list of DNS servers to a watcher through the input responder and updates the last
/// observed hash.
fn send_dns_server_updates(
&mut self,
responder: ClientWatchServersResponder,
servers: Vec<Ipv6Addr>,
hash: u64,
) -> Result<(), ClientError> {
let () = responder
.send(&mut servers.iter().map(|addr| {
let address = fnet::Ipv6Address { addr: addr.octets() };
let zone_index =
if is_unicast_link_local_strict(&address) { self.interface_id } else { 0 };
fnetname::DnsServer_ {
address: Some(fnet::SocketAddress::Ipv6(fnet::Ipv6SocketAddress {
address,
zone_index,
port: DEFAULT_DNS_PORT,
})),
source: Some(fnetname::DnsServerSource::Dhcpv6(
fnetname::Dhcpv6DnsServerSource {
source_interface: Some(self.interface_id),
..fnetname::Dhcpv6DnsServerSource::EMPTY
},
)),
..fnetname::DnsServer_::EMPTY
}
}))
// The channel will be closed on error, so return an error to stop the client.
.map_err(ClientError::Fidl)?;
self.last_observed_dns_hash = hash;
Ok(())
}
/// Schedules a timer for `timer_type` to fire after `timeout`.
fn schedule_timer(
&mut self,
timer_type: dhcpv6_core::client::ClientTimerType,
timeout: Duration,
) -> Result<(), ClientError> {
match self.timer_abort_handles.entry(timer_type) {
Entry::Vacant(entry) => {
let (handle, reg) = AbortHandle::new_pair();
let _: &mut AbortHandle = entry.insert(handle);
let () = self.timer_futs.push(Abortable::new(
fasync::Timer::new(fasync::Time::after(
i64::try_from(timeout.as_nanos())
.map(zx::Duration::from_nanos)
.unwrap_or_else(|_: TryFromIntError| {
let duration = zx::Duration::from_nanos(i64::MAX);
let () = log::warn!(
"time duration {:?} overflows zx::Duration, truncating to {:?}",
timeout,
duration
);
duration
}),
))
.replace_value(timer_type),
reg,
));
Ok(())
}
Entry::Occupied(_) => Err(ClientError::TimerAlreadyExist(timer_type)),
}
}
/// Cancels a previously scheduled timer for `timer_type`.
fn cancel_timer(
&mut self,
timer_type: dhcpv6_core::client::ClientTimerType,
) -> Result<(), ClientError> {
match self.timer_abort_handles.entry(timer_type) {
Entry::Vacant(_) => Err(ClientError::MissingTimer(timer_type)),
Entry::Occupied(entry) => Ok(entry.remove().abort()),
}
}
/// Handles a timeout.
async fn handle_timeout(
&mut self,
timer_type: dhcpv6_core::client::ClientTimerType,
) -> Result<(), ClientError> {
let () = self.cancel_timer(timer_type)?; // This timer just fired.
let actions = self.state_machine.handle_timeout(timer_type);
self.run_actions(actions).await
}
/// Handles a received message.
async fn handle_message_recv(&mut self, mut msg: &[u8]) -> Result<(), ClientError> {
let msg = match v6::Message::parse(&mut msg, ()) {
Ok(msg) => msg,
Err(e) => {
// Discard invalid messages.
//
// https://tools.ietf.org/html/rfc8415#section-16.
let () = log::warn!("failed to parse received message: {}", e);
return Ok(());
}
};
let actions = self.state_machine.handle_message_receive(msg);
self.run_actions(actions).await
}
/// Handles a FIDL request sent to this client.
fn handle_client_request(&mut self, request: ClientRequest) -> Result<(), ClientError> {
match request {
ClientRequest::WatchServers { responder } => match self.dns_responder {
Some(_) => {
// Drop the previous responder to close the channel.
self.dns_responder = None;
// Return an error to stop the client because the channel is closed.
Err(ClientError::DoubleWatch())
}
None => {
let dns_servers = self.state_machine.get_dns_servers();
let servers_hash = hash(&dns_servers);
if servers_hash != self.last_observed_dns_hash {
// Something has changed from the last time, update the watcher.
let () =
self.send_dns_server_updates(responder, dns_servers, servers_hash)?;
} else {
// Nothing has changed, update the watcher later.
self.dns_responder = Some(responder);
}
Ok(())
}
},
// TODO(https://fxbug.dev/72701) Implement the address watcher.
ClientRequest::WatchAddress { responder: _ } => {
Err(ClientError::Unimplemented("WatchAddress".to_string()))
}
// TODO(https://fxbug.dev/72702) Implement Shutdown.
ClientRequest::Shutdown { responder: _ } => {
Err(ClientError::Unimplemented("Shutdown".to_string()))
}
}
}
/// Handles the next event and returns the result.
///
/// Takes a pre-allocated buffer to avoid repeated allocation.
///
/// The returned `Option` is `None` if `request_stream` on the client is closed.
async fn handle_next_event(&mut self, buf: &mut [u8]) -> Result<Option<()>, ClientError> {
select! {
timer_res = self.timer_futs.select_next_some() => {
match timer_res {
Ok(timer_type) => {
let () = self.handle_timeout(timer_type).await?;
Ok(Some(()))
},
// The timer was aborted, do nothing.
Err(Aborted) => Ok(Some(())),
}
},
recv_from_res = self.socket.recv_from(buf).fuse() => {
let (size, _addr) = recv_from_res.map_err(ClientError::SocketRecv)?;
let () = self.handle_message_recv(&buf[..size]).await?;
Ok(Some(()))
},
request = self.request_stream.try_next() => {
match request {
Ok(request) => {
request.map(|request| self.handle_client_request(request)).transpose()
}
Err(e) => {
Ok(Some(log::warn!("FIDL client request error: {}", e)))
}
}
}
}
}
}
/// Creates a socket listening on the input address.
async fn create_socket(addr: SocketAddr) -> Result<fasync::net::UdpSocket> {
let socket = socket2::Socket::new(
socket2::Domain::IPV6,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)?;
// It is possible to run multiple clients on the same address.
let () = socket.set_reuse_port(true)?;
let () = socket.bind(&addr.into())?;
fasync::net::UdpSocket::from_socket(socket.into()).context("converting socket")
}
/// Creates a transaction ID that can be used by the client as defined in [RFC 8415, Section 16.1].
///
/// [RFC 8415, Section 16.1]: https://tools.ietf.org/html/rfc8415#section-16.1
fn transaction_id() -> [u8; 3] {
let mut id = [0u8; 3];
let () = thread_rng().fill(&mut id[..]);
id
}
/// Returns `true` if the input address is a link-local address (`fe80::/64`).
///
/// TODO(https://github.com/rust-lang/rust/issues/27709): use is_unicast_link_local_strict() in
/// stable rust when it's available.
fn is_unicast_link_local_strict(addr: &fnet::Ipv6Address) -> bool {
addr.addr[..8] == [0xfe, 0x80, 0, 0, 0, 0, 0, 0]
}
/// Starts a client based on `params`.
///
/// `request` will be serviced by the client.
pub(crate) async fn serve_client(
params: NewClientParams,
request: ServerEnd<ClientMarker>,
) -> Result<()> {
if let NewClientParams {
interface_id: Some(interface_id),
address: Some(address),
config: Some(config),
..
} = params
{
if Ipv6Addr::from(address.address.addr).is_multicast()
|| (is_unicast_link_local_strict(&address.address)
&& address.zone_index != interface_id)
{
return request
.close_with_epitaph(zx::Status::INVALID_ARGS)
.context("closing request channel with epitaph");
}
let fnetext::SocketAddress(addr) = fnet::SocketAddress::Ipv6(address).into();
let servers_addr = IpAddr::from_str(RELAY_AGENT_AND_SERVER_LINK_LOCAL_MULTICAST_ADDRESS)
.with_context(|| {
format!(
"{} should be a valid IPv6 address",
RELAY_AGENT_AND_SERVER_LINK_LOCAL_MULTICAST_ADDRESS,
)
})?;
let mut client = Client::<fasync::net::UdpSocket>::start(
transaction_id(),
config,
interface_id,
create_socket(addr).await?,
SocketAddr::new(servers_addr, RELAY_AGENT_AND_SERVER_PORT),
request.into_stream().context("getting new client request stream from channel")?,
)
.await?;
let mut buf = vec![0u8; MAX_UDP_DATAGRAM_SIZE];
loop {
match client.handle_next_event(&mut buf).await? {
Some(()) => (),
None => break Ok(()),
}
}
} else {
// All param fields are required.
request
.close_with_epitaph(zx::Status::INVALID_ARGS)
.context("closing request channel with epitaph")
}
}
#[cfg(test)]
mod tests {
use {
super::*,
fidl::endpoints::create_endpoints,
fidl_fuchsia_net_dhcpv6::{ClientConfig, ClientMarker, DEFAULT_CLIENT_PORT},
fuchsia_async as fasync,
futures::{channel::mpsc, join},
matches::assert_matches,
net_declare::{
fidl_ip_v6, fidl_socket_addr, fidl_socket_addr_v6, std_ip_v6, std_socket_addr,
},
packet::serialize::InnerPacketBuilder,
std::{collections::HashSet, task::Poll},
};
/// Creates a test socket bound to an ephemeral port on localhost.
fn create_test_socket() -> (fasync::net::UdpSocket, SocketAddr) {
let addr: SocketAddr = std_socket_addr!("[::1]:0");
let socket = std::net::UdpSocket::bind(addr).expect("failed to create test socket");
let addr = socket.local_addr().expect("failed to get address of test socket");
(fasync::net::UdpSocket::from_socket(socket).expect("failed to create test socket"), addr)
}
/// Asserts `socket` receives an information request from `want_from_addr`.
async fn assert_received_information_request(
socket: &fasync::net::UdpSocket,
want_from_addr: SocketAddr,
) {
let mut buf = vec![0u8; MAX_UDP_DATAGRAM_SIZE];
let (size, from_addr) =
socket.recv_from(&mut buf).await.expect("failed to receive on test server socket");
assert_eq!(from_addr, want_from_addr);
let buf = &mut &buf[..size]; // Implements BufferView.
assert_eq!(
v6::Message::parse(buf, ()).map(|x| x.msg_type()),
Ok(v6::MessageType::InformationRequest)
)
}
#[test]
fn test_create_client_with_unsupported_config() {
assert_matches!(
create_state_machine([1, 2, 3], ClientConfig::EMPTY),
Err(ClientError::UnsupportedConfigs(ClientConfig {
address_assignment_config: None,
information_config: None,
..
}))
);
}
#[fasync::run_singlethreaded(test)]
async fn test_client_stops_on_channel_close() {
let (client_end, server_end) =
create_endpoints::<ClientMarker>().expect("failed to create test fidl channel");
let client_proxy = client_end.into_proxy().expect("failed to create test client proxy");
let ((), client_res) = join!(
async { drop(client_proxy) },
serve_client(
NewClientParams {
interface_id: Some(1),
address: Some(fidl_socket_addr_v6!("[::1]:546")),
config: Some(ClientConfig {
information_config: Some(InformationConfig { ..InformationConfig::EMPTY }),
..ClientConfig::EMPTY
}),
..NewClientParams::EMPTY
},
server_end,
),
);
client_res.expect("client future should return with Ok");
}
#[fasync::run_singlethreaded(test)]
async fn test_client_should_return_error_on_double_watch() {
let (client_end, server_end) =
create_endpoints::<ClientMarker>().expect("failed to create test fidl channel");
let client_proxy = client_end.into_proxy().expect("failed to create test client proxy");
let (caller1_res, caller2_res, client_res) = join!(
client_proxy.watch_servers(),
client_proxy.watch_servers(),
serve_client(
NewClientParams {
interface_id: Some(1),
address: Some(fidl_socket_addr_v6!("[::1]:546")),
config: Some(ClientConfig {
information_config: Some(InformationConfig { ..InformationConfig::EMPTY }),
..ClientConfig::EMPTY
}),
..NewClientParams::EMPTY
},
server_end,
)
);
assert_matches!(
caller1_res,
Err(fidl::Error::ClientChannelClosed { status: zx::Status::PEER_CLOSED, .. })
);
assert_matches!(
caller2_res,
Err(fidl::Error::ClientChannelClosed { status: zx::Status::PEER_CLOSED, .. })
);
assert!(client_res
.expect_err("client should fail with double watch error")
.to_string()
.contains("got watch request while the previous one is pending"));
}
#[test]
fn test_client_starts_with_valid_args() {
let mut exec = fasync::Executor::new().expect("failed to create test executor");
let (client_end, server_end) =
create_endpoints::<ClientMarker>().expect("failed to create test fidl channel");
let client_proxy = client_end.into_proxy().expect("failed to create test client proxy");
let test_fut = async {
join!(
client_proxy.watch_servers(),
serve_client(
NewClientParams {
interface_id: Some(1),
address: Some(fidl_socket_addr_v6!("[::1]:546")),
config: Some(ClientConfig {
information_config: Some(InformationConfig {
..InformationConfig::EMPTY
}),
..ClientConfig::EMPTY
}),
..NewClientParams::EMPTY
},
server_end
)
)
};
futures::pin_mut!(test_fut);
assert_matches!(exec.run_until_stalled(&mut test_fut), Poll::Pending);
}
#[fasync::run_singlethreaded(test)]
async fn test_client_fails_to_start_with_invalid_args() {
for params in vec![
// Missing required field.
NewClientParams {
interface_id: Some(1),
address: None,
config: Some(ClientConfig {
information_config: Some(InformationConfig { ..InformationConfig::EMPTY }),
..ClientConfig::EMPTY
}),
..NewClientParams::EMPTY
},
// Interface ID and zone index mismatch on link-local address.
NewClientParams {
interface_id: Some(2),
address: Some(fnet::Ipv6SocketAddress {
address: fidl_ip_v6!("fe80::1"),
port: DEFAULT_CLIENT_PORT,
zone_index: 1,
}),
config: Some(ClientConfig {
information_config: Some(InformationConfig { ..InformationConfig::EMPTY }),
..ClientConfig::EMPTY
}),
..NewClientParams::EMPTY
},
// Multicast address is invalid.
NewClientParams {
interface_id: Some(1),
address: Some(fnet::Ipv6SocketAddress {
address: fidl_ip_v6!("ff01::1"),
port: DEFAULT_CLIENT_PORT,
zone_index: 1,
}),
config: Some(ClientConfig {
information_config: Some(InformationConfig { ..InformationConfig::EMPTY }),
..ClientConfig::EMPTY
}),
..NewClientParams::EMPTY
},
] {
let (client_end, server_end) =
create_endpoints::<ClientMarker>().expect("failed to create test fidl channel");
let () =
serve_client(params, server_end).await.expect("start server failed unexpectedly");
// Calling any function on the client proxy should fail due to channel closed with
// `INVALID_ARGS`.
assert_matches!(
client_end.into_proxy().expect("failed to create test proxy").watch_servers().await,
Err(fidl::Error::ClientChannelClosed { status: zx::Status::INVALID_ARGS, .. })
);
}
}
#[test]
fn test_is_unicast_link_local_strict() {
assert_eq!(is_unicast_link_local_strict(&fidl_ip_v6!("fe80::")), true);
assert_eq!(is_unicast_link_local_strict(&fidl_ip_v6!("fe80::1")), true);
assert_eq!(is_unicast_link_local_strict(&fidl_ip_v6!("fe80::ffff:1:2:3")), true);
assert_eq!(is_unicast_link_local_strict(&fidl_ip_v6!("fe80::1:0:0:0:0")), false);
assert_eq!(is_unicast_link_local_strict(&fidl_ip_v6!("fe81::")), false);
}
fn create_test_dns_server(
address: fnet::Ipv6Address,
source_interface: u64,
zone_index: u64,
) -> fnetname::DnsServer_ {
fnetname::DnsServer_ {
address: Some(fnet::SocketAddress::Ipv6(fnet::Ipv6SocketAddress {
address,
zone_index,
port: DEFAULT_DNS_PORT,
})),
source: Some(fnetname::DnsServerSource::Dhcpv6(fnetname::Dhcpv6DnsServerSource {
source_interface: Some(source_interface),
..fnetname::Dhcpv6DnsServerSource::EMPTY
})),
..fnetname::DnsServer_::EMPTY
}
}
async fn send_reply_with_options(
socket: &fasync::net::UdpSocket,
to_addr: SocketAddr,
transaction_id: [u8; 3],
options: &[v6::DhcpOption<'_>],
) -> Result<()> {
let builder = v6::MessageBuilder::new(v6::MessageType::Reply, transaction_id, options);
let mut buf = vec![0u8; builder.bytes_len()];
let () = builder.serialize(&mut buf);
let _: usize = socket.send_to(&buf, to_addr).await?;
Ok(())
}
#[test]
fn test_client_should_respond_to_dns_watch_requests() {
let mut exec = fasync::Executor::new().expect("failed to create test executor");
let transaction_id = [1, 2, 3];
let (client_end, server_end) =
create_endpoints::<ClientMarker>().expect("failed to create test fidl channel");
let client_proxy = client_end.into_proxy().expect("failed to create test client proxy");
let client_stream = server_end.into_stream().expect("failed to create test request stream");
let (client_socket, client_addr) = create_test_socket();
let (server_socket, server_addr) = create_test_socket();
let mut client = exec
.run_singlethreaded(Client::<fasync::net::UdpSocket>::start(
transaction_id,
ClientConfig {
information_config: Some(InformationConfig { ..InformationConfig::EMPTY }),
..ClientConfig::EMPTY
},
1, /* interface ID */
client_socket,
server_addr,
client_stream,
))
.expect("failed to create test client");
let (mut signal_client_to_refresh, mut client_should_refresh) = mpsc::channel::<()>(1);
let client_fut = async {
let mut buf = vec![0u8; MAX_UDP_DATAGRAM_SIZE];
loop {
select! {
res = client.handle_next_event(&mut buf).fuse() => {
match res.expect("test client failed to handle next event") {
Some(()) => (),
None => break (),
};
}
_ = client_should_refresh.next() => {
// Make the client ready for another reply immediately on signal, so it can
// start receiving updates without waiting for the full refresh timeout
// which would be unrealistically long for a test.
if client.timer_abort_handles.contains_key(&dhcpv6_core::client::ClientTimerType::Refresh) {
let () = client
.handle_timeout(dhcpv6_core::client::ClientTimerType::Refresh)
.await
.expect("test client failed to handle timeout");
} else {
panic!("no refresh timer is scheduled and refresh is requested in test");
}
},
}
}
}.fuse();
futures::pin_mut!(client_fut);
macro_rules! build_test_fut {
($test_fut:ident) => {
let $test_fut = async {
select! {
() = client_fut => panic!("test client returned unexpectedly"),
r = client_proxy.watch_servers() => r,
}
};
futures::pin_mut!($test_fut);
};
}
{
// No DNS configurations received yet.
build_test_fut!(test_fut);
assert_matches!(exec.run_until_stalled(&mut test_fut), Poll::Pending);
// Send an empty list to the client, should not update watcher.
let () = exec
.run_singlethreaded(send_reply_with_options(
&server_socket,
client_addr,
transaction_id,
&[v6::DhcpOption::DnsServers(Vec::new())],
))
.expect("failed to send test reply");
assert_matches!(exec.run_until_stalled(&mut test_fut), Poll::Pending);
// Send a list of DNS servers, the watcher should be updated accordingly.
let () = signal_client_to_refresh
.try_send(())
.expect("failed to signal test client to refresh");
let () = exec
.run_singlethreaded(send_reply_with_options(
&server_socket,
client_addr,
transaction_id,
&[v6::DhcpOption::DnsServers(vec![std_ip_v6!("fe80::1:2")])],
))
.expect("failed to send test reply");
let want_servers = vec![create_test_dns_server(
fidl_ip_v6!("fe80::1:2"),
1, /* source interface */
1, /* zone index */
)];
assert_matches!(
exec.run_until_stalled(&mut test_fut),
Poll::Ready(Ok(servers)) if servers == want_servers
);
} // drop `test_fut` so `client_fut` is no longer mutably borrowed.
{
// No new changes, should not update watcher.
build_test_fut!(test_fut);
assert_matches!(exec.run_until_stalled(&mut test_fut), Poll::Pending);
// Send the same list of DNS servers, should not update watcher.
let () = signal_client_to_refresh
.try_send(())
.expect("failed to signal test client to refresh");
let () = exec
.run_singlethreaded(send_reply_with_options(
&server_socket,
client_addr,
transaction_id,
&[v6::DhcpOption::DnsServers(vec![std_ip_v6!("fe80::1:2")])],
))
.expect("failed to send test reply");
assert_matches!(exec.run_until_stalled(&mut test_fut), Poll::Pending);
// Send a different list of DNS servers, should update watcher.
let () = signal_client_to_refresh
.try_send(())
.expect("failed to signal test client to refresh");
let () = exec
.run_singlethreaded(send_reply_with_options(
&server_socket,
client_addr,
transaction_id,
&[v6::DhcpOption::DnsServers(vec![
std_ip_v6!("fe80::1:2"),
std_ip_v6!("1234::5:6"),
])],
))
.expect("failed to send test reply");
let want_servers = vec![
create_test_dns_server(
fidl_ip_v6!("fe80::1:2"),
1, /* source interface */
1, /* zone index */
),
// Only set zone index for link local addresses.
create_test_dns_server(
fidl_ip_v6!("1234::5:6"),
1, /* source interface */
0, /* zone index */
),
];
assert_matches!(
exec.run_until_stalled(&mut test_fut),
Poll::Ready(Ok(servers)) if servers == want_servers
);
} // drop `test_fut` so `client_fut` is no longer mutably borrowed.
{
// Send an empty list of DNS servers, should update watcher, because this is different from
// what the watcher has seen last time.
let () = signal_client_to_refresh
.try_send(())
.expect("failed to signal test client to refresh");
let () = exec
.run_singlethreaded(send_reply_with_options(
&server_socket,
client_addr,
transaction_id,
&[v6::DhcpOption::DnsServers(Vec::new())],
))
.expect("failed to send test reply");
build_test_fut!(test_fut);
let want_servers = Vec::<fnetname::DnsServer_>::new();
assert_matches!(
exec.run_until_stalled(&mut test_fut),
Poll::Ready(Ok(servers)) if servers == want_servers
);
} // drop `test_fut` so `client_fut` is no longer mutably borrowed.
}
#[fasync::run_singlethreaded(test)]
async fn test_client_should_respond_with_dns_servers_on_first_watch_if_non_empty() {
let transaction_id = [1, 2, 3];
let (client_end, server_end) =
create_endpoints::<ClientMarker>().expect("failed to create test fidl channel");
let client_proxy = client_end.into_proxy().expect("failed to create test client proxy");
let client_stream = server_end.into_stream().expect("failed to create test request stream");
let (client_socket, client_addr) = create_test_socket();
let (server_socket, server_addr) = create_test_socket();
let mut client = Client::<fasync::net::UdpSocket>::start(
transaction_id,
ClientConfig {
information_config: Some(InformationConfig { ..InformationConfig::EMPTY }),
..ClientConfig::EMPTY
},
1, /* interface ID */
client_socket,
server_addr,
client_stream,
)
.await
.expect("failed to create test client");
let () = send_reply_with_options(
&server_socket,
client_addr,
transaction_id,
&[v6::DhcpOption::DnsServers(vec![std_ip_v6!("fe80::1:2"), std_ip_v6!("1234::5:6")])],
)
.await
.expect("failed to send test message");
// Receive non-empty DNS servers before watch.
let mut buf = vec![0u8; MAX_UDP_DATAGRAM_SIZE];
assert_matches!(client.handle_next_event(&mut buf).await, Ok(Some(())));
        // Discard the aborted retransmission timer.
assert_matches!(client.handle_next_event(&mut buf).await, Ok(Some(())));
let want_servers = vec![
create_test_dns_server(
fidl_ip_v6!("fe80::1:2"),
1, /* source interface */
1, /* zone index */
),
create_test_dns_server(
fidl_ip_v6!("1234::5:6"),
1, /* source interface */
0, /* zone index */
),
];
assert_matches!(
join!(client.handle_next_event(&mut buf), client_proxy.watch_servers()),
(Ok(Some(())), Ok(servers)) if servers == want_servers
);
}
#[fasync::run_singlethreaded(test)]
async fn test_client_schedule_and_cancel_timers() {
let (_client_end, server_end) =
create_endpoints::<ClientMarker>().expect("failed to create test fidl channel");
let client_stream = server_end.into_stream().expect("failed to create test request stream");
let (client_socket, _client_addr) = create_test_socket();
let (_server_socket, server_addr) = create_test_socket();
let mut client = Client::<fasync::net::UdpSocket>::start(
[1, 2, 3], /* transaction ID */
ClientConfig {
information_config: Some(InformationConfig { ..InformationConfig::EMPTY }),
..ClientConfig::EMPTY
},
1, /* interface ID */
client_socket,
server_addr,
client_stream,
)
.await
.expect("failed to create test client");
// Stateless DHCP client starts by scheduling a retransmission timer.
assert_eq!(
client.timer_abort_handles.keys().collect::<Vec<_>>(),
vec![&dhcpv6_core::client::ClientTimerType::Retransmission]
);
let () = client
.cancel_timer(dhcpv6_core::client::ClientTimerType::Retransmission)
.expect("canceling retransmission timer on test client");
assert_eq!(
client.timer_abort_handles.keys().collect::<Vec<_>>(),
Vec::<&dhcpv6_core::client::ClientTimerType>::new()
);
let () = client
.schedule_timer(dhcpv6_core::client::ClientTimerType::Refresh, Duration::from_nanos(1))
.expect("scheduling refresh timer on test client");
let () = client
.schedule_timer(
dhcpv6_core::client::ClientTimerType::Retransmission,
Duration::from_nanos(2),
)
.expect("scheduling retransmission timer on test client");
assert_eq!(
client.timer_abort_handles.keys().collect::<HashSet<_>>(),
vec![
&dhcpv6_core::client::ClientTimerType::Retransmission,
&dhcpv6_core::client::ClientTimerType::Refresh
]
.into_iter()
.collect()
);
assert_matches!(
client.schedule_timer(
dhcpv6_core::client::ClientTimerType::Refresh,
Duration::from_nanos(1)
),
Err(ClientError::TimerAlreadyExist(dhcpv6_core::client::ClientTimerType::Refresh))
);
assert_matches!(
client.schedule_timer(
dhcpv6_core::client::ClientTimerType::Retransmission,
Duration::from_nanos(2)
),
Err(ClientError::TimerAlreadyExist(
dhcpv6_core::client::ClientTimerType::Retransmission
))
);
let () = client
.cancel_timer(dhcpv6_core::client::ClientTimerType::Refresh)
.expect("canceling retransmission timer on test client");
assert_eq!(
client.timer_abort_handles.keys().collect::<Vec<_>>(),
vec![&dhcpv6_core::client::ClientTimerType::Retransmission]
);
assert_matches!(
client.cancel_timer(dhcpv6_core::client::ClientTimerType::Refresh),
Err(ClientError::MissingTimer(dhcpv6_core::client::ClientTimerType::Refresh))
);
let () = client
.cancel_timer(dhcpv6_core::client::ClientTimerType::Retransmission)
.expect("canceling retransmission timer on test client");
assert_eq!(
client
.timer_abort_handles
.keys()
.collect::<Vec<&dhcpv6_core::client::ClientTimerType>>(),
Vec::<&dhcpv6_core::client::ClientTimerType>::new()
);
assert_matches!(
client.cancel_timer(dhcpv6_core::client::ClientTimerType::Retransmission),
Err(ClientError::MissingTimer(dhcpv6_core::client::ClientTimerType::Retransmission))
);
}
#[fasync::run_singlethreaded(test)]
async fn test_handle_next_event_on_client() {
let (client_end, server_end) =
create_endpoints::<ClientMarker>().expect("failed to create test fidl channel");
let client_proxy = client_end.into_proxy().expect("failed to create test client proxy");
let client_stream = server_end.into_stream().expect("failed to create test request stream");
let (client_socket, client_addr) = create_test_socket();
let (server_socket, server_addr) = create_test_socket();
let mut client = Client::<fasync::net::UdpSocket>::start(
[1, 2, 3], /* transaction ID */
ClientConfig {
information_config: Some(InformationConfig { ..InformationConfig::EMPTY }),
..ClientConfig::EMPTY
},
1, /* interface ID */
client_socket,
server_addr,
client_stream,
)
.await
.expect("failed to create test client");
// Starting the client in stateless should send an information request out.
assert_received_information_request(&server_socket, client_addr).await;
assert_eq!(
client.timer_abort_handles.keys().collect::<Vec<_>>(),
vec![&dhcpv6_core::client::ClientTimerType::Retransmission]
);
let mut buf = vec![0u8; MAX_UDP_DATAGRAM_SIZE];
// Trigger a retransmission.
assert_matches!(client.handle_next_event(&mut buf).await, Ok(Some(())));
assert_received_information_request(&server_socket, client_addr).await;
assert_eq!(
client.timer_abort_handles.keys().collect::<Vec<_>>(),
vec![&dhcpv6_core::client::ClientTimerType::Retransmission]
);
// Message targeting another transaction ID should be ignored.
let () = send_reply_with_options(&server_socket, client_addr, [5, 6, 7], &[])
.await
.expect("failed to send test message");
assert_matches!(client.handle_next_event(&mut buf).await, Ok(Some(())));
assert_eq!(
client.timer_abort_handles.keys().collect::<Vec<_>>(),
vec![&dhcpv6_core::client::ClientTimerType::Retransmission]
);
        // Invalid messages should be discarded. An empty buffer is invalid.
let _: usize =
server_socket.send_to(&[], client_addr).await.expect("failed to send test message");
assert_matches!(client.handle_next_event(&mut buf).await, Ok(Some(())));
assert_eq!(
client.timer_abort_handles.keys().collect::<Vec<_>>(),
vec![&dhcpv6_core::client::ClientTimerType::Retransmission]
);
// Message targeting this client should cause the client to transition state.
let () = send_reply_with_options(&server_socket, client_addr, [1, 2, 3], &[])
.await
.expect("failed to send test message");
assert_matches!(client.handle_next_event(&mut buf).await, Ok(Some(())));
assert_eq!(
client.timer_abort_handles.keys().collect::<Vec<_>>(),
vec![&dhcpv6_core::client::ClientTimerType::Refresh]
);
// Discard aborted retransmission timer.
assert_matches!(client.handle_next_event(&mut buf).await, Ok(Some(())));
        // Reschedule a shorter timer for Refresh so we don't spend time waiting in the test.
client
.cancel_timer(dhcpv6_core::client::ClientTimerType::Refresh)
.expect("failed to cancel timer on test client");
// Discard cancelled refresh timer.
assert_matches!(client.handle_next_event(&mut buf).await, Ok(Some(())));
client
.schedule_timer(dhcpv6_core::client::ClientTimerType::Refresh, Duration::from_nanos(1))
.expect("failed to schedule timer on test client");
// Trigger a refresh.
assert_matches!(client.handle_next_event(&mut buf).await, Ok(Some(())));
assert_received_information_request(&server_socket, client_addr).await;
assert_eq!(
client.timer_abort_handles.keys().collect::<Vec<_>>(),
vec![&dhcpv6_core::client::ClientTimerType::Retransmission]
);
let test_fut = async {
assert_matches!(client.handle_next_event(&mut buf).await, Ok(Some(())));
client
.dns_responder
.take()
.expect("test client did not get a channel responder")
.send(&mut std::iter::once(fnetname::DnsServer_ {
address: Some(fidl_socket_addr!("[fe01::2:3]:42")),
source: Some(fnetname::DnsServerSource::Dhcpv6(
fnetname::Dhcpv6DnsServerSource {
source_interface: Some(42),
..fnetname::Dhcpv6DnsServerSource::EMPTY
},
)),
..fnetname::DnsServer_::EMPTY
}))
.expect("failed to send response on test channel");
};
let (watcher_res, ()) = join!(client_proxy.watch_servers(), test_fut);
let servers = watcher_res.expect("failed to watch servers");
assert_eq!(
servers,
vec![fnetname::DnsServer_ {
address: Some(fidl_socket_addr!("[fe01::2:3]:42")),
source: Some(fnetname::DnsServerSource::Dhcpv6(fnetname::Dhcpv6DnsServerSource {
source_interface: Some(42),
..fnetname::Dhcpv6DnsServerSource::EMPTY
},)),
..fnetname::DnsServer_::EMPTY
}]
);
        // Dropping the channel should cause `handle_next_event(&mut buf)` to return `None`.
drop(client_proxy);
assert_matches!(client.handle_next_event(&mut buf).await, Ok(None));
}
#[fasync::run_singlethreaded(test)]
async fn test_handle_next_event_respects_timer_order() {
let (_client_end, server_end) =
create_endpoints::<ClientMarker>().expect("failed to create test fidl channel");
let client_stream = server_end.into_stream().expect("failed to create test request stream");
let (client_socket, client_addr) = create_test_socket();
let (server_socket, server_addr) = create_test_socket();
let mut client = Client::<fasync::net::UdpSocket>::start(
[1, 2, 3], /* transaction ID */
ClientConfig {
information_config: Some(InformationConfig { ..InformationConfig::EMPTY }),
..ClientConfig::EMPTY
},
1, /* interface ID */
client_socket,
server_addr,
client_stream,
)
.await
.expect("failed to create test client");
let mut buf = vec![0u8; MAX_UDP_DATAGRAM_SIZE];
// A retransmission timer is scheduled when starting the client in stateless mode. Cancel
// it and create a new one with a longer timeout so the test is not flaky.
let () = client
.cancel_timer(dhcpv6_core::client::ClientTimerType::Retransmission)
.expect("failed to cancel timer on test client");
// Discard cancelled retransmission timer.
assert_matches!(client.handle_next_event(&mut buf).await, Ok(Some(())));
let () = client
.schedule_timer(
dhcpv6_core::client::ClientTimerType::Retransmission,
Duration::from_secs(1_000_000),
)
.expect("failed to schedule timer on test client");
assert_eq!(
client.timer_abort_handles.keys().collect::<Vec<_>>(),
vec![&dhcpv6_core::client::ClientTimerType::Retransmission]
);
        // Trigger a message receive; the message is later discarded because the transaction ID
        // doesn't match.
let () = send_reply_with_options(&server_socket, client_addr, [5, 6, 7], &[])
.await
.expect("failed to send test message");
        // There are now two pending events; the message receive is handled first because the
        // timer is far into the future.
assert_matches!(client.handle_next_event(&mut buf).await, Ok(Some(())));
// The retransmission timer is still here.
assert_eq!(
client.timer_abort_handles.keys().collect::<Vec<_>>(),
vec![&dhcpv6_core::client::ClientTimerType::Retransmission]
);
        // Insert a refresh timer that fires before the retransmission timer.
let () = client
.schedule_timer(dhcpv6_core::client::ClientTimerType::Refresh, Duration::from_nanos(1))
.expect("scheduling refresh timer on test client");
        // Both timers are now scheduled.
assert_eq!(
client.timer_abort_handles.keys().collect::<HashSet<_>>(),
vec![
&dhcpv6_core::client::ClientTimerType::Retransmission,
&dhcpv6_core::client::ClientTimerType::Refresh
]
.into_iter()
.collect()
);
        // Now handle_next_event(&mut buf) should trigger a refresh because its timer fires
        // before the retransmission timer.
assert_matches!(client.handle_next_event(&mut buf).await, Ok(Some(())));
// The refresh timer is consumed.
assert_eq!(
client.timer_abort_handles.keys().collect::<Vec<_>>(),
vec![&dhcpv6_core::client::ClientTimerType::Retransmission]
);
}
#[fasync::run_singlethreaded(test)]
async fn test_handle_next_event_fails_on_recv_err() {
struct StubSocket {}
impl<'a> AsyncSocket<'a> for StubSocket {
type RecvFromFut = futures::future::Ready<Result<(usize, SocketAddr), std::io::Error>>;
type SendToFut = futures::future::Ready<Result<usize, std::io::Error>>;
fn recv_from(&'a self, _buf: &'a mut [u8]) -> Self::RecvFromFut {
futures::future::ready(Err(std::io::Error::new(
std::io::ErrorKind::Other,
"test recv error",
)))
}
fn send_to(&'a self, _buf: &'a [u8], _addr: SocketAddr) -> Self::SendToFut {
futures::future::ready(Ok(0))
}
}
let (_client_end, server_end) =
create_endpoints::<ClientMarker>().expect("failed to create test fidl channel");
let client_stream = server_end.into_stream().expect("failed to create test request stream");
let mut client = Client::<StubSocket>::start(
[1, 2, 3], /* transaction ID */
ClientConfig {
information_config: Some(InformationConfig { ..InformationConfig::EMPTY }),
..ClientConfig::EMPTY
},
1, /* interface ID */
StubSocket {},
std_socket_addr!("[::1]:0"),
client_stream,
)
.await
.expect("failed to create test client");
assert_matches!(
client.handle_next_event(&mut [0u8]).await,
Err(ClientError::SocketRecv(err)) if err.kind() == std::io::ErrorKind::Other
);
}
}
| 41.266165 | 116 | 0.555554 |
11e2cbd4233043e9569cda8c932fbfafcc423e7f | 4,752 | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::error::Error as StdError;
use std::io;
use std::str::Utf8Error;
use std::string::FromUtf8Error;
use std::{error, str};
use tipb::expression::ScalarFuncSig;
use tipb::select;
use regex::Error as RegexpError;
use util;
pub const ERR_UNKNOWN: i32 = 1105;
pub const ERR_REGEXP: i32 = 1139;
pub const WARN_DATA_TRUNCATED: i32 = 1265;
pub const ERR_TRUNCATE_WRONG_VALUE: i32 = 1292;
pub const ERR_UNKNOWN_TIMEZONE: i32 = 1298;
pub const ERR_DIVISION_BY_ZERO: i32 = 1365;
pub const ERR_DATA_TOO_LONG: i32 = 1406;
pub const ERR_DATA_OUT_OF_RANGE: i32 = 1690;
quick_error! {
#[derive(Debug)]
pub enum Error {
InvalidDataType(reason: String) {
description("invalid data type")
display("{}", reason)
}
Encoding(err: Utf8Error) {
from()
cause(err)
description("encoding failed")
}
ColumnOffset(offset: usize) {
description("column offset not found")
display("illegal column offset: {}", offset)
}
UnknownSignature(sig: ScalarFuncSig) {
description("Unknown signature")
display("Unknown signature: {:?}", sig)
}
Eval(s: String,code:i32) {
description("evaluation failed")
display("{}", s)
}
Other(err: Box<error::Error + Send + Sync>) {
from()
cause(err.as_ref())
description(err.description())
display("unknown error {:?}", err)
}
}
}
impl Error {
pub fn overflow(data: &str, expr: &str) -> Error {
let msg = format!("{} value is out of range in {:?}", data, expr);
Error::Eval(msg, ERR_DATA_OUT_OF_RANGE)
}
pub fn truncated_wrong_val(data_type: &str, val: &str) -> Error {
let msg = format!("Truncated incorrect {} value: '{}'", data_type, val);
Error::Eval(msg, ERR_TRUNCATE_WRONG_VALUE)
}
pub fn truncated() -> Error {
Error::Eval("Data Truncated".into(), WARN_DATA_TRUNCATED)
}
pub fn cast_neg_int_as_unsigned() -> Error {
let msg = "Cast to unsigned converted negative integer to it's positive complement";
Error::Eval(msg.into(), ERR_UNKNOWN)
}
pub fn cast_as_signed_overflow() -> Error {
let msg =
"Cast to signed converted positive out-of-range integer to it's negative complement";
Error::Eval(msg.into(), ERR_UNKNOWN)
}
pub fn invalid_timezone(given_time_zone: &str) -> Error {
let msg = format!("unknown or incorrect time zone: {}", given_time_zone);
Error::Eval(msg, ERR_UNKNOWN_TIMEZONE)
}
pub fn division_by_zero() -> Error {
let msg = "Division by 0";
Error::Eval(msg.into(), ERR_DIVISION_BY_ZERO)
}
pub fn data_too_long(msg: String) -> Error {
if msg.is_empty() {
Error::Eval("Data Too Long".into(), ERR_DATA_TOO_LONG)
} else {
Error::Eval(msg, ERR_DATA_TOO_LONG)
}
}
pub fn code(&self) -> i32 {
match *self {
Error::Eval(_, code) => code,
_ => ERR_UNKNOWN,
}
}
pub fn is_overflow(&self) -> bool {
self.code() == ERR_DATA_OUT_OF_RANGE
}
pub fn unexpected_eof() -> Error {
util::codec::Error::unexpected_eof().into()
}
}
impl Into<select::Error> for Error {
fn into(self) -> select::Error {
let mut err = select::Error::new();
err.set_code(self.code());
err.set_msg(format!("{:?}", self));
err
}
}
impl From<FromUtf8Error> for Error {
fn from(err: FromUtf8Error) -> Error {
Error::Encoding(err.utf8_error())
}
}
impl From<util::codec::Error> for Error {
fn from(err: util::codec::Error) -> Error {
box_err!("codec:{:?}", err)
}
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Error {
let uerr: util::codec::Error = err.into();
uerr.into()
}
}
impl From<RegexpError> for Error {
fn from(err: RegexpError) -> Error {
let msg = format!("Got error '{:.64}' from regexp", err.description());
Error::Eval(msg, ERR_REGEXP)
}
}
pub type Result<T> = ::std::result::Result<T, Error>;
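// A small sanity check of how the error constructors above pair with `code()`
// and `is_overflow()`; every name used here is defined in this module.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_error_codes() {
        let err = Error::overflow("BIGINT", "1 + 9223372036854775807");
        assert_eq!(err.code(), ERR_DATA_OUT_OF_RANGE);
        assert!(err.is_overflow());
        assert_eq!(Error::truncated().code(), WARN_DATA_TRUNCATED);
        assert_eq!(Error::division_by_zero().code(), ERR_DIVISION_BY_ZERO);
    }
}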
| 28.97561 | 97 | 0.599116 |
2601275792715cbbe79d5ecbd8976841967b6b96 | 2,992 | #![allow(unused_imports)]
use super::*;
use wasm_bindgen::prelude::*;
#[wasm_bindgen]
extern "C" {
# [wasm_bindgen (extends = :: js_sys :: Object , js_name = SpeechSynthesisVoice , typescript_type = "SpeechSynthesisVoice")]
#[derive(Debug, Clone, PartialEq, Eq)]
#[doc = "The `SpeechSynthesisVoice` class."]
#[doc = ""]
#[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/SpeechSynthesisVoice)"]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `SpeechSynthesisVoice`*"]
pub type SpeechSynthesisVoice;
# [wasm_bindgen (structural , method , getter , js_class = "SpeechSynthesisVoice" , js_name = voiceURI)]
#[doc = "Getter for the `voiceURI` field of this object."]
#[doc = ""]
#[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/SpeechSynthesisVoice/voiceURI)"]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `SpeechSynthesisVoice`*"]
pub fn voice_uri(this: &SpeechSynthesisVoice) -> String;
# [wasm_bindgen (structural , method , getter , js_class = "SpeechSynthesisVoice" , js_name = name)]
#[doc = "Getter for the `name` field of this object."]
#[doc = ""]
#[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/SpeechSynthesisVoice/name)"]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `SpeechSynthesisVoice`*"]
pub fn name(this: &SpeechSynthesisVoice) -> String;
# [wasm_bindgen (structural , method , getter , js_class = "SpeechSynthesisVoice" , js_name = lang)]
#[doc = "Getter for the `lang` field of this object."]
#[doc = ""]
#[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/SpeechSynthesisVoice/lang)"]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `SpeechSynthesisVoice`*"]
pub fn lang(this: &SpeechSynthesisVoice) -> String;
# [wasm_bindgen (structural , method , getter , js_class = "SpeechSynthesisVoice" , js_name = localService)]
#[doc = "Getter for the `localService` field of this object."]
#[doc = ""]
#[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/SpeechSynthesisVoice/localService)"]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `SpeechSynthesisVoice`*"]
pub fn local_service(this: &SpeechSynthesisVoice) -> bool;
# [wasm_bindgen (structural , method , getter , js_class = "SpeechSynthesisVoice" , js_name = default)]
#[doc = "Getter for the `default` field of this object."]
#[doc = ""]
#[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/SpeechSynthesisVoice/default)"]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `SpeechSynthesisVoice`*"]
pub fn default(this: &SpeechSynthesisVoice) -> bool;
}
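// A hypothetical helper sketching how these bindings are typically consumed.
// The `voice` argument is assumed to be obtained elsewhere (e.g. from the
// `SpeechSynthesis` bindings in a sibling file); only getters declared above
// are used.
pub fn describe_voice(voice: &SpeechSynthesisVoice) -> String {
    // Combine the human-readable name, BCP 47 language tag, and default flag.
    format!(
        "{} ({}){}",
        voice.name(),
        voice.lang(),
        if voice.default() { " [default]" } else { "" }
    )
}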
| 59.84 | 128 | 0.674465 |
4a78fe187a5be3bd9d605737f6f2b1736e4d7d9c | 19,484 | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use crate::reason::{self, Reason};
use eq_modulo_pos::{EqModuloPos, EqModuloPosAndReason};
use hcons::Hc;
use oxidized::{aast, ast_defs};
use pos::{Bytes, ModuleName, Positioned, Symbol, TypeConstName, TypeName};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::collections::BTreeMap;
use std::fmt;
use utils::core::Ident;
pub use oxidized::{
aast_defs::{ReifyKind, Tprim as Prim},
ast_defs::{Abstraction, ClassishKind, ConstraintKind, Visibility},
typing_defs::ClassConstKind,
typing_defs_core::{ConsistentKind, Enforcement, Exact, ParamMode, ShapeKind},
typing_defs_flags::{self, ClassEltFlags, ClassEltFlagsArgs, FunParamFlags, FunTypeFlags},
xhp_attribute::{Tag, XhpAttribute},
};
// c.f. ast_defs::XhpEnumValue
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
pub enum XhpEnumValue {
XEVInt(isize),
XEVString(Symbol),
}
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
pub enum CeVisibility {
Public,
Private(TypeName),
Protected(TypeName),
Internal(ModuleName),
}
#[derive(
Clone,
Debug,
Eq,
EqModuloPos,
EqModuloPosAndReason,
Hash,
PartialEq,
Serialize,
Deserialize
)]
pub enum IfcFunDecl {
FDPolicied(Option<Symbol>),
FDInferFlows,
}
// The OCaml type `tshape_field_name` includes positions, but ignores those
// positions in its `ord` implementation. We can't do the same, though: Rust
// hash tables require impls of Hash and Eq to agree, and our Hash impl must
// take positions into account (else hash-consing will produce bad results). We
// could write a custom Ord impl which disagrees with the Eq impl, but it would
// violate the [PartialOrd requirement][] that `a == b` if and only if
// `partial_cmp(a, b) == Some(Equal)`, and the [Ord requirement][] for a strict
// total order.
//
// [PartialOrd requirement]: https://doc.rust-lang.org/std/cmp/trait.PartialOrd.html
// [Ord requirement]: https://doc.rust-lang.org/std/cmp/trait.Ord.html#corollaries
//
// Instead, we omit the positions from these keys, and store the field name's
// position as part of the map's value (in a `ShapeFieldNamePos`).
#[derive(
Copy,
Clone,
Debug,
Eq,
EqModuloPos,
EqModuloPosAndReason,
Hash,
Ord,
PartialEq,
PartialOrd
)]
#[derive(Serialize, Deserialize)]
pub enum TshapeFieldName {
TSFlitInt(Symbol),
TSFlitStr(Bytes),
TSFclassConst(TypeName, Symbol),
}
walkable!(TshapeFieldName);
/// The position of a shape field name; e.g., the position of `'a'` in
/// `shape('a' => int)`, or the positions of `Foo` and `X` in
/// `shape(Foo::X => int)`.
#[derive(
Clone,
Debug,
Eq,
EqModuloPos,
EqModuloPosAndReason,
Hash,
Ord,
PartialEq,
PartialOrd
)]
#[derive(Serialize, Deserialize)]
pub enum ShapeFieldNamePos<P> {
Simple(P),
ClassConst(P, P),
}
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
pub enum DependentType {
Texpr(Ident),
}
#[derive(
Clone,
Debug,
Eq,
EqModuloPos,
EqModuloPosAndReason,
Hash,
PartialEq,
Serialize,
Deserialize
)]
pub struct UserAttribute<P> {
pub name: Positioned<TypeName, P>,
pub classname_params: Box<[TypeName]>,
}
#[derive(
Clone,
Debug,
Eq,
EqModuloPos,
EqModuloPosAndReason,
Hash,
PartialEq,
Serialize,
Deserialize
)]
#[serde(bound = "R: Reason, TY: Serialize + DeserializeOwned")]
pub struct Tparam<R: Reason, TY> {
pub variance: ast_defs::Variance,
pub name: Positioned<TypeName, R::Pos>,
pub tparams: Box<[Tparam<R, TY>]>,
pub constraints: Box<[(ConstraintKind, TY)]>,
pub reified: ReifyKind,
pub user_attributes: Box<[UserAttribute<R::Pos>]>,
}
walkable!(impl<R: Reason, TY> for Tparam<R, TY> => [tparams, constraints]);
#[derive(
Clone,
Debug,
Eq,
EqModuloPos,
EqModuloPosAndReason,
Hash,
PartialEq,
Serialize,
Deserialize
)]
pub struct WhereConstraint<TY>(pub TY, pub ast_defs::ConstraintKind, pub TY);
walkable!(impl<R: Reason, TY> for WhereConstraint<TY> => [0, 1, 2]);
#[derive(
Clone,
Eq,
EqModuloPos,
EqModuloPosAndReason,
Hash,
PartialEq,
Serialize,
Deserialize
)]
#[serde(bound = "R: Reason")]
pub struct Ty<R: Reason>(R, Hc<Ty_<R>>);
walkable!(Ty<R> as visit_decl_ty => [0, 1]);
impl<R: Reason> Ty<R> {
#[inline]
pub fn new(reason: R, ty: Ty_<R>) -> Self {
Self(reason, Hc::new(ty))
}
pub fn prim(r: R, prim: Prim) -> Self {
Self::new(r, Ty_::Tprim(prim))
}
pub fn void(r: R) -> Self {
Self::prim(r, Prim::Tvoid)
}
pub fn any(r: R) -> Self {
Self::new(r, Ty_::Tany)
}
pub fn this(r: R) -> Self {
Self::new(r, Ty_::Tthis)
}
pub fn apply(
reason: R,
type_name: Positioned<TypeName, R::Pos>,
tparams: Box<[Ty<R>]>,
) -> Self {
Self::new(reason, Ty_::Tapply(Box::new((type_name, tparams))))
}
pub fn generic(reason: R, name: TypeName, tparams: Box<[Ty<R>]>) -> Self {
Self::new(reason, Ty_::Tgeneric(Box::new((name, tparams))))
}
#[inline]
pub fn access(reason: R, taccess: TaccessType<R, Ty<R>>) -> Self {
Self::new(reason, Ty_::Taccess(Box::new(taccess)))
}
pub fn pos(&self) -> &R::Pos {
self.0.pos()
}
pub fn reason(&self) -> &R {
&self.0
}
pub fn node(&self) -> &Hc<Ty_<R>> {
&self.1
}
pub fn node_ref(&self) -> &Ty_<R> {
&self.1
}
pub fn unwrap_class_type(&self) -> (&R, Positioned<TypeName, R::Pos>, &[Ty<R>]) {
use Ty_::*;
let r = self.reason();
match &**self.node() {
Tapply(id_and_args) => {
let (pos_id, args) = &**id_and_args;
(r, pos_id.clone(), args)
}
_ => (r, Positioned::new(r.pos().clone(), TypeName::from("")), &[]),
}
}
}
/// A shape may specify whether or not fields are required. For example, consider
/// this typedef:
///
/// ```
/// type ShapeWithOptionalField = shape(?'a' => ?int);
/// ```
///
/// With this definition, the field 'a' may be unprovided in a shape. In this
/// case, the field 'a' would have sf_optional set to true.
#[derive(
Clone,
Debug,
Eq,
EqModuloPos,
EqModuloPosAndReason,
Hash,
PartialEq,
Serialize,
Deserialize
)]
#[serde(bound = "R: Reason")]
pub struct ShapeFieldType<R: Reason> {
pub field_name_pos: ShapeFieldNamePos<R::Pos>,
pub optional: bool,
pub ty: Ty<R>,
}
walkable!(ShapeFieldType<R> => [ty]);
#[derive(
Clone,
Debug,
Eq,
EqModuloPos,
EqModuloPosAndReason,
Hash,
PartialEq,
Serialize,
Deserialize
)]
#[serde(bound = "R: Reason")]
pub enum Ty_<R: Reason> {
/// The late static bound type of a class
Tthis,
/// Either an object type or a type alias, ty list are the arguments
Tapply(Box<(Positioned<TypeName, R::Pos>, Box<[Ty<R>]>)>),
/// "Any" is the type of a variable with a missing annotation, and "mixed" is
/// the type of a variable annotated as "mixed". THESE TWO ARE VERY DIFFERENT!
/// Any unifies with anything, i.e., it is both a supertype and subtype of any
/// other type. You can do literally anything to it; it's the "trust me" type.
/// Mixed, on the other hand, is only a supertype of everything. You need to do
/// a case analysis to figure out what it is (i.e., its elimination form).
///
/// Here's an example to demonstrate:
///
/// ```
/// function f($x): int {
/// return $x + 1;
/// }
/// ```
///
/// In that example, $x has type Tany. This unifies with anything, so adding
/// one to it is allowed, and returning that as int is allowed.
///
/// In contrast, if $x were annotated as mixed, adding one to that would be
/// a type error -- mixed is not a subtype of int, and you must be a subtype
/// of int to take part in addition. (The converse is true though -- int is a
/// subtype of mixed.) A case analysis would need to be done on $x, via
/// is_int or similar.
///
    /// mixed exists only in the decl phase because it is desugared into ?nonnull
/// during the localization phase.
Tmixed,
Tlike(Ty<R>),
Tany,
Terr,
Tnonnull,
/// A dynamic type is a special type which sometimes behaves as if it were a
/// top type; roughly speaking, where a specific value of a particular type is
/// expected and that type is dynamic, anything can be given. We call this
/// behaviour "coercion", in that the types "coerce" to dynamic. In other ways it
/// behaves like a bottom type; it can be used in any sort of binary expression
/// or even have object methods called from it. However, it is in fact neither.
///
    /// It captures dynamism within function scope.
/// See tests in typecheck/dynamic/ for more examples.
Tdynamic,
/// Nullable, called "option" in the ML parlance.
Toption(Ty<R>),
/// All the primitive types: int, string, void, etc.
Tprim(aast::Tprim),
/// A wrapper around fun_type, which contains the full type information for a
/// function, method, lambda, etc.
Tfun(Box<FunType<R, Ty<R>>>),
/// Tuple, with ordered list of the types of the elements of the tuple.
Ttuple(Box<[Ty<R>]>),
/// Whether all fields of this shape are known, types of each of the
/// known arms.
Tshape(Box<(ShapeKind, BTreeMap<TshapeFieldName, ShapeFieldType<R>>)>),
Tvar(Ident),
/// The type of a generic parameter. The constraints on a generic parameter
/// are accessed through the lenv.tpenv component of the environment, which
/// is set up when checking the body of a function or method. See uses of
/// Typing_phase.add_generic_parameters_and_constraints. The list denotes
/// type arguments.
Tgeneric(Box<(TypeName, Box<[Ty<R>]>)>),
/// Union type.
/// The values that are members of this type are the union of the values
/// that are members of the components of the union.
/// Some examples (writing | for binary union)
/// Tunion [] is the "nothing" type, with no values
/// Tunion [int;float] is the same as num
/// Tunion [null;t] is the same as Toption t
Tunion(Box<[Ty<R>]>),
Tintersection(Box<[Ty<R>]>),
/// Tvec_or_dict (ty1, ty2) => "vec_or_dict<ty1, ty2>"
TvecOrDict(Box<(Ty<R>, Ty<R>)>),
Taccess(Box<TaccessType<R, Ty<R>>>),
}
// We've boxed all variants of Ty_ which are larger than two usizes, so the
// total size should be equal to `[usize; 3]` (one more for the discriminant).
// This is important because all variants use the same amount of memory and are
// passed around by value, so adding a large unboxed variant can cause a large
// regression.
static_assertions::assert_eq_size!(Ty_<reason::NReason>, [usize; 3]);
static_assertions::assert_eq_size!(Ty_<reason::BReason>, [usize; 3]);
impl<R: Reason> hcons::Consable for Ty_<R> {
#[inline]
fn conser() -> &'static hcons::Conser<Ty_<R>> {
R::decl_ty_conser()
}
}
impl<R: Reason> crate::visitor::Walkable<R> for Ty_<R> {
fn recurse(&self, v: &mut dyn crate::visitor::Visitor<R>) {
use Ty_::*;
match self {
Tthis | Tmixed | Tany | Terr | Tnonnull | Tdynamic | Tprim(_) | Tvar(_) => {}
Tapply(id_and_args) => {
let (_, args) = &**id_and_args;
args.accept(v)
}
Tlike(ty) | Toption(ty) => ty.accept(v),
Tfun(ft) => ft.accept(v),
Ttuple(tys) | Tunion(tys) | Tintersection(tys) => tys.accept(v),
Tshape(kind_and_fields) => {
let (_, fields) = &**kind_and_fields;
fields.accept(v)
}
Tgeneric(id_and_args) => {
let (_, args) = &**id_and_args;
args.accept(v)
}
TvecOrDict(key_and_val_tys) => {
let (kty, vty) = &**key_and_val_tys;
kty.accept(v);
vty.accept(v)
}
Taccess(tt) => tt.accept(v),
}
}
}
/// A Type const access expression of the form <type expr>::C.
#[derive(
Clone,
Debug,
Eq,
EqModuloPos,
EqModuloPosAndReason,
Hash,
PartialEq,
Serialize,
Deserialize
)]
#[serde(bound = "R: Reason, TY: Serialize + DeserializeOwned")]
pub struct TaccessType<R: Reason, TY> {
/// Type expression to the left of `::`
pub ty: TY,
/// Name of type const to the right of `::`
pub type_const: Positioned<TypeConstName, R::Pos>,
}
walkable!(impl<R: Reason, TY> for TaccessType<R, TY> => [ty]);
#[derive(
Clone,
Debug,
Eq,
EqModuloPos,
EqModuloPosAndReason,
Hash,
PartialEq,
Serialize,
Deserialize
)]
#[serde(bound = "R: Reason, TY: Serialize + DeserializeOwned")]
pub enum Capability<R: Reason, TY> {
CapDefaults(R::Pos),
CapTy(TY),
}
walkable!(impl<R: Reason, TY> for Capability<R, TY> => {
Self::CapDefaults(..) => [],
Self::CapTy(ty) => [ty],
});
/// Companion to fun_params type, intended to consolidate checking of
/// implicit params for functions.
#[derive(
Clone,
Debug,
Eq,
EqModuloPos,
EqModuloPosAndReason,
Hash,
PartialEq,
Serialize,
Deserialize
)]
#[serde(bound = "R: Reason, TY: Serialize + DeserializeOwned")]
pub struct FunImplicitParams<R: Reason, TY> {
pub capability: Capability<R, TY>,
}
walkable!(impl<R: Reason, TY> for FunImplicitParams<R, TY> => [capability]);
/// The type of a function AND a method.
#[derive(
Clone,
Debug,
Eq,
EqModuloPos,
EqModuloPosAndReason,
Hash,
PartialEq,
Serialize,
Deserialize
)]
#[serde(bound = "R: Reason, TY: Serialize + DeserializeOwned")]
pub struct FunType<R: Reason, TY> {
pub tparams: Box<[Tparam<R, TY>]>,
pub where_constraints: Box<[WhereConstraint<TY>]>,
pub params: FunParams<R, TY>,
pub implicit_params: FunImplicitParams<R, TY>,
/// Carries through the sync/async information from the aast
pub ret: PossiblyEnforcedTy<TY>,
pub flags: typing_defs_flags::FunTypeFlags,
pub ifc_decl: IfcFunDecl,
}
walkable!(impl<R: Reason, TY> for FunType<R, TY> => [
tparams, where_constraints, params, implicit_params, ret
]);
#[derive(
Clone,
Debug,
Eq,
EqModuloPos,
EqModuloPosAndReason,
Hash,
PartialEq,
Serialize,
Deserialize
)]
#[serde(bound = "TY: Serialize + DeserializeOwned")]
pub struct PossiblyEnforcedTy<TY> {
/// True if consumer of this type enforces it at runtime
pub enforced: Enforcement,
pub ty: TY,
}
walkable!(impl<R: Reason, TY> for PossiblyEnforcedTy<TY> => [ty]);
#[derive(
Clone,
Debug,
Eq,
EqModuloPos,
EqModuloPosAndReason,
Hash,
PartialEq,
Serialize,
Deserialize
)]
#[serde(bound = "R: Reason, TY: Serialize + DeserializeOwned")]
pub struct FunParam<R: Reason, TY> {
pub pos: R::Pos,
pub name: Option<Symbol>,
pub ty: PossiblyEnforcedTy<TY>,
pub flags: FunParamFlags,
}
walkable!(impl<R: Reason, TY> for FunParam<R, TY> => [ty]);
pub type FunParams<R, TY> = Box<[FunParam<R, TY>]>;
/// Origin of Class Constant References:
/// In order to be able to detect cycle definitions like
/// class C {
/// const int A = D::A;
/// }
/// class D {
/// const int A = C::A;
/// }
/// we need to remember which constants were used during initialization.
///
/// Currently the syntax of constants allows direct references to another class
/// like D::A, or self references using self::A.
///
/// class_const_from encodes the origin (class vs self).
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
pub enum ClassConstFrom {
Self_,
From(TypeName),
}
/// Class Constant References:
/// In order to be able to detect cycle definitions like
/// class C {
/// const int A = D::A;
/// }
/// class D {
/// const int A = C::A;
/// }
/// we need to remember which constants were used during initialization.
///
/// Currently the syntax of constants allows direct references to another class
/// like D::A, or self references using self::A.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
pub struct ClassConstRef(pub ClassConstFrom, pub Symbol);
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[serde(bound = "R: Reason")]
pub struct ConstDecl<R: Reason> {
pub pos: R::Pos,
pub ty: Ty<R>,
}
walkable!(ConstDecl<R> => [ty]);
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[serde(bound = "R: Reason")]
pub struct FunElt<R: Reason> {
pub deprecated: Option<Bytes>,
pub module: Option<Positioned<ModuleName, R::Pos>>,
/// Top-level functions have limited visibilities
pub internal: bool,
pub ty: Ty<R>,
pub pos: R::Pos,
pub php_std_lib: bool,
pub support_dynamic_type: bool,
}
walkable!(FunElt<R> => [ty]);
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[serde(bound = "R: Reason")]
pub struct AbstractTypeconst<R: Reason> {
pub as_constraint: Option<Ty<R>>,
pub super_constraint: Option<Ty<R>>,
pub default: Option<Ty<R>>,
}
walkable!(AbstractTypeconst<R> => [as_constraint, super_constraint, default]);
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[serde(bound = "R: Reason")]
pub struct ConcreteTypeconst<R: Reason> {
pub ty: Ty<R>,
}
walkable!(ConcreteTypeconst<R> => [ty]);
#[derive(Clone, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[serde(bound = "R: Reason")]
pub enum Typeconst<R: Reason> {
TCAbstract(AbstractTypeconst<R>),
TCConcrete(ConcreteTypeconst<R>),
}
walkable!(Typeconst<R> => {
Self::TCAbstract(x) => [x],
Self::TCConcrete(x) => [x],
});
impl<R: Reason> fmt::Debug for Typeconst<R> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::TCAbstract(x) => x.fmt(f),
Self::TCConcrete(x) => x.fmt(f),
}
}
}
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[serde(bound = "R: Reason")]
pub struct EnumType<R: Reason> {
pub base: Ty<R>,
pub constraint: Option<Ty<R>>,
pub includes: Box<[Ty<R>]>,
}
walkable!(EnumType<R> => [base, constraint, includes]);
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[serde(bound = "R: Reason")]
pub struct TypedefType<R: Reason> {
pub module: Option<Positioned<ModuleName, R::Pos>>,
pub pos: R::Pos,
pub vis: aast::TypedefVisibility,
pub tparams: Box<[Tparam<R, Ty<R>>]>,
pub constraint: Option<Ty<R>>,
pub ty: Ty<R>,
pub is_ctx: bool,
pub attributes: Box<[UserAttribute<R::Pos>]>,
pub internal: bool,
}
walkable!(TypedefType<R> => [tparams, constraint, ty]);
walkable!(ast_defs::ConstraintKind);
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
pub struct ModuleDefType<R: Reason> {
pub pos: R::Pos,
}
walkable!(ModuleDefType<R> => []);
| 28.402332 | 93 | 0.630979 |
293fcff3fb9b7c887ad1cb7eb492b4c60d3bc9b7 | 730 | // This resembles the OpenCV read image example code:
// http://docs.opencv.org/3.1.0/db/deb/tutorial_display_image.html
extern crate cv;
use cv::highgui::*;
use cv::imgcodecs::ImageReadMode;
use cv::*;
fn main() {
let args: Vec<_> = std::env::args().collect();
if args.len() != 2 {
println!("Usage: display_image ImageToLoadAndDisplay");
std::process::exit(-1);
}
let mat = Mat::from_path(&args[1], ImageReadMode::Color).expect("Failed to read from path");
if !mat.is_valid() {
println!("Could not open or find the image");
std::process::exit(-1);
}
highgui_named_window("Display window", WindowFlag::Normal).unwrap();
mat.show("Display window", 0).unwrap();
}
| 29.2 | 96 | 0.636986 |
bb91976397892d1e5216b2b32dc92fe10e1f1818 | 11,835 | //! Safe Rust wrappers for types defined in the Python `datetime` library
//!
//! For more details about these types, see the [Python
//! documentation](https://docs.python.org/3/library/datetime.html)
#![allow(clippy::too_many_arguments)]
use crate::err::PyResult;
use crate::ffi;
#[cfg(PyPy)]
use crate::ffi::datetime::{PyDateTime_FromTimestamp, PyDate_FromTimestamp};
use crate::ffi::PyDateTimeAPI;
use crate::ffi::{PyDateTime_Check, PyDate_Check, PyDelta_Check, PyTZInfo_Check, PyTime_Check};
#[cfg(not(PyPy))]
use crate::ffi::{PyDateTime_DATE_GET_FOLD, PyDateTime_TIME_GET_FOLD};
use crate::ffi::{
PyDateTime_DATE_GET_HOUR, PyDateTime_DATE_GET_MICROSECOND, PyDateTime_DATE_GET_MINUTE,
PyDateTime_DATE_GET_SECOND,
};
use crate::ffi::{
PyDateTime_DELTA_GET_DAYS, PyDateTime_DELTA_GET_MICROSECONDS, PyDateTime_DELTA_GET_SECONDS,
};
use crate::ffi::{PyDateTime_GET_DAY, PyDateTime_GET_MONTH, PyDateTime_GET_YEAR};
use crate::ffi::{
PyDateTime_TIME_GET_HOUR, PyDateTime_TIME_GET_MICROSECOND, PyDateTime_TIME_GET_MINUTE,
PyDateTime_TIME_GET_SECOND,
};
use crate::types::PyTuple;
use crate::{AsPyPointer, PyAny, PyObject, Python, ToPyObject};
use std::os::raw::c_int;
#[cfg(not(PyPy))]
use std::ptr;
/// Access traits
/// Trait for accessing the date components of a struct containing a date.
pub trait PyDateAccess {
fn get_year(&self) -> i32;
fn get_month(&self) -> u8;
fn get_day(&self) -> u8;
}
/// Trait for accessing the components of a struct containing a timedelta.
///
/// Note: These access the individual components of a (day, second,
/// microsecond) representation of the delta, they are *not* intended as
/// aliases for calculating the total duration in each of these units.
pub trait PyDeltaAccess {
fn get_days(&self) -> i32;
fn get_seconds(&self) -> i32;
fn get_microseconds(&self) -> i32;
}
/// Trait for accessing the time components of a struct containing a time.
pub trait PyTimeAccess {
fn get_hour(&self) -> u8;
fn get_minute(&self) -> u8;
fn get_second(&self) -> u8;
fn get_microsecond(&self) -> u32;
#[cfg(not(PyPy))]
fn get_fold(&self) -> bool;
}
/// Bindings around `datetime.date`
#[repr(transparent)]
pub struct PyDate(PyAny);
pyobject_native_type!(
PyDate,
crate::ffi::PyDateTime_Date,
*PyDateTimeAPI.DateType,
Some("datetime"),
PyDate_Check
);
impl PyDate {
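    /// Creates a new `datetime.date`.
    ///
    /// A short usage sketch; all names come from this module:
    ///
    /// ```
    /// use pyo3::{Python, types::{PyDate, PyDateAccess}};
    /// Python::with_gil(|py| {
    ///     let date = PyDate::new(py, 2021, 1, 23).unwrap();
    ///     assert_eq!(date.get_month(), 1);
    /// });
    /// ```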
pub fn new(py: Python, year: i32, month: u8, day: u8) -> PyResult<&PyDate> {
unsafe {
let ptr = (PyDateTimeAPI.Date_FromDate)(
year,
c_int::from(month),
c_int::from(day),
PyDateTimeAPI.DateType,
);
py.from_owned_ptr_or_err(ptr)
}
}
/// Construct a `datetime.date` from a POSIX timestamp
///
/// This is equivalent to `datetime.date.fromtimestamp`
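    ///
    /// # Examples
    ///
    /// A short sketch; like the Python function it wraps, the result depends
    /// on the local time zone, so no field values are asserted here:
    ///
    /// ```
    /// use pyo3::{Python, types::PyDate};
    /// Python::with_gil(|py| {
    ///     let _date = PyDate::from_timestamp(py, 0).expect("failed to construct date");
    /// });
    /// ```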
pub fn from_timestamp(py: Python, timestamp: i64) -> PyResult<&PyDate> {
let time_tuple = PyTuple::new(py, &[timestamp]);
unsafe {
#[cfg(PyPy)]
let ptr = PyDate_FromTimestamp(time_tuple.as_ptr());
#[cfg(not(PyPy))]
let ptr =
(PyDateTimeAPI.Date_FromTimestamp)(PyDateTimeAPI.DateType, time_tuple.as_ptr());
py.from_owned_ptr_or_err(ptr)
}
}
}
impl PyDateAccess for PyDate {
fn get_year(&self) -> i32 {
unsafe { PyDateTime_GET_YEAR(self.as_ptr()) as i32 }
}
fn get_month(&self) -> u8 {
unsafe { PyDateTime_GET_MONTH(self.as_ptr()) as u8 }
}
fn get_day(&self) -> u8 {
unsafe { PyDateTime_GET_DAY(self.as_ptr()) as u8 }
}
}
/// Bindings for `datetime.datetime`
#[repr(transparent)]
pub struct PyDateTime(PyAny);
pyobject_native_type!(
PyDateTime,
crate::ffi::PyDateTime_DateTime,
*PyDateTimeAPI.DateTimeType,
Some("datetime"),
PyDateTime_Check
);
impl PyDateTime {
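    /// Creates a new `datetime.datetime` object.
    ///
    /// A short usage sketch; all names come from this module:
    ///
    /// ```
    /// use pyo3::{Python, types::{PyDateTime, PyDateAccess, PyTimeAccess}};
    /// Python::with_gil(|py| {
    ///     let dt = PyDateTime::new(py, 2021, 1, 23, 20, 32, 40, 341516, None).unwrap();
    ///     assert_eq!(dt.get_year(), 2021);
    ///     assert_eq!(dt.get_hour(), 20);
    /// });
    /// ```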
pub fn new<'p>(
py: Python<'p>,
year: i32,
month: u8,
day: u8,
hour: u8,
minute: u8,
second: u8,
microsecond: u32,
tzinfo: Option<&PyObject>,
) -> PyResult<&'p PyDateTime> {
unsafe {
let ptr = (PyDateTimeAPI.DateTime_FromDateAndTime)(
year,
c_int::from(month),
c_int::from(day),
c_int::from(hour),
c_int::from(minute),
c_int::from(second),
microsecond as c_int,
opt_to_pyobj(py, tzinfo),
PyDateTimeAPI.DateTimeType,
);
py.from_owned_ptr_or_err(ptr)
}
}
#[cfg(not(PyPy))]
    /// Alternate constructor that takes a `fold` parameter, which is used to
    /// disambiguate wall-clock times that occur twice (e.g. during a
    /// daylight-saving transition); see PEP 495.
pub fn new_with_fold<'p>(
py: Python<'p>,
year: i32,
month: u8,
day: u8,
hour: u8,
minute: u8,
second: u8,
microsecond: u32,
tzinfo: Option<&PyObject>,
fold: bool,
) -> PyResult<&'p PyDateTime> {
unsafe {
let ptr = (PyDateTimeAPI.DateTime_FromDateAndTimeAndFold)(
year,
c_int::from(month),
c_int::from(day),
c_int::from(hour),
c_int::from(minute),
c_int::from(second),
microsecond as c_int,
opt_to_pyobj(py, tzinfo),
c_int::from(fold),
PyDateTimeAPI.DateTimeType,
);
py.from_owned_ptr_or_err(ptr)
}
}
/// Construct a `datetime` object from a POSIX timestamp
///
    /// This is equivalent to `datetime.datetime.fromtimestamp`
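    ///
    /// # Examples
    ///
    /// A short sketch; with `time_zone_info: None` the result is interpreted
    /// in local time, mirroring the Python behaviour, so no field values are
    /// asserted here:
    ///
    /// ```
    /// use pyo3::{Python, types::PyDateTime};
    /// Python::with_gil(|py| {
    ///     let _dt = PyDateTime::from_timestamp(py, 0.0, None)
    ///         .expect("failed to construct datetime");
    /// });
    /// ```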
pub fn from_timestamp<'p>(
py: Python<'p>,
timestamp: f64,
time_zone_info: Option<&PyTzInfo>,
) -> PyResult<&'p PyDateTime> {
let timestamp: PyObject = timestamp.to_object(py);
let time_zone_info: PyObject = match time_zone_info {
Some(time_zone_info) => time_zone_info.to_object(py),
None => py.None(),
};
let args = PyTuple::new(py, &[timestamp, time_zone_info]);
unsafe {
#[cfg(PyPy)]
let ptr = PyDateTime_FromTimestamp(args.as_ptr());
#[cfg(not(PyPy))]
let ptr = {
(PyDateTimeAPI.DateTime_FromTimestamp)(
PyDateTimeAPI.DateTimeType,
args.as_ptr(),
ptr::null_mut(),
)
};
py.from_owned_ptr_or_err(ptr)
}
}
}
impl PyDateAccess for PyDateTime {
fn get_year(&self) -> i32 {
unsafe { PyDateTime_GET_YEAR(self.as_ptr()) as i32 }
}
fn get_month(&self) -> u8 {
unsafe { PyDateTime_GET_MONTH(self.as_ptr()) as u8 }
}
fn get_day(&self) -> u8 {
unsafe { PyDateTime_GET_DAY(self.as_ptr()) as u8 }
}
}
impl PyTimeAccess for PyDateTime {
fn get_hour(&self) -> u8 {
unsafe { PyDateTime_DATE_GET_HOUR(self.as_ptr()) as u8 }
}
fn get_minute(&self) -> u8 {
unsafe { PyDateTime_DATE_GET_MINUTE(self.as_ptr()) as u8 }
}
fn get_second(&self) -> u8 {
unsafe { PyDateTime_DATE_GET_SECOND(self.as_ptr()) as u8 }
}
fn get_microsecond(&self) -> u32 {
unsafe { PyDateTime_DATE_GET_MICROSECOND(self.as_ptr()) as u32 }
}
#[cfg(not(PyPy))]
fn get_fold(&self) -> bool {
unsafe { PyDateTime_DATE_GET_FOLD(self.as_ptr()) > 0 }
}
}
/// Bindings for `datetime.time`
#[repr(transparent)]
pub struct PyTime(PyAny);
pyobject_native_type!(
PyTime,
crate::ffi::PyDateTime_Time,
*PyDateTimeAPI.TimeType,
Some("datetime"),
PyTime_Check
);
impl PyTime {
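    /// Creates a new `datetime.time` object.
    ///
    /// A short usage sketch; all names come from this module:
    ///
    /// ```
    /// use pyo3::{Python, types::{PyTime, PyTimeAccess}};
    /// Python::with_gil(|py| {
    ///     let t = PyTime::new(py, 20, 32, 40, 341516, None).unwrap();
    ///     assert_eq!(t.get_hour(), 20);
    /// });
    /// ```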
pub fn new<'p>(
py: Python<'p>,
hour: u8,
minute: u8,
second: u8,
microsecond: u32,
tzinfo: Option<&PyObject>,
) -> PyResult<&'p PyTime> {
unsafe {
let ptr = (PyDateTimeAPI.Time_FromTime)(
c_int::from(hour),
c_int::from(minute),
c_int::from(second),
microsecond as c_int,
opt_to_pyobj(py, tzinfo),
PyDateTimeAPI.TimeType,
);
py.from_owned_ptr_or_err(ptr)
}
}
#[cfg(not(PyPy))]
/// Alternate constructor that takes a `fold` argument
pub fn new_with_fold<'p>(
py: Python<'p>,
hour: u8,
minute: u8,
second: u8,
microsecond: u32,
tzinfo: Option<&PyObject>,
fold: bool,
) -> PyResult<&'p PyTime> {
unsafe {
let ptr = (PyDateTimeAPI.Time_FromTimeAndFold)(
c_int::from(hour),
c_int::from(minute),
c_int::from(second),
microsecond as c_int,
opt_to_pyobj(py, tzinfo),
fold as c_int,
PyDateTimeAPI.TimeType,
);
py.from_owned_ptr_or_err(ptr)
}
}
}
impl PyTimeAccess for PyTime {
fn get_hour(&self) -> u8 {
unsafe { PyDateTime_TIME_GET_HOUR(self.as_ptr()) as u8 }
}
fn get_minute(&self) -> u8 {
unsafe { PyDateTime_TIME_GET_MINUTE(self.as_ptr()) as u8 }
}
fn get_second(&self) -> u8 {
unsafe { PyDateTime_TIME_GET_SECOND(self.as_ptr()) as u8 }
}
fn get_microsecond(&self) -> u32 {
unsafe { PyDateTime_TIME_GET_MICROSECOND(self.as_ptr()) as u32 }
}
#[cfg(not(PyPy))]
fn get_fold(&self) -> bool {
unsafe { PyDateTime_TIME_GET_FOLD(self.as_ptr()) != 0 }
}
}
/// Bindings for `datetime.tzinfo`
///
/// This is an abstract base class and should not be constructed directly.
#[repr(transparent)]
pub struct PyTzInfo(PyAny);
pyobject_native_type!(
PyTzInfo,
crate::ffi::PyObject,
*PyDateTimeAPI.TZInfoType,
Some("datetime"),
PyTZInfo_Check
);
/// Bindings for `datetime.timedelta`
#[repr(transparent)]
pub struct PyDelta(PyAny);
pyobject_native_type!(
PyDelta,
crate::ffi::PyDateTime_Delta,
*PyDateTimeAPI.DeltaType,
Some("datetime"),
PyDelta_Check
);
impl PyDelta {
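    /// Creates a new `timedelta`; `days`, `seconds`, and `microseconds` are
    /// forwarded to the underlying `Delta_FromDelta` call together with the
    /// `normalize` flag.
    ///
    /// A short usage sketch; all names come from this module:
    ///
    /// ```
    /// use pyo3::{Python, types::{PyDelta, PyDeltaAccess}};
    /// Python::with_gil(|py| {
    ///     let delta = PyDelta::new(py, 1, 2, 3, true).unwrap();
    ///     assert_eq!(delta.get_days(), 1);
    ///     assert_eq!(delta.get_seconds(), 2);
    ///     assert_eq!(delta.get_microseconds(), 3);
    /// });
    /// ```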
pub fn new(
py: Python,
days: i32,
seconds: i32,
microseconds: i32,
normalize: bool,
) -> PyResult<&PyDelta> {
unsafe {
let ptr = (PyDateTimeAPI.Delta_FromDelta)(
days as c_int,
seconds as c_int,
microseconds as c_int,
normalize as c_int,
PyDateTimeAPI.DeltaType,
);
py.from_owned_ptr_or_err(ptr)
}
}
}
impl PyDeltaAccess for PyDelta {
fn get_days(&self) -> i32 {
unsafe { PyDateTime_DELTA_GET_DAYS(self.as_ptr()) as i32 }
}
fn get_seconds(&self) -> i32 {
unsafe { PyDateTime_DELTA_GET_SECONDS(self.as_ptr()) as i32 }
}
fn get_microseconds(&self) -> i32 {
unsafe { PyDateTime_DELTA_GET_MICROSECONDS(self.as_ptr()) as i32 }
}
}
// Utility function
fn opt_to_pyobj(py: Python, opt: Option<&PyObject>) -> *mut ffi::PyObject {
// Convenience function for unpacking Options to either an Object or None
match opt {
Some(tzi) => tzi.as_ptr(),
None => py.None().as_ptr(),
}
}
#[cfg(test)]
mod tests {
#[test]
fn test_new_with_fold() {
pyo3::Python::with_gil(|py| {
use pyo3::types::{PyDateTime, PyTimeAccess};
let a = PyDateTime::new_with_fold(py, 2021, 1, 23, 20, 32, 40, 341516, None, false);
let b = PyDateTime::new_with_fold(py, 2021, 1, 23, 20, 32, 40, 341516, None, true);
assert_eq!(a.unwrap().get_fold(), false);
assert_eq!(b.unwrap().get_fold(), true);
});
}
}
| 27.587413 | 96 | 0.582594 |
e9e5d5f97e50ef5dbbb45130d5ddf32c574604c6 | 1,753 | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
use ::test::run_test;
#[test]
fn vlddqu_1() {
run_test(&Instruction { mnemonic: Mnemonic::VLDDQU, operand1: Some(Direct(XMM1)), operand2: Some(IndirectScaledIndexed(EDX, EBX, Two, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 251, 240, 12, 90], OperandSize::Dword)
}
#[test]
fn vlddqu_2() {
run_test(&Instruction { mnemonic: Mnemonic::VLDDQU, operand1: Some(Direct(XMM5)), operand2: Some(IndirectScaledDisplaced(RDX, Two, 267189977, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 251, 240, 44, 85, 217, 254, 236, 15], OperandSize::Qword)
}
#[test]
fn vlddqu_3() {
run_test(&Instruction { mnemonic: Mnemonic::VLDDQU, operand1: Some(Direct(YMM5)), operand2: Some(IndirectScaledIndexed(EBX, EDI, Eight, Some(OperandSize::Ymmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 255, 240, 44, 251], OperandSize::Dword)
}
#[test]
fn vlddqu_4() {
run_test(&Instruction { mnemonic: Mnemonic::VLDDQU, operand1: Some(Direct(YMM4)), operand2: Some(IndirectDisplaced(RDX, 961091475, Some(OperandSize::Ymmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 255, 240, 162, 147, 23, 73, 57], OperandSize::Qword)
}
| 60.448276 | 373 | 0.709641 |
1116b29057d9596f21ab7ab01971436807d6ec07 | 26,778 | // #[allow(deprecated)] doesn't silence warnings on the method invocations,
// which would call the inherent methods if AsciiExt wasn't in scope.
#![cfg_attr(feature = "std", allow(deprecated))]
use core::{fmt, mem};
use core::ops::{Index, IndexMut, Range, RangeTo, RangeFrom, RangeFull};
use core::slice::{Iter, IterMut};
#[cfg(feature = "std")]
use std::error::Error;
#[cfg(feature = "std")]
use std::ascii::AsciiExt;
use ascii_char::AsciiChar;
#[cfg(feature = "std")]
use ascii_string::AsciiString;
/// AsciiStr represents a byte or string slice that only contains ASCII characters.
///
/// It wraps an `[AsciiChar]` and implements many of `str`'s methods and traits.
///
/// It can be created by a checked conversion from a `str` or `[u8]`, or borrowed from an
/// `AsciiString`.
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct AsciiStr {
slice: [AsciiChar],
}
impl AsciiStr {
/// Coerces into an `AsciiStr` slice.
pub fn new<S: AsRef<AsciiStr> + ?Sized>(s: &S) -> &AsciiStr {
s.as_ref()
}
/// Converts `&self` to a `&str` slice.
#[inline]
pub fn as_str(&self) -> &str {
let ptr = self as *const AsciiStr as *const str;
unsafe { &*ptr }
}
/// Converts `&self` into a byte slice.
#[inline]
pub fn as_bytes(&self) -> &[u8] {
let ptr = self as *const AsciiStr as *const [u8];
unsafe { &*ptr }
}
/// Returns the entire string as slice of `AsciiChar`s.
#[inline]
pub fn as_slice(&self) -> &[AsciiChar] {
&self.slice
}
/// Returns the entire string as mutable slice of `AsciiChar`s.
#[inline]
pub fn as_mut_slice(&mut self) -> &mut [AsciiChar] {
&mut self.slice
}
/// Returns a raw pointer to the `AsciiStr`'s buffer.
///
/// The caller must ensure that the slice outlives the pointer this function returns, or else it
    /// will end up pointing to garbage. Modifying the `AsciiStr` may cause its buffer to be
/// reallocated, which would also make any pointers to it invalid.
#[inline]
pub fn as_ptr(&self) -> *const AsciiChar {
self.as_slice().as_ptr()
}
/// Returns an unsafe mutable pointer to the `AsciiStr`'s buffer.
///
/// The caller must ensure that the slice outlives the pointer this function returns, or else it
    /// will end up pointing to garbage. Modifying the `AsciiStr` may cause its buffer to be
/// reallocated, which would also make any pointers to it invalid.
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut AsciiChar {
self.as_mut_slice().as_mut_ptr()
}
/// Copies the content of this `AsciiStr` into an owned `AsciiString`.
#[cfg(feature = "std")]
pub fn to_ascii_string(&self) -> AsciiString {
AsciiString::from(self.slice.to_vec())
}
/// Converts anything that can represent a byte slice into an `AsciiStr`.
///
/// # Examples
/// ```
/// # use ascii::AsciiStr;
/// let foo = AsciiStr::from_ascii("foo");
/// let err = AsciiStr::from_ascii("Ŋ");
/// assert_eq!(foo.unwrap().as_str(), "foo");
/// assert_eq!(err.unwrap_err().valid_up_to(), 0);
/// ```
#[inline]
pub fn from_ascii<B: ?Sized>(bytes: &B) -> Result<&AsciiStr, AsAsciiStrError>
where
B: AsRef<[u8]>,
{
bytes.as_ref().as_ascii_str()
}
/// Converts anything that can be represented as a byte slice to an `AsciiStr` without checking
    /// for non-ASCII characters.
///
/// # Examples
/// ```
/// # use ascii::AsciiStr;
/// let foo = unsafe{ AsciiStr::from_ascii_unchecked("foo") };
/// assert_eq!(foo.as_str(), "foo");
/// ```
#[inline]
pub unsafe fn from_ascii_unchecked<B: ?Sized>(bytes: &B) -> &AsciiStr
where
B: AsRef<[u8]>,
{
bytes.as_ref().as_ascii_str_unchecked()
}
/// Returns the number of characters / bytes in this ASCII sequence.
///
/// # Examples
/// ```
/// # use ascii::AsciiStr;
/// let s = AsciiStr::from_ascii("foo").unwrap();
/// assert_eq!(s.len(), 3);
/// ```
#[inline]
pub fn len(&self) -> usize {
self.slice.len()
}
/// Returns true if the ASCII slice contains zero bytes.
///
/// # Examples
/// ```
/// # use ascii::AsciiStr;
/// let mut empty = AsciiStr::from_ascii("").unwrap();
/// let mut full = AsciiStr::from_ascii("foo").unwrap();
/// assert!(empty.is_empty());
/// assert!(!full.is_empty());
/// ```
#[inline]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns an iterator over the characters of the `AsciiStr`.
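    ///
    /// # Examples
    ///
    /// A short sketch; `chars` is backed by a plain slice iterator, so the
    /// standard `Iterator` methods apply:
    ///
    /// ```
    /// # use ascii::AsciiStr;
    /// let word = AsciiStr::from_ascii("hi").unwrap();
    /// assert_eq!(word.chars().count(), 2);
    /// ```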
#[inline]
pub fn chars(&self) -> Chars {
self.slice.iter()
}
/// Returns an iterator over the characters of the `AsciiStr` which allows you to modify the
/// value of each `AsciiChar`.
#[inline]
pub fn chars_mut(&mut self) -> CharsMut {
self.slice.iter_mut()
}
/// Returns an iterator over the lines of the `AsciiStr`, which are themselves `AsciiStr`s.
///
/// Lines are ended with either `LineFeed` (`\n`), or `CarriageReturn` then `LineFeed` (`\r\n`).
///
/// The final line ending is optional.
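    ///
    /// # Examples
    ///
    /// A short sketch, assuming `Lines` yields `&AsciiStr` items (its
    /// `Iterator` impl lives further down in this file):
    ///
    /// ```
    /// # use ascii::AsciiStr;
    /// let text = AsciiStr::from_ascii("foo\r\nbar\nbaz").unwrap();
    /// assert_eq!(text.lines().count(), 3);
    /// ```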
#[inline]
pub fn lines(&self) -> Lines {
Lines {
current_index: 0,
string: self,
}
}
/// Returns an ASCII string slice with leading and trailing whitespace removed.
///
/// # Examples
/// ```
/// # use ascii::AsciiStr;
/// let example = AsciiStr::from_ascii(" \twhite \tspace \t").unwrap();
/// assert_eq!("white \tspace", example.trim());
/// ```
pub fn trim(&self) -> &Self {
self.trim_right().trim_left()
}
/// Returns an ASCII string slice with leading whitespace removed.
///
/// # Examples
/// ```
/// # use ascii::AsciiStr;
/// let example = AsciiStr::from_ascii(" \twhite \tspace \t").unwrap();
/// assert_eq!("white \tspace \t", example.trim_left());
/// ```
pub fn trim_left(&self) -> &Self {
&self[self.chars().take_while(|a| a.is_whitespace()).count()..]
}
/// Returns an ASCII string slice with trailing whitespace removed.
///
/// # Examples
/// ```
/// # use ascii::AsciiStr;
/// let example = AsciiStr::from_ascii(" \twhite \tspace \t").unwrap();
/// assert_eq!(" \twhite \tspace", example.trim_right());
/// ```
pub fn trim_right(&self) -> &Self {
let trimmed = self.chars()
.rev()
.take_while(|a| a.is_whitespace())
.count();
&self[..self.len() - trimmed]
}
/// Compares two strings case-insensitively.
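    ///
    /// # Examples
    /// ```
    /// # use ascii::AsciiStr;
    /// let a = AsciiStr::from_ascii("Hello!").unwrap();
    /// let b = AsciiStr::from_ascii("hello!").unwrap();
    /// assert!(a.eq_ignore_ascii_case(b));
    /// ```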
pub fn eq_ignore_ascii_case(&self, other: &Self) -> bool {
self.len() == other.len() &&
self.chars().zip(other.chars()).all(|(a, b)| {
a.eq_ignore_ascii_case(b)
})
}
/// Replaces lowercase letters with their uppercase equivalent.
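    ///
    /// # Examples
    /// ```
    /// // Sketch using the `AsMutAsciiStr` trait defined later in this
    /// // module (assumed to be re-exported at the crate root).
    /// # use ascii::AsMutAsciiStr;
    /// let mut bytes = [b'a', b'@', b'z'];
    /// let s = bytes.as_mut_ascii_str().unwrap();
    /// s.make_ascii_uppercase();
    /// assert_eq!(s, "A@Z");
    /// ```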
pub fn make_ascii_uppercase(&mut self) {
for a in self.chars_mut() {
*a = a.to_ascii_uppercase();
}
}
/// Replaces uppercase letters with their lowercase equivalent.
pub fn make_ascii_lowercase(&mut self) {
for a in self.chars_mut() {
*a = a.to_ascii_lowercase();
}
}
/// Returns a copy of this string where letters 'a' to 'z' are mapped to 'A' to 'Z'.
#[cfg(feature="std")]
pub fn to_ascii_uppercase(&self) -> AsciiString {
let mut ascii_string = self.to_ascii_string();
ascii_string.make_ascii_uppercase();
ascii_string
}
/// Returns a copy of this string where letters 'A' to 'Z' are mapped to 'a' to 'z'.
#[cfg(feature="std")]
pub fn to_ascii_lowercase(&self) -> AsciiString {
let mut ascii_string = self.to_ascii_string();
ascii_string.make_ascii_lowercase();
ascii_string
}
}
impl PartialEq<str> for AsciiStr {
#[inline]
fn eq(&self, other: &str) -> bool {
self.as_str() == other
}
}
impl PartialEq<AsciiStr> for str {
#[inline]
fn eq(&self, other: &AsciiStr) -> bool {
other.as_str() == self
}
}
#[cfg(feature = "std")]
impl ToOwned for AsciiStr {
type Owned = AsciiString;
#[inline]
fn to_owned(&self) -> AsciiString {
self.to_ascii_string()
}
}
impl AsRef<[u8]> for AsciiStr {
#[inline]
fn as_ref(&self) -> &[u8] {
self.as_bytes()
}
}
impl AsRef<str> for AsciiStr {
#[inline]
fn as_ref(&self) -> &str {
self.as_str()
}
}
impl AsRef<[AsciiChar]> for AsciiStr {
#[inline]
fn as_ref(&self) -> &[AsciiChar] {
&self.slice
}
}
impl AsMut<[AsciiChar]> for AsciiStr {
#[inline]
fn as_mut(&mut self) -> &mut [AsciiChar] {
&mut self.slice
}
}
impl Default for &'static AsciiStr {
#[inline]
fn default() -> &'static AsciiStr {
unsafe { "".as_ascii_str_unchecked() }
}
}
impl<'a> From<&'a [AsciiChar]> for &'a AsciiStr {
#[inline]
fn from(slice: &[AsciiChar]) -> &AsciiStr {
let ptr = slice as *const [AsciiChar] as *const AsciiStr;
unsafe { &*ptr }
}
}
impl<'a> From<&'a mut [AsciiChar]> for &'a mut AsciiStr {
#[inline]
fn from(slice: &mut [AsciiChar]) -> &mut AsciiStr {
let ptr = slice as *mut [AsciiChar] as *mut AsciiStr;
unsafe { &mut *ptr }
}
}
#[cfg(feature = "std")]
impl From<Box<[AsciiChar]>> for Box<AsciiStr> {
#[inline]
fn from(owned: Box<[AsciiChar]>) -> Box<AsciiStr> {
let ptr = Box::into_raw(owned) as *mut AsciiStr;
unsafe { Box::from_raw(ptr) }
}
}
macro_rules! impl_into {
($wider: ty) => {
impl<'a> From<&'a AsciiStr> for &'a$wider {
#[inline]
fn from(slice: &AsciiStr) -> &$wider {
let ptr = slice as *const AsciiStr as *const $wider;
unsafe { &*ptr }
}
}
impl<'a> From<&'a mut AsciiStr> for &'a mut $wider {
#[inline]
fn from(slice: &mut AsciiStr) -> &mut $wider {
let ptr = slice as *mut AsciiStr as *mut $wider;
unsafe { &mut *ptr }
}
}
#[cfg(feature = "std")]
impl From<Box<AsciiStr>> for Box<$wider> {
#[inline]
fn from(owned: Box<AsciiStr>) -> Box<$wider> {
let ptr = Box::into_raw(owned) as *mut $wider;
unsafe { Box::from_raw(ptr) }
}
}
}
}
impl_into! {[AsciiChar]}
impl_into! {[u8]}
impl_into! {str}
impl fmt::Display for AsciiStr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self.as_str(), f)
}
}
impl fmt::Debug for AsciiStr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(self.as_str(), f)
}
}
macro_rules! impl_index {
($idx:ty) => {
impl Index<$idx> for AsciiStr {
type Output = AsciiStr;
#[inline]
fn index(&self, index: $idx) -> &AsciiStr {
let ptr = &self.slice[index] as *const [AsciiChar] as *const AsciiStr;
                unsafe { &*ptr }
}
}
impl IndexMut<$idx> for AsciiStr {
#[inline]
fn index_mut(&mut self, index: $idx) -> &mut AsciiStr {
let ptr = &mut self.slice[index] as *mut [AsciiChar] as *mut AsciiStr;
unsafe { &mut *ptr }
}
}
}
}
impl_index! { Range<usize> }
impl_index! { RangeTo<usize> }
impl_index! { RangeFrom<usize> }
impl_index! { RangeFull }
impl Index<usize> for AsciiStr {
type Output = AsciiChar;
#[inline]
fn index(&self, index: usize) -> &AsciiChar {
unsafe { mem::transmute(&self.slice[index]) }
}
}
impl IndexMut<usize> for AsciiStr {
#[inline]
fn index_mut(&mut self, index: usize) -> &mut AsciiChar {
unsafe { mem::transmute(&mut self.slice[index]) }
}
}
#[cfg(feature = "std")]
impl AsciiExt for AsciiStr {
type Owned = AsciiString;
#[inline]
fn is_ascii(&self) -> bool {
true
}
fn to_ascii_uppercase(&self) -> AsciiString {
let mut ascii_string = self.to_ascii_string();
ascii_string.make_ascii_uppercase();
ascii_string
}
fn to_ascii_lowercase(&self) -> AsciiString {
let mut ascii_string = self.to_ascii_string();
ascii_string.make_ascii_lowercase();
ascii_string
}
fn eq_ignore_ascii_case(&self, other: &Self) -> bool {
self.len() == other.len() &&
self.chars().zip(other.chars()).all(|(a, b)| {
a.eq_ignore_ascii_case(b)
})
}
fn make_ascii_uppercase(&mut self) {
for ascii in self.chars_mut() {
ascii.make_ascii_uppercase();
}
}
fn make_ascii_lowercase(&mut self) {
for ascii in self.chars_mut() {
ascii.make_ascii_lowercase();
}
}
}
impl<'a> IntoIterator for &'a AsciiStr {
type Item = &'a AsciiChar;
type IntoIter = Chars<'a>;
#[inline]
fn into_iter(self) -> Self::IntoIter {
self.chars()
}
}
impl<'a> IntoIterator for &'a mut AsciiStr {
type Item = &'a mut AsciiChar;
type IntoIter = CharsMut<'a>;
#[inline]
fn into_iter(self) -> Self::IntoIter {
self.chars_mut()
}
}
/// An immutable iterator over the characters of an `AsciiStr`.
pub type Chars<'a> = Iter<'a, AsciiChar>;
/// A mutable iterator over the characters of an `AsciiStr`.
pub type CharsMut<'a> = IterMut<'a, AsciiChar>;
/// An iterator over the lines of the internal character array.
#[derive(Clone, Debug)]
pub struct Lines<'a> {
current_index: usize,
string: &'a AsciiStr,
}
impl<'a> Iterator for Lines<'a> {
type Item = &'a AsciiStr;
fn next(&mut self) -> Option<Self::Item> {
let curr_idx = self.current_index;
let len = self.string.len();
if curr_idx >= len {
return None;
}
        // Find the next line break; if there is none, the rest of the string
        // is the final line (the trailing line ending is optional).
        let mut next_idx = len;
        let mut linebreak_skip = 0;
        for i in curr_idx..len {
            match self.string[i] {
                AsciiChar::LineFeed => {
                    next_idx = i;
                    linebreak_skip = 1;
                    break;
                }
                AsciiChar::CarriageReturn
                    if i + 1 < len && self.string[i + 1] == AsciiChar::LineFeed =>
                {
                    next_idx = i;
                    linebreak_skip = 2;
                    break;
                }
                _ => {}
            }
        }
        let line = &self.string[curr_idx..next_idx];
        self.current_index = next_idx + linebreak_skip;
        if line.is_empty() && self.current_index == self.string.len() {
            // This was a trailing line break; do not yield a final empty line
            None
        } else {
            Some(line)
        }
}
}
/// Error that is returned when a sequence of `u8` are not all ASCII.
///
/// It is used by `As[Mut]AsciiStr` and the `from_ascii` method on `AsciiStr` and `AsciiString`.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct AsAsciiStrError(usize);
const ERRORMSG_STR: &'static str = "one or more bytes are not ASCII";
impl AsAsciiStrError {
/// Returns the index of the first non-ASCII byte.
///
/// It is the maximum index such that `from_ascii(input[..index])` would return `Ok(_)`.
#[inline]
pub fn valid_up_to(self) -> usize {
self.0
}
#[cfg(not(feature = "std"))]
/// Returns a description for this error, like `std::error::Error::description`.
#[inline]
pub fn description(&self) -> &'static str {
ERRORMSG_STR
}
}
impl fmt::Display for AsAsciiStrError {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
write!(fmtr, "the byte at index {} is not ASCII", self.0)
}
}
#[cfg(feature = "std")]
impl Error for AsAsciiStrError {
#[inline]
fn description(&self) -> &'static str {
ERRORMSG_STR
}
}
/// Convert slices of bytes to `AsciiStr`.
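///
/// # Examples
/// ```
/// // The trait is implemented for `str` and `[u8]` below.
/// # use ascii::AsAsciiStr;
/// assert!("foo".as_ascii_str().is_ok());
/// assert_eq!("Ŋ".as_ascii_str().unwrap_err().valid_up_to(), 0);
/// ```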
pub trait AsAsciiStr {
/// Convert to an ASCII slice without checking for non-ASCII characters.
unsafe fn as_ascii_str_unchecked(&self) -> &AsciiStr;
/// Convert to an ASCII slice.
fn as_ascii_str(&self) -> Result<&AsciiStr, AsAsciiStrError>;
}
/// Convert mutable slices of bytes to `AsciiStr`.
pub trait AsMutAsciiStr {
/// Convert to a mutable ASCII slice without checking for non-ASCII characters.
unsafe fn as_mut_ascii_str_unchecked(&mut self) -> &mut AsciiStr;
/// Convert to a mutable ASCII slice.
fn as_mut_ascii_str(&mut self) -> Result<&mut AsciiStr, AsAsciiStrError>;
}
// These generic implementations mirror the generic implementations for AsRef<T> in core.
impl<'a, T: ?Sized> AsAsciiStr for &'a T where T: AsAsciiStr {
#[inline]
fn as_ascii_str(&self) -> Result<&AsciiStr, AsAsciiStrError> {
<T as AsAsciiStr>::as_ascii_str(*self)
}
#[inline]
unsafe fn as_ascii_str_unchecked(&self) -> &AsciiStr {
<T as AsAsciiStr>::as_ascii_str_unchecked(*self)
}
}
impl<'a, T: ?Sized> AsAsciiStr for &'a mut T where T: AsAsciiStr {
#[inline]
fn as_ascii_str(&self) -> Result<&AsciiStr, AsAsciiStrError> {
<T as AsAsciiStr>::as_ascii_str(*self)
}
#[inline]
unsafe fn as_ascii_str_unchecked(&self) -> &AsciiStr {
<T as AsAsciiStr>::as_ascii_str_unchecked(*self)
}
}
impl<'a, T: ?Sized> AsMutAsciiStr for &'a mut T where T: AsMutAsciiStr {
#[inline]
fn as_mut_ascii_str(&mut self) -> Result<&mut AsciiStr, AsAsciiStrError> {
<T as AsMutAsciiStr>::as_mut_ascii_str(*self)
}
#[inline]
unsafe fn as_mut_ascii_str_unchecked(&mut self) -> &mut AsciiStr {
<T as AsMutAsciiStr>::as_mut_ascii_str_unchecked(*self)
}
}
impl AsAsciiStr for AsciiStr {
#[inline]
fn as_ascii_str(&self) -> Result<&AsciiStr, AsAsciiStrError> {
Ok(self)
}
#[inline]
unsafe fn as_ascii_str_unchecked(&self) -> &AsciiStr {
self
}
}
impl AsMutAsciiStr for AsciiStr {
#[inline]
fn as_mut_ascii_str(&mut self) -> Result<&mut AsciiStr, AsAsciiStrError> {
Ok(self)
}
#[inline]
unsafe fn as_mut_ascii_str_unchecked(&mut self) -> &mut AsciiStr {
self
}
}
impl AsAsciiStr for [AsciiChar] {
#[inline]
fn as_ascii_str(&self) -> Result<&AsciiStr, AsAsciiStrError> {
Ok(self.into())
}
#[inline]
unsafe fn as_ascii_str_unchecked(&self) -> &AsciiStr {
self.into()
}
}
impl AsMutAsciiStr for [AsciiChar] {
#[inline]
fn as_mut_ascii_str(&mut self) -> Result<&mut AsciiStr, AsAsciiStrError> {
Ok(self.into())
}
#[inline]
unsafe fn as_mut_ascii_str_unchecked(&mut self) -> &mut AsciiStr {
self.into()
}
}
impl AsAsciiStr for [u8] {
fn as_ascii_str(&self) -> Result<&AsciiStr, AsAsciiStrError> {
match self.iter().position(|&b| b > 127) {
Some(index) => Err(AsAsciiStrError(index)),
None => unsafe { Ok(self.as_ascii_str_unchecked()) },
}
}
#[inline]
unsafe fn as_ascii_str_unchecked(&self) -> &AsciiStr {
let ptr = self as *const [u8] as *const AsciiStr;
&*ptr
}
}
impl AsMutAsciiStr for [u8] {
fn as_mut_ascii_str(&mut self) -> Result<&mut AsciiStr, AsAsciiStrError> {
match self.iter().position(|&b| b > 127) {
Some(index) => Err(AsAsciiStrError(index)),
None => unsafe { Ok(self.as_mut_ascii_str_unchecked()) },
}
}
#[inline]
unsafe fn as_mut_ascii_str_unchecked(&mut self) -> &mut AsciiStr {
let ptr = self as *mut [u8] as *mut AsciiStr;
&mut *ptr
}
}
impl AsAsciiStr for str {
fn as_ascii_str(&self) -> Result<&AsciiStr, AsAsciiStrError> {
self.as_bytes().as_ascii_str()
}
#[inline]
unsafe fn as_ascii_str_unchecked(&self) -> &AsciiStr {
self.as_bytes().as_ascii_str_unchecked()
}
}
impl AsMutAsciiStr for str {
fn as_mut_ascii_str(&mut self) -> Result<&mut AsciiStr, AsAsciiStrError> {
match self.bytes().position(|b| b > 127) {
Some(index) => Err(AsAsciiStrError(index)),
None => unsafe { Ok(self.as_mut_ascii_str_unchecked()) },
}
}
#[inline]
unsafe fn as_mut_ascii_str_unchecked(&mut self) -> &mut AsciiStr {
let ptr = self as *mut str as *mut AsciiStr;
&mut *ptr
}
}
#[cfg(test)]
mod tests {
use AsciiChar;
use super::{AsciiStr, AsAsciiStr, AsMutAsciiStr, AsAsciiStrError};
#[test]
fn generic_as_ascii_str() {
fn generic<C: AsAsciiStr + ?Sized>(c: &C) -> Result<&AsciiStr, AsAsciiStrError> {
c.as_ascii_str()
}
let arr = [AsciiChar::A];
let ascii_str: &AsciiStr = arr.as_ref().into();
assert_eq!(generic("A"), Ok(ascii_str));
assert_eq!(generic(&b"A"[..]), Ok(ascii_str));
assert_eq!(generic(ascii_str), Ok(ascii_str));
assert_eq!(generic(&"A"), Ok(ascii_str));
assert_eq!(generic(&ascii_str), Ok(ascii_str));
assert_eq!(generic(&mut "A"), Ok(ascii_str));
}
#[test]
fn generic_as_mut_ascii_str() {
fn generic_mut<C: AsMutAsciiStr + ?Sized>(
c: &mut C,
) -> Result<&mut AsciiStr, AsAsciiStrError> {
c.as_mut_ascii_str()
}
let mut arr_mut = [AsciiChar::B];
let mut ascii_str_mut: &mut AsciiStr = arr_mut.as_mut().into();
// Need a second reference to prevent overlapping mutable borrows
let mut arr_mut_2 = [AsciiChar::B];
let ascii_str_mut_2: &mut AsciiStr = arr_mut_2.as_mut().into();
assert_eq!(generic_mut(&mut ascii_str_mut), Ok(&mut *ascii_str_mut_2));
assert_eq!(generic_mut(ascii_str_mut), Ok(&mut *ascii_str_mut_2));
}
#[test]
#[cfg(feature = "std")]
fn as_ascii_str() {
macro_rules! err {{$i:expr} => {Err(AsAsciiStrError($i))}}
let mut s: String = "abčd".to_string();
let mut b: Vec<u8> = s.clone().into();
assert_eq!(s.as_str().as_ascii_str(), err!(2));
assert_eq!(s.as_mut_str().as_mut_ascii_str(), err!(2));
assert_eq!(b.as_slice().as_ascii_str(), err!(2));
assert_eq!(b.as_mut_slice().as_mut_ascii_str(), err!(2));
let mut a = [AsciiChar::a, AsciiChar::b];
assert_eq!((&s[..2]).as_ascii_str(), Ok((&a[..]).into()));
assert_eq!((&b[..2]).as_ascii_str(), Ok((&a[..]).into()));
let a = Ok((&mut a[..]).into());
assert_eq!((&mut s[..2]).as_mut_ascii_str(), a);
assert_eq!((&mut b[..2]).as_mut_ascii_str(), a);
}
#[test]
fn default() {
let default: &'static AsciiStr = Default::default();
assert!(default.is_empty());
}
#[test]
fn as_str() {
let b = b"( ;";
let v = AsciiStr::from_ascii(b).unwrap();
assert_eq!(v.as_str(), "( ;");
assert_eq!(AsRef::<str>::as_ref(v), "( ;");
}
#[test]
fn as_bytes() {
let b = b"( ;";
let v = AsciiStr::from_ascii(b).unwrap();
assert_eq!(v.as_bytes(), b"( ;");
assert_eq!(AsRef::<[u8]>::as_ref(v), b"( ;");
}
#[test]
fn make_ascii_case() {
let mut bytes = ([b'a', b'@', b'A'], [b'A', b'@', b'a']);
let a = bytes.0.as_mut_ascii_str().unwrap();
let b = bytes.1.as_mut_ascii_str().unwrap();
assert!(a.eq_ignore_ascii_case(b));
assert!(b.eq_ignore_ascii_case(a));
a.make_ascii_lowercase();
b.make_ascii_uppercase();
assert_eq!(a, "a@a");
assert_eq!(b, "A@A");
}
#[test]
#[cfg(feature = "std")]
fn to_ascii_case() {
let bytes = ([b'a', b'@', b'A'], [b'A', b'@', b'a']);
let a = bytes.0.as_ascii_str().unwrap();
let b = bytes.1.as_ascii_str().unwrap();
assert_eq!(a.to_ascii_lowercase().as_str(), "a@a");
assert_eq!(a.to_ascii_uppercase().as_str(), "A@A");
assert_eq!(b.to_ascii_lowercase().as_str(), "a@a");
assert_eq!(b.to_ascii_uppercase().as_str(), "A@A");
}
#[test]
#[cfg(feature = "std")]
fn ascii_ext() {
#[allow(deprecated)]
use std::ascii::AsciiExt;
assert!(AsciiExt::is_ascii(<&AsciiStr>::default()));
let mut mutable = String::from("a@AA@a");
let parts = mutable.split_at_mut(3);
let a = parts.0.as_mut_ascii_str().unwrap();
let b = parts.1.as_mut_ascii_str().unwrap();
assert!(AsciiExt::eq_ignore_ascii_case(a, b));
assert_eq!(AsciiExt::to_ascii_lowercase(a).as_str(), "a@a");
assert_eq!(AsciiExt::to_ascii_uppercase(b).as_str(), "A@A");
AsciiExt::make_ascii_uppercase(a);
AsciiExt::make_ascii_lowercase(b);
assert_eq!(a, "A@A");
assert_eq!(b, "a@a");
}
#[test]
fn chars_iter() {
let chars = &[b'h', b'e', b'l', b'l', b'o', b' ', b'w', b'o', b'r', b'l', b'd', b'\0'];
let ascii = AsciiStr::from_ascii(chars).unwrap();
for (achar, byte) in ascii.chars().zip(chars.iter()) {
assert_eq!(achar, byte);
}
}
#[test]
fn chars_iter_mut() {
let chars = &mut [b'h', b'e', b'l', b'l', b'o', b' ', b'w', b'o', b'r', b'l', b'd', b'\0'];
let ascii = chars.as_mut_ascii_str().unwrap();
*ascii.chars_mut().next().unwrap() = AsciiChar::H;
assert_eq!(ascii[0], b'H');
}
#[test]
fn lines_iter() {
use core::iter::Iterator;
let lines: [&str; 3] = ["great work", "cool beans", "awesome stuff"];
let joined = "great work\ncool beans\r\nawesome stuff\n";
let ascii = AsciiStr::from_ascii(joined.as_bytes()).unwrap();
for (asciiline, line) in ascii.lines().zip(&lines) {
assert_eq!(asciiline, *line);
}
let trailing_line_break = b"\n";
let ascii = AsciiStr::from_ascii(&trailing_line_break).unwrap();
for _ in ascii.lines() {
unreachable!();
}
let empty_lines = b"\n\r\n\n\r\n";
let mut ensure_iterated = false;
let ascii = AsciiStr::from_ascii(&empty_lines).unwrap();
for line in ascii.lines() {
ensure_iterated = true;
assert!(line.is_empty());
}
assert!(ensure_iterated);
}
#[test]
#[cfg(feature = "std")]
fn fmt_ascii_str() {
let s = "abc".as_ascii_str().unwrap();
assert_eq!(format!("{}", s), "abc".to_string());
assert_eq!(format!("{:?}", s), "\"abc\"".to_string());
}
}
| 29.95302 | 100 | 0.567107 |
222843d747fe76aaa1ed5c75c3dc807d687786ed | 2,071 | #[doc = "Register `CID1` reader"]
pub struct R(crate::R<CID1_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<CID1_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<CID1_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<CID1_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Field `PREAMBLE` reader - Preamble"]
pub struct PREAMBLE_R(crate::FieldReader<u8, u8>);
impl PREAMBLE_R {
pub(crate) fn new(bits: u8) -> Self {
PREAMBLE_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for PREAMBLE_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `CCLASS` reader - Component Class"]
pub struct CCLASS_R(crate::FieldReader<u8, u8>);
impl CCLASS_R {
pub(crate) fn new(bits: u8) -> Self {
CCLASS_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for CCLASS_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl R {
#[doc = "Bits 0:3 - Preamble"]
#[inline(always)]
pub fn preamble(&self) -> PREAMBLE_R {
PREAMBLE_R::new((self.bits & 0x0f) as u8)
}
#[doc = "Bits 4:7 - Component Class"]
#[inline(always)]
pub fn cclass(&self) -> CCLASS_R {
CCLASS_R::new(((self.bits >> 4) & 0x0f) as u8)
}
}
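// A read of this register in the usual svd2rust style looks roughly like the
// sketch below (`dev.cid1` is a placeholder for the peripheral instance that
// owns this register):
//
//     let r = dev.cid1.read();
//     let preamble = r.preamble().bits(); // bits 0:3
//     let cclass = r.cclass().bits(); // bits 4:7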
#[doc = "Component Identification 1\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [cid1](index.html) module"]
pub struct CID1_SPEC;
impl crate::RegisterSpec for CID1_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [cid1::R](R) reader structure"]
impl crate::Readable for CID1_SPEC {
type Reader = R;
}
#[doc = "`reset()` method sets CID1 to value 0x10"]
impl crate::Resettable for CID1_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0x10
}
}
| 28.763889 | 233 | 0.608885 |
798d8ea6102bc879b595427539d5474c19a57b12 | 11,944 | use crate::{
chain_spec,
cli::{Cli, RelayChainCli, Subcommand},
service::{new_partial, ParachainRuntimeExecutor},
};
use codec::Encode;
use cumulus_client_service::genesis::generate_genesis_block;
use cumulus_primitives_core::ParaId;
use log::info;
use parachain_runtime::{Block, RuntimeApi};
use polkadot_parachain::primitives::AccountIdConversion;
use sc_cli::{
ChainSpec, CliConfiguration, DefaultConfigurationValues, ImportParams, KeystoreParams,
NetworkParams, Result, RuntimeVersion, SharedParams, SubstrateCli,
};
use sc_service::config::{BasePath, PrometheusConfig};
use sp_core::hexdisplay::HexDisplay;
use sp_runtime::traits::Block as BlockT;
use std::{io::Write, net::SocketAddr};
fn load_spec(id: &str) -> std::result::Result<Box<dyn sc_service::ChainSpec>, String> {
Ok(match id {
"dev" => Box::new(chain_spec::development_config()),
"westend" => Box::new(chain_spec::adz_westend()),
"" | "local" => Box::new(chain_spec::local_testnet_config()),
path => Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?),
})
}
impl SubstrateCli for Cli {
fn impl_name() -> String {
"Parachain Collator Template".into()
}
fn impl_version() -> String {
env!("SUBSTRATE_CLI_IMPL_VERSION").into()
}
fn description() -> String {
format!(
"Parachain Collator Template\n\nThe command-line arguments provided first will be \
passed to the parachain node, while the arguments provided after -- will be passed \
to the relaychain node.\n\n\
{} [parachain-args] -- [relaychain-args]",
Self::executable_name()
)
}
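    // Illustrative invocation (chain ids are placeholders; see `load_spec`
    // above for the parachain ids this node accepts):
    //
    //     parachain-collator --chain local -- --chain rococo-local
    //
    // Everything before `--` configures the parachain node, and everything
    // after it is forwarded to the embedded relay chain node through
    // `relaychain_args`.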
fn author() -> String {
env!("CARGO_PKG_AUTHORS").into()
}
fn support_url() -> String {
"https://github.com/substrate-developer-hub/substrate-parachain-template/issues/new".into()
}
fn copyright_start_year() -> i32 {
2017
}
fn load_spec(&self, id: &str) -> std::result::Result<Box<dyn sc_service::ChainSpec>, String> {
load_spec(id)
}
fn native_runtime_version(_: &Box<dyn ChainSpec>) -> &'static RuntimeVersion {
        &parachain_runtime::VERSION
}
}
impl SubstrateCli for RelayChainCli {
fn impl_name() -> String {
"Parachain Collator Template".into()
}
fn impl_version() -> String {
env!("SUBSTRATE_CLI_IMPL_VERSION").into()
}
fn description() -> String {
"Parachain Collator Template\n\nThe command-line arguments provided first will be \
passed to the parachain node, while the arguments provided after -- will be passed \
to the relaychain node.\n\n\
parachain-collator [parachain-args] -- [relaychain-args]"
.into()
}
fn author() -> String {
env!("CARGO_PKG_AUTHORS").into()
}
fn support_url() -> String {
"https://github.com/substrate-developer-hub/substrate-parachain-template/issues/new".into()
}
fn copyright_start_year() -> i32 {
2017
}
fn load_spec(&self, id: &str) -> std::result::Result<Box<dyn sc_service::ChainSpec>, String> {
polkadot_cli::Cli::from_iter([RelayChainCli::executable_name().to_string()].iter())
.load_spec(id)
}
fn native_runtime_version(chain_spec: &Box<dyn ChainSpec>) -> &'static RuntimeVersion {
polkadot_cli::Cli::native_runtime_version(chain_spec)
}
}
fn extract_genesis_wasm(chain_spec: &Box<dyn sc_service::ChainSpec>) -> Result<Vec<u8>> {
let mut storage = chain_spec.build_storage()?;
storage
.top
.remove(sp_core::storage::well_known_keys::CODE)
.ok_or_else(|| "Could not find wasm file in genesis state!".into())
}
macro_rules! construct_async_run {
(|$components:ident, $cli:ident, $cmd:ident, $config:ident| $( $code:tt )* ) => {{
let runner = $cli.create_runner($cmd)?;
runner.async_run(|$config| {
let $components = new_partial::<
RuntimeApi,
ParachainRuntimeExecutor,
_
>(
&$config,
crate::service::parachain_build_import_queue,
)?;
let task_manager = $components.task_manager;
{ $( $code )* }.map(|v| (v, task_manager))
})
}}
}
/// Parse command line arguments into service configuration.
pub fn run() -> Result<()> {
let cli = Cli::from_args();
match &cli.subcommand {
Some(Subcommand::BuildSpec(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.sync_run(|config| cmd.run(config.chain_spec, config.network))
}
Some(Subcommand::CheckBlock(cmd)) => {
construct_async_run!(|components, cli, cmd, config| {
Ok(cmd.run(components.client, components.import_queue))
})
}
Some(Subcommand::ExportBlocks(cmd)) => {
construct_async_run!(|components, cli, cmd, config| {
Ok(cmd.run(components.client, config.database))
})
}
Some(Subcommand::ExportState(cmd)) => {
construct_async_run!(|components, cli, cmd, config| {
Ok(cmd.run(components.client, config.chain_spec))
})
}
Some(Subcommand::ImportBlocks(cmd)) => {
construct_async_run!(|components, cli, cmd, config| {
Ok(cmd.run(components.client, components.import_queue))
})
}
Some(Subcommand::PurgeChain(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.sync_run(|config| {
let polkadot_cli = RelayChainCli::new(
&config,
[RelayChainCli::executable_name().to_string()]
.iter()
.chain(cli.relaychain_args.iter()),
);
let tokio_handle = config.tokio_handle.clone();
let polkadot_config = SubstrateCli::create_configuration(
&polkadot_cli,
&polkadot_cli,
tokio_handle,
)
.map_err(|err| format!("Relay chain argument error: {}", err))?;
cmd.run(config, polkadot_config)
})
}
Some(Subcommand::Revert(cmd)) => construct_async_run!(|components, cli, cmd, config| {
Ok(cmd.run(components.client, components.backend))
}),
Some(Subcommand::ExportGenesisState(params)) => {
let mut builder = sc_cli::LoggerBuilder::new("");
builder.with_profiling(sc_tracing::TracingReceiver::Log, "");
let _ = builder.init();
let spec = load_spec(¶ms.chain.clone().unwrap_or_default())?;
let state_version = Cli::native_runtime_version(&spec).state_version();
let block: Block = generate_genesis_block(&spec, state_version)?;
let raw_header = block.header().encode();
let output_buf = if params.raw {
raw_header
} else {
format!("0x{:?}", HexDisplay::from(&block.header().encode())).into_bytes()
};
if let Some(output) = ¶ms.output {
std::fs::write(output, output_buf)?;
} else {
std::io::stdout().write_all(&output_buf)?;
}
Ok(())
}
Some(Subcommand::ExportGenesisWasm(params)) => {
let mut builder = sc_cli::LoggerBuilder::new("");
builder.with_profiling(sc_tracing::TracingReceiver::Log, "");
let _ = builder.init();
let raw_wasm_blob =
extract_genesis_wasm(&cli.load_spec(¶ms.chain.clone().unwrap_or_default())?)?;
let output_buf = if params.raw {
raw_wasm_blob
} else {
format!("0x{:?}", HexDisplay::from(&raw_wasm_blob)).into_bytes()
};
if let Some(output) = ¶ms.output {
std::fs::write(output, output_buf)?;
} else {
std::io::stdout().write_all(&output_buf)?;
}
Ok(())
}
Some(Subcommand::Benchmark(cmd)) => {
if cfg!(feature = "runtime-benchmarks") {
let runner = cli.create_runner(cmd)?;
runner.sync_run(|config| cmd.run::<Block, ParachainRuntimeExecutor>(config))
} else {
Err("Benchmarking wasn't enabled when building the node. \
You can enable it with `--features runtime-benchmarks`."
.into())
}
}
None => {
let runner = cli.create_runner(&cli.run.normalize())?;
runner.run_node_until_exit(|config| async move {
let para_id = chain_spec::Extensions::try_get(&*config.chain_spec)
.map(|e| e.para_id)
.ok_or_else(|| "Could not find parachain ID in chain-spec.")?;
let polkadot_cli = RelayChainCli::new(
&config,
[RelayChainCli::executable_name().to_string()]
.iter()
.chain(cli.relaychain_args.iter()),
);
let id = ParaId::from(para_id);
let parachain_account =
AccountIdConversion::<polkadot_primitives::v0::AccountId>::into_account(&id);
let state_version =
RelayChainCli::native_runtime_version(&config.chain_spec).state_version();
let block: Block = generate_genesis_block(&config.chain_spec, state_version)
.map_err(|e| format!("{:?}", e))?;
let genesis_state = format!("0x{:?}", HexDisplay::from(&block.header().encode()));
let tokio_handle = config.tokio_handle.clone();
let polkadot_config =
SubstrateCli::create_configuration(&polkadot_cli, &polkadot_cli, tokio_handle)
.map_err(|err| format!("Relay chain argument error: {}", err))?;
info!("Parachain id: {:?}", id);
info!("Parachain Account: {}", parachain_account);
info!("Parachain genesis state: {}", genesis_state);
info!("Is collating: {}", if config.role.is_authority() { "yes" } else { "no" });
crate::service::start_node(config, polkadot_config, id)
.await
.map(|r| r.0)
.map_err(Into::into)
})
}
}
}
impl DefaultConfigurationValues for RelayChainCli {
fn p2p_listen_port() -> u16 {
30334
}
fn rpc_ws_listen_port() -> u16 {
9945
}
fn rpc_http_listen_port() -> u16 {
9934
}
fn prometheus_listen_port() -> u16 {
9616
}
}
impl CliConfiguration<Self> for RelayChainCli {
fn shared_params(&self) -> &SharedParams {
self.base.base.shared_params()
}
fn import_params(&self) -> Option<&ImportParams> {
self.base.base.import_params()
}
fn network_params(&self) -> Option<&NetworkParams> {
self.base.base.network_params()
}
fn keystore_params(&self) -> Option<&KeystoreParams> {
self.base.base.keystore_params()
}
fn base_path(&self) -> Result<Option<BasePath>> {
Ok(self.shared_params().base_path().or_else(|| self.base_path.clone().map(Into::into)))
}
fn rpc_http(&self, default_listen_port: u16) -> Result<Option<SocketAddr>> {
self.base.base.rpc_http(default_listen_port)
}
fn rpc_ipc(&self) -> Result<Option<String>> {
self.base.base.rpc_ipc()
}
fn rpc_ws(&self, default_listen_port: u16) -> Result<Option<SocketAddr>> {
self.base.base.rpc_ws(default_listen_port)
}
fn prometheus_config(
&self,
default_listen_port: u16,
chain_spec: &Box<dyn ChainSpec>,
) -> Result<Option<PrometheusConfig>> {
self.base.base.prometheus_config(default_listen_port, chain_spec)
}
fn init<F>(
&self,
_support_url: &String,
_impl_version: &String,
_logger_hook: F,
_config: &sc_service::Configuration,
) -> Result<()>
where
F: FnOnce(&mut sc_cli::LoggerBuilder, &sc_service::Configuration),
{
unreachable!("PolkadotCli is never initialized; qed");
}
fn chain_id(&self, is_dev: bool) -> Result<String> {
let chain_id = self.base.base.chain_id(is_dev)?;
Ok(if chain_id.is_empty() { self.chain_id.clone().unwrap_or_default() } else { chain_id })
}
fn role(&self, is_dev: bool) -> Result<sc_service::Role> {
self.base.base.role(is_dev)
}
fn transaction_pool(&self) -> Result<sc_service::config::TransactionPoolOptions> {
self.base.base.transaction_pool()
}
fn state_cache_child_ratio(&self) -> Result<Option<usize>> {
self.base.base.state_cache_child_ratio()
}
fn rpc_methods(&self) -> Result<sc_service::config::RpcMethods> {
self.base.base.rpc_methods()
}
fn rpc_ws_max_connections(&self) -> Result<Option<usize>> {
self.base.base.rpc_ws_max_connections()
}
fn rpc_cors(&self, is_dev: bool) -> Result<Option<Vec<String>>> {
self.base.base.rpc_cors(is_dev)
}
fn default_heap_pages(&self) -> Result<Option<u64>> {
self.base.base.default_heap_pages()
}
fn force_authoring(&self) -> Result<bool> {
self.base.base.force_authoring()
}
fn disable_grandpa(&self) -> Result<bool> {
self.base.base.disable_grandpa()
}
fn max_runtime_instances(&self) -> Result<Option<usize>> {
self.base.base.max_runtime_instances()
}
fn announce_block(&self) -> Result<bool> {
self.base.base.announce_block()
}
fn telemetry_endpoints(
&self,
chain_spec: &Box<dyn ChainSpec>,
) -> Result<Option<sc_telemetry::TelemetryEndpoints>> {
self.base.base.telemetry_endpoints(chain_spec)
}
}
| 28.236407 | 95 | 0.6857 |
fea28ddb094f1489f2ce4bcb2467d24a1c348cae | 5,355 | // Copyright 2020-2022 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io::{BufRead, BufReader};
use std::net::TcpStream;
use std::path::PathBuf;
use std::process::{Child, Command};
use std::{env, fs};
use std::{thread, time::Duration};
use lazy_static::lazy_static;
use nats::jetstream::{JetStream, JetStreamOptions};
use nats::Connection;
use regex::Regex;
pub struct Server {
child: Child,
logfile: PathBuf,
pidfile: PathBuf,
}
lazy_static! {
static ref SD_RE: Regex = Regex::new(r#".+\sStore Directory:\s+"([^"]+)""#).unwrap();
static ref CLIENT_RE: Regex = Regex::new(r#".+\sclient connections on\s+(\S+)"#).unwrap();
}
impl Drop for Server {
fn drop(&mut self) {
self.child.kill().unwrap();
self.child.wait().unwrap();
// Remove log if present.
if let Ok(log) = fs::read_to_string(self.logfile.as_os_str()) {
// Check if we had JetStream running and if so cleanup the storage directory.
if let Some(caps) = SD_RE.captures(&log) {
let sd = caps.get(1).map_or("", |m| m.as_str());
fs::remove_dir_all(sd).ok();
}
// Remove Logfile.
fs::remove_file(self.logfile.as_os_str()).ok();
}
}
}
impl Server {
// Grab client url.
// Helpful when dynamically allocating ports with -1.
pub fn client_url(&self) -> String {
let addr = self.client_addr();
let mut r = BufReader::with_capacity(1024, TcpStream::connect(addr).unwrap());
let mut line = String::new();
r.read_line(&mut line).expect("did not receive INFO");
let si = json::parse(&line["INFO".len()..]).unwrap();
let port = si["port"].as_u16().expect("could not parse port");
let mut scheme = "nats://";
if si["tls_required"].as_bool().unwrap_or(false) {
scheme = "tls://";
}
format!("{}127.0.0.1:{}", scheme, port)
}
// Allow user/pass override.
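    // E.g. `s.client_url_with("user", "pass")` might return
    // "nats://user:pass@127.0.0.1:34567" (the port is allocated dynamically).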
pub fn client_url_with(&self, user: &str, pass: &str) -> String {
use url::Url;
let mut url = Url::parse(&self.client_url()).expect("could not parse");
url.set_username(user).ok();
url.set_password(Some(pass)).ok();
url.as_str().to_string()
}
// Grab client addr from logs.
fn client_addr(&self) -> String {
        // We may need to wait for the log to be present.
        // Wait up to 5s. (20 * 250ms)
for _ in 0..20 {
match fs::read_to_string(self.logfile.as_os_str()) {
Ok(l) => {
if let Some(cre) = CLIENT_RE.captures(&l) {
return cre.get(1).unwrap().as_str().replace("0.0.0.0", "127.0.0.1");
} else {
thread::sleep(Duration::from_millis(250));
}
}
_ => thread::sleep(Duration::from_millis(250)),
}
}
panic!("no client addr info");
}
pub fn client_pid(&self) -> usize {
String::from_utf8(fs::read(self.pidfile.clone()).unwrap())
.unwrap()
.parse()
.unwrap()
}
}
pub fn set_lame_duck_mode(s: &Server) {
let mut cmd = Command::new("nats-server");
cmd.arg("--signal")
.arg(format!("ldm={}", s.client_pid()))
.spawn()
.unwrap();
}
/// Starts a local NATS server with the given config that gets stopped and cleaned up on drop.
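///
/// Typical test usage (sketch; requires a `nats-server` binary on `PATH`):
///
/// ```text
/// let server = run_server("tests/configs/jetstream.conf");
/// let nc = nats::connect(&server.client_url()).unwrap();
/// ```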
pub fn run_server(cfg: &str) -> Server {
let id = nuid::next();
let logfile = env::temp_dir().join(format!("nats-server-{}.log", id));
let store_dir = env::temp_dir().join(format!("store-dir-{}", id));
let pidfile = env::temp_dir().join(format!("nats-server-{}.pid", id));
// Always use dynamic ports so tests can run in parallel.
// Create env for a storage directory for jetstream.
let mut cmd = Command::new("nats-server");
cmd.arg("--store_dir")
.arg(store_dir.as_path().to_str().unwrap())
.arg("-p")
.arg("-1")
.arg("-l")
.arg(logfile.as_os_str())
.arg("-P")
.arg(pidfile.as_os_str());
if cfg != "" {
let path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
cmd.arg("-c").arg(path.join(cfg));
}
let child = cmd.spawn().unwrap();
Server {
child,
logfile,
pidfile,
}
}
/// Starts a local basic NATS server that gets stopped and cleaned up on drop.
pub fn run_basic_server() -> Server {
run_server("")
}
// Helper function to return server and client.
pub fn run_basic_jetstream() -> (Server, Connection, JetStream) {
let s = run_server("tests/configs/jetstream.conf");
let nc = nats::connect(&s.client_url()).unwrap();
let js = JetStream::new(nc.clone(), JetStreamOptions::default());
(s, nc, js)
}
| 33.26087 | 94 | 0.580579 |
7a98ae39d3ae9b0921b13b790aff2cf88a937cb3 | 3,277 | //! lint on C-like enums that are `repr(isize/usize)` and have values that
//! don't fit into an `i32`
use crate::consts::{miri_to_const, Constant};
use clippy_utils::diagnostics::span_lint;
use rustc_hir::{Item, ItemKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::ty::util::IntTypeExt;
use rustc_middle::ty::{self, IntTy, UintTy};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use std::convert::TryFrom;
declare_clippy_lint! {
/// **What it does:** Checks for C-like enumerations that are
/// `repr(isize/usize)` and have values that don't fit into an `i32`.
///
    /// **Why is this bad?** This will truncate the variant value on 32-bit
    /// architectures, but works fine on 64-bit ones.
///
/// **Known problems:** None.
///
/// **Example:**
/// ```rust
/// # #[cfg(target_pointer_width = "64")]
/// #[repr(usize)]
/// enum NonPortable {
/// X = 0x1_0000_0000,
/// Y = 0,
/// }
/// ```
pub ENUM_CLIKE_UNPORTABLE_VARIANT,
correctness,
"C-like enums that are `repr(isize/usize)` and have values that don't fit into an `i32`"
}
declare_lint_pass!(UnportableVariant => [ENUM_CLIKE_UNPORTABLE_VARIANT]);
impl<'tcx> LateLintPass<'tcx> for UnportableVariant {
#[allow(clippy::cast_possible_truncation, clippy::cast_possible_wrap, clippy::cast_sign_loss)]
fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx Item<'_>) {
if cx.tcx.data_layout.pointer_size.bits() != 64 {
return;
}
if let ItemKind::Enum(def, _) = &item.kind {
for var in def.variants {
if let Some(anon_const) = &var.disr_expr {
let def_id = cx.tcx.hir().body_owner_def_id(anon_const.body);
let mut ty = cx.tcx.type_of(def_id.to_def_id());
let constant = cx
.tcx
.const_eval_poly(def_id.to_def_id())
.ok()
.map(|val| rustc_middle::ty::Const::from_value(cx.tcx, val, ty));
if let Some(Constant::Int(val)) = constant.and_then(miri_to_const) {
if let ty::Adt(adt, _) = ty.kind() {
if adt.is_enum() {
ty = adt.repr.discr_type().to_ty(cx.tcx);
}
}
match ty.kind() {
ty::Int(IntTy::Isize) => {
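                                // Sign-extend the low 64 bits so that negative
                                // `isize` discriminants survive the cast before
                                // the `i32` range check below.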
let val = ((val as i128) << 64) >> 64;
if i32::try_from(val).is_ok() {
continue;
}
},
ty::Uint(UintTy::Usize) if val > u128::from(u32::MAX) => {},
_ => continue,
}
span_lint(
cx,
ENUM_CLIKE_UNPORTABLE_VARIANT,
var.span,
"C-like enum variant discriminant is not portable to 32-bit targets",
);
};
}
}
}
}
}
| 39.963415 | 98 | 0.483979 |
c1dc727449ac0ca681a240a02f0071d2ed5f80d0 | 2,305 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A pass that checks to make sure private fields and methods aren't used
//! outside their scopes. This pass will also generate a set of exported items
//! which are available for use externally when compiled as a library.
use util::nodemap::{DefIdSet, FnvHashMap};
use std::hash::Hash;
use syntax::ast::NodeId;
// Accessibility levels, sorted in ascending order
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum AccessLevel {
// Exported items + items participating in various kinds of public interfaces,
// but not directly nameable. For example, if function `fn f() -> T {...}` is
// public, then type `T` is exported. Its values can be obtained by other crates
    // even if the type itself is not nameable.
// FIXME: Mostly unimplemented. Only `type` aliases export items currently.
Reachable,
// Public items + items accessible to other crates with help of `pub use` reexports
Exported,
// Items accessible to other crates directly, without help of reexports
Public,
}
// Accessibility levels for reachable HIR nodes
#[derive(Clone)]
pub struct AccessLevels<Id = NodeId> {
pub map: FnvHashMap<Id, AccessLevel>
}
impl<Id: Hash + Eq> AccessLevels<Id> {
pub fn is_reachable(&self, id: Id) -> bool {
self.map.contains_key(&id)
}
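    // The `>=` comparisons below rely on `AccessLevel`'s ascending
    // `derive(Ord)` ordering: Reachable < Exported < Public.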
pub fn is_exported(&self, id: Id) -> bool {
self.map.get(&id) >= Some(&AccessLevel::Exported)
}
pub fn is_public(&self, id: Id) -> bool {
self.map.get(&id) >= Some(&AccessLevel::Public)
}
}
impl<Id: Hash + Eq> Default for AccessLevels<Id> {
fn default() -> Self {
AccessLevels { map: Default::default() }
}
}
/// A set containing all exported definitions from external crates.
/// The set does not contain any entries from local crates.
pub type ExternalExports = DefIdSet;
| 37.177419 | 87 | 0.69718 |
8f084d45c83f0b5e83a3daeb31c7deacfa73d59b | 181 | #[cfg(
any(
feature = "google-cloud-orchestration-airflow-service-v1",
feature = "google-cloud-orchestration-airflow-service-v1beta1",
)
)]
pub mod service;
| 22.625 | 71 | 0.651934 |
16521b84d511a3b248415521188037b49f073fa7 | 153,466 | //! Persistent accounts are stored in below path location:
//! <path>/<pid>/data/
//!
//! The persistent store would allow for this mode of operation:
//! - Concurrent single thread append with many concurrent readers.
//!
//! The underlying memory is memory mapped to a file. The accounts would be
//! stored across multiple files and the mappings of file and offset of a
//! particular account would be stored in a shared index. This will allow for
//! concurrent commits without blocking reads, which will sequentially write
//! to memory, SSD, or disk, and should be as fast as the hardware allows for.
//! The only required in memory data structure with a write lock is the index,
//! which should be fast to update.
//!
//! AppendVec's only store accounts for single slots. To bootstrap the
//! index from a persistent store of AppendVec's, the entries include
//! a "write_version". A single global atomic `AccountsDB::write_version`
//! tracks the number of commits to the entire data store. So the latest
//! commit for each slot entry would be indexed.
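//!
//! A minimal usage sketch (mirroring the test-only `AbiExample` impl further
//! down in this file):
//!
//! ```ignore
//! let db = AccountsDB::new(Vec::new());
//! let key = Pubkey::default();
//! let account = Account::new(1, 5, &key);
//! db.store(0, &[(&key, &account)]);
//! db.add_root(0);
//! ```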
use crate::{
accounts_index::{AccountsIndex, Ancestors, SlotList, SlotSlice},
append_vec::{AppendVec, StoredAccount, StoredMeta},
};
use byteorder::{ByteOrder, LittleEndian};
use lazy_static::lazy_static;
use log::*;
use rand::{thread_rng, Rng};
use rayon::{prelude::*, ThreadPool};
use serde::{Deserialize, Serialize};
use solana_measure::measure::Measure;
use solana_rayon_threadlimit::get_thread_count;
use solana_sdk::{
account::Account,
clock::{Epoch, Slot},
hash::{Hash, Hasher},
pubkey::Pubkey,
};
use std::{
collections::{HashMap, HashSet},
io::{Error as IOError, Result as IOResult},
iter::FromIterator,
ops::RangeBounds,
path::{Path, PathBuf},
sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard},
time::Instant,
};
use tempfile::TempDir;
const PAGE_SIZE: u64 = 4 * 1024;
pub const DEFAULT_FILE_SIZE: u64 = PAGE_SIZE * 1024;
pub const DEFAULT_NUM_THREADS: u32 = 8;
pub const DEFAULT_NUM_DIRS: u32 = 4;
lazy_static! {
// FROZEN_ACCOUNT_PANIC is used to signal local_cluster that an AccountsDB panic has occurred,
// as |cargo test| cannot observe panics in other threads
pub static ref FROZEN_ACCOUNT_PANIC: Arc<AtomicBool> = Arc::new(AtomicBool::new(false));
}
#[derive(Debug, Default)]
pub struct ErrorCounters {
pub total: usize,
pub account_in_use: usize,
pub account_loaded_twice: usize,
pub account_not_found: usize,
pub blockhash_not_found: usize,
pub blockhash_too_old: usize,
pub call_chain_too_deep: usize,
pub duplicate_signature: usize,
pub instruction_error: usize,
pub insufficient_funds: usize,
pub invalid_account_for_fee: usize,
pub invalid_account_index: usize,
pub invalid_program_for_execution: usize,
pub not_allowed_during_cluster_maintenance: usize,
}
#[derive(Default, Debug, PartialEq, Clone)]
pub struct AccountInfo {
/// index identifying the append storage
store_id: AppendVecId,
/// offset into the storage
offset: usize,
/// lamports in the account used when squashing kept for optimization
/// purposes to remove accounts with zero balance.
lamports: u64,
}
/// An offset into the AccountsDB::storage vector
pub type AppendVecId = usize;
pub type SnapshotStorage = Vec<Arc<AccountStorageEntry>>;
pub type SnapshotStorages = Vec<SnapshotStorage>;
// Each slot has a set of storage entries.
pub(crate) type SlotStores = HashMap<usize, Arc<AccountStorageEntry>>;
trait Versioned {
fn version(&self) -> u64;
}
impl Versioned for (u64, Hash) {
fn version(&self) -> u64 {
self.0
}
}
impl Versioned for (u64, AccountInfo) {
fn version(&self) -> u64 {
self.0
}
}
#[derive(Clone, Default, Debug)]
pub struct AccountStorage(pub HashMap<Slot, SlotStores>);
impl AccountStorage {
fn scan_accounts(&self, account_info: &AccountInfo, slot: Slot) -> Option<(Account, Slot)> {
self.0
.get(&slot)
.and_then(|storage_map| storage_map.get(&account_info.store_id))
.and_then(|store| {
Some(
store
.accounts
.get_account(account_info.offset)?
.0
.clone_account(),
)
})
.map(|account| (account, slot))
}
}
#[derive(Debug, Eq, PartialEq, Copy, Clone, Deserialize, Serialize, AbiExample, AbiEnumVisitor)]
pub enum AccountStorageStatus {
Available = 0,
Full = 1,
Candidate = 2,
}
impl Default for AccountStorageStatus {
fn default() -> Self {
Self::Available
}
}
#[derive(Debug)]
pub enum BankHashVerificationError {
MismatchedAccountHash,
MismatchedBankHash,
MissingBankHash,
}
/// Persistent storage structure holding the accounts
#[derive(Debug)]
pub struct AccountStorageEntry {
pub(crate) id: AppendVecId,
pub(crate) slot: Slot,
/// storage holding the accounts
pub(crate) accounts: AppendVec,
/// Keeps track of the number of accounts stored in a specific AppendVec.
/// This is periodically checked to reuse the stores that do not have
/// any accounts in it
/// status corresponding to the storage, lets us know that
/// the append_vec, once maxed out, then emptied, can be reclaimed
count_and_status: RwLock<(usize, AccountStorageStatus)>,
/// This is the total number of accounts stored ever since initialized to keep
/// track of lifetime count of all store operations. And this differs from
/// count_and_status in that this field won't be decremented.
///
    /// This is used as a rough estimate for slot shrinking. For such a relaxed
    /// use case, this value IS NOT strictly synchronized with count_and_status!
approx_store_count: AtomicUsize,
}
impl AccountStorageEntry {
pub fn new(path: &Path, slot: Slot, id: usize, file_size: u64) -> Self {
let tail = AppendVec::new_relative_path(slot, id);
let path = Path::new(path).join(&tail);
let accounts = AppendVec::new(&path, true, file_size as usize);
Self {
id,
slot,
accounts,
count_and_status: RwLock::new((0, AccountStorageStatus::Available)),
approx_store_count: AtomicUsize::new(0),
}
}
pub(crate) fn new_empty_map(id: AppendVecId, accounts_current_len: usize) -> Self {
Self {
id,
slot: 0,
accounts: AppendVec::new_empty_map(accounts_current_len),
count_and_status: RwLock::new((0, AccountStorageStatus::Available)),
approx_store_count: AtomicUsize::new(0),
}
}
pub fn set_status(&self, mut status: AccountStorageStatus) {
let mut count_and_status = self.count_and_status.write().unwrap();
let count = count_and_status.0;
if status == AccountStorageStatus::Full && count == 0 {
// this case arises when the append_vec is full (store_ptrs fails),
// but all accounts have already been removed from the storage
//
// the only time it's safe to call reset() on an append_vec is when
// every account has been removed
// **and**
// the append_vec has previously been completely full
//
self.accounts.reset();
status = AccountStorageStatus::Available;
}
*count_and_status = (count, status);
}
pub fn status(&self) -> AccountStorageStatus {
self.count_and_status.read().unwrap().1
}
pub fn count(&self) -> usize {
self.count_and_status.read().unwrap().0
}
pub fn approx_stored_count(&self) -> usize {
self.approx_store_count.load(Ordering::Relaxed)
}
pub fn has_accounts(&self) -> bool {
self.count() > 0
}
pub fn slot(&self) -> Slot {
self.slot
}
pub fn append_vec_id(&self) -> AppendVecId {
self.id
}
pub fn flush(&self) -> Result<(), IOError> {
self.accounts.flush()
}
fn add_account(&self) {
let mut count_and_status = self.count_and_status.write().unwrap();
*count_and_status = (count_and_status.0 + 1, count_and_status.1);
self.approx_store_count.fetch_add(1, Ordering::Relaxed);
}
fn try_available(&self) -> bool {
let mut count_and_status = self.count_and_status.write().unwrap();
let (count, status) = *count_and_status;
if status == AccountStorageStatus::Available {
*count_and_status = (count, AccountStorageStatus::Candidate);
true
} else {
false
}
}
fn remove_account(&self) -> usize {
let mut count_and_status = self.count_and_status.write().unwrap();
let (mut count, mut status) = *count_and_status;
if count == 1 && status == AccountStorageStatus::Full {
// this case arises when we remove the last account from the
// storage, but we've learned from previous write attempts that
// the storage is full
//
// the only time it's safe to call reset() on an append_vec is when
// every account has been removed
// **and**
// the append_vec has previously been completely full
//
// otherwise, the storage may be in flight with a store()
// call
self.accounts.reset();
status = AccountStorageStatus::Available;
}
// Some code path is removing accounts too many; this may result in an
// unintended reveal of old state for unrelated accounts.
assert!(
count > 0,
"double remove of account in slot: {}/store: {}!!",
self.slot,
self.id
);
count -= 1;
*count_and_status = (count, status);
count
}
pub fn set_file<P: AsRef<Path>>(&mut self, path: P) -> IOResult<()> {
self.accounts.set_file(path)
}
pub fn get_relative_path(&self) -> Option<PathBuf> {
AppendVec::get_relative_path(self.accounts.get_path())
}
pub fn get_path(&self) -> PathBuf {
self.accounts.get_path()
}
}
pub fn get_temp_accounts_paths(count: u32) -> IOResult<(Vec<TempDir>, Vec<PathBuf>)> {
let temp_dirs: IOResult<Vec<TempDir>> = (0..count).map(|_| TempDir::new()).collect();
let temp_dirs = temp_dirs?;
let paths: Vec<PathBuf> = temp_dirs.iter().map(|t| t.path().to_path_buf()).collect();
Ok((temp_dirs, paths))
}
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, AbiExample)]
pub struct BankHashStats {
pub num_updated_accounts: u64,
pub num_removed_accounts: u64,
pub num_lamports_stored: u64,
pub total_data_len: u64,
pub num_executable_accounts: u64,
}
impl BankHashStats {
pub fn update(&mut self, account: &Account) {
if account.lamports == 0 {
self.num_removed_accounts += 1;
} else {
self.num_updated_accounts += 1;
}
self.total_data_len = self.total_data_len.wrapping_add(account.data.len() as u64);
if account.executable {
self.num_executable_accounts += 1;
}
self.num_lamports_stored = self.num_lamports_stored.wrapping_add(account.lamports);
}
pub fn merge(&mut self, other: &BankHashStats) {
self.num_updated_accounts += other.num_updated_accounts;
self.num_removed_accounts += other.num_removed_accounts;
self.total_data_len = self.total_data_len.wrapping_add(other.total_data_len);
self.num_lamports_stored = self
.num_lamports_stored
.wrapping_add(other.num_lamports_stored);
self.num_executable_accounts += other.num_executable_accounts;
}
}
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, AbiExample)]
pub struct BankHashInfo {
pub hash: Hash,
pub snapshot_hash: Hash,
pub stats: BankHashStats,
}
#[derive(Debug)]
struct FrozenAccountInfo {
pub hash: Hash, // Hash generated by hash_frozen_account_data()
pub lamports: u64, // Account balance cannot be lower than this amount
}
// This structure handles the load/store of the accounts
#[derive(Debug)]
pub struct AccountsDB {
/// Keeps tracks of index into AppendVec on a per slot basis
pub accounts_index: RwLock<AccountsIndex<AccountInfo>>,
pub storage: RwLock<AccountStorage>,
/// distribute the accounts across storage lists
pub next_id: AtomicUsize,
pub shrink_candidate_slots: Mutex<Vec<Slot>>,
pub(crate) write_version: AtomicU64,
/// Set of storage paths to pick from
pub(crate) paths: Vec<PathBuf>,
/// Directory of paths this accounts_db needs to hold/remove
temp_paths: Option<Vec<TempDir>>,
/// Starting file size of appendvecs
file_size: u64,
/// Accounts that will cause a panic! if data modified or lamports decrease
frozen_accounts: HashMap<Pubkey, FrozenAccountInfo>,
/// Thread pool used for par_iter
pub thread_pool: ThreadPool,
pub thread_pool_clean: ThreadPool,
/// Number of append vecs to create to maximize parallelism when scanning
/// the accounts
min_num_stores: usize,
pub bank_hashes: RwLock<HashMap<Slot, BankHashInfo>>,
dead_slots: RwLock<HashSet<Slot>>,
stats: AccountsStats,
}
#[derive(Debug, Default)]
struct AccountsStats {
delta_hash_scan_time_total_us: AtomicU64,
delta_hash_accumulate_time_total_us: AtomicU64,
delta_hash_merge_time_total_us: AtomicU64,
delta_hash_num: AtomicU64,
}
fn make_min_priority_thread_pool() -> ThreadPool {
// Use lower thread count to reduce priority.
let num_threads = std::cmp::max(2, num_cpus::get() / 4);
rayon::ThreadPoolBuilder::new()
.thread_name(|i| format!("solana-accounts-cleanup-{}", i))
.num_threads(num_threads)
.build()
.unwrap()
}
#[cfg(all(test, RUSTC_WITH_SPECIALIZATION))]
impl solana_sdk::abi_example::AbiExample for AccountsDB {
fn example() -> Self {
let accounts_db = AccountsDB::new_single();
let key = Pubkey::default();
let some_data_len = 5;
let some_slot: Slot = 0;
let account = Account::new(1, some_data_len, &key);
accounts_db.store(some_slot, &[(&key, &account)]);
accounts_db.add_root(0);
accounts_db
}
}
impl Default for AccountsDB {
fn default() -> Self {
let num_threads = get_thread_count();
let mut bank_hashes = HashMap::new();
bank_hashes.insert(0, BankHashInfo::default());
AccountsDB {
accounts_index: RwLock::new(AccountsIndex::default()),
storage: RwLock::new(AccountStorage(HashMap::new())),
next_id: AtomicUsize::new(0),
shrink_candidate_slots: Mutex::new(Vec::new()),
write_version: AtomicU64::new(0),
paths: vec![],
temp_paths: None,
file_size: DEFAULT_FILE_SIZE,
thread_pool: rayon::ThreadPoolBuilder::new()
.num_threads(num_threads)
.thread_name(|i| format!("solana-accounts-db-{}", i))
.build()
.unwrap(),
thread_pool_clean: make_min_priority_thread_pool(),
min_num_stores: num_threads,
bank_hashes: RwLock::new(bank_hashes),
frozen_accounts: HashMap::new(),
dead_slots: RwLock::new(HashSet::new()),
stats: AccountsStats::default(),
}
}
}
impl AccountsDB {
pub fn new(paths: Vec<PathBuf>) -> Self {
let new = if !paths.is_empty() {
Self {
paths,
temp_paths: None,
..Self::default()
}
} else {
// Create a temporary set of accounts directories, used primarily
// for testing
let (temp_dirs, paths) = get_temp_accounts_paths(DEFAULT_NUM_DIRS).unwrap();
Self {
paths,
temp_paths: Some(temp_dirs),
..Self::default()
}
};
{
for path in new.paths.iter() {
std::fs::create_dir_all(path).expect("Create directory failed.");
}
}
new
}
#[cfg(test)]
pub fn new_single() -> Self {
AccountsDB {
min_num_stores: 0,
..AccountsDB::new(Vec::new())
}
}
#[cfg(test)]
pub fn new_sized(paths: Vec<PathBuf>, file_size: u64) -> Self {
AccountsDB {
file_size,
..AccountsDB::new(paths)
}
}
fn new_storage_entry(&self, slot: Slot, path: &Path, size: u64) -> AccountStorageEntry {
AccountStorageEntry::new(
path,
slot,
self.next_id.fetch_add(1, Ordering::Relaxed),
size,
)
}
// Reclaim older states of rooted non-zero lamport accounts as a general
// AccountsDB bloat mitigation and preprocess for better zero-lamport purging.
fn clean_old_rooted_accounts(&self, purges_in_root: Vec<Pubkey>) {
// This number isn't carefully chosen; just guessed randomly such that
        // the hot loop will be on the order of ~X ms.
const INDEX_CLEAN_BULK_COUNT: usize = 4096;
let mut clean_rooted = Measure::start("clean_old_root-ms");
let reclaim_vecs =
purges_in_root
.par_chunks(INDEX_CLEAN_BULK_COUNT)
.map(|pubkeys: &[Pubkey]| {
let mut reclaims = Vec::new();
let accounts_index = self.accounts_index.read().unwrap();
for pubkey in pubkeys {
accounts_index.clean_rooted_entries(&pubkey, &mut reclaims);
}
reclaims
});
let reclaims: Vec<_> = reclaim_vecs.flatten().collect();
clean_rooted.stop();
inc_new_counter_info!("clean-old-root-par-clean-ms", clean_rooted.as_ms() as usize);
let mut measure = Measure::start("clean_old_root_reclaims");
self.handle_reclaims_maybe_cleanup(&reclaims);
measure.stop();
debug!("{} {}", clean_rooted, measure);
inc_new_counter_info!("clean-old-root-reclaim-ms", measure.as_ms() as usize);
}
fn do_reset_uncleaned_roots(&self, candidates: &mut MutexGuard<Vec<Slot>>) {
let previous_roots = self.accounts_index.write().unwrap().reset_uncleaned_roots();
candidates.extend(previous_roots);
}
#[cfg(test)]
fn reset_uncleaned_roots(&self) {
self.do_reset_uncleaned_roots(&mut self.shrink_candidate_slots.lock().unwrap());
}
fn calc_delete_dependencies(
purges: &HashMap<Pubkey, (SlotList<AccountInfo>, u64)>,
store_counts: &mut HashMap<AppendVecId, (usize, HashSet<Pubkey>)>,
) {
// Another pass to check if there are some filtered accounts which
        // do not match the criteria of deleting all appendvecs which contain them;
        // if so, increment their storage count.
let mut already_counted = HashSet::new();
for (_pubkey, (account_infos, ref_count_from_storage)) in purges.iter() {
let no_delete = if account_infos.len() as u64 != *ref_count_from_storage {
true
} else {
let mut no_delete = false;
for (_slot, account_info) in account_infos {
if store_counts.get(&account_info.store_id).unwrap().0 != 0 {
no_delete = true;
break;
}
}
no_delete
};
if no_delete {
let mut pending_store_ids: HashSet<usize> = HashSet::new();
for (_slot_id, account_info) in account_infos {
if !already_counted.contains(&account_info.store_id) {
pending_store_ids.insert(account_info.store_id);
}
}
while !pending_store_ids.is_empty() {
let id = pending_store_ids.iter().next().cloned().unwrap();
pending_store_ids.remove(&id);
if already_counted.contains(&id) {
continue;
}
store_counts.get_mut(&id).unwrap().0 += 1;
already_counted.insert(id);
let affected_pubkeys = &store_counts.get(&id).unwrap().1;
for key in affected_pubkeys {
for (_slot, account_info) in &purges.get(&key).unwrap().0 {
if !already_counted.contains(&account_info.store_id) {
pending_store_ids.insert(account_info.store_id);
}
}
}
}
}
}
}
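// Removes the exact (pubkey, slot) pairs from the accounts index and
// returns the reclaimed entries plus any pubkeys whose slot lists became
// empty (the "dead" keys).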
fn purge_keys_exact(
&self,
pubkey_to_slot_set: Vec<(Pubkey, HashSet<Slot>)>,
) -> (Vec<(u64, AccountInfo)>, Vec<Pubkey>) {
let mut reclaims = Vec::new();
let mut dead_keys = Vec::new();
let accounts_index = self.accounts_index.read().unwrap();
for (pubkey, slots_set) in pubkey_to_slot_set {
let (new_reclaims, is_empty) = accounts_index.purge_exact(&pubkey, slots_set);
if is_empty {
dead_keys.push(pubkey);
}
reclaims.extend(new_reclaims);
}
(reclaims, dead_keys)
}
// Purge zero-lamport accounts and older rooted account states, as a form
// of garbage collection.
// Only remove accounts whose entire rooted history can be purged, i.e.
// there are no live append vecs in the ancestors.
pub fn clean_accounts(&self) {
// Hold this lock to prevent slot shrinking from running, because shrinking
// may modify some rooted slot storages, and that must not happen while
// we're cleaning accounts, since we also modify the rooted slot storages
// here.
let mut candidates = self.shrink_candidate_slots.lock().unwrap();
self.report_store_stats();
let mut accounts_scan = Measure::start("accounts_scan");
let accounts_index = self.accounts_index.read().unwrap();
let pubkeys: Vec<Pubkey> = accounts_index.account_maps.keys().cloned().collect();
// parallel scan the index.
let (mut purges, purges_in_root) = pubkeys
.par_chunks(4096)
.map(|pubkeys: &[Pubkey]| {
let mut purges_in_root = Vec::new();
let mut purges = HashMap::new();
for pubkey in pubkeys {
if let Some((list, index)) = accounts_index.get(pubkey, None) {
let (slot, account_info) = &list[index];
if account_info.lamports == 0 {
purges.insert(*pubkey, accounts_index.would_purge(pubkey));
} else if accounts_index.uncleaned_roots.contains(slot) {
purges_in_root.push(*pubkey);
}
}
}
(purges, purges_in_root)
})
.reduce(
|| (HashMap::new(), Vec::new()),
|mut m1, m2| {
// Collapse down the hashmaps/vecs into one.
m1.0.extend(m2.0);
m1.1.extend(m2.1);
m1
},
);
drop(accounts_index);
accounts_scan.stop();
let mut clean_old_rooted = Measure::start("clean_old_roots");
if !purges_in_root.is_empty() {
self.clean_old_rooted_accounts(purges_in_root);
}
self.do_reset_uncleaned_roots(&mut candidates);
clean_old_rooted.stop();
let mut store_counts_time = Measure::start("store_counts");
// Calculate store counts as if everything was purged
// Then purge if we can
let mut store_counts: HashMap<AppendVecId, (usize, HashSet<Pubkey>)> = HashMap::new();
let storage = self.storage.read().unwrap();
for (key, (account_infos, _ref_count)) in &purges {
for (slot, account_info) in account_infos {
let slot_storage = storage.0.get(&slot).unwrap();
let store = slot_storage.get(&account_info.store_id).unwrap();
if let Some(store_count) = store_counts.get_mut(&account_info.store_id) {
store_count.0 -= 1;
store_count.1.insert(*key);
} else {
let mut key_set = HashSet::new();
key_set.insert(*key);
store_counts.insert(
account_info.store_id,
(store.count_and_status.read().unwrap().0 - 1, key_set),
);
}
}
}
store_counts_time.stop();
drop(storage);
let mut calc_deps_time = Measure::start("calc_deps");
Self::calc_delete_dependencies(&purges, &mut store_counts);
calc_deps_time.stop();
// Only keep purges where the entire history of the account in the root set
// can be purged. All AppendVecs for those updates are dead.
let mut purge_filter = Measure::start("purge_filter");
purges.retain(|_pubkey, (account_infos, _ref_count)| {
for (_slot, account_info) in account_infos.iter() {
if store_counts.get(&account_info.store_id).unwrap().0 != 0 {
return false;
}
}
true
});
purge_filter.stop();
let mut reclaims_time = Measure::start("reclaims");
// Recalculate reclaims with new purge set
let pubkey_to_slot_set: Vec<_> = purges
.into_iter()
.map(|(key, (slots_list, _ref_count))| {
(
key,
HashSet::from_iter(slots_list.into_iter().map(|(slot, _)| slot)),
)
})
.collect();
let (reclaims, dead_keys) = self.purge_keys_exact(pubkey_to_slot_set);
self.handle_dead_keys(dead_keys);
self.handle_reclaims_maybe_cleanup(&reclaims);
reclaims_time.stop();
datapoint_info!(
"clean_accounts",
("accounts_scan", accounts_scan.as_us() as i64, i64),
("store_counts", store_counts_time.as_us() as i64, i64),
("purge_filter", purge_filter.as_us() as i64, i64),
("calc_deps", calc_deps_time.as_us() as i64, i64),
("reclaims", reclaims_time.as_us() as i64, i64),
);
}
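// Drops index entries for pubkeys whose slot lists are now empty.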
fn handle_dead_keys(&self, dead_keys: Vec<Pubkey>) {
if !dead_keys.is_empty() {
let mut accounts_index = self.accounts_index.write().unwrap();
for key in &dead_keys {
if let Some((_ref_count, list)) = accounts_index.account_maps.get(key) {
if list.read().unwrap().is_empty() {
accounts_index.account_maps.remove(key);
}
}
}
}
}
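// Processes reclaims and accumulates any newly dead slots; the actual
// dead-slot processing is deferred until enough of them (> 5000) have
// piled up.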
fn handle_reclaims_maybe_cleanup(&self, reclaims: SlotSlice<AccountInfo>) {
let mut dead_accounts = Measure::start("reclaims::remove_dead_accounts");
let dead_slots = self.remove_dead_accounts(reclaims);
dead_accounts.stop();
let dead_slots_len = {
let mut dead_slots_w = self.dead_slots.write().unwrap();
dead_slots_w.extend(dead_slots);
dead_slots_w.len()
};
if dead_slots_len > 5000 {
self.process_dead_slots(None);
}
}
// Atomically process reclaims and new dead_slots in this thread, guaranteeing
// complete data removal for slots in reclaims.
fn handle_reclaims_ensure_cleanup(&self, reclaims: SlotSlice<AccountInfo>) {
let mut dead_accounts = Measure::start("reclaims::remove_dead_accounts");
let dead_slots = self.remove_dead_accounts(reclaims);
dead_accounts.stop();
let mut dead_slots_w = self.dead_slots.write().unwrap();
dead_slots_w.extend(dead_slots);
self.process_dead_slots(Some(dead_slots_w));
}
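// Drains the current set of dead slots (from the passed-in write guard, or
// a freshly acquired one), then cleans their index entries and purges
// their storage.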
pub fn process_dead_slots<'a>(
&'a self,
dead_slots_w: Option<RwLockWriteGuard<'a, HashSet<Slot>>>,
) {
let empty = HashSet::new();
let mut dead_slots_w = dead_slots_w.unwrap_or_else(|| self.dead_slots.write().unwrap());
let dead_slots = std::mem::replace(&mut *dead_slots_w, empty);
drop(dead_slots_w);
let mut clean_dead_slots = Measure::start("reclaims::purge_slots");
self.clean_dead_slots(&dead_slots);
clean_dead_slots.stop();
let mut purge_slots = Measure::start("reclaims::purge_slots");
self.purge_slots(&dead_slots);
purge_slots.stop();
debug!(
"process_dead_slots({}): {} {}",
dead_slots.len(),
clean_dead_slots,
purge_slots
);
}
fn do_shrink_stale_slot(&self, slot: Slot) -> usize {
self.do_shrink_slot(slot, false)
}
fn do_shrink_slot_forced(&self, slot: Slot) {
self.do_shrink_slot(slot, true);
}
fn shrink_stale_slot(&self, candidates: &mut MutexGuard<Vec<Slot>>) -> usize {
if let Some(slot) = self.do_next_shrink_slot(candidates) {
self.do_shrink_stale_slot(slot)
} else {
0
}
}
// Reads all accounts in the given slot's AppendVecs, filters down to the
// alive accounts, then creates a minimal AppendVec holding just those.
fn do_shrink_slot(&self, slot: Slot, forced: bool) -> usize {
trace!("shrink_stale_slot: slot: {}", slot);
let mut stored_accounts = vec![];
{
let storage = self.storage.read().unwrap();
if let Some(stores) = storage.0.get(&slot) {
let mut alive_count = 0;
let mut stored_count = 0;
for store in stores.values() {
alive_count += store.count();
stored_count += store.approx_stored_count();
}
if (alive_count as f32 / stored_count as f32) >= 0.80 && !forced {
trace!(
"shrink_stale_slot: not enough space to shrink: {} / {}",
alive_count,
stored_count,
);
return 0;
}
for store in stores.values() {
let mut start = 0;
while let Some((account, next)) = store.accounts.get_account(start) {
stored_accounts.push((
account.meta.pubkey,
account.clone_account(),
*account.hash,
next - start,
(store.id, account.offset),
account.meta.write_version,
));
start = next;
}
}
}
}
let alive_accounts: Vec<_> = {
let accounts_index = self.accounts_index.read().unwrap();
stored_accounts
.iter()
.filter(
|(
pubkey,
_account,
_account_hash,
_storage_size,
(store_id, offset),
_write_version,
)| {
if let Some((list, _)) = accounts_index.get(pubkey, None) {
list.iter()
.any(|(_slot, i)| i.store_id == *store_id && i.offset == *offset)
} else {
false
}
},
)
.collect()
};
let alive_total: u64 = alive_accounts
.iter()
.map(
|(_pubkey, _account, _account_hash, account_size, _location, _write_version)| {
*account_size as u64
},
)
.sum();
let aligned_total: u64 = (alive_total + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1);
debug!(
"shrinking: slot: {}, stored_accounts: {} => alive_accounts: {} ({} bytes; aligned to: {})",
slot,
stored_accounts.len(),
alive_accounts.len(),
alive_total,
aligned_total
);
if aligned_total > 0 {
let mut accounts = Vec::with_capacity(alive_accounts.len());
let mut hashes = Vec::with_capacity(alive_accounts.len());
let mut write_versions = Vec::with_capacity(alive_accounts.len());
for (pubkey, account, account_hash, _size, _location, write_version) in &alive_accounts
{
accounts.push((pubkey, account));
hashes.push(*account_hash);
write_versions.push(*write_version);
}
let shrunken_store = self.create_and_insert_store(slot, aligned_total);
// Here we write back alive_accounts. This can be done atomically without
// taking rather wide locks across this whole function, because we're
// mutating rooted slots, and there should be no other writers to them.
let infos = self.store_accounts_to(
slot,
&accounts,
&hashes,
|_| shrunken_store.clone(),
write_versions.into_iter(),
);
let reclaims = self.update_index(slot, infos, &accounts);
self.handle_reclaims_maybe_cleanup(&reclaims);
let mut storage = self.storage.write().unwrap();
if let Some(slot_storage) = storage.0.get_mut(&slot) {
slot_storage.retain(|_key, store| store.count() > 0);
}
}
alive_accounts.len()
}
// Returns rooted slots indefinitely, in cyclic order
fn do_next_shrink_slot(&self, candidates: &mut MutexGuard<Vec<Slot>>) -> Option<Slot> {
// At this point, the candidates lock is guaranteed to be held, which keeps
// do_reset_uncleaned_roots() (in clean_accounts()) from updating candidates
// concurrently. Also, the list behind the lock may be refreshed here if
// it's empty.
let next = candidates.pop();
if next.is_some() {
next
} else {
let mut new_all_slots = self.all_root_slots_in_index();
let next = new_all_slots.pop();
// refresh candidates for later calls!
**candidates = new_all_slots;
next
}
}
#[cfg(test)]
fn next_shrink_slot(&self) -> Option<Slot> {
let mut candidates = self.shrink_candidate_slots.lock().unwrap();
self.do_next_shrink_slot(&mut candidates)
}
fn all_root_slots_in_index(&self) -> Vec<Slot> {
let index = self.accounts_index.read().unwrap();
index.roots.iter().cloned().collect()
}
fn all_slots_in_storage(&self) -> Vec<Slot> {
let storage = self.storage.read().unwrap();
storage.0.keys().cloned().collect()
}
pub fn process_stale_slot(&self) -> usize {
let mut measure = Measure::start("stale_slot_shrink-ms");
let candidates = self.shrink_candidate_slots.try_lock();
if candidates.is_err() {
// Skip and return immediately if locked by clean_accounts();
// the calling background thread will just retry later.
return 0;
}
// hold this lock as long as this shrinking process is running to avoid conflicts
// with clean_accounts().
let mut candidates = candidates.unwrap();
let count = self.shrink_stale_slot(&mut candidates);
measure.stop();
inc_new_counter_info!("stale_slot_shrink-ms", measure.as_ms() as usize);
count
}
#[cfg(test)]
fn shrink_all_stale_slots(&self) {
for slot in self.all_slots_in_storage() {
self.do_shrink_stale_slot(slot);
}
}
pub fn shrink_all_slots(&self) {
for slot in self.all_slots_in_storage() {
self.do_shrink_slot_forced(slot);
}
}
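/// Scans every account visible from `ancestors`, feeding each hit into
/// `scan_func` together with a collector of type `A`, which is returned
/// once the scan completes.
///
/// A sketch based on the usage in the tests below:
/// ```ignore
/// let accounts: Vec<Account> =
///     db.scan_accounts(&ancestors, |accounts: &mut Vec<Account>, option| {
///         if let Some((_pubkey, account, _slot)) = option {
///             accounts.push(account);
///         }
///     });
/// ```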
pub fn scan_accounts<F, A>(&self, ancestors: &Ancestors, scan_func: F) -> A
where
F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>),
A: Default,
{
let mut collector = A::default();
let accounts_index = self.accounts_index.read().unwrap();
let storage = self.storage.read().unwrap();
accounts_index.scan_accounts(ancestors, |pubkey, (account_info, slot)| {
scan_func(
&mut collector,
storage
.scan_accounts(account_info, slot)
.map(|(account, slot)| (pubkey, account, slot)),
)
});
collector
}
pub fn range_scan_accounts<F, A, R>(&self, ancestors: &Ancestors, range: R, scan_func: F) -> A
where
F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>),
A: Default,
R: RangeBounds<Pubkey>,
{
let mut collector = A::default();
let accounts_index = self.accounts_index.read().unwrap();
let storage = self.storage.read().unwrap();
accounts_index.range_scan_accounts(ancestors, range, |pubkey, (account_info, slot)| {
scan_func(
&mut collector,
storage
.scan_accounts(account_info, slot)
.map(|(account, slot)| (pubkey, account, slot)),
)
});
collector
}
/// Scans a specific slot through all of its account storage entries in
/// parallel.
// PERF: each storage entry is read sequentially, but the entries
// themselves are scanned in parallel.
pub fn scan_account_storage<F, B>(&self, slot: Slot, scan_func: F) -> Vec<B>
where
F: Fn(&StoredAccount, AppendVecId, &mut B) + Send + Sync,
B: Send + Default,
{
self.scan_account_storage_inner(slot, scan_func, &self.storage.read().unwrap())
}
// The input storage must come from self.storage.read().unwrap()
fn scan_account_storage_inner<F, B>(
&self,
slot: Slot,
scan_func: F,
storage: &RwLockReadGuard<AccountStorage>,
) -> Vec<B>
where
F: Fn(&StoredAccount, AppendVecId, &mut B) + Send + Sync,
B: Send + Default,
{
let storage_maps: Vec<Arc<AccountStorageEntry>> = storage
.0
.get(&slot)
.unwrap_or(&HashMap::new())
.values()
.cloned()
.collect();
self.thread_pool.install(|| {
storage_maps
.into_par_iter()
.map(|storage| {
let accounts = storage.accounts.accounts(0);
let mut retval = B::default();
accounts.iter().for_each(|stored_account| {
scan_func(stored_account, storage.id, &mut retval)
});
retval
})
.collect()
})
}
pub fn set_hash(&self, slot: Slot, parent_slot: Slot) {
let mut bank_hashes = self.bank_hashes.write().unwrap();
if bank_hashes.get(&slot).is_some() {
error!(
"set_hash: already exists; multiple forks with shared slot {} as child (parent: {})!?",
slot, parent_slot,
);
return;
}
let new_hash_info = BankHashInfo {
hash: Hash::default(),
snapshot_hash: Hash::default(),
stats: BankHashStats::default(),
};
bank_hashes.insert(slot, new_hash_info);
}
pub fn load(
storage: &AccountStorage,
ancestors: &Ancestors,
accounts_index: &AccountsIndex<AccountInfo>,
pubkey: &Pubkey,
) -> Option<(Account, Slot)> {
let (lock, index) = accounts_index.get(pubkey, Some(ancestors))?;
let slot = lock[index].0;
//TODO: thread this as a ref
if let Some(slot_storage) = storage.0.get(&slot) {
let info = &lock[index].1;
slot_storage
.get(&info.store_id)
.and_then(|store| Some(store.accounts.get_account(info.offset)?.0.clone_account()))
.map(|account| (account, slot))
} else {
None
}
}
#[cfg(test)]
fn load_account_hash(&self, ancestors: &Ancestors, pubkey: &Pubkey) -> Hash {
let accounts_index = self.accounts_index.read().unwrap();
let (lock, index) = accounts_index.get(pubkey, Some(ancestors)).unwrap();
let slot = lock[index].0;
let storage = self.storage.read().unwrap();
let slot_storage = storage.0.get(&slot).unwrap();
let info = &lock[index].1;
let entry = slot_storage.get(&info.store_id).unwrap();
let account = entry.accounts.get_account(info.offset);
*account.as_ref().unwrap().0.hash
}
pub fn load_slow(&self, ancestors: &Ancestors, pubkey: &Pubkey) -> Option<(Account, Slot)> {
let accounts_index = self.accounts_index.read().unwrap();
let storage = self.storage.read().unwrap();
Self::load(&storage, ancestors, &accounts_index, pubkey)
}
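// Picks an available store for `slot`, starting from a random offset so
// concurrent writers spread across stores. If no store can be reserved (or
// the slot has none), a new one is created; and when stores are scarce
// relative to the account count, an extra store is created so parallel
// storage scans have enough work per CPU.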
fn find_storage_candidate(&self, slot: Slot) -> Arc<AccountStorageEntry> {
let mut create_extra = false;
let stores = self.storage.read().unwrap();
if let Some(slot_stores) = stores.0.get(&slot) {
if !slot_stores.is_empty() {
if slot_stores.len() <= self.min_num_stores {
let mut total_accounts = 0;
for store in slot_stores.values() {
total_accounts += store.count_and_status.read().unwrap().0;
}
// Create more stores so that when scanning the storage all CPUs have work
if (total_accounts / 16) >= slot_stores.len() {
create_extra = true;
}
}
// pick an available store at random by iterating from a random point
let to_skip = thread_rng().gen_range(0, slot_stores.len());
for (i, store) in slot_stores.values().cycle().skip(to_skip).enumerate() {
if store.try_available() {
let ret = store.clone();
drop(stores);
if create_extra {
self.create_and_insert_store(slot, self.file_size);
}
return ret;
}
// we've looked at every store; bail...
if i == slot_stores.len() {
break;
}
}
}
}
drop(stores);
let store = self.create_and_insert_store(slot, self.file_size);
store.try_available();
store
}
fn create_and_insert_store(&self, slot: Slot, size: u64) -> Arc<AccountStorageEntry> {
let path_index = thread_rng().gen_range(0, self.paths.len());
let store =
Arc::new(self.new_storage_entry(slot, &Path::new(&self.paths[path_index]), size));
let store_for_index = store.clone();
let mut stores = self.storage.write().unwrap();
let slot_storage = stores.0.entry(slot).or_insert_with(HashMap::new);
slot_storage.insert(store.id, store_for_index);
store
}
pub fn purge_slot(&self, slot: Slot) {
let mut slots = HashSet::new();
slots.insert(slot);
self.purge_slots(&slots);
}
pub fn purge_slots(&self, slots: &HashSet<Slot>) {
// add_root should be called first
let accounts_index = self.accounts_index.read().unwrap();
let non_roots: Vec<_> = slots
.iter()
.filter(|slot| !accounts_index.is_root(**slot))
.collect();
drop(accounts_index);
let mut storage = self.storage.write().unwrap();
for slot in non_roots {
storage.0.remove(&slot);
}
}
pub fn remove_unrooted_slot(&self, remove_slot: Slot) {
if self.accounts_index.read().unwrap().is_root(remove_slot) {
panic!("Trying to remove accounts for rooted slot {}", remove_slot);
}
let pubkey_sets: Vec<HashSet<Pubkey>> = self.scan_account_storage(
remove_slot,
|stored_account: &StoredAccount, _, accum: &mut HashSet<Pubkey>| {
accum.insert(stored_account.meta.pubkey);
},
);
// Purge this slot from the accounts index
let mut reclaims = vec![];
{
let pubkeys = pubkey_sets.iter().flatten();
let accounts_index = self.accounts_index.read().unwrap();
for pubkey in pubkeys {
accounts_index.clean_unrooted_entries_by_slot(remove_slot, pubkey, &mut reclaims);
}
}
// 1) Remove old bank hash from self.bank_hashes
// 2) Purge this slot's storage entries from self.storage
self.handle_reclaims_ensure_cleanup(&reclaims);
assert!(self.storage.read().unwrap().0.get(&remove_slot).is_none());
}
pub fn hash_stored_account(slot: Slot, account: &StoredAccount) -> Hash {
Self::hash_account_data(
slot,
account.account_meta.lamports,
&account.account_meta.owner,
account.account_meta.executable,
account.account_meta.rent_epoch,
account.data,
&account.meta.pubkey,
)
}
pub fn hash_account(slot: Slot, account: &Account, pubkey: &Pubkey) -> Hash {
Self::hash_account_data(
slot,
account.lamports,
&account.owner,
account.executable,
account.rent_epoch,
&account.data,
pubkey,
)
}
fn hash_frozen_account_data(account: &Account) -> Hash {
let mut hasher = Hasher::default();
hasher.hash(&account.data);
hasher.hash(&account.owner.as_ref());
if account.executable {
hasher.hash(&[1u8; 1]);
} else {
hasher.hash(&[0u8; 1]);
}
hasher.result()
}
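/// Computes an account's hash from its fields. Zero-lamport accounts
/// always hash to `Hash::default()`; otherwise the hash covers, in order:
/// lamports, slot, rent_epoch (all little-endian), the account data, the
/// executable flag as a single byte, the owner, and the pubkey.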
pub fn hash_account_data(
slot: Slot,
lamports: u64,
owner: &Pubkey,
executable: bool,
rent_epoch: Epoch,
data: &[u8],
pubkey: &Pubkey,
) -> Hash {
if lamports == 0 {
return Hash::default();
}
let mut hasher = Hasher::default();
let mut buf = [0u8; 8];
LittleEndian::write_u64(&mut buf[..], lamports);
hasher.hash(&buf);
LittleEndian::write_u64(&mut buf[..], slot);
hasher.hash(&buf);
LittleEndian::write_u64(&mut buf[..], rent_epoch);
hasher.hash(&buf);
hasher.hash(&data);
if executable {
hasher.hash(&[1u8; 1]);
} else {
hasher.hash(&[0u8; 1]);
}
hasher.hash(&owner.as_ref());
hasher.hash(&pubkey.as_ref());
hasher.result()
}
fn bulk_assign_write_version(&self, count: usize) -> u64 {
self.write_version
.fetch_add(count as u64, Ordering::Relaxed)
}
fn store_accounts(
&self,
slot: Slot,
accounts: &[(&Pubkey, &Account)],
hashes: &[Hash],
) -> Vec<AccountInfo> {
let mut current_version = self.bulk_assign_write_version(accounts.len());
let write_version_producer = std::iter::from_fn(move || {
let ret = current_version;
current_version += 1;
Some(ret)
});
let storage_finder = |slot| self.find_storage_candidate(slot);
self.store_accounts_to(
slot,
accounts,
hashes,
storage_finder,
write_version_producer,
)
}
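// Appends `accounts` (paired with the produced write versions) into stores
// chosen by `storage_finder`, retrying with a fresh store whenever the
// current one fills up (and allocating an oversized store if a single
// account exceeds the default file size). Returns the resulting
// AccountInfos.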
fn store_accounts_to<F: FnMut(Slot) -> Arc<AccountStorageEntry>, P: Iterator<Item = u64>>(
&self,
slot: Slot,
accounts: &[(&Pubkey, &Account)],
hashes: &[Hash],
mut storage_finder: F,
mut write_version_producer: P,
) -> Vec<AccountInfo> {
let default_account = Account::default();
let with_meta: Vec<(StoredMeta, &Account)> = accounts
.iter()
.map(|(pubkey, account)| {
let account = if account.lamports == 0 {
&default_account
} else {
*account
};
let data_len = account.data.len() as u64;
let meta = StoredMeta {
write_version: write_version_producer.next().unwrap(),
pubkey: **pubkey,
data_len,
};
(meta, account)
})
.collect();
let mut infos: Vec<AccountInfo> = Vec::with_capacity(with_meta.len());
while infos.len() < with_meta.len() {
let storage = storage_finder(slot);
let rvs = storage
.accounts
.append_accounts(&with_meta[infos.len()..], &hashes[infos.len()..]);
if rvs.is_empty() {
storage.set_status(AccountStorageStatus::Full);
// See if an account overflows the default append vec size.
let data_len = (with_meta[infos.len()].1.data.len() + 4096) as u64;
if data_len > self.file_size {
self.create_and_insert_store(slot, data_len * 2);
}
continue;
}
for (offset, (_, account)) in rvs.iter().zip(&with_meta[infos.len()..]) {
storage.add_account();
infos.push(AccountInfo {
store_id: storage.id,
offset: *offset,
lamports: account.lamports,
});
}
// restore the state to available
storage.set_status(AccountStorageStatus::Available);
}
infos
}
fn report_store_stats(&self) {
let mut total_count = 0;
let mut min = std::usize::MAX;
let mut min_slot = 0;
let mut max = 0;
let mut max_slot = 0;
let mut newest_slot = 0;
let mut oldest_slot = std::u64::MAX;
let stores = self.storage.read().unwrap();
for (slot, slot_stores) in &stores.0 {
total_count += slot_stores.len();
if slot_stores.len() < min {
min = slot_stores.len();
min_slot = *slot;
}
if slot_stores.len() > max {
max = slot_stores.len();
max_slot = *slot;
}
if *slot > newest_slot {
newest_slot = *slot;
}
if *slot < oldest_slot {
oldest_slot = *slot;
}
}
drop(stores);
info!("total_stores: {}, newest_slot: {}, oldest_slot: {}, max_slot: {} (num={}), min_slot: {} (num={})",
total_count, newest_slot, oldest_slot, max_slot, max, min_slot, min);
datapoint_info!("accounts_db-stores", ("total_count", total_count, i64));
datapoint_info!(
"accounts_db-perf-stats",
(
"delta_hash_num",
self.stats.delta_hash_num.swap(0, Ordering::Relaxed),
i64
),
(
"delta_hash_scan_us",
self.stats
.delta_hash_scan_time_total_us
.swap(0, Ordering::Relaxed),
i64
),
(
"delta_hash_merge_us",
self.stats
.delta_hash_merge_time_total_us
.swap(0, Ordering::Relaxed),
i64
),
(
"delta_hash_accumulate_us",
self.stats
.delta_hash_accumulate_time_total_us
.swap(0, Ordering::Relaxed),
i64
),
);
}
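/// Computes a Merkle-style root over the given hashes: the hashes are
/// chunked by `fanout`, each chunk is hashed into a single node (in
/// parallel), and the process repeats until a single root remains.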
pub fn compute_merkle_root(hashes: Vec<(Pubkey, Hash)>, fanout: usize) -> Hash {
let hashes: Vec<_> = hashes.into_iter().map(|(_pubkey, hash)| hash).collect();
let mut hashes: Vec<_> = hashes.chunks(fanout).map(|x| x.to_vec()).collect();
while hashes.len() > 1 {
let mut time = Measure::start("time");
let new_hashes: Vec<Hash> = hashes
.par_iter()
.map(|h| {
let mut hasher = Hasher::default();
for v in h.iter() {
hasher.hash(v.as_ref());
}
hasher.result()
})
.collect();
time.stop();
debug!("hashing {} {}", hashes.len(), time);
hashes = new_hashes.chunks(fanout).map(|x| x.to_vec()).collect();
}
let mut hasher = Hasher::default();
hashes.into_iter().flatten().for_each(|hash| {
hasher.hash(hash.as_ref());
});
hasher.result()
}
fn accumulate_account_hashes(mut hashes: Vec<(Pubkey, Hash)>) -> Hash {
let mut sort = Measure::start("sort");
hashes.par_sort_by(|a, b| a.0.cmp(&b.0));
sort.stop();
let mut hash_time = Measure::start("hash");
let fanout = 16;
let res = Self::compute_merkle_root(hashes, fanout);
hash_time.stop();
debug!("{} {}", sort, hash_time);
res
}
fn calculate_accounts_hash(
&self,
ancestors: &Ancestors,
check_hash: bool,
) -> Result<Hash, BankHashVerificationError> {
use BankHashVerificationError::*;
let mut scan = Measure::start("scan");
let accounts_index = self.accounts_index.read().unwrap();
let storage = self.storage.read().unwrap();
let keys: Vec<_> = accounts_index.account_maps.keys().collect();
let mismatch_found = AtomicU64::new(0);
let hashes: Vec<_> = keys
.par_iter()
.filter_map(|pubkey| {
if let Some((list, index)) = accounts_index.get(pubkey, Some(ancestors)) {
let (slot, account_info) = &list[index];
if account_info.lamports != 0 {
storage
.0
.get(&slot)
.and_then(|storage_map| storage_map.get(&account_info.store_id))
.and_then(|store| {
let account = store.accounts.get_account(account_info.offset)?.0;
if check_hash {
let hash = Self::hash_stored_account(*slot, &account);
if hash != *account.hash {
mismatch_found.fetch_add(1, Ordering::Relaxed);
return None;
}
}
Some((**pubkey, *account.hash))
})
} else {
None
}
} else {
None
}
})
.collect();
if mismatch_found.load(Ordering::Relaxed) > 0 {
warn!(
"{} mismatched account hash(es) found",
mismatch_found.load(Ordering::Relaxed)
);
return Err(MismatchedAccountHash);
}
scan.stop();
let hash_total = hashes.len();
let mut accumulate = Measure::start("accumulate");
let accumulated_hash = Self::accumulate_account_hashes(hashes);
accumulate.stop();
datapoint_info!(
"update_accounts_hash",
("accounts_scan", scan.as_us(), i64),
("hash_accumulate", accumulate.as_us(), i64),
("hash_total", hash_total, i64),
);
Ok(accumulated_hash)
}
pub fn get_accounts_hash(&self, slot: Slot) -> Hash {
let bank_hashes = self.bank_hashes.read().unwrap();
let bank_hash_info = bank_hashes.get(&slot).unwrap();
bank_hash_info.snapshot_hash
}
pub fn update_accounts_hash(&self, slot: Slot, ancestors: &Ancestors) -> Hash {
let hash = self.calculate_accounts_hash(ancestors, false).unwrap();
let mut bank_hashes = self.bank_hashes.write().unwrap();
let bank_hash_info = bank_hashes.get_mut(&slot).unwrap();
bank_hash_info.snapshot_hash = hash;
hash
}
pub fn verify_bank_hash(
&self,
slot: Slot,
ancestors: &Ancestors,
) -> Result<(), BankHashVerificationError> {
use BankHashVerificationError::*;
let calculated_hash = self.calculate_accounts_hash(ancestors, true)?;
let bank_hashes = self.bank_hashes.read().unwrap();
if let Some(found_hash_info) = bank_hashes.get(&slot) {
if calculated_hash == found_hash_info.snapshot_hash {
Ok(())
} else {
warn!(
"mismatched bank hash for slot {}: {} (calculated) != {} (expected)",
slot, calculated_hash, found_hash_info.snapshot_hash
);
Err(MismatchedBankHash)
}
} else {
Err(MissingBankHash)
}
}
pub fn get_accounts_delta_hash(&self, slot: Slot) -> Hash {
let mut scan = Measure::start("scan");
let mut accumulator: Vec<HashMap<Pubkey, (u64, Hash)>> = self.scan_account_storage(
slot,
|stored_account: &StoredAccount,
_store_id: AppendVecId,
accum: &mut HashMap<Pubkey, (u64, Hash)>| {
accum.insert(
stored_account.meta.pubkey,
(stored_account.meta.write_version, *stored_account.hash),
);
},
);
scan.stop();
let mut merge = Measure::start("merge");
let mut account_maps = accumulator.pop().unwrap();
while let Some(maps) = accumulator.pop() {
AccountsDB::merge(&mut account_maps, &maps);
}
merge.stop();
let mut accumulate = Measure::start("accumulate");
let hashes: Vec<_> = account_maps
.into_iter()
.map(|(pubkey, (_, hash))| (pubkey, hash))
.collect();
let ret = Self::accumulate_account_hashes(hashes);
accumulate.stop();
self.stats
.delta_hash_scan_time_total_us
.fetch_add(scan.as_us(), Ordering::Relaxed);
self.stats
.delta_hash_merge_time_total_us
.fetch_add(merge.as_us(), Ordering::Relaxed);
self.stats
.delta_hash_accumulate_time_total_us
.fetch_add(accumulate.as_us(), Ordering::Relaxed);
self.stats.delta_hash_num.fetch_add(1, Ordering::Relaxed);
ret
}
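// First tries to update existing index entries under a read lock; any
// pubkeys that turn out to be new are then inserted under a write lock.
// Returns the reclaimed (overwritten) entries.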
fn update_index(
&self,
slot: Slot,
infos: Vec<AccountInfo>,
accounts: &[(&Pubkey, &Account)],
) -> SlotList<AccountInfo> {
let mut reclaims = SlotList::<AccountInfo>::with_capacity(infos.len() * 2);
let index = self.accounts_index.read().unwrap();
let mut update_index_work = Measure::start("update_index_work");
let inserts: Vec<_> = infos
.into_iter()
.zip(accounts.iter())
.filter_map(|(info, pubkey_account)| {
let pubkey = pubkey_account.0;
index
.update(slot, pubkey, info, &mut reclaims)
.map(|info| (pubkey, info))
})
.collect();
drop(index);
if !inserts.is_empty() {
let mut index = self.accounts_index.write().unwrap();
for (pubkey, info) in inserts {
index.insert(slot, pubkey, info, &mut reclaims);
}
}
update_index_work.stop();
reclaims
}
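// Decrements the store counts for the reclaimed accounts and returns the
// slots whose stores have all dropped to a count of zero.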
fn remove_dead_accounts(&self, reclaims: SlotSlice<AccountInfo>) -> HashSet<Slot> {
let storage = self.storage.read().unwrap();
let mut dead_slots = HashSet::new();
for (slot, account_info) in reclaims {
if let Some(slot_storage) = storage.0.get(slot) {
if let Some(store) = slot_storage.get(&account_info.store_id) {
assert_eq!(
*slot, store.slot,
"AccountDB::accounts_index corrupted. Storage should only point to one slot"
);
let count = store.remove_account();
if count == 0 {
dead_slots.insert(*slot);
}
}
}
}
dead_slots.retain(|slot| {
if let Some(slot_storage) = storage.0.get(&slot) {
for x in slot_storage.values() {
if x.count() != 0 {
return false;
}
}
}
true
});
dead_slots
}
pub fn clean_dead_slots(&self, dead_slots: &HashSet<Slot>) {
if !dead_slots.is_empty() {
{
let mut measure = Measure::start("clean_dead_slots-ms");
let storage = self.storage.read().unwrap();
let mut stores: Vec<Arc<AccountStorageEntry>> = vec![];
for slot in dead_slots.iter() {
if let Some(slot_storage) = storage.0.get(slot) {
for store in slot_storage.values() {
stores.push(store.clone());
}
}
}
drop(storage);
datapoint_debug!("clean_dead_slots", ("stores", stores.len(), i64));
let pubkeys: Vec<Vec<Pubkey>> = {
self.thread_pool_clean.install(|| {
stores
.into_par_iter()
.map(|store| {
let accounts = store.accounts.accounts(0);
accounts
.into_iter()
.map(|account| account.meta.pubkey)
.collect::<Vec<Pubkey>>()
})
.collect()
})
};
let index = self.accounts_index.read().unwrap();
for pubkey_v in pubkeys {
for pubkey in pubkey_v {
index.unref_from_storage(&pubkey);
}
}
drop(index);
measure.stop();
inc_new_counter_info!("clean_dead_slots-unref-ms", measure.as_ms() as usize);
let mut index = self.accounts_index.write().unwrap();
for slot in dead_slots.iter() {
index.clean_dead_slot(*slot);
}
}
{
let mut bank_hashes = self.bank_hashes.write().unwrap();
for slot in dead_slots.iter() {
bank_hashes.remove(slot);
}
}
}
}
fn hash_accounts(&self, slot: Slot, accounts: &[(&Pubkey, &Account)]) -> Vec<Hash> {
let mut stats = BankHashStats::default();
let hashes: Vec<_> = accounts
.iter()
.map(|(pubkey, account)| {
stats.update(account);
Self::hash_account(slot, account, pubkey)
})
.collect();
let mut bank_hashes = self.bank_hashes.write().unwrap();
let slot_info = bank_hashes
.entry(slot)
.or_insert_with(BankHashInfo::default);
slot_info.stats.merge(&stats);
hashes
}
pub(crate) fn freeze_accounts(&mut self, ancestors: &Ancestors, account_pubkeys: &[Pubkey]) {
for account_pubkey in account_pubkeys {
if let Some((account, _slot)) = self.load_slow(ancestors, &account_pubkey) {
let frozen_account_info = FrozenAccountInfo {
hash: Self::hash_frozen_account_data(&account),
lamports: account.lamports,
};
warn!(
"Account {} is now frozen at lamports={}, hash={}",
account_pubkey, frozen_account_info.lamports, frozen_account_info.hash
);
self.frozen_accounts
.insert(*account_pubkey, frozen_account_info);
} else {
panic!(
"Unable to freeze an account that does not exist: {}",
account_pubkey
);
}
}
}
/// Cause a panic if frozen accounts would be affected by data in `accounts`
fn assert_frozen_accounts(&self, accounts: &[(&Pubkey, &Account)]) {
if self.frozen_accounts.is_empty() {
return;
}
for (account_pubkey, account) in accounts.iter() {
if let Some(frozen_account_info) = self.frozen_accounts.get(*account_pubkey) {
if account.lamports < frozen_account_info.lamports {
FROZEN_ACCOUNT_PANIC.store(true, Ordering::Relaxed);
panic!(
"Frozen account {} modified. Lamports decreased from {} to {}",
account_pubkey, frozen_account_info.lamports, account.lamports,
)
}
let hash = Self::hash_frozen_account_data(&account);
if hash != frozen_account_info.hash {
FROZEN_ACCOUNT_PANIC.store(true, Ordering::Relaxed);
panic!(
"Frozen account {} modified. Hash changed from {} to {}",
account_pubkey, frozen_account_info.hash, hash,
)
}
}
}
}
/// Store the account update.
pub fn store(&self, slot: Slot, accounts: &[(&Pubkey, &Account)]) {
self.assert_frozen_accounts(accounts);
let hashes = self.hash_accounts(slot, accounts);
self.store_with_hashes(slot, accounts, &hashes);
}
fn store_with_hashes(&self, slot: Slot, accounts: &[(&Pubkey, &Account)], hashes: &[Hash]) {
let mut store_accounts = Measure::start("store::store_accounts");
let infos = self.store_accounts(slot, accounts, hashes);
store_accounts.stop();
let mut update_index = Measure::start("store::update_index");
let reclaims = self.update_index(slot, infos, accounts);
update_index.stop();
trace!("reclaim: {}", reclaims.len());
self.handle_reclaims_maybe_cleanup(&reclaims);
}
pub fn add_root(&self, slot: Slot) {
self.accounts_index.write().unwrap().add_root(slot)
}
pub fn get_snapshot_storages(&self, snapshot_slot: Slot) -> SnapshotStorages {
let accounts_index = self.accounts_index.read().unwrap();
let r_storage = self.storage.read().unwrap();
r_storage
.0
.iter()
.filter(|(slot, _slot_stores)| {
**slot <= snapshot_slot && accounts_index.is_root(**slot)
})
.map(|(_slot, slot_stores)| {
slot_stores
.values()
.filter(|x| x.has_accounts())
.cloned()
.collect()
})
.filter(|snapshot_storage: &SnapshotStorage| !snapshot_storage.is_empty())
.collect()
}
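// Merges `source` into `dest`, keeping whichever entry has the newer
// version for each pubkey (ties go to `source`).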
fn merge<X>(dest: &mut HashMap<Pubkey, X>, source: &HashMap<Pubkey, X>)
where
X: Versioned + Clone,
{
for (key, source_item) in source.iter() {
if let Some(dest_item) = dest.get(key) {
if dest_item.version() > source_item.version() {
continue;
}
}
dest.insert(*key, source_item.clone());
}
}
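// Rebuilds the accounts index from storage: scans every slot's append
// vecs, inserts entries in write-version order (older versions first, so
// later purges reveal the correct values), marks all slots as roots last,
// and finally restores each store's account counts.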
pub fn generate_index(&self) {
let mut accounts_index = self.accounts_index.write().unwrap();
let storage = self.storage.read().unwrap();
let mut slots: Vec<Slot> = storage.0.keys().cloned().collect();
slots.sort();
let mut last_log_update = Instant::now();
for (index, slot) in slots.iter().enumerate() {
let now = Instant::now();
if now.duration_since(last_log_update).as_secs() >= 10 {
info!("generating index: {}/{} slots...", index, slots.len());
last_log_update = now;
}
let accumulator: Vec<HashMap<Pubkey, Vec<(u64, AccountInfo)>>> = self
.scan_account_storage_inner(
*slot,
|stored_account: &StoredAccount,
store_id: AppendVecId,
accum: &mut HashMap<Pubkey, Vec<(u64, AccountInfo)>>| {
let account_info = AccountInfo {
store_id,
offset: stored_account.offset,
lamports: stored_account.account_meta.lamports,
};
let entry = accum
.entry(stored_account.meta.pubkey)
.or_insert_with(Vec::new);
entry.push((stored_account.meta.write_version, account_info));
},
&storage,
);
let mut accounts_map: HashMap<Pubkey, Vec<(u64, AccountInfo)>> = HashMap::new();
for accumulator_entry in accumulator.iter() {
for (pubkey, storage_entry) in accumulator_entry {
let entry = accounts_map.entry(*pubkey).or_insert_with(Vec::new);
entry.extend(storage_entry.iter().cloned());
}
}
// Indexes need to be restored even for older write versions, which may be
// shadowing other accounts. When those are later purged, the original,
// no-longer-shadowed account value becomes visible again as the account is
// restored from the append-vec.
if !accumulator.is_empty() {
let mut _reclaims: Vec<(u64, AccountInfo)> = vec![];
for (pubkey, account_infos) in accounts_map.iter_mut() {
account_infos.sort_by(|a, b| a.0.cmp(&b.0));
for (_, account_info) in account_infos {
accounts_index.insert(*slot, pubkey, account_info.clone(), &mut _reclaims);
}
}
}
}
// Need to add these last, otherwise older updates will be cleaned
for slot in slots {
accounts_index.add_root(slot);
}
let mut counts = HashMap::new();
for slot_list in accounts_index.account_maps.values() {
for (_slot, account_entry) in slot_list.1.read().unwrap().iter() {
*counts.entry(account_entry.store_id).or_insert(0) += 1;
}
}
for slot_stores in storage.0.values() {
for (id, store) in slot_stores {
if let Some(count) = counts.get(&id) {
trace!(
"id: {} setting count: {} cur: {}",
id,
count,
store.count_and_status.read().unwrap().0
);
store.count_and_status.write().unwrap().0 = *count;
} else {
trace!("id: {} clearing count", id);
store.count_and_status.write().unwrap().0 = 0;
}
store
.approx_store_count
.store(store.accounts.accounts(0).len(), Ordering::Relaxed);
}
}
}
pub(crate) fn print_accounts_stats(&self, label: &'static str) {
self.print_index(label);
self.print_count_and_status(label);
}
fn print_index(&self, label: &'static str) {
let mut roots: Vec<_> = self
.accounts_index
.read()
.unwrap()
.roots
.iter()
.cloned()
.collect();
roots.sort();
info!("{}: accounts_index roots: {:?}", label, roots,);
for (pubkey, list) in &self.accounts_index.read().unwrap().account_maps {
info!(" key: {}", pubkey);
info!(" slots: {:?}", *list.1.read().unwrap());
}
}
fn print_count_and_status(&self, label: &'static str) {
let storage = self.storage.read().unwrap();
let mut slots: Vec<_> = storage.0.keys().cloned().collect();
slots.sort();
info!("{}: count_and status for {} slots:", label, slots.len());
for slot in &slots {
let slot_stores = storage.0.get(slot).unwrap();
let mut ids: Vec<_> = slot_stores.keys().cloned().collect();
ids.sort();
for id in &ids {
let entry = slot_stores.get(id).unwrap();
info!(
" slot: {} id: {} count_and_status: {:?} approx_store_count: {} len: {} capacity: {}",
slot,
id,
*entry.count_and_status.read().unwrap(),
entry.approx_store_count.load(Ordering::Relaxed),
entry.accounts.len(),
entry.accounts.capacity(),
);
}
}
}
}
#[cfg(test)]
pub mod tests {
// TODO: all the bank tests are bank specific, issue: 2194
use super::*;
use crate::{accounts_index::RefCount, append_vec::AccountMeta};
use assert_matches::assert_matches;
use rand::{thread_rng, Rng};
use solana_sdk::{account::Account, hash::HASH_BYTES};
use std::{fs, str::FromStr};
fn linear_ancestors(end_slot: u64) -> Ancestors {
let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
for i in 1..end_slot {
ancestors.insert(i, (i - 1) as usize);
}
ancestors
}
#[test]
fn test_accountsdb_add_root() {
solana_logger::setup();
let db = AccountsDB::new(Vec::new());
let key = Pubkey::default();
let account0 = Account::new(1, 0, &key);
db.store(0, &[(&key, &account0)]);
db.add_root(0);
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(db.load_slow(&ancestors, &key), Some((account0, 0)));
}
#[test]
fn test_accountsdb_latest_ancestor() {
solana_logger::setup();
let db = AccountsDB::new(Vec::new());
let key = Pubkey::default();
let account0 = Account::new(1, 0, &key);
db.store(0, &[(&key, &account0)]);
let account1 = Account::new(0, 0, &key);
db.store(1, &[(&key, &account1)]);
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account1);
let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account1);
let accounts: Vec<Account> =
db.scan_accounts(&ancestors, |accounts: &mut Vec<Account>, option| {
if let Some(data) = option {
accounts.push(data.1);
}
});
assert_eq!(accounts, vec![account1]);
}
#[test]
fn test_accountsdb_latest_ancestor_with_root() {
solana_logger::setup();
let db = AccountsDB::new(Vec::new());
let key = Pubkey::default();
let account0 = Account::new(1, 0, &key);
db.store(0, &[(&key, &account0)]);
let account1 = Account::new(0, 0, &key);
db.store(1, &[(&key, &account1)]);
db.add_root(0);
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account1);
let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account1);
}
#[test]
fn test_accountsdb_root_one_slot() {
solana_logger::setup();
let db = AccountsDB::new(Vec::new());
let key = Pubkey::default();
let account0 = Account::new(1, 0, &key);
// store value 1 in the "root", i.e. db zero
db.store(0, &[(&key, &account0)]);
// now we have:
//
// root0 -> key.lamports==1
// / \
// / \
// key.lamports==0 <- slot1 \
// slot2 -> key.lamports==1
// (via root0)
// store value 0 in one child
let account1 = Account::new(0, 0, &key);
db.store(1, &[(&key, &account1)]);
// Masking accounts is done at the Accounts level; at the AccountsDB level
// we see the original account (though we could also accept "None", which
// is implemented at the Accounts level).
let ancestors = vec![(0, 0), (1, 1)].into_iter().collect();
assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account1);
// we should see 1 lamport in slot 2
let ancestors = vec![(0, 0), (2, 2)].into_iter().collect();
assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account0);
db.add_root(0);
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(db.load_slow(&ancestors, &key), Some((account1, 1)));
let ancestors = vec![(2, 2)].into_iter().collect();
assert_eq!(db.load_slow(&ancestors, &key), Some((account0, 0))); // original value
}
#[test]
fn test_accountsdb_add_root_many() {
let db = AccountsDB::new(Vec::new());
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 100, 0, 0);
for _ in 1..100 {
let idx = thread_rng().gen_range(0, 99);
let ancestors = vec![(0, 0)].into_iter().collect();
let account = db.load_slow(&ancestors, &pubkeys[idx]).unwrap();
let mut default_account = Account::default();
default_account.lamports = (idx + 1) as u64;
assert_eq!((default_account, 0), account);
}
db.add_root(0);
// check that all the accounts appear with a new root
for _ in 1..100 {
let idx = thread_rng().gen_range(0, 99);
let ancestors = vec![(0, 0)].into_iter().collect();
let account0 = db.load_slow(&ancestors, &pubkeys[idx]).unwrap();
let ancestors = vec![(1, 1)].into_iter().collect();
let account1 = db.load_slow(&ancestors, &pubkeys[idx]).unwrap();
let mut default_account = Account::default();
default_account.lamports = (idx + 1) as u64;
assert_eq!(&default_account, &account0.0);
assert_eq!(&default_account, &account1.0);
}
}
#[test]
fn test_accountsdb_count_stores() {
solana_logger::setup();
let db = AccountsDB::new_single();
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 2, DEFAULT_FILE_SIZE as usize / 3, 0);
assert!(check_storage(&db, 0, 2));
let pubkey = Pubkey::new_rand();
let account = Account::new(1, DEFAULT_FILE_SIZE as usize / 3, &pubkey);
db.store(1, &[(&pubkey, &account)]);
db.store(1, &[(&pubkeys[0], &account)]);
{
let stores = db.storage.read().unwrap();
let slot_0_stores = &stores.0.get(&0).unwrap();
let slot_1_stores = &stores.0.get(&1).unwrap();
assert_eq!(slot_0_stores.len(), 1);
assert_eq!(slot_1_stores.len(), 1);
assert_eq!(slot_0_stores[&0].count(), 2);
assert_eq!(slot_1_stores[&1].count(), 2);
assert_eq!(slot_0_stores[&0].approx_stored_count(), 2);
assert_eq!(slot_1_stores[&1].approx_stored_count(), 2);
}
// adding root doesn't change anything
db.add_root(1);
{
let stores = db.storage.read().unwrap();
let slot_0_stores = &stores.0.get(&0).unwrap();
let slot_1_stores = &stores.0.get(&1).unwrap();
assert_eq!(slot_0_stores.len(), 1);
assert_eq!(slot_1_stores.len(), 1);
assert_eq!(slot_0_stores[&0].count(), 2);
assert_eq!(slot_1_stores[&1].count(), 2);
assert_eq!(slot_0_stores[&0].approx_stored_count(), 2);
assert_eq!(slot_1_stores[&1].approx_stored_count(), 2);
}
// overwrite old rooted account version; only the slot_0_stores.count() should be
// decremented
db.store(2, &[(&pubkeys[0], &account)]);
{
let stores = db.storage.read().unwrap();
let slot_0_stores = &stores.0.get(&0).unwrap();
let slot_1_stores = &stores.0.get(&1).unwrap();
assert_eq!(slot_0_stores.len(), 1);
assert_eq!(slot_1_stores.len(), 1);
assert_eq!(slot_0_stores[&0].count(), 1);
assert_eq!(slot_1_stores[&1].count(), 2);
assert_eq!(slot_0_stores[&0].approx_stored_count(), 2);
assert_eq!(slot_1_stores[&1].approx_stored_count(), 2);
}
}
#[test]
fn test_accounts_unsquashed() {
let key = Pubkey::default();
// 1 token in the "root", i.e. db zero
let db0 = AccountsDB::new(Vec::new());
let account0 = Account::new(1, 0, &key);
db0.store(0, &[(&key, &account0)]);
// 0 lamports in the child
let account1 = Account::new(0, 0, &key);
db0.store(1, &[(&key, &account1)]);
// Masking accounts is done at the Accounts level; at the AccountsDB level
// we see the original account.
let ancestors = vec![(0, 0), (1, 1)].into_iter().collect();
assert_eq!(db0.load_slow(&ancestors, &key), Some((account1, 1)));
let ancestors = vec![(0, 0)].into_iter().collect();
assert_eq!(db0.load_slow(&ancestors, &key), Some((account0, 0)));
}
#[test]
fn test_remove_unrooted_slot() {
let unrooted_slot = 9;
let db = AccountsDB::new(Vec::new());
let key = Pubkey::default();
let account0 = Account::new(1, 0, &key);
let ancestors: HashMap<_, _> = vec![(unrooted_slot, 1)].into_iter().collect();
db.store(unrooted_slot, &[(&key, &account0)]);
db.bank_hashes
.write()
.unwrap()
.insert(unrooted_slot, BankHashInfo::default());
assert!(db
.accounts_index
.read()
.unwrap()
.get(&key, Some(&ancestors))
.is_some());
assert_load_account(&db, unrooted_slot, key, 1);
// Purge the slot
db.remove_unrooted_slot(unrooted_slot);
assert!(db.load_slow(&ancestors, &key).is_none());
assert!(db.bank_hashes.read().unwrap().get(&unrooted_slot).is_none());
assert!(db.storage.read().unwrap().0.get(&unrooted_slot).is_none());
assert!(db
.accounts_index
.read()
.unwrap()
.account_maps
.get(&key)
.map(|pubkey_entry| pubkey_entry.1.read().unwrap().is_empty())
.unwrap_or(true));
assert!(db
.accounts_index
.read()
.unwrap()
.get(&key, Some(&ancestors))
.is_none());
// Test we can store for the same slot again and get the right information
let account0 = Account::new(2, 0, &key);
db.store(unrooted_slot, &[(&key, &account0)]);
assert_load_account(&db, unrooted_slot, key, 2);
}
#[test]
fn test_remove_unrooted_slot_snapshot() {
let unrooted_slot = 9;
let db = AccountsDB::new(Vec::new());
let key = Pubkey::new_rand();
let account0 = Account::new(1, 0, &key);
db.store(unrooted_slot, &[(&key, &account0)]);
// Purge the slot
db.remove_unrooted_slot(unrooted_slot);
// Add a new root
let key2 = Pubkey::new_rand();
let new_root = unrooted_slot + 1;
db.store(new_root, &[(&key2, &account0)]);
db.add_root(new_root);
// Simulate reconstruction from snapshot
let db = reconstruct_accounts_db_via_serialization(&db, new_root);
// Check root account exists
assert_load_account(&db, new_root, key2, 1);
// Check purged account stays gone
let unrooted_slot_ancestors: HashMap<_, _> = vec![(unrooted_slot, 1)].into_iter().collect();
assert!(db.load_slow(&unrooted_slot_ancestors, &key).is_none());
}
fn create_account(
accounts: &AccountsDB,
pubkeys: &mut Vec<Pubkey>,
slot: Slot,
num: usize,
space: usize,
num_vote: usize,
) {
let ancestors = vec![(slot, 0)].into_iter().collect();
for t in 0..num {
let pubkey = Pubkey::new_rand();
let account = Account::new((t + 1) as u64, space, &Account::default().owner);
pubkeys.push(pubkey);
assert!(accounts.load_slow(&ancestors, &pubkey).is_none());
accounts.store(slot, &[(&pubkey, &account)]);
}
for t in 0..num_vote {
let pubkey = Pubkey::new_rand();
let account = Account::new((num + t + 1) as u64, space, &solana_vote_program::id());
pubkeys.push(pubkey);
let ancestors = vec![(slot, 0)].into_iter().collect();
assert!(accounts.load_slow(&ancestors, &pubkey).is_none());
accounts.store(slot, &[(&pubkey, &account)]);
}
}
fn update_accounts(accounts: &AccountsDB, pubkeys: &[Pubkey], slot: Slot, range: usize) {
for _ in 1..1000 {
let idx = thread_rng().gen_range(0, range);
let ancestors = vec![(slot, 0)].into_iter().collect();
if let Some((mut account, _)) = accounts.load_slow(&ancestors, &pubkeys[idx]) {
account.lamports += 1;
accounts.store(slot, &[(&pubkeys[idx], &account)]);
if account.lamports == 0 {
let ancestors = vec![(slot, 0)].into_iter().collect();
assert!(accounts.load_slow(&ancestors, &pubkeys[idx]).is_none());
} else {
let mut default_account = Account::default();
default_account.lamports = account.lamports;
assert_eq!(default_account, account);
}
}
}
}
fn check_storage(accounts: &AccountsDB, slot: Slot, count: usize) -> bool {
let storage = accounts.storage.read().unwrap();
assert_eq!(storage.0[&slot].len(), 1);
let slot_storage = storage.0.get(&slot).unwrap();
let mut total_count: usize = 0;
for store in slot_storage.values() {
assert_eq!(store.status(), AccountStorageStatus::Available);
total_count += store.count();
}
assert_eq!(total_count, count);
let (expected_store_count, actual_store_count): (usize, usize) = (
slot_storage.values().map(|s| s.approx_stored_count()).sum(),
slot_storage
.values()
.map(|s| s.accounts.accounts(0).len())
.sum(),
);
assert_eq!(expected_store_count, actual_store_count);
total_count == count
}
fn check_accounts(
accounts: &AccountsDB,
pubkeys: &[Pubkey],
slot: Slot,
num: usize,
count: usize,
) {
let ancestors = vec![(slot, 0)].into_iter().collect();
for _ in 0..num {
let idx = thread_rng().gen_range(0, num);
let account = accounts.load_slow(&ancestors, &pubkeys[idx]);
let account1 = Some((
Account::new((idx + count) as u64, 0, &Account::default().owner),
slot,
));
assert_eq!(account, account1);
}
}
#[allow(clippy::needless_range_loop)]
fn modify_accounts(
accounts: &AccountsDB,
pubkeys: &[Pubkey],
slot: Slot,
num: usize,
count: usize,
) {
for idx in 0..num {
let account = Account::new((idx + count) as u64, 0, &Account::default().owner);
accounts.store(slot, &[(&pubkeys[idx], &account)]);
}
}
#[test]
fn test_account_one() {
let (_accounts_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let db = AccountsDB::new(paths);
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 1, 0, 0);
let ancestors = vec![(0, 0)].into_iter().collect();
let account = db.load_slow(&ancestors, &pubkeys[0]).unwrap();
let mut default_account = Account::default();
default_account.lamports = 1;
assert_eq!((default_account, 0), account);
}
#[test]
fn test_account_many() {
let (_accounts_dirs, paths) = get_temp_accounts_paths(2).unwrap();
let db = AccountsDB::new(paths);
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 100, 0, 0);
check_accounts(&db, &pubkeys, 0, 100, 1);
}
#[test]
fn test_account_update() {
let accounts = AccountsDB::new_single();
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&accounts, &mut pubkeys, 0, 100, 0, 0);
update_accounts(&accounts, &pubkeys, 0, 99);
assert_eq!(check_storage(&accounts, 0, 100), true);
}
#[test]
fn test_account_grow_many() {
let (_accounts_dir, paths) = get_temp_accounts_paths(2).unwrap();
let size = 4096;
let accounts = AccountsDB::new_sized(paths, size);
let mut keys = vec![];
for i in 0..9 {
let key = Pubkey::new_rand();
let account = Account::new(i + 1, size as usize / 4, &key);
accounts.store(0, &[(&key, &account)]);
keys.push(key);
}
let ancestors = vec![(0, 0)].into_iter().collect();
for (i, key) in keys.iter().enumerate() {
assert_eq!(
accounts.load_slow(&ancestors, &key).unwrap().0.lamports,
(i as u64) + 1
);
}
let mut append_vec_histogram = HashMap::new();
for storage in accounts
.storage
.read()
.unwrap()
.0
.values()
.flat_map(|x| x.values())
{
*append_vec_histogram.entry(storage.slot).or_insert(0) += 1;
}
for count in append_vec_histogram.values() {
assert!(*count >= 2);
}
}
#[test]
fn test_account_grow() {
let accounts = AccountsDB::new_single();
let count = [0, 1];
let status = [AccountStorageStatus::Available, AccountStorageStatus::Full];
let pubkey1 = Pubkey::new_rand();
let account1 = Account::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey1);
accounts.store(0, &[(&pubkey1, &account1)]);
{
let stores = accounts.storage.read().unwrap();
assert_eq!(stores.0.len(), 1);
assert_eq!(stores.0[&0][&0].count(), 1);
assert_eq!(stores.0[&0][&0].status(), AccountStorageStatus::Available);
}
let pubkey2 = Pubkey::new_rand();
let account2 = Account::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey2);
accounts.store(0, &[(&pubkey2, &account2)]);
{
let stores = accounts.storage.read().unwrap();
assert_eq!(stores.0.len(), 1);
assert_eq!(stores.0[&0].len(), 2);
assert_eq!(stores.0[&0][&0].count(), 1);
assert_eq!(stores.0[&0][&0].status(), AccountStorageStatus::Full);
assert_eq!(stores.0[&0][&1].count(), 1);
assert_eq!(stores.0[&0][&1].status(), AccountStorageStatus::Available);
}
let ancestors = vec![(0, 0)].into_iter().collect();
assert_eq!(
accounts.load_slow(&ancestors, &pubkey1).unwrap().0,
account1
);
assert_eq!(
accounts.load_slow(&ancestors, &pubkey2).unwrap().0,
account2
);
// lots of stores, but 3 storages should be enough for everything
for i in 0..25 {
let index = i % 2;
accounts.store(0, &[(&pubkey1, &account1)]);
{
let stores = accounts.storage.read().unwrap();
assert_eq!(stores.0.len(), 1);
assert_eq!(stores.0[&0].len(), 3);
assert_eq!(stores.0[&0][&0].count(), count[index]);
assert_eq!(stores.0[&0][&0].status(), status[0]);
assert_eq!(stores.0[&0][&1].count(), 1);
assert_eq!(stores.0[&0][&1].status(), status[1]);
assert_eq!(stores.0[&0][&2].count(), count[index ^ 1]);
assert_eq!(stores.0[&0][&2].status(), status[0]);
}
let ancestors = vec![(0, 0)].into_iter().collect();
assert_eq!(
accounts.load_slow(&ancestors, &pubkey1).unwrap().0,
account1
);
assert_eq!(
accounts.load_slow(&ancestors, &pubkey2).unwrap().0,
account2
);
}
}
#[test]
fn test_purge_slot_not_root() {
let accounts = AccountsDB::new(Vec::new());
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&accounts, &mut pubkeys, 0, 1, 0, 0);
let ancestors = vec![(0, 0)].into_iter().collect();
assert!(accounts.load_slow(&ancestors, &pubkeys[0]).is_some());
accounts.purge_slot(0);
assert!(accounts.load_slow(&ancestors, &pubkeys[0]).is_none());
}
#[test]
fn test_purge_slot_after_root() {
let accounts = AccountsDB::new(Vec::new());
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&accounts, &mut pubkeys, 0, 1, 0, 0);
let ancestors = vec![(0, 0)].into_iter().collect();
accounts.add_root(0);
accounts.purge_slot(0);
assert!(accounts.load_slow(&ancestors, &pubkeys[0]).is_some());
}
#[test]
fn test_lazy_gc_slot() {
solana_logger::setup();
// This test is pedantic.
// A slot is purged when a non-root bank is cleaned up. If a slot is behind
// the root but is not itself a root, it means we are retaining dead banks.
let accounts = AccountsDB::new(Vec::new());
let pubkey = Pubkey::new_rand();
let account = Account::new(1, 0, &Account::default().owner);
//store an account
accounts.store(0, &[(&pubkey, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
let id = {
let index = accounts.accounts_index.read().unwrap();
let (list, idx) = index.get(&pubkey, Some(&ancestors)).unwrap();
list[idx].1.store_id
};
accounts.add_root(1);
//slot is still there, since gc is lazy
assert!(accounts.storage.read().unwrap().0[&0].get(&id).is_some());
//store causes clean
accounts.store(1, &[(&pubkey, &account)]);
//slot is gone
accounts.print_accounts_stats("pre-clean");
accounts.clean_accounts();
accounts.process_dead_slots(None);
assert!(accounts.storage.read().unwrap().0.get(&0).is_none());
//new value is there
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(accounts.load_slow(&ancestors, &pubkey), Some((account, 1)));
}
impl AccountsDB {
fn alive_account_count_in_store(&self, slot: Slot) -> usize {
let storage = self.storage.read().unwrap();
let slot_storage = storage.0.get(&slot);
if let Some(slot_storage) = slot_storage {
slot_storage.values().map(|store| store.count()).sum()
} else {
0
}
}
fn all_account_count_in_append_vec(&self, slot: Slot) -> usize {
let storage = self.storage.read().unwrap();
let slot_storage = storage.0.get(&slot);
if let Some(slot_storage) = slot_storage {
let count = slot_storage
.values()
.map(|store| store.accounts.accounts(0).len())
.sum();
let stored_count: usize = slot_storage
.values()
.map(|store| store.approx_stored_count())
.sum();
assert_eq!(stored_count, count);
count
} else {
0
}
}
fn ref_count_for_pubkey(&self, pubkey: &Pubkey) -> RefCount {
self.accounts_index
.read()
.unwrap()
.ref_count_from_storage(&pubkey)
}
fn uncleaned_root_count(&self) -> usize {
self.accounts_index.read().unwrap().uncleaned_roots.len()
}
}
#[test]
fn test_clean_old_with_normal_account() {
solana_logger::setup();
let accounts = AccountsDB::new(Vec::new());
let pubkey = Pubkey::new_rand();
let account = Account::new(1, 0, &Account::default().owner);
//store an account
accounts.store(0, &[(&pubkey, &account)]);
accounts.store(1, &[(&pubkey, &account)]);
// simulate slots are rooted after while
accounts.add_root(0);
accounts.add_root(1);
//even if rooted, old state isn't cleaned up
assert_eq!(accounts.alive_account_count_in_store(0), 1);
assert_eq!(accounts.alive_account_count_in_store(1), 1);
accounts.clean_accounts();
//now old state is cleaned up
assert_eq!(accounts.alive_account_count_in_store(0), 0);
assert_eq!(accounts.alive_account_count_in_store(1), 1);
}
#[test]
fn test_clean_old_with_zero_lamport_account() {
solana_logger::setup();
let accounts = AccountsDB::new(Vec::new());
let pubkey1 = Pubkey::new_rand();
let pubkey2 = Pubkey::new_rand();
let normal_account = Account::new(1, 0, &Account::default().owner);
let zero_account = Account::new(0, 0, &Account::default().owner);
//store an account
accounts.store(0, &[(&pubkey1, &normal_account)]);
accounts.store(1, &[(&pubkey1, &zero_account)]);
accounts.store(0, &[(&pubkey2, &normal_account)]);
accounts.store(1, &[(&pubkey2, &normal_account)]);
//simulate slots are rooted after while
accounts.add_root(0);
accounts.add_root(1);
//even if rooted, old state isn't cleaned up
assert_eq!(accounts.alive_account_count_in_store(0), 2);
assert_eq!(accounts.alive_account_count_in_store(1), 2);
accounts.clean_accounts();
//still old state behind zero-lamport account isn't cleaned up
assert_eq!(accounts.alive_account_count_in_store(0), 1);
assert_eq!(accounts.alive_account_count_in_store(1), 2);
}
#[test]
fn test_clean_old_with_both_normal_and_zero_lamport_accounts() {
solana_logger::setup();
let accounts = AccountsDB::new(Vec::new());
let pubkey1 = Pubkey::new_rand();
let pubkey2 = Pubkey::new_rand();
let normal_account = Account::new(1, 0, &Account::default().owner);
let zero_account = Account::new(0, 0, &Account::default().owner);
//store an account
accounts.store(0, &[(&pubkey1, &normal_account)]);
accounts.store(1, &[(&pubkey1, &zero_account)]);
accounts.store(0, &[(&pubkey2, &normal_account)]);
accounts.store(2, &[(&pubkey2, &normal_account)]);
//simulate slots are rooted after while
accounts.add_root(0);
accounts.add_root(1);
accounts.add_root(2);
//even if rooted, old state isn't cleaned up
assert_eq!(accounts.alive_account_count_in_store(0), 2);
assert_eq!(accounts.alive_account_count_in_store(1), 1);
assert_eq!(accounts.alive_account_count_in_store(2), 1);
accounts.clean_accounts();
//both zero lamport and normal accounts are cleaned up
assert_eq!(accounts.alive_account_count_in_store(0), 0);
assert_eq!(accounts.alive_account_count_in_store(1), 0);
assert_eq!(accounts.alive_account_count_in_store(2), 1);
}
#[test]
fn test_uncleaned_roots_with_account() {
solana_logger::setup();
let accounts = AccountsDB::new(Vec::new());
let pubkey = Pubkey::new_rand();
let account = Account::new(1, 0, &Account::default().owner);
//store an account
accounts.store(0, &[(&pubkey, &account)]);
assert_eq!(accounts.uncleaned_root_count(), 0);
// simulate slots are rooted after while
accounts.add_root(0);
assert_eq!(accounts.uncleaned_root_count(), 1);
//now uncleaned roots are cleaned up
accounts.clean_accounts();
assert_eq!(accounts.uncleaned_root_count(), 0);
}
#[test]
fn test_uncleaned_roots_with_no_account() {
solana_logger::setup();
let accounts = AccountsDB::new(Vec::new());
assert_eq!(accounts.uncleaned_root_count(), 0);
// simulate slots are rooted after while
accounts.add_root(0);
assert_eq!(accounts.uncleaned_root_count(), 1);
//now uncleaned roots are cleaned up
accounts.clean_accounts();
assert_eq!(accounts.uncleaned_root_count(), 0);
}
#[test]
fn test_accounts_db_serialize1() {
solana_logger::setup();
let accounts = AccountsDB::new_single();
let mut pubkeys: Vec<Pubkey> = vec![];
// Create 100 accounts in slot 0
create_account(&accounts, &mut pubkeys, 0, 100, 0, 0);
assert!(check_storage(&accounts, 0, 100));
check_accounts(&accounts, &pubkeys, 0, 100, 1);
// do some updates to those accounts and re-check
modify_accounts(&accounts, &pubkeys, 0, 100, 2);
check_accounts(&accounts, &pubkeys, 0, 100, 2);
accounts.add_root(0);
let mut pubkeys1: Vec<Pubkey> = vec![];
let latest_slot = 1;
// Modify the first 10 of the slot 0 accounts as updates in slot 1
modify_accounts(&accounts, &pubkeys, latest_slot, 10, 3);
// Create 10 new accounts in slot 1
create_account(&accounts, &mut pubkeys1, latest_slot, 10, 0, 0);
// Store a lamports=0 account in slot 1
let account = Account::new(0, 0, &Account::default().owner);
accounts.store(latest_slot, &[(&pubkeys[30], &account)]);
accounts.add_root(latest_slot);
info!("added root 1");
let latest_slot = 2;
let mut pubkeys2: Vec<Pubkey> = vec![];
// Modify original slot 0 accounts in slot 2
modify_accounts(&accounts, &pubkeys, latest_slot, 20, 4);
// Create 10 new accounts in slot 2
create_account(&accounts, &mut pubkeys2, latest_slot, 10, 0, 0);
// Store a lamports=0 account in slot 2
let account = Account::new(0, 0, &Account::default().owner);
accounts.store(latest_slot, &[(&pubkeys[31], &account)]);
accounts.add_root(latest_slot);
assert!(check_storage(&accounts, 0, 90));
assert!(check_storage(&accounts, 1, 21));
assert!(check_storage(&accounts, 2, 31));
let daccounts = reconstruct_accounts_db_via_serialization(&accounts, latest_slot);
assert_eq!(
daccounts.write_version.load(Ordering::Relaxed),
accounts.write_version.load(Ordering::Relaxed)
);
assert_eq!(
daccounts.next_id.load(Ordering::Relaxed),
accounts.next_id.load(Ordering::Relaxed)
);
// Get the hash for the latest slot, which should be present in the
// bank_hashes map on the deserialized AccountsDb
assert_eq!(daccounts.bank_hashes.read().unwrap().len(), 2);
assert_eq!(
daccounts.bank_hashes.read().unwrap().get(&latest_slot),
accounts.bank_hashes.read().unwrap().get(&latest_slot)
);
daccounts.print_count_and_status("daccounts");
// Don't check the first 35 accounts, which have been modified in later slots
check_accounts(&daccounts, &pubkeys[35..], 0, 65, 37);
check_accounts(&daccounts, &pubkeys1, 1, 10, 1);
assert!(check_storage(&daccounts, 0, 100));
assert!(check_storage(&daccounts, 1, 21));
assert!(check_storage(&daccounts, 2, 31));
let ancestors = linear_ancestors(latest_slot);
assert_eq!(
daccounts.update_accounts_hash(latest_slot, &ancestors),
accounts.update_accounts_hash(latest_slot, &ancestors)
);
}
fn assert_load_account(
accounts: &AccountsDB,
slot: Slot,
pubkey: Pubkey,
expected_lamports: u64,
) {
let ancestors = vec![(slot, 0)].into_iter().collect();
let (account, _slot) = accounts.load_slow(&ancestors, &pubkey).unwrap();
assert_eq!(account.lamports, expected_lamports);
}
fn assert_not_load_account(accounts: &AccountsDB, slot: Slot, pubkey: Pubkey) {
let ancestors = vec![(slot, 0)].into_iter().collect();
assert!(accounts.load_slow(&ancestors, &pubkey).is_none());
}
fn reconstruct_accounts_db_via_serialization(accounts: &AccountsDB, slot: Slot) -> AccountsDB {
let daccounts =
crate::serde_snapshot::reconstruct_accounts_db_via_serialization(accounts, slot);
daccounts.print_count_and_status("daccounts");
daccounts
}
fn assert_no_stores(accounts: &AccountsDB, slot: Slot) {
let stores = accounts.storage.read().unwrap();
info!("{:?}", stores.0.get(&slot));
assert!(stores.0.get(&slot).is_none() || stores.0.get(&slot).unwrap().is_empty());
}
#[test]
fn test_accounts_db_purge_keep_live() {
solana_logger::setup();
let some_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let owner = Account::default().owner;
let account = Account::new(some_lamport, no_data, &owner);
let pubkey = Pubkey::new_rand();
let account2 = Account::new(some_lamport, no_data, &owner);
let pubkey2 = Pubkey::new_rand();
let zero_lamport_account = Account::new(zero_lamport, no_data, &owner);
let accounts = AccountsDB::new_single();
accounts.add_root(0);
let mut current_slot = 1;
accounts.store(current_slot, &[(&pubkey, &account)]);
// Store another live account to slot 1 which will prevent any purge
// since the store count will not be zero
accounts.store(current_slot, &[(&pubkey2, &account2)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store(current_slot, &[(&pubkey, &zero_lamport_account)]);
accounts.add_root(current_slot);
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
current_slot += 1;
accounts.add_root(current_slot);
accounts.print_accounts_stats("pre_purge");
accounts.clean_accounts();
accounts.print_accounts_stats("post_purge");
// Make sure the index is not touched
assert_eq!(
accounts
.accounts_index
.read()
.unwrap()
.account_maps
.get(&pubkey)
.unwrap()
.1
.read()
.unwrap()
.len(),
2
);
// slot 1 & 2 should have stores
check_storage(&accounts, 1, 2);
check_storage(&accounts, 2, 1);
}
#[test]
fn test_accounts_db_purge1() {
solana_logger::setup();
let some_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let owner = Account::default().owner;
let account = Account::new(some_lamport, no_data, &owner);
let pubkey = Pubkey::new_rand();
let zero_lamport_account = Account::new(zero_lamport, no_data, &owner);
let accounts = AccountsDB::new_single();
accounts.add_root(0);
let mut current_slot = 1;
accounts.set_hash(current_slot, current_slot - 1);
accounts.store(current_slot, &[(&pubkey, &account)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.set_hash(current_slot, current_slot - 1);
accounts.store(current_slot, &[(&pubkey, &zero_lamport_account)]);
accounts.add_root(current_slot);
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
// Otherwise slot 2 will not be removed
current_slot += 1;
accounts.set_hash(current_slot, current_slot - 1);
accounts.add_root(current_slot);
accounts.print_accounts_stats("pre_purge");
let ancestors = linear_ancestors(current_slot);
info!("ancestors: {:?}", ancestors);
let hash = accounts.update_accounts_hash(current_slot, &ancestors);
accounts.clean_accounts();
accounts.process_dead_slots(None);
assert_eq!(
accounts.update_accounts_hash(current_slot, &ancestors),
hash
);
accounts.print_accounts_stats("post_purge");
// Make sure the index is for pubkey cleared
assert!(accounts
.accounts_index
.read()
.unwrap()
.account_maps
.get(&pubkey)
.is_none());
// slot 1 & 2 should not have any stores
assert_no_stores(&accounts, 1);
assert_no_stores(&accounts, 2);
}
#[test]
fn test_accounts_db_serialize_zero_and_free() {
solana_logger::setup();
let some_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let owner = Account::default().owner;
let account = Account::new(some_lamport, no_data, &owner);
let pubkey = Pubkey::new_rand();
let zero_lamport_account = Account::new(zero_lamport, no_data, &owner);
let account2 = Account::new(some_lamport + 1, no_data, &owner);
let pubkey2 = Pubkey::new_rand();
let filler_account = Account::new(some_lamport, no_data, &owner);
let filler_account_pubkey = Pubkey::new_rand();
let accounts = AccountsDB::new_single();
let mut current_slot = 1;
accounts.store(current_slot, &[(&pubkey, &account)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store(current_slot, &[(&pubkey, &zero_lamport_account)]);
accounts.store(current_slot, &[(&pubkey2, &account2)]);
// Store enough accounts such that an additional store for slot 2 is created.
while accounts
.storage
.read()
.unwrap()
.0
.get(¤t_slot)
.unwrap()
.len()
< 2
{
accounts.store(current_slot, &[(&filler_account_pubkey, &filler_account)]);
}
accounts.add_root(current_slot);
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
accounts.print_accounts_stats("accounts");
accounts.clean_accounts();
accounts.print_accounts_stats("accounts_post_purge");
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts.print_accounts_stats("reconstructed");
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
}
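// Sets up a chain of zero-lamport overwrites across several rooted slots,
// lets `f` clean and/or reconstruct the db, then verifies that every
// account still loads with the expected balance and the bank hash verifies.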
fn with_chained_zero_lamport_accounts<F>(f: F)
where
F: Fn(AccountsDB, Slot) -> AccountsDB,
{
let some_lamport = 223;
let zero_lamport = 0;
let dummy_lamport = 999;
let no_data = 0;
let owner = Account::default().owner;
let account = Account::new(some_lamport, no_data, &owner);
let account2 = Account::new(some_lamport + 100_001, no_data, &owner);
let account3 = Account::new(some_lamport + 100_002, no_data, &owner);
let zero_lamport_account = Account::new(zero_lamport, no_data, &owner);
let pubkey = Pubkey::new_rand();
let purged_pubkey1 = Pubkey::new_rand();
let purged_pubkey2 = Pubkey::new_rand();
let dummy_account = Account::new(dummy_lamport, no_data, &owner);
let dummy_pubkey = Pubkey::default();
let accounts = AccountsDB::new_single();
let mut current_slot = 1;
accounts.store(current_slot, &[(&pubkey, &account)]);
accounts.store(current_slot, &[(&purged_pubkey1, &account2)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store(current_slot, &[(&purged_pubkey1, &zero_lamport_account)]);
accounts.store(current_slot, &[(&purged_pubkey2, &account3)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store(current_slot, &[(&purged_pubkey2, &zero_lamport_account)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store(current_slot, &[(&dummy_pubkey, &dummy_account)]);
accounts.add_root(current_slot);
accounts.print_accounts_stats("pre_f");
accounts.update_accounts_hash(4, &HashMap::default());
let accounts = f(accounts, current_slot);
accounts.print_accounts_stats("post_f");
assert_load_account(&accounts, current_slot, pubkey, some_lamport);
assert_load_account(&accounts, current_slot, purged_pubkey1, 0);
assert_load_account(&accounts, current_slot, purged_pubkey2, 0);
assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport);
accounts.verify_bank_hash(4, &HashMap::default()).unwrap();
}
#[test]
fn test_accounts_purge_chained_purge_before_snapshot_restore() {
solana_logger::setup();
with_chained_zero_lamport_accounts(|accounts, current_slot| {
accounts.clean_accounts();
reconstruct_accounts_db_via_serialization(&accounts, current_slot)
});
}
#[test]
fn test_accounts_purge_chained_purge_after_snapshot_restore() {
solana_logger::setup();
with_chained_zero_lamport_accounts(|accounts, current_slot| {
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts.print_accounts_stats("after_reconstruct");
accounts.clean_accounts();
reconstruct_accounts_db_via_serialization(&accounts, current_slot)
});
}
#[test]
#[ignore]
fn test_store_account_stress() {
let slot = 42;
let num_threads = 2;
let min_file_bytes = std::mem::size_of::<StoredMeta>()
+ std::mem::size_of::<crate::append_vec::AccountMeta>();
let db = Arc::new(AccountsDB::new_sized(Vec::new(), min_file_bytes as u64));
db.add_root(slot);
let thread_hdls: Vec<_> = (0..num_threads)
.map(|_| {
let db = db.clone();
std::thread::Builder::new()
.name("account-writers".to_string())
.spawn(move || {
let pubkey = Pubkey::new_rand();
let mut account = Account::new(1, 0, &pubkey);
let mut i = 0;
loop {
let account_bal = thread_rng().gen_range(1, 99);
account.lamports = account_bal;
db.store(slot, &[(&pubkey, &account)]);
let (account, account_slot) =
db.load_slow(&HashMap::new(), &pubkey).unwrap_or_else(|| {
panic!("Could not fetch stored account {}, iter {}", pubkey, i)
});
assert_eq!(account_slot, slot);
assert_eq!(account.lamports, account_bal);
i += 1;
}
})
.unwrap()
})
.collect();
for t in thread_hdls {
t.join().unwrap();
}
}
#[test]
fn test_accountsdb_scan_accounts() {
solana_logger::setup();
let db = AccountsDB::new(Vec::new());
let key = Pubkey::default();
let key0 = Pubkey::new_rand();
let account0 = Account::new(1, 0, &key);
db.store(0, &[(&key0, &account0)]);
let key1 = Pubkey::new_rand();
let account1 = Account::new(2, 0, &key);
db.store(1, &[(&key1, &account1)]);
let ancestors = vec![(0, 0)].into_iter().collect();
let accounts: Vec<Account> =
db.scan_accounts(&ancestors, |accounts: &mut Vec<Account>, option| {
if let Some(data) = option {
accounts.push(data.1);
}
});
assert_eq!(accounts, vec![account0]);
let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
let accounts: Vec<Account> =
db.scan_accounts(&ancestors, |accounts: &mut Vec<Account>, option| {
if let Some(data) = option {
accounts.push(data.1);
}
});
assert_eq!(accounts.len(), 2);
}
#[test]
fn test_cleanup_key_not_removed() {
solana_logger::setup();
let db = AccountsDB::new_single();
let key = Pubkey::default();
let key0 = Pubkey::new_rand();
let account0 = Account::new(1, 0, &key);
db.store(0, &[(&key0, &account0)]);
let key1 = Pubkey::new_rand();
let account1 = Account::new(2, 0, &key);
db.store(1, &[(&key1, &account1)]);
db.print_accounts_stats("pre");
let slots: HashSet<Slot> = HashSet::from_iter(vec![1].into_iter());
let purge_keys = vec![(key1, slots)];
let (_reclaims, dead_keys) = db.purge_keys_exact(purge_keys);
let account2 = Account::new(3, 0, &key);
db.store(2, &[(&key1, &account2)]);
db.handle_dead_keys(dead_keys);
db.print_accounts_stats("post");
let ancestors = vec![(2, 0)].into_iter().collect();
assert_eq!(db.load_slow(&ancestors, &key1).unwrap().0.lamports, 3);
}
#[test]
fn test_store_large_account() {
solana_logger::setup();
let db = AccountsDB::new(Vec::new());
let key = Pubkey::default();
let data_len = DEFAULT_FILE_SIZE as usize + 7;
let account = Account::new(1, data_len, &key);
db.store(0, &[(&key, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
let ret = db.load_slow(&ancestors, &key).unwrap();
assert_eq!(ret.0.data.len(), data_len);
}
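/// Copies every AppendVec file backing `accounts_db`'s snapshot storages
/// into `output_dir`, keeping the original file names.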
pub fn copy_append_vecs<P: AsRef<Path>>(
accounts_db: &AccountsDB,
output_dir: P,
) -> IOResult<()> {
let storage_entries = accounts_db.get_snapshot_storages(Slot::max_value());
for storage in storage_entries.iter().flatten() {
let storage_path = storage.get_path();
let output_path = output_dir.as_ref().join(
storage_path
.file_name()
.expect("Invalid AppendVec file path"),
);
fs::copy(storage_path, output_path)?;
}
Ok(())
}
#[test]
fn test_hash_frozen_account_data() {
let account = Account::new(1, 42, &Pubkey::default());
let hash = AccountsDB::hash_frozen_account_data(&account);
assert_ne!(hash, Hash::default()); // Better not be the default Hash
// Lamports changes do not affect the hash
let mut account_modified = account.clone();
account_modified.lamports -= 1;
assert_eq!(
hash,
AccountsDB::hash_frozen_account_data(&account_modified)
);
// Rent epoch changes do not affect the hash
let mut account_modified = account.clone();
account_modified.rent_epoch += 1;
assert_eq!(
hash,
AccountsDB::hash_frozen_account_data(&account_modified)
);
// Account data may not be modified
let mut account_modified = account.clone();
account_modified.data[0] = 42;
assert_ne!(
hash,
AccountsDB::hash_frozen_account_data(&account_modified)
);
// Owner may not be modified
let mut account_modified = account.clone();
account_modified.owner =
Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
assert_ne!(
hash,
AccountsDB::hash_frozen_account_data(&account_modified)
);
// Executable may not be modified
let mut account_modified = account;
account_modified.executable = true;
assert_ne!(
hash,
AccountsDB::hash_frozen_account_data(&account_modified)
);
}
#[test]
fn test_frozen_account_lamport_increase() {
let frozen_pubkey =
Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
let mut db = AccountsDB::new(Vec::new());
let mut account = Account::new(1, 42, &frozen_pubkey);
db.store(0, &[(&frozen_pubkey, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
db.freeze_accounts(&ancestors, &[frozen_pubkey]);
// Store with no account changes is ok
db.store(0, &[(&frozen_pubkey, &account)]);
// Store with an increase in lamports is ok
account.lamports = 2;
db.store(0, &[(&frozen_pubkey, &account)]);
// Store with a decrease that does not go below the frozen amount of lamports is tolerated
account.lamports = 1;
db.store(0, &[(&frozen_pubkey, &account)]);
// A store of any value at or above the frozen value of '1' across different slots is also ok
account.lamports = 3;
db.store(1, &[(&frozen_pubkey, &account)]);
account.lamports = 2;
db.store(2, &[(&frozen_pubkey, &account)]);
account.lamports = 1;
db.store(3, &[(&frozen_pubkey, &account)]);
}
#[test]
#[should_panic(
expected = "Frozen account My11111111111111111111111111111111111111111 modified. Lamports decreased from 1 to 0"
)]
fn test_frozen_account_lamport_decrease() {
let frozen_pubkey =
Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
let mut db = AccountsDB::new(Vec::new());
let mut account = Account::new(1, 42, &frozen_pubkey);
db.store(0, &[(&frozen_pubkey, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
db.freeze_accounts(&ancestors, &[frozen_pubkey]);
// Store with a decrease below the frozen amount of lamports is not ok
account.lamports -= 1;
db.store(0, &[(&frozen_pubkey, &account)]);
}
#[test]
#[should_panic(
expected = "Unable to freeze an account that does not exist: My11111111111111111111111111111111111111111"
)]
fn test_frozen_account_nonexistent() {
let frozen_pubkey =
Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
let mut db = AccountsDB::new(Vec::new());
let ancestors = vec![(0, 0)].into_iter().collect();
db.freeze_accounts(&ancestors, &[frozen_pubkey]);
}
#[test]
#[should_panic(
expected = "Frozen account My11111111111111111111111111111111111111111 modified. Hash changed from 8wHcxDkjiwdrkPAsDnmNrF1UDGJFAtZzPQBSVweY3yRA to JdscGYB1uczVssmYuJusDD1Bfe6wpNeeho8XjcH8inN"
)]
fn test_frozen_account_data_modified() {
let frozen_pubkey =
Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
let mut db = AccountsDB::new(Vec::new());
let mut account = Account::new(1, 42, &frozen_pubkey);
db.store(0, &[(&frozen_pubkey, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
db.freeze_accounts(&ancestors, &[frozen_pubkey]);
account.data[0] = 42;
db.store(0, &[(&frozen_pubkey, &account)]);
}
#[test]
fn test_hash_stored_account() {
// This test uses an UNSAFE trick to detect most additions and deletions of
// account fields that happen without the hashing code being updated
const ACCOUNT_DATA_LEN: usize = 3;
// the type of InputTuple elements must not contain references;
// they should be simple scalars or data blobs
type InputTuple = (
Slot,
StoredMeta,
AccountMeta,
[u8; ACCOUNT_DATA_LEN],
usize, // for StoredAccount::offset
Hash,
);
const INPUT_LEN: usize = std::mem::size_of::<InputTuple>();
type InputBlob = [u8; INPUT_LEN];
let mut blob: InputBlob = [0u8; INPUT_LEN];
// spray memory with decreasing counts so that the data layout can be detected.
for (i, byte) in blob.iter_mut().enumerate() {
*byte = (INPUT_LEN - i) as u8;
}
//UNSAFE: forcibly cast the special byte pattern to actual account fields.
let (slot, meta, account_meta, data, offset, hash): InputTuple =
unsafe { std::mem::transmute::<InputBlob, InputTuple>(blob) };
let stored_account = StoredAccount {
meta: &meta,
account_meta: &account_meta,
data: &data,
offset,
hash: &hash,
};
let account = stored_account.clone_account();
let expected_account_hash =
Hash::from_str("5iRNZVcAnq9JLYjSF2ibFhGEeq48r9Eq9HXxwm3BxywN").unwrap();
assert_eq!(
AccountsDB::hash_stored_account(slot, &stored_account),
expected_account_hash,
"StoredAccount's data layout might be changed; update hashing if needed."
);
assert_eq!(
AccountsDB::hash_account(slot, &account, &stored_account.meta.pubkey),
expected_account_hash,
"Account-based hashing must be consistent with StoredAccount-based one."
);
}
#[test]
fn test_bank_hash_stats() {
solana_logger::setup();
let db = AccountsDB::new(Vec::new());
let key = Pubkey::default();
let some_data_len = 5;
let some_slot: Slot = 0;
let account = Account::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
db.store(some_slot, &[(&key, &account)]);
let mut account = db.load_slow(&ancestors, &key).unwrap().0;
account.lamports -= 1;
account.executable = true;
db.store(some_slot, &[(&key, &account)]);
db.add_root(some_slot);
let bank_hashes = db.bank_hashes.read().unwrap();
let bank_hash = bank_hashes.get(&some_slot).unwrap();
assert_eq!(bank_hash.stats.num_updated_accounts, 1);
assert_eq!(bank_hash.stats.num_removed_accounts, 1);
assert_eq!(bank_hash.stats.num_lamports_stored, 1);
assert_eq!(bank_hash.stats.total_data_len, 2 * some_data_len as u64);
assert_eq!(bank_hash.stats.num_executable_accounts, 1);
}
#[test]
fn test_verify_bank_hash() {
use BankHashVerificationError::*;
solana_logger::setup();
let db = AccountsDB::new(Vec::new());
let key = Pubkey::default();
let some_data_len = 0;
let some_slot: Slot = 0;
let account = Account::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
db.store(some_slot, &[(&key, &account)]);
db.add_root(some_slot);
db.update_accounts_hash(some_slot, &ancestors);
assert_matches!(db.verify_bank_hash(some_slot, &ancestors), Ok(_));
db.bank_hashes.write().unwrap().remove(&some_slot).unwrap();
assert_matches!(
db.verify_bank_hash(some_slot, &ancestors),
Err(MissingBankHash)
);
let some_bank_hash = Hash::new(&[0xca; HASH_BYTES]);
let bank_hash_info = BankHashInfo {
hash: some_bank_hash,
snapshot_hash: Hash::new(&[0xca; HASH_BYTES]),
stats: BankHashStats::default(),
};
db.bank_hashes
.write()
.unwrap()
.insert(some_slot, bank_hash_info);
assert_matches!(
db.verify_bank_hash(some_slot, &ancestors),
Err(MismatchedBankHash)
);
}
#[test]
fn test_verify_bank_hash_no_account() {
solana_logger::setup();
let db = AccountsDB::new(Vec::new());
let some_slot: Slot = 0;
let ancestors = vec![(some_slot, 0)].into_iter().collect();
db.bank_hashes
.write()
.unwrap()
.insert(some_slot, BankHashInfo::default());
db.add_root(some_slot);
db.update_accounts_hash(some_slot, &ancestors);
assert_matches!(db.verify_bank_hash(some_slot, &ancestors), Ok(_));
}
#[test]
fn test_verify_bank_hash_bad_account_hash() {
use BankHashVerificationError::*;
solana_logger::setup();
let db = AccountsDB::new(Vec::new());
let key = Pubkey::default();
let some_data_len = 0;
let some_slot: Slot = 0;
let account = Account::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
let accounts = &[(&key, &account)];
// update AccountsDB's bank hash but discard real account hashes
db.hash_accounts(some_slot, accounts);
// provide bogus account hashes
let some_hash = Hash::new(&[0xca; HASH_BYTES]);
db.store_with_hashes(some_slot, accounts, &[some_hash]);
db.add_root(some_slot);
assert_matches!(
db.verify_bank_hash(some_slot, &ancestors),
Err(MismatchedAccountHash)
);
}
#[test]
fn test_bad_bank_hash() {
use solana_sdk::signature::{Keypair, Signer};
let db = AccountsDB::new(Vec::new());
let some_slot: Slot = 0;
let ancestors: Ancestors = [(some_slot, 0)].iter().copied().collect();
for _ in 0..10_000 {
let num_accounts = thread_rng().gen_range(0, 100);
let accounts_keys: Vec<_> = (0..num_accounts)
.map(|_| {
let key = Keypair::new().pubkey();
let lamports = thread_rng().gen_range(0, 100);
let some_data_len = thread_rng().gen_range(0, 1000);
let account = Account::new(lamports, some_data_len, &key);
(key, account)
})
.collect();
let account_refs: Vec<_> = accounts_keys
.iter()
.map(|(key, account)| (key, account))
.collect();
db.store(some_slot, &account_refs);
for (key, account) in &accounts_keys {
assert_eq!(
db.load_account_hash(&ancestors, key),
AccountsDB::hash_account(some_slot, &account, &key)
);
}
}
}
#[test]
fn test_get_snapshot_storages_empty() {
let db = AccountsDB::new(Vec::new());
assert!(db.get_snapshot_storages(0).is_empty());
}
#[test]
fn test_get_snapshot_storages_only_older_than_or_equal_to_snapshot_slot() {
let db = AccountsDB::new(Vec::new());
let key = Pubkey::default();
let account = Account::new(1, 0, &key);
let before_slot = 0;
let base_slot = before_slot + 1;
let after_slot = base_slot + 1;
db.add_root(base_slot);
db.store(base_slot, &[(&key, &account)]);
assert!(db.get_snapshot_storages(before_slot).is_empty());
assert_eq!(1, db.get_snapshot_storages(base_slot).len());
assert_eq!(1, db.get_snapshot_storages(after_slot).len());
}
#[test]
fn test_get_snapshot_storages_only_non_empty() {
let db = AccountsDB::new(Vec::new());
let key = Pubkey::default();
let account = Account::new(1, 0, &key);
let base_slot = 0;
let after_slot = base_slot + 1;
db.store(base_slot, &[(&key, &account)]);
db.storage
.write()
.unwrap()
.0
.get_mut(&base_slot)
.unwrap()
.clear();
db.add_root(base_slot);
assert!(db.get_snapshot_storages(after_slot).is_empty());
db.store(base_slot, &[(&key, &account)]);
assert_eq!(1, db.get_snapshot_storages(after_slot).len());
}
#[test]
fn test_get_snapshot_storages_only_roots() {
let db = AccountsDB::new(Vec::new());
let key = Pubkey::default();
let account = Account::new(1, 0, &key);
let base_slot = 0;
let after_slot = base_slot + 1;
db.store(base_slot, &[(&key, &account)]);
assert!(db.get_snapshot_storages(after_slot).is_empty());
db.add_root(base_slot);
assert_eq!(1, db.get_snapshot_storages(after_slot).len());
}
#[test]
fn test_get_snapshot_storages_exclude_empty() {
let db = AccountsDB::new(Vec::new());
let key = Pubkey::default();
let account = Account::new(1, 0, &key);
let base_slot = 0;
let after_slot = base_slot + 1;
db.store(base_slot, &[(&key, &account)]);
db.add_root(base_slot);
assert_eq!(1, db.get_snapshot_storages(after_slot).len());
let storage = db.storage.read().unwrap();
storage.0[&0].values().next().unwrap().remove_account();
assert!(db.get_snapshot_storages(after_slot).is_empty());
}
#[test]
#[should_panic(expected = "double remove of account in slot: 0/store: 0!!")]
fn test_storage_remove_account_double_remove() {
let accounts = AccountsDB::new(Vec::new());
let pubkey = Pubkey::new_rand();
let account = Account::new(1, 0, &Account::default().owner);
accounts.store(0, &[(&pubkey, &account)]);
let storage = accounts.storage.read().unwrap();
let storage_entry = storage.0[&0].values().next().unwrap();
storage_entry.remove_account();
storage_entry.remove_account();
}
#[test]
fn test_accounts_purge_long_chained_after_snapshot_restore() {
solana_logger::setup();
let old_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let owner = Account::default().owner;
let account = Account::new(old_lamport, no_data, &owner);
let account2 = Account::new(old_lamport + 100_001, no_data, &owner);
let account3 = Account::new(old_lamport + 100_002, no_data, &owner);
let dummy_account = Account::new(99_999_999, no_data, &owner);
let zero_lamport_account = Account::new(zero_lamport, no_data, &owner);
let pubkey = Pubkey::new_rand();
let dummy_pubkey = Pubkey::new_rand();
let purged_pubkey1 = Pubkey::new_rand();
let purged_pubkey2 = Pubkey::new_rand();
let mut current_slot = 0;
let accounts = AccountsDB::new_single();
// create intermediate updates to purged_pubkey1 so that
// generate_index must add all the slots as roots at once at the end
current_slot += 1;
accounts.store(current_slot, &[(&pubkey, &account)]);
accounts.store(current_slot, &[(&purged_pubkey1, &account2)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store(current_slot, &[(&purged_pubkey1, &account2)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store(current_slot, &[(&purged_pubkey1, &account2)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store(current_slot, &[(&purged_pubkey1, &zero_lamport_account)]);
accounts.store(current_slot, &[(&purged_pubkey2, &account3)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store(current_slot, &[(&purged_pubkey2, &zero_lamport_account)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store(current_slot, &[(&dummy_pubkey, &dummy_account)]);
accounts.add_root(current_slot);
accounts.print_count_and_status("before reconstruct");
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts.print_count_and_status("before purge zero");
accounts.clean_accounts();
accounts.print_count_and_status("after purge zero");
assert_load_account(&accounts, current_slot, pubkey, old_lamport);
assert_load_account(&accounts, current_slot, purged_pubkey1, 0);
assert_load_account(&accounts, current_slot, purged_pubkey2, 0);
}
#[test]
fn test_accounts_clean_after_snapshot_restore_then_old_revives() {
solana_logger::setup();
let old_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let dummy_lamport = 999_999;
let owner = Account::default().owner;
let account = Account::new(old_lamport, no_data, &owner);
let account2 = Account::new(old_lamport + 100_001, no_data, &owner);
let account3 = Account::new(old_lamport + 100_002, no_data, &owner);
let dummy_account = Account::new(dummy_lamport, no_data, &owner);
let zero_lamport_account = Account::new(zero_lamport, no_data, &owner);
let pubkey1 = Pubkey::new_rand();
let pubkey2 = Pubkey::new_rand();
let dummy_pubkey = Pubkey::new_rand();
let mut current_slot = 0;
let accounts = AccountsDB::new_single();
// A: Initialize AccountsDB with pubkey1 and pubkey2
current_slot += 1;
accounts.store(current_slot, &[(&pubkey1, &account)]);
accounts.store(current_slot, &[(&pubkey2, &account)]);
accounts.add_root(current_slot);
// B: Test multiple updates to pubkey1 in a single slot/storage
current_slot += 1;
assert_eq!(0, accounts.alive_account_count_in_store(current_slot));
assert_eq!(1, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store(current_slot, &[(&pubkey1, &account2)]);
accounts.store(current_slot, &[(&pubkey1, &account2)]);
assert_eq!(1, accounts.alive_account_count_in_store(current_slot));
assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
accounts.add_root(current_slot);
// C: Yet another update to trigger lazy clean of step A
current_slot += 1;
assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store(current_slot, &[(&pubkey1, &account3)]);
assert_eq!(4, accounts.ref_count_for_pubkey(&pubkey1));
accounts.add_root(current_slot);
// D: Make pubkey1 0-lamport; also triggers clean of step B
current_slot += 1;
assert_eq!(4, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store(current_slot, &[(&pubkey1, &zero_lamport_account)]);
accounts.process_dead_slots(None);
assert_eq!(
3, /* == 4 - 2 + 1 */
accounts.ref_count_for_pubkey(&pubkey1)
);
accounts.add_root(current_slot);
// E: Avoid missing bank hash error
current_slot += 1;
accounts.store(current_slot, &[(&dummy_pubkey, &dummy_account)]);
accounts.add_root(current_slot);
assert_load_account(&accounts, current_slot, pubkey1, zero_lamport);
assert_load_account(&accounts, current_slot, pubkey2, old_lamport);
assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport);
// At this point, there are no index entries for A and B
// If steps C and D were purged, a snapshot restore would revive pubkey1
// with the state from step A. The refcount prevents that from happening.
accounts.clean_accounts();
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts.clean_accounts();
assert_load_account(&accounts, current_slot, pubkey1, zero_lamport);
assert_load_account(&accounts, current_slot, pubkey2, old_lamport);
assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport);
// F: Finally, make Step A cleanable
current_slot += 1;
accounts.store(current_slot, &[(&pubkey2, &account)]);
accounts.add_root(current_slot);
// Do clean
accounts.clean_accounts();
// Ensure pubkey1 is finally cleaned from the index
assert_not_load_account(&accounts, current_slot, pubkey1);
assert_load_account(&accounts, current_slot, pubkey2, old_lamport);
assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport);
}
#[test]
fn test_clean_dead_slots_empty() {
let accounts = AccountsDB::new_single();
let mut dead_slots = HashSet::new();
dead_slots.insert(10);
accounts.clean_dead_slots(&dead_slots);
}
#[test]
fn test_shrink_all_slots_none() {
let accounts = AccountsDB::new_single();
for _ in 0..10 {
assert_eq!(0, accounts.process_stale_slot());
}
accounts.shrink_all_slots();
}
#[test]
fn test_shrink_next_slots() {
let accounts = AccountsDB::new_single();
let mut current_slot = 7;
assert_eq!(
vec![None, None, None],
(0..3)
.map(|_| accounts.next_shrink_slot())
.collect::<Vec<_>>()
);
accounts.add_root(current_slot);
assert_eq!(
vec![Some(7), Some(7), Some(7)],
(0..3)
.map(|_| accounts.next_shrink_slot())
.collect::<Vec<_>>()
);
current_slot += 1;
accounts.add_root(current_slot);
let slots = (0..6)
.map(|_| accounts.next_shrink_slot())
.collect::<Vec<_>>();
// Because the origin of this data is HashMap (not BTreeMap), key order is arbitrary per cycle.
assert!(
vec![Some(7), Some(8), Some(7), Some(8), Some(7), Some(8)] == slots
|| vec![Some(8), Some(7), Some(8), Some(7), Some(8), Some(7)] == slots
);
}
#[test]
fn test_shrink_reset_uncleaned_roots() {
let accounts = AccountsDB::new_single();
accounts.reset_uncleaned_roots();
assert_eq!(
*accounts.shrink_candidate_slots.lock().unwrap(),
vec![] as Vec<Slot>
);
accounts.add_root(0);
accounts.add_root(1);
accounts.add_root(2);
accounts.reset_uncleaned_roots();
let actual_slots = accounts.shrink_candidate_slots.lock().unwrap().clone();
assert_eq!(actual_slots, vec![] as Vec<Slot>);
accounts.reset_uncleaned_roots();
let mut actual_slots = accounts.shrink_candidate_slots.lock().unwrap().clone();
actual_slots.sort();
assert_eq!(actual_slots, vec![0, 1, 2]);
accounts.accounts_index.write().unwrap().roots.clear();
let mut actual_slots = (0..5)
.map(|_| accounts.next_shrink_slot())
.collect::<Vec<_>>();
actual_slots.sort();
assert_eq!(actual_slots, vec![None, None, Some(0), Some(1), Some(2)]);
}
#[test]
fn test_shrink_stale_slots_processed() {
solana_logger::setup();
let accounts = AccountsDB::new_single();
let pubkey_count = 100;
let pubkeys: Vec<_> = (0..pubkey_count).map(|_| Pubkey::new_rand()).collect();
let some_lamport = 223;
let no_data = 0;
let owner = Account::default().owner;
let account = Account::new(some_lamport, no_data, &owner);
let mut current_slot = 0;
current_slot += 1;
for pubkey in &pubkeys {
accounts.store(current_slot, &[(&pubkey, &account)]);
}
let shrink_slot = current_slot;
accounts.add_root(current_slot);
current_slot += 1;
let pubkey_count_after_shrink = 10;
let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink];
for pubkey in updated_pubkeys {
accounts.store(current_slot, &[(&pubkey, &account)]);
}
accounts.add_root(current_slot);
accounts.clean_accounts();
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
accounts.shrink_all_slots();
assert_eq!(
pubkey_count_after_shrink,
accounts.all_account_count_in_append_vec(shrink_slot)
);
let no_ancestors = HashMap::default();
accounts.update_accounts_hash(current_slot, &no_ancestors);
accounts
.verify_bank_hash(current_slot, &no_ancestors)
.unwrap();
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts
.verify_bank_hash(current_slot, &no_ancestors)
.unwrap();
// repeating should be no-op
accounts.shrink_all_slots();
assert_eq!(
pubkey_count_after_shrink,
accounts.all_account_count_in_append_vec(shrink_slot)
);
}
#[test]
fn test_shrink_stale_slots_skipped() {
solana_logger::setup();
let accounts = AccountsDB::new_single();
let pubkey_count = 100;
let pubkeys: Vec<_> = (0..pubkey_count).map(|_| Pubkey::new_rand()).collect();
let some_lamport = 223;
let no_data = 0;
let owner = Account::default().owner;
let account = Account::new(some_lamport, no_data, &owner);
let mut current_slot = 0;
current_slot += 1;
for pubkey in &pubkeys {
accounts.store(current_slot, &[(&pubkey, &account)]);
}
let shrink_slot = current_slot;
accounts.add_root(current_slot);
current_slot += 1;
let pubkey_count_after_shrink = 90;
let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink];
for pubkey in updated_pubkeys {
accounts.store(current_slot, &[(&pubkey, &account)]);
}
accounts.add_root(current_slot);
accounts.clean_accounts();
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
// First, try to shrink only the stale slots.
accounts.shrink_all_stale_slots();
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
// Now, do full-shrink.
accounts.shrink_all_slots();
assert_eq!(
pubkey_count_after_shrink,
accounts.all_account_count_in_append_vec(shrink_slot)
);
}
#[test]
fn test_delete_dependencies() {
solana_logger::setup();
let mut accounts_index = AccountsIndex::default();
let key0 = Pubkey::new_from_array([0u8; 32]);
let key1 = Pubkey::new_from_array([1u8; 32]);
let key2 = Pubkey::new_from_array([2u8; 32]);
let info0 = AccountInfo {
store_id: 0,
offset: 0,
lamports: 0,
};
let info1 = AccountInfo {
store_id: 1,
offset: 0,
lamports: 0,
};
let info2 = AccountInfo {
store_id: 2,
offset: 0,
lamports: 0,
};
let info3 = AccountInfo {
store_id: 3,
offset: 0,
lamports: 0,
};
let mut reclaims = vec![];
accounts_index.insert(0, &key0, info0, &mut reclaims);
accounts_index.insert(1, &key0, info1.clone(), &mut reclaims);
accounts_index.insert(1, &key1, info1, &mut reclaims);
accounts_index.insert(2, &key1, info2.clone(), &mut reclaims);
accounts_index.insert(2, &key2, info2, &mut reclaims);
accounts_index.insert(3, &key2, info3, &mut reclaims);
accounts_index.add_root(0);
accounts_index.add_root(1);
accounts_index.add_root(2);
accounts_index.add_root(3);
let mut purges = HashMap::new();
purges.insert(key0, accounts_index.would_purge(&key0));
purges.insert(key1, accounts_index.would_purge(&key1));
purges.insert(key2, accounts_index.would_purge(&key2));
for (key, (list, ref_count)) in &purges {
info!(" purge {} ref_count {} =>", key, ref_count);
for x in list {
info!(" {:?}", x);
}
}
let mut store_counts = HashMap::new();
store_counts.insert(0, (0, HashSet::from_iter(vec![key0])));
store_counts.insert(1, (0, HashSet::from_iter(vec![key0, key1])));
store_counts.insert(2, (0, HashSet::from_iter(vec![key1, key2])));
store_counts.insert(3, (1, HashSet::from_iter(vec![key2])));
AccountsDB::calc_delete_dependencies(&purges, &mut store_counts);
let mut stores: Vec<_> = store_counts.keys().cloned().collect();
stores.sort();
for store in &stores {
info!(
"store: {:?} : {:?}",
store,
store_counts.get(&store).unwrap()
);
}
for x in 0..3 {
assert!(store_counts[&x].0 >= 1);
}
}
#[test]
fn test_shrink_and_clean() {
solana_logger::setup();
// repeat the whole test scenario
for _ in 0..5 {
let accounts = Arc::new(AccountsDB::new_single());
let accounts_for_shrink = accounts.clone();
// spawn the slot shrinking background thread
let exit = Arc::new(AtomicBool::default());
let exit_for_shrink = exit.clone();
let shrink_thread = std::thread::spawn(move || loop {
if exit_for_shrink.load(Ordering::Relaxed) {
break;
}
accounts_for_shrink.process_stale_slot();
});
let mut alive_accounts = vec![];
let owner = Pubkey::default();
// populate the AccountsDB with plenty of food for slot shrinking;
// this also simulates some realistically heavy spikes of account updates in the wild
for current_slot in 0..1000 {
while alive_accounts.len() <= 10 {
alive_accounts.push((
Pubkey::new_rand(),
Account::new(thread_rng().gen_range(0, 50), 0, &owner),
));
}
alive_accounts.retain(|(_pubkey, account)| account.lamports >= 1);
for (pubkey, account) in alive_accounts.iter_mut() {
account.lamports -= 1;
accounts.store(current_slot, &[(&pubkey, &account)]);
}
accounts.add_root(current_slot);
}
// let's dance.
for _ in 0..10 {
accounts.clean_accounts();
std::thread::sleep(std::time::Duration::from_millis(100));
}
// cleanup
exit.store(true, Ordering::Relaxed);
shrink_thread.join().unwrap();
}
}
}
| 36.16922 | 200 | 0.559375 |
eb032f57babae2450e2f203c740ec907438361e4 | 1,860 |
use std::io::{Read, Seek, SeekFrom, ErrorKind};
use std::fs::File;
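/// Compares two files for byte-for-byte equality.
///
/// The lengths are checked first; the contents are then compared in
/// 4096-byte blocks, falling back to reading the remaining tails once
/// either file has fewer than 4096 bytes left.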
pub fn file_contents_equal(mut file1: File, mut file2: File) -> bool {
let file1_metadata = file1.metadata().unwrap();
let file2_metadata = file2.metadata().unwrap();
if file1_metadata.len() != file2_metadata.len() {
return false;
}
let mut file1_remainder: Vec<u8> = Vec::new();
let mut file2_remainder: Vec<u8> = Vec::new();
loop {
let mut file1_block: [u8; 4096] = [0; 4096];
let file1_seek_pos = file1.stream_position().unwrap();
let file1_read_result = file1.read_exact(&mut file1_block);
let mut file2_block: [u8; 4096] = [0; 4096];
let file2_seek_pos = file2.stream_position().unwrap();
let file2_read_result = file2.read_exact(&mut file2_block);
if file1_read_result.is_ok() && file2_read_result.is_ok() {
if file1_block != file2_block {
return false;
}
} else if let (Err(file1_err), Err(file2_err)) = (file1_read_result, file2_read_result) {
// UnexpectedEof -> cursor seek position is unspecified
assert!(file1_err.kind() == ErrorKind::UnexpectedEof);
assert!(file2_err.kind() == ErrorKind::UnexpectedEof);
// Reset to known good point before reading in the rest
file1.seek(SeekFrom::Start(file1_seek_pos)).unwrap();
file2.seek(SeekFrom::Start(file2_seek_pos)).unwrap();
file1.read_to_end(&mut file1_remainder).unwrap();
file2.read_to_end(&mut file2_remainder).unwrap();
return file1_remainder == file2_remainder;
} else {
panic!("Error reading from one of the files");
}
}
}
| 40.434783 | 97 | 0.596774 |
72ebeb9363dea05bfc1527cf7aba928fa355cc92 | 573 |
#[allow(non_camel_case_types)]
pub type uint = u32;
#[allow(non_camel_case_types)]
pub type iint = i32;
mod bool;
mod variable;
mod sign;
mod literal;
mod clause;
// re-export all types
pub use self::bool::Bool;
pub use self::variable::Variable;
pub use self::sign::Sign;
pub use self::literal::Literal;
pub use self::clause::Clause;
/// A shortcut notation to make a literal out of a number value
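///
/// ```ignore
/// // Usage sketch: by the usual DIMACS convention a negative value is read
/// // as the negated literal of that variable (assuming `Literal::from`
/// // implements that convention).
/// let l = lit(-3);
/// ```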
pub fn lit(l: iint) -> Literal { Literal::from(l) }
/// A shortcut notation to make a variable out of a number value
pub fn var(v: uint) -> Variable { Variable::from(v) }
| 24.913043 | 64 | 0.713787 |
75b7c0dd123fcbf82331954e0d63504616999edb | 2,951 |
#![doc = include_str!("../README.md")]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(
nonstandard_style,
rust_2018_idioms,
rustdoc::broken_intra_doc_links,
rustdoc::private_intra_doc_links
)]
#![forbid(non_ascii_idents, unsafe_code)]
#![warn(
deprecated_in_future,
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
unreachable_pub,
unused_import_braces,
unused_labels,
unused_lifetimes,
unused_qualifications,
unused_results
)]
mod config;
use deadpool::{async_trait, managed};
use lapin::{ConnectionProperties, Error, tcp::OwnedTLSConfig};
pub use lapin;
pub use self::config::{Config, ConfigError};
pub use deadpool::managed::reexports::*;
deadpool::managed_reexports!(
"lapin",
Manager,
deadpool::managed::Object<Manager>,
Error,
ConfigError
);
/// Type alias for [`Object`]
pub type Connection = managed::Object<Manager>;
type RecycleResult = managed::RecycleResult<Error>;
type RecycleError = managed::RecycleError<Error>;
/// [`Manager`] for creating and recycling [`lapin::Connection`].
///
/// [`Manager`]: managed::Manager
pub struct Manager {
addr: String,
connection_properties: ConnectionProperties,
cert_chain: Option<String>,
}
impl std::fmt::Debug for Manager {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Manager")
.field("addr", &self.addr)
.field(
"connection_properties",
&self::config::ConnProps(&self.connection_properties),
)
.finish()
}
}
impl Manager {
/// Creates a new [`Manager`] using the given AMQP address,
/// [`lapin::ConnectionProperties`], and an optional PEM certificate
/// chain for TLS connections.
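///
/// # Example
///
/// A minimal sketch, assuming a broker at the default local address and
/// the `Pool` alias generated by `managed_reexports!` above:
///
/// ```ignore
/// use lapin::ConnectionProperties;
///
/// let manager = Manager::new(
///     "amqp://127.0.0.1:5672/%2f",
///     ConnectionProperties::default(),
///     None, // no extra certificate chain
/// );
/// let pool = Pool::builder(manager).max_size(8).build().expect("pool");
/// ```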
#[must_use]
pub fn new<S: Into<String>>(addr: S, connection_properties: ConnectionProperties, cert_chain: Option<String>) -> Self {
Self {
addr: addr.into(),
connection_properties,
cert_chain,
}
}
}
#[async_trait]
impl managed::Manager for Manager {
type Type = lapin::Connection;
type Error = Error;
async fn create(&self) -> Result<lapin::Connection, Error> {
let tlsconfig = OwnedTLSConfig {
identity: None,
cert_chain: self.cert_chain.clone(),
};
let conn = lapin::Connection::connect_with_config(
self.addr.as_str(),
self.connection_properties.clone(),
tlsconfig,
)
.await?;
Ok(conn)
}
async fn recycle(&self, conn: &mut lapin::Connection) -> RecycleResult {
match conn.status().state() {
lapin::ConnectionState::Connected => Ok(()),
other_state => Err(RecycleError::Message(format!(
"lapin connection is in state: {:?}",
other_state
))),
}
}
}
| 26.827273 | 123 | 0.62589 |
4bdf13affb18dae6f4c5ccd47bc0c3f2fc5a3fb7 | 1,558 |
use super::Class;
use proc_macro2::TokenStream;
use quote::quote;
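/// Emits the `IClassFactory` implementation for the class, or an empty
/// token stream when the class opts out of having a class factory.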
pub fn generate(class: &Class) -> TokenStream {
if !class.has_class_factory {
return TokenStream::new();
}
let class_factory_ident = crate::utils::class_factory_ident(&class.name);
let class_name = &class.name;
let user_fields = class.fields.iter().map(|f| {
let ty = &f.ty;
quote! { <#ty as ::core::default::Default>::default() }
});
quote! {
::com::class! {
#[no_class_factory]
pub class #class_factory_ident: ::com::interfaces::IClassFactory {}
impl ::com::interfaces::IClassFactory for #class_factory_ident {
unsafe fn CreateInstance(
&self,
aggr: *mut ::core::ptr::NonNull<<::com::interfaces::IUnknown as ::com::Interface>::VTable>,
riid: *const ::com::sys::IID,
ppv: *mut *mut ::core::ffi::c_void,
) -> ::com::sys::HRESULT {
assert!(!riid.is_null(), "iid passed to CreateInstance was null");
if !aggr.is_null() {
return ::com::sys::CLASS_E_NOAGGREGATION;
}
let instance = #class_name::allocate(#(#user_fields),*);
instance.QueryInterface(riid, ppv)
}
unsafe fn LockServer(&self, _increment: ::com::sys::BOOL) -> ::com::sys::HRESULT {
::com::sys::S_OK
}
}
}
}
}
| 35.409091 | 111 | 0.50706 |
90073d2368230629b15ea46291e807db09d15585 | 112 |
mod wavefront;
fn main() {
let model = wavefront::WavefrontModel::new("../models/cube.obj");
model.draw();
}
| 16 | 66 | 0.660714 |
228f350b12f74aebf07e4da256ac2b1e3450a9f5 | 22,698 |
use std::convert::TryFrom;
use serde::de::Error as DeError;
use serde::{Serialize, Serializer};
use super::prelude::*;
use crate::builder::{
CreateInteractionResponse,
CreateInteractionResponseFollowup,
EditInteractionResponse,
};
use crate::http::Http;
use crate::model::interactions::InteractionType;
use crate::utils;
/// An interaction triggered by a message component.
#[derive(Clone, Debug, Serialize)]
#[non_exhaustive]
pub struct MessageComponentInteraction {
/// Id of the interaction.
pub id: InteractionId,
/// Id of the application this interaction is for.
pub application_id: ApplicationId,
/// The type of interaction.
#[serde(rename = "type")]
pub kind: InteractionType,
/// The data of the interaction which was triggered.
pub data: MessageComponentInteractionData,
/// The message this interaction was triggered by, if
/// it is a component.
pub message: InteractionMessage,
/// The guild Id this interaction was sent from, if there is one.
#[serde(skip_serializing_if = "Option::is_none")]
pub guild_id: Option<GuildId>,
/// The channel Id this interaction was sent from.
pub channel_id: ChannelId,
/// The `member` data for the invoking user.
///
/// **Note**: It is only present if the interaction is triggered in a guild.
#[serde(skip_serializing_if = "Option::is_none")]
pub member: Option<Member>,
/// The `user` object for the invoking user.
pub user: User,
/// A continuation token for responding to the interaction.
pub token: String,
/// Always `1`.
pub version: u8,
}
impl MessageComponentInteraction {
/// Gets the interaction response.
///
/// # Errors
///
/// Returns an [`Error::Http`] if there is no interaction response.
///
/// [`Error::Http`]: crate::error::Error::Http
pub async fn get_interaction_response(&self, http: impl AsRef<Http>) -> Result<Message> {
http.as_ref().get_original_interaction_response(&self.token).await
}
/// Creates a response to the interaction received.
///
/// **Note**: Message contents must be under 2000 unicode code points.
///
/// # Errors
///
/// Returns an [`Error::Model`] if the message content is too long.
/// May also return an [`Error::Http`] if the API returns an error,
/// or an [`Error::Json`] if there is an error in deserializing the
/// API response.
///
/// [`Error::Model`]: crate::error::Error::Model
/// [`Error::Http`]: crate::error::Error::Http
/// [`Error::Json`]: crate::error::Error::Json
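///
/// # Examples
///
/// A rough sketch of responding to a component press with a plain
/// message (the exact builder methods depend on your serenity version):
///
/// ```ignore
/// use serenity::model::interactions::InteractionResponseType;
///
/// component_interaction
///     .create_interaction_response(&http, |response| {
///         response
///             .kind(InteractionResponseType::ChannelMessageWithSource)
///             .interaction_response_data(|message| message.content("Button pressed!"))
///     })
///     .await?;
/// ```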
pub async fn create_interaction_response<F>(&self, http: impl AsRef<Http>, f: F) -> Result<()>
where
F: FnOnce(&mut CreateInteractionResponse) -> &mut CreateInteractionResponse,
{
let mut interaction_response = CreateInteractionResponse::default();
f(&mut interaction_response);
let map = utils::hashmap_to_json_map(interaction_response.0);
Message::check_content_length(&map)?;
Message::check_embed_length(&map)?;
http.as_ref().create_interaction_response(self.id.0, &self.token, &Value::Object(map)).await
}
/// Edits the initial interaction response.
///
/// `application_id` will usually be the bot's [`UserId`], except for very old bots.
///
/// Refer to Discord's docs for Edit Webhook Message for field information.
///
/// **Note**: Message contents must be under 2000 unicode code points; this does not work on ephemeral messages.
///
/// [`UserId`]: crate::model::id::UserId
///
/// # Errors
///
/// Returns [`Error::Model`] if the edited content is too long.
/// May also return [`Error::Http`] if the API returns an error,
/// or an [`Error::Json`] if there is an error deserializing the response.
///
/// [`Error::Model`]: crate::error::Error::Model
/// [`Error::Http`]: crate::error::Error::Http
/// [`Error::Json`]: crate::error::Error::Json
pub async fn edit_original_interaction_response<F>(
&self,
http: impl AsRef<Http>,
f: F,
) -> Result<Message>
where
F: FnOnce(&mut EditInteractionResponse) -> &mut EditInteractionResponse,
{
let mut interaction_response = EditInteractionResponse::default();
f(&mut interaction_response);
let map = utils::hashmap_to_json_map(interaction_response.0);
Message::check_content_length(&map)?;
Message::check_embed_length(&map)?;
http.as_ref().edit_original_interaction_response(&self.token, &Value::Object(map)).await
}
/// Deletes the initial interaction response.
///
/// # Errors
///
/// May return [`Error::Http`] if the API returns an error,
/// such as when the response was already deleted.
pub async fn delete_original_interaction_response(&self, http: impl AsRef<Http>) -> Result<()> {
http.as_ref().delete_original_interaction_response(&self.token).await
}
/// Creates a followup response to the response sent.
///
/// **Note**: Message contents must be under 2000 unicode code points.
///
/// # Errors
///
/// Will return [`Error::Model`] if the content is too long.
/// May also return [`Error::Http`] if the API returns an error,
/// or a [`Error::Json`] if there is an error in deserializing the response.
///
/// [`Error::Model`]: crate::error::Error::Model
/// [`Error::Http`]: crate::error::Error::Http
/// [`Error::Json`]: crate::error::Error::Json
pub async fn create_followup_message<'a, F>(
&self,
http: impl AsRef<Http>,
f: F,
) -> Result<Message>
where
for<'b> F: FnOnce(
&'b mut CreateInteractionResponseFollowup<'a>,
) -> &'b mut CreateInteractionResponseFollowup<'a>,
{
let mut interaction_response = CreateInteractionResponseFollowup::default();
f(&mut interaction_response);
let map = utils::hashmap_to_json_map(interaction_response.0);
Message::check_content_length(&map)?;
Message::check_embed_length(&map)?;
http.as_ref().create_followup_message(&self.token, &Value::Object(map)).await
}
/// Edits a followup response to the response sent.
///
/// **Note**: Message contents must be under 2000 unicode code points.
///
/// # Errors
///
/// Will return [`Error::Model`] if the content is too long.
/// May also return [`Error::Http`] if the API returns an error,
/// or a [`Error::Json`] if there is an error in deserializing the response.
///
/// [`Error::Model`]: crate::error::Error::Model
/// [`Error::Http`]: crate::error::Error::Http
/// [`Error::Json`]: crate::error::Error::Json
pub async fn edit_followup_message<'a, F, M: Into<MessageId>>(
&self,
http: impl AsRef<Http>,
message_id: M,
f: F,
) -> Result<Message>
where
for<'b> F: FnOnce(
&'b mut CreateInteractionResponseFollowup<'a>,
) -> &'b mut CreateInteractionResponseFollowup<'a>,
{
let mut interaction_response = CreateInteractionResponseFollowup::default();
f(&mut interaction_response);
let map = utils::hashmap_to_json_map(interaction_response.0);
Message::check_content_length(&map)?;
Message::check_embed_length(&map)?;
http.as_ref()
.edit_followup_message(&self.token, message_id.into().into(), &Value::Object(map))
.await
}
/// Deletes a followup message.
///
/// # Errors
///
/// May return [`Error::Http`] if the API returns an error,
/// such as when the message was already deleted.
pub async fn delete_followup_message<M: Into<MessageId>>(
&self,
http: impl AsRef<Http>,
message_id: M,
) -> Result<()> {
http.as_ref().delete_followup_message(&self.token, message_id.into().into()).await
}
}
impl<'de> Deserialize<'de> for MessageComponentInteraction {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> StdResult<Self, D::Error> {
let mut map = JsonMap::deserialize(deserializer)?;
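        // Discord does not repeat `guild_id` inside the nested member, role,
        // and channel objects, so inject it manually before deserializing them.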
let id = map.get("guild_id").and_then(|x| x.as_str()).and_then(|x| x.parse::<u64>().ok());
if let Some(guild_id) = id {
if let Some(member) = map.get_mut("member").and_then(|x| x.as_object_mut()) {
member.insert("guild_id".to_string(), Value::Number(Number::from(guild_id)));
}
if let Some(data) = map.get_mut("data") {
if let Some(resolved) = data.get_mut("resolved") {
if let Some(roles) = resolved.get_mut("roles") {
if let Some(values) = roles.as_object_mut() {
for value in values.values_mut() {
value.as_object_mut().unwrap().insert(
"guild_id".to_string(),
Value::String(guild_id.to_string()),
);
}
}
}
if let Some(channels) = resolved.get_mut("channels") {
if let Some(values) = channels.as_object_mut() {
for value in values.values_mut() {
value.as_object_mut().unwrap().insert(
"guild_id".to_string(),
Value::String(guild_id.to_string()),
);
}
}
}
}
}
}
let id = map
.remove("id")
.ok_or_else(|| DeError::custom("expected id"))
.and_then(InteractionId::deserialize)
.map_err(DeError::custom)?;
let application_id = map
.remove("application_id")
.ok_or_else(|| DeError::custom("expected application id"))
.and_then(ApplicationId::deserialize)
.map_err(DeError::custom)?;
let kind = map
.remove("type")
.ok_or_else(|| DeError::custom("expected type"))
.and_then(InteractionType::deserialize)
.map_err(DeError::custom)?;
let data = map
.remove("data")
.ok_or_else(|| DeError::custom("expected data"))
.and_then(MessageComponentInteractionData::deserialize)
.map_err(DeError::custom)?;
let guild_id = match map.contains_key("guild_id") {
true => Some(
map.remove("guild_id")
.ok_or_else(|| DeError::custom("expected guild_id"))
.and_then(GuildId::deserialize)
.map_err(DeError::custom)?,
),
false => None,
};
let channel_id = map
.remove("channel_id")
.ok_or_else(|| DeError::custom("expected channel_id"))
.and_then(ChannelId::deserialize)
.map_err(DeError::custom)?;
let member = match map.contains_key("member") {
true => Some(
map.remove("member")
.ok_or_else(|| DeError::custom("expected member"))
.and_then(Member::deserialize)
.map_err(DeError::custom)?,
),
false => None,
};
let user = match map.contains_key("user") {
true => map
.remove("user")
.ok_or_else(|| DeError::custom("expected user"))
.and_then(User::deserialize)
.map_err(DeError::custom)?,
false => member.as_ref().expect("expected user or member").user.clone(),
};
let message = {
let message = map
.remove("message")
.ok_or_else(|| DeError::custom("expected message"))
.and_then(JsonMap::deserialize)
.map_err(DeError::custom)?;
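            // Ephemeral messages are delivered without an `author` field, so
            // treat a missing author as a partial (ephemeral) message.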
let partial = !message.contains_key("author");
let value: Value = message.into();
if partial {
InteractionMessage::Ephemeral(
EphemeralMessage::deserialize(value).map_err(DeError::custom)?,
)
} else {
InteractionMessage::Regular(Message::deserialize(value).map_err(DeError::custom)?)
}
};
let token = map
.remove("token")
.ok_or_else(|| DeError::custom("expected token"))
.and_then(String::deserialize)
.map_err(DeError::custom)?;
let version = map
.remove("version")
.ok_or_else(|| DeError::custom("expected version"))
.and_then(u8::deserialize)
.map_err(DeError::custom)?;
Ok(Self {
id,
application_id,
kind,
data,
message,
guild_id,
channel_id,
member,
user,
token,
version,
})
}
}
/// The data of a message component interaction, provided by
/// [`MessageComponentInteraction::data`].
#[derive(Clone, Debug, Deserialize, Serialize)]
#[non_exhaustive]
pub struct MessageComponentInteractionData {
/// The custom id of the component.
pub custom_id: String,
/// The type of the component.
pub component_type: ComponentType,
    /// The given values of the [`SelectMenu`]s.
#[serde(default)]
pub values: Vec<String>,
}
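// A hypothetical sketch of inspecting the data in a handler (names assumed
// for illustration only):
//
//     match data.component_type {
//         ComponentType::Button => println!("button `{}` pressed", data.custom_id),
//         ComponentType::SelectMenu => println!("selected values: {:?}", data.values),
//         _ => {}
//     }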
/// A component.
#[derive(Clone, Debug)]
#[non_exhaustive]
pub enum Component {
ActionRow(ActionRow),
Button(Button),
SelectMenu(SelectMenu),
}
impl<'de> Deserialize<'de> for Component {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> StdResult<Self, D::Error> {
let map = JsonMap::deserialize(deserializer)?;
let kind = map
.get("type")
.ok_or_else(|| DeError::custom("expected type"))
.and_then(ComponentType::deserialize)
.map_err(DeError::custom)?;
match kind {
ComponentType::ActionRow => serde_json::from_value::<ActionRow>(Value::Object(map))
.map(Component::ActionRow)
.map_err(DeError::custom),
ComponentType::Button => serde_json::from_value::<Button>(Value::Object(map))
.map(Component::Button)
.map_err(DeError::custom),
ComponentType::SelectMenu => serde_json::from_value::<SelectMenu>(Value::Object(map))
.map(Component::SelectMenu)
.map_err(DeError::custom),
ComponentType::Unknown => Err(DeError::custom("Unknown component type")),
}
}
}
impl Serialize for Component {
fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error>
where
S: Serializer,
{
match self {
Component::ActionRow(c) => ActionRow::serialize(c, serializer),
Component::Button(c) => Button::serialize(c, serializer),
Component::SelectMenu(c) => SelectMenu::serialize(c, serializer),
}
}
}
impl From<ActionRow> for Component {
fn from(component: ActionRow) -> Self {
Component::ActionRow(component)
}
}
impl From<Button> for Component {
fn from(component: Button) -> Self {
Component::Button(component)
}
}
impl From<SelectMenu> for Component {
fn from(component: SelectMenu) -> Self {
Component::SelectMenu(component)
}
}
/// The type of a component
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
#[non_exhaustive]
#[repr(u8)]
pub enum ComponentType {
ActionRow = 1,
Button = 2,
SelectMenu = 3,
Unknown = !0,
}
enum_number!(ComponentType {
ActionRow,
Button,
SelectMenu
});
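// The `enum_number!` macro (defined elsewhere in this crate) derives numeric
// (de)serialization from the discriminants above, so the expected round trip,
// as a sketch, would be:
//
//     assert_eq!(serde_json::to_string(&ComponentType::Button).unwrap(), "2");
//     assert_eq!(serde_json::from_str::<ComponentType>("3").unwrap(), ComponentType::SelectMenu);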
/// An action row.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct ActionRow {
/// The type of component this ActionRow is.
#[serde(rename = "type")]
pub kind: ComponentType,
/// The components of this ActionRow.
#[serde(default)]
pub components: Vec<ActionRowComponent>,
}
/// A component which can be inside of an [`ActionRow`].
#[derive(Clone, Debug)]
#[non_exhaustive]
pub enum ActionRowComponent {
Button(Button),
SelectMenu(SelectMenu),
}
impl<'de> Deserialize<'de> for ActionRowComponent {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> StdResult<Self, D::Error> {
let map = JsonMap::deserialize(deserializer)?;
let kind = map
.get("type")
.ok_or_else(|| DeError::custom("expected type"))
.and_then(ComponentType::deserialize)
.map_err(DeError::custom)?;
match kind {
ComponentType::Button => serde_json::from_value::<Button>(Value::Object(map))
.map(ActionRowComponent::Button)
.map_err(DeError::custom),
ComponentType::SelectMenu => serde_json::from_value::<SelectMenu>(Value::Object(map))
.map(ActionRowComponent::SelectMenu)
.map_err(DeError::custom),
_ => Err(DeError::custom("Unknown component type")),
}
}
}
impl Serialize for ActionRowComponent {
fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error>
where
S: Serializer,
{
match self {
ActionRowComponent::Button(c) => Button::serialize(c, serializer),
ActionRowComponent::SelectMenu(c) => SelectMenu::serialize(c, serializer),
}
}
}
impl From<ActionRowComponent> for Component {
fn from(component: ActionRowComponent) -> Self {
match component {
ActionRowComponent::Button(b) => Component::Button(b),
ActionRowComponent::SelectMenu(s) => Component::SelectMenu(s),
}
}
}
impl TryFrom<Component> for ActionRowComponent {
type Error = Error;
fn try_from(value: Component) -> Result<Self> {
match value {
Component::ActionRow(_) => Err(Error::Model(ModelError::InvalidComponentType)),
Component::Button(b) => Ok(ActionRowComponent::Button(b)),
Component::SelectMenu(s) => Ok(ActionRowComponent::SelectMenu(s)),
}
}
}
impl From<Button> for ActionRowComponent {
fn from(component: Button) -> Self {
ActionRowComponent::Button(component)
}
}
impl From<SelectMenu> for ActionRowComponent {
fn from(component: SelectMenu) -> Self {
ActionRowComponent::SelectMenu(component)
}
}
/// A button component.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Button {
    /// The component type; it will always be [`ComponentType::Button`].
#[serde(rename = "type")]
pub kind: ComponentType,
/// The button style.
pub style: ButtonStyle,
/// The text which appears on the button.
pub label: Option<String>,
/// The emoji of this button, if there is one.
pub emoji: Option<ReactionType>,
/// An identifier defined by the developer for the button.
pub custom_id: Option<String>,
/// The url of the button, if there is one.
pub url: Option<String>,
/// Whether the button is disabled.
#[serde(default)]
pub disabled: bool,
}
/// The style of a button.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
#[non_exhaustive]
#[repr(u8)]
pub enum ButtonStyle {
Primary = 1,
Secondary = 2,
Success = 3,
Danger = 4,
Link = 5,
Unknown = !0,
}
enum_number!(ButtonStyle {
Primary,
Secondary,
Success,
Danger,
Link
});
/// A select menu component.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct SelectMenu {
    /// The component type; it will always be [`ComponentType::SelectMenu`].
#[serde(rename = "type")]
pub kind: ComponentType,
/// The placeholder shown when nothing is selected.
pub placeholder: Option<String>,
/// An identifier defined by the developer for the select menu.
pub custom_id: Option<String>,
/// The minimum number of selections allowed.
pub min_values: Option<u64>,
/// The maximum number of selections allowed.
pub max_values: Option<u64>,
/// The options of this select menu.
#[serde(default)]
pub options: Vec<SelectMenuOption>,
}
/// An option of a select menu component.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct SelectMenuOption {
/// The text displayed on this option.
pub label: String,
/// The value to be sent for this option.
pub value: String,
/// The description shown for this option.
pub description: Option<String>,
/// The emoji displayed on this option.
pub emoji: Option<ReactionType>,
/// Render this option as the default selection.
#[serde(default)]
pub default: bool,
}
/// The [`MessageComponentInteraction::message`] field.
#[derive(Clone, Debug, Deserialize)]
pub enum InteractionMessage {
Regular(Message),
Ephemeral(EphemeralMessage),
}
impl InteractionMessage {
/// Whether the message is ephemeral.
pub fn is_ephemeral(&self) -> bool {
matches!(self, InteractionMessage::Ephemeral(_))
}
/// Gets the message Id.
pub fn id(&self) -> MessageId {
match self {
InteractionMessage::Regular(m) => m.id,
InteractionMessage::Ephemeral(m) => m.id,
}
}
/// Converts this to a regular message,
/// if it is one.
pub fn regular(self) -> Option<Message> {
match self {
InteractionMessage::Regular(m) => Some(m),
InteractionMessage::Ephemeral(_) => None,
}
}
/// Converts this to an ephemeral message,
/// if it is one.
pub fn ephemeral(self) -> Option<EphemeralMessage> {
match self {
InteractionMessage::Regular(_) => None,
InteractionMessage::Ephemeral(m) => Some(m),
}
}
}
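// A hypothetical usage sketch (assumes a `message: InteractionMessage`):
//
//     if message.is_ephemeral() {
//         println!("ephemeral message {}", message.id());
//     } else if let Some(regular) = message.regular() {
//         println!("regular message with {} embeds", regular.embeds.len());
//     }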
impl Serialize for InteractionMessage {
fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error>
where
S: Serializer,
{
match self {
InteractionMessage::Regular(c) => Message::serialize(c, serializer),
InteractionMessage::Ephemeral(c) => EphemeralMessage::serialize(c, serializer),
}
}
}
/// An ephemeral message given in an interaction.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct EphemeralMessage {
/// The message flags.
pub flags: MessageFlags,
/// The message Id.
pub id: MessageId,
}
| 32.895652 | 113 | 0.591506 |
4b2e8f470b905bb8ccecabb9fdf5af146d49312a | 18,377 | #[doc = "Register `SC` reader"]
pub struct R(crate::R<SC_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<SC_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::convert::From<crate::R<SC_SPEC>> for R {
fn from(reader: crate::R<SC_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `SC` writer"]
pub struct W(crate::W<SC_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<SC_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl core::convert::From<crate::W<SC_SPEC>> for W {
fn from(writer: crate::W<SC_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Real-Time Counter Output\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RTCO_A {
#[doc = "0: Real-time counter output disabled."]
_0 = 0,
#[doc = "1: Real-time counter output enabled."]
_1 = 1,
}
impl From<RTCO_A> for bool {
#[inline(always)]
fn from(variant: RTCO_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `RTCO` reader - Real-Time Counter Output"]
pub struct RTCO_R(crate::FieldReader<bool, RTCO_A>);
impl RTCO_R {
pub(crate) fn new(bits: bool) -> Self {
RTCO_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> RTCO_A {
match self.bits {
false => RTCO_A::_0,
true => RTCO_A::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline(always)]
pub fn is_0(&self) -> bool {
**self == RTCO_A::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline(always)]
pub fn is_1(&self) -> bool {
**self == RTCO_A::_1
}
}
impl core::ops::Deref for RTCO_R {
type Target = crate::FieldReader<bool, RTCO_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RTCO` writer - Real-Time Counter Output"]
pub struct RTCO_W<'a> {
w: &'a mut W,
}
impl<'a> RTCO_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: RTCO_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Real-time counter output disabled."]
#[inline(always)]
pub fn _0(self) -> &'a mut W {
self.variant(RTCO_A::_0)
}
#[doc = "Real-time counter output enabled."]
#[inline(always)]
pub fn _1(self) -> &'a mut W {
self.variant(RTCO_A::_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u32 & 0x01) << 4);
self.w
}
}
#[doc = "Real-Time Interrupt Enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RTIE_A {
#[doc = "0: Real-time interrupt requests are disabled. Use software polling."]
_0 = 0,
#[doc = "1: Real-time interrupt requests are enabled."]
_1 = 1,
}
impl From<RTIE_A> for bool {
#[inline(always)]
fn from(variant: RTIE_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `RTIE` reader - Real-Time Interrupt Enable"]
pub struct RTIE_R(crate::FieldReader<bool, RTIE_A>);
impl RTIE_R {
pub(crate) fn new(bits: bool) -> Self {
RTIE_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> RTIE_A {
match self.bits {
false => RTIE_A::_0,
true => RTIE_A::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline(always)]
pub fn is_0(&self) -> bool {
**self == RTIE_A::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline(always)]
pub fn is_1(&self) -> bool {
**self == RTIE_A::_1
}
}
impl core::ops::Deref for RTIE_R {
type Target = crate::FieldReader<bool, RTIE_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RTIE` writer - Real-Time Interrupt Enable"]
pub struct RTIE_W<'a> {
w: &'a mut W,
}
impl<'a> RTIE_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: RTIE_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Real-time interrupt requests are disabled. Use software polling."]
#[inline(always)]
pub fn _0(self) -> &'a mut W {
self.variant(RTIE_A::_0)
}
#[doc = "Real-time interrupt requests are enabled."]
#[inline(always)]
pub fn _1(self) -> &'a mut W {
self.variant(RTIE_A::_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 6)) | ((value as u32 & 0x01) << 6);
self.w
}
}
#[doc = "Real-Time Interrupt Flag\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RTIF_A {
#[doc = "0: RTC counter has not reached the value in the RTC modulo register."]
_0 = 0,
#[doc = "1: RTC counter has reached the value in the RTC modulo register."]
_1 = 1,
}
impl From<RTIF_A> for bool {
#[inline(always)]
fn from(variant: RTIF_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `RTIF` reader - Real-Time Interrupt Flag"]
pub struct RTIF_R(crate::FieldReader<bool, RTIF_A>);
impl RTIF_R {
pub(crate) fn new(bits: bool) -> Self {
RTIF_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> RTIF_A {
match self.bits {
false => RTIF_A::_0,
true => RTIF_A::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline(always)]
pub fn is_0(&self) -> bool {
**self == RTIF_A::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline(always)]
pub fn is_1(&self) -> bool {
**self == RTIF_A::_1
}
}
impl core::ops::Deref for RTIF_R {
type Target = crate::FieldReader<bool, RTIF_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RTIF` writer - Real-Time Interrupt Flag"]
pub struct RTIF_W<'a> {
w: &'a mut W,
}
impl<'a> RTIF_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: RTIF_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "RTC counter has not reached the value in the RTC modulo register."]
#[inline(always)]
pub fn _0(self) -> &'a mut W {
self.variant(RTIF_A::_0)
}
#[doc = "RTC counter has reached the value in the RTC modulo register."]
#[inline(always)]
pub fn _1(self) -> &'a mut W {
self.variant(RTIF_A::_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 7)) | ((value as u32 & 0x01) << 7);
self.w
}
}
#[doc = "Real-Time Clock Prescaler Select\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum RTCPS_A {
#[doc = "0: Off"]
_000 = 0,
#[doc = "1: If RTCLKS = x0, it is 1; if RTCLKS = x1, it is 128."]
_001 = 1,
#[doc = "2: If RTCLKS = x0, it is 2; if RTCLKS = x1, it is 256."]
_010 = 2,
#[doc = "3: If RTCLKS = x0, it is 4; if RTCLKS = x1, it is 512."]
_011 = 3,
#[doc = "4: If RTCLKS = x0, it is 8; if RTCLKS = x1, it is 1024."]
_100 = 4,
#[doc = "5: If RTCLKS = x0, it is 16; if RTCLKS = x1, it is 2048."]
_101 = 5,
#[doc = "6: If RTCLKS = x0, it is 32; if RTCLKS = x1, it is 100."]
_110 = 6,
#[doc = "7: If RTCLKS = x0, it is 64; if RTCLKS = x1, it is 1000."]
_111 = 7,
}
impl From<RTCPS_A> for u8 {
#[inline(always)]
fn from(variant: RTCPS_A) -> Self {
variant as _
}
}
#[doc = "Field `RTCPS` reader - Real-Time Clock Prescaler Select"]
pub struct RTCPS_R(crate::FieldReader<u8, RTCPS_A>);
impl RTCPS_R {
pub(crate) fn new(bits: u8) -> Self {
RTCPS_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> RTCPS_A {
match self.bits {
0 => RTCPS_A::_000,
1 => RTCPS_A::_001,
2 => RTCPS_A::_010,
3 => RTCPS_A::_011,
4 => RTCPS_A::_100,
5 => RTCPS_A::_101,
6 => RTCPS_A::_110,
7 => RTCPS_A::_111,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `_000`"]
#[inline(always)]
pub fn is_000(&self) -> bool {
**self == RTCPS_A::_000
}
#[doc = "Checks if the value of the field is `_001`"]
#[inline(always)]
pub fn is_001(&self) -> bool {
**self == RTCPS_A::_001
}
#[doc = "Checks if the value of the field is `_010`"]
#[inline(always)]
pub fn is_010(&self) -> bool {
**self == RTCPS_A::_010
}
#[doc = "Checks if the value of the field is `_011`"]
#[inline(always)]
pub fn is_011(&self) -> bool {
**self == RTCPS_A::_011
}
#[doc = "Checks if the value of the field is `_100`"]
#[inline(always)]
pub fn is_100(&self) -> bool {
**self == RTCPS_A::_100
}
#[doc = "Checks if the value of the field is `_101`"]
#[inline(always)]
pub fn is_101(&self) -> bool {
**self == RTCPS_A::_101
}
#[doc = "Checks if the value of the field is `_110`"]
#[inline(always)]
pub fn is_110(&self) -> bool {
**self == RTCPS_A::_110
}
#[doc = "Checks if the value of the field is `_111`"]
#[inline(always)]
pub fn is_111(&self) -> bool {
**self == RTCPS_A::_111
}
}
impl core::ops::Deref for RTCPS_R {
type Target = crate::FieldReader<u8, RTCPS_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RTCPS` writer - Real-Time Clock Prescaler Select"]
pub struct RTCPS_W<'a> {
w: &'a mut W,
}
impl<'a> RTCPS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: RTCPS_A) -> &'a mut W {
self.bits(variant.into())
}
#[doc = "Off"]
#[inline(always)]
pub fn _000(self) -> &'a mut W {
self.variant(RTCPS_A::_000)
}
#[doc = "If RTCLKS = x0, it is 1; if RTCLKS = x1, it is 128."]
#[inline(always)]
pub fn _001(self) -> &'a mut W {
self.variant(RTCPS_A::_001)
}
#[doc = "If RTCLKS = x0, it is 2; if RTCLKS = x1, it is 256."]
#[inline(always)]
pub fn _010(self) -> &'a mut W {
self.variant(RTCPS_A::_010)
}
#[doc = "If RTCLKS = x0, it is 4; if RTCLKS = x1, it is 512."]
#[inline(always)]
pub fn _011(self) -> &'a mut W {
self.variant(RTCPS_A::_011)
}
#[doc = "If RTCLKS = x0, it is 8; if RTCLKS = x1, it is 1024."]
#[inline(always)]
pub fn _100(self) -> &'a mut W {
self.variant(RTCPS_A::_100)
}
#[doc = "If RTCLKS = x0, it is 16; if RTCLKS = x1, it is 2048."]
#[inline(always)]
pub fn _101(self) -> &'a mut W {
self.variant(RTCPS_A::_101)
}
#[doc = "If RTCLKS = x0, it is 32; if RTCLKS = x1, it is 100."]
#[inline(always)]
pub fn _110(self) -> &'a mut W {
self.variant(RTCPS_A::_110)
}
#[doc = "If RTCLKS = x0, it is 64; if RTCLKS = x1, it is 1000."]
#[inline(always)]
pub fn _111(self) -> &'a mut W {
self.variant(RTCPS_A::_111)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 8)) | ((value as u32 & 0x07) << 8);
self.w
}
}
#[doc = "Real-Time Clock Source Select\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum RTCLKS_A {
#[doc = "0: External clock source."]
_00 = 0,
#[doc = "1: Real-time clock source is 1 kHz (LPOCLK)."]
_01 = 1,
#[doc = "2: Internal reference clock (ICSIRCLK)."]
_10 = 2,
#[doc = "3: Bus clock."]
_11 = 3,
}
impl From<RTCLKS_A> for u8 {
#[inline(always)]
fn from(variant: RTCLKS_A) -> Self {
variant as _
}
}
#[doc = "Field `RTCLKS` reader - Real-Time Clock Source Select"]
pub struct RTCLKS_R(crate::FieldReader<u8, RTCLKS_A>);
impl RTCLKS_R {
pub(crate) fn new(bits: u8) -> Self {
RTCLKS_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> RTCLKS_A {
match self.bits {
0 => RTCLKS_A::_00,
1 => RTCLKS_A::_01,
2 => RTCLKS_A::_10,
3 => RTCLKS_A::_11,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `_00`"]
#[inline(always)]
pub fn is_00(&self) -> bool {
**self == RTCLKS_A::_00
}
#[doc = "Checks if the value of the field is `_01`"]
#[inline(always)]
pub fn is_01(&self) -> bool {
**self == RTCLKS_A::_01
}
#[doc = "Checks if the value of the field is `_10`"]
#[inline(always)]
pub fn is_10(&self) -> bool {
**self == RTCLKS_A::_10
}
#[doc = "Checks if the value of the field is `_11`"]
#[inline(always)]
pub fn is_11(&self) -> bool {
**self == RTCLKS_A::_11
}
}
impl core::ops::Deref for RTCLKS_R {
type Target = crate::FieldReader<u8, RTCLKS_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RTCLKS` writer - Real-Time Clock Source Select"]
pub struct RTCLKS_W<'a> {
w: &'a mut W,
}
impl<'a> RTCLKS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: RTCLKS_A) -> &'a mut W {
self.bits(variant.into())
}
#[doc = "External clock source."]
#[inline(always)]
pub fn _00(self) -> &'a mut W {
self.variant(RTCLKS_A::_00)
}
#[doc = "Real-time clock source is 1 kHz (LPOCLK)."]
#[inline(always)]
pub fn _01(self) -> &'a mut W {
self.variant(RTCLKS_A::_01)
}
#[doc = "Internal reference clock (ICSIRCLK)."]
#[inline(always)]
pub fn _10(self) -> &'a mut W {
self.variant(RTCLKS_A::_10)
}
#[doc = "Bus clock."]
#[inline(always)]
pub fn _11(self) -> &'a mut W {
self.variant(RTCLKS_A::_11)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 14)) | ((value as u32 & 0x03) << 14);
self.w
}
}
impl R {
#[doc = "Bit 4 - Real-Time Counter Output"]
#[inline(always)]
pub fn rtco(&self) -> RTCO_R {
RTCO_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bit 6 - Real-Time Interrupt Enable"]
#[inline(always)]
pub fn rtie(&self) -> RTIE_R {
RTIE_R::new(((self.bits >> 6) & 0x01) != 0)
}
#[doc = "Bit 7 - Real-Time Interrupt Flag"]
#[inline(always)]
pub fn rtif(&self) -> RTIF_R {
RTIF_R::new(((self.bits >> 7) & 0x01) != 0)
}
#[doc = "Bits 8:10 - Real-Time Clock Prescaler Select"]
#[inline(always)]
pub fn rtcps(&self) -> RTCPS_R {
RTCPS_R::new(((self.bits >> 8) & 0x07) as u8)
}
#[doc = "Bits 14:15 - Real-Time Clock Source Select"]
#[inline(always)]
pub fn rtclks(&self) -> RTCLKS_R {
RTCLKS_R::new(((self.bits >> 14) & 0x03) as u8)
}
}
impl W {
#[doc = "Bit 4 - Real-Time Counter Output"]
#[inline(always)]
pub fn rtco(&mut self) -> RTCO_W {
RTCO_W { w: self }
}
#[doc = "Bit 6 - Real-Time Interrupt Enable"]
#[inline(always)]
pub fn rtie(&mut self) -> RTIE_W {
RTIE_W { w: self }
}
#[doc = "Bit 7 - Real-Time Interrupt Flag"]
#[inline(always)]
pub fn rtif(&mut self) -> RTIF_W {
RTIF_W { w: self }
}
#[doc = "Bits 8:10 - Real-Time Clock Prescaler Select"]
#[inline(always)]
pub fn rtcps(&mut self) -> RTCPS_W {
RTCPS_W { w: self }
}
#[doc = "Bits 14:15 - Real-Time Clock Source Select"]
#[inline(always)]
pub fn rtclks(&mut self) -> RTCLKS_W {
RTCLKS_W { w: self }
}
#[doc = "Writes raw bits to the register."]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
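// A hypothetical usage sketch, assuming a PAC-style `Peripherals` struct with
// an `RTC` peripheral that exposes this `SC` register (not shown in this file):
//
//     let rtc = &peripherals.RTC;
//     // Select the 1 kHz LPO clock, divide by 128, and enable the interrupt.
//     rtc.sc.modify(|_, w| w.rtclks()._01().rtcps()._001().rtie()._1());
//     // Later, check the flag and (on write-1-to-clear hardware) clear it.
//     if rtc.sc.read().rtif().is_1() {
//         rtc.sc.modify(|_, w| w.rtif().set_bit());
//     }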
#[doc = "RTC Status and Control Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [sc](index.html) module"]
pub struct SC_SPEC;
impl crate::RegisterSpec for SC_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [sc::R](R) reader structure"]
impl crate::Readable for SC_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [sc::W](W) writer structure"]
impl crate::Writable for SC_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets SC to value 0"]
impl crate::Resettable for SC_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
| 29.784441 | 414 | 0.547315 |
087394c1308a4a509e0bdec5df8906f039066da7 | 359 | use rand::Rng;
const BASE62: &[u8] = b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
const NAME_LEN: usize = 64;
pub fn get_random_name() -> String {
let mut id = String::with_capacity(NAME_LEN);
let mut rng = rand::thread_rng();
for _ in 0..NAME_LEN {
        // Index with the table length rather than a magic `62`; the tiny
        // modulo bias of `usize % 62` is negligible for name generation.
        id.push(BASE62[rng.gen::<usize>() % BASE62.len()] as char);
}
id
}
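// Example (hypothetical):
//
//     let name = get_random_name();
//     assert_eq!(name.len(), NAME_LEN);
//     assert!(name.bytes().all(|b| BASE62.contains(&b)));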
| 25.642857 | 88 | 0.662953 |
efa35a6f36ff0ed47b56c89629f73e6b280ee7ab | 16,043 | // Copyright (c) The cargo-guppy Contributors
// SPDX-License-Identifier: MIT OR Apache-2.0
use fixtures::{
json::{self, JsonFixture},
package_id,
};
use guppy::graph::{
feature::{named_feature_filter, FeatureId, FeatureLabel, StandardFeatures},
BuildTargetId, BuildTargetKind, DependencyDirection, DotWrite, PackageDotVisitor, PackageLink,
PackageMetadata,
};
use std::{fmt, iter};
mod small {
use super::*;
use crate::feature_helpers::assert_features_for_package;
use fixtures::json::METADATA_CYCLE_FEATURES_BASE;
use pretty_assertions::assert_eq;
// Test specific details extracted from metadata1.json.
#[test]
fn metadata1() {
let metadata1 = JsonFixture::metadata1();
metadata1.verify();
let graph = metadata1.graph();
assert_eq!(
graph.workspace().target_directory(),
"/fakepath/testcrate/target",
"target directory matches"
);
let testcrate = graph
.metadata(&package_id(json::METADATA1_TESTCRATE))
.expect("root crate should exist");
let mut root_deps: Vec<_> = testcrate.direct_links().collect();
assert_eq!(root_deps.len(), 1, "the root crate has one dependency");
let link = root_deps.pop().expect("the root crate has one dependency");
// XXX test for details of dependency edges as well?
assert!(link.normal().is_present(), "normal dependency is defined");
assert!(link.build().is_present(), "build dependency is defined");
assert!(link.dev().is_present(), "dev dependency is defined");
// Print out dot graphs for small subgraphs.
static EXPECTED_DOT: &str = r#"digraph {
0 [label="winapi-x86_64-pc-windows-gnu"]
11 [label="mach"]
13 [label="winapi"]
14 [label="libc"]
20 [label="winapi-i686-pc-windows-gnu"]
26 [label="region"]
31 [label="bitflags"]
11 -> 14 [label="libc"]
13 -> 20 [label="winapi-i686-pc-windows-gnu"]
13 -> 0 [label="winapi-x86_64-pc-windows-gnu"]
26 -> 31 [label="bitflags"]
26 -> 14 [label="libc"]
26 -> 11 [label="mach"]
26 -> 13 [label="winapi"]
}
"#;
let package_set = graph
.query_forward(iter::once(&package_id(json::METADATA1_REGION)))
.unwrap()
.resolve();
assert_eq!(
EXPECTED_DOT,
format!("{}", package_set.display_dot(NameVisitor)),
"dot output matches"
);
// For reverse reachable ensure that the arrows are in the correct direction.
static EXPECTED_DOT_REVERSED: &str = r#"digraph {
1 [label="datatest"]
9 [label="serde_yaml"]
15 [label="dtoa"]
18 [label="testcrate"]
1 -> 9 [label="serde_yaml"]
9 -> 15 [label="dtoa"]
18 -> 1 [label="datatest"]
}
"#;
let package_set = graph
.query_reverse(iter::once(&package_id(json::METADATA1_DTOA)))
.unwrap()
.resolve();
assert_eq!(
EXPECTED_DOT_REVERSED,
format!("{}", package_set.display_dot(NameVisitor)),
"reversed dot output matches"
);
// ---
// Check that resolve_with works by dropping all edges into libc (compare to example above).
static EXPECTED_DOT_NO_LIBC: &str = r#"digraph {
0 [label="winapi-x86_64-pc-windows-gnu"]
11 [label="mach"]
13 [label="winapi"]
20 [label="winapi-i686-pc-windows-gnu"]
26 [label="region"]
31 [label="bitflags"]
13 -> 20 [label="winapi-i686-pc-windows-gnu"]
13 -> 0 [label="winapi-x86_64-pc-windows-gnu"]
26 -> 31 [label="bitflags"]
26 -> 11 [label="mach"]
26 -> 13 [label="winapi"]
}
"#;
let package_set = graph
.query_forward(iter::once(&package_id(json::METADATA1_REGION)))
.unwrap()
.resolve_with_fn(|_, link| link.to().name() != "libc");
assert_eq!(
EXPECTED_DOT_NO_LIBC,
format!("{}", package_set.display_dot(NameVisitor)),
"dot output matches"
);
// ---
let feature_graph = graph.feature_graph();
assert_eq!(feature_graph.feature_count(), 506, "feature count");
assert_eq!(feature_graph.link_count(), 631, "link count");
let feature_set = feature_graph
.query_workspace(StandardFeatures::All)
.resolve();
let root_ids: Vec<_> = feature_set.root_ids(DependencyDirection::Forward).collect();
let testcrate_id = package_id(json::METADATA1_TESTCRATE);
let expected = vec![FeatureId::named(&testcrate_id, "datatest")];
assert_eq!(root_ids, expected, "feature graph root IDs match");
}
proptest_suite!(metadata1);
#[test]
fn metadata2() {
let metadata2 = JsonFixture::metadata2();
metadata2.verify();
let feature_graph = metadata2.graph().feature_graph();
assert_eq!(feature_graph.feature_count(), 484, "feature count");
assert_eq!(feature_graph.link_count(), 589, "link count");
let root_ids: Vec<_> = feature_graph
.query_workspace(StandardFeatures::None)
.resolve()
.root_ids(DependencyDirection::Forward)
.collect();
let testcrate_id = package_id(json::METADATA2_TESTCRATE);
let expected = vec![FeatureId::base(&testcrate_id)];
assert_eq!(root_ids, expected, "feature graph root IDs match");
}
proptest_suite!(metadata2);
#[test]
fn metadata_dups() {
let metadata_dups = JsonFixture::metadata_dups();
metadata_dups.verify();
}
proptest_suite!(metadata_dups);
#[test]
fn metadata_cycle1() {
let metadata_cycle1 = JsonFixture::metadata_cycle1();
metadata_cycle1.verify();
}
proptest_suite!(metadata_cycle1);
#[test]
fn metadata_cycle2() {
let metadata_cycle2 = JsonFixture::metadata_cycle2();
metadata_cycle2.verify();
}
proptest_suite!(metadata_cycle2);
#[test]
fn metadata_cycle_features() {
let metadata_cycle_features = JsonFixture::metadata_cycle_features();
metadata_cycle_features.verify();
let feature_graph = metadata_cycle_features.graph().feature_graph();
let base_id = package_id(METADATA_CYCLE_FEATURES_BASE);
let default_id = FeatureId::named(&base_id, "default");
// default, default-enable and default-transitive are default features.
for &f in &["default", "default-enable", "default-transitive"] {
let this_id = FeatureId::named(&base_id, f);
assert!(
feature_graph
.is_default_feature(this_id)
.expect("valid feature ID"),
"{} is a default feature",
f,
);
assert!(
feature_graph
.depends_on(default_id, this_id)
.expect("valid feature IDs"),
"{} should depend on {} but does not",
default_id,
this_id,
);
}
// helper-enable and helper-transitive are *not* default features even though they are
// enabled by the cyclic dev dependency. But the dependency relation is present.
for &f in &["helper-enable", "helper-transitive"] {
let this_id = FeatureId::named(&base_id, f);
assert!(
!feature_graph
.is_default_feature(this_id)
.expect("valid feature ID"),
"{} is NOT a default feature",
f,
);
assert!(
feature_graph
.depends_on(default_id, this_id)
.expect("valid feature IDs"),
"{} should depend on {} but does not",
default_id,
this_id,
);
}
}
proptest_suite!(metadata_cycle_features);
#[test]
fn metadata_targets1() {
let metadata_targets1 = JsonFixture::metadata_targets1();
metadata_targets1.verify();
let package_graph = metadata_targets1.graph();
let package_set = package_graph.resolve_all();
let feature_graph = metadata_targets1.graph().feature_graph();
assert_eq!(feature_graph.feature_count(), 38, "feature count");
// Some code that might be useful for debugging.
if false {
for (source, target, edge) in feature_graph
.resolve_all()
.links(DependencyDirection::Forward)
{
let source_metadata = package_graph.metadata(source.package_id()).unwrap();
let target_metadata = package_graph.metadata(target.package_id()).unwrap();
println!(
"feature link: {}:{} {} -> {}:{} {} {:?}",
source_metadata.name(),
source_metadata.version(),
source.label(),
target_metadata.name(),
target_metadata.version(),
target.label(),
edge
);
}
}
assert_eq!(feature_graph.link_count(), 58, "feature link count");
// Check that resolve_packages + a feature filter works.
let feature_set = package_set.to_feature_set(named_feature_filter(
StandardFeatures::Default,
["foo", "bar"].iter().copied(),
));
let dep_a_id = package_id(json::METADATA_TARGETS1_DEP_A);
assert!(feature_set
.contains((&dep_a_id, FeatureLabel::Named("foo")))
.expect("valid feature ID"));
assert!(feature_set
.contains((&dep_a_id, FeatureLabel::Named("bar")))
.expect("valid feature ID"));
assert!(!feature_set
.contains((&dep_a_id, FeatureLabel::Named("baz")))
.expect("valid feature ID"));
assert!(!feature_set
.contains((&dep_a_id, FeatureLabel::Named("quux")))
.expect("valid feature ID"));
assert_features_for_package(
&feature_set,
&package_id(json::METADATA_TARGETS1_TESTCRATE),
Some(&[FeatureLabel::Base]),
"testcrate",
);
assert_features_for_package(
&feature_set,
&dep_a_id,
Some(&[
FeatureLabel::Base,
FeatureLabel::Named("bar"),
FeatureLabel::Named("foo"),
]),
"dep a",
);
assert_features_for_package(
&feature_set,
&package_id(json::METADATA_TARGETS1_LAZY_STATIC_1),
Some(&[FeatureLabel::Base]),
"lazy_static",
);
}
proptest_suite!(metadata_targets1);
#[test]
fn metadata_build_targets1() {
let metadata_build_targets1 = JsonFixture::metadata_build_targets1();
metadata_build_targets1.verify();
}
// No need for proptests because there are no dependencies involved.
#[test]
fn metadata_proc_macro1() {
let metadata = JsonFixture::metadata_proc_macro1();
metadata.verify();
let graph = metadata.graph();
let package = graph
.metadata(&package_id(json::METADATA_PROC_MACRO1_MACRO))
.expect("valid package ID");
assert!(package.is_proc_macro(), "is proc macro");
assert!(matches!(
package
.build_target(&BuildTargetId::Library)
.expect("library package is present")
.kind(),
BuildTargetKind::ProcMacro
));
}
// No need for proptests because this is a really simple test.
}
mod large {
use super::*;
use fixtures::dep_helpers::GraphAssert;
#[test]
fn metadata_libra() {
let metadata_libra = JsonFixture::metadata_libra();
metadata_libra.verify();
}
proptest_suite!(metadata_libra);
#[test]
fn metadata_libra_f0091a4() {
let metadata = JsonFixture::metadata_libra_f0091a4();
metadata.verify();
}
proptest_suite!(metadata_libra_f0091a4);
#[test]
fn metadata_libra_9ffd93b() {
let metadata = JsonFixture::metadata_libra_9ffd93b();
metadata.verify();
let graph = metadata.graph();
graph.assert_depends_on(
&package_id(json::METADATA_LIBRA_ADMISSION_CONTROL_SERVICE),
&package_id(json::METADATA_LIBRA_EXECUTOR_UTILS),
DependencyDirection::Forward,
"admission-control-service should depend on executor-utils",
);
graph.assert_not_depends_on(
&package_id(json::METADATA_LIBRA_EXECUTOR_UTILS),
&package_id(json::METADATA_LIBRA_ADMISSION_CONTROL_SERVICE),
DependencyDirection::Forward,
"executor-utils should not depend on admission-control-service",
);
let proc_macro_packages: Vec<_> = graph
.workspace()
.iter_by_path()
.filter_map(|(_, metadata)| {
if metadata.is_proc_macro() {
Some(metadata.name())
} else {
None
}
})
.collect();
assert_eq!(
proc_macro_packages,
["num-variants", "libra-crypto-derive"],
"proc macro packages"
);
let build_script_packages: Vec<_> = graph
.workspace()
.iter_by_path()
.filter_map(|(_, metadata)| {
if metadata.has_build_script() {
Some(metadata.name())
} else {
None
}
})
.collect();
assert_eq!(
build_script_packages,
[
"admission-control-proto",
"libra-dev",
"debug-interface",
"libra-metrics",
"storage-proto",
"libra_fuzzer_fuzz",
"libra-types"
],
"build script packages"
);
let mut build_dep_but_no_build_script: Vec<_> = graph
.resolve_all()
.links(DependencyDirection::Forward)
.filter_map(|link| {
if link.build().is_present() && !link.from().has_build_script() {
Some(link.from().name())
} else {
None
}
})
.collect();
build_dep_but_no_build_script.sort_unstable();
assert_eq!(
build_dep_but_no_build_script,
["libra-mempool", "rusoto_signature"],
"packages with build deps but no build scripts"
);
}
proptest_suite!(metadata_libra_9ffd93b);
}
mod guppy_tests {
use super::*;
use fixtures::json::METADATA_GUPPY_CARGO_GUPPY;
use guppy::PackageId;
#[test]
fn metadata_guppy_44b62fa() {
let metadata = JsonFixture::metadata_guppy_44b62fa();
metadata.verify();
// This is --no-deps metadata: check that there are no dependency edges at all.
let graph = metadata.graph();
let package = graph
.metadata(&PackageId::new(METADATA_GUPPY_CARGO_GUPPY))
.expect("cargo-guppy package found");
assert_eq!(
package.direct_links().count(),
0,
"no-deps => package has no direct links"
);
assert_eq!(graph.link_count(), 0, "no-deps => no edges");
}
proptest_suite!(metadata_guppy_44b62fa);
}
struct NameVisitor;
impl PackageDotVisitor for NameVisitor {
fn visit_package(&self, package: PackageMetadata<'_>, f: &mut DotWrite<'_, '_>) -> fmt::Result {
write!(f, "{}", package.name())
}
fn visit_link(&self, link: PackageLink<'_>, f: &mut DotWrite<'_, '_>) -> fmt::Result {
write!(f, "{}", link.dep_name())
}
}
| 33.010288 | 100 | 0.565667 |
ddf7b96b09dfcb5d1b647df3e1d7340223845744 | 16,314 | // Original work Copyright 2013 The Rust Project Developers.
// Modified work Copyright 2016 gcarq.
// See the LICENSE file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The `BufReader` is a drop-in replacement for `std::io::BufReader` with seeking support.
//!
//! If `.seek(SeekFrom::Current(n))` is called and `n` is in range of the internal buffer the
//! underlying reader is not invoked. This has the side effect that you can no longer access
//! the underlying buffer directly after being consumed by `BufReader`,
//! because its position could be out of sync.
//!
//! # Examples
//!
//! ```
//! use std::io::{self, Cursor, Read, Seek, SeekFrom};
//! use seek_bufread::BufReader;
//!
//! let inner = Cursor::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
//! let mut reader = BufReader::new(inner);
//!
//! reader.seek(SeekFrom::Current(4)).unwrap();
//! let mut buf = [0; 8];
//!
//! // read bytes from internal buffer
//! reader.read(&mut buf).unwrap();
//! assert_eq!(buf, [4, 5, 6, 7, 8, 9, 10, 11]);
//! ```
use std::fmt;
use std::io::{self, BufRead, Read, Seek, SeekFrom};
const DEFAULT_BUF_SIZE: usize = 8 * 1024;
/// The `BufReader` struct adds buffering to any reader.
///
/// It can be excessively inefficient to work directly with a `Read` instance.
/// For example, every call to `read` on `TcpStream` results in a system call.
/// A `BufReader` performs large, infrequent reads on the underlying `Read`
/// and maintains an in-memory buffer of the results.
///
/// # Examples
///
/// ```
/// use std::io::prelude::*;
/// use std::fs::File;
/// use seek_bufread::BufReader;
///
/// # fn foo() -> std::io::Result<()> {
/// let mut f = try!(File::open("log.txt"));
/// let mut reader = BufReader::new(f);
///
/// let mut line = String::new();
/// let len = try!(reader.read_line(&mut line));
/// println!("First line is {} bytes long", len);
/// # Ok(())
/// # }
/// ```
pub struct BufReader<R> {
inner: R, // internal reader
buf: Box<[u8]>, // internal buffer
buf_pos: usize, // position within buf
    cap: usize,            // number of valid bytes in buf
absolute_pos: u64, // absolute position
}
impl<R: Read + Seek> BufReader<R> {
/// Creates a new `BufReader` with a default buffer capacity (8192 bytes).
///
/// # Examples
///
/// ```
/// use std::fs::File;
/// use seek_bufread::BufReader;
///
/// # fn foo() -> std::io::Result<()> {
/// let mut f = try!(File::open("log.txt"));
/// let mut reader = BufReader::new(f);
/// # Ok(())
/// # }
/// ```
pub fn new(inner: R) -> BufReader<R> {
BufReader::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufReader` with the specified buffer capacity.
///
/// # Examples
///
/// Creating a buffer with ten bytes of capacity:
///
/// ```
/// use std::fs::File;
/// use seek_bufread::BufReader;
///
/// # fn foo() -> std::io::Result<()> {
/// let mut f = try!(File::open("log.txt"));
/// let mut reader = BufReader::with_capacity(10, f);
/// # Ok(())
/// # }
/// ```
pub fn with_capacity(cap: usize, inner: R) -> BufReader<R> {
BufReader {
inner: inner,
buf: vec![0; cap].into_boxed_slice(),
buf_pos: 0,
cap: 0,
absolute_pos: 0,
}
}
/// Returns the absolute file pointer position.
pub fn position(&self) -> u64 { self.absolute_pos }
    /// Returns the total buffer capacity.
    pub fn capacity(&self) -> usize { self.buf.len() }
/// Returns the current number of remaining bytes available in the buffer.
pub fn available(&self) -> usize {
self.cap.checked_sub(self.buf_pos).unwrap_or(0)
}
/// Consumes `self`, synchronizes the inner reader position and returns the inner reader.
pub fn into_inner(mut self) -> io::Result<R> {
// Sync position of internal reader
try!(self.inner.seek(SeekFrom::Start(self.absolute_pos)));
Ok(self.inner)
}
/// Syncs the position of our underlying reader and empties the buffer
fn sync_and_flush(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.buf_pos = self.cap;
self.absolute_pos = try!(self.inner.seek(pos));
Ok(self.absolute_pos)
}
/// Seeks `n` bytes backwards from current position
fn seek_backward(&mut self, n: i64) -> io::Result<u64> {
let n_abs = n.abs() as usize;
if self.buf_pos.checked_sub(n_abs).is_some() {
// Seek our internal buffer
self.absolute_pos -= n_abs as u64;
self.buf_pos -= n_abs;
Ok(self.absolute_pos)
} else {
// Out of scope. Seek inner reader to new position and reset buffer
let new_pos = self.absolute_pos - n_abs as u64;
self.sync_and_flush(SeekFrom::Start(new_pos))
}
}
/// Seeks `n` bytes forwards from current position
fn seek_forward(&mut self, n: usize) -> io::Result<u64> {
if self.available().checked_sub(n).is_some() {
self.consume(n);
Ok(self.absolute_pos)
} else {
// Out of scope. Seek inner reader to new position and reset buffer
let new_pos = self.absolute_pos + n as u64;
self.sync_and_flush(SeekFrom::Start(new_pos))
}
}
}
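// A worked sketch of the fast path (hypothetical): a relative seek that stays
// inside the buffered bytes only adjusts `buf_pos`/`absolute_pos`, while a
// seek past the buffered range falls back to `sync_and_flush`:
//
//     let mut r = BufReader::new(Cursor::new([0u8; 64]));
//     r.read(&mut [0u8; 1]).unwrap();          // one inner read fills the buffer
//     r.seek(SeekFrom::Current(3)).unwrap();   // served from the buffer
//     r.seek(SeekFrom::Current(100)).unwrap(); // flushes and seeks the inner reader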
impl<R: Read> Read for BufReader<R> {
/// Reads the next available bytes from buffer or inner stream.
/// Doesn't guarantee the whole buffer is filled.
/// Returns number of read bytes.
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let n_exp = buf.len();
let mut n_total = 0;
while n_total < n_exp {
let n_read = try!(try!(self.fill_buf()).read(&mut buf[n_total..]));
if n_read == 0 {
break;
}
self.consume(n_read);
n_total += n_read;
}
Ok(n_total)
}
}
impl<R: Read> BufRead for BufReader<R> {
fn fill_buf(&mut self) -> io::Result<&[u8]> {
// If we've reached the end of our internal buffer then we need to fetch
// some more data from the underlying reader.
if self.cap == self.buf_pos {
self.cap = try!(self.inner.read(&mut self.buf));
self.buf_pos = 0;
}
Ok(&self.buf[self.buf_pos..self.cap])
}
fn consume(&mut self, amt: usize) {
self.buf_pos += amt;
self.absolute_pos += amt as u64;
}
}
impl<R: Read + Seek> Seek for BufReader<R> {
/// Seek to an offset, in bytes, in the buffer or the underlying reader.
///
/// The position used for seeking with `SeekFrom::Current(_)` is the
/// current position of the underlying reader plus the current position
/// in the internal buffer.
///
    /// After a seek, the underlying reader is not guaranteed to be at the
    /// same position; it is only synchronized once the buffer is exhausted
    /// or `into_inner()` is called.
///
/// See `std::io::Seek` for more details.
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
match pos {
SeekFrom::Current(n) => {
                if n >= 0 {
                    self.seek_forward(n as usize)
                } else {
                    self.seek_backward(n)
                }
}
SeekFrom::Start(n) => {
// Check difference between actual and requested position
match n.checked_sub(self.absolute_pos) {
Some(n_bytes) => self.seek_forward(n_bytes as usize),
None => self.sync_and_flush(pos)
}
}
_ => self.sync_and_flush(pos)
}
}
}
impl<R> fmt::Debug for BufReader<R> where R: fmt::Debug + Read + Seek {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("BufReader")
.field("reader", &self.inner)
.field("available", &self.available())
.field("capacity", &self.cap)
.field("position", &self.absolute_pos)
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::{self, Cursor, Read, Seek, SeekFrom};
#[test]
fn default_behaviour() {
let mut reader = BufReader::new(Cursor::new([5, 6, 7, 0, 1, 2, 3, 4]));
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [5, 6, 7, 0, 1, 2, 3, 4]);
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [0, 0, 0, 0, 0, 0, 0, 0]);
}
#[test]
fn default_behaviour_std() {
let mut reader = io::BufReader::new(Cursor::new([5, 6, 7, 0, 1, 2, 3, 4]));
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [5, 6, 7, 0, 1, 2, 3, 4]);
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [0, 0, 0, 0, 0, 0, 0, 0]);
}
#[test]
fn small_capacity() {
let inner = Cursor::new([5, 6, 7, 0, 1, 2, 3, 4]);
let mut reader = BufReader::with_capacity(2, inner);
let mut buf = [0, 0, 0];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [5, 6, 7]);
let mut buf = [0, 0];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [0, 1]);
let mut buf = [0];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [2]);
}
#[test]
fn small_capacity_std() {
let inner = Cursor::new([5, 6, 7, 0, 1, 2, 3, 4]);
let mut reader = io::BufReader::with_capacity(2, inner);
let mut buf = [0, 0, 0];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [5, 6, 7]);
let mut buf = [0, 0];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [0, 1]);
let mut buf = [0];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [2]);
}
#[test]
fn seek_start() {
let inner = Cursor::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
let mut reader = BufReader::with_capacity(10, inner);
reader.seek(SeekFrom::Start(3)).unwrap();
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [3, 4, 5, 6, 7, 8, 9, 10]);
reader.seek(SeekFrom::Start(0)).unwrap();
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [0, 1, 2, 3, 4, 5, 6, 7]);
reader.seek(SeekFrom::Start(13)).unwrap();
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [13, 14, 15, 16, 0, 0, 0, 0]);
reader.seek(SeekFrom::Start(0)).unwrap();
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [0, 1, 2, 3, 4, 5, 6, 7]);
}
#[test]
fn seek_start_std() {
let inner = Cursor::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
let mut reader = io::BufReader::with_capacity(10, inner);
reader.seek(SeekFrom::Start(3)).unwrap();
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [3, 4, 5, 6, 7, 8, 9, 10]);
reader.seek(SeekFrom::Start(0)).unwrap();
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [0, 1, 2, 3, 4, 5, 6, 7]);
reader.seek(SeekFrom::Start(13)).unwrap();
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [13, 14, 15, 16, 0, 0, 0, 0]);
reader.seek(SeekFrom::Start(0)).unwrap();
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [0, 1, 2, 3, 4, 5, 6, 7]);
}
#[test]
fn seek_current_positive() {
let inner = Cursor::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
let mut reader = BufReader::with_capacity(20, inner);
reader.seek(SeekFrom::Current(2)).unwrap();
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [2, 3, 4, 5, 6, 7, 8, 9]);
reader.seek(SeekFrom::Current(6)).unwrap();
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [16, 0, 0, 0, 0, 0, 0, 0]);
}
#[test]
fn seek_current_positive_std() {
let inner = Cursor::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
let mut reader = io::BufReader::with_capacity(20, inner);
reader.seek(SeekFrom::Current(2)).unwrap();
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [2, 3, 4, 5, 6, 7, 8, 9]);
reader.seek(SeekFrom::Current(6)).unwrap();
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [16, 0, 0, 0, 0, 0, 0, 0]);
}
#[test]
fn seek_current_negative() {
let inner = Cursor::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
let mut reader = BufReader::with_capacity(3, inner);
reader.seek(SeekFrom::Current(4)).unwrap();
let mut buf = [0; 4];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [4, 5, 6, 7]);
reader.seek(SeekFrom::Current(-2)).unwrap();
let mut buf = [0; 4];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [6, 7, 8, 9]);
reader.seek(SeekFrom::Current(-4)).unwrap();
let mut buf = [0; 4];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [6, 7, 8, 9]);
}
#[test]
fn seek_current_negative_std() {
let inner = Cursor::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
let mut reader = io::BufReader::with_capacity(3, inner);
reader.seek(SeekFrom::Current(4)).unwrap();
let mut buf = [0; 4];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [4, 5, 6, 7]);
reader.seek(SeekFrom::Current(-2)).unwrap();
let mut buf = [0; 4];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [6, 7, 8, 9]);
reader.seek(SeekFrom::Current(-4)).unwrap();
let mut buf = [0; 4];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [6, 7, 8, 9]);
}
#[test]
fn seek_end() {
let inner = Cursor::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
let mut reader = BufReader::with_capacity(2, inner);
reader.seek(SeekFrom::End(-6)).unwrap();
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [11, 12, 13, 14, 15, 16, 0, 0]);
reader.seek(SeekFrom::End(0)).unwrap();
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [0, 0, 0, 0, 0, 0, 0, 0]);
}
#[test]
fn seek_end_std() {
let inner = Cursor::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
let mut reader = io::BufReader::with_capacity(2, inner);
reader.seek(SeekFrom::End(-6)).unwrap();
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [11, 12, 13, 14, 15, 16, 0, 0]);
reader.seek(SeekFrom::End(0)).unwrap();
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [0, 0, 0, 0, 0, 0, 0, 0]);
}
#[test]
fn into_inner() {
let inner = Cursor::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
let mut reader = BufReader::with_capacity(4, inner);
reader.seek(SeekFrom::Current(5)).unwrap();
let mut buf = [0; 8];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [5, 6, 7, 8, 9, 10, 11, 12]);
reader.seek(SeekFrom::Current(-2)).unwrap();
let mut buf = [0; 2];
reader.read(&mut buf).unwrap();
assert_eq!(buf, [11, 12]);
let mut inner = reader.into_inner().unwrap();
let mut buf = [0; 8];
inner.read(&mut buf).unwrap();
assert_eq!(buf, [13, 14, 15, 16, 0, 0, 0, 0]);
}
}
| 33.024291 | 93 | 0.534326 |
cc94447b14647376adf43ab3fa6fc87de1782731 | 364 | //! Chain Panic
/// Chainable panic on any `Sync + Send + 'static` value.
pub trait Panic: Sized + Sync + Send + 'static {
#[inline]
    /// Panics with `self` as the payload.
    fn panic(self) -> ! {
        // `panic!(self)` with a non-literal argument is rejected in the 2021
        // edition; `panic_any` preserves `self` itself as the panic payload.
        std::panic::panic_any(self)
}
}
impl<T: Sync + Send + 'static> Panic for T {}
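// A hypothetical sketch of recovering the payload after `catch_unwind`; the
// payload type is whatever `Self` was (here `&'static str`):
//
//     let err = std::panic::catch_unwind(|| "boom".panic()).unwrap_err();
//     assert_eq!(*err.downcast::<&str>().unwrap(), "boom");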
#[cfg(test)]
mod tests {
use super::*;
#[test]
#[should_panic(expected = "123")]
fn test_panic() {
"123".panic();
}
}
| 15.166667 | 48 | 0.505495 |
fcc652b8e535227bcaaef92df70e59523a61f122 | 19,184 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Paginator for [`ListCertificateAuthorities`](crate::operation::ListCertificateAuthorities)
pub struct ListCertificateAuthoritiesPaginator<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<crate::client::Handle<C, M, R>>,
builder: crate::input::list_certificate_authorities_input::Builder,
}
impl<C, M, R> ListCertificateAuthoritiesPaginator<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Create a new paginator-wrapper
pub(crate) fn new(
handle: std::sync::Arc<crate::client::Handle<C, M, R>>,
builder: crate::input::list_certificate_authorities_input::Builder,
) -> Self {
Self { handle, builder }
}
/// Set the page size
///
/// _Note: this method will override any previously set value for `max_results`_
pub fn page_size(mut self, limit: i32) -> Self {
self.builder.max_results = Some(limit);
self
}
/// Create a flattened paginator
///
/// This paginator automatically flattens results using `certificate_authorities`. Queries to the underlying service
/// are dispatched lazily.
pub fn items(self) -> crate::paginator::ListCertificateAuthoritiesPaginatorItems<C, M, R> {
crate::paginator::ListCertificateAuthoritiesPaginatorItems(self)
}
/// Create the pagination stream
///
/// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::output::ListCertificateAuthoritiesOutput,
aws_smithy_http::result::SdkError<crate::error::ListCertificateAuthoritiesError>,
>,
> + Unpin
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListCertificateAuthoritiesInputOperationOutputAlias,
crate::output::ListCertificateAuthoritiesOutput,
crate::error::ListCertificateAuthoritiesError,
crate::input::ListCertificateAuthoritiesInputOperationRetryAlias,
>,
{
// Move individual fields out of self for the borrow checker
let builder = self.builder;
let handle = self.handle;
aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
Box::pin(async move {
// Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
let mut input = match builder.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
}) {
Ok(input) => input,
Err(e) => {
let _ = tx.send(Err(e)).await;
return;
}
};
loop {
let op = match input.make_operation(&handle.conf).await.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
}) {
Ok(op) => op,
Err(e) => {
let _ = tx.send(Err(e)).await;
return;
}
};
let resp = handle.client.call(op).await;
                    // Pagination is done when the call failed, or when the new next token is absent or empty
let done = match resp {
Ok(ref resp) => {
let new_token = crate::lens::reflens_structure_crate_output_list_certificate_authorities_output_next_token(resp);
if new_token == input.next_token.as_ref() {
let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
return;
}
input.next_token = new_token.cloned();
input.next_token.as_deref().unwrap_or_default().is_empty()
}
Err(_) => true,
};
if tx.send(resp).await.is_err() {
// receiving end was dropped
return;
}
if done {
return;
}
}
})
})
}
}
/// Paginator for [`ListPermissions`](crate::operation::ListPermissions)
pub struct ListPermissionsPaginator<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<crate::client::Handle<C, M, R>>,
builder: crate::input::list_permissions_input::Builder,
}
impl<C, M, R> ListPermissionsPaginator<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Create a new paginator-wrapper
pub(crate) fn new(
handle: std::sync::Arc<crate::client::Handle<C, M, R>>,
builder: crate::input::list_permissions_input::Builder,
) -> Self {
Self { handle, builder }
}
/// Set the page size
///
/// _Note: this method will override any previously set value for `max_results`_
pub fn page_size(mut self, limit: i32) -> Self {
self.builder.max_results = Some(limit);
self
}
/// Create a flattened paginator
///
/// This paginator automatically flattens results using `permissions`. Queries to the underlying service
/// are dispatched lazily.
pub fn items(self) -> crate::paginator::ListPermissionsPaginatorItems<C, M, R> {
crate::paginator::ListPermissionsPaginatorItems(self)
}
/// Create the pagination stream
///
/// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::output::ListPermissionsOutput,
aws_smithy_http::result::SdkError<crate::error::ListPermissionsError>,
>,
> + Unpin
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListPermissionsInputOperationOutputAlias,
crate::output::ListPermissionsOutput,
crate::error::ListPermissionsError,
crate::input::ListPermissionsInputOperationRetryAlias,
>,
{
// Move individual fields out of self for the borrow checker
let builder = self.builder;
let handle = self.handle;
aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
Box::pin(async move {
// Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
let mut input = match builder.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
}) {
Ok(input) => input,
Err(e) => {
let _ = tx.send(Err(e)).await;
return;
}
};
loop {
let op = match input.make_operation(&handle.conf).await.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
}) {
Ok(op) => op,
Err(e) => {
let _ = tx.send(Err(e)).await;
return;
}
};
let resp = handle.client.call(op).await;
                    // Pagination is done when the call failed, or when the new next token is absent or empty
let done = match resp {
Ok(ref resp) => {
let new_token = crate::lens::reflens_structure_crate_output_list_permissions_output_next_token(resp);
if new_token == input.next_token.as_ref() {
let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
return;
}
input.next_token = new_token.cloned();
input.next_token.as_deref().unwrap_or_default().is_empty()
}
Err(_) => true,
};
if tx.send(resp).await.is_err() {
// receiving end was dropped
return;
}
if done {
return;
}
}
})
})
}
}
/// Paginator for [`ListTags`](crate::operation::ListTags)
pub struct ListTagsPaginator<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<crate::client::Handle<C, M, R>>,
builder: crate::input::list_tags_input::Builder,
}
impl<C, M, R> ListTagsPaginator<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Create a new paginator-wrapper
pub(crate) fn new(
handle: std::sync::Arc<crate::client::Handle<C, M, R>>,
builder: crate::input::list_tags_input::Builder,
) -> Self {
Self { handle, builder }
}
/// Set the page size
///
/// _Note: this method will override any previously set value for `max_results`_
pub fn page_size(mut self, limit: i32) -> Self {
self.builder.max_results = Some(limit);
self
}
/// Create a flattened paginator
///
/// This paginator automatically flattens results using `tags`. Queries to the underlying service
/// are dispatched lazily.
pub fn items(self) -> crate::paginator::ListTagsPaginatorItems<C, M, R> {
crate::paginator::ListTagsPaginatorItems(self)
}
/// Create the pagination stream
///
/// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::output::ListTagsOutput,
aws_smithy_http::result::SdkError<crate::error::ListTagsError>,
>,
> + Unpin
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListTagsInputOperationOutputAlias,
crate::output::ListTagsOutput,
crate::error::ListTagsError,
crate::input::ListTagsInputOperationRetryAlias,
>,
{
// Move individual fields out of self for the borrow checker
let builder = self.builder;
let handle = self.handle;
aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
Box::pin(async move {
// Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
let mut input = match builder.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
}) {
Ok(input) => input,
Err(e) => {
let _ = tx.send(Err(e)).await;
return;
}
};
loop {
let op = match input.make_operation(&handle.conf).await.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
}) {
Ok(op) => op,
Err(e) => {
let _ = tx.send(Err(e)).await;
return;
}
};
let resp = handle.client.call(op).await;
                    // Pagination is done when the call failed, or when the new next token is absent or empty
let done = match resp {
Ok(ref resp) => {
let new_token = crate::lens::reflens_structure_crate_output_list_tags_output_next_token(resp);
if new_token == input.next_token.as_ref() {
let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
return;
}
input.next_token = new_token.cloned();
input.next_token.as_deref().unwrap_or_default().is_empty()
}
Err(_) => true,
};
if tx.send(resp).await.is_err() {
// receiving end was dropped
return;
}
if done {
return;
}
}
})
})
}
}
/// Flattened paginator for `ListCertificateAuthoritiesPaginator`
///
/// This is created with [`.items()`](ListCertificateAuthoritiesPaginator::items)
pub struct ListCertificateAuthoritiesPaginatorItems<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
>(ListCertificateAuthoritiesPaginator<C, M, R>);
impl<C, M, R> ListCertificateAuthoritiesPaginatorItems<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Create the pagination stream
///
/// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
///
/// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
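    ///
    /// Sketch (same assumptions as the page-level paginator above):
    ///
    /// ```ignore
    /// use tokio_stream::StreamExt;
    /// let cas: Result<Vec<_>, _> = client
    ///     .list_certificate_authorities()
    ///     .into_paginator()
    ///     .items()
    ///     .send()
    ///     .collect()
    ///     .await;
    /// ```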
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::model::CertificateAuthority,
aws_smithy_http::result::SdkError<crate::error::ListCertificateAuthoritiesError>,
>,
> + Unpin
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListCertificateAuthoritiesInputOperationOutputAlias,
crate::output::ListCertificateAuthoritiesOutput,
crate::error::ListCertificateAuthoritiesError,
crate::input::ListCertificateAuthoritiesInputOperationRetryAlias,
>,
{
aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_list_certificate_authorities_output_certificate_authorities(page).unwrap_or_default().into_iter())
}
}
/// Flattened paginator for `ListPermissionsPaginator`
///
/// This is created with [`.items()`](ListPermissionsPaginator::items)
pub struct ListPermissionsPaginatorItems<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
>(ListPermissionsPaginator<C, M, R>);
impl<C, M, R> ListPermissionsPaginatorItems<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Create the pagination stream
///
/// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
///
/// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::model::Permission,
aws_smithy_http::result::SdkError<crate::error::ListPermissionsError>,
>,
> + Unpin
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListPermissionsInputOperationOutputAlias,
crate::output::ListPermissionsOutput,
crate::error::ListPermissionsError,
crate::input::ListPermissionsInputOperationRetryAlias,
>,
{
aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
crate::lens::lens_structure_crate_output_list_permissions_output_permissions(page)
.unwrap_or_default()
.into_iter()
})
}
}
/// Flattened paginator for `ListTagsPaginator`
///
/// This is created with [`.items()`](ListTagsPaginator::items)
pub struct ListTagsPaginatorItems<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
>(ListTagsPaginator<C, M, R>);
impl<C, M, R> ListTagsPaginatorItems<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Create the pagination stream
///
/// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
///
/// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::model::Tag,
aws_smithy_http::result::SdkError<crate::error::ListTagsError>,
>,
> + Unpin
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListTagsInputOperationOutputAlias,
crate::output::ListTagsOutput,
crate::error::ListTagsError,
crate::input::ListTagsInputOperationRetryAlias,
>,
{
aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
crate::lens::lens_structure_crate_output_list_tags_output_tags(page)
.unwrap_or_default()
.into_iter()
})
}
}
| 41.52381 | 231 | 0.57256 |
61cac03ec0bf4b06dfcca273b2a7ef211c6457b8 | 152 | extern crate slog_async;
extern crate slog_term;
pub mod binary_tree;
pub mod cell;
pub mod grid;
pub mod sidewinder;
pub mod utils;
pub use grid::*;
| 13.818182 | 24 | 0.756579 |
e46888b9839666fa09a425497952966518bea048 | 228 | #pragma version(1)
#pragma rs java_package_name(foo)
int root(uint32_t ain) {
return 0;
}
void in_only(uint32_t ain) {
}
int out_only() {
return 0;
}
int everything(uint32_t ain, uint32_t x, uint32_t y) {
return 0;
}
| 12 | 54 | 0.684211 |
8a830481e52002ee28c54b47cbde05a984d88882 | 543 | #[macro_use]
extern crate symbolics_core;
use symbolics_core::consts::e;
#[test]
fn quadratic_s() {
let quad = s!(a) * (s!(x) ^ 2.) + s!(b) * s!(x) + s!(c);
assert_eq!(format!("{:?}", quad),
"Add(Add(Mul(Symbol(\"a\"), Pow(Symbol(\"x\"), Num(2.0))), Mul(Symbol(\"b\"), Symbol(\"x\"))), Symbol(\"c\"))");;
}
#[test]
fn exponential_nref() {
let k = s!(k);
let t = s!(t);
let exp = e() ^ (-!&k * !&t);
assert_eq!(format!("{:?}", exp),
"Pow(Symbol(\"$e\"), Mul(Mul(Symbol(\"k\"), Num(-1.0)), Symbol(\"t\")))");
}
| 27.15 | 117 | 0.484346 |
8a8ef8d13ed644113c531ff9f52f81fd6c8d207d | 1,596 | #![warn(clippy::all, clippy::pedantic, clippy::nursery, clippy::cargo)]
#![feature(test)]
#![feature(destructuring_assignment)]
#![allow(non_snake_case)]
//! `truncnorm` provides (potentially) high-dimensional multivariate Normal
//! and TruncatedNormal distributions as well as low level binding to Gaussian
//! error functions.
//!
//! I've written all this code for my dissertation work. I've put
//! some effort into correctness and speed, but both could surely be improved.
//! Rely on this code at your own risk as no guarantees can be made about it.
//! Feel free to contact me if you're interested in using this for any purpose
//! or if it doesn't work 100%.
//!
//! Largely based on the Matlab [Truncated Normal and Student's t-distribution toolbox](https://www.mathworks.com/matlabcentral/fileexchange/53796-truncated-normal-and-student-s-t-distribution-toolbox)
//! and [Faddeeva C library](http://ab-initio.mit.edu/wiki/index.php/Faddeeva_Package)
//!
//! Cheers!
//! Eleanor Quint
#[cfg(feature = "openblas-system")]
extern crate blas_src;
extern crate ndarray;
extern crate ndarray_linalg;
extern crate ndarray_rand;
extern crate ndarray_stats;
extern crate statrs;
mod dist_util;
pub mod distributions;
mod faddeeva;
pub mod tilting;
pub mod truncnorm;
mod util;
/// `erf`/`erfc` family of error functions
///
/// Uses bindings to the [faddeeva](http://ab-initio.mit.edu/wiki/index.php/Faddeeva_Package)
/// C++ package and [statrs](https://crates.io/crates/statrs)
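///
/// A sketch of intended use (the scalar signature `erf(x: f64) -> f64` is an
/// assumption based on typical Faddeeva bindings, not verified against this
/// crate):
///
/// ```ignore
/// use truncnorm::gauss::erf;
/// assert!(erf(0.0).abs() < 1e-12); // erf(0) = 0
/// ```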
pub mod gauss {
pub use crate::faddeeva::{erf, erfc, erfcx};
pub use statrs::function::erf::erfc_inv;
}
| 37.116279 | 201 | 0.742481 |
b93bdb096450b993326121375912a4f5c00361ec | 1,288 | use std::io;
use std::cmp;
use std::collections::HashMap;
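// Reconstructed intent (the original problem statement is not included here):
// count subarrays of length < k whose sum is congruent to their length mod k.
// With prefix sums s, a subarray (i, j] qualifies iff
// (s[j] - j) % k == (s[i] - i) % k with j - i < k, so the loop slides a
// residue-count HashMap over a window of size k - 1.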
fn main() {
let (n, k) = {
let i = read::<usize>();
(i[0], i[1])
};
let an = read::<usize>();
let mut s = vec![0; n + 1];
s[1] = an[0];
for i in 2..n + 1 {
s[i] = s[i - 1] + an[i - 1];
}
let mut si = Vec::new();
for i in 0..n + 1 {
si.push((s[i] - i) % k);
}
let mut cnt = HashMap::new();
for i in 0..cmp::min(n + 1, k - 1) {
*cnt.entry(si[i]).or_insert(0) += 1;
}
let mut ans: usize = 0;
for i in 0..n + 1 {
*cnt.entry(si[i]).or_insert(0) -= 1;
if i + k <= n + 1 {
*cnt.entry(si[i + k - 1]).or_insert(0) += 1;
}
        // `cmp::max(x, 0)` is a no-op for usize counts, so read the count directly
        ans += *cnt.get(&si[i]).unwrap_or(&0);
}
println!("{}", ans);
}
#[allow(dead_code)]
fn read<T>() -> Vec<T>
where T:
std::str::FromStr,
T::Err: std::fmt::Debug {
let mut buf = String::new();
io::stdin().read_line(&mut buf).unwrap();
buf.split_whitespace()
.map(|s| s.trim().parse().unwrap())
.collect()
}
#[allow(dead_code)]
fn read_one<T>() -> T
where T:
std::str::FromStr,
T::Err: std::fmt::Debug {
let mut buf = String::new();
io::stdin().read_line(&mut buf).unwrap();
buf.trim().parse().unwrap()
}
| 20.125 | 59 | 0.465839 |
2101c9c9553881f184bb7bb3d65f666d66eb5b25 | 15,224 | /*
* Hurl (https://hurl.dev)
* Copyright (C) 2022 Orange
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
use std::collections::HashMap;
#[allow(unused)]
use std::io::prelude::*;
use std::path::Path;
use crate::http;
use hurl_core::ast::*;
use super::body::eval_body;
use super::core::Error;
use super::template::eval_template;
use super::value::Value;
use crate::runner::multipart::eval_multipart_param;
pub fn eval_request(
request: Request,
variables: &HashMap<String, Value>,
context_dir: &Path,
) -> Result<http::RequestSpec, Error> {
let method = eval_method(request.method.clone());
let url = eval_template(&request.url, variables)?;
// headers
let mut headers: Vec<http::Header> = vec![];
for header in request.clone().headers {
let name = header.key.value;
let value = eval_template(&header.value, variables)?;
headers.push(http::Header { name, value });
}
if let Some(kv) = request.clone().basic_auth() {
let value = eval_template(&kv.value, variables)?;
let user_password = format!("{}:{}", kv.key.value, value);
let authorization = base64::encode(user_password.as_bytes());
let name = "Authorization".to_string();
let value = format!("Basic {}", authorization);
headers.push(http::Header { name, value });
}
let mut querystring: Vec<http::Param> = vec![];
for param in request.clone().querystring_params() {
let name = param.key.value;
let value = eval_template(¶m.value, variables)?;
querystring.push(http::Param { name, value });
}
let mut form: Vec<http::Param> = vec![];
for param in request.clone().form_params() {
let name = param.key.value;
let value = eval_template(¶m.value, variables)?;
form.push(http::Param { name, value });
}
// if !self.clone().form_params().is_empty() {
// headers.push(http::ast::Header {
// name: String::from("Content-Type"),
// value: String::from("application/x-www-form-urlencoded"),
// });
// }
let mut cookies = vec![];
for cookie in request.clone().cookies() {
let value = eval_template(&cookie.value, variables)?;
let cookie = http::RequestCookie {
name: cookie.clone().name.value,
value,
};
cookies.push(cookie);
}
let body = match request.clone().body {
Some(body) => eval_body(body, variables, context_dir)?,
None => http::Body::Binary(vec![]),
};
let mut multipart = vec![];
for multipart_param in request.clone().multipart_form_data() {
let param = eval_multipart_param(multipart_param, variables, context_dir)?;
multipart.push(param);
}
let content_type = if !form.is_empty() {
Some("application/x-www-form-urlencoded".to_string())
} else if !multipart.is_empty() {
Some("multipart/form-data".to_string())
} else if let Some(Body {
value: Bytes::Json { .. },
..
}) = request.body
{
Some("application/json".to_string())
} else if let Some(Body {
value: Bytes::Xml { .. },
..
}) = request.body
{
Some("application/xml".to_string())
} else {
None
};
// add implicit content type
// if self.content_type().is_none() {
// if let Some(body) = self.body {
// if let Bytes::Json { .. } = body.value {
// headers.push(http::ast::Header {
// name: String::from("Content-Type"),
// value: String::from("application/json"),
// });
// }
// }
// }
Ok(http::RequestSpec {
method,
url,
headers,
querystring,
form,
multipart,
cookies,
body,
content_type,
})
}
// pub fn get_content_type(request: Request) -> Option<Template> {
// for header in request.headers.clone() {
// if header.key.value.to_lowercase().as_str() == "content-type" {
// return Some(header.value);
// }
// }
// None
// }
///
/// experimental feature
/// @cookie_storage_set
///
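/// A matching Hurl comment line, as exercised by the tests below, looks like:
/// `# @cookie_storage_set: localhost\tFALSE\t/\tFALSE\t0\tcookie1\tvalueA`
///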
pub fn cookie_storage_set(request: Request) -> Option<String> {
for line_terminator in request.line_terminators.iter() {
if let Some(s) = line_terminator.comment.clone() {
if s.value.contains("@cookie_storage_set:") {
let index = "#@cookie_storage_set:".to_string().len();
let value = &s.value[index..s.value.len()].to_string().trim().to_string();
return Some(value.to_string());
}
}
}
None
}
///
/// experimental feature
/// @cookie_storage_clear
///
pub fn cookie_storage_clear(request: Request) -> bool {
for line_terminator in request.line_terminators.iter() {
if let Some(s) = line_terminator.comment.clone() {
if s.value.contains("@cookie_storage_clear") {
return true;
}
}
}
false
}
fn eval_method(method: Method) -> http::Method {
match method {
Method::Get => http::Method::Get,
Method::Head => http::Method::Head,
Method::Post => http::Method::Post,
Method::Put => http::Method::Put,
Method::Delete => http::Method::Delete,
Method::Connect => http::Method::Connect,
Method::Options => http::Method::Options,
Method::Trace => http::Method::Trace,
Method::Patch => http::Method::Patch,
}
}
#[cfg(test)]
mod tests {
use hurl_core::ast::SourceInfo;
use super::super::core::RunnerError;
use super::*;
pub fn whitespace() -> Whitespace {
Whitespace {
value: String::from(" "),
source_info: SourceInfo::init(0, 0, 0, 0),
}
}
pub fn hello_request() -> Request {
let line_terminator = LineTerminator {
space0: whitespace(),
comment: None,
newline: whitespace(),
};
Request {
line_terminators: vec![],
space0: whitespace(),
method: Method::Get,
space1: whitespace(),
url: Template {
elements: vec![
TemplateElement::Expression(Expr {
space0: whitespace(),
variable: Variable {
name: String::from("base_url"),
source_info: SourceInfo::init(1, 7, 1, 15),
},
space1: whitespace(),
}),
TemplateElement::String {
value: String::from("/hello"),
encoded: String::from("/hello"),
},
],
quotes: false,
source_info: SourceInfo::init(0, 0, 0, 0),
},
line_terminator0: line_terminator,
headers: vec![],
sections: vec![],
body: None,
source_info: SourceInfo::init(0, 0, 0, 0),
}
}
pub fn simple_key_value(key: EncodedString, value: Template) -> KeyValue {
let line_terminator = LineTerminator {
space0: whitespace(),
comment: None,
newline: whitespace(),
};
KeyValue {
line_terminators: vec![],
space0: whitespace(),
key,
space1: whitespace(),
space2: whitespace(),
value,
line_terminator0: line_terminator,
}
}
pub fn query_request() -> Request {
let line_terminator = LineTerminator {
space0: whitespace(),
comment: None,
newline: whitespace(),
};
Request {
line_terminators: vec![],
space0: whitespace(),
method: Method::Get,
space1: whitespace(),
url: Template {
elements: vec![TemplateElement::String {
value: String::from("http://localhost:8000/querystring-params"),
encoded: String::from("http://localhost:8000/querystring-params"),
}],
quotes: false,
source_info: SourceInfo::init(0, 0, 0, 0),
},
line_terminator0: line_terminator.clone(),
headers: vec![],
sections: vec![Section {
line_terminators: vec![],
space0: whitespace(),
line_terminator0: line_terminator,
value: SectionValue::QueryParams(vec![
simple_key_value(
EncodedString {
quotes: false,
value: "param1".to_string(),
encoded: "param1".to_string(),
source_info: SourceInfo::init(0, 0, 0, 0),
},
Template {
quotes: false,
elements: vec![TemplateElement::Expression(Expr {
space0: whitespace(),
variable: Variable {
name: String::from("param1"),
source_info: SourceInfo::init(1, 7, 1, 15),
},
space1: whitespace(),
})],
source_info: SourceInfo::init(0, 0, 0, 0),
},
),
simple_key_value(
EncodedString {
quotes: false,
value: "param2".to_string(),
encoded: "param2".to_string(),
source_info: SourceInfo::init(0, 0, 0, 0),
},
Template {
quotes: false,
elements: vec![TemplateElement::String {
value: "a b".to_string(),
encoded: "a b".to_string(),
}],
source_info: SourceInfo::init(0, 0, 0, 0),
},
),
]),
source_info: SourceInfo::init(0, 0, 0, 0),
}],
body: None,
source_info: SourceInfo::init(0, 0, 0, 0),
}
}
#[test]
pub fn test_error_variable() {
let variables = HashMap::new();
let error = eval_request(hello_request(), &variables, Path::new(""))
.err()
.unwrap();
assert_eq!(error.source_info, SourceInfo::init(1, 7, 1, 15));
assert_eq!(
error.inner,
RunnerError::TemplateVariableNotDefined {
name: String::from("base_url")
}
);
}
#[test]
pub fn test_hello_request() {
let mut variables = HashMap::new();
variables.insert(
String::from("base_url"),
Value::String(String::from("http://localhost:8000")),
);
let http_request = eval_request(hello_request(), &variables, Path::new("")).unwrap();
assert_eq!(http_request, http::hello_http_request());
}
#[test]
pub fn test_query_request() {
let mut variables = HashMap::new();
variables.insert(
String::from("param1"),
Value::String(String::from("value1")),
);
let http_request = eval_request(query_request(), &variables, Path::new("")).unwrap();
assert_eq!(http_request, http::query_http_request());
}
#[test]
fn clear_cookie_store() {
assert!(!cookie_storage_clear(hello_request()));
let line_terminator = LineTerminator {
space0: whitespace(),
comment: None,
newline: whitespace(),
};
assert!(cookie_storage_clear(Request {
line_terminators: vec![LineTerminator {
space0: whitespace(),
comment: Some(Comment {
value: "@cookie_storage_clear".to_string()
}),
newline: whitespace(),
}],
space0: whitespace(),
method: Method::Get,
space1: whitespace(),
url: Template {
elements: vec![TemplateElement::String {
value: String::from("http:///localhost"),
encoded: String::from("http://localhost"),
},],
quotes: false,
source_info: SourceInfo::init(0, 0, 0, 0),
},
line_terminator0: line_terminator,
headers: vec![],
sections: vec![],
body: None,
source_info: SourceInfo::init(0, 0, 0, 0),
}));
}
#[test]
fn add_cookie_in_storage() {
assert_eq!(None, cookie_storage_set(hello_request()));
let line_terminator = LineTerminator {
space0: whitespace(),
comment: None,
newline: whitespace(),
};
assert_eq!(
Some("localhost\tFALSE\t/\tFALSE\t0\tcookie1\tvalueA".to_string()),
cookie_storage_set(Request {
line_terminators: vec![LineTerminator {
space0: whitespace(),
comment: Some(Comment {
value:
"@cookie_storage_set: localhost\tFALSE\t/\tFALSE\t0\tcookie1\tvalueA"
.to_string()
}),
newline: whitespace(),
}],
space0: whitespace(),
method: Method::Get,
space1: whitespace(),
url: Template {
elements: vec![TemplateElement::String {
value: String::from("http:///localhost"),
encoded: String::from("http://localhost"),
},],
quotes: false,
source_info: SourceInfo::init(0, 0, 0, 0),
},
line_terminator0: line_terminator,
headers: vec![],
sections: vec![],
body: None,
source_info: SourceInfo::init(0, 0, 0, 0),
})
);
}
}
| 33.607064 | 97 | 0.492446 |
28d717d59447027a929a18405ccc85fe2b4605c2 | 10,787 | #![warn(
clippy::all,
clippy::doc_markdown,
clippy::dbg_macro,
clippy::todo,
clippy::mem_forget,
// TODO: enable once the false positive bug is solved
// clippy::use_self,
clippy::filter_map_next,
clippy::needless_continue,
clippy::needless_borrow,
clippy::match_wildcard_for_single_variants,
clippy::mismatched_target_os,
clippy::match_on_vec_items,
clippy::imprecise_flops,
clippy::suboptimal_flops,
clippy::lossy_float_literal,
clippy::rest_pat_in_fully_bound_structs,
clippy::fn_params_excessive_bools,
clippy::inefficient_to_string,
clippy::linkedlist,
clippy::macro_use_imports,
clippy::option_option,
clippy::verbose_file_reads,
clippy::unnested_or_patterns,
rust_2018_idioms,
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
nonstandard_style,
unused_import_braces,
unused_qualifications,
)]
#![deny(
clippy::await_holding_lock,
clippy::if_let_mutex,
clippy::indexing_slicing,
clippy::mem_forget,
clippy::ok_expect,
clippy::unimplemented,
clippy::unwrap_used,
unsafe_code,
unstable_features,
unused_results
)]
#![allow(clippy::match_single_binding, clippy::inconsistent_struct_constructor)]
#[cfg(test)]
#[macro_use]
extern crate pretty_assertions;
mod add;
mod build;
mod cli;
mod compile_package;
mod config;
mod dependencies;
mod docs;
mod format;
mod fs;
mod hex;
mod http;
mod new;
mod panic;
mod project;
mod publish;
mod run;
mod shell;
use config::root_config;
pub use gleam_core::{
error::{Error, Result},
warning::Warning,
};
use gleam_core::{
build::{package_compiler, Target},
diagnostic::{self, Severity},
error::wrap,
hex::RetirementReason,
project::Analysed,
};
use hex::ApiKeyCommand as _;
use std::{
io::Write,
path::{Path, PathBuf},
};
use structopt::{clap::AppSettings, StructOpt};
use strum::VariantNames;
const VERSION: &str = env!("CARGO_PKG_VERSION");
#[derive(StructOpt, Debug)]
#[structopt(global_settings = &[AppSettings::ColoredHelp, AppSettings::VersionlessSubcommands])]
enum Command {
/// Build the project
Build {
/// Emit compile time warnings as errors
#[structopt(long)]
warnings_as_errors: bool,
},
/// Publish the project to the Hex package manager
Publish,
/// Render HTML documentation
Docs(Docs),
/// Work with dependency packages
Deps(Dependencies),
/// Work with the Hex package manager
Hex(Hex),
/// Create a new project
New(NewOptions),
/// Format source code
Format {
/// Files to format
#[structopt(default_value = ".")]
files: Vec<String>,
/// Read source from STDIN
#[structopt(long)]
stdin: bool,
/// Check if inputs are formatted without changing them
#[structopt(long)]
check: bool,
},
/// Start an Erlang shell
Shell,
/// Run the project
#[structopt(settings = &[AppSettings::TrailingVarArg])]
Run { arguments: Vec<String> },
/// Run the project tests
#[structopt(settings = &[AppSettings::TrailingVarArg])]
Test { arguments: Vec<String> },
/// Compile a single Gleam package
#[structopt(setting = AppSettings::Hidden)]
CompilePackage(CompilePackage),
/// Read and print gleam.toml for debugging
#[structopt(setting = AppSettings::Hidden)]
PrintConfig,
/// Add a new project dependency
Add {
package: String,
/// Add the package as a dev-only dependency
#[structopt(long)]
dev: bool,
},
}
#[derive(StructOpt, Debug, Clone)]
#[structopt(flatten)]
pub struct NewOptions {
/// Location of the project root
pub project_root: String,
/// Name of the project
#[structopt(long)]
pub name: Option<String>,
/// Description of the project
#[structopt(long, default_value = "A Gleam project")]
pub description: String,
#[structopt(
long,
possible_values = &new::Template::VARIANTS,
case_insensitive = true,
default_value = "lib"
)]
pub template: new::Template,
}
#[derive(StructOpt, Debug)]
#[structopt(flatten)]
pub struct CompilePackage {
/// The compilation target for the generated project
#[structopt(long, case_insensitive = true, default_value = "erlang")]
target: Target,
    /// The name of the package being compiled
#[structopt(long = "name")]
package_name: String,
/// A directory of source Gleam code
#[structopt(long = "src")]
src_directory: PathBuf,
/// A directory of test Gleam code
#[structopt(long = "test")]
test_directory: Option<PathBuf>,
/// A directory to write compiled code to
#[structopt(long = "out")]
output_directory: PathBuf,
/// A path to a compiled dependency library
#[structopt(long = "lib")]
libraries: Vec<PathBuf>,
}
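// A command line implied by the flags above (illustrative names and paths,
// not taken from the Gleam docs):
//
//     gleam compile-package --name my_package --target erlang \
//         --src src --out build/out --lib build/dep1 --lib build/dep2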
impl CompilePackage {
pub fn into_package_compiler_options(self) -> package_compiler::Options {
package_compiler::Options {
target: self.target,
name: self.package_name,
src_path: self.src_directory,
test_path: self.test_directory,
out_path: self.output_directory,
write_metadata: true,
}
}
}
#[derive(StructOpt, Debug)]
enum Dependencies {
/// List packages from the root config
List,
/// Download packages to the local cache
Download,
}
#[derive(StructOpt, Debug)]
enum Hex {
/// Retire a release from Hex
Retire {
package: String,
version: String,
#[structopt(possible_values = &RetirementReason::VARIANTS)]
reason: RetirementReason,
message: Option<String>,
},
/// Un-retire a release from Hex
Unretire { package: String, version: String },
}
#[derive(StructOpt, Debug)]
enum Docs {
/// Render HTML docs locally
Build,
/// Publish HTML docs to HexDocs
Publish,
/// Remove HTML docs from HexDocs
Remove {
/// The name of the package
#[structopt(long)]
package: String,
/// The version of the docs to remove
#[structopt(long)]
version: String,
},
}
fn main() {
initialise_logger();
panic::add_handler();
let stderr = cli::stderr_buffer_writer();
let result = match Command::from_args() {
Command::Build { warnings_as_errors } => command_build(&stderr, warnings_as_errors),
Command::Docs(Docs::Build) => docs::build(),
Command::Docs(Docs::Publish) => docs::PublishCommand::publish(),
Command::Docs(Docs::Remove { package, version }) => docs::remove(package, version),
Command::Format {
stdin,
files,
check,
} => format::run(stdin, check, files),
Command::Deps(Dependencies::List) => dependencies::list(),
Command::Deps(Dependencies::Download) => dependencies::download(None).map(|_| ()),
Command::New(options) => new::create(options, VERSION),
Command::Shell => shell::command(),
Command::Run { arguments } => run::command(&arguments, run::Which::Src),
Command::Test { arguments } => run::command(&arguments, run::Which::Test),
Command::CompilePackage(opts) => compile_package::command(opts),
Command::Publish => publish::command(),
Command::PrintConfig => print_config(),
Command::Hex(Hex::Retire {
package,
version,
reason,
message,
}) => hex::RetireCommand::new(package, version, reason, message).run(),
Command::Hex(Hex::Unretire { package, version }) => {
hex::UnretireCommand::new(package, version).run()
}
Command::Add { package, dev } => add::command(package, dev),
};
match result {
Ok(_) => {
tracing::info!("Successfully completed");
}
Err(error) => {
tracing::error!(error = ?error, "Failed");
let mut buffer = stderr.buffer();
error.pretty(&mut buffer);
stderr.print(&buffer).expect("Final result error writing");
std::process::exit(1);
}
}
}
const REBAR_DEPRECATION_NOTICE: &str = "The built-in rebar3 support is deprecated and will \
be removed in a future version of Gleam.
Please switch to the new Gleam build tool or update your project to use the new `gleam \
compile-package` API with your existing build tool.
";
fn command_build(stderr: &termcolor::BufferWriter, warnings_as_errors: bool) -> Result<(), Error> {
let mut buffer = stderr.buffer();
let root = Path::new("./");
// Use new build tool if not in a rebar or mix project
if !root.join("rebar.config").exists() && !root.join("mix.exs").exists() {
return build::main().map(|_| ());
}
diagnostic::write_title(
&mut buffer,
"Deprecated rebar3 build command",
Severity::Warning,
);
buffer
.write_all(wrap(REBAR_DEPRECATION_NOTICE).as_bytes())
.expect("rebar deprecation message");
buffer.flush().expect("flush");
stderr
.print(&buffer)
.expect("command_build_rebar_deprecated_write");
// Read and type check project
let (_config, analysed) = project::read_and_analyse(&root)?;
// Generate Erlang code
let output_files = gleam_core::erlang::generate_erlang(&analysed);
// Print warnings
let warning_count = print_warnings(&analysed);
// Exit if warnings_as_errors and warnings
if warnings_as_errors && warning_count > 0 {
return Err(Error::ForbiddenWarnings {
count: warning_count,
});
}
// Reset output directory
fs::delete_dir(&root.join(project::OUTPUT_DIR_NAME))?;
// Delete the gen directory before generating the newly compiled files
fs::write_outputs(&output_files)?;
println!("Done!");
Ok(())
}
fn print_config() -> Result<()> {
let config = root_config()?;
println!("{:#?}", config);
Ok(())
}
fn initialise_logger() {
tracing_subscriber::fmt()
.with_env_filter(&std::env::var("GLEAM_LOG").unwrap_or_else(|_| "off".to_string()))
.with_target(false)
.without_time()
.init();
}
fn print_warnings(analysed: &[Analysed]) -> usize {
analysed
.iter()
.flat_map(|a| &a.warnings)
.inspect(|w| print_warning(w))
.count()
}
fn print_warning(w: &Warning) {
let buffer_writer = cli::stderr_buffer_writer();
let mut buffer = buffer_writer.buffer();
w.pretty(&mut buffer);
#[allow(clippy::unwrap_used)]
buffer_writer.print(&buffer).unwrap();
}
| 25.262295 | 99 | 0.629461 |
4af8c742bc24cadbbf1730062b9706183f539a86 | 950 | use laythe_env::{
fs::{Fs, FsImpl, SlDirEntry},
io::IoImpl,
};
use std::{
fs::{canonicalize, read_to_string},
io,
path::{Path, PathBuf},
};
#[derive(Debug)]
pub struct IoFsNative();
impl IoImpl<Fs> for IoFsNative {
fn make(&self) -> Fs {
Fs::new(Box::new(FsNative()))
}
}
#[derive(Clone)]
pub struct FsNative();
impl Default for FsNative {
fn default() -> Self {
Self()
}
}
impl FsImpl for FsNative {
fn read_to_string(&self, path: &Path) -> io::Result<String> {
read_to_string(path)
}
fn canonicalize(&self, path: &Path) -> io::Result<PathBuf> {
canonicalize(path)
}
fn read_directory(&self, _path: &Path) -> io::Result<SlDirEntry> {
todo!()
}
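  /// Expresses `import` relative to `base`; returns an `InvalidInput` error if
  /// `import` does not live under `base`.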
fn relative_path(&self, base: &Path, import: &Path) -> io::Result<PathBuf> {
import
.strip_prefix(base)
.map(|prefix| prefix.to_path_buf())
.map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err.to_string()))
}
}
| 19.387755 | 82 | 0.62 |
ac0914795924cd3ff4f81e3e661d9b3f746c7dfe | 6,241 | //! Interaction with the Cloudflare API.
//!
//! This module implements the necessary GET and PUT
//! requests to update a DNS record programmatically.
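//!
//! The intended flow, matching the items defined below: build a [`Client`]
//! from a blocking `reqwest` client and an API token, resolve the zone with
//! `Client::get_zone_id`, fetch the record with `Client::get_dns_record`,
//! update it via `Record::set_ip`, then persist it with `Client::put_dns_record`.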
use std::any;
use std::error;
use std::fmt;
use std::net;
use anyhow::anyhow;
use anyhow::Context;
use serde::Deserialize;
use serde::Serialize;
static CLOUDFLARE_API_URL: &str = "https://api.cloudflare.com/client/v4/";
pub struct Client<'c> {
token: String,
inner: &'c reqwest::blocking::Client,
}
/// Represents the response from `GET /zones`:
///
/// https://api.cloudflare.com/#zone-list-zones
#[derive(Debug, Deserialize)]
struct Zones {
#[serde(flatten)]
meta: Meta,
result: Vec<Zone>,
}
impl From<Zones> for anyhow::Result<Vec<Zone>> {
fn from(zones: Zones) -> anyhow::Result<Vec<Zone>> {
anyhow::Result::<()>::from(zones.meta)?;
Ok(zones.result)
}
}
/// Represents a single zone from `GET /zones`.
#[derive(Debug, Deserialize)]
struct Zone {
id: ZoneID,
name: String,
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(transparent)]
pub struct ZoneID(String);
impl fmt::Display for ZoneID {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", self.0)
}
}
/// Represents the response from `GET zones/:zone_identifier/dns_records`:
///
/// https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records
#[derive(Debug, Deserialize)]
struct Records {
#[serde(flatten)]
meta: Meta,
result: Vec<Record<String>>,
}
impl From<Records> for anyhow::Result<Vec<Record<String>>> {
fn from(records: Records) -> anyhow::Result<Vec<Record<String>>> {
anyhow::Result::<()>::from(records.meta)?;
Ok(records.result)
}
}
/// Represents a single DNS record from `GET zones/:zone_identifier/dns_records`.
#[derive(Debug, Serialize, Deserialize)]
pub struct Record<C> {
id: RecordID,
r#type: String,
name: String,
content: C,
ttl: serde_json::Value,
proxied: bool,
}
impl Record<net::IpAddr> {
pub fn id(&self) -> &RecordID {
&self.id
}
pub fn ip(&self) -> net::IpAddr {
self.content
}
pub fn set_ip(&mut self, ip: net::IpAddr) {
self.content = ip;
}
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(transparent)]
pub struct RecordID(String);
impl fmt::Display for RecordID {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", self.0)
}
}
#[derive(Debug, Deserialize)]
struct Meta {
success: bool,
errors: serde_json::Value,
messages: serde_json::Value,
}
impl fmt::Display for Meta {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
writeln!(fmt, "{:#}", self.errors)?;
write!(fmt, "{:#}", self.messages)
}
}
impl error::Error for Meta {}
impl From<Meta> for anyhow::Result<()> {
fn from(meta: Meta) -> anyhow::Result<()> {
if meta.success {
Ok(())
} else {
Err(anyhow!(meta))
}
}
}
impl<'c> Client<'c> {
pub fn new(inner: &'c reqwest::blocking::Client, token: String) -> Self {
Client {
inner,
token,
}
}
/// Retrieve the ID of a single zone.
pub fn get_zone_id(&self, zone_name: &str) -> anyhow::Result<ZoneID> {
let mut zones = self
.get::<Zones>("zones")
.with_context(|| format!("Could not get ID of zone {}", zone_name))
.and_then(anyhow::Result::<Vec<Zone>>::from)?;
zones.retain(|zone| &zone.name == zone_name);
match zones.len() {
| 0 => Err(anyhow!("No matching zones found")),
| 1 => Ok(zones.remove(0).id),
| _ => unreachable!(),
}
}
pub fn get_dns_record(
&self,
zone_name: &str,
zone_id: &ZoneID,
record_name: &str,
) -> anyhow::Result<Record<net::IpAddr>> {
let mut records = self
.get::<Records>(&format!("zones/{}/dns_records", zone_id))
.with_context(|| format!("Could not get DNS record for {} in zone {}", record_name, zone_name))
.and_then(anyhow::Result::<Vec<Record<String>>>::from)?;
records.retain(|record| &record.name == record_name);
match records.len() {
| 0 => Err(anyhow!("No matching DNS records found")),
| 1 => {
let record = records.remove(0);
let ip = record.content
.parse::<net::IpAddr>()
.with_context(|| format!("Could not parse content of DNS record for {}", record_name))?;
Ok(Record {
id: record.id,
r#type: record.r#type,
name: record.name,
content: ip,
ttl: record.ttl,
proxied: record.proxied,
})
}
| _ => unreachable!(),
}
}
pub fn put_dns_record(
&self,
zone_id: &ZoneID,
zone_name: &str,
record_id: &RecordID,
record_name: &str,
record: &Record<net::IpAddr>,
) -> anyhow::Result<()> {
self.put(&format!("zones/{}/dns_records/{}", zone_id, record_id), record)
.with_context(|| format!("Could not PUT DNS record for {} in zone {}", record_name, zone_name))
}
fn get<T: serde::de::DeserializeOwned>(&self, route: &str) -> anyhow::Result<T> {
let url = format!("{}{}", CLOUDFLARE_API_URL, route);
self.inner
.get(&url)
.bearer_auth(&self.token)
.send()
.with_context(|| format!("Failed to send GET request to {}", url))?
.json()
.with_context(|| format!("Failed to parse JSON as {} from {}", any::type_name::<T>(), url))
}
fn put<T: serde::Serialize>(&self, route: &str, data: &T) -> anyhow::Result<()> {
let url = format!("{}{}", CLOUDFLARE_API_URL, route);
self.inner
.put(&url)
.bearer_auth(&self.token)
.json(data)
.send()
.with_context(|| format!("Failed to send PUT request to {}", url))?
.error_for_status()
.with_context(|| format!("Received error response for PUT request to {}", url))
.map(drop)
}
}
| 27.372807 | 107 | 0.559045 |
11021f9d7b5e93869b276945189df4338ea6efea | 1,243 | use uuid::Uuid;
use crate::core::command::CommandHandler;
use crate::core::eventstore;
use crate::core::eventstore::AggregateStore;
use crate::core::result::Result;
use crate::shopping::domain::order::{Order, OrderEvent};
pub struct CreateOrderCommand {
pub customer_id: Uuid,
pub session_id: Uuid,
}
#[async_trait]
impl CommandHandler<Result<Uuid>> for CreateOrderCommand {
async fn execute(&self) -> Result<Uuid> {
let order_id = Uuid::new_v4();
let order = Order::new(&order_id, &self.customer_id);
let store: AggregateStore = eventstore::AggregateStore::create().await;
store.save(&order_id.to_string(), order).await?;
Result::Ok(order_id)
}
}
pub struct OrderSubmitPaymentCommand {
pub order_id: Uuid,
pub payment_id: Uuid,
}
#[async_trait]
impl CommandHandler<Result<()>> for OrderSubmitPaymentCommand {
async fn execute(&self) -> Result<()> {
let store: AggregateStore = eventstore::AggregateStore::create().await;
let mut order = store
.load::<Order, OrderEvent>(self.order_id.to_string())
.await?;
order.submit_payment(&self.payment_id)?;
store.save(&self.order_id.to_string(), order).await
}
}
| 26.446809 | 79 | 0.668544 |
62d7db4b5563262b2cb7a97cbf258d4f86cbf2b4 | 170,109 | // Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
//! Interact with persistent storage.
//!
//! The [`Storage`](storage::Storage) structure provides raw and transactional APIs on top of
//! a lower-level [`Engine`](storage::kv::Engine).
//!
//! There are multiple [`Engine`](storage::kv::Engine) implementations, [`RaftKv`](server::raftkv::RaftKv)
//! is used by the [`Server`](server::Server). The [`BTreeEngine`](storage::kv::BTreeEngine) and
//! [`RocksEngine`](storage::RocksEngine) are used for testing only.
pub mod config;
pub mod errors;
pub mod kv;
pub mod lock_manager;
pub(crate) mod metrics;
pub mod mvcc;
pub mod txn;
mod read_pool;
mod types;
pub use self::{
errors::{get_error_kind_from_header, get_tag_from_header, Error, ErrorHeaderKind, ErrorInner},
kv::{
CbContext, CfStatistics, Cursor, Engine, FlowStatistics, FlowStatsReporter, Iterator,
RocksEngine, ScanMode, Snapshot, Statistics, TestEngineBuilder,
},
read_pool::{build_read_pool, build_read_pool_for_test},
txn::{ProcessResult, Scanner, SnapshotStore, Store},
types::{PessimisticLockRes, StorageCallback, TxnStatus},
};
use crate::read_pool::{ReadPool, ReadPoolHandle};
use crate::storage::metrics::CommandKind;
use crate::storage::{
config::Config,
kv::{with_tls_engine, Modify, WriteData},
lock_manager::{DummyLockManager, LockManager},
metrics::*,
txn::{commands::TypedCommand, scheduler::Scheduler as TxnScheduler, Command},
types::StorageCallbackType,
};
use engine_traits::{CfName, ALL_CFS, CF_DEFAULT, DATA_CFS};
use engine_traits::{IterOptions, DATA_KEY_PREFIX_LEN};
use futures::Future;
use futures03::prelude::*;
use kvproto::kvrpcpb::{CommandPri, Context, GetRequest, KeyRange, RawGetRequest};
use raftstore::store::util::build_key_range;
use rand::prelude::*;
use std::sync::{atomic, Arc};
use tikv_util::time::Instant;
use txn_types::{Key, KvPair, TimeStamp, TsSet, Value};
pub type Result<T> = std::result::Result<T, Error>;
pub type Callback<T> = Box<dyn FnOnce(Result<T>) + Send>;
/// [`Storage`] implements transactional KV APIs and raw KV APIs on a given [`Engine`]. An [`Engine`]
/// provides low level KV functionality. [`Engine`] has multiple implementations. When a TiKV server
/// is running, a [`RaftKv`] will be the underlying [`Engine`] of [`Storage`]. The other two types of
/// engines are for test purposes.
///
/// [`Storage`] is reference counted and cloning [`Storage`] will just increase the reference counter.
/// Storage resources (i.e. threads, engine) will be released when all references are dropped.
///
/// Notice that read and write methods may not be performed over full data in most cases, i.e. when
/// underlying engine is [`RaftKv`], which limits data access in the range of a single region
/// according to specified `ctx` parameter. However,
/// [`unsafe_destroy_range`](Storage::unsafe_destroy_range) is the only exception. It's
/// always performed on the whole TiKV.
///
/// Operations of [`Storage`] can be divided into two types: MVCC operations and raw operations.
/// MVCC operations use MVCC keys, which usually consist of several physical keys in different
/// CFs. In default CF and write CF, the key will be memcomparable-encoded and append the timestamp
/// to it, so that multiple versions can be saved at the same time.
/// Raw operations use raw keys, which are saved directly to the engine without memcomparable-
/// encoding and appending timestamp.
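/// For example, a versioned value for user key `k` committed at timestamp `t`
/// ends up under roughly `memcomparable(k) + encoded(t)` (a sketch of the layout;
/// see the `txn_types::Key` helpers for the exact encoding), so all versions of
/// one user key sort next to each other.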
pub struct Storage<E: Engine, L: LockManager> {
// TODO: Too many Arcs, would be slow when clone.
engine: E,
sched: TxnScheduler<E, L>,
/// The thread pool used to run most read operations.
read_pool: ReadPoolHandle,
/// How many strong references. Thread pool and workers will be stopped
/// once there are no more references.
// TODO: This should be implemented in thread pool and worker.
refs: Arc<atomic::AtomicUsize>,
// Fields below are storage configurations.
max_key_size: usize,
pessimistic_txn_enabled: bool,
}
impl<E: Engine, L: LockManager> Clone for Storage<E, L> {
#[inline]
fn clone(&self) -> Self {
let refs = self.refs.fetch_add(1, atomic::Ordering::SeqCst);
trace!(
"Storage referenced"; "original_ref" => refs
);
Self {
engine: self.engine.clone(),
sched: self.sched.clone(),
read_pool: self.read_pool.clone(),
refs: self.refs.clone(),
max_key_size: self.max_key_size,
pessimistic_txn_enabled: self.pessimistic_txn_enabled,
}
}
}
impl<E: Engine, L: LockManager> Drop for Storage<E, L> {
#[inline]
fn drop(&mut self) {
let refs = self.refs.fetch_sub(1, atomic::Ordering::SeqCst);
trace!(
"Storage de-referenced"; "original_ref" => refs
);
if refs != 1 {
return;
}
info!("Storage stopped.");
}
}
macro_rules! check_key_size {
($key_iter: expr, $max_key_size: expr, $callback: ident) => {
for k in $key_iter {
let key_size = k.len();
if key_size > $max_key_size {
$callback(Err(Error::from(ErrorInner::KeyTooLarge(
key_size,
$max_key_size,
))));
return Ok(());
}
}
};
}
impl<E: Engine, L: LockManager> Storage<E, L> {
/// Create a `Storage` from given engine.
pub fn from_engine(
engine: E,
config: &Config,
read_pool: ReadPoolHandle,
lock_mgr: Option<L>,
pipelined_pessimistic_lock: bool,
) -> Result<Self> {
let pessimistic_txn_enabled = lock_mgr.is_some();
let sched = TxnScheduler::new(
engine.clone(),
lock_mgr,
config.scheduler_concurrency,
config.scheduler_worker_pool_size,
config.scheduler_pending_write_threshold.0 as usize,
pipelined_pessimistic_lock,
);
info!("Storage started.");
Ok(Storage {
engine,
sched,
read_pool,
refs: Arc::new(atomic::AtomicUsize::new(1)),
max_key_size: config.max_key_size,
pessimistic_txn_enabled,
})
}
/// Get the underlying `Engine` of the `Storage`.
pub fn get_engine(&self) -> E {
self.engine.clone()
}
fn snapshot(engine: &E, ctx: &Context) -> impl std::future::Future<Output = Result<E::Snap>> {
kv::snapshot(engine, ctx)
.map_err(txn::Error::from)
.map_err(Error::from)
}
#[inline]
fn with_tls_engine<F, R>(f: F) -> R
where
F: FnOnce(&E) -> R,
{
// Safety: the read pools ensure that a TLS engine exists.
unsafe { with_tls_engine(f) }
}
/// Get value of the given key from a snapshot.
///
/// Only writes that are committed before `start_ts` are visible.
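    ///
    /// A call-site sketch in the futures-0.1 style used throughout this file
    /// (the driving pattern is assumed, not taken from upstream docs):
    ///
    /// ```ignore
    /// let value: Option<Value> = storage
    ///     .get(Context::default(), Key::from_raw(b"k"), 100.into())
    ///     .wait()?;
    /// ```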
pub fn get(
&self,
mut ctx: Context,
key: Key,
start_ts: TimeStamp,
) -> impl Future<Item = Option<Value>, Error = Error> {
const CMD: CommandKind = CommandKind::get;
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let res = self.read_pool.spawn_handle(
async move {
if let Ok(key) = key.to_owned().into_raw() {
tls_collect_qps(ctx.get_region_id(), ctx.get_peer(), &key, &key, false);
}
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
let command_duration = tikv_util::time::Instant::now_coarse();
// The bypass_locks set will be checked at most once. `TsSet::vec` is more efficient
// here.
let bypass_locks = TsSet::vec_from_u64s(ctx.take_resolved_locks());
let snapshot = Self::with_tls_engine(|engine| Self::snapshot(engine, &ctx)).await?;
{
let begin_instant = Instant::now_coarse();
let mut statistics = Statistics::default();
let snap_store = SnapshotStore::new(
snapshot,
start_ts,
ctx.get_isolation_level(),
!ctx.get_not_fill_cache(),
bypass_locks,
false,
);
let result = snap_store
.get(&key, &mut statistics)
// map storage::txn::Error -> storage::Error
.map_err(Error::from)
.map(|r| {
KV_COMMAND_KEYREAD_HISTOGRAM_STATIC.get(CMD).observe(1_f64);
r
});
metrics::tls_collect_scan_details(CMD, &statistics);
metrics::tls_collect_read_flow(ctx.get_region_id(), &statistics);
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.elapsed_secs());
result
}
},
priority,
thread_rng().next_u64(),
);
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.flatten()
}
    /// Get values of a set of keys with separate contexts from a snapshot, returning a list of `Result`s.
///
/// Only writes that are committed before their respective `start_ts` are visible.
pub fn batch_get_command(
&self,
gets: Vec<PointGetCommand>,
) -> impl Future<Item = Vec<Result<Option<Vec<u8>>>>, Error = Error> {
const CMD: CommandKind = CommandKind::batch_get_command;
// all requests in a batch have the same region, epoch, term, replica_read
let ctx = gets[0].ctx.clone();
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let res = self.read_pool.spawn_handle(
async move {
for get in &gets {
if let Ok(key) = get.key.to_owned().into_raw() {
tls_collect_qps(
get.ctx.get_region_id(),
get.ctx.get_peer(),
&key,
&key,
false,
);
}
}
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
let command_duration = tikv_util::time::Instant::now_coarse();
let snapshot = Self::with_tls_engine(|engine| Self::snapshot(engine, &ctx)).await?;
{
let begin_instant = Instant::now_coarse();
let mut statistics = Statistics::default();
let mut snap_store = SnapshotStore::new(
snapshot,
TimeStamp::zero(),
ctx.get_isolation_level(),
!ctx.get_not_fill_cache(),
Default::default(),
false,
);
let mut results = vec![];
// TODO: optimize using seek.
for mut get in gets {
snap_store.set_start_ts(get.ts.unwrap());
snap_store.set_isolation_level(get.ctx.get_isolation_level());
// The bypass_locks set will be checked at most once. `TsSet::vec`
// is more efficient here.
snap_store
.set_bypass_locks(TsSet::vec_from_u64s(get.ctx.take_resolved_locks()));
results.push(
snap_store
.get(&get.key, &mut statistics)
.map_err(Error::from),
);
}
metrics::tls_collect_scan_details(CMD, &statistics);
metrics::tls_collect_read_flow(ctx.get_region_id(), &statistics);
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.elapsed_secs());
Ok(results)
}
},
priority,
thread_rng().next_u64(),
);
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.flatten()
}
/// Get values of a set of keys in a batch from the snapshot.
///
/// Only writes that are committed before `start_ts` are visible.
pub fn batch_get(
&self,
mut ctx: Context,
keys: Vec<Key>,
start_ts: TimeStamp,
) -> impl Future<Item = Vec<Result<KvPair>>, Error = Error> {
const CMD: CommandKind = CommandKind::batch_get;
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let res = self.read_pool.spawn_handle(
async move {
let mut key_ranges = vec![];
for key in &keys {
if let Ok(key) = key.to_owned().into_raw() {
key_ranges.push(build_key_range(&key, &key, false));
}
}
tls_collect_qps_batch(ctx.get_region_id(), ctx.get_peer(), key_ranges);
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
let command_duration = tikv_util::time::Instant::now_coarse();
let bypass_locks = TsSet::from_u64s(ctx.take_resolved_locks());
let snapshot = Self::with_tls_engine(|engine| Self::snapshot(engine, &ctx)).await?;
{
let begin_instant = Instant::now_coarse();
let mut statistics = Statistics::default();
let snap_store = SnapshotStore::new(
snapshot,
start_ts,
ctx.get_isolation_level(),
!ctx.get_not_fill_cache(),
bypass_locks,
false,
);
let result = snap_store
.batch_get(&keys, &mut statistics)
.map_err(Error::from)
.map(|v| {
let kv_pairs: Vec<_> = v
.into_iter()
.zip(keys)
.filter(|&(ref v, ref _k)| {
!(v.is_ok() && v.as_ref().unwrap().is_none())
})
.map(|(v, k)| match v {
Ok(Some(x)) => Ok((k.into_raw().unwrap(), x)),
Err(e) => Err(Error::from(e)),
_ => unreachable!(),
})
.collect();
KV_COMMAND_KEYREAD_HISTOGRAM_STATIC
.get(CMD)
.observe(kv_pairs.len() as f64);
kv_pairs
});
metrics::tls_collect_scan_details(CMD, &statistics);
metrics::tls_collect_read_flow(ctx.get_region_id(), &statistics);
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.elapsed_secs());
result
}
},
priority,
thread_rng().next_u64(),
);
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.flatten()
}
/// Scan keys in [`start_key`, `end_key`) up to `limit` keys from the snapshot.
///
/// If `end_key` is `None`, it means the upper bound is unbounded.
///
/// Only writes committed before `start_ts` are visible.
pub fn scan(
&self,
mut ctx: Context,
start_key: Key,
end_key: Option<Key>,
limit: usize,
start_ts: TimeStamp,
key_only: bool,
reverse_scan: bool,
) -> impl Future<Item = Vec<Result<KvPair>>, Error = Error> {
const CMD: CommandKind = CommandKind::scan;
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let res = self.read_pool.spawn_handle(
async move {
if let Ok(start_key) = start_key.to_owned().into_raw() {
let mut key = vec![];
if let Some(end_key) = &end_key {
if let Ok(end_key) = end_key.to_owned().into_raw() {
key = end_key;
}
}
tls_collect_qps(
ctx.get_region_id(),
ctx.get_peer(),
&start_key,
&key,
reverse_scan,
);
}
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
let command_duration = tikv_util::time::Instant::now_coarse();
let bypass_locks = TsSet::from_u64s(ctx.take_resolved_locks());
let snapshot = Self::with_tls_engine(|engine| Self::snapshot(engine, &ctx)).await?;
{
let begin_instant = Instant::now_coarse();
let snap_store = SnapshotStore::new(
snapshot,
start_ts,
ctx.get_isolation_level(),
!ctx.get_not_fill_cache(),
bypass_locks,
false,
);
                let mut scanner = if !reverse_scan {
                    snap_store.scanner(false, key_only, false, Some(start_key), end_key)?
                } else {
                    snap_store.scanner(true, key_only, false, end_key, Some(start_key))?
                };
let res = scanner.scan(limit);
let statistics = scanner.take_statistics();
metrics::tls_collect_scan_details(CMD, &statistics);
metrics::tls_collect_read_flow(ctx.get_region_id(), &statistics);
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.elapsed_secs());
res.map_err(Error::from).map(|results| {
KV_COMMAND_KEYREAD_HISTOGRAM_STATIC
.get(CMD)
.observe(results.len() as f64);
results
.into_iter()
.map(|x| x.map_err(Error::from))
.collect()
})
}
},
priority,
thread_rng().next_u64(),
);
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.flatten()
}
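/// Schedule a transactional command for asynchronous execution, delivering the
/// result through `callback`.
///
/// # Example
///
/// A sketch following `test_get_put` below (assumes an `mpsc` channel and the
/// `expect_ok_callback` helper from `test_util`):
///
/// ```ignore
/// let (tx, rx) = channel();
/// storage
///     .sched_txn_command(
///         commands::Prewrite::with_defaults(
///             vec![Mutation::Put((Key::from_raw(b"x"), b"100".to_vec()))],
///             b"x".to_vec(),
///             100.into(),
///         ),
///         expect_ok_callback(tx, 1),
///     )
///     .unwrap();
/// rx.recv().unwrap(); // wait for the command to finish
/// ```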
pub fn sched_txn_command<T: StorageCallbackType>(
&self,
cmd: TypedCommand<T>,
callback: Callback<T>,
) -> Result<()> {
use crate::storage::txn::commands::{
AcquirePessimisticLock, Prewrite, PrewritePessimistic,
};
let cmd: Command = cmd.into();
if cmd.requires_pessimistic_txn() && !self.pessimistic_txn_enabled {
callback(Err(Error::from(ErrorInner::PessimisticTxnNotEnabled)));
return Ok(());
}
match &cmd {
Command::Prewrite(Prewrite { mutations, .. }) => {
check_key_size!(
mutations.iter().map(|m| m.key().as_encoded()),
self.max_key_size,
callback
);
}
Command::PrewritePessimistic(PrewritePessimistic { mutations, .. }) => {
check_key_size!(
mutations.iter().map(|(m, _)| m.key().as_encoded()),
self.max_key_size,
callback
);
}
Command::AcquirePessimisticLock(AcquirePessimisticLock { keys, .. }) => {
check_key_size!(
keys.iter().map(|k| k.0.as_encoded()),
self.max_key_size,
callback
);
}
_ => {}
}
fail_point!("storage_drop_message", |_| Ok(()));
cmd.incr_cmd_metric();
self.sched.run_cmd(cmd, T::callback(callback));
Ok(())
}
/// Delete all keys in the range [`start_key`, `end_key`).
///
/// All keys in the range will be deleted permanently regardless of their timestamps.
/// This means that deleted keys will not be retrievable by specifying an older timestamp.
/// If `notify_only` is set, the data will not be immediately deleted, but the operation will
/// still be replicated via Raft. This is used to notify that the data will be deleted by
/// `unsafe_destroy_range` soon.
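///
/// # Example
///
/// A sketch following `test_delete_range` below (assumes a `storage`, an `mpsc`
/// channel, and the `expect_ok_callback` helper from `test_util`):
///
/// ```ignore
/// let (tx, rx) = channel();
/// storage
///     .delete_range(
///         Context::default(),
///         Key::from_raw(b"x"), // start_key (inclusive)
///         Key::from_raw(b"z"), // end_key (exclusive)
///         false,               // notify_only: actually delete the data
///         expect_ok_callback(tx, 0),
///     )
///     .unwrap();
/// rx.recv().unwrap(); // wait until the write has been applied
/// ```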
pub fn delete_range(
&self,
ctx: Context,
start_key: Key,
end_key: Key,
notify_only: bool,
callback: Callback<()>,
) -> Result<()> {
let mut modifies = Vec::with_capacity(DATA_CFS.len());
for cf in DATA_CFS {
modifies.push(Modify::DeleteRange(
cf,
start_key.clone(),
end_key.clone(),
notify_only,
));
}
self.engine.async_write(
&ctx,
WriteData::from_modifies(modifies),
Box::new(|(_, res): (_, kv::Result<_>)| callback(res.map_err(Error::from))),
)?;
KV_COMMAND_COUNTER_VEC_STATIC.delete_range.inc();
Ok(())
}
/// Get the value of a raw key.
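///
/// # Example
///
/// A sketch following the raw tests below; an empty CF name selects `CF_DEFAULT`:
///
/// ```ignore
/// let value: Option<Vec<u8>> = storage
///     .raw_get(Context::default(), "".to_string(), b"d".to_vec())
///     .wait()
///     .unwrap();
/// ```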
pub fn raw_get(
&self,
ctx: Context,
cf: String,
key: Vec<u8>,
) -> impl Future<Item = Option<Vec<u8>>, Error = Error> {
const CMD: CommandKind = CommandKind::raw_get;
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let res = self.read_pool.spawn_handle(
async move {
tls_collect_qps(ctx.get_region_id(), ctx.get_peer(), &key, &key, false);
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
let command_duration = tikv_util::time::Instant::now_coarse();
let snapshot = Self::with_tls_engine(|engine| Self::snapshot(engine, &ctx)).await?;
{
let begin_instant = Instant::now_coarse();
let cf = Self::rawkv_cf(&cf)?;
// no scan_count for this kind of op.
let key_len = key.len();
let r = snapshot.get_cf(cf, &Key::from_encoded(key))?;
if let Some(ref value) = r {
let mut stats = Statistics::default();
stats.data.flow_stats.read_keys = 1;
stats.data.flow_stats.read_bytes = key_len + value.len();
tls_collect_read_flow(ctx.get_region_id(), &stats);
KV_COMMAND_KEYREAD_HISTOGRAM_STATIC.get(CMD).observe(1_f64);
}
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.elapsed_secs());
Ok(r)
}
},
priority,
thread_rng().next_u64(),
);
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.flatten()
}
/// Get the values of a set of raw keys, return a list of `Result`s.
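///
/// # Example
///
/// A sketch following `test_batch_raw_get` (assumes `RawGetRequest` from
/// `kvproto::kvrpcpb`):
///
/// ```ignore
/// let mut req = RawGetRequest::default();
/// req.set_key(b"a".to_vec());
/// let cmds = vec![PointGetCommand::from_raw_get(&mut req)];
/// let values: Vec<Result<Option<Vec<u8>>>> = storage
///     .raw_batch_get_command("".to_string(), cmds)
///     .wait()
///     .unwrap();
/// ```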
pub fn raw_batch_get_command(
&self,
cf: String,
gets: Vec<PointGetCommand>,
) -> impl Future<Item = Vec<Result<Option<Vec<u8>>>>, Error = Error> {
const CMD: CommandKind = CommandKind::raw_batch_get_command;
// All requests in a batch share the same region, epoch, term, and replica_read flag.
let ctx = gets[0].ctx.clone();
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let res = self.read_pool.spawn_handle(
async move {
for get in &gets {
if let Ok(key) = get.key.to_owned().into_raw() {
// TODO: is the into_raw conversion necessary here for raw keys?
tls_collect_qps(
get.ctx.get_region_id(),
get.ctx.get_peer(),
&key,
&key,
false,
);
}
}
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
let command_duration = tikv_util::time::Instant::now_coarse();
let snapshot = Self::with_tls_engine(|engine| Self::snapshot(engine, &ctx)).await?;
{
let begin_instant = Instant::now_coarse();
let cf = Self::rawkv_cf(&cf)?;
let mut results = vec![];
// no scan_count for this kind of op.
let mut stats = Statistics::default();
// TODO: optimize using seek.
for get in gets {
let key = &get.key;
let res = snapshot
.get_cf(cf, key)
.map(|v| {
stats.data.flow_stats.read_keys += 1;
stats.data.flow_stats.read_bytes += key.as_encoded().len()
+ v.as_ref().map(|v| v.len()).unwrap_or(0);
v
})
.map_err(Error::from);
results.push(res);
}
metrics::tls_collect_read_flow(ctx.get_region_id(), &stats);
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.elapsed_secs());
Ok(results)
}
},
priority,
thread_rng().next_u64(),
);
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.flatten()
}
/// Get the values of some raw keys in a batch.
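///
/// # Example
///
/// A sketch following `test_raw_batch_get`; pairs for missing keys are filtered
/// out of the result:
///
/// ```ignore
/// let pairs: Vec<Result<KvPair>> = storage
///     .raw_batch_get(
///         Context::default(),
///         "".to_string(),
///         vec![b"a".to_vec(), b"b".to_vec()],
///     )
///     .wait()
///     .unwrap();
/// ```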
pub fn raw_batch_get(
&self,
ctx: Context,
cf: String,
keys: Vec<Vec<u8>>,
) -> impl Future<Item = Vec<Result<KvPair>>, Error = Error> {
const CMD: CommandKind = CommandKind::raw_batch_get;
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let res = self.read_pool.spawn_handle(
async move {
let mut key_ranges = vec![];
for key in &keys {
key_ranges.push(build_key_range(key, key, false));
}
tls_collect_qps_batch(ctx.get_region_id(), ctx.get_peer(), key_ranges);
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
let command_duration = tikv_util::time::Instant::now_coarse();
let snapshot = Self::with_tls_engine(|engine| Self::snapshot(engine, &ctx)).await?;
{
let begin_instant = Instant::now_coarse();
let keys: Vec<Key> = keys.into_iter().map(Key::from_encoded).collect();
let cf = Self::rawkv_cf(&cf)?;
// no scan_count for this kind of op.
let mut stats = Statistics::default();
let result: Vec<Result<KvPair>> = keys
.into_iter()
.map(|k| {
let v = snapshot.get_cf(cf, &k);
(k, v)
})
.filter(|&(_, ref v)| !(v.is_ok() && v.as_ref().unwrap().is_none()))
.map(|(k, v)| match v {
Ok(Some(v)) => {
stats.data.flow_stats.read_keys += 1;
stats.data.flow_stats.read_bytes += k.as_encoded().len() + v.len();
Ok((k.into_encoded(), v))
}
Err(e) => Err(Error::from(e)),
_ => unreachable!(),
})
.collect();
KV_COMMAND_KEYREAD_HISTOGRAM_STATIC
.get(CMD)
.observe(stats.data.flow_stats.read_keys as f64);
tls_collect_read_flow(ctx.get_region_id(), &stats);
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.elapsed_secs());
Ok(result)
}
},
priority,
thread_rng().next_u64(),
);
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.flatten()
}
/// Write a raw key to the storage.
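///
/// # Example
///
/// A sketch following the setup in `test_raw_delete_range` (assumes a channel and
/// the `expect_ok_callback` helper from `test_util`):
///
/// ```ignore
/// let (tx, rx) = channel();
/// storage
///     .raw_put(
///         Context::default(),
///         "".to_string(), // CF name; empty selects CF_DEFAULT
///         b"key".to_vec(),
///         b"value".to_vec(),
///         expect_ok_callback(tx, 0),
///     )
///     .unwrap();
/// rx.recv().unwrap();
/// ```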
pub fn raw_put(
&self,
ctx: Context,
cf: String,
key: Vec<u8>,
value: Vec<u8>,
callback: Callback<()>,
) -> Result<()> {
check_key_size!(Some(&key).into_iter(), self.max_key_size, callback);
self.engine.async_write(
&ctx,
WriteData::from_modifies(vec![Modify::Put(
Self::rawkv_cf(&cf)?,
Key::from_encoded(key),
value,
)]),
Box::new(|(_, res): (_, kv::Result<_>)| callback(res.map_err(Error::from))),
)?;
KV_COMMAND_COUNTER_VEC_STATIC.raw_put.inc();
Ok(())
}
/// Write some keys to the storage in a batch.
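///
/// # Example
///
/// A sketch following `test_raw_batch_put`:
///
/// ```ignore
/// let pairs = vec![
///     (b"a".to_vec(), b"aa".to_vec()),
///     (b"b".to_vec(), b"bb".to_vec()),
/// ];
/// let (tx, rx) = channel();
/// storage
///     .raw_batch_put(Context::default(), "".to_string(), pairs, expect_ok_callback(tx, 0))
///     .unwrap();
/// rx.recv().unwrap();
/// ```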
pub fn raw_batch_put(
&self,
ctx: Context,
cf: String,
pairs: Vec<KvPair>,
callback: Callback<()>,
) -> Result<()> {
let cf = Self::rawkv_cf(&cf)?;
check_key_size!(
pairs.iter().map(|(ref k, _)| k),
self.max_key_size,
callback
);
let modifies = pairs
.into_iter()
.map(|(k, v)| Modify::Put(cf, Key::from_encoded(k), v))
.collect();
self.engine.async_write(
&ctx,
WriteData::from_modifies(modifies),
Box::new(|(_, res): (_, kv::Result<_>)| callback(res.map_err(Error::from))),
)?;
KV_COMMAND_COUNTER_VEC_STATIC.raw_batch_put.inc();
Ok(())
}
/// Delete a raw key from the storage.
pub fn raw_delete(
&self,
ctx: Context,
cf: String,
key: Vec<u8>,
callback: Callback<()>,
) -> Result<()> {
check_key_size!(Some(&key).into_iter(), self.max_key_size, callback);
self.engine.async_write(
&ctx,
WriteData::from_modifies(vec![Modify::Delete(
Self::rawkv_cf(&cf)?,
Key::from_encoded(key),
)]),
Box::new(|(_, res): (_, kv::Result<_>)| callback(res.map_err(Error::from))),
)?;
KV_COMMAND_COUNTER_VEC_STATIC.raw_delete.inc();
Ok(())
}
/// Delete all raw keys in [`start_key`, `end_key`).
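///
/// # Example
///
/// A sketch following `test_raw_delete_range`:
///
/// ```ignore
/// let (tx, rx) = channel();
/// storage
///     .raw_delete_range(
///         Context::default(),
///         "".to_string(),
///         b"d".to_vec(), // start_key (inclusive)
///         b"e".to_vec(), // end_key (exclusive)
///         expect_ok_callback(tx, 0),
///     )
///     .unwrap();
/// rx.recv().unwrap();
/// ```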
pub fn raw_delete_range(
&self,
ctx: Context,
cf: String,
start_key: Vec<u8>,
end_key: Vec<u8>,
callback: Callback<()>,
) -> Result<()> {
check_key_size!(
Some(&start_key)
.into_iter()
.chain(Some(&end_key).into_iter()),
self.max_key_size,
callback
);
let cf = Self::rawkv_cf(&cf)?;
let start_key = Key::from_encoded(start_key);
let end_key = Key::from_encoded(end_key);
self.engine.async_write(
&ctx,
WriteData::from_modifies(vec![Modify::DeleteRange(cf, start_key, end_key, false)]),
Box::new(|(_, res): (_, kv::Result<_>)| callback(res.map_err(Error::from))),
)?;
KV_COMMAND_COUNTER_VEC_STATIC.raw_delete_range.inc();
Ok(())
}
/// Delete some raw keys in a batch.
pub fn raw_batch_delete(
&self,
ctx: Context,
cf: String,
keys: Vec<Vec<u8>>,
callback: Callback<()>,
) -> Result<()> {
let cf = Self::rawkv_cf(&cf)?;
check_key_size!(keys.iter(), self.max_key_size, callback);
let modifies = keys
.into_iter()
.map(|k| Modify::Delete(cf, Key::from_encoded(k)))
.collect();
self.engine.async_write(
&ctx,
WriteData::from_modifies(modifies),
Box::new(|(_, res): (_, kv::Result<_>)| callback(res.map_err(Error::from))),
)?;
KV_COMMAND_COUNTER_VEC_STATIC.raw_batch_delete.inc();
Ok(())
}
/// Scan raw keys in [`start_key`, `end_key`), returning at most `limit` keys. If `end_key` is
/// `None`, the upper bound is unbounded.
///
/// If `key_only` is true, the value corresponding to the key will not be read. Only scanned
/// keys will be returned.
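///
/// # Example
///
/// A sketch following the end-key checks in `test_raw_scan`, which call this helper
/// directly on a snapshot:
///
/// ```ignore
/// let snapshot = Self::snapshot(&engine, &ctx).await?;
/// let pairs = Self::forward_raw_scan(
///     &snapshot,
///     "",                                      // CF name; empty selects CF_DEFAULT
///     &Key::from_encoded(b"c1".to_vec()),      // start_key (inclusive)
///     Some(Key::from_encoded(b"d3".to_vec())), // end_key (exclusive)
///     20,                                      // limit
///     &mut Statistics::default(),
///     false,                                   // key_only
/// )?;
/// ```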
fn forward_raw_scan(
snapshot: &E::Snap,
cf: &str,
start_key: &Key,
end_key: Option<Key>,
limit: usize,
statistics: &mut Statistics,
key_only: bool,
) -> Result<Vec<Result<KvPair>>> {
let mut option = IterOptions::default();
if let Some(end) = end_key {
option.set_upper_bound(end.as_encoded(), DATA_KEY_PREFIX_LEN);
}
if key_only {
option.set_key_only(key_only);
}
let mut cursor = snapshot.iter_cf(Self::rawkv_cf(cf)?, option, ScanMode::Forward)?;
let statistics = statistics.mut_cf_statistics(cf);
if !cursor.seek(start_key, statistics)? {
return Ok(vec![]);
}
let mut pairs = vec![];
while cursor.valid()? && pairs.len() < limit {
pairs.push(Ok((
cursor.key(statistics).to_owned(),
if key_only {
vec![]
} else {
cursor.value(statistics).to_owned()
},
)));
cursor.next(statistics);
}
Ok(pairs)
}
/// Scan raw keys in [`end_key`, `start_key`) in reverse order, returning at most `limit` keys. If
/// `end_key` is `None`, the lower bound is unbounded.
///
/// If `key_only` is true, the value corresponding to the key will not be read out.
/// Only scanned keys will be returned.
fn reverse_raw_scan(
snapshot: &E::Snap,
cf: &str,
start_key: &Key,
end_key: Option<Key>,
limit: usize,
statistics: &mut Statistics,
key_only: bool,
) -> Result<Vec<Result<KvPair>>> {
let mut option = IterOptions::default();
if let Some(end) = end_key {
option.set_lower_bound(end.as_encoded(), DATA_KEY_PREFIX_LEN);
}
if key_only {
option.set_key_only(key_only);
}
let mut cursor = snapshot.iter_cf(Self::rawkv_cf(cf)?, option, ScanMode::Backward)?;
let statistics = statistics.mut_cf_statistics(cf);
if !cursor.reverse_seek(start_key, statistics)? {
return Ok(vec![]);
}
let mut pairs = vec![];
while cursor.valid()? && pairs.len() < limit {
pairs.push(Ok((
cursor.key(statistics).to_owned(),
if key_only {
vec![]
} else {
cursor.value(statistics).to_owned()
},
)));
cursor.prev(statistics);
}
Ok(pairs)
}
/// Scan raw keys in a range.
///
/// If `reverse_scan` is false, the range is [`start_key`, `end_key`); otherwise, the range is
/// [`end_key`, `start_key`) and the scan starts at `start_key` and proceeds backwards. If
/// `end_key` is `None`, the range is unbounded on that side.
///
/// This function scans at most `limit` keys.
///
/// If `key_only` is true, the value corresponding to the key will not be read out.
/// Only scanned keys will be returned.
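///
/// # Example
///
/// A sketch following `test_raw_scan`:
///
/// ```ignore
/// // Scan at most 20 raw pairs in ["b2", "c2").
/// let pairs: Vec<Result<KvPair>> = storage
///     .raw_scan(
///         Context::default(),
///         "".to_string(),
///         b"b2".to_vec(),
///         Some(b"c2".to_vec()),
///         20,
///         false, // key_only
///         false, // reverse_scan
///     )
///     .wait()
///     .unwrap();
/// ```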
pub fn raw_scan(
&self,
ctx: Context,
cf: String,
start_key: Vec<u8>,
end_key: Option<Vec<u8>>,
limit: usize,
key_only: bool,
reverse_scan: bool,
) -> impl Future<Item = Vec<Result<KvPair>>, Error = Error> {
const CMD: CommandKind = CommandKind::raw_scan;
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let res = self.read_pool.spawn_handle(
async move {
{
let end_key = match &end_key {
Some(end_key) => end_key.to_vec(),
None => vec![],
};
tls_collect_qps(
ctx.get_region_id(),
ctx.get_peer(),
&start_key,
&end_key,
reverse_scan,
);
}
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
let command_duration = tikv_util::time::Instant::now_coarse();
let snapshot = Self::with_tls_engine(|engine| Self::snapshot(engine, &ctx)).await?;
{
let begin_instant = Instant::now_coarse();
let end_key = end_key.map(Key::from_encoded);
let mut statistics = Statistics::default();
let result = if reverse_scan {
Self::reverse_raw_scan(
&snapshot,
&cf,
&Key::from_encoded(start_key),
end_key,
limit,
&mut statistics,
key_only,
)
.map_err(Error::from)
} else {
Self::forward_raw_scan(
&snapshot,
&cf,
&Key::from_encoded(start_key),
end_key,
limit,
&mut statistics,
key_only,
)
.map_err(Error::from)
};
metrics::tls_collect_read_flow(ctx.get_region_id(), &statistics);
KV_COMMAND_KEYREAD_HISTOGRAM_STATIC
.get(CMD)
.observe(statistics.write.flow_stats.read_keys as f64);
metrics::tls_collect_scan_details(CMD, &statistics);
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.elapsed_secs());
result
}
},
priority,
thread_rng().next_u64(),
);
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.flatten()
}
/// Check the given raw kv CF name. Return the CF name, or `Err` if the given CF name is invalid.
/// The CF name can be one of `"default"`, `"write"` and `"lock"`. If given `cf` is empty,
/// `CF_DEFAULT` (`"default"`) will be returned.
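///
/// # Example
///
/// A sketch of the mapping (assumes the `CF_DEFAULT`/`CF_WRITE` constants from
/// `engine_traits`):
///
/// ```ignore
/// assert_eq!(Self::rawkv_cf("").unwrap(), CF_DEFAULT); // empty falls back to "default"
/// assert_eq!(Self::rawkv_cf("write").unwrap(), CF_WRITE);
/// assert!(Self::rawkv_cf("no-such-cf").is_err());      // ErrorInner::InvalidCf
/// ```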
fn rawkv_cf(cf: &str) -> Result<CfName> {
if cf.is_empty() {
return Ok(CF_DEFAULT);
}
for c in DATA_CFS {
if cf == *c {
return Ok(c);
}
}
Err(Error::from(ErrorInner::InvalidCf(cf.to_owned())))
}
/// Check whether the given key ranges are valid.
///
/// - If `reverse` is true, each `end_key` must be less than its `start_key`; `end_key` is the lower bound.
/// - If `reverse` is false, each `end_key` must be greater than its `start_key`; `end_key` is the upper bound.
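///
/// # Example
///
/// A sketch following `test_check_key_ranges`:
///
/// ```ignore
/// let mut range = KeyRange::default();
/// range.set_start_key(b"a".to_vec());
/// range.set_end_key(b"a3".to_vec());
/// // Forward scan: start_key must sort below end_key, so this range is valid.
/// assert!(Self::check_key_ranges(&[range], false));
/// ```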
fn check_key_ranges(ranges: &[KeyRange], reverse: bool) -> bool {
let ranges_len = ranges.len();
for i in 0..ranges_len {
let start_key = ranges[i].get_start_key();
let mut end_key = ranges[i].get_end_key();
if end_key.is_empty() && i + 1 != ranges_len {
end_key = ranges[i + 1].get_start_key();
}
if !end_key.is_empty()
&& (!reverse && start_key >= end_key || reverse && start_key <= end_key)
{
return false;
}
}
true
}
/// Scan raw keys in multiple ranges in a batch.
pub fn raw_batch_scan(
&self,
ctx: Context,
cf: String,
mut ranges: Vec<KeyRange>,
each_limit: usize,
key_only: bool,
reverse_scan: bool,
) -> impl Future<Item = Vec<Result<KvPair>>, Error = Error> {
const CMD: CommandKind = CommandKind::raw_batch_scan;
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let res = self.read_pool.spawn_handle(
async move {
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
let command_duration = tikv_util::time::Instant::now_coarse();
let snapshot = Self::with_tls_engine(|engine| Self::snapshot(engine, &ctx)).await?;
{
let begin_instant = Instant::now_coarse();
let mut statistics = Statistics::default();
if !Self::check_key_ranges(&ranges, reverse_scan) {
return Err(box_err!("Invalid KeyRanges"));
};
let mut result = Vec::new();
let ranges_len = ranges.len();
for i in 0..ranges_len {
let start_key = Key::from_encoded(ranges[i].take_start_key());
let end_key = ranges[i].take_end_key();
let end_key = if end_key.is_empty() {
if i + 1 == ranges_len {
None
} else {
Some(Key::from_encoded_slice(ranges[i + 1].get_start_key()))
}
} else {
Some(Key::from_encoded(end_key))
};
let pairs = if reverse_scan {
Self::reverse_raw_scan(
&snapshot,
&cf,
&start_key,
end_key,
each_limit,
&mut statistics,
key_only,
)?
} else {
Self::forward_raw_scan(
&snapshot,
&cf,
&start_key,
end_key,
each_limit,
&mut statistics,
key_only,
)?
};
result.extend(pairs);
}
let mut key_ranges = vec![];
for range in ranges {
key_ranges.push(build_key_range(
&range.start_key,
&range.end_key,
reverse_scan,
));
}
tls_collect_qps_batch(ctx.get_region_id(), ctx.get_peer(), key_ranges);
metrics::tls_collect_read_flow(ctx.get_region_id(), &statistics);
KV_COMMAND_KEYREAD_HISTOGRAM_STATIC
.get(CMD)
.observe(statistics.write.flow_stats.read_keys as f64);
metrics::tls_collect_scan_details(CMD, &statistics);
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.elapsed_secs());
Ok(result)
}
},
priority,
thread_rng().next_u64(),
);
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.flatten()
}
}
/// A command that gets a single value at a key.
pub struct PointGetCommand {
pub ctx: Context,
pub key: Key,
/// `None` if this is a raw get; `Some` if this is a transactional get.
pub ts: Option<TimeStamp>,
}
impl PointGetCommand {
pub fn from_get(request: &mut GetRequest) -> Self {
PointGetCommand {
ctx: request.take_context(),
key: Key::from_raw(request.get_key()),
ts: Some(request.get_version().into()),
}
}
pub fn from_raw_get(request: &mut RawGetRequest) -> Self {
PointGetCommand {
ctx: request.take_context(),
// FIXME: It is weird in semantics because the key in the request is actually in the
// raw format. We should fix it when the meaning of type `Key` is well defined.
key: Key::from_encoded(request.take_key()),
ts: None,
}
}
#[cfg(test)]
pub fn from_key_ts(key: Key, ts: Option<TimeStamp>) -> Self {
PointGetCommand {
ctx: Context::default(),
key,
ts,
}
}
}
fn get_priority_tag(priority: CommandPri) -> CommandPriority {
match priority {
CommandPri::Low => CommandPriority::low,
CommandPri::Normal => CommandPriority::normal,
CommandPri::High => CommandPriority::high,
}
}
/// A builder to build a temporary `Storage<E>`.
///
/// Only used for testing purposes.
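///
/// # Example
///
/// A sketch following the tests below:
///
/// ```ignore
/// let mut config = Config::default();
/// config.scheduler_worker_pool_size = 1;
/// let storage = TestStorageBuilder::new().config(config).build().unwrap();
/// ```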
#[must_use]
pub struct TestStorageBuilder<E: Engine, L: LockManager> {
engine: E,
config: Config,
pipelined_pessimistic_lock: bool,
lock_mgr: Option<L>,
}
impl TestStorageBuilder<RocksEngine, DummyLockManager> {
/// Create a builder for a `Storage<RocksEngine, DummyLockManager>` backed by a test engine.
pub fn new() -> Self {
Self {
engine: TestEngineBuilder::new().build().unwrap(),
config: Config::default(),
pipelined_pessimistic_lock: false,
lock_mgr: None,
}
}
}
impl<E: Engine, L: LockManager> TestStorageBuilder<E, L> {
pub fn from_engine(engine: E) -> Self {
Self {
engine,
config: Config::default(),
pipelined_pessimistic_lock: false,
lock_mgr: None,
}
}
/// Customize the config of the `Storage`.
///
/// By default, `Config::default()` will be used.
pub fn config(mut self, config: Config) -> Self {
self.config = config;
self
}
pub fn set_lock_mgr(mut self, lock_mgr: L) -> Self {
self.lock_mgr = Some(lock_mgr);
self
}
pub fn set_pipelined_pessimistic_lock(mut self, enabled: bool) -> Self {
self.pipelined_pessimistic_lock = enabled;
self
}
/// Build a `Storage<E>`.
pub fn build(self) -> Result<Storage<E, L>> {
let read_pool = build_read_pool_for_test(
&crate::config::StorageReadPoolConfig::default_for_test(),
self.engine.clone(),
);
Storage::from_engine(
self.engine,
&self.config,
ReadPool::from(read_pool).handle(),
self.lock_mgr,
self.pipelined_pessimistic_lock,
)
}
}
pub mod test_util {
use super::*;
use crate::storage::txn::commands;
use std::{
fmt::Debug,
sync::mpsc::{channel, Sender},
};
pub fn expect_none(x: Result<Option<Value>>) {
assert_eq!(x.unwrap(), None);
}
pub fn expect_value(v: Vec<u8>, x: Result<Option<Value>>) {
assert_eq!(x.unwrap().unwrap(), v);
}
pub fn expect_multi_values(v: Vec<Option<KvPair>>, x: Result<Vec<Result<KvPair>>>) {
let x: Vec<Option<KvPair>> = x.unwrap().into_iter().map(Result::ok).collect();
assert_eq!(x, v);
}
pub fn expect_error<T, F>(err_matcher: F, x: Result<T>)
where
F: FnOnce(Error) + Send + 'static,
{
match x {
Err(e) => err_matcher(e),
_ => panic!("expect result to be an error"),
}
}
pub fn expect_ok_callback<T: Debug>(done: Sender<i32>, id: i32) -> Callback<T> {
Box::new(move |x: Result<T>| {
x.unwrap();
done.send(id).unwrap();
})
}
pub fn expect_fail_callback<T, F>(done: Sender<i32>, id: i32, err_matcher: F) -> Callback<T>
where
F: FnOnce(Error) + Send + 'static,
{
Box::new(move |x: Result<T>| {
expect_error(err_matcher, x);
done.send(id).unwrap();
})
}
pub fn expect_too_busy_callback<T>(done: Sender<i32>, id: i32) -> Callback<T> {
Box::new(move |x: Result<T>| {
expect_error(
|err| match err {
Error(box ErrorInner::SchedTooBusy) => {}
e => panic!("unexpected error chain: {:?}, expect too busy", e),
},
x,
);
done.send(id).unwrap();
})
}
pub fn expect_value_callback<T: PartialEq + Debug + Send + 'static>(
done: Sender<i32>,
id: i32,
value: T,
) -> Callback<T> {
Box::new(move |x: Result<T>| {
assert_eq!(x.unwrap(), value);
done.send(id).unwrap();
})
}
pub fn expect_pessimistic_lock_res_callback(
done: Sender<i32>,
pessimistic_lock_res: PessimisticLockRes,
) -> Callback<Result<PessimisticLockRes>> {
Box::new(move |res: Result<Result<PessimisticLockRes>>| {
assert_eq!(res.unwrap().unwrap(), pessimistic_lock_res);
done.send(0).unwrap();
})
}
type PessimisticLockCommand = TypedCommand<Result<PessimisticLockRes>>;
pub fn new_acquire_pessimistic_lock_command(
keys: Vec<(Key, bool)>,
start_ts: impl Into<TimeStamp>,
for_update_ts: impl Into<TimeStamp>,
return_values: bool,
) -> PessimisticLockCommand {
let primary = keys[0].0.clone().to_raw().unwrap();
let for_update_ts: TimeStamp = for_update_ts.into();
commands::AcquirePessimisticLock::new(
keys,
primary,
start_ts.into(),
3000,
false,
for_update_ts,
None,
return_values,
for_update_ts.next(),
Context::default(),
)
}
pub fn delete_pessimistic_lock<E: Engine, L: LockManager>(
storage: &Storage<E, L>,
key: Key,
start_ts: u64,
for_update_ts: u64,
) {
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::PessimisticRollback::new(
vec![key],
start_ts.into(),
for_update_ts.into(),
Context::default(),
),
expect_ok_callback(tx, 0),
)
.unwrap();
rx.recv().unwrap();
}
}
#[cfg(test)]
mod tests {
use super::{test_util::*, *};
use crate::config::TitanDBConfig;
use crate::storage::{
config::BlockCacheConfig,
kv::{Error as EngineError, ErrorInner as EngineErrorInner},
lock_manager::{Lock, WaitTimeout},
mvcc::{Error as MvccError, ErrorInner as MvccErrorInner},
txn::{commands, Error as TxnError, ErrorInner as TxnErrorInner},
};
use engine_rocks::raw_util::CFOptions;
use engine_traits::{CF_LOCK, CF_RAFT, CF_WRITE};
use futures03::executor::block_on;
use kvproto::kvrpcpb::{CommandPri, LockInfo};
use std::{
sync::{
atomic::{AtomicBool, Ordering},
mpsc::{channel, Sender},
Arc,
},
time::Duration,
};
use tikv_util::collections::HashMap;
use tikv_util::config::ReadableSize;
use txn_types::Mutation;
#[test]
fn test_get_put() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
expect_none(
storage
.get(Context::default(), Key::from_raw(b"x"), 100.into())
.wait(),
);
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::Put((Key::from_raw(b"x"), b"100".to_vec()))],
b"x".to_vec(),
100.into(),
),
expect_ok_callback(tx.clone(), 1),
)
.unwrap();
rx.recv().unwrap();
expect_error(
|e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::KeyIsLocked { .. },
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
},
storage
.get(Context::default(), Key::from_raw(b"x"), 101.into())
.wait(),
);
storage
.sched_txn_command(
commands::Commit::new(
vec![Key::from_raw(b"x")],
100.into(),
101.into(),
Context::default(),
),
expect_ok_callback(tx, 3),
)
.unwrap();
rx.recv().unwrap();
expect_none(
storage
.get(Context::default(), Key::from_raw(b"x"), 100.into())
.wait(),
);
expect_value(
b"100".to_vec(),
storage
.get(Context::default(), Key::from_raw(b"x"), 101.into())
.wait(),
);
}
#[test]
fn test_cf_error() {
// New engine lacks normal column families.
let engine = TestEngineBuilder::new().cfs(["foo"]).build().unwrap();
let storage = TestStorageBuilder::<_, DummyLockManager>::from_engine(engine)
.build()
.unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::Put((Key::from_raw(b"a"), b"aa".to_vec())),
Mutation::Put((Key::from_raw(b"b"), b"bb".to_vec())),
Mutation::Put((Key::from_raw(b"c"), b"cc".to_vec())),
],
b"a".to_vec(),
1.into(),
),
expect_fail_callback(tx, 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::Engine(EngineError(box EngineErrorInner::Request(
..,
))),
))))) => {}
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
expect_error(
|e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::Engine(EngineError(box EngineErrorInner::Request(..))),
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
},
storage
.get(Context::default(), Key::from_raw(b"x"), 1.into())
.wait(),
);
expect_error(
|e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::Engine(EngineError(box EngineErrorInner::Request(..))),
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
},
storage
.scan(
Context::default(),
Key::from_raw(b"x"),
None,
1000,
1.into(),
false,
false,
)
.wait(),
);
expect_error(
|e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::Engine(EngineError(box EngineErrorInner::Request(..))),
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
},
storage
.batch_get(
Context::default(),
vec![Key::from_raw(b"c"), Key::from_raw(b"d")],
1.into(),
)
.wait(),
);
let x = storage
.batch_get_command(vec![
PointGetCommand::from_key_ts(Key::from_raw(b"c"), Some(1.into())),
PointGetCommand::from_key_ts(Key::from_raw(b"d"), Some(1.into())),
])
.wait()
.unwrap();
for v in x {
expect_error(
|e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::Engine(EngineError(box EngineErrorInner::Request(
..,
))),
))))) => {}
e => panic!("unexpected error chain: {:?}", e),
},
v,
);
}
}
#[test]
fn test_scan() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::Put((Key::from_raw(b"a"), b"aa".to_vec())),
Mutation::Put((Key::from_raw(b"b"), b"bb".to_vec())),
Mutation::Put((Key::from_raw(b"c"), b"cc".to_vec())),
],
b"a".to_vec(),
1.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// Forward
expect_multi_values(
vec![None, None, None],
storage
.scan(
Context::default(),
Key::from_raw(b"\x00"),
None,
1000,
5.into(),
false,
false,
)
.wait(),
);
// Backward
expect_multi_values(
vec![None, None, None],
storage
.scan(
Context::default(),
Key::from_raw(b"\xff"),
None,
1000,
5.into(),
false,
true,
)
.wait(),
);
// Forward with bound
expect_multi_values(
vec![None, None],
storage
.scan(
Context::default(),
Key::from_raw(b"\x00"),
Some(Key::from_raw(b"c")),
1000,
5.into(),
false,
false,
)
.wait(),
);
// Backward with bound
expect_multi_values(
vec![None, None],
storage
.scan(
Context::default(),
Key::from_raw(b"\xff"),
Some(Key::from_raw(b"b")),
1000,
5.into(),
false,
true,
)
.wait(),
);
// Forward with limit
expect_multi_values(
vec![None, None],
storage
.scan(
Context::default(),
Key::from_raw(b"\x00"),
None,
2,
5.into(),
false,
false,
)
.wait(),
);
// Backward with limit
expect_multi_values(
vec![None, None],
storage
.scan(
Context::default(),
Key::from_raw(b"\xff"),
None,
2,
5.into(),
false,
true,
)
.wait(),
);
storage
.sched_txn_command(
commands::Commit::new(
vec![
Key::from_raw(b"a"),
Key::from_raw(b"b"),
Key::from_raw(b"c"),
],
1.into(),
2.into(),
Context::default(),
),
expect_ok_callback(tx, 1),
)
.unwrap();
rx.recv().unwrap();
// Forward
expect_multi_values(
vec![
Some((b"a".to_vec(), b"aa".to_vec())),
Some((b"b".to_vec(), b"bb".to_vec())),
Some((b"c".to_vec(), b"cc".to_vec())),
],
storage
.scan(
Context::default(),
Key::from_raw(b"\x00"),
None,
1000,
5.into(),
false,
false,
)
.wait(),
);
// Backward
expect_multi_values(
vec![
Some((b"c".to_vec(), b"cc".to_vec())),
Some((b"b".to_vec(), b"bb".to_vec())),
Some((b"a".to_vec(), b"aa".to_vec())),
],
storage
.scan(
Context::default(),
Key::from_raw(b"\xff"),
None,
1000,
5.into(),
false,
true,
)
.wait(),
);
// Forward with bound
expect_multi_values(
vec![
Some((b"a".to_vec(), b"aa".to_vec())),
Some((b"b".to_vec(), b"bb".to_vec())),
],
storage
.scan(
Context::default(),
Key::from_raw(b"\x00"),
Some(Key::from_raw(b"c")),
1000,
5.into(),
false,
false,
)
.wait(),
);
// Backward with bound
expect_multi_values(
vec![
Some((b"c".to_vec(), b"cc".to_vec())),
Some((b"b".to_vec(), b"bb".to_vec())),
],
storage
.scan(
Context::default(),
Key::from_raw(b"\xff"),
Some(Key::from_raw(b"b")),
1000,
5.into(),
false,
true,
)
.wait(),
);
// Forward with limit
expect_multi_values(
vec![
Some((b"a".to_vec(), b"aa".to_vec())),
Some((b"b".to_vec(), b"bb".to_vec())),
],
storage
.scan(
Context::default(),
Key::from_raw(b"\x00"),
None,
2,
5.into(),
false,
false,
)
.wait(),
);
// Backward with limit
expect_multi_values(
vec![
Some((b"c".to_vec(), b"cc".to_vec())),
Some((b"b".to_vec(), b"bb".to_vec())),
],
storage
.scan(
Context::default(),
Key::from_raw(b"\xff"),
None,
2,
5.into(),
false,
true,
)
.wait(),
);
}
#[test]
fn test_scan_with_key_only() {
let mut titan_db_config = TitanDBConfig::default();
titan_db_config.enabled = true;
let mut db_config = crate::config::DbConfig::default();
db_config.titan = titan_db_config;
let engine = {
let path = "".to_owned();
let cfs = crate::storage::ALL_CFS.to_vec();
let cfg_rocksdb = db_config;
let cache = BlockCacheConfig::default().build_shared_cache();
let cfs_opts = vec![
CFOptions::new(CF_DEFAULT, cfg_rocksdb.defaultcf.build_opt(&cache)),
CFOptions::new(CF_LOCK, cfg_rocksdb.lockcf.build_opt(&cache)),
CFOptions::new(CF_WRITE, cfg_rocksdb.writecf.build_opt(&cache)),
CFOptions::new(CF_RAFT, cfg_rocksdb.raftcf.build_opt(&cache)),
];
RocksEngine::new(&path, &cfs, Some(cfs_opts), cache.is_some())
}
.unwrap();
let storage = TestStorageBuilder::<_, DummyLockManager>::from_engine(engine)
.build()
.unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::Put((Key::from_raw(b"a"), b"aa".to_vec())),
Mutation::Put((Key::from_raw(b"b"), b"bb".to_vec())),
Mutation::Put((Key::from_raw(b"c"), b"cc".to_vec())),
],
b"a".to_vec(),
1.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// Forward
expect_multi_values(
vec![None, None, None],
storage
.scan(
Context::default(),
Key::from_raw(b"\x00"),
None,
1000,
5.into(),
true,
false,
)
.wait(),
);
// Backward
expect_multi_values(
vec![None, None, None],
storage
.scan(
Context::default(),
Key::from_raw(b"\xff"),
None,
1000,
5.into(),
true,
true,
)
.wait(),
);
// Forward with bound
expect_multi_values(
vec![None, None],
storage
.scan(
Context::default(),
Key::from_raw(b"\x00"),
Some(Key::from_raw(b"c")),
1000,
5.into(),
true,
false,
)
.wait(),
);
// Backward with bound
expect_multi_values(
vec![None, None],
storage
.scan(
Context::default(),
Key::from_raw(b"\xff"),
Some(Key::from_raw(b"b")),
1000,
5.into(),
true,
true,
)
.wait(),
);
// Forward with limit
expect_multi_values(
vec![None, None],
storage
.scan(
Context::default(),
Key::from_raw(b"\x00"),
None,
2,
5.into(),
true,
false,
)
.wait(),
);
// Backward with limit
expect_multi_values(
vec![None, None],
storage
.scan(
Context::default(),
Key::from_raw(b"\xff"),
None,
2,
5.into(),
true,
true,
)
.wait(),
);
storage
.sched_txn_command(
commands::Commit::new(
vec![
Key::from_raw(b"a"),
Key::from_raw(b"b"),
Key::from_raw(b"c"),
],
1.into(),
2.into(),
Context::default(),
),
expect_ok_callback(tx, 1),
)
.unwrap();
rx.recv().unwrap();
// Forward
expect_multi_values(
vec![
Some((b"a".to_vec(), vec![])),
Some((b"b".to_vec(), vec![])),
Some((b"c".to_vec(), vec![])),
],
storage
.scan(
Context::default(),
Key::from_raw(b"\x00"),
None,
1000,
5.into(),
true,
false,
)
.wait(),
);
// Backward
expect_multi_values(
vec![
Some((b"c".to_vec(), vec![])),
Some((b"b".to_vec(), vec![])),
Some((b"a".to_vec(), vec![])),
],
storage
.scan(
Context::default(),
Key::from_raw(b"\xff"),
None,
1000,
5.into(),
true,
true,
)
.wait(),
);
// Forward with bound
expect_multi_values(
vec![Some((b"a".to_vec(), vec![])), Some((b"b".to_vec(), vec![]))],
storage
.scan(
Context::default(),
Key::from_raw(b"\x00"),
Some(Key::from_raw(b"c")),
1000,
5.into(),
true,
false,
)
.wait(),
);
// Backward with bound
expect_multi_values(
vec![Some((b"c".to_vec(), vec![])), Some((b"b".to_vec(), vec![]))],
storage
.scan(
Context::default(),
Key::from_raw(b"\xff"),
Some(Key::from_raw(b"b")),
1000,
5.into(),
true,
true,
)
.wait(),
);
// Forward with limit
expect_multi_values(
vec![Some((b"a".to_vec(), vec![])), Some((b"b".to_vec(), vec![]))],
storage
.scan(
Context::default(),
Key::from_raw(b"\x00"),
None,
2,
5.into(),
true,
false,
)
.wait(),
);
// Backward with limit
expect_multi_values(
vec![Some((b"c".to_vec(), vec![])), Some((b"b".to_vec(), vec![]))],
storage
.scan(
Context::default(),
Key::from_raw(b"\xff"),
None,
2,
5.into(),
true,
true,
)
.wait(),
);
}
#[test]
fn test_batch_get() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::Put((Key::from_raw(b"a"), b"aa".to_vec())),
Mutation::Put((Key::from_raw(b"b"), b"bb".to_vec())),
Mutation::Put((Key::from_raw(b"c"), b"cc".to_vec())),
],
b"a".to_vec(),
1.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
expect_multi_values(
vec![None],
storage
.batch_get(
Context::default(),
vec![Key::from_raw(b"c"), Key::from_raw(b"d")],
2.into(),
)
.wait(),
);
storage
.sched_txn_command(
commands::Commit::new(
vec![
Key::from_raw(b"a"),
Key::from_raw(b"b"),
Key::from_raw(b"c"),
],
1.into(),
2.into(),
Context::default(),
),
expect_ok_callback(tx, 1),
)
.unwrap();
rx.recv().unwrap();
expect_multi_values(
vec![
Some((b"c".to_vec(), b"cc".to_vec())),
Some((b"a".to_vec(), b"aa".to_vec())),
Some((b"b".to_vec(), b"bb".to_vec())),
],
storage
.batch_get(
Context::default(),
vec![
Key::from_raw(b"c"),
Key::from_raw(b"x"),
Key::from_raw(b"a"),
Key::from_raw(b"b"),
],
5.into(),
)
.wait(),
);
}
#[test]
fn test_batch_get_command() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::Put((Key::from_raw(b"a"), b"aa".to_vec())),
Mutation::Put((Key::from_raw(b"b"), b"bb".to_vec())),
Mutation::Put((Key::from_raw(b"c"), b"cc".to_vec())),
],
b"a".to_vec(),
1.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let mut x = storage
.batch_get_command(vec![
PointGetCommand::from_key_ts(Key::from_raw(b"c"), Some(2.into())),
PointGetCommand::from_key_ts(Key::from_raw(b"d"), Some(2.into())),
])
.wait()
.unwrap();
expect_error(
|e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::KeyIsLocked(..),
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
},
x.remove(0),
);
assert_eq!(x.remove(0).unwrap(), None);
storage
.sched_txn_command(
commands::Commit::new(
vec![
Key::from_raw(b"a"),
Key::from_raw(b"b"),
Key::from_raw(b"c"),
],
1.into(),
2.into(),
Context::default(),
),
expect_ok_callback(tx, 1),
)
.unwrap();
rx.recv().unwrap();
let x: Vec<Option<Vec<u8>>> = storage
.batch_get_command(vec![
PointGetCommand::from_key_ts(Key::from_raw(b"c"), Some(5.into())),
PointGetCommand::from_key_ts(Key::from_raw(b"x"), Some(5.into())),
PointGetCommand::from_key_ts(Key::from_raw(b"a"), Some(5.into())),
PointGetCommand::from_key_ts(Key::from_raw(b"b"), Some(5.into())),
])
.wait()
.unwrap()
.into_iter()
.map(|x| x.unwrap())
.collect();
assert_eq!(
x,
vec![
Some(b"cc".to_vec()),
None,
Some(b"aa".to_vec()),
Some(b"bb".to_vec())
]
);
}
#[test]
fn test_txn() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::Put((Key::from_raw(b"x"), b"100".to_vec()))],
b"x".to_vec(),
100.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::Put((Key::from_raw(b"y"), b"101".to_vec()))],
b"y".to_vec(),
101.into(),
),
expect_ok_callback(tx.clone(), 1),
)
.unwrap();
rx.recv().unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Commit::new(
vec![Key::from_raw(b"x")],
100.into(),
110.into(),
Context::default(),
),
expect_value_callback(tx.clone(), 2, TxnStatus::committed(110.into())),
)
.unwrap();
storage
.sched_txn_command(
commands::Commit::new(
vec![Key::from_raw(b"y")],
101.into(),
111.into(),
Context::default(),
),
expect_value_callback(tx.clone(), 3, TxnStatus::committed(111.into())),
)
.unwrap();
rx.recv().unwrap();
rx.recv().unwrap();
expect_value(
b"100".to_vec(),
storage
.get(Context::default(), Key::from_raw(b"x"), 120.into())
.wait(),
);
expect_value(
b"101".to_vec(),
storage
.get(Context::default(), Key::from_raw(b"y"), 120.into())
.wait(),
);
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::Put((Key::from_raw(b"x"), b"105".to_vec()))],
b"x".to_vec(),
105.into(),
),
expect_fail_callback(tx, 6, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::WriteConflict { .. },
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
}
#[test]
fn test_sched_too_busy() {
let mut config = Config::default();
config.scheduler_pending_write_threshold = ReadableSize(1);
let storage = TestStorageBuilder::new().config(config).build().unwrap();
let (tx, rx) = channel();
expect_none(
storage
.get(Context::default(), Key::from_raw(b"x"), 100.into())
.wait(),
);
storage
.sched_txn_command::<()>(
commands::Pause::new(vec![Key::from_raw(b"x")], 1000, Context::default()).into(),
expect_ok_callback(tx.clone(), 1),
)
.unwrap();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::Put((Key::from_raw(b"y"), b"101".to_vec()))],
b"y".to_vec(),
101.into(),
),
expect_too_busy_callback(tx.clone(), 2),
)
.unwrap();
rx.recv().unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::Put((Key::from_raw(b"z"), b"102".to_vec()))],
b"y".to_vec(),
102.into(),
),
expect_ok_callback(tx, 3),
)
.unwrap();
rx.recv().unwrap();
}
#[test]
fn test_cleanup() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::Put((Key::from_raw(b"x"), b"100".to_vec()))],
b"x".to_vec(),
100.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Cleanup::new(
Key::from_raw(b"x"),
100.into(),
TimeStamp::zero(),
Context::default(),
),
expect_ok_callback(tx, 1),
)
.unwrap();
rx.recv().unwrap();
expect_none(
storage
.get(Context::default(), Key::from_raw(b"x"), 105.into())
.wait(),
);
}
#[test]
fn test_cleanup_check_ttl() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
let ts = TimeStamp::compose;
storage
.sched_txn_command(
commands::Prewrite::with_lock_ttl(
vec![Mutation::Put((Key::from_raw(b"x"), b"110".to_vec()))],
b"x".to_vec(),
ts(110, 0),
100,
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Cleanup::new(
Key::from_raw(b"x"),
ts(110, 0),
ts(120, 0),
Context::default(),
),
expect_fail_callback(tx.clone(), 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::KeyIsLocked(info),
))))) => assert_eq!(info.get_lock_ttl(), 100),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Cleanup::new(
Key::from_raw(b"x"),
ts(110, 0),
ts(220, 0),
Context::default(),
),
expect_ok_callback(tx, 0),
)
.unwrap();
rx.recv().unwrap();
expect_none(
storage
.get(Context::default(), Key::from_raw(b"x"), ts(230, 0))
.wait(),
);
}
#[test]
fn test_high_priority_get_put() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
let mut ctx = Context::default();
ctx.set_priority(CommandPri::High);
expect_none(storage.get(ctx, Key::from_raw(b"x"), 100.into()).wait());
let mut ctx = Context::default();
ctx.set_priority(CommandPri::High);
storage
.sched_txn_command(
commands::Prewrite::with_context(
vec![Mutation::Put((Key::from_raw(b"x"), b"100".to_vec()))],
b"x".to_vec(),
100.into(),
ctx,
),
expect_ok_callback(tx.clone(), 1),
)
.unwrap();
rx.recv().unwrap();
let mut ctx = Context::default();
ctx.set_priority(CommandPri::High);
storage
.sched_txn_command(
commands::Commit::new(vec![Key::from_raw(b"x")], 100.into(), 101.into(), ctx),
expect_ok_callback(tx, 2),
)
.unwrap();
rx.recv().unwrap();
let mut ctx = Context::default();
ctx.set_priority(CommandPri::High);
expect_none(storage.get(ctx, Key::from_raw(b"x"), 100.into()).wait());
let mut ctx = Context::default();
ctx.set_priority(CommandPri::High);
expect_value(
b"100".to_vec(),
storage.get(ctx, Key::from_raw(b"x"), 101.into()).wait(),
);
}
#[test]
fn test_high_priority_no_block() {
let mut config = Config::default();
config.scheduler_worker_pool_size = 1;
let storage = TestStorageBuilder::new().config(config).build().unwrap();
let (tx, rx) = channel();
expect_none(
storage
.get(Context::default(), Key::from_raw(b"x"), 100.into())
.wait(),
);
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::Put((Key::from_raw(b"x"), b"100".to_vec()))],
b"x".to_vec(),
100.into(),
),
expect_ok_callback(tx.clone(), 1),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Commit::new(
vec![Key::from_raw(b"x")],
100.into(),
101.into(),
Context::default(),
),
expect_ok_callback(tx.clone(), 2),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Pause::new(vec![Key::from_raw(b"y")], 1000, Context::default()),
expect_ok_callback(tx, 3),
)
.unwrap();
let mut ctx = Context::default();
ctx.set_priority(CommandPri::High);
expect_value(
b"100".to_vec(),
storage.get(ctx, Key::from_raw(b"x"), 101.into()).wait(),
);
// A high-priority Get command is not blocked by the Pause command.
assert_eq!(rx.recv().unwrap(), 3);
}
#[test]
fn test_delete_range() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
// Write x and y.
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::Put((Key::from_raw(b"x"), b"100".to_vec())),
Mutation::Put((Key::from_raw(b"y"), b"100".to_vec())),
Mutation::Put((Key::from_raw(b"z"), b"100".to_vec())),
],
b"x".to_vec(),
100.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Commit::new(
vec![
Key::from_raw(b"x"),
Key::from_raw(b"y"),
Key::from_raw(b"z"),
],
100.into(),
101.into(),
Context::default(),
),
expect_ok_callback(tx.clone(), 1),
)
.unwrap();
rx.recv().unwrap();
expect_value(
b"100".to_vec(),
storage
.get(Context::default(), Key::from_raw(b"x"), 101.into())
.wait(),
);
expect_value(
b"100".to_vec(),
storage
.get(Context::default(), Key::from_raw(b"y"), 101.into())
.wait(),
);
expect_value(
b"100".to_vec(),
storage
.get(Context::default(), Key::from_raw(b"z"), 101.into())
.wait(),
);
// Delete range [x, z)
storage
.delete_range(
Context::default(),
Key::from_raw(b"x"),
Key::from_raw(b"z"),
false,
expect_ok_callback(tx.clone(), 5),
)
.unwrap();
rx.recv().unwrap();
expect_none(
storage
.get(Context::default(), Key::from_raw(b"x"), 101.into())
.wait(),
);
expect_none(
storage
.get(Context::default(), Key::from_raw(b"y"), 101.into())
.wait(),
);
expect_value(
b"100".to_vec(),
storage
.get(Context::default(), Key::from_raw(b"z"), 101.into())
.wait(),
);
storage
.delete_range(
Context::default(),
Key::from_raw(b""),
Key::from_raw(&[255]),
false,
expect_ok_callback(tx, 9),
)
.unwrap();
rx.recv().unwrap();
expect_none(
storage
.get(Context::default(), Key::from_raw(b"z"), 101.into())
.wait(),
);
}
#[test]
fn test_raw_delete_range() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
let test_data = [
(b"a", b"001"),
(b"b", b"002"),
(b"c", b"003"),
(b"d", b"004"),
(b"e", b"005"),
];
// Write some key-value pairs to the db
for kv in &test_data {
storage
.raw_put(
Context::default(),
"".to_string(),
kv.0.to_vec(),
kv.1.to_vec(),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
}
expect_value(
b"004".to_vec(),
storage
.raw_get(Context::default(), "".to_string(), b"d".to_vec())
.wait(),
);
// Delete ["d", "e")
storage
.raw_delete_range(
Context::default(),
"".to_string(),
b"d".to_vec(),
b"e".to_vec(),
expect_ok_callback(tx.clone(), 1),
)
.unwrap();
rx.recv().unwrap();
// Assert key "d" is gone
expect_value(
b"003".to_vec(),
storage
.raw_get(Context::default(), "".to_string(), b"c".to_vec())
.wait(),
);
expect_none(
storage
.raw_get(Context::default(), "".to_string(), b"d".to_vec())
.wait(),
);
expect_value(
b"005".to_vec(),
storage
.raw_get(Context::default(), "".to_string(), b"e".to_vec())
.wait(),
);
// Delete ["aa", "ab")
storage
.raw_delete_range(
Context::default(),
"".to_string(),
b"aa".to_vec(),
b"ab".to_vec(),
expect_ok_callback(tx.clone(), 2),
)
.unwrap();
rx.recv().unwrap();
// Assert nothing happened
expect_value(
b"001".to_vec(),
storage
.raw_get(Context::default(), "".to_string(), b"a".to_vec())
.wait(),
);
expect_value(
b"002".to_vec(),
storage
.raw_get(Context::default(), "".to_string(), b"b".to_vec())
.wait(),
);
// Delete all
storage
.raw_delete_range(
Context::default(),
"".to_string(),
b"a".to_vec(),
b"z".to_vec(),
expect_ok_callback(tx, 3),
)
.unwrap();
rx.recv().unwrap();
// Assert that no key remains
for kv in &test_data {
expect_none(
storage
.raw_get(Context::default(), "".to_string(), kv.0.to_vec())
.wait(),
);
}
rx.recv().unwrap();
}
#[test]
fn test_raw_batch_put() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
let test_data = vec![
(b"a".to_vec(), b"aa".to_vec()),
(b"b".to_vec(), b"bb".to_vec()),
(b"c".to_vec(), b"cc".to_vec()),
(b"d".to_vec(), b"dd".to_vec()),
(b"e".to_vec(), b"ee".to_vec()),
];
// Write key-value pairs in a batch
storage
.raw_batch_put(
Context::default(),
"".to_string(),
test_data.clone(),
expect_ok_callback(tx, 0),
)
.unwrap();
rx.recv().unwrap();
// Verify pairs one by one
for (key, val) in test_data {
expect_value(
val,
storage
.raw_get(Context::default(), "".to_string(), key)
.wait(),
);
}
}
#[test]
fn test_raw_batch_get() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
let test_data = vec![
(b"a".to_vec(), b"aa".to_vec()),
(b"b".to_vec(), b"bb".to_vec()),
(b"c".to_vec(), b"cc".to_vec()),
(b"d".to_vec(), b"dd".to_vec()),
(b"e".to_vec(), b"ee".to_vec()),
];
// Write key-value pairs one by one
for &(ref key, ref value) in &test_data {
storage
.raw_put(
Context::default(),
"".to_string(),
key.clone(),
value.clone(),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
}
rx.recv().unwrap();
// Verify pairs in a batch
let keys = test_data.iter().map(|&(ref k, _)| k.clone()).collect();
let results = test_data.into_iter().map(|(k, v)| Some((k, v))).collect();
expect_multi_values(
results,
storage
.raw_batch_get(Context::default(), "".to_string(), keys)
.wait(),
);
}
#[test]
fn test_batch_raw_get() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
let test_data = vec![
(b"a".to_vec(), b"aa".to_vec()),
(b"b".to_vec(), b"bb".to_vec()),
(b"c".to_vec(), b"cc".to_vec()),
(b"d".to_vec(), b"dd".to_vec()),
(b"e".to_vec(), b"ee".to_vec()),
];
// Write key-value pairs one by one
for &(ref key, ref value) in &test_data {
storage
.raw_put(
Context::default(),
"".to_string(),
key.clone(),
value.clone(),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
}
rx.recv().unwrap();
// Verify pairs in a batch
let cmds = test_data
.iter()
.map(|&(ref k, _)| {
let mut req = RawGetRequest::default();
req.set_key(k.clone());
PointGetCommand::from_raw_get(&mut req)
})
.collect();
let results: Vec<Option<Vec<u8>>> = test_data.into_iter().map(|(_, v)| Some(v)).collect();
let x: Vec<Option<Vec<u8>>> = storage
.raw_batch_get_command("".to_string(), cmds)
.wait()
.unwrap()
.into_iter()
.map(|x| x.unwrap())
.collect();
assert_eq!(x, results);
}
#[test]
fn test_raw_batch_delete() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
let test_data = vec![
(b"a".to_vec(), b"aa".to_vec()),
(b"b".to_vec(), b"bb".to_vec()),
(b"c".to_vec(), b"cc".to_vec()),
(b"d".to_vec(), b"dd".to_vec()),
(b"e".to_vec(), b"ee".to_vec()),
];
// Write key-value pairs in batch
storage
.raw_batch_put(
Context::default(),
"".to_string(),
test_data.clone(),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// Verify pairs exist
let keys = test_data.iter().map(|&(ref k, _)| k.clone()).collect();
let results = test_data
.iter()
.map(|&(ref k, ref v)| Some((k.clone(), v.clone())))
.collect();
expect_multi_values(
results,
storage
.raw_batch_get(Context::default(), "".to_string(), keys)
.wait(),
);
// Delete ["b", "d"]
storage
.raw_batch_delete(
Context::default(),
"".to_string(),
vec![b"b".to_vec(), b"d".to_vec()],
expect_ok_callback(tx.clone(), 1),
)
.unwrap();
rx.recv().unwrap();
// Assert "b" and "d" are gone
expect_value(
b"aa".to_vec(),
storage
.raw_get(Context::default(), "".to_string(), b"a".to_vec())
.wait(),
);
expect_none(
storage
.raw_get(Context::default(), "".to_string(), b"b".to_vec())
.wait(),
);
expect_value(
b"cc".to_vec(),
storage
.raw_get(Context::default(), "".to_string(), b"c".to_vec())
.wait(),
);
expect_none(
storage
.raw_get(Context::default(), "".to_string(), b"d".to_vec())
.wait(),
);
expect_value(
b"ee".to_vec(),
storage
.raw_get(Context::default(), "".to_string(), b"e".to_vec())
.wait(),
);
// Delete ["a", "c", "e"]
storage
.raw_batch_delete(
Context::default(),
"".to_string(),
vec![b"a".to_vec(), b"c".to_vec(), b"e".to_vec()],
expect_ok_callback(tx, 2),
)
.unwrap();
rx.recv().unwrap();
// Assert no key remains
for (k, _) in test_data {
expect_none(
storage
.raw_get(Context::default(), "".to_string(), k)
.wait(),
);
}
}
#[test]
fn test_raw_scan() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
let test_data = vec![
(b"a".to_vec(), b"aa".to_vec()),
(b"a1".to_vec(), b"aa11".to_vec()),
(b"a2".to_vec(), b"aa22".to_vec()),
(b"a3".to_vec(), b"aa33".to_vec()),
(b"b".to_vec(), b"bb".to_vec()),
(b"b1".to_vec(), b"bb11".to_vec()),
(b"b2".to_vec(), b"bb22".to_vec()),
(b"b3".to_vec(), b"bb33".to_vec()),
(b"c".to_vec(), b"cc".to_vec()),
(b"c1".to_vec(), b"cc11".to_vec()),
(b"c2".to_vec(), b"cc22".to_vec()),
(b"c3".to_vec(), b"cc33".to_vec()),
(b"d".to_vec(), b"dd".to_vec()),
(b"d1".to_vec(), b"dd11".to_vec()),
(b"d2".to_vec(), b"dd22".to_vec()),
(b"d3".to_vec(), b"dd33".to_vec()),
(b"e".to_vec(), b"ee".to_vec()),
(b"e1".to_vec(), b"ee11".to_vec()),
(b"e2".to_vec(), b"ee22".to_vec()),
(b"e3".to_vec(), b"ee33".to_vec()),
];
// Write key-value pairs in batch
storage
.raw_batch_put(
Context::default(),
"".to_string(),
test_data.clone(),
expect_ok_callback(tx, 0),
)
.unwrap();
rx.recv().unwrap();
// Scan pairs with key only
let mut results: Vec<Option<KvPair>> = test_data
.iter()
.map(|&(ref k, _)| Some((k.clone(), vec![])))
.collect();
expect_multi_values(
results.clone(),
storage
.raw_scan(
Context::default(),
"".to_string(),
vec![],
None,
20,
true,
false,
)
.wait(),
);
results = results.split_off(10);
expect_multi_values(
results,
storage
.raw_scan(
Context::default(),
"".to_string(),
b"c2".to_vec(),
None,
20,
true,
false,
)
.wait(),
);
let mut results: Vec<Option<KvPair>> = test_data
.clone()
.into_iter()
.map(|(k, v)| Some((k, v)))
.collect();
expect_multi_values(
results.clone(),
storage
.raw_scan(
Context::default(),
"".to_string(),
vec![],
None,
20,
false,
false,
)
.wait(),
);
results = results.split_off(10);
expect_multi_values(
results,
storage
.raw_scan(
Context::default(),
"".to_string(),
b"c2".to_vec(),
None,
20,
false,
false,
)
.wait(),
);
let results: Vec<Option<KvPair>> = test_data
.clone()
.into_iter()
.map(|(k, v)| Some((k, v)))
.rev()
.collect();
expect_multi_values(
results,
storage
.raw_scan(
Context::default(),
"".to_string(),
b"z".to_vec(),
None,
20,
false,
true,
)
.wait(),
);
let results: Vec<Option<KvPair>> = test_data
.clone()
.into_iter()
.map(|(k, v)| Some((k, v)))
.rev()
.take(5)
.collect();
expect_multi_values(
results,
storage
.raw_scan(
Context::default(),
"".to_string(),
b"z".to_vec(),
None,
5,
false,
true,
)
.wait(),
);
// Scan with end_key
let results: Vec<Option<KvPair>> = test_data
.clone()
.into_iter()
.skip(6)
.take(4)
.map(|(k, v)| Some((k, v)))
.collect();
expect_multi_values(
results,
storage
.raw_scan(
Context::default(),
"".to_string(),
b"b2".to_vec(),
Some(b"c2".to_vec()),
20,
false,
false,
)
.wait(),
);
let results: Vec<Option<KvPair>> = test_data
.clone()
.into_iter()
.skip(6)
.take(1)
.map(|(k, v)| Some((k, v)))
.collect();
expect_multi_values(
results,
storage
.raw_scan(
Context::default(),
"".to_string(),
b"b2".to_vec(),
Some(b"b2\x00".to_vec()),
20,
false,
false,
)
.wait(),
);
// Reverse scan with end_key
let results: Vec<Option<KvPair>> = test_data
.clone()
.into_iter()
.rev()
.skip(10)
.take(4)
.map(|(k, v)| Some((k, v)))
.collect();
expect_multi_values(
results,
storage
.raw_scan(
Context::default(),
"".to_string(),
b"c2".to_vec(),
Some(b"b2".to_vec()),
20,
false,
true,
)
.wait(),
);
let results: Vec<Option<KvPair>> = test_data
.into_iter()
.skip(6)
.take(1)
.map(|(k, v)| Some((k, v)))
.collect();
expect_multi_values(
results,
storage
.raw_scan(
Context::default(),
"".to_string(),
b"b2\x00".to_vec(),
Some(b"b2".to_vec()),
20,
false,
true,
)
.wait(),
);
// End key tests. Confirm that lower/upper bound works correctly.
let ctx = Context::default();
let results = vec![
(b"c1".to_vec(), b"cc11".to_vec()),
(b"c2".to_vec(), b"cc22".to_vec()),
(b"c3".to_vec(), b"cc33".to_vec()),
(b"d".to_vec(), b"dd".to_vec()),
(b"d1".to_vec(), b"dd11".to_vec()),
(b"d2".to_vec(), b"dd22".to_vec()),
]
.into_iter()
.map(|(k, v)| Some((k, v)));
let engine = storage.get_engine();
expect_multi_values(
results.clone().collect(),
block_on(async {
let snapshot =
<Storage<RocksEngine, DummyLockManager>>::snapshot(&engine, &ctx).await?;
<Storage<RocksEngine, DummyLockManager>>::forward_raw_scan(
&snapshot,
&"".to_string(),
&Key::from_encoded(b"c1".to_vec()),
Some(Key::from_encoded(b"d3".to_vec())),
20,
&mut Statistics::default(),
false,
)
}),
);
expect_multi_values(
results.rev().collect(),
block_on(async move {
let snapshot =
<Storage<RocksEngine, DummyLockManager>>::snapshot(&engine, &ctx).await?;
<Storage<RocksEngine, DummyLockManager>>::reverse_raw_scan(
&snapshot,
&"".to_string(),
&Key::from_encoded(b"d3".to_vec()),
Some(Key::from_encoded(b"c1".to_vec())),
20,
&mut Statistics::default(),
false,
)
}),
);
}
#[test]
fn test_check_key_ranges() {
fn make_ranges(ranges: Vec<(Vec<u8>, Vec<u8>)>) -> Vec<KeyRange> {
ranges
.into_iter()
.map(|(s, e)| {
let mut range = KeyRange::default();
range.set_start_key(s);
if !e.is_empty() {
range.set_end_key(e);
}
range
})
.collect()
}
let ranges = make_ranges(vec![
(b"a".to_vec(), b"a3".to_vec()),
(b"b".to_vec(), b"b3".to_vec()),
(b"c".to_vec(), b"c3".to_vec()),
]);
assert_eq!(
<Storage<RocksEngine, DummyLockManager>>::check_key_ranges(&ranges, false),
true
);
let ranges = make_ranges(vec![
(b"a".to_vec(), vec![]),
(b"b".to_vec(), vec![]),
(b"c".to_vec(), vec![]),
]);
assert_eq!(
<Storage<RocksEngine, DummyLockManager>>::check_key_ranges(&ranges, false),
true
);
let ranges = make_ranges(vec![
(b"a3".to_vec(), b"a".to_vec()),
(b"b3".to_vec(), b"b".to_vec()),
(b"c3".to_vec(), b"c".to_vec()),
]);
assert_eq!(
<Storage<RocksEngine, DummyLockManager>>::check_key_ranges(&ranges, false),
false
);
        // If end_key is omitted, the next start_key is used instead, so false is returned.
let ranges = make_ranges(vec![
(b"c".to_vec(), vec![]),
(b"b".to_vec(), vec![]),
(b"a".to_vec(), vec![]),
]);
assert_eq!(
<Storage<RocksEngine, DummyLockManager>>::check_key_ranges(&ranges, false),
false
);
let ranges = make_ranges(vec![
(b"a3".to_vec(), b"a".to_vec()),
(b"b3".to_vec(), b"b".to_vec()),
(b"c3".to_vec(), b"c".to_vec()),
]);
assert_eq!(
<Storage<RocksEngine, DummyLockManager>>::check_key_ranges(&ranges, true),
true
);
let ranges = make_ranges(vec![
(b"c3".to_vec(), vec![]),
(b"b3".to_vec(), vec![]),
(b"a3".to_vec(), vec![]),
]);
assert_eq!(
<Storage<RocksEngine, DummyLockManager>>::check_key_ranges(&ranges, true),
true
);
let ranges = make_ranges(vec![
(b"a".to_vec(), b"a3".to_vec()),
(b"b".to_vec(), b"b3".to_vec()),
(b"c".to_vec(), b"c3".to_vec()),
]);
assert_eq!(
<Storage<RocksEngine, DummyLockManager>>::check_key_ranges(&ranges, true),
false
);
let ranges = make_ranges(vec![
(b"a3".to_vec(), vec![]),
(b"b3".to_vec(), vec![]),
(b"c3".to_vec(), vec![]),
]);
assert_eq!(
<Storage<RocksEngine, DummyLockManager>>::check_key_ranges(&ranges, true),
false
);
}
#[test]
fn test_raw_batch_scan() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
let test_data = vec![
(b"a".to_vec(), b"aa".to_vec()),
(b"a1".to_vec(), b"aa11".to_vec()),
(b"a2".to_vec(), b"aa22".to_vec()),
(b"a3".to_vec(), b"aa33".to_vec()),
(b"b".to_vec(), b"bb".to_vec()),
(b"b1".to_vec(), b"bb11".to_vec()),
(b"b2".to_vec(), b"bb22".to_vec()),
(b"b3".to_vec(), b"bb33".to_vec()),
(b"c".to_vec(), b"cc".to_vec()),
(b"c1".to_vec(), b"cc11".to_vec()),
(b"c2".to_vec(), b"cc22".to_vec()),
(b"c3".to_vec(), b"cc33".to_vec()),
(b"d".to_vec(), b"dd".to_vec()),
(b"d1".to_vec(), b"dd11".to_vec()),
(b"d2".to_vec(), b"dd22".to_vec()),
(b"d3".to_vec(), b"dd33".to_vec()),
(b"e".to_vec(), b"ee".to_vec()),
(b"e1".to_vec(), b"ee11".to_vec()),
(b"e2".to_vec(), b"ee22".to_vec()),
(b"e3".to_vec(), b"ee33".to_vec()),
];
// Write key-value pairs in batch
storage
.raw_batch_put(
Context::default(),
"".to_string(),
test_data.clone(),
expect_ok_callback(tx, 0),
)
.unwrap();
rx.recv().unwrap();
// Verify pairs exist
let keys = test_data.iter().map(|&(ref k, _)| k.clone()).collect();
let results = test_data.into_iter().map(|(k, v)| Some((k, v))).collect();
expect_multi_values(
results,
storage
.raw_batch_get(Context::default(), "".to_string(), keys)
.wait(),
);
let results = vec![
Some((b"a".to_vec(), b"aa".to_vec())),
Some((b"a1".to_vec(), b"aa11".to_vec())),
Some((b"a2".to_vec(), b"aa22".to_vec())),
Some((b"a3".to_vec(), b"aa33".to_vec())),
Some((b"b".to_vec(), b"bb".to_vec())),
Some((b"b1".to_vec(), b"bb11".to_vec())),
Some((b"b2".to_vec(), b"bb22".to_vec())),
Some((b"b3".to_vec(), b"bb33".to_vec())),
Some((b"c".to_vec(), b"cc".to_vec())),
Some((b"c1".to_vec(), b"cc11".to_vec())),
Some((b"c2".to_vec(), b"cc22".to_vec())),
Some((b"c3".to_vec(), b"cc33".to_vec())),
Some((b"d".to_vec(), b"dd".to_vec())),
];
let ranges: Vec<KeyRange> = vec![b"a".to_vec(), b"b".to_vec(), b"c".to_vec()]
.into_iter()
.map(|k| {
let mut range = KeyRange::default();
range.set_start_key(k);
range
})
.collect();
expect_multi_values(
results,
storage
.raw_batch_scan(
Context::default(),
"".to_string(),
ranges.clone(),
5,
false,
false,
)
.wait(),
);
let results = vec![
Some((b"a".to_vec(), vec![])),
Some((b"a1".to_vec(), vec![])),
Some((b"a2".to_vec(), vec![])),
Some((b"a3".to_vec(), vec![])),
Some((b"b".to_vec(), vec![])),
Some((b"b1".to_vec(), vec![])),
Some((b"b2".to_vec(), vec![])),
Some((b"b3".to_vec(), vec![])),
Some((b"c".to_vec(), vec![])),
Some((b"c1".to_vec(), vec![])),
Some((b"c2".to_vec(), vec![])),
Some((b"c3".to_vec(), vec![])),
Some((b"d".to_vec(), vec![])),
];
expect_multi_values(
results,
storage
.raw_batch_scan(
Context::default(),
"".to_string(),
ranges.clone(),
5,
true,
false,
)
.wait(),
);
let results = vec![
Some((b"a".to_vec(), b"aa".to_vec())),
Some((b"a1".to_vec(), b"aa11".to_vec())),
Some((b"a2".to_vec(), b"aa22".to_vec())),
Some((b"b".to_vec(), b"bb".to_vec())),
Some((b"b1".to_vec(), b"bb11".to_vec())),
Some((b"b2".to_vec(), b"bb22".to_vec())),
Some((b"c".to_vec(), b"cc".to_vec())),
Some((b"c1".to_vec(), b"cc11".to_vec())),
Some((b"c2".to_vec(), b"cc22".to_vec())),
];
expect_multi_values(
results,
storage
.raw_batch_scan(
Context::default(),
"".to_string(),
ranges.clone(),
3,
false,
false,
)
.wait(),
);
let results = vec![
Some((b"a".to_vec(), vec![])),
Some((b"a1".to_vec(), vec![])),
Some((b"a2".to_vec(), vec![])),
Some((b"b".to_vec(), vec![])),
Some((b"b1".to_vec(), vec![])),
Some((b"b2".to_vec(), vec![])),
Some((b"c".to_vec(), vec![])),
Some((b"c1".to_vec(), vec![])),
Some((b"c2".to_vec(), vec![])),
];
expect_multi_values(
results,
storage
.raw_batch_scan(Context::default(), "".to_string(), ranges, 3, true, false)
.wait(),
);
let results = vec![
Some((b"a2".to_vec(), b"aa22".to_vec())),
Some((b"a1".to_vec(), b"aa11".to_vec())),
Some((b"a".to_vec(), b"aa".to_vec())),
Some((b"b2".to_vec(), b"bb22".to_vec())),
Some((b"b1".to_vec(), b"bb11".to_vec())),
Some((b"b".to_vec(), b"bb".to_vec())),
Some((b"c2".to_vec(), b"cc22".to_vec())),
Some((b"c1".to_vec(), b"cc11".to_vec())),
Some((b"c".to_vec(), b"cc".to_vec())),
];
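        // For a reverse scan, each range is specified with start_key > end_key.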
let ranges: Vec<KeyRange> = vec![
(b"a3".to_vec(), b"a".to_vec()),
(b"b3".to_vec(), b"b".to_vec()),
(b"c3".to_vec(), b"c".to_vec()),
]
.into_iter()
.map(|(s, e)| {
let mut range = KeyRange::default();
range.set_start_key(s);
range.set_end_key(e);
range
})
.collect();
expect_multi_values(
results,
storage
.raw_batch_scan(Context::default(), "".to_string(), ranges, 5, false, true)
.wait(),
);
let results = vec![
Some((b"c2".to_vec(), b"cc22".to_vec())),
Some((b"c1".to_vec(), b"cc11".to_vec())),
Some((b"b2".to_vec(), b"bb22".to_vec())),
Some((b"b1".to_vec(), b"bb11".to_vec())),
Some((b"a2".to_vec(), b"aa22".to_vec())),
Some((b"a1".to_vec(), b"aa11".to_vec())),
];
let ranges: Vec<KeyRange> = vec![b"c3".to_vec(), b"b3".to_vec(), b"a3".to_vec()]
.into_iter()
.map(|s| {
let mut range = KeyRange::default();
range.set_start_key(s);
range
})
.collect();
expect_multi_values(
results,
storage
.raw_batch_scan(Context::default(), "".to_string(), ranges, 2, false, true)
.wait(),
);
let results = vec![
Some((b"a2".to_vec(), vec![])),
Some((b"a1".to_vec(), vec![])),
Some((b"a".to_vec(), vec![])),
Some((b"b2".to_vec(), vec![])),
Some((b"b1".to_vec(), vec![])),
Some((b"b".to_vec(), vec![])),
Some((b"c2".to_vec(), vec![])),
Some((b"c1".to_vec(), vec![])),
Some((b"c".to_vec(), vec![])),
];
let ranges: Vec<KeyRange> = vec![
(b"a3".to_vec(), b"a".to_vec()),
(b"b3".to_vec(), b"b".to_vec()),
(b"c3".to_vec(), b"c".to_vec()),
]
.into_iter()
.map(|(s, e)| {
let mut range = KeyRange::default();
range.set_start_key(s);
range.set_end_key(e);
range
})
.collect();
expect_multi_values(
results,
storage
.raw_batch_scan(Context::default(), "".to_string(), ranges, 5, true, true)
.wait(),
);
}
#[test]
fn test_scan_lock() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::Put((Key::from_raw(b"x"), b"foo".to_vec())),
Mutation::Put((Key::from_raw(b"y"), b"foo".to_vec())),
Mutation::Put((Key::from_raw(b"z"), b"foo".to_vec())),
],
b"x".to_vec(),
100.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Prewrite::new(
vec![
Mutation::Put((Key::from_raw(b"a"), b"foo".to_vec())),
Mutation::Put((Key::from_raw(b"b"), b"foo".to_vec())),
Mutation::Put((Key::from_raw(b"c"), b"foo".to_vec())),
],
b"c".to_vec(),
101.into(),
123,
false,
3,
TimeStamp::default(),
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let (lock_a, lock_b, lock_c, lock_x, lock_y, lock_z) = (
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"c".to_vec());
lock.set_lock_version(101);
lock.set_key(b"a".to_vec());
lock.set_lock_ttl(123);
lock.set_txn_size(3);
lock
},
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"c".to_vec());
lock.set_lock_version(101);
lock.set_key(b"b".to_vec());
lock.set_lock_ttl(123);
lock.set_txn_size(3);
lock
},
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"c".to_vec());
lock.set_lock_version(101);
lock.set_key(b"c".to_vec());
lock.set_lock_ttl(123);
lock.set_txn_size(3);
lock
},
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"x".to_vec());
lock.set_lock_version(100);
lock.set_key(b"x".to_vec());
lock
},
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"x".to_vec());
lock.set_lock_version(100);
lock.set_key(b"y".to_vec());
lock
},
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"x".to_vec());
lock.set_lock_version(100);
lock.set_key(b"z".to_vec());
lock
},
);
storage
.sched_txn_command(
commands::ScanLock::new(99.into(), None, 10, Context::default()),
expect_value_callback(tx.clone(), 0, vec![]),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::ScanLock::new(100.into(), None, 10, Context::default()),
expect_value_callback(
tx.clone(),
0,
vec![lock_x.clone(), lock_y.clone(), lock_z.clone()],
),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::ScanLock::new(
100.into(),
Some(Key::from_raw(b"a")),
10,
Context::default(),
),
expect_value_callback(
tx.clone(),
0,
vec![lock_x.clone(), lock_y.clone(), lock_z.clone()],
),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::ScanLock::new(
100.into(),
Some(Key::from_raw(b"y")),
10,
Context::default(),
),
expect_value_callback(tx.clone(), 0, vec![lock_y.clone(), lock_z.clone()]),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::ScanLock::new(101.into(), None, 10, Context::default()),
expect_value_callback(
tx.clone(),
0,
vec![
lock_a.clone(),
lock_b.clone(),
lock_c.clone(),
lock_x.clone(),
lock_y.clone(),
lock_z.clone(),
],
),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::ScanLock::new(101.into(), None, 4, Context::default()),
expect_value_callback(
tx.clone(),
0,
vec![lock_a, lock_b.clone(), lock_c.clone(), lock_x.clone()],
),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::ScanLock::new(
101.into(),
Some(Key::from_raw(b"b")),
4,
Context::default(),
),
expect_value_callback(
tx.clone(),
0,
vec![
lock_b.clone(),
lock_c.clone(),
lock_x.clone(),
lock_y.clone(),
],
),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::ScanLock::new(
101.into(),
Some(Key::from_raw(b"b")),
0,
Context::default(),
),
expect_value_callback(tx, 0, vec![lock_b, lock_c, lock_x, lock_y, lock_z]),
)
.unwrap();
rx.recv().unwrap();
}
#[test]
fn test_resolve_lock() {
use crate::storage::txn::RESOLVE_LOCK_BATCH_SIZE;
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
// These locks (transaction ts=99) are not going to be resolved.
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::Put((Key::from_raw(b"a"), b"foo".to_vec())),
Mutation::Put((Key::from_raw(b"b"), b"foo".to_vec())),
Mutation::Put((Key::from_raw(b"c"), b"foo".to_vec())),
],
b"c".to_vec(),
99.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let (lock_a, lock_b, lock_c) = (
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"c".to_vec());
lock.set_lock_version(99);
lock.set_key(b"a".to_vec());
lock
},
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"c".to_vec());
lock.set_lock_version(99);
lock.set_key(b"b".to_vec());
lock
},
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"c".to_vec());
lock.set_lock_version(99);
lock.set_key(b"c".to_vec());
lock
},
);
// We should be able to resolve all locks for transaction ts=100 when there are this
// many locks.
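        // The counts straddle RESOLVE_LOCK_BATCH_SIZE boundaries to exercise the
        // single-batch, exact-batch and multi-batch resolve paths.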
let scanned_locks_coll = vec![
1,
RESOLVE_LOCK_BATCH_SIZE,
RESOLVE_LOCK_BATCH_SIZE - 1,
RESOLVE_LOCK_BATCH_SIZE + 1,
RESOLVE_LOCK_BATCH_SIZE * 2,
RESOLVE_LOCK_BATCH_SIZE * 2 - 1,
RESOLVE_LOCK_BATCH_SIZE * 2 + 1,
];
let is_rollback_coll = vec![
false, // commit
true, // rollback
];
let mut ts = 100.into();
for scanned_locks in scanned_locks_coll {
for is_rollback in &is_rollback_coll {
let mut mutations = vec![];
for i in 0..scanned_locks {
mutations.push(Mutation::Put((
Key::from_raw(format!("x{:08}", i).as_bytes()),
b"foo".to_vec(),
)));
}
storage
.sched_txn_command(
commands::Prewrite::with_defaults(mutations, b"x".to_vec(), ts),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let mut txn_status = HashMap::default();
txn_status.insert(
ts,
if *is_rollback {
TimeStamp::zero() // rollback
} else {
(ts.into_inner() + 5).into() // commit, commit_ts = start_ts + 5
},
);
storage
.sched_txn_command(
commands::ResolveLock::new(txn_status, None, vec![], Context::default()),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// All locks should be resolved except for a, b and c.
storage
.sched_txn_command(
commands::ScanLock::new(ts, None, 0, Context::default()),
expect_value_callback(
tx.clone(),
0,
vec![lock_a.clone(), lock_b.clone(), lock_c.clone()],
),
)
.unwrap();
rx.recv().unwrap();
ts = (ts.into_inner() + 10).into();
}
}
}
#[test]
fn test_resolve_lock_lite() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::Put((Key::from_raw(b"a"), b"foo".to_vec())),
Mutation::Put((Key::from_raw(b"b"), b"foo".to_vec())),
Mutation::Put((Key::from_raw(b"c"), b"foo".to_vec())),
],
b"c".to_vec(),
99.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
        // Roll back key 'b' and key 'c', leaving key 'a' still locked.
let resolve_keys = vec![Key::from_raw(b"b"), Key::from_raw(b"c")];
storage
.sched_txn_command(
commands::ResolveLockLite::new(
99.into(),
TimeStamp::zero(),
resolve_keys,
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// Check lock for key 'a'.
let lock_a = {
let mut lock = LockInfo::default();
lock.set_primary_lock(b"c".to_vec());
lock.set_lock_version(99);
lock.set_key(b"a".to_vec());
lock
};
storage
.sched_txn_command(
commands::ScanLock::new(99.into(), None, 0, Context::default()),
expect_value_callback(tx.clone(), 0, vec![lock_a]),
)
.unwrap();
rx.recv().unwrap();
// Resolve lock for key 'a'.
storage
.sched_txn_command(
commands::ResolveLockLite::new(
99.into(),
TimeStamp::zero(),
vec![Key::from_raw(b"a")],
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::Put((Key::from_raw(b"a"), b"foo".to_vec())),
Mutation::Put((Key::from_raw(b"b"), b"foo".to_vec())),
Mutation::Put((Key::from_raw(b"c"), b"foo".to_vec())),
],
b"c".to_vec(),
101.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
        // Commit key 'b' and key 'c', leaving key 'a' still locked.
let resolve_keys = vec![Key::from_raw(b"b"), Key::from_raw(b"c")];
storage
.sched_txn_command(
commands::ResolveLockLite::new(
101.into(),
102.into(),
resolve_keys,
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// Check lock for key 'a'.
let lock_a = {
let mut lock = LockInfo::default();
lock.set_primary_lock(b"c".to_vec());
lock.set_lock_version(101);
lock.set_key(b"a".to_vec());
lock
};
storage
.sched_txn_command(
commands::ScanLock::new(101.into(), None, 0, Context::default()),
expect_value_callback(tx, 0, vec![lock_a]),
)
.unwrap();
rx.recv().unwrap();
}
#[test]
fn test_txn_heart_beat() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
let k = Key::from_raw(b"k");
let v = b"v".to_vec();
let uncommitted = TxnStatus::uncommitted;
// No lock.
storage
.sched_txn_command(
commands::TxnHeartBeat::new(k.clone(), 10.into(), 100, Context::default()),
expect_fail_callback(tx.clone(), 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::TxnLockNotFound { .. },
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Prewrite::with_lock_ttl(
vec![Mutation::Put((k.clone(), v))],
k.as_encoded().to_vec(),
10.into(),
100,
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
        // `advise_ttl` = 90, which is less than the current ttl 100. The lock's ttl will remain 100.
storage
.sched_txn_command(
commands::TxnHeartBeat::new(k.clone(), 10.into(), 90, Context::default()),
expect_value_callback(tx.clone(), 0, uncommitted(100, TimeStamp::zero())),
)
.unwrap();
rx.recv().unwrap();
// `advise_ttl` = 110, which is greater than current ttl. The lock's ttl will be updated to
// 110.
storage
.sched_txn_command(
commands::TxnHeartBeat::new(k.clone(), 10.into(), 110, Context::default()),
expect_value_callback(tx.clone(), 0, uncommitted(110, TimeStamp::zero())),
)
.unwrap();
rx.recv().unwrap();
        // Lock does not match. Nothing happens except throwing an error.
storage
.sched_txn_command(
commands::TxnHeartBeat::new(k, 11.into(), 150, Context::default()),
expect_fail_callback(tx, 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::TxnLockNotFound { .. },
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
}
#[test]
fn test_check_txn_status() {
let storage = TestStorageBuilder::new().build().unwrap();
let (tx, rx) = channel();
let k = Key::from_raw(b"k");
let v = b"b".to_vec();
let ts = TimeStamp::compose;
use TxnStatus::*;
let uncommitted = TxnStatus::uncommitted;
let committed = TxnStatus::committed;
// No lock and no commit info. Gets an error.
storage
.sched_txn_command(
commands::CheckTxnStatus::new(
k.clone(),
ts(9, 0),
ts(9, 1),
ts(9, 1),
false,
Context::default(),
),
expect_fail_callback(tx.clone(), 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::TxnNotFound { .. },
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
        // No lock and no commit info. If rollback_if_not_exist is specified, the key will
        // be rolled back.
storage
.sched_txn_command(
commands::CheckTxnStatus::new(
k.clone(),
ts(9, 0),
ts(9, 1),
ts(9, 1),
true,
Context::default(),
),
expect_value_callback(tx.clone(), 0, LockNotExist),
)
.unwrap();
rx.recv().unwrap();
        // A rollback will be written, so a later-arriving prewrite will fail.
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::Put((k.clone(), v.clone()))],
k.as_encoded().to_vec(),
ts(9, 0),
),
expect_fail_callback(tx.clone(), 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::WriteConflict { .. },
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Prewrite::with_lock_ttl(
vec![Mutation::Put((k.clone(), v.clone()))],
k.as_encoded().to_vec(),
ts(10, 0),
100,
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
        // If the lock exists and is not expired, returns the lock's TTL.
storage
.sched_txn_command(
commands::CheckTxnStatus::new(
k.clone(),
ts(10, 0),
ts(12, 0),
ts(15, 0),
true,
Context::default(),
),
expect_value_callback(tx.clone(), 0, uncommitted(100, TimeStamp::zero())),
)
.unwrap();
rx.recv().unwrap();
// TODO: Check the lock's min_commit_ts field.
storage
.sched_txn_command(
commands::Commit::new(vec![k.clone()], ts(10, 0), ts(20, 0), Context::default()),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// If the transaction is committed, returns the commit_ts.
storage
.sched_txn_command(
commands::CheckTxnStatus::new(
k.clone(),
ts(10, 0),
ts(12, 0),
ts(15, 0),
true,
Context::default(),
),
expect_value_callback(tx.clone(), 0, committed(ts(20, 0))),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Prewrite::with_lock_ttl(
vec![Mutation::Put((k.clone(), v))],
k.as_encoded().to_vec(),
ts(25, 0),
100,
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
        // If the lock has expired, clean it up.
storage
.sched_txn_command(
commands::CheckTxnStatus::new(
k.clone(),
ts(25, 0),
ts(126, 0),
ts(127, 0),
true,
Context::default(),
),
expect_value_callback(tx.clone(), 0, TtlExpire),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Commit::new(vec![k], ts(25, 0), ts(28, 0), Context::default()),
expect_fail_callback(tx, 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::TxnLockNotFound { .. },
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
}
#[test]
fn test_construct_point_get_command_from_get_request() {
let mut context = Context::default();
context.set_region_id(1);
let raw_key = b"raw_key".to_vec();
let version = 10;
let mut req = GetRequest::default();
req.set_context(context.clone());
req.set_key(raw_key.clone());
req.set_version(version);
let cmd = PointGetCommand::from_get(&mut req);
assert_eq!(cmd.ctx, context);
assert_eq!(cmd.key, Key::from_raw(&raw_key));
assert_eq!(cmd.ts, Some(TimeStamp::new(version)));
}
#[test]
fn test_construct_point_get_command_from_raw_get_request() {
let mut context = Context::default();
context.set_region_id(1);
let raw_key = b"raw_key".to_vec();
let mut req = RawGetRequest::default();
req.set_context(context.clone());
req.set_key(raw_key.clone());
let cmd = PointGetCommand::from_raw_get(&mut req);
assert_eq!(cmd.ctx, context);
assert_eq!(cmd.key.into_encoded(), raw_key);
assert_eq!(cmd.ts, None);
}
fn test_pessimistic_lock_impl(pipelined_pessimistic_lock: bool) {
let storage = TestStorageBuilder::new()
.set_lock_mgr(DummyLockManager {})
.set_pipelined_pessimistic_lock(pipelined_pessimistic_lock)
.build()
.unwrap();
let (tx, rx) = channel();
let (key, val) = (Key::from_raw(b"key"), b"val".to_vec());
let (key2, val2) = (Key::from_raw(b"key2"), b"val2".to_vec());
        // Key does not exist
for &return_values in &[false, true] {
let pessimistic_lock_res = if return_values {
PessimisticLockRes::Values(vec![None])
} else {
PessimisticLockRes::Empty
};
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
vec![(key.clone(), false)],
10,
10,
return_values,
),
expect_pessimistic_lock_res_callback(tx.clone(), pessimistic_lock_res.clone()),
)
.unwrap();
rx.recv().unwrap();
// Duplicated command
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
vec![(key.clone(), false)],
10,
10,
return_values,
),
expect_pessimistic_lock_res_callback(tx.clone(), pessimistic_lock_res.clone()),
)
.unwrap();
rx.recv().unwrap();
delete_pessimistic_lock(&storage, key.clone(), 10, 10);
}
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(vec![(key.clone(), false)], 10, 10, false),
expect_pessimistic_lock_res_callback(tx.clone(), PessimisticLockRes::Empty),
)
.unwrap();
rx.recv().unwrap();
// KeyIsLocked
for &return_values in &[false, true] {
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
vec![(key.clone(), false)],
20,
20,
return_values,
),
expect_fail_callback(tx.clone(), 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(
mvcc::Error(box mvcc::ErrorInner::KeyIsLocked(_)),
)))) => (),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
// The DummyLockManager consumes the Msg::WaitForLock.
rx.recv_timeout(Duration::from_millis(100)).unwrap_err();
}
// Put key and key2.
storage
.sched_txn_command(
commands::PrewritePessimistic::new(
vec![
(Mutation::Put((key.clone(), val.clone())), true),
(Mutation::Put((key2.clone(), val2.clone())), false),
],
key.to_raw().unwrap(),
10.into(),
3000,
10.into(),
1,
TimeStamp::zero(),
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Commit::new(
vec![key.clone(), key2.clone()],
10.into(),
20.into(),
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// WriteConflict
for &return_values in &[false, true] {
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
vec![(key.clone(), false)],
15,
15,
return_values,
),
expect_fail_callback(tx.clone(), 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(
mvcc::Error(box mvcc::ErrorInner::WriteConflict { .. }),
)))) => (),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
}
// Return multiple values
for &return_values in &[false, true] {
let pessimistic_lock_res = if return_values {
PessimisticLockRes::Values(vec![Some(val.clone()), Some(val2.clone()), None])
} else {
PessimisticLockRes::Empty
};
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
vec![
(key.clone(), false),
(key2.clone(), false),
(Key::from_raw(b"key3"), false),
],
30,
30,
return_values,
),
expect_pessimistic_lock_res_callback(tx.clone(), pessimistic_lock_res),
)
.unwrap();
rx.recv().unwrap();
delete_pessimistic_lock(&storage, key.clone(), 30, 30);
}
}
#[test]
fn test_pessimistic_lock() {
test_pessimistic_lock_impl(false);
test_pessimistic_lock_impl(true);
}
pub enum Msg {
WaitFor {
start_ts: TimeStamp,
cb: StorageCallback,
pr: ProcessResult,
lock: Lock,
is_first_lock: bool,
timeout: Option<WaitTimeout>,
},
WakeUp {
lock_ts: TimeStamp,
hashes: Vec<u64>,
commit_ts: TimeStamp,
is_pessimistic_txn: bool,
},
}
    // `ProxyLockMgr` forwards all msgs it receives to the `Sender`.
    // It's used to check whether we send the right messages to the lock manager.
#[derive(Clone)]
pub struct ProxyLockMgr {
tx: Sender<Msg>,
has_waiter: Arc<AtomicBool>,
}
impl ProxyLockMgr {
pub fn new(tx: Sender<Msg>) -> Self {
Self {
tx,
has_waiter: Arc::new(AtomicBool::new(false)),
}
}
pub fn set_has_waiter(&mut self, has_waiter: bool) {
self.has_waiter.store(has_waiter, Ordering::Relaxed);
}
}
impl LockManager for ProxyLockMgr {
fn wait_for(
&self,
start_ts: TimeStamp,
cb: StorageCallback,
pr: ProcessResult,
lock: Lock,
is_first_lock: bool,
timeout: Option<WaitTimeout>,
) {
self.tx
.send(Msg::WaitFor {
start_ts,
cb,
pr,
lock,
is_first_lock,
timeout,
})
.unwrap();
}
fn wake_up(
&self,
lock_ts: TimeStamp,
hashes: Vec<u64>,
commit_ts: TimeStamp,
is_pessimistic_txn: bool,
) {
self.tx
.send(Msg::WakeUp {
lock_ts,
hashes,
commit_ts,
is_pessimistic_txn,
})
.unwrap();
}
fn has_waiter(&self) -> bool {
self.has_waiter.load(Ordering::Relaxed)
}
}
    // Test whether `Storage` sends the right wait-for-lock msgs to `LockManager`.
#[test]
fn validate_wait_for_lock_msg() {
let (msg_tx, msg_rx) = channel();
let storage = TestStorageBuilder::from_engine(TestEngineBuilder::new().build().unwrap())
.set_lock_mgr(ProxyLockMgr::new(msg_tx))
.build()
.unwrap();
let (k, v) = (b"k".to_vec(), b"v".to_vec());
let (tx, rx) = channel();
// Write lock-k.
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::Put((Key::from_raw(&k), v))],
k.clone(),
10.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
        // No wait-for msg should have been sent yet.
assert!(msg_rx.try_recv().is_err());
// Meet lock-k.
storage
.sched_txn_command(
commands::AcquirePessimisticLock::new(
vec![(Key::from_raw(b"foo"), false), (Key::from_raw(&k), false)],
k.clone(),
20.into(),
3000,
true,
20.into(),
Some(WaitTimeout::Millis(100)),
false,
21.into(),
Context::default(),
),
expect_ok_callback(tx, 0),
)
.unwrap();
        // The transaction should be waiting for the lock to be released, so the cb won't be called.
rx.recv_timeout(Duration::from_millis(500)).unwrap_err();
let msg = msg_rx.try_recv().unwrap();
        // Validate the msg contents.
match msg {
Msg::WaitFor {
start_ts,
pr,
lock,
is_first_lock,
timeout,
..
} => {
assert_eq!(start_ts, TimeStamp::new(20));
assert_eq!(
lock,
Lock {
ts: 10.into(),
hash: Key::from_raw(&k).gen_hash(),
}
);
            assert!(is_first_lock);
assert_eq!(timeout, Some(WaitTimeout::Millis(100)));
match pr {
ProcessResult::PessimisticLockRes { res } => match res {
Err(Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(
MvccError(box MvccErrorInner::KeyIsLocked(info)),
))))) => {
assert_eq!(info.get_key(), k.as_slice());
assert_eq!(info.get_primary_lock(), k.as_slice());
assert_eq!(info.get_lock_version(), 10);
}
_ => panic!("unexpected error"),
},
_ => panic!("unexpected process result"),
};
}
_ => panic!("unexpected msg"),
}
}
    // Test whether `Storage` sends the right wake-up msgs to `LockManager`.
#[test]
fn validate_wake_up_msg() {
fn assert_wake_up_msg_eq(
msg: Msg,
expected_lock_ts: TimeStamp,
expected_hashes: Vec<u64>,
expected_commit_ts: TimeStamp,
expected_is_pessimistic_txn: bool,
) {
match msg {
Msg::WakeUp {
lock_ts,
hashes,
commit_ts,
is_pessimistic_txn,
} => {
assert_eq!(lock_ts, expected_lock_ts);
assert_eq!(hashes, expected_hashes);
assert_eq!(commit_ts, expected_commit_ts);
assert_eq!(is_pessimistic_txn, expected_is_pessimistic_txn);
}
_ => panic!("unexpected msg"),
}
}
let (msg_tx, msg_rx) = channel();
let mut lock_mgr = ProxyLockMgr::new(msg_tx);
lock_mgr.set_has_waiter(true);
let storage = TestStorageBuilder::from_engine(TestEngineBuilder::new().build().unwrap())
.set_lock_mgr(lock_mgr)
.build()
.unwrap();
let (tx, rx) = channel();
let prewrite_locks = |keys: &[Key], ts: TimeStamp| {
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
keys.iter()
.map(|k| Mutation::Put((k.clone(), b"v".to_vec())))
.collect(),
keys[0].to_raw().unwrap(),
ts,
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
};
let acquire_pessimistic_locks = |keys: &[Key], ts: TimeStamp| {
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
keys.iter().map(|k| (k.clone(), false)).collect(),
ts,
ts,
false,
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
};
let keys = vec![
Key::from_raw(b"a"),
Key::from_raw(b"b"),
Key::from_raw(b"c"),
];
let key_hashes: Vec<u64> = keys.iter().map(|k| k.gen_hash()).collect();
// Commit
prewrite_locks(&keys, 10.into());
        // If locks don't exist, the hashes of released locks should be empty.
for empty_hashes in &[false, true] {
storage
.sched_txn_command(
commands::Commit::new(keys.clone(), 10.into(), 20.into(), Context::default()),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let msg = msg_rx.recv().unwrap();
let hashes = if *empty_hashes {
Vec::new()
} else {
key_hashes.clone()
};
assert_wake_up_msg_eq(msg, 10.into(), hashes, 20.into(), false);
}
// Cleanup
for pessimistic in &[false, true] {
let mut ts = TimeStamp::new(30);
if *pessimistic {
ts.incr();
acquire_pessimistic_locks(&keys[..1], ts);
} else {
prewrite_locks(&keys[..1], ts);
}
for empty_hashes in &[false, true] {
storage
.sched_txn_command(
commands::Cleanup::new(
keys[0].clone(),
ts,
TimeStamp::max(),
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let msg = msg_rx.recv().unwrap();
let (hashes, pessimistic) = if *empty_hashes {
(Vec::new(), false)
} else {
(key_hashes[..1].to_vec(), *pessimistic)
};
assert_wake_up_msg_eq(msg, ts, hashes, 0.into(), pessimistic);
}
}
// Rollback
for pessimistic in &[false, true] {
let mut ts = TimeStamp::new(40);
if *pessimistic {
ts.incr();
acquire_pessimistic_locks(&keys, ts);
} else {
prewrite_locks(&keys, ts);
}
for empty_hashes in &[false, true] {
storage
.sched_txn_command(
commands::Rollback::new(keys.clone(), ts, Context::default()),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let msg = msg_rx.recv().unwrap();
let (hashes, pessimistic) = if *empty_hashes {
(Vec::new(), false)
} else {
(key_hashes.clone(), *pessimistic)
};
assert_wake_up_msg_eq(msg, ts, hashes, 0.into(), pessimistic);
}
}
// PessimisticRollback
acquire_pessimistic_locks(&keys, 50.into());
for empty_hashes in &[false, true] {
storage
.sched_txn_command(
commands::PessimisticRollback::new(
keys.clone(),
50.into(),
50.into(),
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let msg = msg_rx.recv().unwrap();
let (hashes, pessimistic) = if *empty_hashes {
(Vec::new(), false)
} else {
(key_hashes.clone(), true)
};
assert_wake_up_msg_eq(msg, 50.into(), hashes, 0.into(), pessimistic);
}
// ResolveLockLite
for commit in &[false, true] {
let mut start_ts = TimeStamp::new(60);
let commit_ts = if *commit {
start_ts.incr();
start_ts.next()
} else {
TimeStamp::zero()
};
prewrite_locks(&keys, start_ts);
for empty_hashes in &[false, true] {
storage
.sched_txn_command(
commands::ResolveLockLite::new(
start_ts,
commit_ts,
keys.clone(),
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let msg = msg_rx.recv().unwrap();
let hashes = if *empty_hashes {
Vec::new()
} else {
key_hashes.clone()
};
assert_wake_up_msg_eq(msg, start_ts, hashes, commit_ts, false);
}
}
// ResolveLock
let mut txn_status = HashMap::default();
acquire_pessimistic_locks(&keys, 70.into());
// Rollback start_ts=70
txn_status.insert(TimeStamp::new(70), TimeStamp::zero());
let committed_keys = vec![
Key::from_raw(b"d"),
Key::from_raw(b"e"),
Key::from_raw(b"f"),
];
let committed_key_hashes: Vec<u64> = committed_keys.iter().map(|k| k.gen_hash()).collect();
// Commit start_ts=75
prewrite_locks(&committed_keys, 75.into());
txn_status.insert(TimeStamp::new(75), TimeStamp::new(76));
storage
.sched_txn_command(
commands::ResolveLock::new(txn_status, None, vec![], Context::default()),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let mut msg1 = msg_rx.recv().unwrap();
let mut msg2 = msg_rx.recv().unwrap();
match msg1 {
Msg::WakeUp { lock_ts, .. } => {
if lock_ts != TimeStamp::new(70) {
                    // Let msg1 be the msg of the rolled-back transaction.
std::mem::swap(&mut msg1, &mut msg2);
}
assert_wake_up_msg_eq(msg1, 70.into(), key_hashes, 0.into(), true);
assert_wake_up_msg_eq(msg2, 75.into(), committed_key_hashes, 76.into(), false);
}
_ => panic!("unexpect msg"),
}
// CheckTxnStatus
let key = Key::from_raw(b"k");
let start_ts = TimeStamp::compose(100, 0);
storage
.sched_txn_command(
commands::Prewrite::with_lock_ttl(
vec![Mutation::Put((key.clone(), b"v".to_vec()))],
key.to_raw().unwrap(),
start_ts,
100,
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
        // Not expired
storage
.sched_txn_command(
commands::CheckTxnStatus::new(
key.clone(),
start_ts,
TimeStamp::compose(110, 0),
TimeStamp::compose(150, 0),
false,
Context::default(),
),
expect_value_callback(tx.clone(), 0, TxnStatus::uncommitted(100, 0.into())),
)
.unwrap();
rx.recv().unwrap();
// No msg
assert!(msg_rx.try_recv().is_err());
// Expired
storage
.sched_txn_command(
commands::CheckTxnStatus::new(
key.clone(),
start_ts,
TimeStamp::compose(110, 0),
TimeStamp::compose(201, 0),
false,
Context::default(),
),
expect_value_callback(tx.clone(), 0, TxnStatus::TtlExpire),
)
.unwrap();
rx.recv().unwrap();
assert_wake_up_msg_eq(
msg_rx.recv().unwrap(),
start_ts,
vec![key.gen_hash()],
0.into(),
false,
);
}
}
| 33.913278 | 109 | 0.428049 |
26c224100b3f9ac6ae4144ff473db1c639ab9cca | 6,191 | // Copyright 2020 Hyperledger Ursa Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![deny(
missing_docs,
trivial_casts,
trivial_numeric_casts,
unconditional_recursion,
unused_import_braces,
unused_lifetimes,
unused_qualifications,
unused_extern_crates,
unused_parens,
while_true
)]
//! Implements Shamir's simple secret sharing scheme.
//! Also provides an implementation of verifiable secret sharing as described by:
//! Feldman (see <https://www.cs.umd.edu/~gasarch/TOPICS/secretsharing/feldmanVSS.pdf>
//! and Pedersen
//! (see <https://www.cs.cornell.edu/courses/cs754/2001fa/129.PDF>)
//!
//! Feldman and Pedersen are similar in many ways. It's hard to describe when to use
//! one over the other. Indeed both are used in
//! <http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.134.6445&rep=rep1&type=pdf>.
//!
//! Feldman reveals the public value of the verifier whereas Pedersen's hides it.
//!
//! FUTURE: Adept secret sharing as described by Phillip Rogaway
//! (see <https://eprint.iacr.org/2020/800>)
//!
//! Future work would be to use Reed-Solomon
//! codes to check for corrupted shares.
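//!
//! As a toy illustration of the underlying math (not this crate's API; the
//! concrete field and group types are supplied through the `Field`/`Group`
//! traits below), take Shamir over GF(7) with threshold 2: pick
//! f(x) = 3 + 2x, so the secret is f(0) = 3 and two shares are
//! (1, f(1) = 5) and (2, f(2) = 0). Lagrange interpolation at x = 0
//! recovers the secret:
//!
//! ```text
//! f(0) = 5 * (2 / (2 - 1)) + 0 * (1 / (1 - 2))  (mod 7)
//!      = 10 mod 7
//!      = 3
//! ```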
#![cfg_attr(feature = "nightly", feature(doc_cfg))]
pub use generic_array::{self, typenum};
use error::{SharingError, SharingResult};
use generic_array::{ArrayLength, GenericArray};
use rand::{CryptoRng, RngCore};
use std::{convert::TryFrom, marker::PhantomData};
/// Represents a prime-order cyclic group where the exponent is a finite field.
/// `Exp` is the type used for the finite field operations
pub trait Group<Exp: ?Sized = Self> {
/// The group size in bytes
type Size: ArrayLength<u8>;
/// Return the zero element of the group, the additive identity
fn zero() -> Self;
/// Return the group element from the specified bytes
fn from_bytes<B: AsRef<[u8]>>(value: B) -> SharingResult<Self>
where
Self: Sized;
/// Return the element chosen uniformly at random using the user-provided RNG
fn random(rng: &mut (impl RngCore + CryptoRng)) -> Self;
/// True iff this element is zero
fn is_zero(&self) -> bool;
/// True iff this element is not zero and less than the modulus
fn is_valid(&self) -> bool;
/// Field negation
fn negate(&mut self);
/// Add another element to this element
fn add_assign(&mut self, rhs: &Self);
/// Subtract another element from this element
fn sub_assign(&mut self, rhs: &Self);
/// Perform a scalar multiplication (exponentiation if the group is in multiplicative form)
fn scalar_mul_assign(&mut self, rhs: &Exp);
/// Serialize this element to bytes
fn to_bytes(&self) -> GenericArray<u8, Self::Size>;
}
/// Represents the finite field methods used by Sharing Schemes
pub trait Field: Group {
/// Return the one element of the field, the multiplicative identity
fn one() -> Self;
/// Return the element from the given number
fn from_usize(value: usize) -> Self;
/// Multiply the inverse of another element with this element
fn scalar_div_assign(&mut self, rhs: &Self);
}
/// The polynomial used for generating the shares
#[derive(Debug)]
pub(crate) struct Polynomial<S: Field> {
pub(crate) coefficients: Vec<S>,
}
impl<S: Field> Polynomial<S> {
/// Construct a random polynomial of the specified degree using a specified intercept
pub fn new(rng: &mut (impl RngCore + CryptoRng), intercept: &S, degree: usize) -> Self {
let mut coefficients = Vec::with_capacity(degree);
// Ensure intercept is set
let mut i = S::zero();
i.add_assign(intercept);
coefficients.push(i);
// Assign random coefficients to polynomial
// Start at 1 since 0 is the intercept and not chosen at random
for _ in 1..degree {
coefficients.push(S::random(rng));
}
Self { coefficients }
}
/// Compute the value of the polynomial for the given `x`
pub fn evaluate(&self, x: &S) -> S {
// Compute the polynomial value using Horner's Method
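        // For coefficients [a0, a1, a2] this unrolls to:
        //   out = a2
        //   out = a2*x + a1
        //   out = (a2*x + a1)*x + a0  ==  a0 + a1*x + a2*x^2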
let degree = self.coefficients.len() - 1;
// b_n = a_n
let mut out = S::zero();
out.add_assign(&self.coefficients[degree]);
for i in (0..degree).rev() {
// b_{n-1} = a_{n-1} + b_n*x
out.scalar_mul_assign(x);
out.add_assign(&self.coefficients[i]);
}
out
}
}
/// A share verifier is used to provide integrity checking of shamir shares
#[derive(Debug)]
pub struct ShareVerifier<S: Field, R: Group<S>> {
pub(crate) value: R,
pub(crate) phantom: PhantomData<S>,
}
impl<S: Field, R: Group<S>> ShareVerifier<S, R> {
/// Serialize the share verifier commitment to a byte array
pub fn to_bytes(&self) -> GenericArray<u8, R::Size> {
self.value.to_bytes()
}
}
impl<S: Field, R: Group<S>> TryFrom<&[u8]> for ShareVerifier<S, R> {
type Error = SharingError;
fn try_from(value: &[u8]) -> SharingResult<Self> {
Ok(Self {
value: R::from_bytes(value)?,
phantom: PhantomData,
})
}
}
impl<S: Field, R: Group<S>> Clone for ShareVerifier<S, R> {
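    // `R` has no `Clone` bound, so clone by round-tripping through the byte encoding.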
fn clone(&self) -> Self {
Self {
value: R::from_bytes(&self.value.to_bytes()).unwrap(),
phantom: PhantomData,
}
}
}
/// Sharing Errors and Results
pub mod error;
/// Feldman's verifiable secret sharing scheme
pub mod feldman;
/// Pedersen's verifiable secret sharing scheme
pub mod pedersen;
/// Shamir secret sharing scheme
pub mod shamir;
/// Provide a suite of tests for implementers to run for their implementations
#[cfg(feature = "impl_tests")]
pub mod tests;
| 34.394444 | 95 | 0.667259 |
6195d31660f07b5e3b267d3416dc50dda3f4ca80 | 6,410 | #[doc = "Reader of register CPO"]
pub type R = crate::R<u32, super::CPO>;
#[doc = "Writer for register CPO"]
pub type W = crate::W<u32, super::CPO>;
#[doc = "Register CPO `reset()`'s with value 0"]
impl crate::ResetValue for super::CPO {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Compute Operation request\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CPOREQ_A {
#[doc = "0: Request is cleared."]
_0 = 0,
#[doc = "1: Request Compute Operation."]
_1 = 1,
}
impl From<CPOREQ_A> for bool {
#[inline(always)]
fn from(variant: CPOREQ_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `CPOREQ`"]
pub type CPOREQ_R = crate::R<bool, CPOREQ_A>;
impl CPOREQ_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> CPOREQ_A {
match self.bits {
false => CPOREQ_A::_0,
true => CPOREQ_A::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline(always)]
pub fn is_0(&self) -> bool {
*self == CPOREQ_A::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline(always)]
pub fn is_1(&self) -> bool {
*self == CPOREQ_A::_1
}
}
#[doc = "Write proxy for field `CPOREQ`"]
pub struct CPOREQ_W<'a> {
w: &'a mut W,
}
impl<'a> CPOREQ_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: CPOREQ_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Request is cleared."]
#[inline(always)]
pub fn _0(self) -> &'a mut W {
self.variant(CPOREQ_A::_0)
}
#[doc = "Request Compute Operation."]
#[inline(always)]
pub fn _1(self) -> &'a mut W {
self.variant(CPOREQ_A::_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
#[doc = "Compute Operation acknowledge\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CPOACK_A {
#[doc = "0: Compute operation entry has not completed or compute operation exit has completed."]
_0 = 0,
#[doc = "1: Compute operation entry has completed or compute operation exit has not completed."]
_1 = 1,
}
impl From<CPOACK_A> for bool {
#[inline(always)]
fn from(variant: CPOACK_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `CPOACK`"]
pub type CPOACK_R = crate::R<bool, CPOACK_A>;
impl CPOACK_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> CPOACK_A {
match self.bits {
false => CPOACK_A::_0,
true => CPOACK_A::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline(always)]
pub fn is_0(&self) -> bool {
*self == CPOACK_A::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline(always)]
pub fn is_1(&self) -> bool {
*self == CPOACK_A::_1
}
}
#[doc = "Compute Operation wakeup on interrupt\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CPOWOI_A {
#[doc = "0: No effect."]
_0 = 0,
#[doc = "1: When set, the CPOREQ is cleared on any interrupt or exception vector fetch."]
_1 = 1,
}
impl From<CPOWOI_A> for bool {
#[inline(always)]
fn from(variant: CPOWOI_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `CPOWOI`"]
pub type CPOWOI_R = crate::R<bool, CPOWOI_A>;
impl CPOWOI_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> CPOWOI_A {
match self.bits {
false => CPOWOI_A::_0,
true => CPOWOI_A::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline(always)]
pub fn is_0(&self) -> bool {
*self == CPOWOI_A::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline(always)]
pub fn is_1(&self) -> bool {
*self == CPOWOI_A::_1
}
}
#[doc = "Write proxy for field `CPOWOI`"]
pub struct CPOWOI_W<'a> {
w: &'a mut W,
}
impl<'a> CPOWOI_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: CPOWOI_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "No effect."]
#[inline(always)]
pub fn _0(self) -> &'a mut W {
self.variant(CPOWOI_A::_0)
}
#[doc = "When set, the CPOREQ is cleared on any interrupt or exception vector fetch."]
#[inline(always)]
pub fn _1(self) -> &'a mut W {
self.variant(CPOWOI_A::_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
self.w
}
}
impl R {
#[doc = "Bit 0 - Compute Operation request"]
#[inline(always)]
pub fn cporeq(&self) -> CPOREQ_R {
CPOREQ_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - Compute Operation acknowledge"]
#[inline(always)]
pub fn cpoack(&self) -> CPOACK_R {
CPOACK_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 2 - Compute Operation wakeup on interrupt"]
#[inline(always)]
pub fn cpowoi(&self) -> CPOWOI_R {
CPOWOI_R::new(((self.bits >> 2) & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - Compute Operation request"]
#[inline(always)]
pub fn cporeq(&mut self) -> CPOREQ_W {
CPOREQ_W { w: self }
}
#[doc = "Bit 2 - Compute Operation wakeup on interrupt"]
#[inline(always)]
pub fn cpowoi(&mut self) -> CPOWOI_W {
CPOWOI_W { w: self }
}
}
| 28.114035 | 100 | 0.550546 |
d5e8704ae4b384d1b94d2cc9af43556c284b6d6b | 9,917 | // Generated from definition io.k8s.api.extensions.v1beta1.DeploymentSpec
/// DeploymentSpec is the specification of the desired behavior of the Deployment.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct DeploymentSpec {
/// Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)
pub min_ready_seconds: Option<i32>,
/// Indicates that the deployment is paused and will not be processed by the deployment controller.
pub paused: Option<bool>,
/// The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. This is set to the max value of int32 (i.e. 2147483647) by default, which means "no deadline".
pub progress_deadline_seconds: Option<i32>,
/// Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
pub replicas: Option<i32>,
/// The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified.
pub revision_history_limit: Option<i32>,
/// DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.
pub rollback_to: Option<::v1_12::api::extensions::v1beta1::RollbackConfig>,
/// Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.
pub selector: Option<::v1_12::apimachinery::pkg::apis::meta::v1::LabelSelector>,
/// The deployment strategy to use to replace existing pods with new ones.
pub strategy: Option<::v1_12::api::extensions::v1beta1::DeploymentStrategy>,
/// Template describes the pods that will be created.
pub template: ::v1_12::api::core::v1::PodTemplateSpec,
}
impl<'de> ::serde::Deserialize<'de> for DeploymentSpec {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: ::serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_min_ready_seconds,
Key_paused,
Key_progress_deadline_seconds,
Key_replicas,
Key_revision_history_limit,
Key_rollback_to,
Key_selector,
Key_strategy,
Key_template,
Other,
}
impl<'de> ::serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: ::serde::Deserializer<'de> {
struct Visitor;
impl<'de> ::serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: ::serde::de::Error {
Ok(match v {
"minReadySeconds" => Field::Key_min_ready_seconds,
"paused" => Field::Key_paused,
"progressDeadlineSeconds" => Field::Key_progress_deadline_seconds,
"replicas" => Field::Key_replicas,
"revisionHistoryLimit" => Field::Key_revision_history_limit,
"rollbackTo" => Field::Key_rollback_to,
"selector" => Field::Key_selector,
"strategy" => Field::Key_strategy,
"template" => Field::Key_template,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> ::serde::de::Visitor<'de> for Visitor {
type Value = DeploymentSpec;
fn expecting(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "struct DeploymentSpec")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: ::serde::de::MapAccess<'de> {
let mut value_min_ready_seconds: Option<i32> = None;
let mut value_paused: Option<bool> = None;
let mut value_progress_deadline_seconds: Option<i32> = None;
let mut value_replicas: Option<i32> = None;
let mut value_revision_history_limit: Option<i32> = None;
let mut value_rollback_to: Option<::v1_12::api::extensions::v1beta1::RollbackConfig> = None;
let mut value_selector: Option<::v1_12::apimachinery::pkg::apis::meta::v1::LabelSelector> = None;
let mut value_strategy: Option<::v1_12::api::extensions::v1beta1::DeploymentStrategy> = None;
let mut value_template: Option<::v1_12::api::core::v1::PodTemplateSpec> = None;
while let Some(key) = ::serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_min_ready_seconds => value_min_ready_seconds = ::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_paused => value_paused = ::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_progress_deadline_seconds => value_progress_deadline_seconds = ::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_replicas => value_replicas = ::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_revision_history_limit => value_revision_history_limit = ::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_rollback_to => value_rollback_to = ::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_selector => value_selector = ::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_strategy => value_strategy = ::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_template => value_template = Some(::serde::de::MapAccess::next_value(&mut map)?),
Field::Other => { let _: ::serde::de::IgnoredAny = ::serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(DeploymentSpec {
min_ready_seconds: value_min_ready_seconds,
paused: value_paused,
progress_deadline_seconds: value_progress_deadline_seconds,
replicas: value_replicas,
revision_history_limit: value_revision_history_limit,
rollback_to: value_rollback_to,
selector: value_selector,
strategy: value_strategy,
template: value_template.ok_or_else(|| ::serde::de::Error::missing_field("template"))?,
})
}
}
deserializer.deserialize_struct(
"DeploymentSpec",
&[
"minReadySeconds",
"paused",
"progressDeadlineSeconds",
"replicas",
"revisionHistoryLimit",
"rollbackTo",
"selector",
"strategy",
"template",
],
Visitor,
)
}
}
impl ::serde::Serialize for DeploymentSpec {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: ::serde::Serializer {
let mut state = serializer.serialize_struct(
"DeploymentSpec",
0 +
self.min_ready_seconds.as_ref().map_or(0, |_| 1) +
self.paused.as_ref().map_or(0, |_| 1) +
self.progress_deadline_seconds.as_ref().map_or(0, |_| 1) +
self.replicas.as_ref().map_or(0, |_| 1) +
self.revision_history_limit.as_ref().map_or(0, |_| 1) +
self.rollback_to.as_ref().map_or(0, |_| 1) +
self.selector.as_ref().map_or(0, |_| 1) +
self.strategy.as_ref().map_or(0, |_| 1) +
1,
)?;
if let Some(value) = &self.min_ready_seconds {
::serde::ser::SerializeStruct::serialize_field(&mut state, "minReadySeconds", value)?;
}
if let Some(value) = &self.paused {
::serde::ser::SerializeStruct::serialize_field(&mut state, "paused", value)?;
}
if let Some(value) = &self.progress_deadline_seconds {
::serde::ser::SerializeStruct::serialize_field(&mut state, "progressDeadlineSeconds", value)?;
}
if let Some(value) = &self.replicas {
::serde::ser::SerializeStruct::serialize_field(&mut state, "replicas", value)?;
}
if let Some(value) = &self.revision_history_limit {
::serde::ser::SerializeStruct::serialize_field(&mut state, "revisionHistoryLimit", value)?;
}
if let Some(value) = &self.rollback_to {
::serde::ser::SerializeStruct::serialize_field(&mut state, "rollbackTo", value)?;
}
if let Some(value) = &self.selector {
::serde::ser::SerializeStruct::serialize_field(&mut state, "selector", value)?;
}
if let Some(value) = &self.strategy {
::serde::ser::SerializeStruct::serialize_field(&mut state, "strategy", value)?;
}
::serde::ser::SerializeStruct::serialize_field(&mut state, "template", &self.template)?;
::serde::ser::SerializeStruct::end(state)
}
}
| 51.921466 | 451 | 0.58435 |
f7edbeddd90dff0ed72ffbb9b88af43ab314a212 | 15,445 | #[cfg(target_family = "unix")]
use std::cmp::min;
use tui::{
backend::Backend,
layout::{Alignment, Constraint, Direction, Layout, Rect},
terminal::Frame,
text::{Span, Spans, Text},
widgets::{Block, Borders, Paragraph, Wrap},
};
use crate::{
app::{App, KillSignal},
canvas::Painter,
};
const DD_BASE: &str = " Confirm Kill Process ── Esc to close ";
const DD_ERROR_BASE: &str = " Error ── Esc to close ";
impl Painter {
pub fn get_dd_spans(&self, app_state: &App) -> Option<Text<'_>> {
if let Some(dd_err) = &app_state.dd_err {
return Some(Text::from(vec![
Spans::default(),
Spans::from("Failed to kill process."),
Spans::from(dd_err.clone()),
Spans::from("Please press ENTER or ESC to close this dialog."),
]));
} else if let Some(to_kill_processes) = app_state.get_to_delete_processes() {
if let Some(first_pid) = to_kill_processes.1.first() {
return Some(Text::from(vec![
Spans::from(""),
if app_state.is_grouped(app_state.current_widget.widget_id) {
if to_kill_processes.1.len() != 1 {
Spans::from(format!(
"Kill {} processes with the name \"{}\"? Press ENTER to confirm.",
to_kill_processes.1.len(),
to_kill_processes.0
))
} else {
Spans::from(format!(
"Kill 1 process with the name \"{}\"? Press ENTER to confirm.",
to_kill_processes.0
))
}
} else {
Spans::from(format!(
"Kill process \"{}\" with PID {}? Press ENTER to confirm.",
to_kill_processes.0, first_pid
))
},
]));
}
}
None
}
fn draw_dd_confirm_buttons<B: Backend>(
&self, f: &mut Frame<'_, B>, button_draw_loc: &Rect, app_state: &mut App,
) {
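        // Windows (or non-advanced kill) gets a simple Yes/No confirmation; the
        // Unix advanced path below renders a scrollable list of kill signals.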
if cfg!(target_os = "windows") || !app_state.app_config_fields.is_advanced_kill {
let (yes_button, no_button) = match app_state.delete_dialog_state.selected_signal {
KillSignal::Kill(_) => (
Span::styled("Yes", self.colours.currently_selected_text_style),
Span::raw("No"),
),
KillSignal::Cancel => (
Span::raw("Yes"),
Span::styled("No", self.colours.currently_selected_text_style),
),
};
let button_layout = Layout::default()
.direction(Direction::Horizontal)
.constraints(
[
Constraint::Percentage(35),
Constraint::Percentage(30),
Constraint::Percentage(35),
]
.as_ref(),
)
.split(*button_draw_loc);
f.render_widget(
Paragraph::new(yes_button)
.block(Block::default())
.alignment(Alignment::Right),
button_layout[0],
);
f.render_widget(
Paragraph::new(no_button)
.block(Block::default())
.alignment(Alignment::Left),
button_layout[2],
);
if app_state.should_get_widget_bounds() {
// This is kinda weird, but the gist is:
// - We have three sections; we put our mouse bounding box for the "yes" button at the very right edge
// of the left section and 3 characters back. We then give it a buffer size of 1 on the x-coordinate.
// - Same for the "no" button, except it is the right section and we do it from the start of the right
// section.
//
            // Lastly, note that mouse detection for the dd buttons assumes correct widths. As such, we correct
// them here and check with >= and <= mouse bound checks, as opposed to how we do it elsewhere with
// >= and <. See https://github.com/ClementTsang/bottom/pull/459 for details.
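            // As a worked example (illustrative numbers only): if the left
            // section spans x = 10..=30 (width 21), the "Yes" hitbox below
            // covers x in [10 + 21 - 4, 10 + 21] = [27, 31] -- the three
            // cells of "Yes" plus a one-cell buffer on each side.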
app_state.delete_dialog_state.button_positions = vec![
// Yes
(
button_layout[0].x + button_layout[0].width - 4,
button_layout[0].y,
button_layout[0].x + button_layout[0].width,
button_layout[0].y,
if cfg!(target_os = "windows") { 1 } else { 15 },
),
// No
(
button_layout[2].x - 1,
button_layout[2].y,
button_layout[2].x + 2,
button_layout[2].y,
0,
),
];
}
} else {
#[cfg(target_family = "unix")]
{
let signal_text;
#[cfg(target_os = "linux")]
{
signal_text = vec![
"0: Cancel",
"1: HUP",
"2: INT",
"3: QUIT",
"4: ILL",
"5: TRAP",
"6: ABRT",
"7: BUS",
"8: FPE",
"9: KILL",
"10: USR1",
"11: SEGV",
"12: USR2",
"13: PIPE",
"14: ALRM",
"15: TERM",
"16: STKFLT",
"17: CHLD",
"18: CONT",
"19: STOP",
"20: TSTP",
"21: TTIN",
"22: TTOU",
"23: URG",
"24: XCPU",
"25: XFSZ",
"26: VTALRM",
"27: PROF",
"28: WINCH",
"29: IO",
"30: PWR",
"31: SYS",
"34: RTMIN",
"35: RTMIN+1",
"36: RTMIN+2",
"37: RTMIN+3",
"38: RTMIN+4",
"39: RTMIN+5",
"40: RTMIN+6",
"41: RTMIN+7",
"42: RTMIN+8",
"43: RTMIN+9",
"44: RTMIN+10",
"45: RTMIN+11",
"46: RTMIN+12",
"47: RTMIN+13",
"48: RTMIN+14",
"49: RTMIN+15",
"50: RTMAX-14",
"51: RTMAX-13",
"52: RTMAX-12",
"53: RTMAX-11",
"54: RTMAX-10",
"55: RTMAX-9",
"56: RTMAX-8",
"57: RTMAX-7",
"58: RTMAX-6",
"59: RTMAX-5",
"60: RTMAX-4",
"61: RTMAX-3",
"62: RTMAX-2",
"63: RTMAX-1",
"64: RTMAX",
];
}
#[cfg(target_os = "macos")]
{
signal_text = vec![
"0: Cancel",
"1: HUP",
"2: INT",
"3: QUIT",
"4: ILL",
"5: TRAP",
"6: ABRT",
"7: EMT",
"8: FPE",
"9: KILL",
"10: BUS",
"11: SEGV",
"12: SYS",
"13: PIPE",
"14: ALRM",
"15: TERM",
"16: URG",
"17: STOP",
"18: TSTP",
"19: CONT",
"20: CHLD",
"21: TTIN",
"22: TTOU",
"23: IO",
"24: XCPU",
"25: XFSZ",
"26: VTALRM",
"27: PROF",
"28: WINCH",
"29: INFO",
"30: USR1",
"31: USR2",
];
}
let button_rect = Layout::default()
.direction(Direction::Horizontal)
.margin(1)
.constraints(
[
Constraint::Length((button_draw_loc.width - 14) / 2),
Constraint::Min(0),
Constraint::Length((button_draw_loc.width - 14) / 2),
]
.as_ref(),
)
.split(*button_draw_loc)[1];
let mut selected = match app_state.delete_dialog_state.selected_signal {
KillSignal::Cancel => 0,
KillSignal::Kill(signal) => signal,
};
// 32+33 are skipped
if selected > 31 {
selected -= 2;
}
let layout = Layout::default()
.direction(Direction::Vertical)
.constraints(vec![Constraint::Min(1); button_rect.height.into()])
.split(button_rect);
let prev_offset: usize = app_state.delete_dialog_state.scroll_pos;
app_state.delete_dialog_state.scroll_pos = if selected == 0 {
0
} else if selected < prev_offset + 1 {
selected - 1
} else if selected > prev_offset + layout.len() - 1 {
selected - layout.len() + 1
} else {
prev_offset
};
let scroll_offset: usize = app_state.delete_dialog_state.scroll_pos;
let mut buttons = signal_text
[scroll_offset + 1..min((layout.len()) + scroll_offset, signal_text.len())]
.iter()
.map(|text| Span::raw(*text))
.collect::<Vec<Span<'_>>>();
buttons.insert(0, Span::raw(signal_text[0]));
buttons[selected - scroll_offset] = Span::styled(
signal_text[selected],
self.colours.currently_selected_text_style,
);
app_state.delete_dialog_state.button_positions = layout
.iter()
.enumerate()
.map(|(i, pos)| {
(
pos.x,
pos.y,
pos.x + pos.width - 1,
pos.y + pos.height - 1,
if i == 0 { 0 } else { scroll_offset } + i,
)
})
.collect::<Vec<(u16, u16, u16, u16, usize)>>();
for (btn, pos) in buttons.into_iter().zip(layout.into_iter()) {
f.render_widget(Paragraph::new(btn).alignment(Alignment::Left), pos);
}
}
}
}
pub fn draw_dd_dialog<B: Backend>(
&self, f: &mut Frame<'_, B>, dd_text: Option<Text<'_>>, app_state: &mut App, draw_loc: Rect,
) -> bool {
if let Some(dd_text) = dd_text {
let dd_title = if app_state.dd_err.is_some() {
Spans::from(vec![
Span::styled(" Error ", self.colours.widget_title_style),
Span::styled(
format!(
"─{}─ Esc to close ",
"─".repeat(
usize::from(draw_loc.width)
.saturating_sub(DD_ERROR_BASE.chars().count() + 2)
)
),
self.colours.border_style,
),
])
} else {
Spans::from(vec![
Span::styled(" Confirm Kill Process ", self.colours.widget_title_style),
Span::styled(
format!(
"─{}─ Esc to close ",
"─".repeat(
usize::from(draw_loc.width)
.saturating_sub(DD_BASE.chars().count() + 2)
)
),
self.colours.border_style,
),
])
};
f.render_widget(
Paragraph::new(dd_text)
.block(
Block::default()
.title(dd_title)
.style(self.colours.border_style)
.borders(Borders::ALL)
.border_style(self.colours.border_style),
)
.style(self.colours.text_style)
.alignment(Alignment::Center)
.wrap(Wrap { trim: true }),
draw_loc,
);
let btn_height =
if cfg!(target_os = "windows") || !app_state.app_config_fields.is_advanced_kill {
3
} else {
20
};
// Now draw buttons if needed...
let split_draw_loc = Layout::default()
.direction(Direction::Vertical)
.constraints(if app_state.dd_err.is_some() {
vec![Constraint::Percentage(100)]
} else {
vec![Constraint::Min(3), Constraint::Length(btn_height)]
})
.split(draw_loc);
// This being true implies that dd_err is none.
if let Some(button_draw_loc) = split_draw_loc.get(1) {
self.draw_dd_confirm_buttons(f, button_draw_loc, app_state);
}
if app_state.dd_err.is_some() {
return app_state.delete_dialog_state.is_showing_dd;
} else {
return true;
}
}
// Currently we just return "false" if things go wrong finding
// the process or a first PID (if an error arises it should be caught).
// I don't really like this, and I find it ugly, but it works for now.
false
}
}
| 39.40051 | 120 | 0.369375 |
f9744ff6eb76be18ef1f2bb354e74fa28c147762 | 17,778 | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files.git)
// DO NOT EDIT
use crate::Accessible;
use crate::AccessibleRole;
use crate::Align;
use crate::Buildable;
use crate::ConstraintTarget;
use crate::LayoutManager;
use crate::Overflow;
use crate::Widget;
use glib::object::Cast;
use glib::object::IsA;
use glib::object::ObjectType as ObjectType_;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use glib::StaticType;
use glib::ToValue;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
glib::wrapper! {
pub struct AspectFrame(Object<ffi::GtkAspectFrame>) @extends Widget, @implements Accessible, Buildable, ConstraintTarget;
match fn {
get_type => || ffi::gtk_aspect_frame_get_type(),
}
}
impl AspectFrame {
#[doc(alias = "gtk_aspect_frame_new")]
pub fn new(xalign: f32, yalign: f32, ratio: f32, obey_child: bool) -> AspectFrame {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_aspect_frame_new(
xalign,
yalign,
ratio,
obey_child.to_glib(),
))
.unsafe_cast()
}
}
#[doc(alias = "gtk_aspect_frame_get_child")]
pub fn child(&self) -> Option<Widget> {
unsafe { from_glib_none(ffi::gtk_aspect_frame_get_child(self.to_glib_none().0)) }
}
#[doc(alias = "gtk_aspect_frame_get_obey_child")]
pub fn is_obey_child(&self) -> bool {
unsafe { from_glib(ffi::gtk_aspect_frame_get_obey_child(self.to_glib_none().0)) }
}
#[doc(alias = "gtk_aspect_frame_get_ratio")]
pub fn ratio(&self) -> f32 {
unsafe { ffi::gtk_aspect_frame_get_ratio(self.to_glib_none().0) }
}
#[doc(alias = "gtk_aspect_frame_get_xalign")]
pub fn xalign(&self) -> f32 {
unsafe { ffi::gtk_aspect_frame_get_xalign(self.to_glib_none().0) }
}
#[doc(alias = "gtk_aspect_frame_get_yalign")]
pub fn yalign(&self) -> f32 {
unsafe { ffi::gtk_aspect_frame_get_yalign(self.to_glib_none().0) }
}
#[doc(alias = "gtk_aspect_frame_set_child")]
pub fn set_child<P: IsA<Widget>>(&self, child: Option<&P>) {
unsafe {
ffi::gtk_aspect_frame_set_child(
self.to_glib_none().0,
child.map(|p| p.as_ref()).to_glib_none().0,
);
}
}
#[doc(alias = "gtk_aspect_frame_set_obey_child")]
pub fn set_obey_child(&self, obey_child: bool) {
unsafe {
ffi::gtk_aspect_frame_set_obey_child(self.to_glib_none().0, obey_child.to_glib());
}
}
#[doc(alias = "gtk_aspect_frame_set_ratio")]
pub fn set_ratio(&self, ratio: f32) {
unsafe {
ffi::gtk_aspect_frame_set_ratio(self.to_glib_none().0, ratio);
}
}
#[doc(alias = "gtk_aspect_frame_set_xalign")]
pub fn set_xalign(&self, xalign: f32) {
unsafe {
ffi::gtk_aspect_frame_set_xalign(self.to_glib_none().0, xalign);
}
}
#[doc(alias = "gtk_aspect_frame_set_yalign")]
pub fn set_yalign(&self, yalign: f32) {
unsafe {
ffi::gtk_aspect_frame_set_yalign(self.to_glib_none().0, yalign);
}
}
pub fn connect_property_child_notify<F: Fn(&AspectFrame) + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn notify_child_trampoline<F: Fn(&AspectFrame) + 'static>(
this: *mut ffi::GtkAspectFrame,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&from_glib_borrow(this))
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::child\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_child_trampoline::<F> as *const (),
)),
Box_::into_raw(f),
)
}
}
pub fn connect_property_obey_child_notify<F: Fn(&AspectFrame) + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn notify_obey_child_trampoline<F: Fn(&AspectFrame) + 'static>(
this: *mut ffi::GtkAspectFrame,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&from_glib_borrow(this))
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::obey-child\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_obey_child_trampoline::<F> as *const (),
)),
Box_::into_raw(f),
)
}
}
pub fn connect_property_ratio_notify<F: Fn(&AspectFrame) + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn notify_ratio_trampoline<F: Fn(&AspectFrame) + 'static>(
this: *mut ffi::GtkAspectFrame,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&from_glib_borrow(this))
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::ratio\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_ratio_trampoline::<F> as *const (),
)),
Box_::into_raw(f),
)
}
}
pub fn connect_property_xalign_notify<F: Fn(&AspectFrame) + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn notify_xalign_trampoline<F: Fn(&AspectFrame) + 'static>(
this: *mut ffi::GtkAspectFrame,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&from_glib_borrow(this))
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::xalign\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_xalign_trampoline::<F> as *const (),
)),
Box_::into_raw(f),
)
}
}
pub fn connect_property_yalign_notify<F: Fn(&AspectFrame) + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn notify_yalign_trampoline<F: Fn(&AspectFrame) + 'static>(
this: *mut ffi::GtkAspectFrame,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&from_glib_borrow(this))
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::yalign\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_yalign_trampoline::<F> as *const (),
)),
Box_::into_raw(f),
)
}
}
}
#[derive(Clone, Default)]
pub struct AspectFrameBuilder {
child: Option<Widget>,
obey_child: Option<bool>,
ratio: Option<f32>,
xalign: Option<f32>,
yalign: Option<f32>,
can_focus: Option<bool>,
can_target: Option<bool>,
css_classes: Option<Vec<String>>,
css_name: Option<String>,
cursor: Option<gdk::Cursor>,
focus_on_click: Option<bool>,
focusable: Option<bool>,
halign: Option<Align>,
has_tooltip: Option<bool>,
height_request: Option<i32>,
hexpand: Option<bool>,
hexpand_set: Option<bool>,
layout_manager: Option<LayoutManager>,
margin_bottom: Option<i32>,
margin_end: Option<i32>,
margin_start: Option<i32>,
margin_top: Option<i32>,
name: Option<String>,
opacity: Option<f64>,
overflow: Option<Overflow>,
receives_default: Option<bool>,
sensitive: Option<bool>,
tooltip_markup: Option<String>,
tooltip_text: Option<String>,
valign: Option<Align>,
vexpand: Option<bool>,
vexpand_set: Option<bool>,
visible: Option<bool>,
width_request: Option<i32>,
accessible_role: Option<AccessibleRole>,
}
impl AspectFrameBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn build(self) -> AspectFrame {
let mut properties: Vec<(&str, &dyn ToValue)> = vec![];
if let Some(ref child) = self.child {
properties.push(("child", child));
}
if let Some(ref obey_child) = self.obey_child {
properties.push(("obey-child", obey_child));
}
if let Some(ref ratio) = self.ratio {
properties.push(("ratio", ratio));
}
if let Some(ref xalign) = self.xalign {
properties.push(("xalign", xalign));
}
if let Some(ref yalign) = self.yalign {
properties.push(("yalign", yalign));
}
if let Some(ref can_focus) = self.can_focus {
properties.push(("can-focus", can_focus));
}
if let Some(ref can_target) = self.can_target {
properties.push(("can-target", can_target));
}
if let Some(ref css_classes) = self.css_classes {
properties.push(("css-classes", css_classes));
}
if let Some(ref css_name) = self.css_name {
properties.push(("css-name", css_name));
}
if let Some(ref cursor) = self.cursor {
properties.push(("cursor", cursor));
}
if let Some(ref focus_on_click) = self.focus_on_click {
properties.push(("focus-on-click", focus_on_click));
}
if let Some(ref focusable) = self.focusable {
properties.push(("focusable", focusable));
}
if let Some(ref halign) = self.halign {
properties.push(("halign", halign));
}
if let Some(ref has_tooltip) = self.has_tooltip {
properties.push(("has-tooltip", has_tooltip));
}
if let Some(ref height_request) = self.height_request {
properties.push(("height-request", height_request));
}
if let Some(ref hexpand) = self.hexpand {
properties.push(("hexpand", hexpand));
}
if let Some(ref hexpand_set) = self.hexpand_set {
properties.push(("hexpand-set", hexpand_set));
}
if let Some(ref layout_manager) = self.layout_manager {
properties.push(("layout-manager", layout_manager));
}
if let Some(ref margin_bottom) = self.margin_bottom {
properties.push(("margin-bottom", margin_bottom));
}
if let Some(ref margin_end) = self.margin_end {
properties.push(("margin-end", margin_end));
}
if let Some(ref margin_start) = self.margin_start {
properties.push(("margin-start", margin_start));
}
if let Some(ref margin_top) = self.margin_top {
properties.push(("margin-top", margin_top));
}
if let Some(ref name) = self.name {
properties.push(("name", name));
}
if let Some(ref opacity) = self.opacity {
properties.push(("opacity", opacity));
}
if let Some(ref overflow) = self.overflow {
properties.push(("overflow", overflow));
}
if let Some(ref receives_default) = self.receives_default {
properties.push(("receives-default", receives_default));
}
if let Some(ref sensitive) = self.sensitive {
properties.push(("sensitive", sensitive));
}
if let Some(ref tooltip_markup) = self.tooltip_markup {
properties.push(("tooltip-markup", tooltip_markup));
}
if let Some(ref tooltip_text) = self.tooltip_text {
properties.push(("tooltip-text", tooltip_text));
}
if let Some(ref valign) = self.valign {
properties.push(("valign", valign));
}
if let Some(ref vexpand) = self.vexpand {
properties.push(("vexpand", vexpand));
}
if let Some(ref vexpand_set) = self.vexpand_set {
properties.push(("vexpand-set", vexpand_set));
}
if let Some(ref visible) = self.visible {
properties.push(("visible", visible));
}
if let Some(ref width_request) = self.width_request {
properties.push(("width-request", width_request));
}
if let Some(ref accessible_role) = self.accessible_role {
properties.push(("accessible-role", accessible_role));
}
let ret = glib::Object::new::<AspectFrame>(&properties).expect("object new");
ret
}
pub fn child<P: IsA<Widget>>(mut self, child: &P) -> Self {
self.child = Some(child.clone().upcast());
self
}
pub fn obey_child(mut self, obey_child: bool) -> Self {
self.obey_child = Some(obey_child);
self
}
pub fn ratio(mut self, ratio: f32) -> Self {
self.ratio = Some(ratio);
self
}
pub fn xalign(mut self, xalign: f32) -> Self {
self.xalign = Some(xalign);
self
}
pub fn yalign(mut self, yalign: f32) -> Self {
self.yalign = Some(yalign);
self
}
pub fn can_focus(mut self, can_focus: bool) -> Self {
self.can_focus = Some(can_focus);
self
}
pub fn can_target(mut self, can_target: bool) -> Self {
self.can_target = Some(can_target);
self
}
pub fn css_classes(mut self, css_classes: Vec<String>) -> Self {
self.css_classes = Some(css_classes);
self
}
pub fn css_name(mut self, css_name: &str) -> Self {
self.css_name = Some(css_name.to_string());
self
}
pub fn cursor(mut self, cursor: &gdk::Cursor) -> Self {
self.cursor = Some(cursor.clone());
self
}
pub fn focus_on_click(mut self, focus_on_click: bool) -> Self {
self.focus_on_click = Some(focus_on_click);
self
}
pub fn focusable(mut self, focusable: bool) -> Self {
self.focusable = Some(focusable);
self
}
pub fn halign(mut self, halign: Align) -> Self {
self.halign = Some(halign);
self
}
pub fn has_tooltip(mut self, has_tooltip: bool) -> Self {
self.has_tooltip = Some(has_tooltip);
self
}
pub fn height_request(mut self, height_request: i32) -> Self {
self.height_request = Some(height_request);
self
}
pub fn hexpand(mut self, hexpand: bool) -> Self {
self.hexpand = Some(hexpand);
self
}
pub fn hexpand_set(mut self, hexpand_set: bool) -> Self {
self.hexpand_set = Some(hexpand_set);
self
}
pub fn layout_manager<P: IsA<LayoutManager>>(mut self, layout_manager: &P) -> Self {
self.layout_manager = Some(layout_manager.clone().upcast());
self
}
pub fn margin_bottom(mut self, margin_bottom: i32) -> Self {
self.margin_bottom = Some(margin_bottom);
self
}
pub fn margin_end(mut self, margin_end: i32) -> Self {
self.margin_end = Some(margin_end);
self
}
pub fn margin_start(mut self, margin_start: i32) -> Self {
self.margin_start = Some(margin_start);
self
}
pub fn margin_top(mut self, margin_top: i32) -> Self {
self.margin_top = Some(margin_top);
self
}
pub fn name(mut self, name: &str) -> Self {
self.name = Some(name.to_string());
self
}
pub fn opacity(mut self, opacity: f64) -> Self {
self.opacity = Some(opacity);
self
}
pub fn overflow(mut self, overflow: Overflow) -> Self {
self.overflow = Some(overflow);
self
}
pub fn receives_default(mut self, receives_default: bool) -> Self {
self.receives_default = Some(receives_default);
self
}
pub fn sensitive(mut self, sensitive: bool) -> Self {
self.sensitive = Some(sensitive);
self
}
pub fn tooltip_markup(mut self, tooltip_markup: &str) -> Self {
self.tooltip_markup = Some(tooltip_markup.to_string());
self
}
pub fn tooltip_text(mut self, tooltip_text: &str) -> Self {
self.tooltip_text = Some(tooltip_text.to_string());
self
}
pub fn valign(mut self, valign: Align) -> Self {
self.valign = Some(valign);
self
}
pub fn vexpand(mut self, vexpand: bool) -> Self {
self.vexpand = Some(vexpand);
self
}
pub fn vexpand_set(mut self, vexpand_set: bool) -> Self {
self.vexpand_set = Some(vexpand_set);
self
}
pub fn visible(mut self, visible: bool) -> Self {
self.visible = Some(visible);
self
}
pub fn width_request(mut self, width_request: i32) -> Self {
self.width_request = Some(width_request);
self
}
pub fn accessible_role(mut self, accessible_role: AccessibleRole) -> Self {
self.accessible_role = Some(accessible_role);
self
}
}
impl fmt::Display for AspectFrame {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("AspectFrame")
}
}
| 31.026178 | 125 | 0.560749 |
f4062930403a1238267a7dcf3b06d66107fe1105 | 542 | /// replace creates a new String, and copies the data from
/// this string slice into it. While doing so, it attempts
/// to find matches of a pattern. If it finds any, it
/// replaces them with the replacement string slice.
fn main() {
// option #1
let host = "192.168.1.100";
let option1 = str::replace(host, ".", "-");
println!("{}", option1);
// option #2
let option2 = "docs.aws.amazon.com".replace(".", "-");
println!("{}", option2);
}
/*
Example output:
--------------
192-168-1-100
docs-aws-amazon-com
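192-168.1.100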
*/
| 25.809524 | 58 | 0.601476 |
6966deace277cf1602f94b28ea83c1542237896a | 1,339 | use crate::internal::*;
use tract_core::ops::source::*;
register_all!(TypedSource: pulsify);
fn pulsify(
_op: &TypedSource,
_source: &TypedModel,
node: &TypedNode,
target: &mut PulsedModel,
_mapping: &HashMap<OutletId, OutletId>,
pulse: usize,
) -> TractResult<Option<TVec<OutletId>>> {
let pulsed_fact = PulsedFact::from_tensor_fact_pulse(&node.outputs[0].fact, pulse)?;
let id = target.add_source(node.name.clone(), pulsed_fact)?;
Ok(Some(tvec!(id)))
}
#[derive(Debug, Clone, Hash)]
pub struct PulsedSource(pub PulsedFact);
impl_dyn_hash!(PulsedSource);
impl Op for PulsedSource {
fn name(&self) -> Cow<str> {
"PulsedSource".into()
}
op_pulse!();
not_a_typed_op!();
}
impl EvalOp for PulsedSource {
fn is_stateless(&self) -> bool {
false
}
fn state(
&self,
_session: &mut SessionState,
node_id: usize,
) -> TractResult<Option<Box<dyn OpState>>> {
Ok(Some(Box::new(SourceState(node_id))))
}
}
impl PulsedOp for PulsedSource {
fn pulsed_output_facts(&self, _inputs: &[&PulsedFact]) -> TractResult<TVec<PulsedFact>> {
Ok(tvec!(self.0.clone()))
}
fn to_typed(&self) -> Box<dyn TypedOp> {
Box::new(TypedSource::new(self.0.datum_type.fact(self.0.shape.clone())))
}
as_op!();
}
| 23.491228 | 93 | 0.629574 |
762b5c7c3c63b1470b6237fb87bb4cf5dc40159e | 5,123 | use crate::{config::EntityType, InMemoryBackend, InMemoryBackendError};
use futures_util::{
future::{self, FutureExt},
stream::{self, StreamExt},
};
use rarity_cache::{
entity::{
channel::{CategoryChannelEntity, VoiceChannelEntity, VoiceChannelRepository},
guild::GuildEntity,
Entity,
},
repository::{
GetEntityFuture, ListEntitiesFuture, RemoveEntityFuture, Repository, UpsertEntityFuture,
},
};
use twilight_model::id::ChannelId;
/// Repository to retrieve and work with voice channels and their related
/// entities.
#[derive(Clone, Debug)]
pub struct InMemoryVoiceChannelRepository(pub(crate) InMemoryBackend);
impl Repository<VoiceChannelEntity, InMemoryBackend> for InMemoryVoiceChannelRepository {
fn backend(&self) -> InMemoryBackend {
self.0.clone()
}
    fn get(
        &self,
        channel_id: ChannelId,
    ) -> GetEntityFuture<'_, VoiceChannelEntity, InMemoryBackendError> {
        future::ok(
            self.0
                .0
                .channels_voice
                .get(&channel_id)
                .map(|r| r.value().clone()),
        )
        .boxed()
    }
fn list(&self) -> ListEntitiesFuture<'_, VoiceChannelEntity, InMemoryBackendError> {
let stream = stream::iter(
(self.0)
.0
.channels_voice
.iter()
.map(|r| Ok(r.value().clone())),
)
.boxed();
future::ok(stream).boxed()
}
    fn remove(&self, channel_id: ChannelId) -> RemoveEntityFuture<'_, InMemoryBackendError> {
        (self.0).0.channels_voice.remove(&channel_id);
future::ok(()).boxed()
}
fn upsert(&self, entity: VoiceChannelEntity) -> UpsertEntityFuture<'_, InMemoryBackendError> {
if !self
.0
.0
.config
.entity_types()
.contains(EntityType::CHANNEL_VOICE)
{
return future::ok(()).boxed();
}
(self.0).0.channels_voice.insert(entity.id(), entity);
future::ok(()).boxed()
}
}
impl VoiceChannelRepository<InMemoryBackend> for InMemoryVoiceChannelRepository {
fn guild(
&self,
channel_id: ChannelId,
) -> GetEntityFuture<'_, GuildEntity, InMemoryBackendError> {
let guild = self
.0
.0
.channels_voice
.get(&channel_id)
.and_then(|channel| channel.guild_id)
.and_then(|id| (self.0).0.guilds.get(&id))
.map(|r| r.value().clone());
future::ok(guild).boxed()
}
fn parent(
&self,
channel_id: ChannelId,
) -> GetEntityFuture<'_, CategoryChannelEntity, InMemoryBackendError> {
let parent = self
.0
.0
.channels_voice
.get(&channel_id)
.and_then(|channel| channel.parent_id)
.and_then(|id| (self.0).0.channels_category.get(&id))
.map(|r| r.value().clone());
future::ok(parent).boxed()
}
}
impl InMemoryVoiceChannelRepository {
/// Retrieve the guild of a voice channel.
///
/// # Examples
///
/// ```no_run
/// use rarity_cache_inmemory::InMemoryCache;
/// use twilight_model::id::ChannelId;
///
/// # #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let cache = InMemoryCache::new();
///
/// if let Some(guild) = cache.voice_channels.guild(ChannelId(1)).await? {
/// println!("the guild's name is {}", guild.name);
/// }
/// # Ok(()) }
/// ```
pub fn guild(
&self,
channel_id: ChannelId,
) -> GetEntityFuture<'_, GuildEntity, InMemoryBackendError> {
VoiceChannelRepository::guild(self, channel_id)
}
/// Retrieve the parent category channel of a voice channel.
///
/// # Examples
///
/// ```no_run
/// use rarity_cache_inmemory::InMemoryCache;
/// use twilight_model::id::ChannelId;
///
/// # #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let cache = InMemoryCache::new();
///
/// if let Some(channel) = cache.voice_channels.parent(ChannelId(1)).await? {
/// println!("the parent category channel's name is {}", channel.name);
/// }
/// # Ok(()) }
/// ```
pub fn parent(
&self,
channel_id: ChannelId,
) -> GetEntityFuture<'_, CategoryChannelEntity, InMemoryBackendError> {
VoiceChannelRepository::parent(self, channel_id)
}
}
#[cfg(test)]
mod tests {
use super::{
InMemoryBackend, InMemoryVoiceChannelRepository, Repository, VoiceChannelEntity,
VoiceChannelRepository,
};
use static_assertions::{assert_impl_all, assert_obj_safe};
use std::fmt::Debug;
assert_impl_all!(
InMemoryVoiceChannelRepository:
VoiceChannelRepository<InMemoryBackend>,
Clone,
Debug,
Repository<VoiceChannelEntity, InMemoryBackend>,
Send,
Sync,
);
assert_obj_safe!(InMemoryVoiceChannelRepository);
}
| 28.461111 | 98 | 0.575444 |
f7b407398840576534fb3aa8fa26ef398a440655 | 5,848 | use std::env;
use serenity::{
async_trait,
model::{
gateway::Ready,
id::GuildId,
interactions::{
application_command::{
ApplicationCommand,
ApplicationCommandInteractionDataOptionValue,
ApplicationCommandOptionType,
},
Interaction,
InteractionResponseType,
},
},
prelude::*,
};
struct Handler;
#[async_trait]
impl EventHandler for Handler {
async fn interaction_create(&self, ctx: Context, interaction: Interaction) {
if let Interaction::ApplicationCommand(command) = interaction {
let content = match command.data.name.as_str() {
"ping" => "Hey, I'm alive!".to_string(),
"id" => {
let options = command
.data
.options
.get(0)
.expect("Expected user option")
.resolved
.as_ref()
.expect("Expected user object");
if let ApplicationCommandInteractionDataOptionValue::User(user, _member) =
options
{
format!("{}'s id is {}", user.tag(), user.id)
} else {
"Please provide a valid user".to_string()
}
},
_ => "not implemented :(".to_string(),
};
if let Err(why) = command
.create_interaction_response(&ctx.http, |response| {
response
.kind(InteractionResponseType::ChannelMessageWithSource)
.interaction_response_data(|message| message.content(content))
})
.await
{
println!("Cannot respond to slash command: {}", why);
}
}
}
async fn ready(&self, ctx: Context, ready: Ready) {
println!("{} is connected!", ready.user.name);
let commands = ApplicationCommand::set_global_application_commands(&ctx.http, |commands| {
commands
.create_application_command(|command| {
command.name("ping").description("A ping command")
})
.create_application_command(|command| {
command.name("id").description("Get a user id").create_option(|option| {
option
.name("id")
.description("The user to lookup")
.kind(ApplicationCommandOptionType::User)
.required(true)
})
})
.create_application_command(|command| {
command
.name("welcome")
.description("Welcome a user")
.create_option(|option| {
option
.name("user")
.description("The user to welcome")
.kind(ApplicationCommandOptionType::User)
.required(true)
})
.create_option(|option| {
option
.name("message")
.description("The message to send")
.kind(ApplicationCommandOptionType::String)
.required(true)
.add_string_choice(
"Welcome to our cool server! Ask me if you need help",
"pizza",
)
.add_string_choice("Hey, do you want a coffee?", "coffee")
.add_string_choice(
"Welcome to the club, you're now a good person. Well, I hope.",
"club",
)
.add_string_choice(
"I hope that you brought a controller to play together!",
"game",
)
})
})
})
.await;
println!("I now have the following global slash commands: {:#?}", commands);
let guild_command = GuildId(123456789)
.create_application_command(&ctx.http, |command| {
command.name("wonderful_command").description("An amazing command")
})
.await;
println!("I created the following guild command: {:#?}", guild_command);
}
}
#[tokio::main]
async fn main() {
// Configure the client with your Discord bot token in the environment.
let token = env::var("DISCORD_TOKEN").expect("Expected a token in the environment");
// The Application Id is usually the Bot User Id.
let application_id: u64 = env::var("APPLICATION_ID")
.expect("Expected an application id in the environment")
.parse()
.expect("application id is not a valid id");
// Build our client.
let mut client = Client::builder(token)
.event_handler(Handler)
.application_id(application_id)
.await
.expect("Error creating client");
// Finally, start a single shard, and start listening to events.
//
// Shards will automatically attempt to reconnect, and will perform
// exponential backoff until it reconnects.
if let Err(why) = client.start().await {
println!("Client error: {:?}", why);
}
}
| 38.222222 | 99 | 0.458105 |
48cce8c410867fad585670e15ad8da5840e36fd9 | 6,360 | /*!
`pubsys` simplifies the process of publishing Bottlerocket updates.
Currently implemented:
* building repos, whether starting from an existing repo or from scratch
* validating repos by loading them and retrieving their targets
* checking for repository metadata expirations within specified number of days
* refreshing and re-signing repos' non-root metadata files
* registering and copying EC2 AMIs
* marking EC2 AMIs public (or private again)
* setting SSM parameters based on built AMIs
* promoting SSM parameters from versioned entries to named (e.g. 'latest')
To be implemented:
* high-level document describing pubsys usage with examples
Configuration comes from:
* command-line parameters, to specify basic options and paths to the below files
* Infra.toml, for repo and AMI configuration
* Release.toml, for migrations
* Policy files for repo metadata expiration and update wave timing
*/
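// Illustrative invocation (a sketch only: the global flags follow the
// structopt definitions below, but each subcommand's own options are
// project-specific and elided here):
//
//   pubsys --infra-config-path Infra.toml repo ...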
#![deny(rust_2018_idioms)]
mod aws;
mod repo;
mod vmware;
use semver::Version;
use simplelog::{Config as LogConfig, LevelFilter, SimpleLogger};
use snafu::ResultExt;
use std::path::PathBuf;
use std::process;
use structopt::StructOpt;
use tokio::runtime::Runtime;
fn run() -> Result<()> {
// Parse and store the args passed to the program
let args = Args::from_args();
// SimpleLogger will send errors to stderr and anything less to stdout.
SimpleLogger::init(args.log_level, LogConfig::default()).context(error::Logger)?;
match args.subcommand {
SubCommand::Repo(ref repo_args) => repo::run(&args, &repo_args).context(error::Repo),
SubCommand::ValidateRepo(ref validate_repo_args) => {
repo::validate_repo::run(&args, &validate_repo_args).context(error::ValidateRepo)
}
SubCommand::CheckRepoExpirations(ref check_expirations_args) => {
repo::check_expirations::run(&args, &check_expirations_args)
.context(error::CheckExpirations)
}
SubCommand::RefreshRepo(ref refresh_repo_args) => {
repo::refresh_repo::run(&args, &refresh_repo_args).context(error::RefreshRepo)
}
SubCommand::Ami(ref ami_args) => {
let rt = Runtime::new().context(error::Runtime)?;
rt.block_on(async { aws::ami::run(&args, &ami_args).await.context(error::Ami) })
}
SubCommand::PublishAmi(ref publish_args) => {
let rt = Runtime::new().context(error::Runtime)?;
rt.block_on(async {
aws::publish_ami::run(&args, &publish_args)
.await
.context(error::PublishAmi)
})
}
SubCommand::Ssm(ref ssm_args) => {
let rt = Runtime::new().context(error::Runtime)?;
rt.block_on(async { aws::ssm::run(&args, &ssm_args).await.context(error::Ssm) })
}
SubCommand::PromoteSsm(ref promote_args) => {
let rt = Runtime::new().context(error::Runtime)?;
rt.block_on(async {
aws::promote_ssm::run(&args, &promote_args)
.await
.context(error::PromoteSsm)
})
}
SubCommand::UploadOva(ref upload_args) => {
vmware::upload_ova::run(&args, &upload_args).context(error::UploadOva)
}
}
}
fn main() {
if let Err(e) = run() {
eprintln!("{}", e);
process::exit(1);
}
}
/// Automates publishing of Bottlerocket updates
#[derive(Debug, StructOpt)]
#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)]
struct Args {
#[structopt(global = true, long, default_value = "INFO")]
/// How much detail to log; from least to most: ERROR, WARN, INFO, DEBUG, TRACE
log_level: LevelFilter,
#[structopt(long, parse(from_os_str))]
/// Path to Infra.toml (NOTE: must be specified before subcommand)
infra_config_path: PathBuf,
#[structopt(subcommand)]
subcommand: SubCommand,
}
#[derive(Debug, StructOpt)]
enum SubCommand {
Repo(repo::RepoArgs),
ValidateRepo(repo::validate_repo::ValidateRepoArgs),
CheckRepoExpirations(repo::check_expirations::CheckExpirationsArgs),
RefreshRepo(repo::refresh_repo::RefreshRepoArgs),
Ami(aws::ami::AmiArgs),
PublishAmi(aws::publish_ami::PublishArgs),
Ssm(aws::ssm::SsmArgs),
PromoteSsm(aws::promote_ssm::PromoteArgs),
UploadOva(vmware::upload_ova::UploadArgs),
}
/// Parses a SemVer, stripping a leading 'v' if present
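/// (e.g. both "v1.2.3" and "1.2.3" parse to `Version::new(1, 2, 3)`)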
pub(crate) fn friendly_version(
mut version_str: &str,
) -> std::result::Result<Version, semver::Error> {
if version_str.starts_with('v') {
version_str = &version_str[1..];
};
Version::parse(version_str)
}
mod error {
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility = "pub(super)")]
pub(super) enum Error {
#[snafu(display("Failed to build AMI: {}", source))]
Ami { source: crate::aws::ami::Error },
#[snafu(display("Logger setup error: {}", source))]
Logger { source: log::SetLoggerError },
#[snafu(display("Failed to publish AMI: {}", source))]
PublishAmi {
source: crate::aws::publish_ami::Error,
},
#[snafu(display("Failed to promote SSM: {}", source))]
PromoteSsm {
source: crate::aws::promote_ssm::Error,
},
#[snafu(display("Failed to build repo: {}", source))]
Repo { source: crate::repo::Error },
#[snafu(display("Failed to validate repository: {}", source))]
ValidateRepo {
source: crate::repo::validate_repo::Error,
},
#[snafu(display("Check expirations error: {}", source))]
CheckExpirations {
source: crate::repo::check_expirations::Error,
},
#[snafu(display("Failed to refresh repository metadata: {}", source))]
RefreshRepo {
source: crate::repo::refresh_repo::Error,
},
#[snafu(display("Failed to create async runtime: {}", source))]
Runtime { source: std::io::Error },
#[snafu(display("Failed to update SSM: {}", source))]
Ssm { source: crate::aws::ssm::Error },
#[snafu(display("Failed to upload OVA: {}", source))]
UploadOva {
source: crate::vmware::upload_ova::Error,
},
}
}
type Result<T> = std::result::Result<T, error::Error>;
| 33.473684 | 93 | 0.630031 |
4b558d7fe8f15d4a33aee41b00ed2250e18aedd1 | 285 | // xfail-test #2587
// error-pattern: copying a noncopyable value
struct r {
let i:int;
new(i:int) {self.i = i;}
drop {}
}
fn main() {
// This can't make sense as it would copy the classes
let i <- ~[r(0)];
let j <- ~[r(1)];
let k = i + j;
log(debug, j);
}
| 16.764706 | 57 | 0.536842 |
4a6d45a77dbe6715613db0a09a3941060764463b | 1,485 |
extern crate sysinfo;
use sysinfo::SystemExt;
use sysinfo::ProcessExt;
use std::cmp::Ordering;
use std::{thread, time};
use std::collections::HashMap;
struct Proc {
p : sysinfo::Process,
id : sysinfo::Pid
}
impl Ord for Proc {
fn cmp(&self, other: &Proc) -> Ordering {
self.id.cmp(&other.id)
}
}
impl PartialOrd for Proc {
fn partial_cmp(&self, other: &Proc) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Proc {
fn eq(&self, other: &Proc) -> bool {
self.id == other.id
}
}
impl Eq for Proc {}
fn diff(old: &HashMap<sysinfo::Pid, sysinfo::Process>, new: &HashMap<sysinfo::Pid, sysinfo::Process>)
{
for (pid, p) in old {
if !new.contains_key(pid) {
println!("{}:{} stopped", pid, p.name());
}
}
for (pid, p) in new {
if !old.contains_key(pid) {
println!("{}:{} started", pid, p.name());
}
}
}
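// Example output lines (PIDs and process names illustrative):
//   1234:firefox stopped
//   5678:alacritty started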
fn main()
{
let mut system = sysinfo::System::new();
system.refresh_all();
let mut plist : Vec<Proc> = vec![];
for (pid, proc_) in system.get_process_list() {
plist.push(Proc { p : proc_.clone(), id : *pid });
}
plist.sort();
for p in plist {
println!("{}:{} => status: {:?}", p.id, p.p.name(), p.p.status());
}
loop {
let mut system2 = sysinfo::System::new();
system2.refresh_all();
diff(system.get_process_list(),system2.get_process_list());
system = system2;
thread::sleep(time::Duration::from_millis(1000));
println!(".");
}
}
| 18.333333 | 101 | 0.590572 |
6737c3d851266141e9b4e80d4de82f627c4dfa10 | 5,371 | #[cfg(test)]
mod tests {
use atomic_refcell::AtomicRefCell;
use itertools::Itertools;
use rand::{thread_rng, Rng};
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::payload_fixtures::{random_int_payload, random_vector};
use segment::index::hnsw_index::hnsw::HNSWIndex;
use segment::index::struct_payload_index::StructPayloadIndex;
use segment::index::{PayloadIndex, VectorIndex};
use segment::segment_constructor::build_segment;
use segment::types::{
Condition, Distance, FieldCondition, Filter, HnswConfig, Indexes, PayloadIndexType,
PayloadKeyType, PayloadType, Range, SearchParams, SegmentConfig, SeqNumberType,
StorageType, TheMap,
};
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use tempdir::TempDir;
#[test]
fn test_filterable_hnsw() {
let stopped = AtomicBool::new(false);
let dim = 8;
let m = 8;
let num_vectors: u64 = 5_000;
let ef = 32;
let ef_construct = 16;
let distance = Distance::Cosine;
let indexing_threshold = 500;
let num_payload_values = 2;
let mut rnd = thread_rng();
let dir = TempDir::new("segment_dir").unwrap();
let payload_index_dir = TempDir::new("payload_index_dir").unwrap();
let hnsw_dir = TempDir::new("hnsw_dir").unwrap();
let config = SegmentConfig {
vector_size: dim,
index: Indexes::Plain {},
payload_index: Some(PayloadIndexType::Plain),
storage_type: StorageType::InMemory,
distance,
};
let int_key = "int".to_string();
let mut segment = build_segment(dir.path(), &config).unwrap();
for n in 0..num_vectors {
let idx = n.into();
let vector = random_vector(&mut rnd, dim);
let mut payload: TheMap<PayloadKeyType, PayloadType> = Default::default();
payload.insert(
int_key.clone(),
random_int_payload(&mut rnd, num_payload_values),
);
segment
.upsert_point(n as SeqNumberType, idx, &vector)
.unwrap();
segment
.set_full_payload(n as SeqNumberType, idx, payload.clone())
.unwrap();
}
// let opnum = num_vectors + 1;
let payload_index = StructPayloadIndex::open(
segment.condition_checker.clone(),
segment.vector_storage.clone(),
segment.payload_storage.clone(),
segment.id_tracker.clone(),
payload_index_dir.path(),
)
.unwrap();
let payload_index_ptr = Arc::new(AtomicRefCell::new(payload_index));
let hnsw_config = HnswConfig {
m,
ef_construct,
full_scan_threshold: indexing_threshold,
};
let mut hnsw_index = HNSWIndex::open(
hnsw_dir.path(),
segment.condition_checker.clone(),
segment.vector_storage.clone(),
payload_index_ptr.clone(),
hnsw_config,
)
.unwrap();
hnsw_index.build_index(&stopped).unwrap();
payload_index_ptr
.borrow_mut()
.set_indexed(&int_key)
.unwrap();
let borrowed_payload_index = payload_index_ptr.borrow();
let blocks = borrowed_payload_index
.payload_blocks(&int_key, indexing_threshold)
.collect_vec();
for block in blocks.iter() {
assert!(
block.condition.range.is_some(),
"only range conditions should be generated for this type of payload"
);
}
assert_eq!(blocks.len(), num_vectors as usize / indexing_threshold * 2);
hnsw_index.build_index(&stopped).unwrap();
let top = 3;
let mut hits = 0;
let attempts = 100;
for _i in 0..attempts {
let query = random_vector(&mut rnd, dim);
let range_size = 40;
let left_range = rnd.gen_range(0..400);
let right_range = left_range + range_size;
let filter = Filter::new_must(Condition::Field(FieldCondition {
key: int_key.clone(),
r#match: None,
range: Some(Range {
lt: None,
gt: None,
gte: Some(left_range as f64),
lte: Some(right_range as f64),
}),
geo_bounding_box: None,
geo_radius: None,
}));
let filter_query = Some(&filter);
// let filter_query = None;
let index_result = hnsw_index.search_with_graph(
&query,
filter_query,
top,
Some(&SearchParams { hnsw_ef: Some(ef) }),
);
let plain_result =
segment
.vector_index
.borrow()
.search(&query, filter_query, top, None);
if plain_result == index_result {
hits += 1;
}
}
        assert!(attempts - hits < 5, "hits: {} of {}", hits, attempts); // allow at most 4 misses out of 100 attempts (<5% failures)
eprintln!("hits = {:#?} out of {}", hits, attempts);
}
}
| 32.95092 | 100 | 0.544405 |
3a2909cf266a185bed2923857d6809680f8a436e | 3,464 | use serde::Deserialize;
#[derive(Deserialize, Debug, Default)]
pub struct Card {
pub name: String,
pub mana_cost: String,
pub type_line: String,
pub power: Option<String>,
pub toughness: Option<String>,
pub oracle_text: String,
pub prints_search_uri: String,
pub prices: Prices,
pub set_name: String,
pub loyalty: Option<String>,
}
#[derive(Deserialize, Debug, Default)]
pub struct Cards {
data: Vec<Card>,
}
#[derive(Deserialize, Debug, Default)]
pub struct Prices {
usd: Option<String>,
usd_foil: Option<String>,
eur: Option<String>,
eur_foil: Option<String>,
}
impl Card {
    pub fn pretty_print(&self) {
        println!();
        print!("{}", self.name);
        print!(" {}", self.mana_cost);
        println!();
        println!();
        println!();
        println!("{}", self.type_line);
        println!();
        println!("{}", self.oracle_text);
        println!();
match self {
Card {
power: Some(p),
toughness: Some(t),
..
} => {
println!("{} / {}", p, t);
}
Card {
power: Some(p),
toughness: None,
..
} => {
println!("{} / N/A", p);
}
Card {
power: None,
toughness: Some(t),
..
} => {
println!("N/A / {}", t);
}
Card {
loyalty: Some(l), ..
} => {
println!("Loyalty: {}", l);
}
            _ => {
                println!();
            }
}
}
#[tokio::main]
pub async fn get_sets(&mut self) -> Result<Cards, Box<dyn std::error::Error + 'static>> {
let response = reqwest::get(&self.prints_search_uri).await?;
let cards = response.json::<Cards>().await?;
Ok(cards)
}
}
pub fn print_prices(cards: Cards, currency: String) {
    for card in cards.data.iter() {
        println!("{}", card.set_name);
        // Pick the (regular, foil) price pair for the requested currency.
        let (price, foil) = if currency == "usd" {
            (&card.prices.usd, &card.prices.usd_foil)
        } else {
            (&card.prices.eur, &card.prices.eur_foil)
        };
        match (price, foil) {
            (Some(p), Some(f)) => println!("  {} / {}", p, f),
            (None, Some(f)) => println!("  N/A / {}", f),
            (Some(p), None) => println!("  {} / N/A", p),
            (None, None) => println!("  N/A / N/A"),
        }
    }
| 26.852713 | 93 | 0.37067 |
db1de9c364e8aa90b8031b7bca1f58f211a1cb04 | 1,635 | #[macro_export]
macro_rules! cfg_kinds {
(if flat in [flat $(, $kind:ident)*] {$($a:item)*} $(else {$($b:item)*})?) => { $($a)* };
(if serde in [serde $(, $kind:ident)*] {$($a:item)*} $(else {$($b:item)*})?) => { $($a)* };
(if $x:ident in [] {$($a:tt)*} $(else {$($b:item)*})?) => { $($($b)*)? };
(if $x:ident in [$other:ident $(, $kind:ident)*] {$($a:item)*} $(else {$($b:item)*})?) => {
cfg_kinds!(if $x in [$($kind),*] { $($a)* } $(else { $($b)* })?)
};
}
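// Illustrative expansions (a sketch of how the rules above resolve):
//   cfg_kinds!(if flat in [flat, serde] { fn a() {} } else { fn b() {} })
// matches the first rule and emits `fn a() {}`, while
//   cfg_kinds!(if flat in [serde] { fn a() {} } else { fn b() {} })
// recurses past `serde` to the empty list and emits `fn b() {}`.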
macro_rules! type_tag {
(
type $ty:tt $(<$lt:tt>)*,
name $name:literal,
kinds $kinds:tt
) => {{
use $crate::reexport::lazy_static::lazy_static;
use $crate::tag::FlatTypeTag;
use $crate::tag::TypeTag;
use $crate::tag::HasFlatTypeTag;
use $crate::tag::HasTypeTag;
use ::std::marker::PhantomData;
$crate::cfg_kinds!{
if flat in $kinds {
static FLAT_TAG: FlatTypeTag = FlatTypeTag::new::<$ty>();
unsafe impl $(<$lt>)? HasFlatTypeTag for $ty $(<$lt>)? {
fn flat_type_tag() -> &'static FlatTypeTag{ &FLAT_TAG }
}
static FLAT_TAG_OPTION: Option<FlatTypeTag> = Some(FLAT_TAG);
} else {
static FLAT_TAG_OPTION: Option<FlatTypeTag> = None;
}
};
lazy_static! {
static ref TYPE_TAG: TypeTag = TypeTag::new::<$ty>($name, FLAT_TAG_OPTION);
}
impl $(<$lt>)* HasTypeTag for $ty $(<$lt>)*{
fn type_tag() -> &'static TypeTag { &TYPE_TAG }
}
PhantomData::<$ty>
}}
} | 38.023256 | 95 | 0.47156 |
6142cdb7a2715ff638ffae388dfc251f3d2d0c42 | 1,943 | #![doc = "generated by AutoRust"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
#[doc = "The response object containing the token for the client"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ClientTokenResponse {
#[doc = "The token value for the WebSocket client to connect to the service"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub token: Option<String>,
}
impl ClientTokenResponse {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The error object."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ErrorDetail {
#[doc = "One of a server-defined set of error codes."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[doc = "A human-readable representation of the error."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[doc = "The target of the error."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub target: Option<String>,
#[doc = "An array of details about specific errors that led to this reported error."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub details: Vec<ErrorDetail>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub inner: Option<InnerError>,
}
impl ErrorDetail {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct InnerError {
#[doc = "A more specific error code than was provided by the containing error."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub inner: Box<Option<InnerError>>,
}
impl InnerError {
pub fn new() -> Self {
Self::default()
}
}
| 36.660377 | 89 | 0.675759 |
671447d1d154bbd122273fcafa4e86b04f8d18bd | 11,449 | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use kernel::ReturnCode;
use LastCallback::*;
#[derive(Debug,PartialEq)]
enum LastCallback {
Uncalled,
InitializeDone(ReturnCode),
IncrementDone(ReturnCode),
}
impl core::default::Default for LastCallback {
fn default() -> LastCallback {
LastCallback::Uncalled
}
}
struct MockClient {
last_callback: core::cell::Cell<LastCallback>,
}
impl MockClient {
pub fn new() -> MockClient {
MockClient { last_callback: Default::default() }
}
pub fn take_last(&self) -> LastCallback {
self.last_callback.take()
}
}
impl h1::nvcounter::Client for MockClient {
fn initialize_done(&self, status: ReturnCode) {
self.last_callback.set(InitializeDone(status));
}
fn increment_done(&self, status: ReturnCode) {
self.last_callback.set(IncrementDone(status));
}
}
#[test]
fn test_capsule() -> bool {
use crate::fake_flash::{ErrorTime,FakeFlash};
use h1::hil::flash::flash::{Client,Flash};
use h1::nvcounter::{FlashCounter,NvCounter};
use h1::nvcounter::internal::{COUNTS_PER_PAGE,Page,WORDS_PER_PAGE};
use ReturnCode::{EBUSY,FAIL,SUCCESS,SuccessWithValue};
use test::{require,require_eq};
// Setup
let mut buffer = [0];
let flash = FakeFlash::new();
let nvcounter = FlashCounter::new(&mut buffer, &flash);
let client = MockClient::new();
nvcounter.set_client(&client);
// Flip some bits so that initialization doesn't finish immediately after
// step A1
let mut buffer = [0];
flash.write(Page::High as usize * WORDS_PER_PAGE + 100, &mut buffer);
// Try to initialize the counter but fail the first erase call.
flash.configure_error(Some(ErrorTime::Fast));
require!(nvcounter.initialize() == FAIL);
// Check to make sure it didn't mark the initialization as ongoing.
require!(nvcounter.initialize() == FAIL);
// Try to initialize again but make the first erase fail asynchronously.
flash.configure_error(Some(ErrorTime::Callback));
require!(nvcounter.initialize() == SUCCESS);
// Confirm it will reject concurrent requests.
require!(nvcounter.initialize() == EBUSY);
require!(nvcounter.read_and_increment() == EBUSY);
require!(client.take_last() == Uncalled);
nvcounter.erase_done(FAIL);
require!(client.take_last() == InitializeDone(FAIL));
// Complete step A1; make the start of step A2 fail.
flash.configure_error(None);
require!(nvcounter.initialize() == SUCCESS);
flash.configure_error(Some(ErrorTime::Fast));
require!(client.take_last() == Uncalled);
nvcounter.erase_done(SUCCESS);
require!(client.take_last() == InitializeDone(FAIL));
// Restart initialization, and make step A2 fail asynchronously.
flash.configure_error(None);
require!(nvcounter.initialize() == SUCCESS);
flash.configure_error(Some(ErrorTime::Callback));
nvcounter.erase_done(SUCCESS);
require!(client.take_last() == Uncalled);
nvcounter.erase_done(FAIL);
require!(client.take_last() == InitializeDone(FAIL));
// Successful initialization.
flash.configure_error(None);
require!(nvcounter.initialize() == SUCCESS);
require!(client.take_last() == Uncalled);
nvcounter.erase_done(SUCCESS);
require!(client.take_last() == Uncalled);
nvcounter.erase_done(SUCCESS);
require!(client.take_last() == InitializeDone(SUCCESS));
// Perform a successful read and increment.
require!(nvcounter.read_and_increment() == SuccessWithValue { value: 0 });
require!(client.take_last() == Uncalled);
let mut buffer = [0];
nvcounter.write_done(&mut buffer, SUCCESS);
require!(client.take_last() == IncrementDone(SUCCESS));
// Try to increment but make the initial write call fail.
flash.configure_error(Some(ErrorTime::Fast));
require!(nvcounter.read_and_increment() == FAIL);
require!(client.take_last() == Uncalled);
// Try to increment; fail the write call asynchronously.
flash.configure_error(Some(ErrorTime::Callback));
require!(nvcounter.read_and_increment() == SuccessWithValue { value: 1 });
require!(client.take_last() == Uncalled);
let mut buffer = [0];
nvcounter.write_done(&mut buffer, FAIL);
require!(client.take_last() == IncrementDone(FAIL));
// Adjust the flash state to be two ticks before low page rollover.
flash.configure_error(None);
let mut buffer = [0x0000003C];
flash.write(Page::Low as usize * WORDS_PER_PAGE + 511, &mut buffer);
// Increment. This should leave the flash in the state immediately before
// low page rollover.
require!(nvcounter.read_and_increment() ==
SuccessWithValue { value: COUNTS_PER_PAGE as usize - 1 });
// Confirm it will reject concurrent requests.
require!(nvcounter.initialize() == EBUSY);
require!(nvcounter.read_and_increment() == EBUSY);
require!(client.take_last() == Uncalled);
let mut buffer = [0];
nvcounter.write_done(&mut buffer, SUCCESS);
require!(client.take_last() == IncrementDone(SUCCESS));
// Start the rollover increment; fail the first high page increment (C1)
// immediately.
flash.configure_error(Some(ErrorTime::Fast));
require!(nvcounter.read_and_increment() == FAIL);
require!(client.take_last() == Uncalled);
// Start the rollover increment; fail the first high page increment (C1)
// asynchronously.
flash.configure_error(Some(ErrorTime::Callback));
require_eq!("C1 async FAIL", nvcounter.read_and_increment(),
SuccessWithValue { value: COUNTS_PER_PAGE as usize });
require!(client.take_last() == Uncalled);
let mut buffer = [0];
nvcounter.write_done(&mut buffer, FAIL);
require!(client.take_last() == IncrementDone(FAIL));
// Start the rollover increment; let the high page increment succeed but
// fail the low page erase quickly. This will commit the increment but not
// clean it up, so we should get a successful call.
flash.configure_error(None);
require_eq!("C2 async FAIL", nvcounter.read_and_increment(),
SuccessWithValue { value: COUNTS_PER_PAGE as usize });
require!(client.take_last() == Uncalled);
flash.configure_error(Some(ErrorTime::Fast));
let mut buffer = [0];
nvcounter.write_done(&mut buffer, SUCCESS);
require!(client.take_last() == IncrementDone(SUCCESS));
// Now the high page is odd and the low page is maxed out.
// Try another increment. Fail step C2 asynchronously.
flash.configure_error(Some(ErrorTime::Callback));
require!(nvcounter.read_and_increment() ==
SuccessWithValue { value: COUNTS_PER_PAGE as usize + 1 });
require!(client.take_last() == Uncalled);
nvcounter.erase_done(FAIL);
require!(client.take_last() == IncrementDone(FAIL));
// Try another increment, fail step C3 immediately.
flash.configure_error(None);
require_eq!("C3 fast FAIL", nvcounter.read_and_increment(),
SuccessWithValue { value: COUNTS_PER_PAGE as usize + 1 });
require!(client.take_last() == Uncalled);
flash.configure_error(Some(ErrorTime::Fast));
nvcounter.erase_done(SUCCESS);
require!(client.take_last() == IncrementDone(FAIL));
// Try to increment, fail step C3 asynchronously.
flash.configure_error(Some(ErrorTime::Callback));
require_eq!("C3 async FAIL", nvcounter.read_and_increment(),
SuccessWithValue { value: COUNTS_PER_PAGE as usize + 1 });
require!(client.take_last() == Uncalled);
let mut buffer = [0];
nvcounter.write_done(&mut buffer, FAIL);
require!(client.take_last() == IncrementDone(FAIL));
// Finish the rollover increment, and fail the next increment immediately.
flash.configure_error(None);
require_eq!("rollover1", nvcounter.read_and_increment(),
SuccessWithValue { value: COUNTS_PER_PAGE as usize + 1 });
require!(client.take_last() == Uncalled);
flash.configure_error(Some(ErrorTime::Fast));
let mut buffer = [0];
nvcounter.write_done(&mut buffer, SUCCESS);
require!(client.take_last() == IncrementDone(FAIL));
// Perform a successful increment.
flash.configure_error(None);
require_eq!("post-rollover", nvcounter.read_and_increment(),
SuccessWithValue { value: COUNTS_PER_PAGE as usize + 1 });
require!(client.take_last() == Uncalled);
let mut buffer = [0];
nvcounter.write_done(&mut buffer, SUCCESS);
require!(client.take_last() == IncrementDone(SUCCESS));
// Advance to the next low page rollover and perform an error-free rollover
// increment and cleanup.
let mut buffer = [0];
flash.write(Page::Low as usize * WORDS_PER_PAGE + 511, &mut buffer);
require_eq!("rollover2", nvcounter.read_and_increment(),
SuccessWithValue { value: 2 * COUNTS_PER_PAGE as usize + 1 });
require!(client.take_last() == Uncalled);
let mut buffer = [0];
nvcounter.write_done(&mut buffer, SUCCESS);
require!(client.take_last() == IncrementDone(SUCCESS));
nvcounter.erase_done(SUCCESS);
require!(client.take_last() == Uncalled);
let mut buffer = [0];
nvcounter.write_done(&mut buffer, SUCCESS);
require!(client.take_last() == Uncalled);
// Verify the value with another increment.
require_eq!("post-rollover2", nvcounter.read_and_increment(),
SuccessWithValue { value: 2 * COUNTS_PER_PAGE as usize + 2 });
require!(client.take_last() == Uncalled);
let mut buffer = [0];
nvcounter.write_done(&mut buffer, SUCCESS);
require!(client.take_last() == IncrementDone(SUCCESS));
// Advance to the next rollover again, and perform an error-free rollover
// increment with no delay before the next increment.
let mut buffer = [0];
flash.write(Page::Low as usize * WORDS_PER_PAGE + 511, &mut buffer);
require_eq!("rollover3", nvcounter.read_and_increment(),
SuccessWithValue { value: 3 * COUNTS_PER_PAGE as usize + 2 });
require!(client.take_last() == Uncalled);
let mut buffer = [0];
nvcounter.write_done(&mut buffer, SUCCESS);
require!(client.take_last() == IncrementDone(SUCCESS));
// Note: The erase should still be going on, so make FakeFlash return EBUSY.
flash.set_busy(true);
require_eq!("post-rollover3", nvcounter.read_and_increment(),
SuccessWithValue { value: 3 * COUNTS_PER_PAGE as usize + 3 });
flash.set_busy(false);
// Finish C2
nvcounter.erase_done(SUCCESS);
require!(client.take_last() == Uncalled);
// Finish C3
let mut buffer = [0];
nvcounter.write_done(&mut buffer, SUCCESS);
require!(client.take_last() == Uncalled);
// Finish B1
let mut buffer = [0];
nvcounter.write_done(&mut buffer, SUCCESS);
require!(client.take_last() == IncrementDone(SUCCESS));
true
}
| 40.743772 | 80 | 0.683903 |
abde24ec06ee2e8a137b3a15b146ede37e0ed097 | 5,070 | use arrayvec::ArrayVec;
use crate::{fs::BSIZE, ide::IDE, param::NBUF, sleeplock::SleepLockC, spinlock::SpinLockC};
#[repr(C)]
pub struct Buffer {
pub flags: i32,
pub dev: usize,
pub blockno: usize,
pub lock: SleepLockC,
refcnt: u32,
prev: *mut Self,
next: *mut Self,
pub qnext: *mut Self,
pub data: [u8; BSIZE],
}
impl Buffer {
pub const VALID: i32 = 0x2; // buffer has been read from disk
pub const DIRTY: i32 = 0x4; // buffer needs to be written to disk
pub const fn new(dev: usize, blockno: usize) -> Self {
Self {
flags: 0,
dev,
blockno,
lock: SleepLockC::new(),
refcnt: 1,
prev: core::ptr::null_mut(),
next: core::ptr::null_mut(),
qnext: core::ptr::null_mut(),
data: [0; BSIZE],
}
}
// Write b's contents to disk. Must be locked.
pub fn write(&mut self) {
if !self.lock.is_locked() {
panic!("Buffer::write");
}
self.flags |= Buffer::DIRTY;
unsafe {
IDE.as_mut().unwrap().rw(self);
}
}
pub fn release(&mut self, cache: &mut BufferCache) {
if !self.lock.is_locked() {
panic!("Buffer::release");
}
self.lock.release();
cache.release_buffer(self);
}
}
// Buffer cache.
//
// The buffer cache is a linked list of buf structures holding
// cached copies of disk block contents. Caching disk blocks
// in memory reduces the number of disk reads and also provides
// a synchronization point for disk blocks used by multiple processes.
//
// Interface:
// * To get a buffer for a particular disk block, call bread.
// * After changing buffer data, call bwrite to write it to disk.
// * When done with the buffer, call brelse.
// * Do not use the buffer after calling brelse.
// * Only one process at a time can use a buffer,
// so do not keep them longer than necessary.
//
// The implementation uses two state flags internally:
// * B_VALID: the buffer data has been read from the disk.
// * B_DIRTY: the buffer data has been modified
// and needs to be written to disk.
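//
// A minimal usage sketch (hypothetical `dev` and `blockno` values), going
// through the C bindings defined at the bottom of this file:
//
//     let b = unsafe { &mut *bread(dev, blockno) }; // locked, valid buffer
//     b.data[0] = 0xff;                             // modify the cached block
//     bwrite(b);                                    // mark dirty, write to disk
//     brelse(b);                                    // unlock, drop the refcount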
pub struct BufferCache {
lock: SpinLockC,
buffers: ArrayVec<Buffer, NBUF>,
}
impl BufferCache {
pub const fn new() -> Self {
Self {
lock: SpinLockC::new(),
buffers: ArrayVec::new_const(),
}
}
pub fn push(&mut self, dev: usize, blockno: usize) -> usize {
let mut buf = Buffer::new(dev, blockno);
buf.lock.acquire();
if self.buffers.try_push(buf).is_err() {
panic!("BufferCache::get: no buffer");
}
self.buffers.len() - 1
}
// Look through buffer cache for block on device dev.
// If not found, allocate a buffer.
// In either case, return locked buffer.
pub fn get(&mut self, dev: usize, blockno: usize) -> &mut Buffer {
self.lock.acquire();
self.buffers
.retain(|buf| buf.refcnt != 0 || buf.flags & Buffer::DIRTY != 0);
let at = self
.buffers
.iter_mut()
.position(|buf| buf.dev == dev && buf.blockno == blockno);
// Is the block already cached?
if let Some(at) = at {
let buf = &mut self.buffers[at];
buf.refcnt += 1;
self.lock.release();
buf.lock.acquire();
buf
} else {
// Not cached; recycle an unused buffer.
// Even if refcnt==0, B_DIRTY indicates a buffer is in use
// because log.c has modified it but not yet committed it.
let at = self.push(dev, blockno);
self.lock.release();
&mut self.buffers[at]
}
}
// Return a locked buf with the contents of the indicated block.
pub fn read(&mut self, dev: usize, blockno: usize) -> &mut Buffer {
let buf = self.get(dev, blockno);
if buf.flags & Buffer::VALID == 0 {
unsafe {
IDE.as_mut().unwrap().rw(buf);
}
}
buf
}
pub fn release_buffer(&mut self, buf: &mut Buffer) {
self.lock.acquire();
buf.refcnt -= 1;
if buf.refcnt == 0 && buf.flags & Buffer::DIRTY == 0 {
let index = self
.buffers
.iter()
.position(|v| v.dev == buf.dev && v.blockno == buf.blockno)
.unwrap();
self.buffers.swap_remove(index);
}
self.lock.release();
}
}
mod _binding {
use super::*;
static mut CACHE: BufferCache = BufferCache::new();
#[no_mangle]
extern "C" fn bread(dev: u32, blockno: u32) -> *mut Buffer {
unsafe { CACHE.read(dev as usize, blockno as usize) }
}
#[no_mangle]
extern "C" fn bwrite(b: *mut Buffer) {
unsafe {
(*b).write();
}
}
#[no_mangle]
extern "C" fn brelse(b: *mut Buffer) {
unsafe {
(*b).release(&mut CACHE);
}
}
}
| 27.258065 | 90 | 0.544181 |
620154545dfe5f36d62af77f3c2f3cff25d98854 | 1,226 | use crate::{
agents::repo,
objects::{Item, JsError},
};
use web_sys::{IdbDatabase, IdbTransactionMode};
pub struct UpdateItemTask {
item: Item,
}
impl UpdateItemTask {
pub fn new_with_item(item: Item) -> Self {
Self { item }
}
}
impl repo::RepositoryTask for UpdateItemTask {
fn get_request(&mut self, db: &IdbDatabase) -> Result<Vec<web_sys::IdbRequest>, JsError> {
let transaction = db.transaction_with_str_sequence_and_mode(
&serde_wasm_bindgen::to_value(&vec!["items"])?,
IdbTransactionMode::Readwrite,
)?;
let item_os = transaction.object_store("items")?;
item_os.put_with_key(
&serde_wasm_bindgen::to_value(&self.item)?,
&serde_wasm_bindgen::to_value(&self.item.get_id())?,
)?;
Ok(vec![item_os.get(&serde_wasm_bindgen::to_value(
&self.item.get_id(),
)?)?])
}
fn set_response(
&mut self,
result: Result<wasm_bindgen::JsValue, wasm_bindgen::JsValue>,
) -> Result<Option<repo::Response>, JsError> {
// Ok(Some(repo::Response::UpdateItem(
// serde_wasm_bindgen::from_value(result?)?,
// )))
Ok(None)
}
}
| 28.511628 | 94 | 0.601958 |
69983e9dc1ea41687bf90a47774abf2404f7157f | 2,510 | struct Solution {}
impl Solution {
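    // Greedy line packing (LeetCode 68, "Text Justification"): pull words into
    // `line_buffer` while they still fit in `max_width` counting one space per
    // gap, then flush. On flush, spare spaces are spread evenly over the gaps,
    // with the leftmost gaps taking the remainder; a single-word line and the
    // final line are instead left-justified and right-padded with spaces.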
pub fn full_justify(mut words: Vec<String>, max_width: i32) -> Vec<String> {
let mut lines = vec![];
let first_word = words.remove(0);
let mut line_buffer = vec![first_word.clone()];
let mut line_size = first_word.len();
for word in words {
if word.len() + 1 + line_size > max_width as usize {
if line_buffer.len() == 1 {
let padding = max_width as usize - line_size;
let space_padding = " ".repeat(padding);
let first_word = line_buffer.remove(0);
lines.push(format!("{}{}", first_word, space_padding));
line_buffer = vec![word.clone()];
line_size = word.len();
continue;
}
let rest = max_width as usize - line_size;
let padding = rest / (line_buffer.len() - 1);
let mut leftover_padding = rest % (line_buffer.len() - 1);
let even_space = " ".repeat(padding);
let first_word = line_buffer.remove(0);
let line = line_buffer.into_iter().fold(first_word, |line, word| {
let uneven_space = if leftover_padding > 0 { " " } else { "" };
if leftover_padding > 0 {
leftover_padding -= 1;
}
format!("{} {}{}{}", line, even_space, uneven_space, word)
});
lines.push(line);
line_buffer = vec![word.clone()];
line_size = word.len();
} else {
line_size += 1 + word.len();
line_buffer.push(word);
}
}
        if !line_buffer.is_empty() {
let padding = max_width as usize - line_size;
let space_padding = " ".repeat(padding);
let mut line = line_buffer.join(" ");
line.push_str(&space_padding);
lines.push(line);
}
lines
}
}
fn main() {
let input = vec![
"This".to_string(), "is".to_string(), "an".to_string(),
"example".to_string(), "of".to_string(), "text".to_string(),
"justification.".to_string()
];
let output = vec![
"This is an".to_string(),
"example of text".to_string(),
"justification. ".to_string()
];
assert_eq!(output, Solution::full_justify(input, 16));
}
| 35.352113 | 83 | 0.484861 |
e4ef85149709edce8fe2a972ca7831bdcbf2254b | 6,552 | // Copyright (c) 2021, COSIC-KU Leuven, Kasteelpark Arenberg 10, bus 2452, B-3001 Leuven-Heverlee, Belgium.
// Copyright (c) 2021, Cosmian Tech SAS, 53-55 rue La Boétie, Paris, France.
#![warn(clippy::all)]
#![allow(clippy::trivial_regex)]
#![deny(rust_2018_idioms)]
use parse_display::{Display, FromStr};
use scasm::asm::Body;
use scasm::Compiler;
use std::io::Read;
use std::io::Write;
use std::path::PathBuf;
use structopt::StructOpt;
#[derive(Debug)]
enum Error {
Io(std::io::Error),
AlreadyReported,
Raw(String),
}
impl From<std::io::Error> for Error {
fn from(err: std::io::Error) -> Self {
Error::Io(err)
}
}
impl From<scasm::ErrorReported> for Error {
fn from(_: scasm::ErrorReported) -> Self {
Error::AlreadyReported
}
}
impl From<String> for Error {
fn from(s: String) -> Self {
Error::Raw(s)
}
}
impl<'a> From<&'a str> for Error {
fn from(s: &'a str) -> Self {
Error::Raw(s.to_string())
}
}
#[derive(StructOpt)]
struct Args {
/// Do not show warnings, only errors.
#[structopt(long)]
hide_warnings: bool,
/// Optimization level, level 0 means no optimizations.
#[structopt(short = "O", long, default_value = "3")]
optimization_level: u8,
/// Optimizations to run. Overrides `--optimization-level` and just sets the list of optimizations that are run.
/// If empty, dumps the list of optimizations available
#[structopt(long)]
optimizations: Option<Vec<String>>,
/// The input format, either `assembly` or `bytecode`. If not specified, it is automatically chosen from the input file extension.
#[structopt(long)]
input_format: Option<InputFormat>,
/// The output format, either `assembly`, `graphviz` or `bytecode`. If not specified, it is automatically chosen from the output file extension.
#[structopt(long)]
output_format: Option<OutputFormat>,
/// Dump the asm after each optimization step to the given directory.
#[structopt(long, parse(from_os_str))]
dump_optimizations: Option<PathBuf>,
/// The path to the file to process.
/// The input format is figured out from the file extension and can be either
/// `.asm` or `.bc`
/// If this is missing, the input is read from `stdin` and optimized assembly is output to `stdout`
#[structopt(parse(from_os_str))]
input: Option<PathBuf>,
/// The path to write the output to.
/// The output format is figured out from the extension and can be one of
/// * `.asm` (scasm assembly)
/// * `.bc` (bytecode)
/// * `.dot` (graphviz dot file showing the control flow graph of the input)
///
/// If this is missing, the output is written to `stdout` in scale assembly format
#[structopt(parse(from_os_str))]
output: Option<PathBuf>,
}
#[derive(Display, FromStr, PartialEq, Debug)]
#[display(style = "snake_case")]
enum InputFormat {
Assembly,
Bytecode,
}
#[derive(Display, FromStr, PartialEq, Debug)]
#[display(style = "snake_case")]
enum OutputFormat {
Assembly,
Bytecode,
Graphviz,
}
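// Example invocations (hypothetical binary name and file paths); the formats
// are normally inferred from the file extensions:
//
//     scasm program.asm program.bc    # assemble to bytecode
//     scasm program.bc cfg.dot        # dump the control flow graph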
#[paw::main]
fn main(args: Args) -> Result<(), Error> {
scasm::init_logger()?;
if let Some(opts) = &args.optimizations {
if opts.is_empty() {
scasm::transforms::list_optimizations();
return Ok(());
}
}
let mut cx = scasm::Compiler::stderr();
cx.show_warnings = !args.hide_warnings;
cx.optimization_level = args.optimization_level;
let input_format = match args.input_format {
Some(f) => f,
None => match &args.input {
Some(file) => {
let ext = file
.extension()
.ok_or("cannot write to file without file extension")?;
match ext.to_str() {
Some("asm") => InputFormat::Assembly,
Some("bc") => InputFormat::Bytecode,
_ => return Err(format!("unknown file extension {:?}", ext).into()),
}
}
None => InputFormat::Assembly,
},
};
let mut parsed = if let Some(file) = args.input {
let f = std::fs::File::open(&file)?;
read_input(file, f, &cx, input_format)
} else {
read_input(
PathBuf::from("<stdin>"),
std::io::stdin(),
&cx,
input_format,
)
};
cx.check_for_errors()?;
if let Some(opts) = args.optimizations {
scasm::transforms::run_optimizations(&cx, &mut parsed, args.dump_optimizations, &opts)
} else {
scasm::transforms::apply_default_optimization_pipeline(
&cx,
&mut parsed,
args.dump_optimizations,
);
}
cx.check_for_errors()?;
let output_format = match args.output_format {
Some(of) => of,
None => match &args.output {
Some(file) => {
let ext = file
.extension()
.ok_or("cannot write to file without file extension")?;
match ext.to_str() {
Some("asm") => OutputFormat::Assembly,
Some("bc") => OutputFormat::Bytecode,
Some("dot") => OutputFormat::Graphviz,
_ => return Err(format!("unknown file extension {:?}", ext).into()),
}
}
None => OutputFormat::Assembly,
},
};
if let Some(file) = args.output {
write_output(std::fs::File::create(file)?, &cx, parsed, output_format)?;
} else {
write_output(std::io::stdout(), &cx, parsed, output_format)?;
}
cx.check_for_errors()?;
Ok(())
}
fn read_input(source: PathBuf, f: impl Read, cx: &Compiler, input_format: InputFormat) -> Body<'_> {
match input_format {
InputFormat::Assembly => cx.parse_asm(source, f),
InputFormat::Bytecode => cx.parse_bytecode(source, f),
}
}
fn write_output<'a>(
mut f: impl Write,
cx: &'a Compiler,
parsed: Body<'a>,
output_format: OutputFormat,
) -> Result<(), Error> {
match output_format {
OutputFormat::Assembly => {
let relexed = parsed.relex(&cx);
for lex in relexed {
writeln!(f, "{}", lex.display(cx))?;
}
}
OutputFormat::Graphviz => parsed.print_dot_file(cx, f)?,
OutputFormat::Bytecode => {
let relexed = parsed.relex(&cx);
scasm::binary::generate_bytecode(&cx, &relexed, f)?
}
}
Ok(())
}
| 30.760563 | 148 | 0.580281 |
1875d8280cb627b21e8b25be5406249f85b3cbcb | 248 | struct Bar<T> {
bar: T
}
struct Foo();
impl Foo {
fn foo() { }
}
fn main() {
let thing = Bar { bar: Foo };
thing.bar.foo();
//~^ ERROR no method named `foo` found for fn item `fn() -> Foo {Foo}` in the current scope [E0599]
}
| 16.533333 | 103 | 0.540323 |
38629783f909c1e65883473057451a62aae76ad5 | 1,259 |
// =================================================================
//
// * WARNING *
//
// This file is generated!
//
// Changes made to this file will be overwritten. If changes are
// required to the generated code, the service_crategen project
// must be updated to generate the changes.
//
// =================================================================
#![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")]
//! Amazon Inspector enables you to analyze the behavior of your AWS resources and to identify potential security issues. For more information, see the [Amazon Inspector User Guide](http://docs.aws.amazon.com/inspector/latest/userguide/inspector_introduction.html).
//!
//! If you're using the service, you're probably looking for [InspectorClient](struct.InspectorClient.html) and [Inspector](trait.Inspector.html).
extern crate futures;
extern crate hyper;
extern crate rusoto_core;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
extern crate tokio_core;
mod generated;
mod custom;
pub use generated::*;
pub use custom::*;
| 37.029412 | 325 | 0.640191 |
e6cd3c0a54e4746ccca82bb07a4129c7a6bb8d05 | 766 | use environment::RealEnvironment;
#[macro_use]
mod types;
mod environment;
mod cli;
mod configuration;
mod plugins;
mod utils;
#[cfg(test)]
#[macro_use]
extern crate lazy_static;
#[tokio::main]
async fn main() -> Result<(), types::ErrBox> {
match run().await {
Ok(_) => {},
Err(err) => {
eprintln!("{}", err.to_string());
std::process::exit(1);
}
}
Ok(())
}
async fn run() -> Result<(), types::ErrBox> {
let args = cli::parse_args(std::env::args().collect())?;
let environment = RealEnvironment::new(args.verbose);
let plugin_resolver = plugins::wasm::WasmPluginResolver::new(&environment, &crate::plugins::wasm::compile);
cli::run_cli(args, &environment, &plugin_resolver).await
}
| 20.702703 | 111 | 0.613577 |
09a31518a2e451bcb8686265490e4f902e262f5b | 1,325 | use std::fmt;
#[derive(Debug)]
pub struct ParseError {
wrapping: anyhow::Error,
}
impl From<anyhow::Error> for ParseError {
fn from(wrapping: anyhow::Error) -> Self {
Self { wrapping }
}
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.wrapping.fmt(f)
}
}
impl std::error::Error for ParseError {}
impl<I> nom::error::ParseError<I> for ParseError {
fn from_error_kind(_: I, kind: nom::error::ErrorKind) -> Self {
Self {
wrapping: anyhow::anyhow!("failed at {}", kind.description()),
}
}
fn append(_: I, kind: nom::error::ErrorKind, other: Self) -> Self {
Self {
wrapping: other
.wrapping
.context(format!("failed at {}", kind.description())),
}
}
}
impl<I> nom::error::ContextError<I> for ParseError {
fn add_context(_: I, ctx: &'static str, other: Self) -> Self {
Self {
wrapping: other.wrapping.context(ctx),
}
}
}
impl<I, E> nom::error::FromExternalError<I, E> for ParseError
where
E: std::error::Error + Send + Sync + 'static,
{
fn from_external_error(_: I, _: nom::error::ErrorKind, e: E) -> Self {
Self {
wrapping: anyhow::Error::new(e),
}
}
}
| 23.660714 | 74 | 0.557736 |
e224fab5d2a227e5af0cd8fe6394474ee7445fc8 | 14,353 | use crate::myc::constants::{ColumnFlags, StatusFlags};
use crate::packet::PacketWriter;
use crate::value::ToMysqlValue;
use crate::writers;
use crate::{Column, ErrorKind, StatementData};
use byteorder::WriteBytesExt;
use std::borrow::Borrow;
use std::collections::HashMap;
use std::io::{self, Write};
/// Convenience type for responding to a client `USE <db>` command.
pub struct InitWriter<'a> {
pub(crate) writer: &'a mut PacketWriter,
}
impl<'a> InitWriter<'a> {
/// Tell client that database context has been changed
pub fn ok(self) -> io::Result<()> {
writers::write_ok_packet(self.writer, 0, 0, StatusFlags::empty())
}
/// Tell client that there was a problem changing the database context.
/// Although you can return any valid MySQL error code you probably want
/// to keep it similar to the MySQL server and issue either a
/// `ErrorKind::ER_BAD_DB_ERROR` or a `ErrorKind::ER_DBACCESS_DENIED_ERROR`.
pub fn error<E>(self, kind: ErrorKind, msg: &E) -> io::Result<()>
where
E: Borrow<[u8]> + ?Sized,
{
writers::write_err(kind, msg.borrow(), self.writer)
}
}
/// Convenience type for responding to a client `PREPARE` command.
///
/// This type should not be dropped without calling
/// [`reply`](struct.StatementMetaWriter.html#method.reply) or
/// [`error`](struct.StatementMetaWriter.html#method.error).
#[must_use]
pub struct StatementMetaWriter<'a> {
pub(crate) writer: &'a mut PacketWriter,
pub(crate) stmts: &'a mut HashMap<u32, StatementData>,
}
impl<'a> StatementMetaWriter<'a> {
/// Reply to the client with the given meta-information.
///
/// `id` is a statement identifier that the client should supply when it later wants to execute
/// this statement. `params` is a set of [`Column`](struct.Column.html) descriptors for the
/// parameters the client must provide when executing the prepared statement. `columns` is a
/// second set of [`Column`](struct.Column.html) descriptors for the values that will be
    /// returned in each row when the statement is later executed.
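    ///
    /// A minimal sketch, assuming `info` is the writer handed to the shim's
    /// prepare hook and `params`/`columns` are pre-built `Column` slices:
    ///
    /// ```rust,ignore
    /// info.reply(42, params, columns)
    /// ```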
pub fn reply<PI, CI>(self, id: u32, params: PI, columns: CI) -> io::Result<()>
where
PI: IntoIterator<Item = &'a Column>,
CI: IntoIterator<Item = &'a Column>,
<PI as IntoIterator>::IntoIter: ExactSizeIterator,
<CI as IntoIterator>::IntoIter: ExactSizeIterator,
{
let params = params.into_iter();
self.stmts.insert(
id,
StatementData {
params: params.len() as u16,
..Default::default()
},
);
writers::write_prepare_ok(id, params, columns, self.writer)
}
/// Reply to the client's `PREPARE` with an error.
pub fn error<E>(self, kind: ErrorKind, msg: &E) -> io::Result<()>
where
E: Borrow<[u8]> + ?Sized,
{
writers::write_err(kind, msg.borrow(), self.writer)
}
}
enum Finalizer {
Ok { rows: u64, last_insert_id: u64 },
EOF,
}
/// Convenience type for providing query results to clients.
///
/// This type should not be dropped without calling
/// [`start`](struct.QueryResultWriter.html#method.start),
/// [`completed`](struct.QueryResultWriter.html#method.completed), or
/// [`error`](struct.QueryResultWriter.html#method.error).
///
/// To send multiple resultsets, use
/// [`RowWriter::finish_one`](struct.RowWriter.html#method.finish_one) and
/// [`complete_one`](struct.QueryResultWriter.html#method.complete_one). These are similar to
/// `RowWriter::finish` and `completed`, but both eventually yield back the `QueryResultWriter` so
/// that another resultset can be sent. To indicate that no more resultset will be sent, call
/// [`no_more_results`](struct.QueryResultWriter.html#method.no_more_results). All methods on
/// `QueryResultWriter` (except `no_more_results`) automatically start a new resultset. The
/// `QueryResultWriter` *may* be dropped without calling `no_more_results`, but in this case the
/// program may panic if an I/O error occurs when sending the end-of-records marker to the client.
/// To handle such errors, call `no_more_results` explicitly.
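///
/// A minimal sketch, assuming `results` is the writer handed to the shim's
/// query hook and `columns` is a pre-built `&[Column]`:
///
/// ```rust,ignore
/// let mut rows = results.start(columns)?;
/// rows.write_row(vec!["a", "b"])?;
/// rows.finish() // sends the end-of-resultset marker
/// ```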
#[must_use]
pub struct QueryResultWriter<'a> {
// XXX: specialization instead?
pub(crate) is_bin: bool,
pub(crate) writer: &'a mut PacketWriter,
last_end: Option<Finalizer>,
}
impl<'a> QueryResultWriter<'a> {
pub(crate) fn new(writer: &'a mut PacketWriter, is_bin: bool) -> Self {
QueryResultWriter {
is_bin,
writer,
last_end: None,
}
}
fn finalize(&mut self, more_exists: bool) -> io::Result<()> {
let mut status = StatusFlags::empty();
if more_exists {
status.set(StatusFlags::SERVER_MORE_RESULTS_EXISTS, true);
}
match self.last_end.take() {
None => Ok(()),
Some(Finalizer::Ok {
rows,
last_insert_id,
}) => writers::write_ok_packet(self.writer, rows, last_insert_id, status),
Some(Finalizer::EOF) => writers::write_eof_packet(self.writer, status),
}
}
/// Start a resultset response to the client that conforms to the given `columns`.
///
/// Note that if no columns are emitted, any written rows are ignored.
///
/// See [`RowWriter`](struct.RowWriter.html).
pub fn start(mut self, columns: &'a [Column]) -> io::Result<RowWriter<'a>> {
self.finalize(true)?;
RowWriter::new(self, columns)
}
/// Send an empty resultset response to the client indicating that `rows` rows were affected by
/// the query in this resultset. `last_insert_id` may be given to communiate an identifier for
/// a client's most recent insertion.
pub fn complete_one(mut self, rows: u64, last_insert_id: u64) -> io::Result<Self> {
self.finalize(true)?;
self.last_end = Some(Finalizer::Ok {
rows,
last_insert_id,
});
Ok(self)
}
/// Send an empty resultset response to the client indicating that `rows` rows were affected by
    /// the query. `last_insert_id` may be given to communicate an identifier for a client's most
/// recent insertion.
pub fn completed(self, rows: u64, last_insert_id: u64) -> io::Result<()> {
self.complete_one(rows, last_insert_id)?.no_more_results()
}
/// Reply to the client's query with an error.
pub fn error<E>(mut self, kind: ErrorKind, msg: &E) -> io::Result<()>
where
E: Borrow<[u8]> + ?Sized,
{
self.finalize(true)?;
writers::write_err(kind, msg.borrow(), self.writer)
}
/// Send the last bits of the last resultset to the client, and indicate that there are no more
/// resultsets coming.
pub fn no_more_results(mut self) -> io::Result<()> {
self.finalize(false)
}
}
impl<'a> Drop for QueryResultWriter<'a> {
fn drop(&mut self) {
self.finalize(false).unwrap();
}
}
/// Convenience type for sending rows of a resultset to a client.
///
/// Rows can either be written out one column at a time (using
/// [`write_col`](struct.RowWriter.html#method.write_col) and
/// [`end_row`](struct.RowWriter.html#method.end_row)), or one row at a time (using
/// [`write_row`](struct.RowWriter.html#method.write_row)).
///
/// This type *may* be dropped without calling
/// [`write_row`](struct.RowWriter.html#method.write_row) or
/// [`finish`](struct.RowWriter.html#method.finish). However, in this case, the program may panic
/// if an I/O error occurs when sending the end-of-records marker to the client. To avoid this,
/// call [`finish`](struct.RowWriter.html#method.finish) explicitly.
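///
/// A minimal sketch of the column-at-a-time API, assuming `rows` came from
/// [`QueryResultWriter::start`](struct.QueryResultWriter.html#method.start):
///
/// ```rust,ignore
/// rows.write_col(42i32)?;
/// rows.write_col("hello")?;
/// rows.end_row()?;
/// rows.finish()
/// ```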
#[must_use]
pub struct RowWriter<'a> {
result: Option<QueryResultWriter<'a>>,
bitmap_len: usize,
data: Vec<u8>,
columns: &'a [Column],
// next column to write for the current row
// NOTE: (ab)used to track number of *rows* for a zero-column resultset
col: usize,
finished: bool,
}
impl<'a> RowWriter<'a> {
fn new(result: QueryResultWriter<'a>, columns: &'a [Column]) -> io::Result<RowWriter<'a>> {
let bitmap_len = (columns.len() + 7 + 2) / 8;
let mut rw = RowWriter {
result: Some(result),
columns,
bitmap_len,
data: Vec::new(),
col: 0,
finished: false,
};
rw.start()?;
Ok(rw)
}
#[inline]
fn start(&mut self) -> io::Result<()> {
if !self.columns.is_empty() {
writers::column_definitions(self.columns, self.result.as_mut().unwrap().writer)?;
}
Ok(())
}
/// Write a value to the next column of the current row as a part of this resultset.
///
/// If you do not call [`end_row`](struct.RowWriter.html#method.end_row) after the last row,
/// any errors that occur when writing out the last row will be returned by
/// [`finish`](struct.RowWriter.html#method.finish). If you do not call `finish` either, any
/// errors will cause a panic when the `RowWriter` is dropped.
///
/// Note that the row *must* conform to the column specification provided to
/// [`QueryResultWriter::start`](struct.QueryResultWriter.html#method.start). If it does not,
/// this method will return an error indicating that an invalid value type or specification was
/// provided.
pub fn write_col<T>(&mut self, v: T) -> io::Result<()>
where
T: ToMysqlValue,
{
if self.columns.is_empty() {
return Ok(());
}
if self.result.as_mut().unwrap().is_bin {
if self.col == 0 {
self.result.as_mut().unwrap().writer.write_u8(0x00)?;
// leave space for nullmap
self.data.resize(self.bitmap_len, 0);
}
let c = self
.columns
.get(self.col)
.ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidData,
"row has more columns than specification",
)
})?
.borrow();
if v.is_null() {
if c.colflags.contains(ColumnFlags::NOT_NULL_FLAG) {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"given NULL value for NOT NULL column",
));
} else {
// https://web.archive.org/web/20170404144156/https://dev.mysql.com/doc/internals/en/null-bitmap.html
// NULL-bitmap-byte = ((field-pos + offset) / 8)
// NULL-bitmap-bit = ((field-pos + offset) % 8)
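                    // e.g. the first column (field-pos 0, offset 2) sets
                    // byte 0, bit 2 of the bitmap.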
self.data[(self.col + 2) / 8] |= 1u8 << ((self.col + 2) % 8);
}
} else {
v.to_mysql_bin(&mut self.data, c)?;
}
} else {
v.to_mysql_text(self.result.as_mut().unwrap().writer)?;
}
self.col += 1;
Ok(())
}
/// Indicate that no more column data will be written for the current row.
pub fn end_row(&mut self) -> io::Result<()> {
if self.columns.is_empty() {
self.col += 1;
return Ok(());
}
if self.col != self.columns.len() {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"row has fewer columns than specification",
));
}
if self.result.as_mut().unwrap().is_bin {
self.result
.as_mut()
.unwrap()
.writer
.write_all(&self.data[..])?;
self.data.clear();
}
self.result.as_mut().unwrap().writer.end_packet()?;
self.col = 0;
Ok(())
}
/// Write a single row as a part of this resultset.
///
/// Note that the row *must* conform to the column specification provided to
/// [`QueryResultWriter::start`](struct.QueryResultWriter.html#method.start). If it does not,
/// this method will return an error indicating that an invalid value type or specification was
/// provided.
pub fn write_row<I, E>(&mut self, row: I) -> io::Result<()>
where
I: IntoIterator<Item = E>,
E: ToMysqlValue,
{
if !self.columns.is_empty() {
for v in row {
self.write_col(v)?;
}
}
self.end_row()
}
}
impl<'a> RowWriter<'a> {
fn finish_inner(&mut self) -> io::Result<()> {
if self.finished {
return Ok(());
}
self.finished = true;
if !self.columns.is_empty() && self.col != 0 {
self.end_row()?;
}
Ok(())
}
fn finish_completed(&mut self) -> io::Result<()> {
if self.columns.is_empty() {
// response to no column query is always an OK packet
// we've kept track of the number of rows in col (hacky, I know)
self.result.as_mut().unwrap().last_end = Some(Finalizer::Ok {
rows: self.col as u64,
last_insert_id: 0,
});
} else {
// we wrote out at least one row
self.result.as_mut().unwrap().last_end = Some(Finalizer::EOF);
}
Ok(())
}
/// Indicate to the client that no more rows are coming.
pub fn finish(mut self) -> io::Result<()> {
self.finish_inner()?;
self.finish_completed()?;
self.result.take().unwrap().no_more_results()
}
/// End this resultset response, and indicate to the client that no more rows are coming.
pub fn finish_one(mut self) -> io::Result<QueryResultWriter<'a>> {
self.finish_inner()?;
// we know that dropping self will see self.finished == true,
// and so Drop won't try to use self.result.
Ok(self.result.take().unwrap())
}
/// End this resultset response, and indicate to the client there was an error.
pub fn finish_error<E>(self, kind: ErrorKind, msg: &E) -> io::Result<()>
where
E: Borrow<[u8]>,
{
self.finish_one()?.error(kind, msg)
}
}
impl<'a> Drop for RowWriter<'a> {
fn drop(&mut self) {
self.finish_inner().unwrap();
}
}
| 35.527228 | 121 | 0.591026 |
e55c46a376095081b449c59518ac0ff831047048 | 3,271 | //! # Importer
//!
//! This crate is responsible for importing different email formats (or email storage formats)
//! by reading and parsing the data and writing it into a database (which is defined as a
//! generic type but most probably the `ps-database` module).
//!
//! Currently, the importer requires the construction of a specific type as well as the
//! configuration of the importer format in a configuration.
//!
//! ``` rs
//! // not run as a doctest; see https://github.com/terhechte/postsack/issues/11
//! let path = "tests/resources/mbox";
//! let config =
//! ps_core::Config::new(None, path, vec!["".to_string()], ps_core::FormatType::Mbox).expect("Config");
//! let importer = mbox_importer(config.clone());
//!
//! // Next, crate a database and run the importer
//! // let database = Database::new(&config.database_path).unwrap();
//! // let (_receiver, handle) = importer.import(database).unwrap();
//! ```
use ps_core::eyre::Result;
pub(crate) mod formats;
use formats::{shared, ImporterFormat};
use std::{path::PathBuf, thread::JoinHandle};
use ps_core::{
crossbeam_channel::unbounded, Config, DatabaseLike, FormatType, ImporterLike, Message,
MessageReceiver,
};
pub struct Importer<Format: ImporterFormat> {
config: Config,
format: Format,
}
impl<Format: ImporterFormat + 'static> Importer<Format> {
pub fn new(config: Config, format: Format) -> Self {
Self { config, format }
}
}
impl<Format: ImporterFormat + 'static> ImporterLike for Importer<Format> {
fn import<Database: DatabaseLike + 'static>(
self,
database: Database,
) -> Result<(MessageReceiver, JoinHandle<Result<()>>)> {
let Importer { format, .. } = self;
let (sender, receiver) = unbounded();
let config = self.config;
let handle: JoinHandle<Result<()>> = std::thread::spawn(move || {
let outer_sender = sender.clone();
let processed = move || {
let emails = format.emails(&config, sender.clone())?;
let processed =
shared::database::into_database(&config, emails, sender.clone(), database)?;
Ok(processed)
};
let result = processed();
// Send the error away and map it to a crossbeam channel error
match result {
Ok(_) => Ok(()),
Err(e) => match outer_sender.send(Message::Error(e)) {
Ok(_) => Ok(()),
Err(e) => Err(ps_core::eyre::Report::new(e)),
},
}
});
Ok((receiver, handle))
}
}
pub fn gmail_importer(config: Config) -> Importer<formats::Gmail> {
Importer::new(config, formats::Gmail::default())
}
pub fn applemail_importer(config: Config) -> Importer<formats::AppleMail> {
Importer::new(config, formats::AppleMail::default())
}
pub fn mbox_importer(config: Config) -> Importer<formats::Mbox> {
Importer::new(config, formats::Mbox::default())
}
pub fn default_path(format: &FormatType) -> Option<PathBuf> {
match format {
FormatType::AppleMail => formats::AppleMail::default_path(),
FormatType::GmailVault => formats::Gmail::default_path(),
FormatType::Mbox => formats::Mbox::default_path(),
}
}
| 33.377551 | 107 | 0.617854 |
4bf683fd26cd9c669c6b942ecde06da670f998cf | 5,847 | #[doc = "Register `PUBLISH_RATEBOOST` reader"]
pub struct R(crate::R<PUBLISH_RATEBOOST_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<PUBLISH_RATEBOOST_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<PUBLISH_RATEBOOST_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<PUBLISH_RATEBOOST_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `PUBLISH_RATEBOOST` writer"]
pub struct W(crate::W<PUBLISH_RATEBOOST_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<PUBLISH_RATEBOOST_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<PUBLISH_RATEBOOST_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<PUBLISH_RATEBOOST_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `CHIDX` reader - DPPI channel that event RATEBOOST will publish to."]
pub struct CHIDX_R(crate::FieldReader<u8, u8>);
impl CHIDX_R {
pub(crate) fn new(bits: u8) -> Self {
CHIDX_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for CHIDX_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `CHIDX` writer - DPPI channel that event RATEBOOST will publish to."]
pub struct CHIDX_W<'a> {
w: &'a mut W,
}
impl<'a> CHIDX_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !0xff) | (value as u32 & 0xff);
self.w
}
}
#[doc = "\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EN_A {
#[doc = "0: Disable publishing"]
DISABLED = 0,
#[doc = "1: Enable publishing"]
ENABLED = 1,
}
impl From<EN_A> for bool {
#[inline(always)]
fn from(variant: EN_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `EN` reader - "]
pub struct EN_R(crate::FieldReader<bool, EN_A>);
impl EN_R {
pub(crate) fn new(bits: bool) -> Self {
EN_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> EN_A {
match self.bits {
false => EN_A::DISABLED,
true => EN_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
**self == EN_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
**self == EN_A::ENABLED
}
}
impl core::ops::Deref for EN_R {
type Target = crate::FieldReader<bool, EN_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `EN` writer - "]
pub struct EN_W<'a> {
w: &'a mut W,
}
impl<'a> EN_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: EN_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Disable publishing"]
#[inline(always)]
pub fn disabled(self) -> &'a mut W {
self.variant(EN_A::DISABLED)
}
#[doc = "Enable publishing"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(EN_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 31)) | ((value as u32 & 0x01) << 31);
self.w
}
}
impl R {
#[doc = "Bits 0:7 - DPPI channel that event RATEBOOST will publish to."]
#[inline(always)]
pub fn chidx(&self) -> CHIDX_R {
CHIDX_R::new((self.bits & 0xff) as u8)
}
#[doc = "Bit 31"]
#[inline(always)]
pub fn en(&self) -> EN_R {
EN_R::new(((self.bits >> 31) & 0x01) != 0)
}
}
impl W {
#[doc = "Bits 0:7 - DPPI channel that event RATEBOOST will publish to."]
#[inline(always)]
pub fn chidx(&mut self) -> CHIDX_W {
CHIDX_W { w: self }
}
#[doc = "Bit 31"]
#[inline(always)]
pub fn en(&mut self) -> EN_W {
EN_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Publish configuration for event RATEBOOST\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [publish_rateboost](index.html) module"]
pub struct PUBLISH_RATEBOOST_SPEC;
impl crate::RegisterSpec for PUBLISH_RATEBOOST_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [publish_rateboost::R](R) reader structure"]
impl crate::Readable for PUBLISH_RATEBOOST_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [publish_rateboost::W](W) writer structure"]
impl crate::Writable for PUBLISH_RATEBOOST_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets PUBLISH_RATEBOOST to value 0"]
impl crate::Resettable for PUBLISH_RATEBOOST_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
| 29.831633 | 439 | 0.587481 |
6a7c89d3b4bf337c043d1894e800ccb03ccbb227 | 15,742 | // Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
// Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
// Copyright 2019 Guillaume Becquin
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::bert::attention::{BertAttention, BertIntermediate, BertOutput};
use crate::bert::bert_model::BertConfig;
use std::borrow::{Borrow, BorrowMut};
use tch::{nn, Tensor};
/// # BERT Layer
/// Layer used in BERT encoders.
/// It is made of the following blocks:
/// - `attention`: self-attention `BertAttention` layer
/// - `cross_attention`: (optional) cross-attention `BertAttention` layer (if the model is used as a decoder)
/// - `is_decoder`: flag indicating if the model is used as a decoder
/// - `intermediate`: `BertIntermediate` intermediate layer
/// - `output`: `BertOutput` output layer
pub struct BertLayer {
attention: BertAttention,
is_decoder: bool,
cross_attention: Option<BertAttention>,
intermediate: BertIntermediate,
output: BertOutput,
}
impl BertLayer {
/// Build a new `BertLayer`
///
/// # Arguments
///
/// * `p` - Variable store path for the root of the BERT model
/// * `config` - `BertConfig` object defining the model architecture
///
/// # Example
///
/// ```no_run
/// use rust_bert::bert::{BertConfig, BertLayer};
/// use rust_bert::Config;
/// use std::path::Path;
/// use tch::{nn, Device};
///
/// let config_path = Path::new("path/to/config.json");
/// let device = Device::Cpu;
/// let p = nn::VarStore::new(device);
/// let config = BertConfig::from_file(config_path);
/// let layer: BertLayer = BertLayer::new(&p.root(), &config);
/// ```
pub fn new<'p, P>(p: P, config: &BertConfig) -> BertLayer
where
P: Borrow<nn::Path<'p>>,
{
let p = p.borrow();
let attention = BertAttention::new(p / "attention", config);
let (is_decoder, cross_attention) = match config.is_decoder {
Some(value) => {
if value {
(
value,
Some(BertAttention::new(p / "cross_attention", config)),
)
} else {
(value, None)
}
}
None => (false, None),
};
let intermediate = BertIntermediate::new(p / "intermediate", config);
let output = BertOutput::new(p / "output", config);
BertLayer {
attention,
is_decoder,
cross_attention,
intermediate,
output,
}
}
/// Forward pass through the layer
///
/// # Arguments
///
/// * `hidden_states` - input tensor of shape (*batch size*, *sequence_length*, *hidden_size*).
    /// * `mask` - Optional mask of shape (*batch size*, *sequence_length*). Masked positions have value 0, non-masked value 1. If None, set to 1
/// * `encoder_hidden_states` - Optional encoder hidden state of shape (*batch size*, *encoder_sequence_length*, *hidden_size*). If the model is defined as a decoder and the `encoder_hidden_states` is not None, used in the cross-attention layer as keys and values (query from the decoder).
/// * `encoder_mask` - Optional encoder attention mask of shape (*batch size*, *encoder_sequence_length*). If the model is defined as a decoder and the `encoder_hidden_states` is not None, used to mask encoder values. Positions with value 0 will be masked.
/// * `train` - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.
///
/// # Returns
///
/// * `BertLayerOutput` containing:
/// - `hidden_state` - `Tensor` of shape (*batch size*, *sequence_length*, *hidden_size*)
/// - `attention_scores` - `Option<Tensor>` of shape (*batch size*, *sequence_length*, *hidden_size*)
/// - `cross_attention_scores` - `Option<Tensor>` of shape (*batch size*, *sequence_length*, *hidden_size*)
///
/// # Example
///
/// ```no_run
/// # use rust_bert::bert::{BertConfig, BertLayer};
/// # use tch::{nn, Device, Tensor, no_grad, Kind};
/// # use rust_bert::Config;
/// # use std::path::Path;
/// # let config_path = Path::new("path/to/config.json");
/// # let device = Device::Cpu;
/// # let vs = nn::VarStore::new(device);
/// # let config = BertConfig::from_file(config_path);
/// let layer: BertLayer = BertLayer::new(&vs.root(), &config);
/// let (batch_size, sequence_length, hidden_size) = (64, 128, 512);
/// let input_tensor = Tensor::rand(
/// &[batch_size, sequence_length, hidden_size],
/// (Kind::Float, device),
/// );
/// let mask = Tensor::zeros(&[batch_size, sequence_length], (Kind::Int64, device));
///
/// let layer_output = no_grad(|| layer.forward_t(&input_tensor, Some(&mask), None, None, false));
/// ```
pub fn forward_t(
&self,
hidden_states: &Tensor,
mask: Option<&Tensor>,
encoder_hidden_states: Option<&Tensor>,
encoder_mask: Option<&Tensor>,
train: bool,
) -> BertLayerOutput {
let (attention_output, attention_weights) =
self.attention
.forward_t(hidden_states, mask, None, None, train);
let (attention_output, attention_scores, cross_attention_scores) =
            if self.is_decoder && encoder_hidden_states.is_some() {
let (attention_output, cross_attention_weights) =
self.cross_attention.as_ref().unwrap().forward_t(
&attention_output,
mask,
encoder_hidden_states,
encoder_mask,
train,
);
(attention_output, attention_weights, cross_attention_weights)
} else {
(attention_output, attention_weights, None)
};
let output = self.intermediate.forward(&attention_output);
let output = self.output.forward_t(&output, &attention_output, train);
BertLayerOutput {
hidden_state: output,
attention_weights: attention_scores,
cross_attention_weights: cross_attention_scores,
}
}
}
/// # BERT Encoder
/// Encoder used in BERT models.
/// It is made of a Vector of `BertLayer` through which hidden states will be passed. The encoder can also be
/// used as a decoder (with cross-attention) if `encoder_hidden_states` are provided.
pub struct BertEncoder {
output_attentions: bool,
output_hidden_states: bool,
layers: Vec<BertLayer>,
}
impl BertEncoder {
/// Build a new `BertEncoder`
///
/// # Arguments
///
/// * `p` - Variable store path for the root of the BERT model
/// * `config` - `BertConfig` object defining the model architecture
///
/// # Example
///
/// ```no_run
/// use rust_bert::bert::{BertConfig, BertEncoder};
/// use rust_bert::Config;
/// use std::path::Path;
/// use tch::{nn, Device};
///
/// let config_path = Path::new("path/to/config.json");
/// let device = Device::Cpu;
/// let p = nn::VarStore::new(device);
/// let config = BertConfig::from_file(config_path);
/// let encoder: BertEncoder = BertEncoder::new(&p.root(), &config);
/// ```
pub fn new<'p, P>(p: P, config: &BertConfig) -> BertEncoder
where
P: Borrow<nn::Path<'p>>,
{
let p = p.borrow() / "layer";
let output_attentions = config.output_attentions.unwrap_or(false);
let output_hidden_states = config.output_hidden_states.unwrap_or(false);
let mut layers: Vec<BertLayer> = vec![];
for layer_index in 0..config.num_hidden_layers {
layers.push(BertLayer::new(&p / layer_index, config));
}
BertEncoder {
output_attentions,
output_hidden_states,
layers,
}
}
/// Forward pass through the encoder
///
/// # Arguments
///
/// * `hidden_states` - input tensor of shape (*batch size*, *sequence_length*, *hidden_size*).
    /// * `mask` - Optional mask of shape (*batch size*, *sequence_length*). Masked positions have value 0, non-masked value 1. If None, set to 1
/// * `encoder_hidden_states` - Optional encoder hidden state of shape (*batch size*, *encoder_sequence_length*, *hidden_size*). If the model is defined as a decoder and the `encoder_hidden_states` is not None, used in the cross-attention layer as keys and values (query from the decoder).
/// * `encoder_mask` - Optional encoder attention mask of shape (*batch size*, *encoder_sequence_length*). If the model is defined as a decoder and the `encoder_hidden_states` is not None, used to mask encoder values. Positions with value 0 will be masked.
/// * `train` - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.
///
/// # Returns
///
/// * `BertEncoderOutput` containing:
/// - `hidden_state` - `Tensor` of shape (*batch size*, *sequence_length*, *hidden_size*)
/// - `all_hidden_states` - `Option<Vec<Tensor>>` of length *num_hidden_layers* with shape (*batch size*, *sequence_length*, *hidden_size*)
/// - `all_attentions` - `Option<Vec<Tensor>>` of length *num_hidden_layers* with shape (*batch size*, *sequence_length*, *hidden_size*)
///
/// # Example
///
/// ```no_run
/// # use rust_bert::bert::{BertConfig, BertEncoder};
/// # use tch::{nn, Device, Tensor, no_grad, Kind};
/// # use rust_bert::Config;
/// # use std::path::Path;
/// # let config_path = Path::new("path/to/config.json");
/// # let device = Device::Cpu;
/// # let vs = nn::VarStore::new(device);
/// # let config = BertConfig::from_file(config_path);
/// let encoder: BertEncoder = BertEncoder::new(&vs.root(), &config);
/// let (batch_size, sequence_length, hidden_size) = (64, 128, 512);
/// let input_tensor = Tensor::rand(
/// &[batch_size, sequence_length, hidden_size],
/// (Kind::Float, device),
/// );
/// let mask = Tensor::zeros(&[batch_size, sequence_length], (Kind::Int8, device));
///
/// let encoder_output =
/// no_grad(|| encoder.forward_t(&input_tensor, Some(&mask), None, None, false));
/// ```
pub fn forward_t(
&self,
input: &Tensor,
mask: Option<&Tensor>,
encoder_hidden_states: Option<&Tensor>,
encoder_mask: Option<&Tensor>,
train: bool,
) -> BertEncoderOutput {
let mut all_hidden_states: Option<Vec<Tensor>> = if self.output_hidden_states {
Some(vec![])
} else {
None
};
let mut all_attentions: Option<Vec<Tensor>> = if self.output_attentions {
Some(vec![])
} else {
None
};
let mut hidden_state = None::<Tensor>;
let mut attention_weights: Option<Tensor>;
for layer in &self.layers {
let layer_output = if let Some(hidden_state) = &hidden_state {
layer.forward_t(
hidden_state,
mask,
encoder_hidden_states,
encoder_mask,
train,
)
} else {
layer.forward_t(input, mask, encoder_hidden_states, encoder_mask, train)
};
hidden_state = Some(layer_output.hidden_state);
attention_weights = layer_output.attention_weights;
if let Some(attentions) = all_attentions.borrow_mut() {
attentions.push(attention_weights.as_ref().unwrap().copy());
};
if let Some(hidden_states) = all_hidden_states.borrow_mut() {
hidden_states.push(hidden_state.as_ref().unwrap().copy());
};
}
BertEncoderOutput {
hidden_state: hidden_state.unwrap(),
all_hidden_states,
all_attentions,
}
}
}
/// # BERT Pooler
/// Pooler used in BERT models.
/// It is made of a fully connected layer which is applied to the first sequence element.
pub struct BertPooler {
lin: nn::Linear,
}
impl BertPooler {
/// Build a new `BertPooler`
///
/// # Arguments
///
/// * `p` - Variable store path for the root of the BERT model
/// * `config` - `BertConfig` object defining the model architecture
///
/// # Example
///
/// ```no_run
/// use rust_bert::bert::{BertConfig, BertPooler};
/// use rust_bert::Config;
/// use std::path::Path;
/// use tch::{nn, Device};
///
/// let config_path = Path::new("path/to/config.json");
/// let device = Device::Cpu;
/// let p = nn::VarStore::new(device);
/// let config = BertConfig::from_file(config_path);
/// let pooler: BertPooler = BertPooler::new(&p.root(), &config);
/// ```
pub fn new<'p, P>(p: P, config: &BertConfig) -> BertPooler
where
P: Borrow<nn::Path<'p>>,
{
let p = p.borrow();
let lin = nn::linear(
p / "dense",
config.hidden_size,
config.hidden_size,
Default::default(),
);
BertPooler { lin }
}
/// Forward pass through the pooler
///
/// # Arguments
///
/// * `hidden_states` - input tensor of shape (*batch size*, *sequence_length*, *hidden_size*).
///
/// # Returns
///
/// * `Tensor` of shape (*batch size*, *hidden_size*)
///
/// # Example
///
/// ```no_run
/// # use rust_bert::bert::{BertConfig, BertPooler};
/// # use tch::{nn, Device, Tensor, no_grad, Kind};
/// # use rust_bert::Config;
/// # use std::path::Path;
/// # let config_path = Path::new("path/to/config.json");
/// # let device = Device::Cpu;
/// # let vs = nn::VarStore::new(device);
/// # let config = BertConfig::from_file(config_path);
/// let pooler: BertPooler = BertPooler::new(&vs.root(), &config);
/// let (batch_size, sequence_length, hidden_size) = (64, 128, 512);
/// let input_tensor = Tensor::rand(
/// &[batch_size, sequence_length, hidden_size],
/// (Kind::Float, device),
/// );
///
/// let pooler_output = no_grad(|| pooler.forward(&input_tensor));
/// ```
pub fn forward(&self, hidden_states: &Tensor) -> Tensor {
hidden_states.select(1, 0).apply(&self.lin).tanh()
}
}
/// Container for the BERT layer output.
pub struct BertLayerOutput {
/// Hidden states
pub hidden_state: Tensor,
/// Self attention scores
pub attention_weights: Option<Tensor>,
/// Cross attention scores
pub cross_attention_weights: Option<Tensor>,
}
/// Container for the BERT encoder output.
pub struct BertEncoderOutput {
/// Last hidden states from the model
pub hidden_state: Tensor,
/// Hidden states for all intermediate layers
pub all_hidden_states: Option<Vec<Tensor>>,
/// Attention weights for all intermediate layers
pub all_attentions: Option<Vec<Tensor>>,
}
| 38.583333 | 293 | 0.595477 |
2f5198e70c7ce3f4f87a7907532ef764ac7d7d85 | 2,546 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::{
contexts::WriteContext,
endpoint, recovery,
space::{rx_packet_numbers::AckManager, CryptoStream},
transmission,
};
use core::ops::RangeInclusive;
use s2n_quic_core::packet::number::PacketNumberSpace;
pub struct Payload<'a, Config: endpoint::Config> {
pub ack_manager: &'a mut AckManager,
pub crypto_stream: &'a mut CryptoStream,
pub packet_number_space: PacketNumberSpace,
pub recovery_manager: &'a mut recovery::Manager<Config>,
}
/// Rather than creating a packet with a very small CRYPTO frame (under 16 bytes), it would be
/// better to wait for another transmission and send something larger. This should be better for
/// performance, anyway, since you end up paying for encryption/decryption.
const MIN_SIZE: usize = 16;
impl<'a, Config: endpoint::Config> super::Payload for Payload<'a, Config> {
fn size_hint(&self, range: RangeInclusive<usize>) -> usize {
(*range.start()).max(MIN_SIZE)
}
fn on_transmit<W: WriteContext>(&mut self, context: &mut W) {
debug_assert!(
!context.transmission_mode().is_mtu_probing(),
"Early transmissions should not be used for MTU probing"
);
let did_send_ack = self.ack_manager.on_transmit(context);
// Payloads can only transmit and retransmit
if context.transmission_constraint().can_transmit()
|| context.transmission_constraint().can_retransmit()
{
let _ = self.crypto_stream.tx.on_transmit((), context);
// send PINGs last, since they might not actually be needed if there's an ack-eliciting
// frame already present in the payload
self.recovery_manager.on_transmit(context);
}
if did_send_ack {
// inform the ack manager the packet is populated
self.ack_manager.on_transmit_complete(context);
}
}
fn packet_number_space(&self) -> PacketNumberSpace {
self.packet_number_space
}
}
impl<'a, Config: endpoint::Config> transmission::interest::Provider for Payload<'a, Config> {
fn transmission_interest<Q: transmission::interest::Query>(
&self,
query: &mut Q,
) -> transmission::interest::Result {
self.ack_manager.transmission_interest(query)?;
self.crypto_stream.transmission_interest(query)?;
self.recovery_manager.transmission_interest(query)?;
Ok(())
}
}
| 35.859155 | 99 | 0.674391 |
8ab3d02df07fbd488912d28dc9e8ac1c08ab85cb | 1,325 | use crate::y2019::intcode::IntCode;
pub fn main(input: &str) -> String {
let a = find_a(input);
let b = find_b(input);
format!("{} {}", a, b)
}
fn find_a(input: &str) -> i32 {
let mut ic = IntCode::new(input);
let mut blocks = 0;
loop {
let x = ic.run();
let y = ic.run();
let t = ic.run();
if [x, y, t].iter().any(|o| o.is_none()) {
break blocks;
}
let tile = t.unwrap();
blocks += (tile == 2) as i32;
}
}
fn find_b(input: &str) -> i64 {
let mut ic = IntCode::new(input);
ic.store(0, 2);
let mut score = 0;
let mut paddle = None;
loop {
let x = ic.run();
let y = ic.run();
let t = ic.run();
if [x, y, t].iter().any(|o| o.is_none()) {
break score;
}
let pos = (y.unwrap(), x.unwrap());
if pos == (0, -1) {
score = t.unwrap();
continue;
}
let tile = t.unwrap();
if tile == 3 {
paddle = Some(pos);
} else if tile == 4 {
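            // Steer the paddle toward the ball: -1 = left, 0 = stay, 1 = right.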
let (_, x_ball) = pos;
let dx_pad_to_ball = match paddle {
Some((_, x_paddle)) => (x_ball - x_paddle).signum(),
_ => 0,
};
ic.push_input(dx_pad_to_ball);
}
}
}
| 24.537037 | 68 | 0.430943 |
8a7db78e357785a08d9468474ebb661b2025b9be | 1,435 | use ordered_float::OrderedFloat;
use serde::{Deserialize, Serialize};
use crate::EDGE_COST_DIMENSION;
pub type Preference = [f64; EDGE_COST_DIMENSION];
pub type Costs = [f64; EDGE_COST_DIMENSION];
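/// Weighted sum of edge costs: the dot product of `costs` with the
/// preference vector `alpha`.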
pub fn costs_by_alpha(costs: Costs, alpha: Preference) -> f64 {
costs
.iter()
.zip(alpha.iter())
.fold(0.0, |acc, (cost, factor)| acc + cost * factor)
}
pub fn add_edge_costs(a: Costs, b: Costs) -> Costs {
let mut result = [0.0; EDGE_COST_DIMENSION];
a.iter()
.zip(b.iter())
.enumerate()
.for_each(|(index, (first, second))| result[index] = first + second);
result
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
pub struct Coordinate {
pub lat: f64,
pub lng: f64,
}
impl Coordinate {
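    /// Straight-line distance in raw lat/lng space (planar Euclidean, not
    /// great-circle); only meaningful as a relative measure, e.g. for
    /// nearest-node lookups.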
pub fn distance_to(&self, other: &Coordinate) -> OrderedFloat<f64> {
((self.lat - other.lat).powi(2) + (self.lng - other.lng).powi(2))
.sqrt()
.into()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_add_edge_costs() {
let a = [1.5, 2.0, 0.7, 1.3];
let b = [1.3, 0.1, 0.3, 0.3];
let result = add_edge_costs(a, b);
assert_eq!([2.8, 2.1, 1.0, 1.6], result);
}
#[test]
fn test_distance_to() {
let a = Coordinate { lat: 5.0, lng: 7.0 };
let b = Coordinate { lat: 2.0, lng: 3.0 };
assert_eq!(a.distance_to(&b), OrderedFloat(5.0));
}
}
| 24.741379 | 77 | 0.567247 |
1e9be2e67f5a50f558059dc5a3f7267e2e82230e | 2,397 | use std::mem::ManuallyDrop;
use pyo3::prelude::*;
use n3_machine::PORT;
use n3_machine_ffi::Program;
use crate::args::parse_python_path;
use crate::code::BuildCode;
use crate::handler::PyHandler;
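// A minimal call sketch (hypothetical `program` and `handler` values), using
// the same GIL API as `finalize_python` below:
//
//     let gil = pyo3::Python::acquire_gil();
//     n3_execute(gil.python(), &program, handler)?;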
pub fn n3_execute(py: Python, program: &Program, handler: PyHandler) -> PyResult<()> {
let is_root = program.id.primary == 0;
let is_distributed = program.id.world > 1;
let mut machine_token = program.machine.split(':');
// Step 1. Load the program text
let mut text = n3_program::Program::load(&*program.text).unwrap();
// Step 2. Attach variables
if text.env.is_none() {
text.env = Some(Default::default());
}
let env = text.env.as_mut().unwrap();
env.insert("id".to_string(), Some(program.id.local.into()));
env.insert(
"machine".to_string(),
Some(machine_token.next().unwrap().to_string().into()),
);
env.insert("is root".to_string(), Some(is_root.into()));
env.insert("is distributed".to_string(), Some(is_distributed.into()));
let device_id = machine_token.next().unwrap_or("0").to_string();
// Step 3. Ready for DDP
{
let env = py.import("os")?.get("environ")?;
env.set_item("MASTER_ADDR", program.id.master_addr.to_string())?;
env.set_item("MASTER_PORT", PORT.to_string())?;
env.set_item("RANK", program.id.primary.to_string())?;
env.set_item("LOCAL_RANK", program.id.local.to_string())?;
env.set_item("WORLD_SIZE", program.id.world.to_string())?;
env.set_item("CUDA_VISIBLE_DEVICES", device_id)?;
// set python path to spawn the processes (workers)
py.import("multiprocessing")?
.call1("set_executable", (parse_python_path(),))?;
}
// Step 4. Define the node in REPL
let text = text.build(py, ())?.into_py(py);
// Step 5. Do its own work
text.call_method1(py, &program.command, (handler,))?;
Ok(())
}
/// # Safety
///
/// This function should be called when the Python interpreter is idle.
pub unsafe fn finalize_python() {
    // The GILGuard is acquired to finalize the Python interpreter.
    // It must not be dropped normally afterwards, because the interpreter
    // state it guards has already been torn down.
    //
    // The GILGuard itself lives on the stack, so there is no need to drop
    // the struct manually.
let _gil = ManuallyDrop::new(pyo3::Python::acquire_gil());
pyo3::ffi::Py_FinalizeEx();
}
| 32.835616 | 92 | 0.646642 |
6108b3882ea983f4e1ed6aa6d9d9ad3bd624bcae | 37,688 | //! FFI between the C++ zcashd codebase and the Rust Zcash crates.
//!
//! This is internal to zcashd and is not an officially-supported API.
// Catch documentation errors caused by code changes.
#![deny(broken_intra_doc_links)]
// Clippy has a default-deny lint to prevent dereferencing raw pointer arguments
// in a non-unsafe function. However, declaring a function as unsafe has the
// side-effect that the entire function body is treated as an unsafe {} block,
// and rustc will not enforce full safety checks on the parts of the function
// that would otherwise be safe.
//
// The functions in this crate are all for FFI usage, so it's obvious to the
// caller (which is only ever zcashd) that the arguments must satisfy the
// necessary assumptions. We therefore ignore this lint to retain the benefit of
// explicitly annotating the parts of each function that must themselves satisfy
// assumptions of underlying code.
//
// See https://github.com/rust-lang/rfcs/pull/2585 for more background.
#![allow(clippy::not_unsafe_ptr_arg_deref)]
use bellman::groth16::{Parameters, PreparedVerifyingKey, Proof};
use blake2s_simd::Params as Blake2sParams;
use bls12_381::Bls12;
use group::{cofactor::CofactorGroup, GroupEncoding};
use libc::{c_uchar, size_t};
use rand_core::{OsRng, RngCore};
use std::fs::File;
use std::io::BufReader;
use std::path::{Path, PathBuf};
use std::slice;
use subtle::CtOption;
#[cfg(not(target_os = "windows"))]
use std::ffi::OsStr;
#[cfg(not(target_os = "windows"))]
use std::os::unix::ffi::OsStrExt;
#[cfg(target_os = "windows")]
use std::ffi::OsString;
#[cfg(target_os = "windows")]
use std::os::windows::ffi::OsStringExt;
use zcash_primitives::{
block::equihash,
constants::{CRH_IVK_PERSONALIZATION, PROOF_GENERATION_KEY_GENERATOR, SPENDING_KEY_GENERATOR},
merkle_tree::MerklePath,
note_encryption::sapling_ka_agree,
primitives::{Diversifier, Note, PaymentAddress, ProofGenerationKey, Rseed, ViewingKey},
redjubjub::{self, Signature},
sapling::{merkle_hash, spend_sig},
transaction::components::Amount,
zip32,
};
use zcash_proofs::{
circuit::sapling::TREE_DEPTH as SAPLING_TREE_DEPTH,
load_parameters,
sapling::{SaplingProvingContext, SaplingVerificationContext},
sprout,
};
use zcash_history::{Entry as MMREntry, NodeData as MMRNodeData, Tree as MMRTree};
mod blake2b;
mod ed25519;
mod tracing_ffi;
#[cfg(test)]
mod tests;
static mut SAPLING_SPEND_VK: Option<PreparedVerifyingKey<Bls12>> = None;
static mut SAPLING_OUTPUT_VK: Option<PreparedVerifyingKey<Bls12>> = None;
static mut SPROUT_GROTH16_VK: Option<PreparedVerifyingKey<Bls12>> = None;
static mut SAPLING_SPEND_PARAMS: Option<Parameters<Bls12>> = None;
static mut SAPLING_OUTPUT_PARAMS: Option<Parameters<Bls12>> = None;
static mut SPROUT_GROTH16_PARAMS_PATH: Option<PathBuf> = None;
/// Converts CtOption<T> into Option<T>
fn de_ct<T>(ct: CtOption<T>) -> Option<T> {
if ct.is_some().into() {
Some(ct.unwrap())
} else {
None
}
}
/// Reads a jubjub scalar from a [u8; 32]
/// and multiplies it by the given base point.
fn fixed_scalar_mult(from: &[u8; 32], p_g: &jubjub::SubgroupPoint) -> jubjub::SubgroupPoint {
// We only call this with `from` being a valid jubjub::Scalar.
let f = jubjub::Scalar::from_bytes(from).unwrap();
p_g * f
}
/// Loads the zk-SNARK parameters into memory and saves paths as necessary.
/// Only called once.
#[no_mangle]
pub extern "C" fn librustzcash_init_zksnark_params(
#[cfg(not(target_os = "windows"))] spend_path: *const u8,
#[cfg(target_os = "windows")] spend_path: *const u16,
spend_path_len: usize,
#[cfg(not(target_os = "windows"))] output_path: *const u8,
#[cfg(target_os = "windows")] output_path: *const u16,
output_path_len: usize,
#[cfg(not(target_os = "windows"))] sprout_path: *const u8,
#[cfg(target_os = "windows")] sprout_path: *const u16,
sprout_path_len: usize,
) {
#[cfg(not(target_os = "windows"))]
let (spend_path, output_path, sprout_path) = {
(
OsStr::from_bytes(unsafe { slice::from_raw_parts(spend_path, spend_path_len) }),
OsStr::from_bytes(unsafe { slice::from_raw_parts(output_path, output_path_len) }),
if sprout_path.is_null() {
None
} else {
Some(OsStr::from_bytes(unsafe {
slice::from_raw_parts(sprout_path, sprout_path_len)
}))
},
)
};
#[cfg(target_os = "windows")]
let (spend_path, output_path, sprout_path) = {
(
OsString::from_wide(unsafe { slice::from_raw_parts(spend_path, spend_path_len) }),
OsString::from_wide(unsafe { slice::from_raw_parts(output_path, output_path_len) }),
if sprout_path.is_null() {
None
} else {
Some(OsString::from_wide(unsafe {
slice::from_raw_parts(sprout_path, sprout_path_len)
}))
},
)
};
let (spend_path, output_path, sprout_path) = (
Path::new(&spend_path),
Path::new(&output_path),
sprout_path.as_ref().map(|p| Path::new(p)),
);
// Load params
let (spend_params, spend_vk, output_params, output_vk, sprout_vk) =
load_parameters(spend_path, output_path, sprout_path);
// Caller is responsible for calling this function once, so
// these global mutations are safe.
unsafe {
SAPLING_SPEND_PARAMS = Some(spend_params);
SAPLING_OUTPUT_PARAMS = Some(output_params);
SPROUT_GROTH16_PARAMS_PATH = sprout_path.map(|p| p.to_owned());
SAPLING_SPEND_VK = Some(spend_vk);
SAPLING_OUTPUT_VK = Some(output_vk);
SPROUT_GROTH16_VK = sprout_vk;
}
}
/// Writes the "uncommitted" note value for empty leaves of the Merkle tree.
///
/// `result` must be a valid pointer to 32 bytes which will be written.
#[no_mangle]
pub extern "C" fn librustzcash_tree_uncommitted(result: *mut [c_uchar; 32]) {
let tmp = Note::uncommitted().to_bytes();
// Should be okay, caller is responsible for ensuring the pointer
// is a valid pointer to 32 bytes that can be mutated.
let result = unsafe { &mut *result };
*result = tmp;
}
/// Computes a merkle tree hash for a given depth. The `depth` parameter should
/// not be larger than 62.
///
/// `a` and `b` each must be of length 32, and must each be scalars of BLS12-381.
///
/// The result of the merkle tree hash is placed in `result`, which must also be
/// of length 32.
#[no_mangle]
pub extern "C" fn librustzcash_merkle_hash(
depth: size_t,
a: *const [c_uchar; 32],
b: *const [c_uchar; 32],
result: *mut [c_uchar; 32],
) {
// Should be okay, because caller is responsible for ensuring
// the pointers are valid pointers to 32 bytes.
let tmp = merkle_hash(depth, unsafe { &*a }, unsafe { &*b });
// Should be okay, caller is responsible for ensuring the pointer
// is a valid pointer to 32 bytes that can be mutated.
let result = unsafe { &mut *result };
*result = tmp;
}
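/// A minimal smoke-test sketch: hashing two "uncommitted" leaves at depth 0
/// must yield a parent value distinct from the leaves themselves.
#[test]
fn test_merkle_hash_uncommitted() {
    let mut leaf = [0u8; 32];
    librustzcash_tree_uncommitted(&mut leaf);

    let mut parent = [0u8; 32];
    librustzcash_merkle_hash(0, &leaf, &leaf, &mut parent);
    assert_ne!(parent, leaf);
}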
#[no_mangle] // ToScalar
pub extern "C" fn librustzcash_to_scalar(input: *const [c_uchar; 64], result: *mut [c_uchar; 32]) {
    // Should be okay, because caller is responsible for ensuring
    // the pointer is a valid pointer to 64 bytes, which is the size
    // of a wide scalar representation
let scalar = jubjub::Scalar::from_bytes_wide(unsafe { &*input });
let result = unsafe { &mut *result };
*result = scalar.to_bytes();
}
#[no_mangle]
pub extern "C" fn librustzcash_ask_to_ak(ask: *const [c_uchar; 32], result: *mut [c_uchar; 32]) {
let ask = unsafe { &*ask };
let ak = fixed_scalar_mult(ask, &SPENDING_KEY_GENERATOR);
let result = unsafe { &mut *result };
*result = ak.to_bytes();
}
#[no_mangle]
pub extern "C" fn librustzcash_nsk_to_nk(nsk: *const [c_uchar; 32], result: *mut [c_uchar; 32]) {
let nsk = unsafe { &*nsk };
let nk = fixed_scalar_mult(nsk, &PROOF_GENERATION_KEY_GENERATOR);
let result = unsafe { &mut *result };
*result = nk.to_bytes();
}
#[no_mangle]
pub extern "C" fn librustzcash_crh_ivk(
ak: *const [c_uchar; 32],
nk: *const [c_uchar; 32],
result: *mut [c_uchar; 32],
) {
let ak = unsafe { &*ak };
let nk = unsafe { &*nk };
let mut h = Blake2sParams::new()
.hash_length(32)
.personal(CRH_IVK_PERSONALIZATION)
.to_state();
h.update(ak);
h.update(nk);
let mut h = h.finalize().as_ref().to_vec();
    // Clear the five most significant bits of the final byte, so the hash
    // can be interpreted as a canonical scalar.
h[31] &= 0b0000_0111;
let result = unsafe { &mut *result };
result.copy_from_slice(&h);
}
#[no_mangle]
pub extern "C" fn librustzcash_check_diversifier(diversifier: *const [c_uchar; 11]) -> bool {
let diversifier = Diversifier(unsafe { *diversifier });
diversifier.g_d().is_some()
}
#[no_mangle]
pub extern "C" fn librustzcash_ivk_to_pkd(
ivk: *const [c_uchar; 32],
diversifier: *const [c_uchar; 11],
result: *mut [c_uchar; 32],
) -> bool {
let ivk = de_ct(jubjub::Scalar::from_bytes(unsafe { &*ivk }));
let diversifier = Diversifier(unsafe { *diversifier });
if let (Some(ivk), Some(g_d)) = (ivk, diversifier.g_d()) {
let pk_d = g_d * ivk;
let result = unsafe { &mut *result };
*result = pk_d.to_bytes();
true
} else {
false
}
}
/// Test generation of commitment randomness
#[test]
fn test_gen_r() {
let mut r1 = [0u8; 32];
let mut r2 = [0u8; 32];
// Verify different r values are generated
librustzcash_sapling_generate_r(&mut r1);
librustzcash_sapling_generate_r(&mut r2);
assert_ne!(r1, r2);
// Verify r values are valid in the field
let _ = jubjub::Scalar::from_bytes(&r1).unwrap();
let _ = jubjub::Scalar::from_bytes(&r2).unwrap();
}
/// Generate uniformly random scalar in Jubjub. The result is of length 32.
#[no_mangle]
pub extern "C" fn librustzcash_sapling_generate_r(result: *mut [c_uchar; 32]) {
// create random 64 byte buffer
let mut rng = OsRng;
let mut buffer = [0u8; 64];
rng.fill_bytes(&mut buffer);
// reduce to uniform value
let r = jubjub::Scalar::from_bytes_wide(&buffer);
let result = unsafe { &mut *result };
*result = r.to_bytes();
}
// Private utility function to get Note from C parameters
fn priv_get_note(
diversifier: *const [c_uchar; 11],
pk_d: *const [c_uchar; 32],
value: u64,
rcm: *const [c_uchar; 32],
) -> Result<Note, ()> {
let diversifier = Diversifier(unsafe { *diversifier });
let g_d = diversifier.g_d().ok_or(())?;
let pk_d = de_ct(jubjub::ExtendedPoint::from_bytes(unsafe { &*pk_d })).ok_or(())?;
let pk_d = de_ct(pk_d.into_subgroup()).ok_or(())?;
// Deserialize randomness
// If this is after ZIP 212, the caller has calculated rcm, and we don't need to call
// Note::derive_esk, so we just pretend the note was using this rcm all along.
let rseed = Rseed::BeforeZip212(de_ct(jubjub::Scalar::from_bytes(unsafe { &*rcm })).ok_or(())?);
let note = Note {
value,
g_d,
pk_d,
rseed,
};
Ok(note)
}
/// Compute a Sapling nullifier.
///
/// The `diversifier` parameter must be 11 bytes in length.
/// The `pk_d`, `rcm`, `ak` and `nk` parameters must be of length 32.
/// The result is also of length 32 and placed in `result`.
/// Returns false if `diversifier` or `pk_d` is not valid.
#[no_mangle]
pub extern "C" fn librustzcash_sapling_compute_nf(
diversifier: *const [c_uchar; 11],
pk_d: *const [c_uchar; 32],
value: u64,
rcm: *const [c_uchar; 32],
ak: *const [c_uchar; 32],
nk: *const [c_uchar; 32],
position: u64,
result: *mut [c_uchar; 32],
) -> bool {
let note = match priv_get_note(diversifier, pk_d, value, rcm) {
Ok(p) => p,
Err(_) => return false,
};
let ak = match de_ct(jubjub::ExtendedPoint::from_bytes(unsafe { &*ak })) {
Some(p) => p,
None => return false,
};
let ak = match de_ct(ak.into_subgroup()) {
Some(ak) => ak,
None => return false,
};
let nk = match de_ct(jubjub::ExtendedPoint::from_bytes(unsafe { &*nk })) {
Some(p) => p,
None => return false,
};
let nk = match de_ct(nk.into_subgroup()) {
Some(nk) => nk,
None => return false,
};
let vk = ViewingKey { ak, nk };
let nf = note.nf(&vk, position);
let result = unsafe { &mut *result };
result.copy_from_slice(&nf);
true
}
/// Compute a Sapling commitment.
///
/// The `diversifier` parameter must be 11 bytes in length.
/// The `pk_d` and `rcm` parameters must be of length 32.
/// The result is also of length 32 and placed in `result`.
/// Returns false if `diversifier` or `pk_d` is not valid.
#[no_mangle]
pub extern "C" fn librustzcash_sapling_compute_cmu(
diversifier: *const [c_uchar; 11],
pk_d: *const [c_uchar; 32],
value: u64,
rcm: *const [c_uchar; 32],
result: *mut [c_uchar; 32],
) -> bool {
let note = match priv_get_note(diversifier, pk_d, value, rcm) {
Ok(p) => p,
Err(_) => return false,
};
let result = unsafe { &mut *result };
*result = note.cmu().to_bytes();
true
}
/// Computes \[sk\] \[8\] P for some 32-byte point P and 32-byte scalar sk.
///
/// If P or sk are invalid, returns false. Otherwise, the result is written to
/// the 32-byte `result` buffer.
#[no_mangle]
pub extern "C" fn librustzcash_sapling_ka_agree(
p: *const [c_uchar; 32],
sk: *const [c_uchar; 32],
result: *mut [c_uchar; 32],
) -> bool {
// Deserialize p
let p = match de_ct(jubjub::ExtendedPoint::from_bytes(unsafe { &*p })) {
Some(p) => p,
None => return false,
};
// Deserialize sk
let sk = match de_ct(jubjub::Scalar::from_bytes(unsafe { &*sk })) {
Some(p) => p,
None => return false,
};
// Compute key agreement
let ka = sapling_ka_agree(&sk, &p);
// Produce result
let result = unsafe { &mut *result };
*result = ka.to_bytes();
true
}
/// Computes g_d = GH(diversifier), returning false if the diversifier is
/// invalid. Then computes \[esk\] g_d and writes the result to the 32-byte
/// `result` buffer. Returns false if `esk` is not a valid scalar.
#[no_mangle]
pub extern "C" fn librustzcash_sapling_ka_derivepublic(
diversifier: *const [c_uchar; 11],
esk: *const [c_uchar; 32],
result: *mut [c_uchar; 32],
) -> bool {
let diversifier = Diversifier(unsafe { *diversifier });
// Compute g_d from the diversifier
let g_d = match diversifier.g_d() {
Some(g) => g,
None => return false,
};
// Deserialize esk
let esk = match de_ct(jubjub::Scalar::from_bytes(unsafe { &*esk })) {
Some(p) => p,
None => return false,
};
let p = g_d * esk;
let result = unsafe { &mut *result };
*result = p.to_bytes();
true
}
/// Validates the provided Equihash solution against the given parameters, input
/// and nonce.
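///
/// As a worked example: Zcash's production parameters `n = 200, k = 9` give
/// an expected solution length of `(1 << 9) * (200 / 10 + 1) / 8 = 1344`
/// bytes.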
#[no_mangle]
pub extern "C" fn librustzcash_eh_isvalid(
n: u32,
k: u32,
input: *const c_uchar,
input_len: size_t,
nonce: *const c_uchar,
nonce_len: size_t,
soln: *const c_uchar,
soln_len: size_t,
) -> bool {
if (k >= n) || (n % 8 != 0) || (soln_len != (1 << k) * ((n / (k + 1)) as usize + 1) / 8) {
return false;
}
let rs_input = unsafe { slice::from_raw_parts(input, input_len) };
let rs_nonce = unsafe { slice::from_raw_parts(nonce, nonce_len) };
let rs_soln = unsafe { slice::from_raw_parts(soln, soln_len) };
equihash::is_valid_solution(n, k, rs_input, rs_nonce, rs_soln).is_ok()
}
/// Creates a Sapling verification context. Please free this when you're done.
#[no_mangle]
pub extern "C" fn librustzcash_sapling_verification_ctx_init() -> *mut SaplingVerificationContext {
let ctx = Box::new(SaplingVerificationContext::new());
Box::into_raw(ctx)
}
/// Frees a Sapling verification context returned from
/// [`librustzcash_sapling_verification_ctx_init`].
#[no_mangle]
pub extern "C" fn librustzcash_sapling_verification_ctx_free(ctx: *mut SaplingVerificationContext) {
drop(unsafe { Box::from_raw(ctx) });
}
const GROTH_PROOF_SIZE: usize = 48 // π_A
+ 96 // π_B
+ 48; // π_C
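// (48 + 96 + 48 = 192 bytes in total.)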
/// Check the validity of a Sapling Spend description, accumulating the value
/// commitment into the context.
#[no_mangle]
pub extern "C" fn librustzcash_sapling_check_spend(
ctx: *mut SaplingVerificationContext,
cv: *const [c_uchar; 32],
anchor: *const [c_uchar; 32],
nullifier: *const [c_uchar; 32],
rk: *const [c_uchar; 32],
zkproof: *const [c_uchar; GROTH_PROOF_SIZE],
spend_auth_sig: *const [c_uchar; 64],
sighash_value: *const [c_uchar; 32],
) -> bool {
// Deserialize the value commitment
let cv = match de_ct(jubjub::ExtendedPoint::from_bytes(unsafe { &*cv })) {
Some(p) => p,
None => return false,
};
// Deserialize the anchor, which should be an element
// of Fr.
let anchor = match de_ct(bls12_381::Scalar::from_bytes(unsafe { &*anchor })) {
Some(a) => a,
None => return false,
};
// Deserialize rk
let rk = match redjubjub::PublicKey::read(&(unsafe { &*rk })[..]) {
Ok(p) => p,
Err(_) => return false,
};
// Deserialize the signature
let spend_auth_sig = match Signature::read(&(unsafe { &*spend_auth_sig })[..]) {
Ok(sig) => sig,
Err(_) => return false,
};
// Deserialize the proof
let zkproof = match Proof::read(&(unsafe { &*zkproof })[..]) {
Ok(p) => p,
Err(_) => return false,
};
unsafe { &mut *ctx }.check_spend(
cv,
anchor,
unsafe { &*nullifier },
rk,
unsafe { &*sighash_value },
spend_auth_sig,
zkproof,
unsafe { SAPLING_SPEND_VK.as_ref() }.unwrap(),
)
}
/// Check the validity of a Sapling Output description, accumulating the value
/// commitment into the context.
#[no_mangle]
pub extern "C" fn librustzcash_sapling_check_output(
ctx: *mut SaplingVerificationContext,
cv: *const [c_uchar; 32],
cm: *const [c_uchar; 32],
epk: *const [c_uchar; 32],
zkproof: *const [c_uchar; GROTH_PROOF_SIZE],
) -> bool {
// Deserialize the value commitment
let cv = match de_ct(jubjub::ExtendedPoint::from_bytes(unsafe { &*cv })) {
Some(p) => p,
None => return false,
};
// Deserialize the commitment, which should be an element
// of Fr.
let cm = match de_ct(bls12_381::Scalar::from_bytes(unsafe { &*cm })) {
Some(a) => a,
None => return false,
};
// Deserialize the ephemeral key
let epk = match de_ct(jubjub::ExtendedPoint::from_bytes(unsafe { &*epk })) {
Some(p) => p,
None => return false,
};
// Deserialize the proof
let zkproof = match Proof::read(&(unsafe { &*zkproof })[..]) {
Ok(p) => p,
Err(_) => return false,
};
unsafe { &mut *ctx }.check_output(
cv,
cm,
epk,
zkproof,
unsafe { SAPLING_OUTPUT_VK.as_ref() }.unwrap(),
)
}
/// Finally checks the validity of the entire Sapling transaction given
/// valueBalance and the binding signature.
#[no_mangle]
pub extern "C" fn librustzcash_sapling_final_check(
ctx: *mut SaplingVerificationContext,
value_balance: i64,
binding_sig: *const [c_uchar; 64],
sighash_value: *const [c_uchar; 32],
) -> bool {
let value_balance = match Amount::from_i64(value_balance) {
Ok(vb) => vb,
Err(()) => return false,
};
// Deserialize the signature
let binding_sig = match Signature::read(&(unsafe { &*binding_sig })[..]) {
Ok(sig) => sig,
Err(_) => return false,
};
unsafe { &*ctx }.final_check(value_balance, unsafe { &*sighash_value }, binding_sig)
}
/// Sprout JoinSplit proof generation.
#[no_mangle]
pub extern "C" fn librustzcash_sprout_prove(
proof_out: *mut [c_uchar; GROTH_PROOF_SIZE],
phi: *const [c_uchar; 32],
rt: *const [c_uchar; 32],
h_sig: *const [c_uchar; 32],
// First input
in_sk1: *const [c_uchar; 32],
in_value1: u64,
in_rho1: *const [c_uchar; 32],
in_r1: *const [c_uchar; 32],
in_auth1: *const [c_uchar; sprout::WITNESS_PATH_SIZE],
// Second input
in_sk2: *const [c_uchar; 32],
in_value2: u64,
in_rho2: *const [c_uchar; 32],
in_r2: *const [c_uchar; 32],
in_auth2: *const [c_uchar; sprout::WITNESS_PATH_SIZE],
// First output
out_pk1: *const [c_uchar; 32],
out_value1: u64,
out_r1: *const [c_uchar; 32],
// Second output
out_pk2: *const [c_uchar; 32],
out_value2: u64,
out_r2: *const [c_uchar; 32],
// Public value
vpub_old: u64,
vpub_new: u64,
) {
// Load parameters from disk
let sprout_fs = File::open(
unsafe { &SPROUT_GROTH16_PARAMS_PATH }
.as_ref()
.expect("parameters should have been initialized"),
)
.expect("couldn't load Sprout groth16 parameters file");
let mut sprout_fs = BufReader::with_capacity(1024 * 1024, sprout_fs);
let params = Parameters::read(&mut sprout_fs, false)
.expect("couldn't deserialize Sprout JoinSplit parameters file");
drop(sprout_fs);
let proof = sprout::create_proof(
unsafe { *phi },
unsafe { *rt },
unsafe { *h_sig },
unsafe { *in_sk1 },
in_value1,
unsafe { *in_rho1 },
unsafe { *in_r1 },
unsafe { &*in_auth1 },
unsafe { *in_sk2 },
in_value2,
unsafe { *in_rho2 },
unsafe { *in_r2 },
unsafe { &*in_auth2 },
unsafe { *out_pk1 },
out_value1,
unsafe { *out_r1 },
unsafe { *out_pk2 },
out_value2,
unsafe { *out_r2 },
vpub_old,
vpub_new,
¶ms,
);
proof
.write(&mut (unsafe { &mut *proof_out })[..])
.expect("should be able to serialize a proof");
}
/// Sprout JoinSplit proof verification.
#[no_mangle]
pub extern "C" fn librustzcash_sprout_verify(
proof: *const [c_uchar; GROTH_PROOF_SIZE],
rt: *const [c_uchar; 32],
h_sig: *const [c_uchar; 32],
mac1: *const [c_uchar; 32],
mac2: *const [c_uchar; 32],
nf1: *const [c_uchar; 32],
nf2: *const [c_uchar; 32],
cm1: *const [c_uchar; 32],
cm2: *const [c_uchar; 32],
vpub_old: u64,
vpub_new: u64,
) -> bool {
sprout::verify_proof(
unsafe { &*proof },
unsafe { &*rt },
unsafe { &*h_sig },
unsafe { &*mac1 },
unsafe { &*mac2 },
unsafe { &*nf1 },
unsafe { &*nf2 },
unsafe { &*cm1 },
unsafe { &*cm2 },
vpub_old,
vpub_new,
unsafe { SPROUT_GROTH16_VK.as_ref() }.expect("parameters should have been initialized"),
)
}
/// This function (using the proving context) constructs an Output proof given
/// the necessary witness information. It outputs `cv` and the `zkproof`.
#[no_mangle]
pub extern "C" fn librustzcash_sapling_output_proof(
ctx: *mut SaplingProvingContext,
esk: *const [c_uchar; 32],
payment_address: *const [c_uchar; 43],
rcm: *const [c_uchar; 32],
value: u64,
cv: *mut [c_uchar; 32],
zkproof: *mut [c_uchar; GROTH_PROOF_SIZE],
) -> bool {
// Grab `esk`, which the caller should have constructed for the DH key exchange.
let esk = match de_ct(jubjub::Scalar::from_bytes(unsafe { &*esk })) {
Some(p) => p,
None => return false,
};
// Grab the payment address from the caller
let payment_address = match PaymentAddress::from_bytes(unsafe { &*payment_address }) {
Some(pa) => pa,
None => return false,
};
// The caller provides the commitment randomness for the output note
let rcm = match de_ct(jubjub::Scalar::from_bytes(unsafe { &*rcm })) {
Some(p) => p,
None => return false,
};
// Create proof
let (proof, value_commitment) = unsafe { &mut *ctx }.output_proof(
esk,
payment_address,
rcm,
value,
unsafe { SAPLING_OUTPUT_PARAMS.as_ref() }.unwrap(),
);
// Write the proof out to the caller
proof
.write(&mut (unsafe { &mut *zkproof })[..])
.expect("should be able to serialize a proof");
// Write the value commitment to the caller
*unsafe { &mut *cv } = value_commitment.to_bytes();
true
}
/// Computes the signature for each Spend description, given the key `ask`, the
/// re-randomization `ar`, the 32-byte sighash `sighash`, and an output `result`
/// buffer of 64 bytes for the signature.
///
/// This function will fail if the provided `ask` or `ar` are invalid.
#[no_mangle]
pub extern "C" fn librustzcash_sapling_spend_sig(
ask: *const [c_uchar; 32],
ar: *const [c_uchar; 32],
sighash: *const [c_uchar; 32],
result: *mut [c_uchar; 64],
) -> bool {
// The caller provides the re-randomization of `ak`.
let ar = match de_ct(jubjub::Scalar::from_bytes(unsafe { &*ar })) {
Some(p) => p,
None => return false,
};
// The caller provides `ask`, the spend authorizing key.
let ask = match redjubjub::PrivateKey::read(&(unsafe { &*ask })[..]) {
Ok(p) => p,
Err(_) => return false,
};
// Initialize secure RNG
let mut rng = OsRng;
// Do the signing
let sig = spend_sig(ask, ar, unsafe { &*sighash }, &mut rng);
// Write out the signature
sig.write(&mut (unsafe { &mut *result })[..])
.expect("result should be 64 bytes");
true
}
/// This function (using the proving context) constructs a binding signature.
///
/// You must provide the intended valueBalance so that we can internally check
/// consistency.
#[no_mangle]
pub extern "C" fn librustzcash_sapling_binding_sig(
ctx: *const SaplingProvingContext,
value_balance: i64,
sighash: *const [c_uchar; 32],
result: *mut [c_uchar; 64],
) -> bool {
let value_balance = match Amount::from_i64(value_balance) {
Ok(vb) => vb,
Err(()) => return false,
};
// Sign
let sig = match unsafe { &*ctx }.binding_sig(value_balance, unsafe { &*sighash }) {
Ok(s) => s,
Err(_) => return false,
};
// Write out signature
sig.write(&mut (unsafe { &mut *result })[..])
.expect("result should be 64 bytes");
true
}
/// This function (using the proving context) constructs a Spend proof given the
/// necessary witness information. It outputs `cv` (the value commitment) and
/// `rk` (so that you don't have to compute it) along with the proof.
#[no_mangle]
pub extern "C" fn librustzcash_sapling_spend_proof(
ctx: *mut SaplingProvingContext,
ak: *const [c_uchar; 32],
nsk: *const [c_uchar; 32],
diversifier: *const [c_uchar; 11],
rcm: *const [c_uchar; 32],
ar: *const [c_uchar; 32],
value: u64,
anchor: *const [c_uchar; 32],
merkle_path: *const [c_uchar; 1 + 33 * SAPLING_TREE_DEPTH + 8],
cv: *mut [c_uchar; 32],
rk_out: *mut [c_uchar; 32],
zkproof: *mut [c_uchar; GROTH_PROOF_SIZE],
) -> bool {
// Grab `ak` from the caller, which should be a point.
let ak = match de_ct(jubjub::ExtendedPoint::from_bytes(unsafe { &*ak })) {
Some(p) => p,
None => return false,
};
// `ak` should be prime order.
let ak = match de_ct(ak.into_subgroup()) {
Some(p) => p,
None => return false,
};
// Grab `nsk` from the caller
let nsk = match de_ct(jubjub::Scalar::from_bytes(unsafe { &*nsk })) {
Some(p) => p,
None => return false,
};
// Construct the proof generation key
let proof_generation_key = ProofGenerationKey {
ak: ak.clone(),
nsk,
};
// Grab the diversifier from the caller
let diversifier = Diversifier(unsafe { *diversifier });
// The caller chooses the note randomness
// If this is after ZIP 212, the caller has calculated rcm, and we don't need to call
// Note::derive_esk, so we just pretend the note was using this rcm all along.
let rseed = match de_ct(jubjub::Scalar::from_bytes(unsafe { &*rcm })) {
Some(p) => Rseed::BeforeZip212(p),
None => return false,
};
// The caller also chooses the re-randomization of ak
let ar = match de_ct(jubjub::Scalar::from_bytes(unsafe { &*ar })) {
Some(p) => p,
None => return false,
};
// We need to compute the anchor of the Spend.
let anchor = match de_ct(bls12_381::Scalar::from_bytes(unsafe { &*anchor })) {
Some(p) => p,
None => return false,
};
// Parse the Merkle path from the caller
let merkle_path = match MerklePath::from_slice(unsafe { &(&*merkle_path)[..] }) {
Ok(w) => w,
Err(_) => return false,
};
// Create proof
let (proof, value_commitment, rk) = unsafe { &mut *ctx }
.spend_proof(
proof_generation_key,
diversifier,
rseed,
ar,
value,
anchor,
merkle_path,
unsafe { SAPLING_SPEND_PARAMS.as_ref() }.unwrap(),
unsafe { SAPLING_SPEND_VK.as_ref() }.unwrap(),
)
.expect("proving should not fail");
// Write value commitment to caller
*unsafe { &mut *cv } = value_commitment.to_bytes();
// Write proof out to caller
proof
.write(&mut (unsafe { &mut *zkproof })[..])
.expect("should be able to serialize a proof");
// Write out `rk` to the caller
rk.write(&mut unsafe { &mut *rk_out }[..])
.expect("should be able to write to rk_out");
true
}
/// Creates a Sapling proving context. Please free this when you're done.
#[no_mangle]
pub extern "C" fn librustzcash_sapling_proving_ctx_init() -> *mut SaplingProvingContext {
let ctx = Box::new(SaplingProvingContext::new());
Box::into_raw(ctx)
}
/// Frees a Sapling proving context returned from
/// [`librustzcash_sapling_proving_ctx_init`].
#[no_mangle]
pub extern "C" fn librustzcash_sapling_proving_ctx_free(ctx: *mut SaplingProvingContext) {
drop(unsafe { Box::from_raw(ctx) });
}
/// Derive the master ExtendedSpendingKey from a seed.
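///
/// The 169-byte output buffer holds the ZIP 32 serialization: depth (1) ||
/// parent FVK tag (4) || child index (4) || chain code (32) || expsk (96) ||
/// dk (32).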
#[no_mangle]
pub extern "C" fn librustzcash_zip32_xsk_master(
seed: *const c_uchar,
seedlen: size_t,
xsk_master: *mut [c_uchar; 169],
) {
let seed = unsafe { std::slice::from_raw_parts(seed, seedlen) };
let xsk = zip32::ExtendedSpendingKey::master(seed);
xsk.write(&mut (unsafe { &mut *xsk_master })[..])
.expect("should be able to serialize an ExtendedSpendingKey");
}
/// Derive a child ExtendedSpendingKey from a parent.
#[no_mangle]
pub extern "C" fn librustzcash_zip32_xsk_derive(
xsk_parent: *const [c_uchar; 169],
i: u32,
xsk_i: *mut [c_uchar; 169],
) {
let xsk_parent = zip32::ExtendedSpendingKey::read(&unsafe { *xsk_parent }[..])
.expect("valid ExtendedSpendingKey");
let i = zip32::ChildIndex::from_index(i);
let xsk = xsk_parent.derive_child(i);
xsk.write(&mut (unsafe { &mut *xsk_i })[..])
.expect("should be able to serialize an ExtendedSpendingKey");
}
/// Derive a child ExtendedFullViewingKey from a parent.
#[no_mangle]
pub extern "C" fn librustzcash_zip32_xfvk_derive(
xfvk_parent: *const [c_uchar; 169],
i: u32,
xfvk_i: *mut [c_uchar; 169],
) -> bool {
let xfvk_parent = zip32::ExtendedFullViewingKey::read(&unsafe { *xfvk_parent }[..])
.expect("valid ExtendedFullViewingKey");
let i = zip32::ChildIndex::from_index(i);
let xfvk = match xfvk_parent.derive_child(i) {
Ok(xfvk) => xfvk,
Err(_) => return false,
};
xfvk.write(&mut (unsafe { &mut *xfvk_i })[..])
.expect("should be able to serialize an ExtendedFullViewingKey");
true
}
/// Derive a PaymentAddress from an ExtendedFullViewingKey.
#[no_mangle]
pub extern "C" fn librustzcash_zip32_xfvk_address(
xfvk: *const [c_uchar; 169],
j: *const [c_uchar; 11],
j_ret: *mut [c_uchar; 11],
addr_ret: *mut [c_uchar; 43],
) -> bool {
let xfvk = zip32::ExtendedFullViewingKey::read(&unsafe { *xfvk }[..])
.expect("valid ExtendedFullViewingKey");
let j = zip32::DiversifierIndex(unsafe { *j });
let addr = match xfvk.address(j) {
Ok(addr) => addr,
Err(_) => return false,
};
let j_ret = unsafe { &mut *j_ret };
let addr_ret = unsafe { &mut *addr_ret };
j_ret.copy_from_slice(&(addr.0).0);
addr_ret.copy_from_slice(&addr.1.to_bytes());
true
}
fn construct_mmr_tree(
// Consensus branch id
cbranch: u32,
// Length of tree in array representation
t_len: u32,
// Indices of provided tree nodes, length of p_len+e_len
ni_ptr: *const u32,
// Provided tree nodes data, length of p_len+e_len
n_ptr: *const [c_uchar; zcash_history::MAX_ENTRY_SIZE],
// Peaks count
p_len: size_t,
// Extra nodes loaded (for deletion) count
e_len: size_t,
) -> Result<MMRTree, &'static str> {
let (indices, nodes) = unsafe {
(
slice::from_raw_parts(ni_ptr, p_len + e_len),
slice::from_raw_parts(n_ptr, p_len + e_len),
)
};
let mut peaks: Vec<_> = indices
.iter()
.zip(nodes.iter())
.map(
|(index, node)| match MMREntry::from_bytes(cbranch, &node[..]) {
Ok(entry) => Ok((*index, entry)),
Err(_) => Err("Invalid encoding"),
},
)
.collect::<Result<_, _>>()?;
let extra = peaks.split_off(p_len);
Ok(MMRTree::new(t_len, peaks, extra))
}
#[no_mangle]
pub extern "system" fn librustzcash_mmr_append(
// Consensus branch id
cbranch: u32,
// Length of tree in array representation
t_len: u32,
// Indices of provided tree nodes, length of p_len
ni_ptr: *const u32,
// Provided tree nodes data, length of p_len
n_ptr: *const [c_uchar; zcash_history::MAX_ENTRY_SIZE],
// Peaks count
p_len: size_t,
// New node pointer
nn_ptr: *const [u8; zcash_history::MAX_NODE_DATA_SIZE],
// Return of root commitment
rt_ret: *mut [u8; 32],
    // Return buffer for appended leaves; should be pre-allocated to hold ceil(log2(t_len)) entries
buf_ret: *mut [c_uchar; zcash_history::MAX_NODE_DATA_SIZE],
) -> u32 {
let new_node_bytes: &[u8; zcash_history::MAX_NODE_DATA_SIZE] = unsafe {
match nn_ptr.as_ref() {
Some(r) => r,
None => {
return 0;
} // Null pointer passed, error
}
};
let mut tree = match construct_mmr_tree(cbranch, t_len, ni_ptr, n_ptr, p_len, 0) {
Ok(t) => t,
_ => {
return 0;
} // error
};
let node = match MMRNodeData::from_bytes(cbranch, &new_node_bytes[..]) {
Ok(node) => node,
_ => {
return 0;
} // error
};
let appended = match tree.append_leaf(node) {
Ok(appended) => appended,
_ => {
return 0;
}
};
let return_count = appended.len();
let root_node = tree
.root_node()
.expect("Just added, should resolve always; qed");
unsafe {
*rt_ret = root_node.data().hash();
for (idx, next_buf) in slice::from_raw_parts_mut(buf_ret, return_count as usize)
.iter_mut()
.enumerate()
{
tree.resolve_link(appended[idx])
.expect("This was generated by the tree and thus resolvable; qed")
.data()
.write(&mut &mut next_buf[..])
.expect("Write using cursor with enough buffer size cannot fail; qed");
}
}
return_count as u32
}
#[no_mangle]
pub extern "system" fn librustzcash_mmr_delete(
// Consensus branch id
cbranch: u32,
// Length of tree in array representation
t_len: u32,
// Indices of provided tree nodes, length of p_len+e_len
ni_ptr: *const u32,
// Provided tree nodes data, length of p_len+e_len
n_ptr: *const [c_uchar; zcash_history::MAX_ENTRY_SIZE],
// Peaks count
p_len: size_t,
// Extra nodes loaded (for deletion) count
e_len: size_t,
// Return of root commitment
rt_ret: *mut [u8; 32],
) -> u32 {
let mut tree = match construct_mmr_tree(cbranch, t_len, ni_ptr, n_ptr, p_len, e_len) {
Ok(t) => t,
_ => {
return 0;
} // error
};
let truncate_len = match tree.truncate_leaf() {
Ok(v) => v,
_ => {
return 0;
} // Error
};
unsafe {
*rt_ret = tree
.root_node()
.expect("Just generated without errors, root should be resolving")
.data()
.hash();
}
truncate_len
}
#[no_mangle]
pub extern "system" fn librustzcash_mmr_hash_node(
cbranch: u32,
n_ptr: *const [u8; zcash_history::MAX_NODE_DATA_SIZE],
h_ret: *mut [u8; 32],
) -> u32 {
let node_bytes: &[u8; zcash_history::MAX_NODE_DATA_SIZE] = unsafe {
match n_ptr.as_ref() {
Some(r) => r,
None => return 1,
}
};
let node = match MMRNodeData::from_bytes(cbranch, &node_bytes[..]) {
Ok(n) => n,
_ => return 1, // error
};
unsafe {
*h_ret = node.hash();
}
0
}
#[no_mangle]
pub extern "C" fn librustzcash_getrandom(buf: *mut u8, buf_len: usize) {
let buf = unsafe { slice::from_raw_parts_mut(buf, buf_len) };
OsRng.fill_bytes(buf);
}
| 30.078212 | 100 | 0.616164 |
e6df58baf606e35d8352e87335a136dc5cacb20f | 1,749 | use nu_protocol::ast::Call;
use nu_protocol::engine::{Command, EngineState, Stack};
use nu_protocol::{Category, Example, PipelineData, ShellError, Signature, Value};
#[derive(Clone)]
pub struct Debug;
impl Command for Debug {
fn name(&self) -> &str {
"debug"
}
fn usage(&self) -> &str {
"Debug print the value(s) piped in."
}
fn signature(&self) -> Signature {
Signature::build("debug").category(Category::Core).switch(
"raw",
"Prints the raw value representation",
Some('r'),
)
}
fn run(
&self,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
let head = call.head;
let config = stack.get_config()?;
let raw = call.has_flag("raw");
input.map(
move |x| {
if raw {
Value::String {
val: x.debug_value(),
span: head,
}
} else {
Value::String {
val: x.debug_string(", ", &config),
span: head,
}
}
},
engine_state.ctrlc.clone(),
)
}
fn examples(&self) -> Vec<Example> {
vec![Example {
description: "Describe the type of a string",
example: "'hello' | debug",
result: Some(Value::test_string("hello")),
}]
}
}
#[cfg(test)]
mod test {
#[test]
fn test_examples() {
use super::Debug;
use crate::test_examples;
test_examples(Debug {})
}
}
| 24.291667 | 81 | 0.467696 |
38577964b49d04d63a48a76cdf235e06440e7d9b | 55 | pub type Error = Box<dyn std::error::Error + 'static>;
| 27.5 | 54 | 0.672727 |
edb646c11304197daf669a629d3771c568525329 | 18,005 | //! ra_ide_api crate provides "ide-centric" APIs for the rust-analyzer. That is,
//! it generally operates with files and text ranges, and returns results as
//! Strings, suitable for displaying to the human.
//!
//! What powers this API are the `RootDatabase` struct, which defines a `salsa`
//! database, and the `ra_hir` crate, where the majority of the analysis happens.
//! However, IDE-specific bits of the analysis (most notably completion) happen
//! in this crate.
// For proving that RootDatabase is RefUnwindSafe.
#![recursion_limit = "128"]
mod db;
pub mod mock_analysis;
mod symbol_index;
mod change;
mod status;
mod completion;
mod runnables;
mod name_ref_kind;
mod goto_definition;
mod goto_type_definition;
mod extend_selection;
mod hover;
mod call_info;
mod syntax_highlighting;
mod parent_module;
mod references;
mod impls;
mod assists;
mod diagnostics;
mod syntax_tree;
mod folding_ranges;
mod line_index;
mod line_index_utils;
mod join_lines;
mod typing;
mod matching_brace;
mod display;
mod inlay_hints;
#[cfg(test)]
mod marks;
#[cfg(test)]
mod test_utils;
use std::sync::Arc;
use ra_db::{
salsa::{self, ParallelDatabase},
CheckCanceled, SourceDatabase,
};
use ra_syntax::{SourceFile, TextRange, TextUnit};
use ra_text_edit::TextEdit;
use relative_path::RelativePathBuf;
use crate::{db::LineIndexDatabase, symbol_index::FileSymbol};
pub use crate::{
assists::{Assist, AssistId},
change::{AnalysisChange, LibraryData},
completion::{CompletionItem, CompletionItemKind, InsertTextFormat},
diagnostics::Severity,
display::{file_structure, FunctionSignature, NavigationTarget, StructureNode},
folding_ranges::{Fold, FoldKind},
hover::HoverResult,
inlay_hints::{InlayHint, InlayKind},
line_index::{LineCol, LineIndex},
line_index_utils::translate_offset_with_edit,
references::ReferenceSearchResult,
runnables::{Runnable, RunnableKind},
syntax_highlighting::HighlightedRange,
};
pub use hir::Documentation;
pub use ra_db::{
Canceled, CrateGraph, CrateId, Edition, FileId, FilePosition, FileRange, SourceRootId,
};
pub type Cancelable<T> = Result<T, Canceled>;
#[derive(Debug)]
pub struct SourceChange {
pub label: String,
pub source_file_edits: Vec<SourceFileEdit>,
pub file_system_edits: Vec<FileSystemEdit>,
pub cursor_position: Option<FilePosition>,
}
impl SourceChange {
/// Creates a new SourceChange with the given label
/// from the edits.
pub(crate) fn from_edits<L: Into<String>>(
label: L,
source_file_edits: Vec<SourceFileEdit>,
file_system_edits: Vec<FileSystemEdit>,
) -> Self {
SourceChange {
label: label.into(),
source_file_edits,
file_system_edits,
cursor_position: None,
}
}
/// Creates a new SourceChange with the given label,
/// containing only the given `SourceFileEdits`.
pub(crate) fn source_file_edits<L: Into<String>>(label: L, edits: Vec<SourceFileEdit>) -> Self {
SourceChange {
label: label.into(),
source_file_edits: edits,
file_system_edits: vec![],
cursor_position: None,
}
}
/// Creates a new SourceChange with the given label,
/// containing only the given `FileSystemEdits`.
pub(crate) fn file_system_edits<L: Into<String>>(label: L, edits: Vec<FileSystemEdit>) -> Self {
SourceChange {
label: label.into(),
source_file_edits: vec![],
file_system_edits: edits,
cursor_position: None,
}
}
/// Creates a new SourceChange with the given label,
/// containing only a single `SourceFileEdit`.
pub(crate) fn source_file_edit<L: Into<String>>(label: L, edit: SourceFileEdit) -> Self {
SourceChange::source_file_edits(label, vec![edit])
}
/// Creates a new SourceChange with the given label
/// from the given `FileId` and `TextEdit`
pub(crate) fn source_file_edit_from<L: Into<String>>(
label: L,
file_id: FileId,
edit: TextEdit,
) -> Self {
SourceChange::source_file_edit(label, SourceFileEdit { file_id, edit })
}
/// Creates a new SourceChange with the given label
/// from the given `FileId` and `TextEdit`
pub(crate) fn file_system_edit<L: Into<String>>(label: L, edit: FileSystemEdit) -> Self {
SourceChange::file_system_edits(label, vec![edit])
}
/// Sets the cursor position to the given `FilePosition`
pub(crate) fn with_cursor(mut self, cursor_position: FilePosition) -> Self {
self.cursor_position = Some(cursor_position);
self
}
/// Sets the cursor position to the given `FilePosition`
pub(crate) fn with_cursor_opt(mut self, cursor_position: Option<FilePosition>) -> Self {
self.cursor_position = cursor_position;
self
}
}
#[derive(Debug)]
pub struct SourceFileEdit {
pub file_id: FileId,
pub edit: TextEdit,
}
#[derive(Debug)]
pub enum FileSystemEdit {
CreateFile { source_root: SourceRootId, path: RelativePathBuf },
MoveFile { src: FileId, dst_source_root: SourceRootId, dst_path: RelativePathBuf },
}
#[derive(Debug)]
pub struct Diagnostic {
pub message: String,
pub range: TextRange,
pub fix: Option<SourceChange>,
pub severity: Severity,
}
#[derive(Debug)]
pub struct Query {
query: String,
lowercased: String,
only_types: bool,
libs: bool,
exact: bool,
limit: usize,
}
impl Query {
pub fn new(query: String) -> Query {
let lowercased = query.to_lowercase();
Query {
query,
lowercased,
only_types: false,
libs: false,
exact: false,
limit: usize::max_value(),
}
}
pub fn only_types(&mut self) {
self.only_types = true;
}
pub fn libs(&mut self) {
self.libs = true;
}
pub fn exact(&mut self) {
self.exact = true;
}
pub fn limit(&mut self, limit: usize) {
self.limit = limit
}
}
#[derive(Debug)]
pub struct RangeInfo<T> {
pub range: TextRange,
pub info: T,
}
impl<T> RangeInfo<T> {
pub fn new(range: TextRange, info: T) -> RangeInfo<T> {
RangeInfo { range, info }
}
}
#[derive(Debug)]
pub struct CallInfo {
pub signature: FunctionSignature,
pub active_parameter: Option<usize>,
}
/// `AnalysisHost` stores the current state of the world.
#[derive(Debug)]
pub struct AnalysisHost {
db: db::RootDatabase,
}
impl Default for AnalysisHost {
fn default() -> AnalysisHost {
AnalysisHost::new(None)
}
}
impl AnalysisHost {
    pub fn new(lru_capacity: Option<usize>) -> AnalysisHost {
        AnalysisHost { db: db::RootDatabase::new(lru_capacity) }
}
/// Returns a snapshot of the current state, which you can query for
/// semantic information.
pub fn analysis(&self) -> Analysis {
Analysis { db: self.db.snapshot() }
}
/// Applies changes to the current state of the world. If there are
/// outstanding snapshots, they will be canceled.
pub fn apply_change(&mut self, change: AnalysisChange) {
self.db.apply_change(change)
}
pub fn maybe_collect_garbage(&mut self) {
self.db.maybe_collect_garbage();
}
pub fn collect_garbage(&mut self) {
self.db.collect_garbage();
}
/// NB: this clears the database
pub fn per_query_memory_usage(&mut self) -> Vec<(String, ra_prof::Bytes)> {
self.db.per_query_memory_usage()
}
pub fn raw_database(&self) -> &(impl hir::db::HirDatabase + salsa::Database) {
&self.db
}
}
/// Analysis is a snapshot of a world state at a moment in time. It is the main
/// entry point for asking semantic information about the world. When the world
/// state is advanced using `AnalysisHost::apply_change` method, all existing
/// `Analysis` are canceled (most methods return `Err(Canceled)`).
#[derive(Debug)]
pub struct Analysis {
db: salsa::Snapshot<db::RootDatabase>,
}
// As a general design guideline, `Analysis` API are intended to be independent
// from the language server protocol. That is, when exposing some functionality
// we should think in terms of "what API makes most sense" and not in terms of
// "what types LSP uses". Although currently LSP is the only consumer of the
// API, the API should in theory be usable as a library, or via a different
// protocol.
impl Analysis {
    // Creates an analysis instance for a single file, without any external
    // dependencies, stdlib support or ability to apply changes. See
// `AnalysisHost` for creating a fully-featured analysis.
pub fn from_single_file(text: String) -> (Analysis, FileId) {
let mut host = AnalysisHost::default();
let source_root = SourceRootId(0);
let mut change = AnalysisChange::new();
change.add_root(source_root, true);
let mut crate_graph = CrateGraph::default();
let file_id = FileId(0);
crate_graph.add_crate_root(file_id, Edition::Edition2018);
change.add_file(source_root, file_id, "main.rs".into(), Arc::new(text));
change.set_crate_graph(crate_graph);
host.apply_change(change);
(host.analysis(), file_id)
}
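    // Usage sketch (hypothetical snippet, not exercised in this crate):
    //
    //     let (analysis, file_id) = Analysis::from_single_file("fn main() {}".to_string());
    //     let tree = analysis.syntax_tree(file_id, None).unwrap();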
/// Debug info about the current state of the analysis
pub fn status(&self) -> Cancelable<String> {
self.with_db(|db| status::status(&*db))
}
/// Gets the text of the source file.
pub fn file_text(&self, file_id: FileId) -> Cancelable<Arc<String>> {
self.with_db(|db| db.file_text(file_id))
}
/// Gets the syntax tree of the file.
pub fn parse(&self, file_id: FileId) -> Cancelable<SourceFile> {
self.with_db(|db| db.parse(file_id).tree())
}
/// Gets the file's `LineIndex`: data structure to convert between absolute
/// offsets and line/column representation.
pub fn file_line_index(&self, file_id: FileId) -> Cancelable<Arc<LineIndex>> {
self.with_db(|db| db.line_index(file_id))
}
/// Selects the next syntactic nodes encompassing the range.
pub fn extend_selection(&self, frange: FileRange) -> Cancelable<TextRange> {
self.with_db(|db| extend_selection::extend_selection(db, frange))
}
/// Returns position of the matching brace (all types of braces are
/// supported).
pub fn matching_brace(&self, position: FilePosition) -> Cancelable<Option<TextUnit>> {
self.with_db(|db| {
let parse = db.parse(position.file_id);
let file = parse.tree();
matching_brace::matching_brace(&file, position.offset)
})
}
/// Returns a syntax tree represented as `String`, for debug purposes.
// FIXME: use a better name here.
pub fn syntax_tree(
&self,
file_id: FileId,
text_range: Option<TextRange>,
) -> Cancelable<String> {
self.with_db(|db| syntax_tree::syntax_tree(&db, file_id, text_range))
}
/// Returns an edit to remove all newlines in the range, cleaning up minor
/// stuff like trailing commas.
pub fn join_lines(&self, frange: FileRange) -> Cancelable<SourceChange> {
self.with_db(|db| {
let parse = db.parse(frange.file_id);
let file_edit = SourceFileEdit {
file_id: frange.file_id,
edit: join_lines::join_lines(&parse.tree(), frange.range),
};
SourceChange::source_file_edit("join lines", file_edit)
})
}
/// Returns an edit which should be applied when opening a new line, fixing
/// up minor stuff like continuing the comment.
pub fn on_enter(&self, position: FilePosition) -> Cancelable<Option<SourceChange>> {
self.with_db(|db| typing::on_enter(&db, position))
}
/// Returns an edit which should be applied after `=` was typed. Primarily,
/// this works when adding `let =`.
// FIXME: use a snippet completion instead of this hack here.
pub fn on_eq_typed(&self, position: FilePosition) -> Cancelable<Option<SourceChange>> {
self.with_db(|db| {
let parse = db.parse(position.file_id);
let file = parse.tree();
let edit = typing::on_eq_typed(&file, position.offset)?;
Some(SourceChange::source_file_edit(
"add semicolon",
SourceFileEdit { edit, file_id: position.file_id },
))
})
}
/// Returns an edit which should be applied when a dot ('.') is typed on a blank line, indenting the line appropriately.
pub fn on_dot_typed(&self, position: FilePosition) -> Cancelable<Option<SourceChange>> {
self.with_db(|db| typing::on_dot_typed(&db, position))
}
/// Returns a tree representation of symbols in the file. Useful to draw a
/// file outline.
pub fn file_structure(&self, file_id: FileId) -> Cancelable<Vec<StructureNode>> {
self.with_db(|db| file_structure(&db.parse(file_id).tree()))
}
/// Returns a list of the places in the file where type hints can be displayed.
pub fn inlay_hints(&self, file_id: FileId) -> Cancelable<Vec<InlayHint>> {
self.with_db(|db| inlay_hints::inlay_hints(db, file_id, &db.parse(file_id).tree()))
}
/// Returns the set of folding ranges.
pub fn folding_ranges(&self, file_id: FileId) -> Cancelable<Vec<Fold>> {
self.with_db(|db| folding_ranges::folding_ranges(&db.parse(file_id).tree()))
}
/// Fuzzy searches for a symbol.
pub fn symbol_search(&self, query: Query) -> Cancelable<Vec<NavigationTarget>> {
self.with_db(|db| {
symbol_index::world_symbols(db, query)
.into_iter()
.map(|s| NavigationTarget::from_symbol(db, s))
.collect::<Vec<_>>()
})
}
pub fn goto_definition(
&self,
position: FilePosition,
) -> Cancelable<Option<RangeInfo<Vec<NavigationTarget>>>> {
self.with_db(|db| goto_definition::goto_definition(db, position))
}
pub fn goto_implementation(
&self,
position: FilePosition,
) -> Cancelable<Option<RangeInfo<Vec<NavigationTarget>>>> {
self.with_db(|db| impls::goto_implementation(db, position))
}
pub fn goto_type_definition(
&self,
position: FilePosition,
) -> Cancelable<Option<RangeInfo<Vec<NavigationTarget>>>> {
self.with_db(|db| goto_type_definition::goto_type_definition(db, position))
}
/// Finds all usages of the reference at point.
pub fn find_all_refs(
&self,
position: FilePosition,
) -> Cancelable<Option<ReferenceSearchResult>> {
self.with_db(|db| references::find_all_refs(db, position))
}
/// Returns a short text describing element at position.
pub fn hover(&self, position: FilePosition) -> Cancelable<Option<RangeInfo<HoverResult>>> {
self.with_db(|db| hover::hover(db, position))
}
/// Computes parameter information for the given call expression.
pub fn call_info(&self, position: FilePosition) -> Cancelable<Option<CallInfo>> {
self.with_db(|db| call_info::call_info(db, position))
}
/// Returns a `mod name;` declaration which created the current module.
pub fn parent_module(&self, position: FilePosition) -> Cancelable<Vec<NavigationTarget>> {
self.with_db(|db| parent_module::parent_module(db, position))
}
    /// Returns the crates this file belongs to.
pub fn crate_for(&self, file_id: FileId) -> Cancelable<Vec<CrateId>> {
self.with_db(|db| parent_module::crate_for(db, file_id))
}
/// Returns the root file of the given crate.
pub fn crate_root(&self, crate_id: CrateId) -> Cancelable<FileId> {
self.with_db(|db| db.crate_graph().crate_root(crate_id))
}
/// Returns the set of possible targets to run for the current file.
pub fn runnables(&self, file_id: FileId) -> Cancelable<Vec<Runnable>> {
self.with_db(|db| runnables::runnables(db, file_id))
}
/// Computes syntax highlighting for the given file.
pub fn highlight(&self, file_id: FileId) -> Cancelable<Vec<HighlightedRange>> {
self.with_db(|db| syntax_highlighting::highlight(db, file_id))
}
    /// Computes syntax highlighting for the given file and renders it as HTML.
pub fn highlight_as_html(&self, file_id: FileId, rainbow: bool) -> Cancelable<String> {
self.with_db(|db| syntax_highlighting::highlight_as_html(db, file_id, rainbow))
}
/// Computes completions at the given position.
pub fn completions(&self, position: FilePosition) -> Cancelable<Option<Vec<CompletionItem>>> {
self.with_db(|db| completion::completions(db, position).map(Into::into))
}
/// Computes assists (aka code actions aka intentions) for the given
/// position.
pub fn assists(&self, frange: FileRange) -> Cancelable<Vec<Assist>> {
self.with_db(|db| assists::assists(db, frange))
}
/// Computes the set of diagnostics for the given file.
pub fn diagnostics(&self, file_id: FileId) -> Cancelable<Vec<Diagnostic>> {
self.with_db(|db| diagnostics::diagnostics(db, file_id))
}
/// Computes the type of the expression at the given position.
pub fn type_of(&self, frange: FileRange) -> Cancelable<Option<String>> {
self.with_db(|db| hover::type_of(db, frange))
}
/// Returns the edit required to rename reference at the position to the new
/// name.
pub fn rename(
&self,
position: FilePosition,
new_name: &str,
) -> Cancelable<Option<SourceChange>> {
self.with_db(|db| references::rename(db, position, new_name))
}
fn with_db<F: FnOnce(&db::RootDatabase) -> T + std::panic::UnwindSafe, T>(
&self,
f: F,
) -> Cancelable<T> {
self.db.catch_canceled(f)
}
}
#[test]
fn analysis_is_send() {
fn is_send<T: Send>() {}
is_send::<Analysis>();
}
| 33.158379 | 124 | 0.653263 |
ed822c49a6b27c713c6d320810ea583c40c0db43 | 1,445 | fn main() {
let mut config = hrpc_build::Config::new();
#[cfg(feature = "serde")]
// config.type_attribute(".", "#[derive(serde::Serialize, serde::Deserialize)]");
serde(&mut config, "Record");
serde(&mut config, "PullResponse");
serde(&mut config, "QueryResponse");
config.type_attribute(".sonar.Link", "#[derive(serde::Serialize)]");
// config.type_attribute(
// ".sonar.Record",
// "#[derive(serde::Serialize, serde::Deserialize)]",
// );
// config.type_attribute(
// ".sonar.PullResponse",
// "#[derive(serde::Serialize, serde::Deserialize)]",
// );
config.type_attribute(".sonar.Record", "#[serde(default)]");
// #[cfg(feature = "serde")]
// config.field_attribute(".", "#[derive(serde::Serialize, serde::Deserialize)]");
config.field_attribute(
".sonar.Record.key",
"#[serde(serialize_with = \"crate::as_hex\", deserialize_with = \"crate::from_hex\")]",
);
config.field_attribute(
".sonar.Record.timestamp",
"#[serde(deserialize_with = \"crate::u32_from_integer\")]",
);
config.extern_path(".sonar.Json", "crate::Json");
config
.compile_protos(&["src/schema.proto"], &["src"])
.unwrap();
}
fn serde(config: &mut hrpc_build::Config, name: &str) {
config.type_attribute(
format!(".sonar.{}", name),
"#[derive(serde::Serialize, serde::Deserialize)]",
);
}
| 35.243902 | 95 | 0.588235 |
1cb73b28457da911dbece2d3b00d0c231bc56719 | 2,185 | /*
* @lc app=leetcode.cn id=79 lang=rust
*
 * [79] Word Search
*/
// @lc code=start
use std::collections::HashSet;
impl Solution {
pub fn exist(board: Vec<Vec<char>>, word: String) -> bool {
let word = word.chars().collect::<Vec<_>>();
if word.is_empty() {
return true;
}
let n = board.len();
        if n == 0 {
            return false;
        }
let m = board[0].len();
        if m == 0 {
            return false;
        }
for i in 0..n {
for j in 0..m {
if Solution::exist_remain(&board, i as i32, j as i32, &word, m as i32, n as i32, &mut HashSet::<(i32,i32)>::new()) {
return true;
}
}
}
false
}
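    // DFS with backtracking: `used` records the cells on the current path so
    // none is revisited; worst case is O(n * m * 3^L) for a word of length L,
    // since after the first step at most three neighbours are unvisited.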
pub fn exist_remain(board: &[Vec<char>], i: i32, j: i32, remain: &[char], m: i32, n: i32, used: &mut HashSet<(i32,i32)>) -> bool {
if remain[0] == board[i as usize][j as usize] {
if remain.len() == 1 {
true
} else {
let directions = [(1,0),(-1,0),(0,1),(0,-1)];
used.insert((i,j));
for (di, dj) in &directions {
let next = (i+di,j+dj);
          if next.0 >= 0 && next.0 < n && next.1 >= 0 && next.1 < m && !used.contains(&next)
            && Solution::exist_remain(&board, next.0, next.1, &remain[1..], m, n, used) {
return true;
}
}
used.remove(&(i,j));
false
}
} else {
false
}
}
}
// @lc code=end
struct Solution;
#[cfg(test)]
mod test {
use super::Solution;
#[test]
fn returns_expected() {
let board = vec![
vec!['A','B','C','E'],
vec!['S','F','C','S'],
vec!['A','D','E','E']
];
assert!(
Solution::exist(board.clone(), "ABCCED".to_string())
);
assert!(
Solution::exist(board.clone(),"SEE".to_string())
);
assert!(
!Solution::exist(board.clone(),"ABCB".to_string())
);
}
}
| 26.325301 | 134 | 0.408696 |
3a9dd4508542a9b88ba2e9662ef400062376d4e7 | 2,563 | #![feature(async_await, await_macro)]
//! **You must ensure that at least one side verifies the other's identity;
//! otherwise a man-in-the-middle attack can be mounted against your program.**
//!
//! 
mod dh;
mod proto;
pub mod signature;
mod wrappers;
use derive_error::Error;
use futures::io::{AsyncReadExt, AsyncWriteExt};
use protocol::{Parcel, Settings};
use crate::{
dh::SharedEncryptionKey,
signature::{SigningKeyPair, SigningPubKey},
};
/// Wrapper for possible crate errors
#[derive(Debug, Error)]
pub enum Error {
/// Async I/O error
Io(futures::io::Error),
/// Error originating in `protocol`
Protocol(protocol::Error),
/// Other side is speaking a different protocol or
/// a different version of `fes`.
LibVersion,
/// Other side sent packet that was parsed correctly,
/// but it was unexpected at this moment
Logic,
/// Other side did not allow us to connect or had invalid identity
Rejected,
}
/// Auto-derived convenience extension for working with `protocol`
///
/// Parses and encodes Parcels with default settings.
pub trait ParcelExt<T> {
fn to_bytes(&self) -> Vec<u8>;
fn from_bytes(bytes: &[u8]) -> Result<T, Error>;
}
impl<T: Parcel> ParcelExt<T> for T {
fn to_bytes(&self) -> Vec<u8> {
self.raw_bytes(&Settings::default()).unwrap()
}
fn from_bytes(bytes: &[u8]) -> Result<T, Error> {
Ok(T::from_raw_bytes(bytes, &Settings::default())?)
}
}
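// Usage sketch (`Packet` stands in for any type implementing `Parcel`):
// values round-trip through the default settings, e.g.
//
//     let bytes = packet.to_bytes();
//     let packet2 = Packet::from_bytes(&bytes)?;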
/// Alias for `AsyncReadExt + AsyncWriteExt + Unpin`
///
/// See [romio] for example network implementation.
///
/// [romio]: https://crates.io/crates/romio
pub trait AsyncRW: AsyncReadExt + AsyncWriteExt + Unpin {}
impl<T: AsyncReadExt + AsyncWriteExt + Unpin> AsyncRW for T {}
/// Low level AsyncRW wrapper - sends and parses unencrypted Parcels and their size
#[derive(Debug, PartialEq)]
pub(crate) struct UnencryptedAsyncRW<T: AsyncRW>(pub T);
/// AsyncRW wrapper - sends and parses encrypted Parcels and their size
#[derive(Debug, PartialEq)]
pub(crate) struct EncryptedAsyncRW<T: AsyncRW> {
unencrypted: UnencryptedAsyncRW<T>,
key: SharedEncryptionKey,
nonce: u128,
}
/// Established and encrypted 1:1 connection
#[derive(Debug)]
pub struct SecureConnection<T: AsyncRW> {
pub id: SigningKeyPair,
pub other_id: Option<SigningPubKey>,
remote: EncryptedAsyncRW<T>,
}
/// Established but unencrypted 1:1 connection - start here
#[derive(Debug)]
pub struct Connection<T: AsyncRW> {
pub id: SigningKeyPair,
remote: UnencryptedAsyncRW<T>,
}
| 28.164835 | 83 | 0.690597 |
2680810c3d57e957f34a4560a53f804101d26f9c | 2,856 | pub(crate) struct Container {
pub(crate) attrs: Vec<syn::Attribute>,
pub(crate) vis: syn::Visibility,
pub(crate) ident: proc_macro2::Ident,
pub(crate) generics: syn::Generics,
pub(crate) data: Data,
}
pub(crate) enum Data {
Struct(syn::DataStruct),
Enum(syn::DataEnum),
}
impl Container {
pub(crate) fn from_derive_input(input: syn::DeriveInput, trait_name: &str) -> syn::Result<Self> {
let data = match input.data {
syn::Data::Struct(data_struct) => Data::Struct(data_struct),
syn::Data::Enum(data_enum) => Data::Enum(data_enum),
syn::Data::Union(_) => {
let msg = format!("Cannot derive `{}` for unions", trait_name);
return Err(syn::Error::new_spanned(input, msg));
},
};
let syn::DeriveInput { attrs, vis, ident, generics, data: _ } = input;
Ok(Self { attrs, vis, ident, generics, data })
}
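    // Usage sketch inside a hypothetical derive entry point (`tokens` and
    // the trait name are illustrative):
    //
    //     let input = syn::parse_macro_input!(tokens as syn::DeriveInput);
    //     let container = Container::from_derive_input(input, "MyTrait")?;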
}
impl quote::ToTokens for Container {
fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
for attr in self.attrs.iter().filter(|a| matches!(a.style, syn::AttrStyle::Outer)) {
attr.to_tokens(tokens);
}
self.vis.to_tokens(tokens);
match self.data {
Data::Struct(ref d) => d.struct_token.to_tokens(tokens),
Data::Enum(ref d) => d.enum_token.to_tokens(tokens),
}
self.ident.to_tokens(tokens);
self.generics.to_tokens(tokens);
match self.data {
Data::Struct(ref data) => match data.fields {
syn::Fields::Named(ref fields) => {
self.generics.where_clause.to_tokens(tokens);
fields.to_tokens(tokens);
},
syn::Fields::Unnamed(ref fields) => {
fields.to_tokens(tokens);
self.generics.where_clause.to_tokens(tokens);
TokensOrDefault(&data.semi_token).to_tokens(tokens);
},
syn::Fields::Unit => {
self.generics.where_clause.to_tokens(tokens);
TokensOrDefault(&data.semi_token).to_tokens(tokens);
},
},
Data::Enum(ref data) => {
self.generics.where_clause.to_tokens(tokens);
data.brace_token.surround(tokens, |tokens| {
data.variants.to_tokens(tokens);
});
},
}
}
}
pub(crate) struct TokensOrDefault<'a, T>(pub(crate) &'a Option<T>);
impl<'a, T> quote::ToTokens for TokensOrDefault<'a, T>
where
T: quote::ToTokens + Default,
{
fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
match *self.0 {
Some(ref t) => t.to_tokens(tokens),
None => T::default().to_tokens(tokens),
}
}
}
| 34 | 101 | 0.547269 |
3868a8557776a70030869a91bd975c7257f142ba | 11,771 | // بِسْمِ اللَّهِ الرَّحْمَنِ الرَّحِيم
// This file is part of Setheum.
// Copyright (C) 2019-2021 Setheum Labs.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::{
dollar, AccountId, Amount, Balance, CdpEngine, CollateralCurrencyIds, CurrencyId, Currencies, DepositPerAuthorization, Dex,
ExistentialDeposits, GetNativeCurrencyId, GetSetUSDId, GetDinarCurrencyId, Setmint, Price, Rate, Ratio,
Runtime, TradingPathLimit,
};
use super::utils::{feed_price, set_balance};
use core::convert::TryInto;
use frame_benchmarking::{account, whitelisted_caller};
use frame_system::RawOrigin;
use module_dex::TradingPairStatus;
use orml_benchmarking::runtime_benchmarks;
use orml_traits::{Change, GetByKey, MultiCurrencyExtended};
use sp_runtime::{
traits::{AccountIdLookup, One, StaticLookup, UniqueSaturatedInto},
FixedPointNumber,
};
use sp_std::prelude::*;
const SEED: u32 = 0;
const NATIVE: CurrencyId = GetNativeCurrencyId::get();
const STABLECOIN: CurrencyId = GetSetUSDId::get();
const DINARID: CurrencyId = GetDinarCurrencyId::get();
const SERP: CurrencyId = GetDinarCurrencyId::get();
fn inject_liquidity(
maker: AccountId,
currency_id_a: CurrencyId,
currency_id_b: CurrencyId,
max_amount_a: Balance,
max_amount_b: Balance,
) -> Result<(), &'static str> {
// set balance
<Currencies as MultiCurrencyExtended<_>>::update_balance(
currency_id_a,
&maker,
max_amount_a.unique_saturated_into(),
)?;
<Currencies as MultiCurrencyExtended<_>>::update_balance(
currency_id_b,
&maker,
max_amount_b.unique_saturated_into(),
)?;
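    // the result is deliberately ignored: enabling can fail if the pair is already enabled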
let _ = Dex::enable_trading_pair(RawOrigin::Root.into(), currency_id_a, currency_id_b);
Dex::add_liquidity(
RawOrigin::Signed(maker.clone()).into(),
currency_id_a,
currency_id_b,
max_amount_a,
max_amount_b,
Default::default(),
)?;
Ok(())
}
runtime_benchmarks! {
{ Runtime, serp_setmint }
authorize {
let caller: AccountId = whitelisted_caller();
let to: AccountId = account("to", 0, SEED);
let to_lookup = AccountIdLookup::unlookup(to);
// set balance
set_balance(NATIVE, &caller, DepositPerAuthorization::get());
}: _(RawOrigin::Signed(caller), DINARID, to_lookup)
unauthorize {
let caller: AccountId = whitelisted_caller();
let to: AccountId = account("to", 0, SEED);
let to_lookup = AccountIdLookup::unlookup(to);
// set balance
set_balance(NATIVE, &caller, DepositPerAuthorization::get());
Setmint::authorize(
RawOrigin::Signed(caller.clone()).into(),
DINARID,
to_lookup.clone()
)?;
}: _(RawOrigin::Signed(caller), DINARID, to_lookup)
unauthorize_all {
let c in 0 .. CollateralCurrencyIds::get().len().saturating_sub(1) as u32;
let caller: AccountId = whitelisted_caller();
let currency_ids = CollateralCurrencyIds::get();
let to: AccountId = account("to", 0, SEED);
let to_lookup = AccountIdLookup::unlookup(to);
// set balance
set_balance(NATIVE, &caller, DepositPerAuthorization::get().saturating_mul(c.into()));
for i in 0 .. c {
Setmint::authorize(
RawOrigin::Signed(caller.clone()).into(),
currency_ids[i as usize],
to_lookup.clone(),
)?;
}
}: _(RawOrigin::Signed(caller))
// `adjust_loan`, best case:
// adjust both collateral and debit
adjust_loan {
let caller: AccountId = whitelisted_caller();
let currency_id: CurrencyId = CollateralCurrencyIds::get()[0];
let collateral_price = Price::one(); // 1 USD
let debit_value = 100 * dollar(STABLECOIN);
let debit_exchange_rate = CdpEngine::get_debit_exchange_rate(currency_id);
let debit_amount = debit_exchange_rate.reciprocal().unwrap().saturating_mul_int(debit_value);
let debit_amount: Amount = debit_amount.unique_saturated_into();
let collateral_value = 10 * debit_value;
let collateral_amount = Price::saturating_from_rational(dollar(currency_id), dollar(STABLECOIN)).saturating_mul_int(collateral_value);
// set balance
set_balance(currency_id, &caller, collateral_amount + ExistentialDeposits::get(¤cy_id));
// feed price
feed_price(vec![(currency_id, collateral_price)])?;
// set risk params
CdpEngine::set_collateral_params(
RawOrigin::Root.into(),
currency_id,
Change::NewValue(Some(Ratio::saturating_from_rational(150, 100))),
Change::NewValue(Some(Rate::saturating_from_rational(10, 100))),
Change::NewValue(Some(Ratio::saturating_from_rational(150, 100))),
Change::NewValue(debit_value * 100),
)?;
}: _(RawOrigin::Signed(caller), currency_id, collateral_amount.try_into().unwrap(), debit_amount)
transfer_loan_from {
let currency_id: CurrencyId = CollateralCurrencyIds::get()[0];
let sender: AccountId = account("sender", 0, SEED);
let sender_lookup = AccountIdLookup::unlookup(sender.clone());
let receiver: AccountId = whitelisted_caller();
let receiver_lookup = AccountIdLookup::unlookup(receiver.clone());
let debit_value = 100 * dollar(STABLECOIN);
let debit_exchange_rate = CdpEngine::get_debit_exchange_rate(currency_id);
let debit_amount = debit_exchange_rate.reciprocal().unwrap().saturating_mul_int(debit_value);
let debit_amount: Amount = debit_amount.unique_saturated_into();
let collateral_value = 10 * debit_value;
let collateral_amount = Price::saturating_from_rational(dollar(currency_id), dollar(STABLECOIN)).saturating_mul_int(collateral_value);
// set balance
set_balance(currency_id, &sender, collateral_amount + ExistentialDeposits::get(¤cy_id));
set_balance(NATIVE, &sender, DepositPerAuthorization::get());
// feed price
feed_price(vec![(currency_id, Price::one())])?;
// set risk params
CdpEngine::set_collateral_params(
RawOrigin::Root.into(),
currency_id,
Change::NewValue(Some(Ratio::saturating_from_rational(150, 100))),
Change::NewValue(Some(Rate::saturating_from_rational(10, 100))),
Change::NewValue(Some(Ratio::saturating_from_rational(150, 100))),
Change::NewValue(debit_value * 100),
)?;
// initialize sender's loan
Setmint::adjust_loan(
RawOrigin::Signed(sender.clone()).into(),
currency_id,
collateral_amount.try_into().unwrap(),
debit_amount,
)?;
// authorize receiver
Setmint::authorize(
RawOrigin::Signed(sender.clone()).into(),
currency_id,
receiver_lookup,
)?;
}: _(RawOrigin::Signed(receiver), currency_id, sender_lookup)
close_loan_has_debit_by_dex {
let currency_id: CurrencyId = CollateralCurrencyIds::get()[0];
let sender: AccountId = whitelisted_caller();
let maker: AccountId = account("maker", 0, SEED);
let debit_value = 100 * dollar(STABLECOIN);
let debit_exchange_rate = CdpEngine::get_debit_exchange_rate(currency_id);
let debit_amount = debit_exchange_rate.reciprocal().unwrap().saturating_mul_int(debit_value);
let debit_amount: Amount = debit_amount.unique_saturated_into();
let collateral_value = 10 * debit_value;
let collateral_amount = Price::saturating_from_rational(dollar(currency_id), dollar(STABLECOIN)).saturating_mul_int(collateral_value);
// set balance
set_balance(currency_id, &sender, collateral_amount + ExistentialDeposits::get(¤cy_id));
inject_liquidity(maker.clone(), currency_id, DINARID, 10_000 * dollar(SERP), 10_000 * dollar(DINARID))?;
inject_liquidity(maker, DINARID, STABLECOIN, 10_000 * dollar(DINARID), 10_000 * dollar(STABLECOIN))?;
feed_price(vec![(DINARID, Price::one())])?;
// set risk params
CdpEngine::set_collateral_params(
RawOrigin::Root.into(),
currency_id,
Change::NewValue(Some(Ratio::saturating_from_rational(150, 100))),
Change::NewValue(Some(Rate::saturating_from_rational(10, 100))),
Change::NewValue(Some(Ratio::saturating_from_rational(150, 100))),
Change::NewValue(debit_value * 100),
)?;
// initialize sender's loan
Setmint::adjust_loan(
RawOrigin::Signed(sender.clone()).into(),
currency_id,
(10 * collateral_amount).try_into().unwrap(),
debit_amount,
)?;
}: _(RawOrigin::Signed(sender), currency_id, collateral_amount)
expand_position_collateral {
let currency_id: CurrencyId = DINARID;
let sender: AccountId = whitelisted_caller();
let maker: AccountId = account("maker", 0, SEED);
let debit_value = 100 * dollar(STABLECOIN);
let debit_exchange_rate = CdpEngine::get_debit_exchange_rate(currency_id);
let debit_amount = debit_exchange_rate.reciprocal().unwrap().saturating_mul_int(debit_value);
let collateral_value = 10 * debit_value;
let collateral_amount = Price::saturating_from_rational(dollar(currency_id), dollar(STABLECOIN)).saturating_mul_int(collateral_value);
// set balance and inject liquidity
set_balance(currency_id, &sender, (10 * collateral_amount) + ExistentialDeposits::get(¤cy_id));
inject_liquidity(maker, currency_id, STABLECOIN, 10_000 * dollar(currency_id), 10_000 * dollar(STABLECOIN))?;
feed_price(vec![(currency_id, Price::one())])?;
// set risk params
CdpEngine::set_collateral_params(
RawOrigin::Root.into(),
currency_id,
Change::NewValue(Some(Ratio::saturating_from_rational(150, 100))),
Change::NewValue(Some(Rate::saturating_from_rational(10, 100))),
Change::NewValue(Some(Ratio::saturating_from_rational(150, 100))),
Change::NewValue(debit_value * 100),
)?;
// initialize sender's loan
Setmint::adjust_loan(
RawOrigin::Signed(sender.clone()).into(),
currency_id,
collateral_amount.try_into().unwrap(),
debit_amount.try_into().unwrap(),
)?;
}: _(RawOrigin::Signed(sender), currency_id, debit_value, 0)
shrink_position_debit {
let currency_id: CurrencyId = DINARID;
let sender: AccountId = whitelisted_caller();
let maker: AccountId = account("maker", 0, SEED);
let debit_value = 100 * dollar(STABLECOIN);
let debit_exchange_rate = CdpEngine::get_debit_exchange_rate(currency_id);
let debit_amount = debit_exchange_rate.reciprocal().unwrap().saturating_mul_int(debit_value);
let collateral_value = 10 * debit_value;
let collateral_amount = Price::saturating_from_rational(dollar(currency_id), dollar(STABLECOIN)).saturating_mul_int(collateral_value);
// set balance and inject liquidity
set_balance(currency_id, &sender, (10 * collateral_amount) + ExistentialDeposits::get(¤cy_id));
inject_liquidity(maker, currency_id, STABLECOIN, 10_000 * dollar(currency_id), 10_000 * dollar(STABLECOIN))?;
feed_price(vec![(currency_id, Price::one())])?;
// set risk params
CdpEngine::set_collateral_params(
RawOrigin::Root.into(),
currency_id,
Change::NewValue(Some(Ratio::saturating_from_rational(150, 100))),
Change::NewValue(Some(Rate::saturating_from_rational(10, 100))),
Change::NewValue(Some(Ratio::saturating_from_rational(150, 100))),
Change::NewValue(debit_value * 100),
)?;
// initialize sender's loan
Setmint::adjust_loan(
RawOrigin::Signed(sender.clone()).into(),
currency_id,
collateral_amount.try_into().unwrap(),
debit_amount.try_into().unwrap(),
)?;
}: _(RawOrigin::Signed(sender), currency_id, collateral_amount / 5, 0)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::benchmarking::utils::tests::new_test_ext;
use orml_benchmarking::impl_benchmark_test_suite;
impl_benchmark_test_suite!(new_test_ext(),);
}
| 37.015723 | 136 | 0.743097 |
fe1aa822f8cae198e558193bb94b292baf532540 | 1,325 | //! # 376. Wiggle subsequence
//!
//! A wiggle sequence is a sequence of numbers in which the difference
//! between adjacent numbers strictly alternates between positive
//! and negative.
//!
//! A subsequence is obtained by deleting chosen elements (possibly none)
//! from the original sequence, leaving the others in their original order.
//!
//! Return the length of the longest wiggle subsequence of the given sequence of numbers.
use std::cmp::Ordering;
pub fn wiggle_max_length(nums: Vec<i32>) -> i32 {
let n = nums.len();
if n < 2 { return n as i32 }
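    // Greedy two-candidate scan: `start_up` counts a wiggle subsequence whose
    // first difference must be positive, `start_down` one whose first
    // difference must be negative. Each `*_going_up` flag records the
    // direction that candidate expects next; a difference in that direction
    // extends the candidate and flips its expectation, all others are skipped.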
let mut up_going_up = true;
let mut down_going_up = false;
let mut start_up = 1;
let mut start_down = 1;
for i in 1..n {
match nums[i].cmp(&nums[i-1]) {
Ordering::Greater => {
if up_going_up { up_going_up = false; start_up += 1 }
if down_going_up { down_going_up = false; start_down += 1 }
}
Ordering::Less => {
if !up_going_up { up_going_up = true; start_up += 1 }
if !down_going_up { down_going_up = true; start_down += 1 }
}
_ => continue
}
}
start_up.max(start_down)
}
#[test]
fn testing() {
assert_eq!(wiggle_max_length(vec![1,7,4,9,2,5]), 6)
}
#[test]
fn testing_1() {
assert_eq!(wiggle_max_length(vec![1,17,5,10,13,15,10,5,16,8]), 7)
}
#[test]
fn testing_2() {
assert_eq!(wiggle_max_length(vec![1,2,3,4,5,6,7,8,9]), 2)
} | 24.537037 | 74 | 0.664151 |
0e5d0e0290bf36c6ea9d0757bb3658c4c6becc13 | 21,563 | // Copyright © 2018 winapi-rs developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
//! theming API
use ctypes::{c_float, c_int, c_void};
use shared::minwindef::{BOOL, BYTE, DWORD, HINSTANCE, HRGN, LPARAM, UINT, ULONG, WORD};
use shared::windef::{COLORREF, HBITMAP, HBRUSH, HDC, HWND, LPCRECT, LPRECT, POINT, RECT, SIZE};
use um::commctrl::HIMAGELIST;
use um::wingdi::{BLENDFUNCTION, LOGFONTW, RGBQUAD, TEXTMETRICW};
use um::winnt::{HANDLE, HRESULT, LONG, LPCWSTR, LPWSTR, PVOID, VOID};
pub type HTHEME = HANDLE;
//pub const MAX_THEMECOLOR: u32 = 64;
//pub const MAX_THEMESIZE: u32 = 64;
extern "system" {
pub fn BeginPanningFeedback(
hwnd: HWND,
) -> BOOL;
pub fn UpdatePanningFeedback(
hwnd: HWND,
lTotalOverpanOffsetX: LONG,
lTotalOverpanOffsetY: LONG,
fInInertia: BOOL,
) -> BOOL;
pub fn EndPanningFeedback(
hwnd: HWND,
fAnimateBack: BOOL,
) -> BOOL;
}
ENUM!{enum TA_PROPERTY {
TAP_FLAGS = 0,
TAP_TRANSFORMCOUNT = 1,
TAP_STAGGERDELAY = 2,
TAP_STAGGERDELAYCAP = 3,
TAP_STAGGERDELAYFACTOR = 4,
TAP_ZORDER = 5,
}}
ENUM!{enum TA_PROPERTY_FLAG {
TAPF_NONE = 0x0,
TAPF_HASSTAGGER = 0x1,
TAPF_ISRTLAWARE = 0x2,
TAPF_ALLOWCOLLECTION = 0x4,
TAPF_HASBACKGROUND = 0x8,
TAPF_HASPERSPECTIVE = 0x10,
}}
extern "system" {
pub fn GetThemeAnimationProperty(
hTheme: HTHEME,
iStoryboardId: c_int,
iTargetId: c_int,
eProperty: TA_PROPERTY,
pvProperty: *mut VOID,
cbSize: DWORD,
pcbSizeOut: *mut DWORD,
) -> HRESULT;
}
ENUM!{enum TA_TRANSFORM_TYPE {
TATT_TRANSLATE_2D = 0,
TATT_SCALE_2D = 1,
TATT_OPACITY = 2,
TATT_CLIP = 3,
}}
ENUM!{enum TA_TRANSFORM_FLAG {
TATF_NONE = 0x0,
TATF_TARGETVALUES_USER = 0x1,
TATF_HASINITIALVALUES = 0x2,
TATF_HASORIGINVALUES = 0x4,
}}
STRUCT!{struct TA_TRANSFORM {
eTransformType: TA_TRANSFORM_TYPE,
dwTimingFunctionId: DWORD,
dwStartTime: DWORD,
dwDurationTime: DWORD,
eFlags: TA_TRANSFORM_FLAG,
}}
pub type PTA_TRANSFORM = *mut TA_TRANSFORM;
STRUCT!{struct TA_TRANSFORM_2D {
header: TA_TRANSFORM,
rX: c_float,
rY: c_float,
rInitialX: c_float,
rInitialY: c_float,
rOriginX: c_float,
rOriginY: c_float,
}}
pub type PTA_TRANSFORM_2D = *mut TA_TRANSFORM_2D;
STRUCT!{struct TA_TRANSFORM_OPACITY {
header: TA_TRANSFORM,
rOpacity: c_float,
rInitialOpacity: c_float,
}}
pub type PTA_TRANSFORM_OPACITY = *mut TA_TRANSFORM_OPACITY;
STRUCT!{struct TA_TRANSFORM_CLIP {
header: TA_TRANSFORM,
rLeft: c_float,
rTop: c_float,
rRight: c_float,
rBottom: c_float,
rInitialLeft: c_float,
rInitialTop: c_float,
rInitialRight: c_float,
rInitialBottom: c_float,
}}
pub type PTA_TRANSFORM_CLIP = *mut TA_TRANSFORM_CLIP;
extern "system" {
pub fn GetThemeAnimationTransform(
hTheme: HTHEME,
iStoryboardId: c_int,
iTargetId: c_int,
dwTransformIndex: DWORD,
pTransform: *mut TA_TRANSFORM,
cbSize: DWORD,
pcbSizeOut: *mut DWORD,
) -> HRESULT;
}
ENUM!{enum TA_TIMINGFUNCTION_TYPE {
TTFT_UNDEFINED = 0,
TTFT_CUBIC_BEZIER = 1,
}}
STRUCT!{struct TA_TIMINGFUNCTION {
eTimingFunctionType: TA_TIMINGFUNCTION_TYPE,
}}
pub type PTA_TIMINGFUNCTION = *mut TA_TIMINGFUNCTION;
STRUCT!{struct TA_CUBIC_BEZIER {
header: TA_TIMINGFUNCTION,
rX0: c_float,
rY0: c_float,
rX1: c_float,
rY1: c_float,
}}
pub type PTA_CUBIC_BEZIER = *mut TA_CUBIC_BEZIER;
extern "system" {
pub fn GetThemeTimingFunction(
hTheme: HTHEME,
iTimingFunctionId: c_int,
pTimingFunction: *mut TA_TIMINGFUNCTION,
cbSize: DWORD,
pcbSizeOut: *mut DWORD,
) -> HRESULT;
pub fn OpenThemeData(
hwnd: HWND,
pszClassList: LPCWSTR,
) -> HTHEME;
}
pub const OTD_FORCE_RECT_SIZING: DWORD = 0x00000001;
pub const OTD_NONCLIENT: DWORD = 0x00000002;
pub const OTD_VALIDBITS: DWORD = OTD_FORCE_RECT_SIZING | OTD_NONCLIENT;
extern "system" {
pub fn OpenThemeDataForDpi(
hwnd: HWND,
pszClassList: LPCWSTR,
dpi: UINT,
) -> HTHEME;
pub fn OpenThemeDataEx(
hwnd: HWND,
pszClassList: LPCWSTR,
dwFlags: DWORD,
) -> HTHEME;
pub fn CloseThemeData(
hTheme: HTHEME,
) -> HRESULT;
pub fn DrawThemeBackground(
hTheme: HTHEME,
hdc: HDC,
iPartId: c_int,
iStateId: c_int,
pRect: LPCRECT,
pClipRect: LPCRECT,
) -> HRESULT;
}
pub const DTBG_CLIPRECT: DWORD = 0x00000001;
pub const DTBG_DRAWSOLID: DWORD = 0x00000002;
pub const DTBG_OMITBORDER: DWORD = 0x00000004;
pub const DTBG_OMITCONTENT: DWORD = 0x00000008;
pub const DTBG_COMPUTINGREGION: DWORD = 0x00000010;
pub const DTBG_MIRRORDC: DWORD = 0x00000020;
pub const DTBG_NOMIRROR: DWORD = 0x00000040;
pub const DTBG_VALIDBITS: DWORD = DTBG_CLIPRECT | DTBG_DRAWSOLID | DTBG_OMITBORDER
| DTBG_OMITCONTENT | DTBG_COMPUTINGREGION | DTBG_MIRRORDC | DTBG_NOMIRROR;
STRUCT!{struct DTBGOPTS {
dwSize: DWORD,
dwFlags: DWORD,
rcClip: RECT,
}}
pub type PDTBGOPTS = *mut DTBGOPTS;
extern "system" {
pub fn DrawThemeBackgroundEx(
hTheme: HTHEME,
hdc: HDC,
iPartId: c_int,
iStateId: c_int,
pRect: LPCRECT,
pOptions: *const DTBGOPTS,
) -> HRESULT;
}
//pub const DTT_GRAYED: u32 = 0x00000001;
//pub const DTT_FLAGS2VALIDBITS: u32 = DTT_GRAYED;
extern "system" {
pub fn DrawThemeText(
hTheme: HTHEME,
hdc: HDC,
iPartId: c_int,
iStateId: c_int,
pszText: LPCWSTR,
cchText: c_int,
dwTextFlags: DWORD,
dwTextFlags2: DWORD,
pRect: LPCRECT,
) -> HRESULT;
pub fn GetThemeBackgroundContentRect(
hTheme: HTHEME,
hdc: HDC,
iPartId: c_int,
iStateId: c_int,
pBoundingRect: LPCRECT,
pContentRect: LPRECT,
) -> HRESULT;
pub fn GetThemeBackgroundExtent(
hTheme: HTHEME,
hdc: HDC,
iPartId: c_int,
iStateId: c_int,
pContentRect: LPCRECT,
pExtentRect: LPRECT,
) -> HRESULT;
pub fn GetThemeBackgroundRegion(
hTheme: HTHEME,
hdc: HDC,
iPartId: c_int,
iStateId: c_int,
pRect: LPCRECT,
pRegion: *mut HRGN,
) -> HRESULT;
}
ENUM!{enum THEMESIZE {
TS_MIN = 0,
TS_TRUE = 1,
TS_DRAW = 2,
}}
extern "system" {
pub fn GetThemePartSize(
hTheme: HTHEME,
hdc: HDC,
iPartId: c_int,
iStateId: c_int,
prc: LPCRECT,
eSize: THEMESIZE,
psz: *mut SIZE,
) -> HRESULT;
pub fn GetThemeTextExtent(
hTheme: HTHEME,
hdc: HDC,
iPartId: c_int,
iStateId: c_int,
pszText: LPCWSTR,
cchCharCount: c_int,
dwTextFlags: DWORD,
pBoundingRect: LPCRECT,
pExtentRect: LPRECT,
) -> HRESULT;
pub fn GetThemeTextMetrics(
hTheme: HTHEME,
hdc: HDC,
iPartId: c_int,
iStateId: c_int,
ptm: *mut TEXTMETRICW,
) -> HRESULT;
}
pub const HTTB_BACKGROUNDSEG: DWORD = 0x00000000;
pub const HTTB_FIXEDBORDER: DWORD = 0x00000002;
pub const HTTB_CAPTION: DWORD = 0x00000004;
pub const HTTB_RESIZINGBORDER_LEFT: DWORD = 0x00000010;
pub const HTTB_RESIZINGBORDER_TOP: DWORD = 0x00000020;
pub const HTTB_RESIZINGBORDER_RIGHT: DWORD = 0x00000040;
pub const HTTB_RESIZINGBORDER_BOTTOM: DWORD = 0x00000080;
pub const HTTB_RESIZINGBORDER: DWORD = HTTB_RESIZINGBORDER_LEFT | HTTB_RESIZINGBORDER_TOP
| HTTB_RESIZINGBORDER_RIGHT | HTTB_RESIZINGBORDER_BOTTOM;
pub const HTTB_SIZINGTEMPLATE: DWORD = 0x00000100;
pub const HTTB_SYSTEMSIZINGMARGINS: DWORD = 0x00000200;
extern "system" {
pub fn HitTestThemeBackground(
hTheme: HTHEME,
hdc: HDC,
iPartId: c_int,
iStateId: c_int,
dwOptions: DWORD,
pRect: LPCRECT,
hrgn: HRGN,
ptTest: POINT,
pwHitTestCode: *mut WORD,
) -> HRESULT;
pub fn DrawThemeEdge(
hTheme: HTHEME,
hdc: HDC,
iPartId: c_int,
iStateId: c_int,
pDestRect: LPCRECT,
uEdge: UINT,
uFlags: UINT,
pContentRect: LPRECT,
) -> HRESULT;
pub fn DrawThemeIcon(
hTheme: HTHEME,
hdc: HDC,
iPartId: c_int,
iStateId: c_int,
pRect: LPCRECT,
himl: HIMAGELIST,
iImageIndex: c_int,
) -> HRESULT;
pub fn IsThemePartDefined(
hTheme: HTHEME,
iPartId: c_int,
iStateId: c_int,
) -> BOOL;
pub fn IsThemeBackgroundPartiallyTransparent(
hTheme: HTHEME,
iPartId: c_int,
iStateId: c_int,
) -> BOOL;
pub fn GetThemeColor(
hTheme: HTHEME,
iPartId: c_int,
iStateId: c_int,
iPropId: c_int,
pColor: *mut COLORREF,
) -> HRESULT;
pub fn GetThemeMetric(
hTheme: HTHEME,
hdc: HDC,
iPartId: c_int,
iStateId: c_int,
iPropId: c_int,
piVal: *mut c_int,
) -> HRESULT;
pub fn GetThemeString(
hTheme: HTHEME,
iPartId: c_int,
iStateId: c_int,
iPropId: c_int,
pszBuff: LPWSTR,
cchMaxBuffChars: c_int,
) -> HRESULT;
pub fn GetThemeBool(
hTheme: HTHEME,
iPartId: c_int,
iStateId: c_int,
iPropId: c_int,
pfVal: *mut BOOL,
) -> HRESULT;
pub fn GetThemeInt(
hTheme: HTHEME,
iPartId: c_int,
iStateId: c_int,
iPropId: c_int,
piVal: *mut c_int,
) -> HRESULT;
pub fn GetThemeEnumValue(
hTheme: HTHEME,
iPartId: c_int,
iStateId: c_int,
iPropId: c_int,
piVal: *mut c_int,
) -> HRESULT;
pub fn GetThemePosition(
hTheme: HTHEME,
iPartId: c_int,
iStateId: c_int,
iPropId: c_int,
pPoint: *mut POINT,
) -> HRESULT;
pub fn GetThemeFont(
hTheme: HTHEME,
hdc: HDC,
iPartId: c_int,
iStateId: c_int,
iPropId: c_int,
pFont: *mut LOGFONTW,
) -> HRESULT;
pub fn GetThemeRect(
hTheme: HTHEME,
iPartId: c_int,
iStateId: c_int,
iPropId: c_int,
pRect: LPRECT,
) -> HRESULT;
}
STRUCT!{struct MARGINS {
cxLeftWidth: c_int,
cxRightWidth: c_int,
cyTopHeight: c_int,
cyBottomHeight: c_int,
}}
pub type PMARGINS = *mut MARGINS;
extern "system" {
pub fn GetThemeMargins(
hTheme: HTHEME,
hdc: HDC,
iPartId: c_int,
iStateId: c_int,
iPropId: c_int,
prc: LPCRECT,
pMargins: *mut MARGINS,
) -> HRESULT;
}
pub const MAX_INTLIST_COUNT: usize = 402;
STRUCT!{struct INTLIST {
iValueCount: c_int,
iValues: [c_int; MAX_INTLIST_COUNT],
}}
pub type PINTLIST = *mut INTLIST;
extern "system" {
pub fn GetThemeIntList(
hTheme: HTHEME,
iPartId: c_int,
iStateId: c_int,
iPropId: c_int,
pIntList: *mut INTLIST,
) -> HRESULT;
}
ENUM!{enum PROPERTYORIGIN {
PO_STATE = 0,
PO_PART = 1,
PO_CLASS = 2,
PO_GLOBAL = 3,
PO_NOTFOUND = 4,
}}
extern "system" {
pub fn GetThemePropertyOrigin(
hTheme: HTHEME,
iPartId: c_int,
iStateId: c_int,
iPropId: c_int,
pOrigin: *mut PROPERTYORIGIN,
) -> HRESULT;
pub fn SetWindowTheme(
hwnd: HWND,
pszSubAppName: LPCWSTR,
pszSubIdList: LPCWSTR,
) -> HRESULT;
pub fn GetThemeFilename(
hTheme: HTHEME,
iPartId: c_int,
iStateId: c_int,
iPropId: c_int,
pszThemeFileName: LPWSTR,
cchMaxBuffChars: c_int,
) -> HRESULT;
pub fn GetThemeSysColor(
hTheme: HTHEME,
iColorId: c_int,
) -> COLORREF;
pub fn GetThemeSysColorBrush(
hTheme: HTHEME,
iColorId: c_int,
) -> HBRUSH;
pub fn GetThemeSysBool(
hTheme: HTHEME,
iBoolId: c_int,
) -> BOOL;
pub fn GetThemeSysSize(
hTheme: HTHEME,
iSizeId: c_int,
) -> c_int;
pub fn GetThemeSysFont(
hTheme: HTHEME,
iFontId: c_int,
plf: *mut LOGFONTW,
) -> HRESULT;
pub fn GetThemeSysString(
hTheme: HTHEME,
iStringId: c_int,
pszStringBuff: LPWSTR,
cchMaxStringChars: c_int,
) -> HRESULT;
pub fn GetThemeSysInt(
hTheme: HTHEME,
iIntId: c_int,
piValue: *mut c_int,
) -> HRESULT;
pub fn IsThemeActive() -> BOOL;
pub fn IsAppThemed() -> BOOL;
pub fn GetWindowTheme(
hwnd: HWND,
) -> HTHEME;
}
pub const ETDT_DISABLE: DWORD = 0x00000001;
pub const ETDT_ENABLE: DWORD = 0x00000002;
pub const ETDT_USETABTEXTURE: DWORD = 0x00000004;
pub const ETDT_ENABLETAB: DWORD = ETDT_ENABLE | ETDT_USETABTEXTURE;
pub const ETDT_USEAEROWIZARDTABTEXTURE: DWORD = 0x00000008;
pub const ETDT_ENABLEAEROWIZARDTAB: DWORD = ETDT_ENABLE | ETDT_USEAEROWIZARDTABTEXTURE;
pub const ETDT_VALIDBITS: DWORD = ETDT_DISABLE | ETDT_ENABLE | ETDT_USETABTEXTURE
| ETDT_USEAEROWIZARDTABTEXTURE;
extern "system" {
pub fn EnableThemeDialogTexture(
hwnd: HWND,
dwFlags: DWORD,
) -> HRESULT;
pub fn IsThemeDialogTextureEnabled(
hwnd: HWND,
) -> BOOL;
}
pub const STAP_ALLOW_NONCLIENT: DWORD = 1 << 0;
pub const STAP_ALLOW_CONTROLS: DWORD = 1 << 1;
pub const STAP_ALLOW_WEBCONTENT: DWORD = 1 << 2;
pub const STAP_VALIDBITS: DWORD = STAP_ALLOW_NONCLIENT | STAP_ALLOW_CONTROLS
| STAP_ALLOW_WEBCONTENT;
extern "system" {
pub fn GetThemeAppProperties() -> DWORD;
pub fn SetThemeAppProperties(
dwFlags: DWORD,
);
pub fn GetCurrentThemeName(
pszThemeFileName: LPWSTR,
cchMaxNameChars: c_int,
pszColorBuff: LPWSTR,
cchMaxColorChars: c_int,
pszSizeBuff: LPWSTR,
cchMaxSizeChars: c_int,
) -> HRESULT;
}
pub const SZ_THDOCPROP_DISPLAYNAME: &'static str = "DisplayName";
pub const SZ_THDOCPROP_CANONICALNAME: &'static str = "ThemeName";
pub const SZ_THDOCPROP_TOOLTIP: &'static str = "ToolTip";
pub const SZ_THDOCPROP_AUTHOR: &'static str = "author";
extern "system" {
pub fn GetThemeDocumentationProperty(
pszThemeName: LPCWSTR,
pszPropertyName: LPCWSTR,
pszValueBuff: LPWSTR,
cchMaxValChars: c_int,
) -> HRESULT;
pub fn DrawThemeParentBackground(
hwnd: HWND,
hdc: HDC,
prc: *const RECT,
) -> HRESULT;
pub fn EnableTheming(
fEnable: BOOL,
) -> HRESULT;
}
pub const GBF_DIRECT: ULONG = 0x00000001;
pub const GBF_COPY: ULONG = 0x00000002;
pub const GBF_VALIDBITS: ULONG = GBF_DIRECT | GBF_COPY;
pub const DTPB_WINDOWDC: DWORD = 0x00000001;
pub const DTPB_USECTLCOLORSTATIC: DWORD = 0x00000002;
pub const DTPB_USEERASEBKGND: DWORD = 0x00000004;
extern "system" {
pub fn DrawThemeParentBackgroundEx(
hwnd: HWND,
hdc: HDC,
dwFlags: DWORD,
prc: *const RECT,
) -> HRESULT;
}
ENUM!{enum WINDOWTHEMEATTRIBUTETYPE {
WTA_NONCLIENT = 1,
}}
STRUCT!{struct WTA_OPTIONS {
dwFlags: DWORD,
dwMask: DWORD,
}}
pub type PWTA_OPTIONS = *mut WTA_OPTIONS;
pub const WTNCA_NODRAWCAPTION: DWORD = 0x00000001;
pub const WTNCA_NODRAWICON: DWORD = 0x00000002;
pub const WTNCA_NOSYSMENU: DWORD = 0x00000004;
pub const WTNCA_NOMIRRORHELP: DWORD = 0x00000008;
pub const WTNCA_VALIDBITS: DWORD = WTNCA_NODRAWCAPTION | WTNCA_NODRAWICON | WTNCA_NOSYSMENU
| WTNCA_NOMIRRORHELP;
extern "system" {
pub fn SetWindowThemeAttribute(
hwnd: HWND,
eAttribute: WINDOWTHEMEATTRIBUTETYPE,
pvAttribute: PVOID,
cbAttribute: DWORD,
) -> HRESULT;
}
#[inline]
pub unsafe fn SetWindowThemeNonClientAttributes(
hwnd: HWND,
dwMask: DWORD,
dwAttributes: DWORD,
) -> HRESULT {
use core::mem::{size_of, zeroed};
let mut wta: WTA_OPTIONS = zeroed();
wta.dwFlags = dwAttributes;
wta.dwMask = dwMask;
SetWindowThemeAttribute(
hwnd,
WTA_NONCLIENT,
&mut wta as *mut WTA_OPTIONS as *mut c_void,
size_of::<WTA_OPTIONS>() as u32,
)
}
FN!{stdcall DTT_CALLBACK_PROC(
hdc: HDC,
pszText: LPWSTR,
cchText: c_int,
prc: LPRECT,
dwFlags: UINT,
lParam: LPARAM,
) -> c_int}
pub const DTT_TEXTCOLOR: DWORD = 1 << 0;
pub const DTT_BORDERCOLOR: DWORD = 1 << 1;
pub const DTT_SHADOWCOLOR: DWORD = 1 << 2;
pub const DTT_SHADOWTYPE: DWORD = 1 << 3;
pub const DTT_SHADOWOFFSET: DWORD = 1 << 4;
pub const DTT_BORDERSIZE: DWORD = 1 << 5;
pub const DTT_FONTPROP: DWORD = 1 << 6;
pub const DTT_COLORPROP: DWORD = 1 << 7;
pub const DTT_STATEID: DWORD = 1 << 8;
pub const DTT_CALCRECT: DWORD = 1 << 9;
pub const DTT_APPLYOVERLAY: DWORD = 1 << 10;
pub const DTT_GLOWSIZE: DWORD = 1 << 11;
pub const DTT_CALLBACK: DWORD = 1 << 12;
pub const DTT_COMPOSITED: DWORD = 1 << 13;
pub const DTT_VALIDBITS: DWORD = DTT_TEXTCOLOR | DTT_BORDERCOLOR | DTT_SHADOWCOLOR
| DTT_SHADOWTYPE | DTT_SHADOWOFFSET | DTT_BORDERSIZE | DTT_FONTPROP | DTT_COLORPROP
| DTT_STATEID | DTT_CALCRECT | DTT_APPLYOVERLAY | DTT_GLOWSIZE | DTT_COMPOSITED;
STRUCT!{struct DTTOPTS {
dwSize: DWORD,
dwFlags: DWORD,
crText: COLORREF,
crBorder: COLORREF,
crShadow: COLORREF,
iTextShadowType: c_int,
ptShadowOffset: POINT,
iBorderSize: c_int,
iFontPropId: c_int,
iColorPropId: c_int,
iStateId: c_int,
fApplyOverlay: BOOL,
iGlowSize: c_int,
pfnDrawTextCallback: DTT_CALLBACK_PROC,
lParam: LPARAM,
}}
pub type PDTTOPTS = *mut DTTOPTS;
extern "system" {
pub fn DrawThemeTextEx(
hTheme: HTHEME,
hdc: HDC,
iPartId: c_int,
iStateId: c_int,
pszText: LPCWSTR,
cchText: c_int,
dwTextFlags: DWORD,
pRect: LPRECT,
pOptions: *const DTTOPTS,
) -> HRESULT;
pub fn GetThemeBitmap(
hTheme: HTHEME,
iPartId: c_int,
iStateId: c_int,
iPropId: c_int,
dwFlags: ULONG,
phBitmap: *mut HBITMAP,
) -> HRESULT;
pub fn GetThemeStream(
hTheme: HTHEME,
iPartId: c_int,
iStateId: c_int,
iPropId: c_int,
ppvStream: *mut *mut VOID,
pcbStream: *mut DWORD,
hInst: HINSTANCE,
) -> HRESULT;
pub fn BufferedPaintInit() -> HRESULT;
pub fn BufferedPaintUnInit() -> HRESULT;
}
pub type HPAINTBUFFER = HANDLE;
ENUM!{enum BP_BUFFERFORMAT {
BPBF_COMPATIBLEBITMAP = 0,
BPBF_DIB = 1,
BPBF_TOPDOWNDIB = 2,
BPBF_TOPDOWNMONODIB = 3,
}}
pub const BPBF_COMPOSITED: BP_BUFFERFORMAT = BPBF_TOPDOWNDIB;
ENUM!{enum BP_ANIMATIONSTYLE {
BPAS_NONE = 0,
BPAS_LINEAR = 1,
BPAS_CUBIC = 2,
BPAS_SINE = 3,
}}
STRUCT!{struct BP_ANIMATIONPARAMS {
cbSize: DWORD,
dwFlags: DWORD,
style: BP_ANIMATIONSTYLE,
dwDuration: DWORD,
}}
pub type PBP_ANIMATIONPARAMS = *mut BP_ANIMATIONPARAMS;
pub const BPPF_ERASE: DWORD = 0x0001;
pub const BPPF_NOCLIP: DWORD = 0x0002;
pub const BPPF_NONCLIENT: DWORD = 0x0004;
STRUCT!{struct BP_PAINTPARAMS {
cbSize: DWORD,
dwFlags: DWORD,
prcExclude: *const RECT,
pBlendFunction: *const BLENDFUNCTION,
}}
pub type PBP_PAINTPARAMS = *mut BP_PAINTPARAMS;
extern "system" {
pub fn BeginBufferedPaint(
hdcTarget: HDC,
prcTarget: *const RECT,
dwFormat: BP_BUFFERFORMAT,
pPaintParams: *mut BP_PAINTPARAMS,
phdc: *mut HDC,
) -> HPAINTBUFFER;
pub fn EndBufferedPaint(
hBufferedPaint: HPAINTBUFFER,
fUpdateTarget: BOOL,
) -> HRESULT;
pub fn GetBufferedPaintTargetRect(
hBufferedPaint: HPAINTBUFFER,
prc: *mut RECT,
) -> HRESULT;
pub fn GetBufferedPaintTargetDC(
hBufferedPaint: HPAINTBUFFER,
) -> HDC;
pub fn GetBufferedPaintDC(
hBufferedPaint: HPAINTBUFFER,
) -> HDC;
pub fn GetBufferedPaintBits(
hBufferedPaint: HPAINTBUFFER,
ppbBuffer: *mut *mut RGBQUAD,
pcxRow: *mut c_int,
) -> HRESULT;
pub fn BufferedPaintClear(
hBufferedPaint: HPAINTBUFFER,
prc: *const RECT,
) -> HRESULT;
pub fn BufferedPaintSetAlpha(
hBufferedPaint: HPAINTBUFFER,
prc: *const RECT,
alpha: BYTE,
) -> HRESULT;
pub fn BufferedPaintStopAllAnimations(
hwnd: HWND,
) -> HRESULT;
}
pub type HANIMATIONBUFFER = HANDLE;
extern "system" {
pub fn BeginBufferedAnimation(
hwnd: HWND,
hdcTarget: HDC,
prcTarget: *const RECT,
dwFormat: BP_BUFFERFORMAT,
pPaintParams: *mut BP_PAINTPARAMS,
pAnimationParams: *mut BP_ANIMATIONPARAMS,
phdcFrom: *mut HDC,
phdcTo: *mut HDC,
) -> HANIMATIONBUFFER;
pub fn EndBufferedAnimation(
hbpAnimation: HANIMATIONBUFFER,
fUpdateTarget: BOOL,
) -> HRESULT;
pub fn BufferedPaintRenderAnimation(
hwnd: HWND,
hdcTarget: HDC,
) -> BOOL;
pub fn IsCompositionActive() -> BOOL;
pub fn GetThemeTransitionDuration(
hTheme: HTHEME,
iPartId: c_int,
iStateIdFrom: c_int,
iStateIdTo: c_int,
iPropId: c_int,
pdwDuration: *mut DWORD,
) -> HRESULT;
}
| 27.859173 | 95 | 0.636739 |
d71866869708fac9a5f972604c12b76b672d621a | 8,544 |
use std::{mem,slice};
use std::io;
use std::io::Read;
use std::fs;
use std::error::Error;
use std::path;
//use flate2::write::DeflateEncoder;
//use flate2::Compression;
//use flate2::read::DeflateDecoder;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
//use zstd::stream::{Encoder, Decoder};
//use lz4::{Decoder, EncoderBuilder};
use crate::vwmap;
const CACHE_HEADER_MAGIC_STRING: &[u8; 4] = b"FWCA"; // Fwumious Wabbit CAche
const CACHE_HEADER_VERSION:u32 = 10;
/*
Version incompatibilities:
9->10: enable binning
8->9: enabled multi-byte feature names in vw files
7->8: add example importance to the parsed buffer format
*/
// Cache layout:
// 4 bytes: Magic bytes
// u32: Version of the cache format
// u_size + blob: json encoding of vw_source
// ...cached examples
const READBUF_LEN:usize = 1024*100;
pub struct RecordCache {
output_bufwriter: Box<dyn io::Write>,
input_bufreader: Box<dyn io::Read>,
temporary_filename: String,
final_filename: String,
pub writing: bool,
pub reading: bool,
// pub output_buffer: Vec<u32>,
pub byte_buffer: Vec<u8>,//[u8; READBUF_LEN],
start_pointer: usize,
end_pointer: usize,
total_read: usize,
}
impl RecordCache {
pub fn new(input_filename: &str, enabled: bool, vw_map: &vwmap::VwNamespaceMap) -> RecordCache {
        let temporary_filename = format!("{}.fwcache.writing", input_filename);
        let final_filename = format!("{}.fwcache", input_filename);
        let gz = input_filename.ends_with("gz");
let mut rc = RecordCache {
output_bufwriter: Box::new(io::BufWriter::new(io::sink())),
input_bufreader: Box::new(io::empty()),
temporary_filename: temporary_filename.to_string(),
final_filename: final_filename.to_string(),
writing: false,
reading: false,
byte_buffer: Vec::new(),
start_pointer: 0,
end_pointer: 0,
total_read: 0,
};
if enabled {
if path::Path::new(&final_filename).exists() {
rc.reading = true;
if !gz {
                // we buffer ourselves; otherwise it would be wise to use a BufReader
rc.input_bufreader = Box::new(fs::File::open(&final_filename).unwrap());
} else {
// rc.input_bufreader = Box::new(zstd::stream::Decoder::new(fs::File::open(&final_filename).unwrap()).unwrap());
rc.input_bufreader = Box::new(lz4::Decoder::new(fs::File::open(&final_filename).unwrap()).unwrap());
}
println!("using cache_file = {}", final_filename );
println!("ignoring text input in favor of cache input");
rc.byte_buffer.resize(READBUF_LEN, 0);
match rc.verify_header(vw_map) {
Ok(()) => {},
Err(e) => {
println!("Couldn't use the existing cache file: {:?}", e);
rc.reading = false;
}
}
}
if !rc.reading {
rc.writing = true;
println!("creating cache file = {}", final_filename );
if !gz {
rc.output_bufwriter = Box::new(io::BufWriter::new(fs::File::create(temporary_filename).unwrap()));
} else {
// rc.output_bufwriter = Box::new(io::BufWriter::new(DeflateEncoder::new(fs::File::create(temporary_filename).unwrap(),
// Compression::fast())));
// rc.output_bufwriter = Box::new(io::BufWriter::new(zstd::stream::Encoder::new(fs::File::create(temporary_filename).unwrap(),
// -5).unwrap().auto_finish()));
rc.output_bufwriter = Box::new(io::BufWriter::new(lz4::EncoderBuilder::new()
.level(3).build(fs::File::create(temporary_filename).unwrap()
).unwrap()));
}
rc.write_header(vw_map).unwrap();
}
}
rc
}
pub fn push_record(&mut self, record_buf: &[u32]) -> Result<(), Box<dyn Error>> {
if self.writing {
let element_size = mem::size_of::<u32>();
unsafe {
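                // View the u32 record as its raw bytes so it can be written
                // directly; sound because `u32` has no padding and `u8` has
                // alignment 1.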
let vv:&[u8] = slice::from_raw_parts(record_buf.as_ptr() as *const u8,
record_buf.len() * element_size) ;
self.output_bufwriter.write_all(&vv)?;
}
}
Ok(())
}
pub fn write_finish(&mut self) -> Result<(), Box<dyn Error>> {
if self.writing {
self.output_bufwriter.flush()?;
fs::rename(&self.temporary_filename, &self.final_filename)?;
}
Ok(())
}
pub fn write_header(&mut self, vw_map: &vwmap::VwNamespaceMap) -> Result<(), Box<dyn Error>> {
self.output_bufwriter.write_all(CACHE_HEADER_MAGIC_STRING)?;
self.output_bufwriter.write_u32::<LittleEndian>(CACHE_HEADER_VERSION)?;
vw_map.save_to_buf(&mut self.output_bufwriter)?;
Ok(())
}
pub fn verify_header(&mut self, vwmap: &vwmap::VwNamespaceMap) -> Result<(), Box<dyn Error>> {
let mut magic_string: [u8; 4] = [0;4];
        self.input_bufreader.read_exact(&mut magic_string)?;
if &magic_string != CACHE_HEADER_MAGIC_STRING {
return Err("Cache header does not begin with magic bytes FWFW")?;
}
let version = self.input_bufreader.read_u32::<LittleEndian>()?;
if CACHE_HEADER_VERSION != version {
return Err(format!("Cache file version of this binary: {}, version of the cache file: {}", CACHE_HEADER_VERSION, version))?;
}
// Compare vwmap in cache and the one we've been given. If they differ, rebuild cache
let vwmap_from_cache = vwmap::VwNamespaceMap::new_from_buf(&mut self.input_bufreader)?;
if vwmap_from_cache.vw_source != vwmap.vw_source {
return Err("vw_namespace_map.csv and the one from cache file differ")?;
}
Ok(())
}
pub fn get_next_record(&mut self) -> Result<&[u32], Box<dyn Error>> {
if !self.reading {
return Err("next_recrod() called on reading cache, when not opened in reading mode")?;
}
unsafe {
// We're going to cast another view over the data, so we can read it as u32
// This requires that the allocator we're using gives us sufficiently-aligned bytes,
// but that's not guaranteed, so blow up to avoid UB if the allocator uses that freedom.
assert_eq!(self.byte_buffer.as_ptr() as usize % mem::align_of::<u32>(), 0);
let buf_view:&[u32] = slice::from_raw_parts(self.byte_buffer.as_ptr() as *const u32, READBUF_LEN/4);
loop {
// Classical buffer strategy:
// Return if you have full record in buffer,
// Otherwise shift the buffer and backfill it
if self.end_pointer - self.start_pointer >= 4 {
                    let record_len = buf_view[self.start_pointer / 4] as usize;
if self.start_pointer + record_len * 4 <= self.end_pointer {
let ret_buf = &buf_view[self.start_pointer/4..self.start_pointer/4 + record_len];
self.start_pointer += record_len * 4;
return Ok(ret_buf);
}
}
self.byte_buffer.copy_within(self.start_pointer..self.end_pointer, 0);
self.end_pointer -= self.start_pointer;
self.start_pointer = 0;
let read_len = match self.input_bufreader.read(&mut self.byte_buffer[self.end_pointer..READBUF_LEN]) {
Ok(0) => return Ok(&[]),
Ok(n) => n,
Err(e) => Err(e)?
};
self.end_pointer += read_len;
self.total_read += read_len;
}
}
}
} | 40.880383 | 147 | 0.540145 |
6a698fc5bddf10efba28a367c852379bee561abd | 13,443 | use crate::field::{FieldElement, FIELD_ONE, FIELD_THREE, FIELD_TWO};
use bitvec::{order::Lsb0, slice::BitSlice};
use ff::Field;
/// An affine point on an elliptic curve over [FieldElement].
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct AffinePoint {
pub x: FieldElement,
pub y: FieldElement,
pub infinity: bool,
}
impl From<&ProjectivePoint> for AffinePoint {
fn from(p: &ProjectivePoint) -> Self {
let zinv = p.z.invert().unwrap();
let x = p.x * zinv;
let y = p.y * zinv;
AffinePoint {
x,
y,
infinity: false,
}
}
}
impl AffinePoint {
pub const fn new(x: [u64; 4], y: [u64; 4]) -> Self {
Self {
x: FieldElement::new(x),
y: FieldElement::new(y),
infinity: false,
}
}
pub fn identity() -> Self {
Self {
x: FieldElement::zero(),
y: FieldElement::zero(),
infinity: true,
}
}
pub fn double(&mut self) {
if self.infinity {
return;
}
// l = (3x^2+a)/2y with a=1 from stark curve
let lambda = {
let dividend = FIELD_THREE * (self.x * self.x) + FieldElement::one();
let divisor_inv = (FIELD_TWO * self.y).invert().unwrap();
dividend * divisor_inv
};
let result_x = (lambda * lambda) - self.x - self.x;
self.y = lambda * (self.x - result_x) - self.y;
self.x = result_x;
}
pub fn add(&mut self, other: &AffinePoint) {
if other.infinity {
return;
}
if self.infinity {
self.x = other.x;
self.y = other.y;
self.infinity = other.infinity;
return;
}
if self.x == other.x {
if self.y != other.y {
self.infinity = true;
} else {
self.double();
}
return;
}
// l = (y2-y1)/(x2-x1)
let lambda = {
let dividend = other.y - self.y;
let divisor_inv = (other.x - self.x).invert().unwrap();
dividend * divisor_inv
};
let result_x = (lambda * lambda) - self.x - other.x;
self.y = lambda * (self.x - result_x) - self.y;
self.x = result_x;
}
pub fn multiply(&self, bits: &BitSlice<Lsb0, u64>) -> AffinePoint {
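        // Left-to-right (most-significant-bit first) double-and-add; `bits` is
        // in little-endian bit order, hence the reversed iteration.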
let mut product = AffinePoint::identity();
for b in bits.iter().rev() {
product.double();
if *b {
product.add(self);
}
}
product
}
}
/// A projective point on an elliptic curve over [FieldElement].
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ProjectivePoint {
pub x: FieldElement,
pub y: FieldElement,
pub z: FieldElement,
pub infinity: bool,
}
impl From<&AffinePoint> for ProjectivePoint {
fn from(p: &AffinePoint) -> Self {
let x = p.x;
let y = p.y;
let z = FIELD_ONE;
ProjectivePoint {
x,
y,
z,
infinity: false,
}
}
}
impl ProjectivePoint {
pub fn identity() -> Self {
Self {
x: FieldElement::zero(),
y: FieldElement::zero(),
z: FIELD_ONE,
infinity: true,
}
}
pub fn double(&mut self) {
if self.infinity {
return;
}
// t=3x^2+az^2 with a=1 from stark curve
let t = FIELD_THREE * self.x * self.x + self.z * self.z;
let u = FIELD_TWO * self.y * self.z;
let v = FIELD_TWO * u * self.x * self.y;
let w = t * t - FIELD_TWO * v;
let uy = u * self.y;
let x = u * w;
let y = t * (v - w) - FIELD_TWO * uy * uy;
let z = u * u * u;
self.x = x;
self.y = y;
self.z = z;
}
pub fn add(&mut self, other: &ProjectivePoint) {
if other.infinity {
return;
}
if self.infinity {
self.x = other.x;
self.y = other.y;
self.z = other.z;
self.infinity = other.infinity;
return;
}
let u0 = self.x * other.z;
let u1 = other.x * self.z;
let t0 = self.y * other.z;
let t1 = other.y * self.z;
if u0 == u1 {
if t0 != t1 {
self.infinity = true;
} else {
self.double();
}
return;
}
let t = t0 - t1;
let u = u0 - u1;
let u2 = u * u;
let v = self.z * other.z;
let w = t * t * v - u2 * (u0 + u1);
let u3 = u * u2;
let x = u * w;
let y = t * (u0 * u2 - w) - t0 * u3;
let z = u3 * v;
self.x = x;
self.y = y;
self.z = z;
}
pub fn add_affine(&mut self, other: &AffinePoint) {
if other.infinity {
return;
}
if self.infinity {
self.x = other.x;
self.y = other.y;
self.z = FIELD_ONE;
self.infinity = other.infinity;
return;
}
let u0 = self.x;
let u1 = other.x * self.z;
let t0 = self.y;
let t1 = other.y * self.z;
if u0 == u1 {
if t0 != t1 {
self.infinity = true;
return;
} else {
self.double();
return;
}
}
let t = t0 - t1;
let u = u0 - u1;
let u2 = u * u;
let v = self.z;
let w = t * t * v - u2 * (u0 + u1);
let u3 = u * u2;
let x = u * w;
let y = t * (u0 * u2 - w) - t0 * u3;
let z = u3 * v;
self.x = x;
self.y = y;
self.z = z;
}
pub fn multiply(&self, bits: &BitSlice<Lsb0, u64>) -> ProjectivePoint {
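        // Left-to-right (most-significant-bit first) double-and-add; `bits` is
        // in little-endian bit order, hence the reversed iteration.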
let mut product = ProjectivePoint::identity();
for b in bits.iter().rev() {
product.double();
if *b {
product.add(self);
}
}
product
}
}
/// Montgomery representation of the Stark curve generator G.
pub const CURVE_G: ProjectivePoint = ProjectivePoint {
x: FieldElement::new([
14484022957141291997,
5884444832209845738,
299981207024966779,
232005955912912577,
]),
y: FieldElement::new([
6241159653446987914,
664812301889158119,
18147424675297964973,
405578048423154473,
]),
z: FIELD_ONE,
infinity: false,
};
/// Montgomery representation of the Stark curve constant P0.
pub const PEDERSEN_P0: ProjectivePoint = ProjectivePoint {
x: FieldElement::new([
1933903796324928314,
7739989395386261137,
1641324389046377921,
316327189671755572,
]),
y: FieldElement::new([
14252083571674603243,
12587053260418384210,
4798858472748676776,
81375596133053150,
]),
z: FIELD_ONE,
infinity: false,
};
/// Montgomery representation of the Stark curve constant P1.
pub const PEDERSEN_P1: ProjectivePoint = ProjectivePoint {
x: FieldElement::new([
3602345268353203007,
13758484295849329960,
518715844721862878,
241691544791834578,
]),
y: FieldElement::new([
13441546676070136227,
13001553326386915570,
433857700841878496,
368891789801938570,
]),
z: FIELD_ONE,
infinity: false,
};
/// Montgomery representation of the Stark curve constant P2.
pub const PEDERSEN_P2: ProjectivePoint = ProjectivePoint {
x: FieldElement::new([
16491878934996302286,
12382025591154462459,
10043949394709899044,
253000153565733272,
]),
y: FieldElement::new([
13950428914333633429,
2545498000137298346,
5191292837124484988,
285630633187035523,
]),
z: FIELD_ONE,
infinity: false,
};
/// Montgomery representation of the Stark curve constant P3.
pub const PEDERSEN_P3: ProjectivePoint = ProjectivePoint {
x: FieldElement::new([
1203723169299412240,
18195981508842736832,
12916675983929588442,
338510149841406402,
]),
y: FieldElement::new([
12352616181161700245,
11743524503750604092,
11088962269971685343,
161068411212710156,
]),
z: FIELD_ONE,
infinity: false,
};
/// Montgomery representation of the Stark curve constant P4.
pub const PEDERSEN_P4: ProjectivePoint = ProjectivePoint {
x: FieldElement::new([
1145636535101238356,
10664803185694787051,
299781701614706065,
425493972656615276,
]),
y: FieldElement::new([
8187986478389849302,
4428713245976508844,
6033691581221864148,
345457391846365716,
]),
z: FIELD_ONE,
infinity: false,
};
#[cfg(test)]
mod tests {
use super::*;
use ff::PrimeField;
use pretty_assertions::assert_eq;
fn affine_from_xy_str(x: &str, y: &str) -> AffinePoint {
let x = FieldElement::from_str_vartime(x).expect("Curve x-value invalid");
let y = FieldElement::from_str_vartime(y).expect("Curve y-value invalid");
AffinePoint {
x,
y,
infinity: false,
}
}
fn projective_from_xy_str(x: &str, y: &str) -> ProjectivePoint {
let x = FieldElement::from_str_vartime(x).expect("Curve x-value invalid");
let y = FieldElement::from_str_vartime(y).expect("Curve y-value invalid");
ProjectivePoint {
x,
y,
z: FIELD_ONE,
infinity: false,
}
}
#[test]
fn projective_double() {
let g_double = {
let mut g = CURVE_G;
g.double();
AffinePoint::from(&g)
};
let expected = affine_from_xy_str(
"3324833730090626974525872402899302150520188025637965566623476530814354734325",
"3147007486456030910661996439995670279305852583596209647900952752170983517249",
);
assert_eq!(g_double, expected);
}
#[test]
fn projective_double_and_add() {
let g_triple = {
let mut g = CURVE_G;
g.double();
g.add(&CURVE_G);
AffinePoint::from(&g)
};
let expected = affine_from_xy_str(
"1839793652349538280924927302501143912227271479439798783640887258675143576352",
"3564972295958783757568195431080951091358810058262272733141798511604612925062",
);
assert_eq!(g_triple, expected);
}
#[test]
fn projective_multiply() {
let three = FIELD_THREE.into_bits();
let g = CURVE_G;
let g_triple = AffinePoint::from(&g.multiply(&three));
let expected = affine_from_xy_str(
"1839793652349538280924927302501143912227271479439798783640887258675143576352",
"3564972295958783757568195431080951091358810058262272733141798511604612925062",
);
assert_eq!(g_triple, expected);
}
#[test]
fn affine_projective_multiply() {
let three = FIELD_THREE.into_bits();
let ag = AffinePoint::from(&CURVE_G);
let ag_triple = ag.multiply(&three);
let pg = ProjectivePoint::from(&ag);
let pg_triple = pg.multiply(&three);
let result = AffinePoint::from(&pg_triple);
assert_eq!(ag_triple.x, result.x);
}
#[test]
fn const_generator() {
let expected = projective_from_xy_str(
"874739451078007766457464989774322083649278607533249481151382481072868806602",
"152666792071518830868575557812948353041420400780739481342941381225525861407",
);
assert_eq!(CURVE_G, expected);
}
#[test]
fn const_p0() {
let expected = projective_from_xy_str(
"2089986280348253421170679821480865132823066470938446095505822317253594081284",
"1713931329540660377023406109199410414810705867260802078187082345529207694986",
);
assert_eq!(PEDERSEN_P0, expected);
}
#[test]
fn const_p1() {
let expected = projective_from_xy_str(
"996781205833008774514500082376783249102396023663454813447423147977397232763",
"1668503676786377725805489344771023921079126552019160156920634619255970485781",
);
assert_eq!(PEDERSEN_P1, expected);
}
#[test]
fn const_p2() {
let expected = projective_from_xy_str(
"2251563274489750535117886426533222435294046428347329203627021249169616184184",
"1798716007562728905295480679789526322175868328062420237419143593021674992973",
);
assert_eq!(PEDERSEN_P2, expected);
}
#[test]
fn const_p3() {
let expected = projective_from_xy_str(
"2138414695194151160943305727036575959195309218611738193261179310511854807447",
"113410276730064486255102093846540133784865286929052426931474106396135072156",
);
assert_eq!(PEDERSEN_P3, expected);
}
#[test]
fn const_p4() {
let expected = projective_from_xy_str(
"2379962749567351885752724891227938183011949129833673362440656643086021394946",
"776496453633298175483985398648758586525933812536653089401905292063708816422",
);
assert_eq!(PEDERSEN_P4, expected);
}
}
| 27.04829 | 91 | 0.553522 |
505123c69d3ea7abb3ea507e52415a5dda8450b3 | 13,441 | // WARNING: This file was autogenerated by jni-bindgen. Any changes to this file may be lost!!!
#[cfg(any(feature = "all", feature = "android-hardware-camera2-CaptureRequest_Builder"))]
__jni_bindgen! {
/// public final class [CaptureRequest.Builder](https://developer.android.com/reference/android/hardware/camera2/CaptureRequest.Builder.html)
///
/// Required feature: android-hardware-camera2-CaptureRequest_Builder
public final class CaptureRequest_Builder ("android/hardware/camera2/CaptureRequest$Builder") extends crate::java::lang::Object {
// // Not emitting: Non-public method
// /// [Builder](https://developer.android.com/reference/android/hardware/camera2/CaptureRequest.Builder.html#Builder())
// fn new<'env>(__jni_env: &'env __jni_bindgen::Env) -> __jni_bindgen::std::result::Result<__jni_bindgen::Local<'env, crate::android::hardware::camera2::CaptureRequest_Builder>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// // class.path == "android/hardware/camera2/CaptureRequest$Builder", java.flags == (empty), .name == "<init>", .descriptor == "()V"
// unsafe {
// let __jni_args = [];
// let (__jni_class, __jni_method) = __jni_env.require_class_method("android/hardware/camera2/CaptureRequest$Builder\0", "<init>\0", "()V\0");
// __jni_env.new_object_a(__jni_class, __jni_method, __jni_args.as_ptr())
// }
// }
/// [addTarget](https://developer.android.com/reference/android/hardware/camera2/CaptureRequest.Builder.html#addTarget(android.view.Surface))
///
/// Required features: "android-view-Surface"
#[cfg(any(feature = "all", all(feature = "android-view-Surface")))]
pub fn addTarget<'env>(&'env self, arg0: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::android::view::Surface>>) -> __jni_bindgen::std::result::Result<(), __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// class.path == "android/hardware/camera2/CaptureRequest$Builder", java.flags == PUBLIC, .name == "addTarget", .descriptor == "(Landroid/view/Surface;)V"
unsafe {
let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0.into())];
let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env);
let (__jni_class, __jni_method) = __jni_env.require_class_method("android/hardware/camera2/CaptureRequest$Builder\0", "addTarget\0", "(Landroid/view/Surface;)V\0");
__jni_env.call_void_method_a(self.0.object, __jni_method, __jni_args.as_ptr())
}
}
/// [removeTarget](https://developer.android.com/reference/android/hardware/camera2/CaptureRequest.Builder.html#removeTarget(android.view.Surface))
///
/// Required features: "android-view-Surface"
#[cfg(any(feature = "all", all(feature = "android-view-Surface")))]
pub fn removeTarget<'env>(&'env self, arg0: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::android::view::Surface>>) -> __jni_bindgen::std::result::Result<(), __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// class.path == "android/hardware/camera2/CaptureRequest$Builder", java.flags == PUBLIC, .name == "removeTarget", .descriptor == "(Landroid/view/Surface;)V"
unsafe {
let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0.into())];
let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env);
let (__jni_class, __jni_method) = __jni_env.require_class_method("android/hardware/camera2/CaptureRequest$Builder\0", "removeTarget\0", "(Landroid/view/Surface;)V\0");
__jni_env.call_void_method_a(self.0.object, __jni_method, __jni_args.as_ptr())
}
}
/// [set](https://developer.android.com/reference/android/hardware/camera2/CaptureRequest.Builder.html#set(android.hardware.camera2.CaptureRequest.Key,%20java.lang.Object))
///
/// Required features: "android-hardware-camera2-CaptureRequest_Key", "java-lang-Object"
#[cfg(any(feature = "all", all(feature = "android-hardware-camera2-CaptureRequest_Key", feature = "java-lang-Object")))]
pub fn set<'env>(&'env self, arg0: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::android::hardware::camera2::CaptureRequest_Key>>, arg1: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::java::lang::Object>>) -> __jni_bindgen::std::result::Result<(), __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// class.path == "android/hardware/camera2/CaptureRequest$Builder", java.flags == PUBLIC, .name == "set", .descriptor == "(Landroid/hardware/camera2/CaptureRequest$Key;Ljava/lang/Object;)V"
unsafe {
let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0.into()), __jni_bindgen::AsJValue::as_jvalue(&arg1.into())];
let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env);
let (__jni_class, __jni_method) = __jni_env.require_class_method("android/hardware/camera2/CaptureRequest$Builder\0", "set\0", "(Landroid/hardware/camera2/CaptureRequest$Key;Ljava/lang/Object;)V\0");
__jni_env.call_void_method_a(self.0.object, __jni_method, __jni_args.as_ptr())
}
}
/// [get](https://developer.android.com/reference/android/hardware/camera2/CaptureRequest.Builder.html#get(android.hardware.camera2.CaptureRequest.Key))
///
/// Required features: "android-hardware-camera2-CaptureRequest_Key", "java-lang-Object"
#[cfg(any(feature = "all", all(feature = "android-hardware-camera2-CaptureRequest_Key", feature = "java-lang-Object")))]
pub fn get<'env>(&'env self, arg0: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::android::hardware::camera2::CaptureRequest_Key>>) -> __jni_bindgen::std::result::Result<__jni_bindgen::std::option::Option<__jni_bindgen::Local<'env, crate::java::lang::Object>>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// class.path == "android/hardware/camera2/CaptureRequest$Builder", java.flags == PUBLIC, .name == "get", .descriptor == "(Landroid/hardware/camera2/CaptureRequest$Key;)Ljava/lang/Object;"
unsafe {
let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0.into())];
let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env);
let (__jni_class, __jni_method) = __jni_env.require_class_method("android/hardware/camera2/CaptureRequest$Builder\0", "get\0", "(Landroid/hardware/camera2/CaptureRequest$Key;)Ljava/lang/Object;\0");
__jni_env.call_object_method_a(self.0.object, __jni_method, __jni_args.as_ptr())
}
}
/// [setPhysicalCameraKey](https://developer.android.com/reference/android/hardware/camera2/CaptureRequest.Builder.html#setPhysicalCameraKey(android.hardware.camera2.CaptureRequest.Key,%20java.lang.Object,%20java.lang.String))
///
/// Required features: "android-hardware-camera2-CaptureRequest_Builder", "android-hardware-camera2-CaptureRequest_Key", "java-lang-Object", "java-lang-String"
#[cfg(any(feature = "all", all(feature = "android-hardware-camera2-CaptureRequest_Builder", feature = "android-hardware-camera2-CaptureRequest_Key", feature = "java-lang-Object", feature = "java-lang-String")))]
pub fn setPhysicalCameraKey<'env>(&'env self, arg0: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::android::hardware::camera2::CaptureRequest_Key>>, arg1: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::java::lang::Object>>, arg2: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::java::lang::String>>) -> __jni_bindgen::std::result::Result<__jni_bindgen::std::option::Option<__jni_bindgen::Local<'env, crate::android::hardware::camera2::CaptureRequest_Builder>>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// class.path == "android/hardware/camera2/CaptureRequest$Builder", java.flags == PUBLIC, .name == "setPhysicalCameraKey", .descriptor == "(Landroid/hardware/camera2/CaptureRequest$Key;Ljava/lang/Object;Ljava/lang/String;)Landroid/hardware/camera2/CaptureRequest$Builder;"
unsafe {
let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0.into()), __jni_bindgen::AsJValue::as_jvalue(&arg1.into()), __jni_bindgen::AsJValue::as_jvalue(&arg2.into())];
let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env);
let (__jni_class, __jni_method) = __jni_env.require_class_method("android/hardware/camera2/CaptureRequest$Builder\0", "setPhysicalCameraKey\0", "(Landroid/hardware/camera2/CaptureRequest$Key;Ljava/lang/Object;Ljava/lang/String;)Landroid/hardware/camera2/CaptureRequest$Builder;\0");
__jni_env.call_object_method_a(self.0.object, __jni_method, __jni_args.as_ptr())
}
}
/// [getPhysicalCameraKey](https://developer.android.com/reference/android/hardware/camera2/CaptureRequest.Builder.html#getPhysicalCameraKey(android.hardware.camera2.CaptureRequest.Key,%20java.lang.String))
///
/// Required features: "android-hardware-camera2-CaptureRequest_Key", "java-lang-Object", "java-lang-String"
#[cfg(any(feature = "all", all(feature = "android-hardware-camera2-CaptureRequest_Key", feature = "java-lang-Object", feature = "java-lang-String")))]
pub fn getPhysicalCameraKey<'env>(&'env self, arg0: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::android::hardware::camera2::CaptureRequest_Key>>, arg1: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::java::lang::String>>) -> __jni_bindgen::std::result::Result<__jni_bindgen::std::option::Option<__jni_bindgen::Local<'env, crate::java::lang::Object>>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// class.path == "android/hardware/camera2/CaptureRequest$Builder", java.flags == PUBLIC, .name == "getPhysicalCameraKey", .descriptor == "(Landroid/hardware/camera2/CaptureRequest$Key;Ljava/lang/String;)Ljava/lang/Object;"
unsafe {
let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0.into()), __jni_bindgen::AsJValue::as_jvalue(&arg1.into())];
let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env);
let (__jni_class, __jni_method) = __jni_env.require_class_method("android/hardware/camera2/CaptureRequest$Builder\0", "getPhysicalCameraKey\0", "(Landroid/hardware/camera2/CaptureRequest$Key;Ljava/lang/String;)Ljava/lang/Object;\0");
__jni_env.call_object_method_a(self.0.object, __jni_method, __jni_args.as_ptr())
}
}
/// [setTag](https://developer.android.com/reference/android/hardware/camera2/CaptureRequest.Builder.html#setTag(java.lang.Object))
///
/// Required features: "java-lang-Object"
#[cfg(any(feature = "all", all(feature = "java-lang-Object")))]
pub fn setTag<'env>(&'env self, arg0: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::java::lang::Object>>) -> __jni_bindgen::std::result::Result<(), __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// class.path == "android/hardware/camera2/CaptureRequest$Builder", java.flags == PUBLIC, .name == "setTag", .descriptor == "(Ljava/lang/Object;)V"
unsafe {
let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0.into())];
let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env);
let (__jni_class, __jni_method) = __jni_env.require_class_method("android/hardware/camera2/CaptureRequest$Builder\0", "setTag\0", "(Ljava/lang/Object;)V\0");
__jni_env.call_void_method_a(self.0.object, __jni_method, __jni_args.as_ptr())
}
}
/// [build](https://developer.android.com/reference/android/hardware/camera2/CaptureRequest.Builder.html#build())
///
/// Required features: "android-hardware-camera2-CaptureRequest"
#[cfg(any(feature = "all", all(feature = "android-hardware-camera2-CaptureRequest")))]
pub fn build<'env>(&'env self) -> __jni_bindgen::std::result::Result<__jni_bindgen::std::option::Option<__jni_bindgen::Local<'env, crate::android::hardware::camera2::CaptureRequest>>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// class.path == "android/hardware/camera2/CaptureRequest$Builder", java.flags == PUBLIC, .name == "build", .descriptor == "()Landroid/hardware/camera2/CaptureRequest;"
unsafe {
let __jni_args = [];
let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env);
let (__jni_class, __jni_method) = __jni_env.require_class_method("android/hardware/camera2/CaptureRequest$Builder\0", "build\0", "()Landroid/hardware/camera2/CaptureRequest;\0");
__jni_env.call_object_method_a(self.0.object, __jni_method, __jni_args.as_ptr())
}
}
}
}
| 99.562963 | 647 | 0.683282 |
3397c5515e851503656c0de79f3ccb6123037074 | 3,620 | /* automatically generated by rust-bindgen */
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
pub struct __BindgenUnionField<T>(::std::marker::PhantomData<T>);
impl<T> __BindgenUnionField<T> {
#[inline]
pub fn new() -> Self {
__BindgenUnionField(::std::marker::PhantomData)
}
#[inline]
pub unsafe fn as_ref(&self) -> &T {
::std::mem::transmute(self)
}
#[inline]
pub unsafe fn as_mut(&mut self) -> &mut T {
::std::mem::transmute(self)
}
}
impl<T> ::std::default::Default for __BindgenUnionField<T> {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl<T> ::std::clone::Clone for __BindgenUnionField<T> {
#[inline]
fn clone(&self) -> Self {
Self::new()
}
}
impl<T> ::std::marker::Copy for __BindgenUnionField<T> {}
impl<T> ::std::fmt::Debug for __BindgenUnionField<T> {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
fmt.write_str("__BindgenUnionField")
}
}
impl<T> ::std::hash::Hash for __BindgenUnionField<T> {
fn hash<H: ::std::hash::Hasher>(&self, _state: &mut H) {}
}
impl<T> ::std::cmp::PartialEq for __BindgenUnionField<T> {
fn eq(&self, _other: &__BindgenUnionField<T>) -> bool {
true
}
}
impl<T> ::std::cmp::Eq for __BindgenUnionField<T> {}
#[repr(C)]
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct TErrorResult {
pub mResult: ::std::os::raw::c_int,
pub __bindgen_anon_1: TErrorResult__bindgen_ty_1,
pub mMightHaveUnreported: bool,
pub mUnionState: TErrorResult_UnionState,
}
pub const TErrorResult_UnionState_HasException: TErrorResult_UnionState =
TErrorResult_UnionState::HasMessage;
#[repr(i32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum TErrorResult_UnionState {
HasMessage = 0,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct TErrorResult_Message {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct TErrorResult_DOMExceptionInfo {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Hash, PartialEq, Eq)]
pub struct TErrorResult__bindgen_ty_1 {
pub mMessage: __BindgenUnionField<*mut TErrorResult_Message>,
pub mDOMExceptionInfo:
__BindgenUnionField<*mut TErrorResult_DOMExceptionInfo>,
pub bindgen_union_field: u64,
}
impl Default for TErrorResult {
fn default() -> Self {
unsafe { ::std::mem::zeroed() }
}
}
#[repr(C)]
#[derive(Debug, Copy, Hash, PartialEq, Eq)]
pub struct ErrorResult {
pub _base: TErrorResult,
}
#[test]
fn bindgen_test_layout_ErrorResult() {
assert_eq!(
::std::mem::size_of::<ErrorResult>(),
24usize,
concat!("Size of: ", stringify!(ErrorResult))
);
assert_eq!(
::std::mem::align_of::<ErrorResult>(),
8usize,
concat!("Alignment of ", stringify!(ErrorResult))
);
}
impl Clone for ErrorResult {
fn clone(&self) -> Self {
*self
}
}
impl Default for ErrorResult {
fn default() -> Self {
unsafe { ::std::mem::zeroed() }
}
}
#[test]
fn __bindgen_test_layout_TErrorResult_open0_int_close0_instantiation() {
assert_eq!(
::std::mem::size_of::<TErrorResult>(),
24usize,
concat!(
"Size of template specialization: ",
stringify!(TErrorResult)
)
);
assert_eq!(
::std::mem::align_of::<TErrorResult>(),
8usize,
concat!(
"Alignment of template specialization: ",
stringify!(TErrorResult)
)
);
}
| 26.231884 | 78 | 0.623757 |
09e63774c8d89c6f69e94e873ae207b575a0352d | 1,877 | #![no_main]
extern crate all_is_cubes;
use all_is_cubes::apps::Tick;
use all_is_cubes::cgmath::{InnerSpace, Point3, Vector3};
use all_is_cubes::character::Character;
use all_is_cubes::math::{Aab, FreeCoordinate, NotNan};
use all_is_cubes::space::Space;
use all_is_cubes::universe::Universe;
use all_is_cubes::util::{ConciseDebug, CustomFormat};
use libfuzzer_sys::fuzz_target;
fuzz_target!(|input: ([FreeCoordinate; 3], [FreeCoordinate; 3], Space)| {
let (position, velocity, space) = input;
let interesting_bounds_aab = Aab::from(space.grid()).expand(10.0);
// TODO: write a proper Arbitrary impl on a wrapper
let position: Point3<FreeCoordinate> = position.into();
let velocity: Vector3<FreeCoordinate> = velocity.into();
if space.physics().gravity.map(NotNan::into_inner).magnitude() > 100. {
return;
}
println!(
"{} {}",
position.custom_format(ConciseDebug),
velocity.custom_format(ConciseDebug)
);
let mut universe = Universe::new();
let space_ref = universe.insert_anonymous(space);
let mut character = Character::spawn_default(space_ref);
character.body.position = position;
character.body.velocity = velocity;
for i in 0..5000 {
if !interesting_bounds_aab.contains(character.body.position) {
// Flying out of bounds is not interesting.
return;
}
// dbg!((i, character.body.position));
let (info, _tx) = character.step(None, Tick::arbitrary());
// dbg!(info);
// Check for no push out, but not on the first step, which might have been due to initial
// placement in a bad location.
if i != 0 {
assert_eq!(
info.expect("should be making body steps").push_out,
None,
"triggered push_out"
);
}
}
});
| 32.362069 | 97 | 0.636654 |
614453830c63a47f392abd635485a8ffda83f6a3 | 31,142 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use arena::TypedArena;
use back::link;
use llvm::{ValueRef, get_params};
use middle::def_id::DefId;
use middle::subst::{Subst, Substs};
use middle::subst::VecPerParamSpace;
use middle::subst;
use middle::traits;
use trans::base::*;
use trans::build::*;
use trans::callee::*;
use trans::callee;
use trans::cleanup;
use trans::closure;
use trans::common::*;
use trans::consts;
use trans::datum::*;
use trans::debuginfo::DebugLoc;
use trans::declare;
use trans::expr::SaveIn;
use trans::expr;
use trans::glue;
use trans::machine;
use trans::monomorphize;
use trans::type_::Type;
use trans::type_of::*;
use middle::ty::{self, Ty, HasTypeFlags};
use middle::ty::MethodCall;
use syntax::ast;
use syntax::attr;
use syntax::codemap::DUMMY_SP;
use syntax::ptr::P;
use rustc_front::visit;
use rustc_front::hir;
// drop_glue pointer, size, align.
const VTABLE_OFFSET: usize = 3;
/// The main "translation" pass for methods. Generates code
/// for non-monomorphized methods only. Other methods will
/// be generated once they are invoked with specific type parameters,
/// see `trans::base::lval_static_fn()` or `trans::base::monomorphic_fn()`.
pub fn trans_impl(ccx: &CrateContext,
name: ast::Name,
impl_items: &[P<hir::ImplItem>],
generics: &hir::Generics,
id: ast::NodeId) {
let _icx = push_ctxt("meth::trans_impl");
let tcx = ccx.tcx();
debug!("trans_impl(name={}, id={})", name, id);
let mut v = TransItemVisitor { ccx: ccx };
// Both here and below with generic methods, be sure to recurse and look for
// items that we need to translate.
if !generics.ty_params.is_empty() {
for impl_item in impl_items {
match impl_item.node {
hir::MethodImplItem(..) => {
visit::walk_impl_item(&mut v, impl_item);
}
_ => {}
}
}
return;
}
for impl_item in impl_items {
match impl_item.node {
hir::MethodImplItem(ref sig, ref body) => {
if sig.generics.ty_params.is_empty() {
let trans_everywhere = attr::requests_inline(&impl_item.attrs);
for (ref ccx, is_origin) in ccx.maybe_iter(trans_everywhere) {
let llfn = get_item_val(ccx, impl_item.id);
let empty_substs = tcx.mk_substs(Substs::trans_empty());
trans_fn(ccx, &sig.decl, body, llfn,
empty_substs, impl_item.id, &[]);
update_linkage(ccx,
llfn,
Some(impl_item.id),
if is_origin { OriginalTranslation } else { InlinedCopy });
}
}
visit::walk_impl_item(&mut v, impl_item);
}
_ => {}
}
}
}
pub fn trans_method_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
method_call: MethodCall,
self_expr: Option<&hir::Expr>,
arg_cleanup_scope: cleanup::ScopeId)
-> Callee<'blk, 'tcx> {
let _icx = push_ctxt("meth::trans_method_callee");
let method = bcx.tcx().tables.borrow().method_map[&method_call];
match bcx.tcx().impl_or_trait_item(method.def_id).container() {
ty::ImplContainer(_) => {
debug!("trans_method_callee: static, {:?}", method.def_id);
let datum = callee::trans_fn_ref(bcx.ccx(),
method.def_id,
MethodCallKey(method_call),
bcx.fcx.param_substs);
Callee {
bcx: bcx,
data: Fn(datum.val),
ty: datum.ty
}
}
ty::TraitContainer(trait_def_id) => {
let trait_substs = method.substs.clone().method_to_trait();
let trait_substs = bcx.tcx().mk_substs(trait_substs);
let trait_ref = ty::TraitRef::new(trait_def_id, trait_substs);
let trait_ref = ty::Binder(bcx.monomorphize(&trait_ref));
let span = bcx.tcx().map.span(method_call.expr_id);
debug!("method_call={:?} trait_ref={:?} trait_ref id={:?} substs={:?}",
method_call,
trait_ref,
trait_ref.0.def_id,
trait_ref.0.substs);
let origin = fulfill_obligation(bcx.ccx(),
span,
trait_ref.clone());
debug!("origin = {:?}", origin);
trans_monomorphized_callee(bcx,
method_call,
self_expr,
trait_def_id,
method.def_id,
method.ty,
origin,
arg_cleanup_scope)
}
}
}
pub fn trans_static_method_callee<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
method_id: DefId,
trait_id: DefId,
expr_id: ast::NodeId,
param_substs: &'tcx subst::Substs<'tcx>)
-> Datum<'tcx, Rvalue>
{
let _icx = push_ctxt("meth::trans_static_method_callee");
let tcx = ccx.tcx();
debug!("trans_static_method_callee(method_id={:?}, trait_id={}, \
expr_id={})",
method_id,
tcx.item_path_str(trait_id),
expr_id);
let mname = tcx.item_name(method_id);
debug!("trans_static_method_callee: method_id={:?}, expr_id={}, \
name={}", method_id, expr_id, mname);
// Find the substitutions for the fn itself. This includes
// type parameters that belong to the trait but also some that
// belong to the method:
let rcvr_substs = node_id_substs(ccx, ExprId(expr_id), param_substs);
let subst::SeparateVecsPerParamSpace {
types: rcvr_type,
selfs: rcvr_self,
fns: rcvr_method
} = rcvr_substs.types.split();
// Lookup the precise impl being called. To do that, we need to
// create a trait reference identifying the self type and other
// input type parameters. To create that trait reference, we have
// to pick apart the type parameters to identify just those that
// pertain to the trait. This is easiest to explain by example:
//
// trait Convert {
// fn from<U:Foo>(n: U) -> Option<Self>;
// }
// ...
// let f = <Vec<int> as Convert>::from::<String>(...)
//
// Here, in this call, which I've written with explicit UFCS
// notation, the set of type parameters will be:
//
// rcvr_type: [] <-- nothing declared on the trait itself
// rcvr_self: [Vec<int>] <-- the self type
// rcvr_method: [String] <-- method type parameter
//
// So we create a trait reference using the first two,
// basically corresponding to `<Vec<int> as Convert>`.
// The remaining type parameters (`rcvr_method`) will be used below.
let trait_substs =
Substs::erased(VecPerParamSpace::new(rcvr_type,
rcvr_self,
Vec::new()));
let trait_substs = tcx.mk_substs(trait_substs);
debug!("trait_substs={:?}", trait_substs);
let trait_ref = ty::Binder(ty::TraitRef::new(trait_id, trait_substs));
let vtbl = fulfill_obligation(ccx,
DUMMY_SP,
trait_ref);
// Now that we know which impl is being used, we can dispatch to
// the actual function:
match vtbl {
traits::VtableImpl(traits::VtableImplData {
impl_def_id: impl_did,
substs: impl_substs,
nested: _ }) =>
{
assert!(!impl_substs.types.needs_infer());
// Create the substitutions that are in scope. This combines
// the type parameters from the impl with those declared earlier.
// To see what I mean, consider a possible impl:
//
// impl<T> Convert for Vec<T> {
// fn from<U:Foo>(n: U) { ... }
// }
//
// Recall that we matched `<Vec<int> as Convert>`. Trait
// resolution will have given us a substitution
// containing `impl_substs=[[T=int],[],[]]` (the type
// parameters defined on the impl). We combine
// that with the `rcvr_method` from before, which tells us
// the type parameters from the *method*, to yield
// `callee_substs=[[T=int],[],[U=String]]`.
let subst::SeparateVecsPerParamSpace {
types: impl_type,
selfs: impl_self,
fns: _
} = impl_substs.types.split();
let callee_substs =
Substs::erased(VecPerParamSpace::new(impl_type,
impl_self,
rcvr_method));
let mth = tcx.get_impl_method(impl_did, callee_substs, mname);
trans_fn_ref_with_substs(ccx, mth.method.def_id, ExprId(expr_id),
param_substs,
mth.substs)
}
traits::VtableObject(ref data) => {
let idx = traits::get_vtable_index_of_object_method(tcx, data, method_id);
trans_object_shim(ccx,
data.upcast_trait_ref.clone(),
method_id,
idx)
}
_ => {
tcx.sess.bug(&format!("static call to invalid vtable: {:?}",
vtbl));
}
}
}
fn trans_monomorphized_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
method_call: MethodCall,
self_expr: Option<&hir::Expr>,
trait_id: DefId,
method_id: DefId,
method_ty: Ty<'tcx>,
vtable: traits::Vtable<'tcx, ()>,
arg_cleanup_scope: cleanup::ScopeId)
-> Callee<'blk, 'tcx> {
let _icx = push_ctxt("meth::trans_monomorphized_callee");
match vtable {
traits::VtableImpl(vtable_impl) => {
let ccx = bcx.ccx();
let impl_did = vtable_impl.impl_def_id;
let mname = match ccx.tcx().impl_or_trait_item(method_id) {
ty::MethodTraitItem(method) => method.name,
_ => {
bcx.tcx().sess.bug("can't monomorphize a non-method trait \
item")
}
};
// create a concatenated set of substitutions which includes
// those from the impl and those from the method:
let callee_substs =
combine_impl_and_methods_tps(
bcx, MethodCallKey(method_call), vtable_impl.substs);
let mth = bcx.tcx().get_impl_method(impl_did, callee_substs, mname);
// translate the function
let datum = trans_fn_ref_with_substs(bcx.ccx(),
mth.method.def_id,
MethodCallKey(method_call),
bcx.fcx.param_substs,
mth.substs);
Callee { bcx: bcx, data: Fn(datum.val), ty: datum.ty }
}
traits::VtableClosure(vtable_closure) => {
// The substitutions should have no type parameters remaining
// after passing through fulfill_obligation
let trait_closure_kind = bcx.tcx().lang_items.fn_trait_kind(trait_id).unwrap();
let llfn = closure::trans_closure_method(bcx.ccx(),
vtable_closure.closure_def_id,
vtable_closure.substs,
trait_closure_kind);
Callee {
bcx: bcx,
data: Fn(llfn),
ty: monomorphize_type(bcx, method_ty)
}
}
traits::VtableFnPointer(fn_ty) => {
let trait_closure_kind = bcx.tcx().lang_items.fn_trait_kind(trait_id).unwrap();
let llfn = trans_fn_pointer_shim(bcx.ccx(), trait_closure_kind, fn_ty);
Callee {
bcx: bcx,
data: Fn(llfn),
ty: monomorphize_type(bcx, method_ty)
}
}
traits::VtableObject(ref data) => {
let idx = traits::get_vtable_index_of_object_method(bcx.tcx(), data, method_id);
if let Some(self_expr) = self_expr {
if let ty::TyBareFn(_, ref fty) = monomorphize_type(bcx, method_ty).sty {
let ty = bcx.tcx().mk_fn(None, opaque_method_ty(bcx.tcx(), fty));
return trans_trait_callee(bcx, ty, idx, self_expr, arg_cleanup_scope);
}
}
let datum = trans_object_shim(bcx.ccx(),
data.upcast_trait_ref.clone(),
method_id,
idx);
Callee { bcx: bcx, data: Fn(datum.val), ty: datum.ty }
}
traits::VtableBuiltin(..) |
traits::VtableDefaultImpl(..) |
traits::VtableParam(..) => {
bcx.sess().bug(
&format!("resolved vtable bad vtable {:?} in trans",
vtable));
}
}
}
/// Creates a concatenated set of substitutions which includes those from the impl and those from
/// the method. There are some subtle complications here. Statically, we have a list of type
/// parameters like `[T0, T1, T2, M1, M2, M3]` where `Tn` are type parameters that appear on the
/// receiver. For example, if the receiver is a method parameter `A` with a bound like
/// `trait<B,C,D>` then `Tn` would be `[B,C,D]`.
///
/// The weird part is that the type `A` might now be bound to any other type, such as `foo<X>`.
/// In that case, the vector we want is: `[X, M1, M2, M3]`. Therefore, what we do now is to slice
/// off the method type parameters and append them to the type parameters from the type that the
/// receiver is mapped to.
fn combine_impl_and_methods_tps<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
node: ExprOrMethodCall,
rcvr_substs: subst::Substs<'tcx>)
-> subst::Substs<'tcx>
{
let ccx = bcx.ccx();
let node_substs = node_id_substs(ccx, node, bcx.fcx.param_substs);
debug!("rcvr_substs={:?}", rcvr_substs);
debug!("node_substs={:?}", node_substs);
// Break apart the type parameters from the node and type
// parameters from the receiver.
let node_method = node_substs.types.split().fns;
let subst::SeparateVecsPerParamSpace {
types: rcvr_type,
selfs: rcvr_self,
fns: rcvr_method
} = rcvr_substs.types.clone().split();
assert!(rcvr_method.is_empty());
subst::Substs {
regions: subst::ErasedRegions,
types: subst::VecPerParamSpace::new(rcvr_type, rcvr_self, node_method)
}
}
/// Create a method callee where the method is coming from a trait object (e.g., Box<Trait> type).
/// In this case, we must pull the fn pointer out of the vtable that is packaged up with the
/// object. Objects are represented as a pair, so we first evaluate the self expression and then
/// extract the self data and vtable out of the pair.
fn trans_trait_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
opaque_fn_ty: Ty<'tcx>,
vtable_index: usize,
self_expr: &hir::Expr,
arg_cleanup_scope: cleanup::ScopeId)
-> Callee<'blk, 'tcx> {
let _icx = push_ctxt("meth::trans_trait_callee");
let mut bcx = bcx;
// Translate self_datum and take ownership of the value by
// converting to an rvalue.
let self_datum = unpack_datum!(
bcx, expr::trans(bcx, self_expr));
let llval = if bcx.fcx.type_needs_drop(self_datum.ty) {
let self_datum = unpack_datum!(
bcx, self_datum.to_rvalue_datum(bcx, "trait_callee"));
// Convert to by-ref since `trans_trait_callee_from_llval` wants it
// that way.
let self_datum = unpack_datum!(
bcx, self_datum.to_ref_datum(bcx));
// Arrange cleanup in case something should go wrong before the
// actual call occurs.
self_datum.add_clean(bcx.fcx, arg_cleanup_scope)
} else {
// We don't have to do anything about cleanups for &Trait and &mut Trait.
assert!(self_datum.kind.is_by_ref());
self_datum.val
};
let llself = Load(bcx, expr::get_dataptr(bcx, llval));
let llvtable = Load(bcx, expr::get_meta(bcx, llval));
trans_trait_callee_from_llval(bcx, opaque_fn_ty, vtable_index, llself, llvtable)
}
/// Same as `trans_trait_callee()` above, except that it is given a by-ref pointer to the object
/// pair.
fn trans_trait_callee_from_llval<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
opaque_fn_ty: Ty<'tcx>,
vtable_index: usize,
llself: ValueRef,
llvtable: ValueRef)
-> Callee<'blk, 'tcx> {
let _icx = push_ctxt("meth::trans_trait_callee");
let ccx = bcx.ccx();
// Load the data pointer from the object.
debug!("trans_trait_callee_from_llval(callee_ty={}, vtable_index={}, llself={}, llvtable={})",
opaque_fn_ty,
vtable_index,
bcx.val_to_string(llself),
bcx.val_to_string(llvtable));
// Replace the self type (&Self or Box<Self>) with an opaque pointer.
let mptr = Load(bcx, GEPi(bcx, llvtable, &[vtable_index + VTABLE_OFFSET]));
let llcallee_ty = type_of_fn_from_ty(ccx, opaque_fn_ty);
Callee {
bcx: bcx,
data: TraitItem(MethodData {
llfn: PointerCast(bcx, mptr, llcallee_ty.ptr_to()),
llself: PointerCast(bcx, llself, Type::i8p(ccx)),
}),
ty: opaque_fn_ty
}
}
/// Generate a shim function that allows an object type like `SomeTrait` to
/// implement the type `SomeTrait`. Imagine a trait definition:
///
/// trait SomeTrait { fn get(&self) -> int; ... }
///
/// And a generic bit of code:
///
/// fn foo<T:SomeTrait>(t: &T) {
/// let x = SomeTrait::get;
/// x(t)
/// }
///
/// What is the value of `x` when `foo` is invoked with `T=SomeTrait`?
/// The answer is that it is a shim function generated by this
/// routine:
///
/// fn shim(t: &SomeTrait) -> int {
/// // ... call t.get() virtually ...
/// }
///
/// In fact, all virtual calls can be thought of as normal trait calls
/// that go through this shim function.
fn trans_object_shim<'a, 'tcx>(
ccx: &'a CrateContext<'a, 'tcx>,
upcast_trait_ref: ty::PolyTraitRef<'tcx>,
method_id: DefId,
vtable_index: usize)
-> Datum<'tcx, Rvalue>
{
let _icx = push_ctxt("trans_object_shim");
let tcx = ccx.tcx();
debug!("trans_object_shim(upcast_trait_ref={:?}, method_id={:?})",
upcast_trait_ref,
method_id);
// Upcast to the trait in question and extract out the substitutions.
let upcast_trait_ref = tcx.erase_late_bound_regions(&upcast_trait_ref);
let object_substs = upcast_trait_ref.substs.clone().erase_regions();
debug!("trans_object_shim: object_substs={:?}", object_substs);
// Lookup the type of this method as declared in the trait and apply substitutions.
let method_ty = match tcx.impl_or_trait_item(method_id) {
ty::MethodTraitItem(method) => method,
_ => {
tcx.sess.bug("can't create a method shim for a non-method item")
}
};
let fty = monomorphize::apply_param_substs(tcx, &object_substs, &method_ty.fty);
let fty = tcx.mk_bare_fn(fty);
let method_ty = opaque_method_ty(tcx, fty);
debug!("trans_object_shim: fty={:?} method_ty={:?}", fty, method_ty);
let shim_fn_ty = tcx.mk_fn(None, fty);
let method_bare_fn_ty = tcx.mk_fn(None, method_ty);
let function_name = link::mangle_internal_name_by_type_and_seq(ccx, shim_fn_ty, "object_shim");
let llfn = declare::define_internal_rust_fn(ccx, &function_name, shim_fn_ty);
let sig = ccx.tcx().erase_late_bound_regions(&fty.sig);
let empty_substs = tcx.mk_substs(Substs::trans_empty());
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = new_fn_ctxt(ccx,
llfn,
ast::DUMMY_NODE_ID,
false,
sig.output,
empty_substs,
None,
&block_arena);
let mut bcx = init_function(&fcx, false, sig.output);
let llargs = get_params(fcx.llfn);
let self_idx = fcx.arg_offset();
let llself = llargs[self_idx];
let llvtable = llargs[self_idx + 1];
debug!("trans_object_shim: llself={}, llvtable={}",
bcx.val_to_string(llself), bcx.val_to_string(llvtable));
assert!(!fcx.needs_ret_allocas);
let dest =
fcx.llretslotptr.get().map(
|_| expr::SaveIn(fcx.get_ret_slot(bcx, sig.output, "ret_slot")));
debug!("trans_object_shim: method_offset_in_vtable={}",
vtable_index);
bcx = trans_call_inner(bcx,
DebugLoc::None,
|bcx, _| trans_trait_callee_from_llval(bcx,
method_bare_fn_ty,
vtable_index,
llself, llvtable),
ArgVals(&llargs[(self_idx + 2)..]),
dest).bcx;
finish_fn(&fcx, bcx, sig.output, DebugLoc::None);
immediate_rvalue(llfn, shim_fn_ty)
}
/// Creates and returns a dynamic vtable for the given type and vtable origin.
/// This is used only for objects.
///
/// The `trait_ref` encodes the erased self type. Hence if we are
/// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
/// `trait_ref` would map `T:Trait`.
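///
/// The vtable emitted here is a constant struct whose layout is (see
/// `VTABLE_OFFSET` at the top of this file):
///
///     [ drop_glue | size | align | method 0 | method 1 | ... ]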
pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>,
param_substs: &'tcx subst::Substs<'tcx>)
-> ValueRef
{
let tcx = ccx.tcx();
let _icx = push_ctxt("meth::get_vtable");
debug!("get_vtable(trait_ref={:?})", trait_ref);
// Check the cache.
match ccx.vtables().borrow().get(&trait_ref) {
Some(&val) => { return val }
None => { }
}
// Not in the cache. Build it.
let methods = traits::supertraits(tcx, trait_ref.clone()).flat_map(|trait_ref| {
let vtable = fulfill_obligation(ccx, DUMMY_SP, trait_ref.clone());
match vtable {
// Should default trait error here?
traits::VtableDefaultImpl(_) |
traits::VtableBuiltin(_) => {
Vec::new().into_iter()
}
traits::VtableImpl(
traits::VtableImplData {
impl_def_id: id,
substs,
nested: _ }) => {
emit_vtable_methods(ccx, id, substs, param_substs).into_iter()
}
traits::VtableClosure(
traits::VtableClosureData {
closure_def_id,
substs,
nested: _ }) => {
let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_ref.def_id()).unwrap();
let llfn = closure::trans_closure_method(ccx,
closure_def_id,
substs,
trait_closure_kind);
vec![llfn].into_iter()
}
traits::VtableFnPointer(bare_fn_ty) => {
let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_ref.def_id()).unwrap();
vec![trans_fn_pointer_shim(ccx, trait_closure_kind, bare_fn_ty)].into_iter()
}
traits::VtableObject(ref data) => {
// this would imply that the Self type being erased is
// an object type; this cannot happen because we
// cannot cast an unsized type into a trait object
tcx.sess.bug(
&format!("cannot get vtable for an object type: {:?}",
data));
}
traits::VtableParam(..) => {
tcx.sess.bug(
&format!("resolved vtable for {:?} to bad vtable {:?} in trans",
trait_ref,
vtable));
}
}
});
let size_ty = sizing_type_of(ccx, trait_ref.self_ty());
let size = machine::llsize_of_alloc(ccx, size_ty);
let align = align_of(ccx, trait_ref.self_ty());
let components: Vec<_> = vec![
// Generate a destructor for the vtable.
glue::get_drop_glue(ccx, trait_ref.self_ty()),
C_uint(ccx, size),
C_uint(ccx, align)
].into_iter().chain(methods).collect();
let vtable = consts::addr_of(ccx, C_struct(ccx, &components, false), "vtable");
ccx.vtables().borrow_mut().insert(trait_ref, vtable);
vtable
}
fn emit_vtable_methods<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
impl_id: DefId,
substs: subst::Substs<'tcx>,
param_substs: &'tcx subst::Substs<'tcx>)
-> Vec<ValueRef>
{
let tcx = ccx.tcx();
debug!("emit_vtable_methods(impl_id={:?}, substs={:?}, param_substs={:?})",
impl_id,
substs,
param_substs);
let trt_id = match tcx.impl_trait_ref(impl_id) {
Some(t_id) => t_id.def_id,
None => ccx.sess().bug("make_impl_vtable: don't know how to \
make a vtable for a type impl!")
};
tcx.populate_implementations_for_trait_if_necessary(trt_id);
let nullptr = C_null(Type::nil(ccx).ptr_to());
let trait_item_def_ids = tcx.trait_item_def_ids(trt_id);
trait_item_def_ids
.iter()
// Filter out non-method items.
.filter_map(|item_def_id| {
match *item_def_id {
ty::MethodTraitItemId(def_id) => Some(def_id),
_ => None,
}
})
// Now produce pointers for each remaining method. If the
// method could never be called from this object, just supply
// null.
.map(|trait_method_def_id| {
debug!("emit_vtable_methods: trait_method_def_id={:?}",
trait_method_def_id);
let trait_method_type = match tcx.impl_or_trait_item(trait_method_def_id) {
ty::MethodTraitItem(m) => m,
_ => ccx.sess().bug("should be a method, not other assoc item"),
};
let name = trait_method_type.name;
// Some methods cannot be called on an object; skip those.
if !traits::is_vtable_safe_method(tcx, trt_id, &trait_method_type) {
debug!("emit_vtable_methods: not vtable safe");
return nullptr;
}
debug!("emit_vtable_methods: trait_method_type={:?}",
trait_method_type);
// The substitutions we have are on the impl, so we grab
// the method type from the impl to substitute into.
let mth = tcx.get_impl_method(impl_id, substs.clone(), name);
debug!("emit_vtable_methods: mth={:?}", mth);
// If this is a default method, it's possible that it
// relies on where clauses that do not hold for this
// particular set of type parameters. Note that this
// method could then never be called, so we do not want to
// try and trans it, in that case. Issue #23435.
if mth.is_provided {
let predicates = mth.method.predicates.predicates.subst(tcx, &mth.substs);
if !normalize_and_test_predicates(ccx, predicates.into_vec()) {
debug!("emit_vtable_methods: predicates do not hold");
return nullptr;
}
}
trans_fn_ref_with_substs(ccx,
mth.method.def_id,
ExprId(0),
param_substs,
mth.substs).val
})
.collect()
}
/// Replace the self type (&Self or Box<Self>) with an opaque pointer.
fn opaque_method_ty<'tcx>(tcx: &ty::ctxt<'tcx>, method_ty: &ty::BareFnTy<'tcx>)
-> &'tcx ty::BareFnTy<'tcx> {
let mut inputs = method_ty.sig.0.inputs.clone();
inputs[0] = tcx.mk_mut_ptr(tcx.mk_mach_int(ast::TyI8));
tcx.mk_bare_fn(ty::BareFnTy {
unsafety: method_ty.unsafety,
abi: method_ty.abi,
sig: ty::Binder(ty::FnSig {
inputs: inputs,
output: method_ty.sig.0.output,
variadic: method_ty.sig.0.variadic,
}),
})
}
| 40.976316 | 99 | 0.530216 |
bfbcad1dbc72c9dab5fcad720f92814b4b03e940 | 4,407 | // Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
use crate::isolate::Buf;
use crate::isolate::IsolateState;
use crate::msg;
use crate::resources;
use crate::resources::Resource;
use crate::resources::ResourceId;
use crate::workers;
use futures::Future;
use serde_json;
use std::sync::Arc;
use std::sync::Mutex;
lazy_static! {
static ref c_rid: Mutex<Option<ResourceId>> = Mutex::new(None);
}
// This corresponds to JS ModuleMetaData.
// TODO Rename one or the other so they correspond.
#[derive(Debug)]
pub struct CodeFetchOutput {
pub module_name: String,
pub filename: String,
pub media_type: msg::MediaType,
pub source_code: String,
pub maybe_output_code: Option<String>,
pub maybe_source_map: Option<String>,
}
impl CodeFetchOutput {
pub fn js_source(&self) -> String {
if self.media_type == msg::MediaType::Json {
return format!("export default {};", self.source_code);
}
match self.maybe_output_code {
None => self.source_code.clone(),
Some(ref output_code) => output_code.clone(),
}
}
}
impl CodeFetchOutput {
// TODO Use serde_derive? Use flatbuffers?
fn from_json(json_str: &str) -> Option<Self> {
match serde_json::from_str::<serde_json::Value>(json_str) {
Ok(serde_json::Value::Object(map)) => {
let module_name = match map["moduleId"].as_str() {
None => return None,
Some(s) => s.to_string(),
};
let filename = match map["fileName"].as_str() {
None => return None,
Some(s) => s.to_string(),
};
let source_code = match map["sourceCode"].as_str() {
None => return None,
Some(s) => s.to_string(),
};
let maybe_output_code =
map["outputCode"].as_str().map(|s| s.to_string());
let maybe_source_map = map["sourceMap"].as_str().map(|s| s.to_string());
Some(CodeFetchOutput {
module_name,
filename,
media_type: msg::MediaType::JavaScript, // TODO
source_code,
maybe_output_code,
maybe_source_map,
})
}
_ => None,
}
}
}
fn lazy_start(parent_state: &Arc<IsolateState>) -> Resource {
let mut cell = c_rid.lock().unwrap();
let rid = cell.get_or_insert_with(|| {
let resource =
workers::spawn(parent_state.clone(), "compilerMain()".to_string());
resource.rid
});
Resource { rid: *rid }
}
fn req(specifier: &str, referrer: &str) -> Buf {
json!({
"specifier": specifier,
"referrer": referrer,
}).to_string()
.into_boxed_str()
.into_boxed_bytes()
}
pub fn compile_sync(
parent_state: &Arc<IsolateState>,
specifier: &str,
referrer: &str,
) -> Option<CodeFetchOutput> {
let req_msg = req(specifier, referrer);
let compiler = lazy_start(parent_state);
let send_future = resources::worker_post_message(compiler.rid, req_msg);
send_future.wait().unwrap();
let recv_future = resources::worker_recv_message(compiler.rid);
let res_msg = recv_future.wait().unwrap().unwrap();
let res_json = std::str::from_utf8(&res_msg).unwrap();
CodeFetchOutput::from_json(res_json)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_compile_sync() {
let cwd = std::env::current_dir().unwrap();
let cwd_string = cwd.to_str().unwrap().to_owned();
let specifier = "./tests/002_hello.ts";
let referrer = cwd_string + "/";
let cfo =
compile_sync(&IsolateState::mock(), specifier, &referrer).unwrap();
let output_code = cfo.maybe_output_code.unwrap();
assert!(output_code.starts_with("console.log(\"Hello World\");"));
}
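  // Added illustrative test (not from the original source): `js_source`
  // wraps JSON modules in a default export so they load as ES modules.
  #[test]
  fn js_source_wraps_json() {
    let cfo = CodeFetchOutput {
      module_name: "test.json".to_string(),
      filename: "test.json".to_string(),
      media_type: msg::MediaType::Json,
      source_code: "{\"a\":1}".to_string(),
      maybe_output_code: None,
      maybe_source_map: None,
    };
    assert_eq!(cfo.js_source(), "export default {\"a\":1};");
  }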
#[test]
fn code_fetch_output_from_json() {
let json = r#"{
"moduleId":"/Users/rld/src/deno/tests/002_hello.ts",
"fileName":"/Users/rld/src/deno/tests/002_hello.ts",
"mediaType":1,
"sourceCode":"console.log(\"Hello World\");\n",
"outputCode":"yyy",
"sourceMap":"xxx",
"scriptVersion":"1"
}"#;
let actual = CodeFetchOutput::from_json(json).unwrap();
assert_eq!(actual.filename, "/Users/rld/src/deno/tests/002_hello.ts");
assert_eq!(actual.module_name, "/Users/rld/src/deno/tests/002_hello.ts");
assert_eq!(actual.source_code, "console.log(\"Hello World\");\n");
assert_eq!(actual.maybe_output_code, Some("yyy".to_string()));
assert_eq!(actual.maybe_source_map, Some("xxx".to_string()));
}
}
| 28.070064 | 80 | 0.641706 |
b986473176971758bf39e2456d6308b5c2dc110d | 230 | use crate::{
combine_range,
tour::{Tour, TourOrder, UpdateTourError},
tour_order,
};
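/// Initialises `tour` by applying the identity order `0..tour.len()`.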
pub fn init_tour<T>(tour: &mut T) -> Result<(), UpdateTourError>
where
T: Tour,
{
tour.apply(&tour_order!(0..tour.len()))
}
| 17.692308 | 64 | 0.626087 |
e98a6ed68fb6412f33ff2eafd29a0a609c644387 | 15,938 | use std::io::Read;
use std::{fmt, time};
use url::{form_urlencoded, ParseError, Url};
use crate::body::Payload;
use crate::header::{self, Header};
use crate::unit::{self, Unit};
use crate::Response;
use crate::{agent::Agent, error::Error};
pub type Result<T> = std::result::Result<T, Error>;
/// Request instances are builders that create a request.
///
/// ```
/// # fn main() -> Result<(), ureq::Error> {
/// # ureq::is_test(true);
/// let response = ureq::get("http://example.com/form")
/// .query("foo", "bar baz") // add ?foo=bar+baz
/// .call()?; // run the request
/// # Ok(())
/// # }
/// ```
#[derive(Clone)]
pub struct Request {
agent: Agent,
method: String,
url: String,
headers: Vec<Header>,
timeout: Option<time::Duration>,
}
impl fmt::Debug for Request {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Request({} {}, {:?})",
self.method, self.url, self.headers
)
}
}
impl Request {
pub(crate) fn new(agent: Agent, method: String, url: String) -> Request {
Request {
agent,
method,
url,
headers: vec![],
timeout: None,
}
}
#[inline(always)]
/// Sets overall timeout for the request, overriding agent's configuration if any.
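    ///
    /// For example, to cap this one request at five seconds overall:
    ///
    /// ```
    /// # fn main() -> Result<(), ureq::Error> {
    /// # ureq::is_test(true);
    /// let resp = ureq::get("http://example.com/")
    ///     .timeout(std::time::Duration::from_secs(5))
    ///     .call()?;
    /// # Ok(())
    /// # }
    /// ```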
pub fn timeout(mut self, timeout: time::Duration) -> Self {
self.timeout = Some(timeout);
self
}
/// Sends the request with no body and blocks the caller until done.
///
/// Use this with GET, HEAD, OPTIONS or TRACE. It sends neither
/// Content-Length nor Transfer-Encoding.
///
/// ```
/// # fn main() -> Result<(), ureq::Error> {
/// # ureq::is_test(true);
/// let resp = ureq::get("http://example.com/")
/// .call()?;
/// # Ok(())
/// # }
/// ```
pub fn call(self) -> Result<Response> {
self.do_call(Payload::Empty)
}
fn parse_url(&self) -> Result<Url> {
Ok(self.url.parse().and_then(|url: Url|
// No hostname is fine for urls in general, but not for website urls.
if url.host_str().is_none() {
Err(ParseError::EmptyHost)
} else {
Ok(url)
}
)?)
}
fn do_call(self, payload: Payload) -> Result<Response> {
for h in &self.headers {
h.validate()?;
}
let url = self.parse_url()?;
let deadline = match self.timeout.or(self.agent.config.timeout) {
None => None,
Some(timeout) => {
let now = time::Instant::now();
Some(now.checked_add(timeout).unwrap())
}
};
let reader = payload.into_read();
let unit = Unit::new(
&self.agent,
&self.method,
&url,
&self.headers,
&reader,
deadline,
);
let response = unit::connect(unit, true, reader).map_err(|e| e.url(url.clone()))?;
if response.status() >= 400 {
Err(Error::Status(response.status(), response))
} else {
Ok(response)
}
}
/// Send data a json value.
///
/// Requires feature `ureq = { version = "*", features = ["json"] }`
///
/// The `Content-Length` header is implicitly set to the length of the serialized value.
///
/// ```
/// # fn main() -> Result<(), ureq::Error> {
/// # ureq::is_test(true);
/// let resp = ureq::post("http://httpbin.org/post")
/// .send_json(ureq::json!({
/// "name": "martin",
/// "rust": true,
/// }))?;
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "json")]
pub fn send_json(mut self, data: impl serde::Serialize) -> Result<Response> {
if self.header("Content-Type").is_none() {
self = self.set("Content-Type", "application/json");
}
let json_bytes = serde_json::to_vec(&data)
.expect("Failed to serialze data passed to send_json into JSON");
self.do_call(Payload::Bytes(&json_bytes))
}
/// Send data as bytes.
///
/// The `Content-Length` header is implicitly set to the length of the serialized value.
///
/// ```
/// # fn main() -> Result<(), ureq::Error> {
/// # ureq::is_test(true);
/// let resp = ureq::put("http://httpbin.org/put")
/// .send_bytes(&[0; 1000])?;
/// # Ok(())
/// # }
/// ```
pub fn send_bytes(self, data: &[u8]) -> Result<Response> {
self.do_call(Payload::Bytes(data))
}
/// Send data as a string.
///
/// The `Content-Length` header is implicitly set to the length of the serialized value.
/// Defaults to `utf-8`
///
/// ## Charset support
///
/// Requires feature `ureq = { version = "*", features = ["charset"] }`
///
/// If a `Content-Type` header is present and it contains a charset specification, we
/// attempt to encode the string using that character set. If it fails, we fall back
/// on utf-8.
///
/// ```
/// // this example requires features = ["charset"]
///
/// # fn main() -> Result<(), ureq::Error> {
/// # ureq::is_test(true);
/// let resp = ureq::post("http://httpbin.org/post")
/// .set("Content-Type", "text/plain; charset=iso-8859-1")
/// .send_string("Hällo Wörld!")?;
/// # Ok(())
/// # }
/// ```
pub fn send_string(self, data: &str) -> Result<Response> {
let charset =
crate::response::charset_from_content_type(self.header("content-type")).to_string();
self.do_call(Payload::Text(data, charset))
}
/// Send a sequence of (key, value) pairs as form-urlencoded data.
///
/// The `Content-Type` header is implicitly set to application/x-www-form-urlencoded.
/// The `Content-Length` header is implicitly set to the length of the serialized value.
///
/// ```
/// # fn main() -> Result<(), ureq::Error> {
/// # ureq::is_test(true);
/// let resp = ureq::post("http://httpbin.org/post")
/// .send_form(&[
/// ("foo", "bar"),
/// ("foo2", "bar2"),
/// ])?;
/// # Ok(())
/// # }
/// ```
pub fn send_form(mut self, data: &[(&str, &str)]) -> Result<Response> {
if self.header("Content-Type").is_none() {
self = self.set("Content-Type", "application/x-www-form-urlencoded");
}
let encoded = form_urlencoded::Serializer::new(String::new())
.extend_pairs(data)
.finish();
self.do_call(Payload::Bytes(&encoded.into_bytes()))
}
/// Send data from a reader.
///
    /// If neither a Content-Length nor a Transfer-Encoding header has been set, it uses the [chunked transfer encoding](https://tools.ietf.org/html/rfc7230#section-4.1).
///
    /// The caller may set the Content-Length header to the expected byte size of the reader if it is
/// known.
///
/// The input from the reader is buffered into chunks of size 16,384, the max size of a TLS fragment.
///
/// ```
/// use std::io::Cursor;
/// # fn main() -> Result<(), ureq::Error> {
/// # ureq::is_test(true);
/// let read = Cursor::new(vec![0x20; 100]);
/// let resp = ureq::post("http://httpbin.org/post")
/// .send(read)?;
/// # Ok(())
/// # }
/// ```
pub fn send(self, reader: impl Read) -> Result<Response> {
self.do_call(Payload::Reader(Box::new(reader)))
}
/// Set a header field.
///
/// ```
/// # fn main() -> Result<(), ureq::Error> {
/// # ureq::is_test(true);
/// let resp = ureq::get("http://httpbin.org/bytes/1000")
/// .set("Accept", "text/plain")
/// .set("Range", "bytes=500-999")
/// .call()?;
/// # Ok(())
/// # }
/// ```
pub fn set(mut self, header: &str, value: &str) -> Self {
header::add_header(&mut self.headers, Header::new(header, value));
self
}
/// Returns the value for a set header.
///
/// ```
/// let req = ureq::get("/my_page")
/// .set("X-API-Key", "foobar");
/// assert_eq!("foobar", req.header("x-api-Key").unwrap());
/// ```
pub fn header(&self, name: &str) -> Option<&str> {
header::get_header(&self.headers, name)
}
/// A list of the set header names in this request. Lowercased to be uniform.
///
/// ```
/// let req = ureq::get("/my_page")
/// .set("X-API-Key", "foobar")
/// .set("Content-Type", "application/json");
/// assert_eq!(req.header_names(), vec!["x-api-key", "content-type"]);
/// ```
pub fn header_names(&self) -> Vec<String> {
self.headers
.iter()
.map(|h| h.name().to_ascii_lowercase())
.collect()
}
/// Tells if the header has been set.
///
/// ```
/// let req = ureq::get("/my_page")
/// .set("X-API-Key", "foobar");
/// assert_eq!(true, req.has("x-api-Key"));
/// ```
pub fn has(&self, name: &str) -> bool {
header::has_header(&self.headers, name)
}
    /// All header values corresponding to the given name, or an empty vector.
///
/// ```
/// let req = ureq::get("/my_page")
/// .set("X-Forwarded-For", "1.2.3.4")
/// .set("X-Forwarded-For", "2.3.4.5");
///
/// assert_eq!(req.all("x-forwarded-for"), vec![
/// "1.2.3.4",
/// "2.3.4.5",
/// ]);
/// ```
pub fn all(&self, name: &str) -> Vec<&str> {
header::get_all_headers(&self.headers, name)
}
/// Set a query parameter.
///
/// For example, to set `?format=json&dest=/login`
///
/// ```
/// # fn main() -> Result<(), ureq::Error> {
/// # ureq::is_test(true);
/// let resp = ureq::get("http://httpbin.org/get")
/// .query("format", "json")
/// .query("dest", "/login")
/// .call()?;
/// # Ok(())
/// # }
/// ```
pub fn query(mut self, param: &str, value: &str) -> Self {
if let Ok(mut url) = self.parse_url() {
url.query_pairs_mut().append_pair(param, value);
// replace url
self.url = url.to_string();
}
self
}
/// Returns the value of the request method. Something like `GET`, `POST`, `PUT` etc.
///
/// ```
/// let req = ureq::put("http://httpbin.org/put");
///
/// assert_eq!(req.method(), "PUT");
/// ```
pub fn method(&self) -> &str {
&self.method
}
/// Get the url str that will be used for this request.
///
/// The url might differ from that originally provided when constructing the
/// request if additional query parameters have been added using [`Request::query()`].
///
    /// In case the original url provided when constructing the request cannot be
    /// parsed as a Url, this function returns the original, and the error will
    /// surface once the Request object is used.
///
/// ```
/// # fn main() -> Result<(), ureq::Error> {
/// # ureq::is_test(true);
/// let req = ureq::get("http://httpbin.org/get")
/// .query("foo", "bar");
///
/// assert_eq!(req.url(), "http://httpbin.org/get?foo=bar");
/// # Ok(())
/// # }
/// ```
///
/// ```
/// # fn main() -> Result<(), ureq::Error> {
/// # ureq::is_test(true);
/// let req = ureq::get("SO WRONG")
/// .query("foo", "bar"); // does nothing
///
/// assert_eq!(req.url(), "SO WRONG");
/// # Ok(())
/// # }
/// ```
pub fn url(&self) -> &str {
&self.url
}
/// Get the parsed url that will be used for this request. The parsed url
/// has functions to inspect the parts of the url further.
///
/// The url might differ from that originally provided when constructing the
/// request if additional query parameters have been added using [`Request::query()`].
///
/// Returns a `Result` since a common use case is to construct
/// the [`Request`] using a `&str` in which case the url needs to be parsed
/// to inspect the parts. If the Request url is not possible to parse, this
/// function produces the same error that would otherwise happen when
/// `call` or `send_*` is called.
///
/// ```
/// # fn main() -> Result<(), ureq::Error> {
/// # ureq::is_test(true);
/// let req = ureq::get("http://httpbin.org/get")
/// .query("foo", "bar");
///
/// assert_eq!(req.request_url().unwrap().host(), "httpbin.org");
/// # Ok(())
/// # }
/// ```
pub fn request_url(&self) -> Result<RequestUrl> {
Ok(RequestUrl::new(self.parse_url()?))
}
}
/// Parsed result of a request url with handy inspection methods.
#[derive(Debug, Clone)]
pub struct RequestUrl {
url: Url,
query_pairs: Vec<(String, String)>,
}
impl RequestUrl {
fn new(url: Url) -> Self {
// This is needed to avoid url::Url Cow<str>. We want ureq API to work with &str.
let query_pairs = url
.query_pairs()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect();
RequestUrl { url, query_pairs }
}
/// Handle the request url as a standard [`url::Url`].
pub fn as_url(&self) -> &Url {
&self.url
}
/// Get the scheme of the request url, i.e. "https" or "http".
pub fn scheme(&self) -> &str {
self.url.scheme()
}
/// Host of the request url.
pub fn host(&self) -> &str {
// this unwrap() is ok, because RequestUrl is tested for empty host
// urls in Request::parse_url().
self.url.host_str().unwrap()
}
/// Port of the request url, if available. Ports are only available if they
/// are present in the original url. Specifically the scheme default ports,
    /// 443 for `https` and 80 for `http` are `None` unless explicitly
/// set in the url, i.e. `https://my-host.com:443/some/path`.
pub fn port(&self) -> Option<u16> {
self.url.port()
}
/// Path of the request url.
pub fn path(&self) -> &str {
self.url.path()
}
/// Returns all query parameters as a vector of key-value pairs.
///
/// ```
/// # fn main() -> Result<(), ureq::Error> {
/// # ureq::is_test(true);
/// let req = ureq::get("http://httpbin.org/get")
/// .query("foo", "42")
/// .query("foo", "43");
///
/// assert_eq!(req.request_url().unwrap().query_pairs(), vec![
/// ("foo", "42"),
/// ("foo", "43")
/// ]);
/// # Ok(())
/// # }
/// ```
pub fn query_pairs(&self) -> Vec<(&str, &str)> {
self.query_pairs
.iter()
.map(|(k, v)| (k.as_str(), v.as_str()))
.collect()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn request_implements_send_and_sync() {
let _request: Box<dyn Send> = Box::new(Request::new(
Agent::new(),
"GET".to_string(),
"https://example.com/".to_string(),
));
let _request: Box<dyn Sync> = Box::new(Request::new(
Agent::new(),
"GET".to_string(),
"https://example.com/".to_string(),
));
}
#[test]
fn send_byte_slice() {
let bytes = vec![1, 2, 3];
crate::agent()
.post("http://example.com")
.send(&bytes[1..2])
.ok();
}
#[test]
fn disallow_empty_host() {
let req = crate::agent().get("file:///some/path");
// Both request_url and call() must surface the same error.
assert_eq!(
req.request_url().unwrap_err().kind(),
crate::ErrorKind::InvalidUrl
);
assert_eq!(req.call().unwrap_err().kind(), crate::ErrorKind::InvalidUrl);
}
}
| 30.416031 | 161 | 0.512549 |
8f1309e4e8491ac6aaf24afa4dcde601c7008282 | 15,878 | use core::fmt::{Debug, Display};
use std::{fs, io, io::Write, process, rc::Rc};
use crate::{traits::HypergraphClass, Hypergraph};
pub struct DotFormatter<N, E, H, L> {
pub edge: Rc<dyn Fn(&Vec<usize>, &E) -> String>,
pub node: Rc<dyn Fn(&Vec<usize>, &N) -> String>,
pub hypergraph: Rc<dyn Fn(&Vec<usize>, &Option<H>) -> String>,
pub link: Rc<dyn Fn(&Vec<usize>, &Option<L>) -> String>,
}
impl<N, E, H, L> DotFormatter<N, E, H, L> {
    /// Creates a new `DotFormatter` that forwards the `Debug` implementation in all fields.
///
/// Values `None` are left blank.
pub fn debug() -> Self
where
N: Debug,
E: Debug,
H: Debug,
L: Debug,
{
let mut dotformatter = Self::new();
dotformatter
.set_edge(|_, edge| format!("{:?}", edge))
.set_hypergraph(|_, hypergraph_option| {
if let Some(hypergraph) = hypergraph_option {
format!("{:?}", hypergraph)
} else {
String::new()
}
})
.set_link(|_, link_option| {
if let Some(link) = link_option {
format!("{:?}", link)
} else {
String::new()
}
})
.set_node(|_, node| format!("{:?}", node));
dotformatter
}
/// Creates a new `DotFormatter` that forwards the `Display` implementation in all fields.
///
/// Values `None` are left blank.
pub fn display() -> Self
where
N: Display,
E: Display,
H: Display,
L: Display,
{
let mut dotformatter = Self::new();
dotformatter
.set_edge(|_, edge| format!("{}", edge))
.set_hypergraph(|_, hypergraph_option| {
if let Some(hypergraph) = hypergraph_option {
format!("{}", hypergraph)
} else {
String::new()
}
})
.set_link(|_, link_option| {
if let Some(link) = link_option {
format!("{}", link)
} else {
String::new()
}
})
.set_node(|_, node| format!("{}", node));
dotformatter
}
/// Creates a new `DotFormatter` with default settings.
pub fn new() -> Self {
Self::default()
}
pub fn set_edge<F: 'static + Fn(&Vec<usize>, &E) -> String>(
&mut self,
edge_formatter: F,
) -> &mut Self {
self.edge = Rc::new(edge_formatter);
self
}
pub fn set_hypergraph<F: 'static + Fn(&Vec<usize>, &Option<H>) -> String>(
&mut self,
hypergraph_formatter: F,
) -> &mut Self {
self.hypergraph = Rc::new(hypergraph_formatter);
self
}
pub fn set_link<F: 'static + Fn(&Vec<usize>, &Option<L>) -> String>(
&mut self,
link_formatter: F,
) -> &mut Self {
self.link = Rc::new(link_formatter);
self
}
pub fn set_node<F: 'static + Fn(&Vec<usize>, &N) -> String>(
&mut self,
node_formatter: F,
) -> &mut Self {
self.node = Rc::new(node_formatter);
self
}
}
impl<N, E, H, L> Default for DotFormatter<N, E, H, L> {
/// Creates a new `DotFormatter`.
///
/// The label of every element is its `id`.
fn default() -> Self {
DotFormatter {
edge: Rc::new(|id, _| format!("{:?}", id)),
node: Rc::new(|id, _| format!("{:?}", id)),
hypergraph: Rc::new(|id, _| format!("{:?}", id)),
link: Rc::new(|id, _| format!("{:?}", id)),
}
}
}
/// # Visualize
///
/// Visualize hypergraphs.
impl<N, E, H, L, Ty: HypergraphClass> Hypergraph<N, E, H, L, Ty> {
    /// Transforms into a [dot language](https://graphviz.org/doc/info/lang.html) representation, as used by Graphviz.
///
/// Hyperedges are represented as nodes without borders.
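    ///
    /// A minimal sketch (marked `ignore` since the crate-root imports are
    /// omitted; it mirrors the unit test at the bottom of this file):
    ///
    /// ```ignore
    /// let mut h = Hypergraph::<&str, &str, &str, &str>::new();
    /// h.add_node("zero");
    /// let dot = h.as_dot(DotFormatter::default());
    /// assert!(dot.starts_with("digraph"));
    /// ```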
pub fn as_dot<F>(&self, formatter: F) -> String
where
F: Into<Option<DotFormatter<N, E, H, L>>>,
{
self.as_dot_impl(vec![], &formatter.into())
}
fn as_dot_impl(
&self,
pre_id: Vec<usize>,
formatter_option: &Option<DotFormatter<N, E, H, L>>,
) -> String {
let mut dot = String::new();
if self.class().is_main() {
dot.push_str("digraph \"[]\" ")
} else if self.class().is_sub() {
dot += &format!("subgraph \"cluster_{:?}\" ", pre_id) // shows as cluster, if supported
}
dot.push_str("{\n\tcompound = true;\n");
// Hypergraph value
match formatter_option {
Some(formatter) => {
dot += &format!(
"\tlabel = \"{}\";\n",
(formatter.hypergraph)(&pre_id, self.value())
);
}
None => {
dot += &format!("\tlabel = \"{:?}\";\n", pre_id);
}
}
// Invisible node to refer to the hypergraph in edges
dot += &format!(
"\t\"{:?}\" [label = \"\", height = 0, width = 0, style = invisible];\n",
pre_id
);
// Nodes
let raw_nodes = self.raw_nodes();
for post_id in raw_nodes.keys() {
let mut id = pre_id.clone();
id.push(*post_id);
let label = match formatter_option {
None => format!("{:?}", id),
Some(formatter) => (formatter.node)(&id, &raw_nodes[post_id].0),
};
dot += &format!("\t\"{:?}\" [label=\"{}\"];\n", &id, label);
}
// Edges
let raw_edges = self.raw_edges();
for post_id in raw_edges.keys() {
let mut id = pre_id.clone();
id.push(*post_id);
let label = match formatter_option {
None => format!("{:?}", id),
Some(formatter) => (formatter.edge)(&id, &raw_edges[post_id].0),
};
dot += &format!("\t\"{:?}\" [style = dotted, label=\"{}\"];\n", &id, label);
}
// Links
let raw_links = self.raw_links();
for post_id in raw_links.keys() {
let mut id = pre_id.clone();
id.push(*post_id);
let link_full = &raw_links[post_id];
let label = match formatter_option {
None => format!("{:?}", id),
Some(formatter) => (formatter.link)(&id, &link_full.0),
};
let mut atributes = String::new();
atributes += &format!("label = \"{}\"", label);
// Recall: Links in a hypergraph can only refer to elements inside that hypergraph.
let local_source: Vec<_> = link_full.1.clone().into_iter().skip(pre_id.len()).collect();
if self.contains_subhypergraph(&local_source) {
atributes += &format!(", ltail = \"cluster_{:?}\"", link_full.1);
}
let local_target: Vec<_> = link_full.2.clone().into_iter().skip(pre_id.len()).collect();
if self.contains_subhypergraph(&local_target) {
atributes += &format!(", lhead = \"cluster_{:?}\"", link_full.2);
}
dot += &format!(
"\t\"{:?}\" -> \"{:?}\" [{}];\n",
&link_full.1, &link_full.2, atributes
);
}
// Subhypergraphs
let raw_hypergraphs = self.raw_hypergraphs();
for post_id in raw_hypergraphs.keys() {
let mut id = pre_id.clone();
id.push(*post_id);
let hypergraph_full = &raw_hypergraphs[post_id];
dot += &hypergraph_full.0.as_dot_impl(id, formatter_option);
}
dot.push_str("}\n");
dot
}
    /// Saves the output of [`as_dot`] and renders the graph as an svg file.
    ///
    /// Both files are named after `file_name`.
///
    /// This is just a shorthand for running Graphviz's [`dot`] command on the result of the [`as_dot`] method
/// and saving all files.
///
/// # Requirements
///
    /// [`Graphviz`] needs to be installed in your system. In particular, [`dot`] must be a command accessible from PATH.
///
/// # Safety
///
/// As this calls an external command (`dot`), there is no safety guarantee.
///
/// [`dot`]: https://graphviz.org/doc/info/command.html
/// [`Graphviz`]: https://graphviz.org/
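    ///
    /// A minimal usage sketch (the file name is arbitrary; assumes `dot` is on
    /// PATH, per the requirements above):
    ///
    /// ```ignore
    /// let h = Hypergraph::<&str, &str, &str, &str>::new();
    /// h.draw(DotFormatter::default(), "my_graph")?.wait()?;
    /// ```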
pub fn draw<F>(&self, formatter: F, file_name: impl Display) -> io::Result<process::Child>
where
F: Into<Option<DotFormatter<N, E, H, L>>>,
{
fs::create_dir_all("target/ferret_hypergraph/dot/")?;
let dot_path = format!("target/ferret_hypergraph/dot/{}.dot", file_name);
let mut dot_file = fs::OpenOptions::new()
.create(true)
.write(true)
.truncate(true)
.open(&dot_path)?;
write!(dot_file, "{}", self.as_dot(formatter))?;
fs::create_dir_all("target/ferret_hypergraph/svg/")?;
let child = process::Command::new("dot")
.arg("-Tsvg")
.arg(&dot_path)
.args(&[
"-o",
&format!("target/ferret_hypergraph/svg/{}.svg", file_name),
])
.spawn()
.expect("failed running graphviz dot. Is graphviz installed?");
Ok(child)
}
    /// On top of applying the [`draw`] method, it (asynchronously) renders the svg file into a png file
/// and opens it (using [`emulsion`]) for quick inspection.
///
    /// This is just a shorthand for running the method [`draw`], then the commands [`resvg`] and [`emulsion`].
///
/// # Requirements
///
    /// - [`resvg`] needs to be installed in your system.
    /// - [`emulsion`] needs to be installed in your system.
///
/// # Safety
///
/// This calls an external commands ([`resvg`] and [`emulsion`]). There is no safety guarantee.
///
/// [`resvg`]: https://crates.io/crates/resvg
/// [`emulsion`]: https://github.com/ArturKovacs/emulsion
pub fn draw_and_show<F>(
&self,
formatter: F,
file_name: impl Display,
) -> io::Result<process::Child>
where
F: Into<Option<DotFormatter<N, E, H, L>>>,
{
self.draw(formatter, &file_name)?
.wait()
.expect("dot failed to run.");
fs::create_dir_all("target/ferret_hypergraph/svg/")?;
fs::create_dir_all("target/ferret_hypergraph/png/")?;
process::Command::new("resvg")
.arg(&format!("target/ferret_hypergraph/svg/{}.svg", file_name))
.arg(&format!("target/ferret_hypergraph/png/{}.png", file_name))
.spawn()
.expect("failed running resvg to transform svg to png format. Is resvg installed?")
.wait()
.expect("resvg failed to run.");
let child = process::Command::new("emulsion")
.arg(&format!("target/ferret_hypergraph/png/{}.png", file_name))
.spawn()
.expect("failed running emulsion to open png. Is emulsion installed?");
Ok(child)
}
/// Renders the hypergraph as a PNG (using [`dot`])
/// and opens it (using [`emulsion`]) for quick inspection.
///
/// This is the fastest way to visualize a hypergraph.
///
/// This is just a shorthand for running [`dot`] to generate a PNG file and opening it with [`emulsion`].
///
/// # Requirements
///
/// - [`dot`] needs to be installed on your system.
/// - [`emulsion`] needs to be installed on your system.
///
/// # Safety
///
/// As this calls external commands, there is no safety guarantee.
///
/// [`dot`]: https://graphviz.org/doc/info/command.html
/// [`emulsion`]: https://github.com/ArturKovacs/emulsion
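///
/// # Examples
///
/// A hedged sketch mirroring the test in this module (assumes `dot` and
/// `emulsion` are on your PATH; `"quick_look"` is an arbitrary file name):
///
/// ```ignore
/// let mut h = Hypergraph::<&str, &str, &str, &str>::new();
/// h.add_node("zero");
/// h.show(None, "quick_look")?;
/// ```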
pub fn show<F>(&self, formatter: F, file_name: impl Display) -> io::Result<process::Child>
where
F: Into<Option<DotFormatter<N, E, H, L>>>,
{
fs::create_dir_all("target/ferret_hypergraph/png/")?;
let mut child = process::Command::new("dot")
.arg("-Tpng")
.stdin(process::Stdio::piped())
.args(&[
"-o",
&format!("target/ferret_hypergraph/png/{}.png", file_name),
])
.spawn()
.expect("failed running graphviz dot. Is graphviz installed?");
child
.stdin
.as_mut()
.unwrap()
.write_all(self.as_dot(formatter).as_bytes())
.expect("Writing failed in child process. We could not pass the dot representation of the hypergraph to dot.");
child.wait()
.expect("failed running graphviz dot. If graphviz works fine on your computer, contact us!");
let child = process::Command::new("emulsion")
.arg(&format!("target/ferret_hypergraph/png/{}.png", file_name))
.spawn()
.expect("failed running emulsion to open png. Is emulsion installed?");
Ok(child)
}
}
impl<'a, N, E, H, L, Ty> Into<tabbycat::Graph<'a>> for &'a Hypergraph<N, E, H, L, Ty>
where
H: Display,
{
fn into(self) -> tabbycat::Graph<'a> {
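// Conversion to `tabbycat`'s graph representation is not implemented yet.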
todo!()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn as_dot() {
let mut h = Hypergraph::<&str, &str, &str, &str>::new();
h.add_node("zero");
h.add_node("one");
h.add_edge([0], [1], "two").unwrap();
h.add_hypergraph("five").unwrap();
h.add_node_in("six", [5]).unwrap();
h.add_node_in("seven", [5]).unwrap();
h.add_edge_in([5, 0], [5, 1], "eight", [5]).unwrap();
h.add_link([2], [5, 0], "eleven").unwrap();
h.add_hypergraph_in("twelve", [5]).unwrap();
h.add_node_in("thirteen", [5, 5]).unwrap();
let mut formatter = DotFormatter::new();
formatter
.set_edge(|_, e: &&str| e.to_string())
.set_node(|_, n: &&str| n.to_string())
.set_hypergraph(|_, h: &Option<&str>| match h {
None => "?".to_string(),
Some(v) => v.to_string(),
})
.set_link(|_, l: &Option<&str>| match l {
None => "?".to_string(),
Some(v) => v.to_string(),
});
println!("{}", h.as_dot(formatter));
let mut formatter = DotFormatter::new();
formatter
.set_edge(|_, e: &&str| e.to_string())
.set_node(|_, n: &&str| n.to_string())
.set_hypergraph(|_, h: &Option<&str>| match h {
None => "?".to_string(),
Some(v) => v.to_string(),
})
.set_link(|_, l: &Option<&str>| match l {
None => "?".to_string(),
Some(v) => v.to_string(),
});
assert_eq!(
&h.as_dot(formatter),
"digraph \"[]\" {\n\tcompound = true;\n\tlabel = \"?\";\n\t\"[]\" [label = \"\", height = 0, width = 0, style = invisible];\n\t\"[0]\" [label=\"zero\"];\n\t\"[1]\" [label=\"one\"];\n\t\"[2]\" [style = dotted, label=\"two\"];\n\t\"[0]\" -> \"[2]\" [label = \"?\"];\n\t\"[2]\" -> \"[1]\" [label = \"?\"];\n\t\"[2]\" -> \"[5, 0]\" [label = \"eleven\"];\nsubgraph \"cluster_[5]\" {\n\tcompound = true;\n\tlabel = \"five\";\n\t\"[5]\" [label = \"\", height = 0, width = 0, style = invisible];\n\t\"[5, 0]\" [label=\"six\"];\n\t\"[5, 1]\" [label=\"seven\"];\n\t\"[5, 2]\" [style = dotted, label=\"eight\"];\n\t\"[5, 0]\" -> \"[5, 2]\" [label = \"?\"];\n\t\"[5, 2]\" -> \"[5, 1]\" [label = \"?\"];\nsubgraph \"cluster_[5, 5]\" {\n\tcompound = true;\n\tlabel = \"twelve\";\n\t\"[5, 5]\" [label = \"\", height = 0, width = 0, style = invisible];\n\t\"[5, 5, 0]\" [label=\"thirteen\"];\n}\n}\n}\n",
);
}
}
| 36.501149 | 897 | 0.501386 |
ac99eb9ef80d2ce3a196318a7bdece0a17447497 | 6,362 | #![allow(dead_code)]
use ndarray::{Array, ArrayView, ArrayViewMut, Axis, Dimension, Ix};
use std::any::{Any, TypeId};
use std::mem::transmute;
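// Type-erased wrappers around `ndarray` containers: the element type `A` is
// hidden behind trait objects (`AnyArrayObject`, `ArrayViewObject`,
// `ArrayViewMutObject`) and recovered at runtime via `TypeId` checks.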
trait AnyArrayObject<D> {
fn view_mut<'a>(&'a mut self) -> Box<dyn ArrayViewMutObject<'a, D> + 'a>;
fn view<'a>(&'a self) -> Box<dyn ArrayViewObject<'a, D> + 'a>;
fn as_any(&self) -> &dyn Any;
fn as_mut_any(&mut self) -> &mut dyn Any;
}
impl<A, D> AnyArrayObject<D> for Array<A, D>
where
A: 'static + Send,
D: 'static + Dimension,
{
fn view<'a>(&'a self) -> Box<dyn ArrayViewObject<'a, D> + 'a> {
Box::new(Array::<A, D>::view(self))
}
fn view_mut<'a>(&'a mut self) -> Box<dyn ArrayViewMutObject<'a, D> + 'a> {
Box::new(Array::<A, D>::view_mut(self))
}
fn as_any(&self) -> &dyn Any {
self
}
fn as_mut_any(&mut self) -> &mut dyn Any {
self
}
}
pub struct AnyArray<D> {
inner: Box<dyn AnyArrayObject<D>>,
elem_type: TypeId,
}
impl<D> AnyArray<D>
where
D: 'static + Dimension,
{
pub fn new<A>(array: Array<A, D>) -> Self
where
A: 'static + Send,
{
Self {
inner: Box::new(array),
elem_type: TypeId::of::<A>(),
}
}
pub fn view(&self) -> AnyArrayView<'_, D> {
AnyArrayView {
inner: self.inner.view(),
elem_type: self.elem_type,
}
}
pub fn view_mut(&mut self) -> AnyArrayViewMut<'_, D> {
AnyArrayViewMut {
inner: self.inner.view_mut(),
elem_type: self.elem_type,
}
}
pub fn downcast_ref<A>(&self) -> Option<&Array<A, D>>
where
A: 'static,
{
self.inner.as_any().downcast_ref()
}
pub fn downcast_mut<A>(&mut self) -> Option<&mut Array<A, D>>
where
A: 'static,
{
self.inner.as_mut_any().downcast_mut()
}
}
impl<A, D> From<Array<A, D>> for AnyArray<D>
where
A: 'static + Send,
D: 'static + Dimension,
{
fn from(value: Array<A, D>) -> Self {
Self::new(value)
}
}
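// A minimal usage sketch (illustrative only; `Ix2`/`f32` are arbitrary choices):
//
// let mut erased: AnyArray<ndarray::Ix2> = ndarray::Array2::<f32>::zeros((2, 2)).into();
// assert!(erased.downcast_ref::<f32>().is_some()); // matching element type
// assert!(erased.downcast_ref::<i64>().is_none()); // mismatched element type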
trait ArrayViewObject<'a, D> {
fn split_at(
self: Box<Self>,
axis: Axis,
index: Ix,
) -> (
Box<dyn ArrayViewObject<'a, D> + 'a>,
Box<dyn ArrayViewObject<'a, D> + 'a>,
);
}
impl<'a, A, D> ArrayViewObject<'a, D> for ArrayView<'a, A, D>
where
A: 'static,
D: Dimension + 'static,
{
fn split_at(
self: Box<Self>,
axis: Axis,
index: Ix,
) -> (
Box<dyn ArrayViewObject<'a, D> + 'a>,
Box<dyn ArrayViewObject<'a, D> + 'a>,
) {
let (l, r) = ArrayView::<A, D>::split_at(*self, axis, index);
(Box::new(l), Box::new(r))
}
}
pub struct AnyArrayView<'a, D> {
inner: Box<dyn ArrayViewObject<'a, D> + 'a>,
elem_type: TypeId,
}
impl<'a, D> AnyArrayView<'a, D>
where
D: 'static + Dimension,
{
pub fn new<A>(view: ArrayView<'a, A, D>) -> Self
where
A: 'static,
{
Self {
inner: Box::new(view),
elem_type: TypeId::of::<A>(),
}
}
pub fn downcast<A: 'static>(&self) -> Option<&ArrayView<'a, A, D>> {
if self.elem_type == TypeId::of::<A>() {
Some(unsafe { self.udowncast() })
} else {
None
}
}
pub unsafe fn udowncast<A: 'static>(&self) -> &ArrayView<'a, A, D> {
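// SAFETY contract: the caller must ensure the erased element type is `A`
// (i.e. `elem_type == TypeId::of::<A>()`). The transmute below also assumes
// the (data, vtable) layout of trait-object references, which the language
// does not formally guarantee.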
let (data, _vtable): (&ArrayView<A, D>, usize) = transmute(&*self.inner);
data
}
pub fn split_at(self, axis: Axis, index: Ix) -> (AnyArrayView<'a, D>, AnyArrayView<'a, D>) {
let (l, r) = self.inner.split_at(axis, index);
(
AnyArrayView {
inner: l,
elem_type: self.elem_type,
},
AnyArrayView {
inner: r,
elem_type: self.elem_type,
},
)
}
}
trait ArrayViewMutObject<'a, D>: Send {
fn split_at(
self: Box<Self>,
axis: Axis,
index: Ix,
) -> (
Box<dyn ArrayViewMutObject<'a, D> + 'a>,
Box<dyn ArrayViewMutObject<'a, D> + 'a>,
);
fn reborrow<'b>(self: Box<Self>) -> Box<dyn ArrayViewMutObject<'b, D> + 'b>
where
'a: 'b;
}
impl<'a, A, D> ArrayViewMutObject<'a, D> for ArrayViewMut<'a, A, D>
where
A: 'static + Send,
D: 'static + Dimension,
{
fn split_at(
self: Box<Self>,
axis: Axis,
index: Ix,
) -> (
Box<dyn ArrayViewMutObject<'a, D> + 'a>,
Box<dyn ArrayViewMutObject<'a, D> + 'a>,
) {
let (l, r) = ArrayViewMut::<A, D>::split_at(*self, axis, index);
(Box::new(l), Box::new(r))
}
fn reborrow<'b>(self: Box<Self>) -> Box<dyn ArrayViewMutObject<'b, D> + 'b>
where
'a: 'b,
{
Box::new(ArrayViewMut::reborrow(*self))
}
}
pub struct AnyArrayViewMut<'a, D> {
inner: Box<dyn ArrayViewMutObject<'a, D> + 'a>,
elem_type: TypeId,
}
impl<'a, D> AnyArrayViewMut<'a, D>
where
D: 'static + Dimension,
{
pub fn new<A>(view: ArrayViewMut<'a, A, D>) -> Self
where
A: 'static + Send,
{
Self {
inner: Box::new(view),
elem_type: TypeId::of::<A>(),
}
}
pub fn downcast<A: 'static>(&mut self) -> Option<&mut ArrayViewMut<'a, A, D>> {
if self.elem_type == TypeId::of::<A>() {
Some(unsafe { self.udowncast() })
} else {
None
}
}
pub unsafe fn udowncast<A: 'static>(&mut self) -> &mut ArrayViewMut<'a, A, D> {
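// SAFETY contract: as with the immutable version, the caller must ensure
// the erased element type is `A`; the transmute assumes the (data, vtable)
// fat-pointer layout.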
let (data, _): (&mut ArrayViewMut<A, D>, usize) = transmute(&mut *self.inner);
data
}
pub fn split_at<'s1, 's2>(
self,
axis: Axis,
index: Ix,
) -> (AnyArrayViewMut<'s1, D>, AnyArrayViewMut<'s2, D>)
where
'a: 's1,
'a: 's2,
{
let (l, r) = self.inner.split_at(axis, index);
(
AnyArrayViewMut {
inner: l.reborrow(),
elem_type: self.elem_type,
},
AnyArrayViewMut {
inner: r.reborrow(),
elem_type: self.elem_type,
},
)
}
/// Reborrows the erased view, allowing the result to carry a shorter lifetime `'b`.
pub fn reborrow<'b>(self) -> AnyArrayViewMut<'b, D>
where
'a: 'b,
{
AnyArrayViewMut {
inner: self.inner.reborrow(),
elem_type: self.elem_type,
}
}
}
| 23.218978 | 96 | 0.495599 |
28f86d1676b9398814fd722a63eab9bdd5e18ad2 | 2,523 | use age::{x25519, Decryptor, Encryptor};
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use criterion_cycles_per_byte::CyclesPerByte;
#[cfg(unix)]
use pprof::criterion::{Output, PProfProfiler};
use std::io::{self, Read, Write};
use std::iter;
const KB: usize = 1024;
fn bench(c: &mut Criterion<CyclesPerByte>) {
let identity = x25519::Identity::generate();
let recipient = identity.to_public();
let mut group = c.benchmark_group("age");
// Prepare buffers to use in the benchmarks.
let pt_buf = vec![7u8; 1024 * KB];
let mut ct_buf = vec![];
let mut out_buf = vec![0u8; 1024 * KB];
for &size in &[
KB,
4 * KB,
16 * KB,
64 * KB,
128 * KB,
256 * KB,
500 * KB,
1024 * KB,
] {
group.throughput(Throughput::Bytes(size as u64));
group.bench_function(BenchmarkId::new("encrypt", size), |b| {
b.iter(|| {
let mut output = Encryptor::with_recipients(vec![Box::new(recipient.clone())])
.wrap_output(io::sink())
.unwrap();
output.write_all(&pt_buf[..size]).unwrap();
output.finish().unwrap();
})
});
group.bench_function(BenchmarkId::new("decrypt", size), |b| {
let mut output = Encryptor::with_recipients(vec![Box::new(recipient.clone())])
.wrap_output(&mut ct_buf)
.unwrap();
output.write_all(&pt_buf[..size]).unwrap();
output.finish().unwrap();
b.iter(|| {
let decryptor = match Decryptor::new(&ct_buf[..]).unwrap() {
Decryptor::Recipients(decryptor) => decryptor,
_ => panic!(),
};
let mut input = decryptor
.decrypt(iter::once(&identity as &dyn age::Identity))
.unwrap();
input.read_exact(&mut out_buf[..size]).unwrap();
});
ct_buf.clear();
});
}
group.finish();
}
#[cfg(unix)]
criterion_group!(
name = benches;
config = Criterion::default()
.with_measurement(CyclesPerByte)
.with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
targets = bench
);
#[cfg(not(unix))]
criterion_group!(
name = benches;
config = Criterion::default()
.with_measurement(CyclesPerByte);
targets = bench
);
criterion_main!(benches);
| 29.337209 | 94 | 0.544193 |
ac200ac6795677554278cb03fe58b7241b7e5085 | 600 | fn is_prime(value: u32) -> bool {
if value == 2 || value == 3 {
return true;
}
if value % 2 == 0 || value % 3 == 0 {
return false;
}
let mut divisor = 6;
while divisor * divisor - 2 * divisor < value {
if value % (divisor - 1) == 0 || value % (divisor + 1) == 0 {
return false;
}
divisor += 6;
}
true
}
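/// Returns the `n`th prime, counting from zero: `nth(0)` is 2.
///
/// An illustrative check (hypothetical, not part of the exercise's tests):
///
/// ```ignore
/// assert_eq!(nth(5), 13); // primes: 2, 3, 5, 7, 11, 13
/// ```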
pub fn nth(n: u32) -> u32 {
let mut nth = 0;
let mut nth_prime = 2;
while nth < n {
nth_prime += 1;
if is_prime(nth_prime) {
nth += 1;
}
}
nth_prime
}
| 17.647059 | 69 | 0.44 |
1dc8c6116d59f7f508c539eda8da7f99f7cbbb5f | 791 | #![no_std]
use core::sync::atomic::{AtomicUsize, Ordering};
use defmt_rtt as _; // global logger
use stm32f1xx_hal as _; // memory layout
use panic_probe as _;
// same panicking *behavior* as `panic-probe` but doesn't print a panic message
// this prevents the panic message being printed *twice* when `defmt::panic` is invoked
#[defmt::panic_handler]
fn panic() -> ! {
cortex_m::asm::udf()
}
static COUNT: AtomicUsize = AtomicUsize::new(0);
defmt::timestamp!("{=usize}", {
// NOTE(no-CAS) `timestamps` runs with interrupts disabled
let n = COUNT.load(Ordering::Relaxed);
COUNT.store(n + 1, Ordering::Relaxed);
n
});
/// Terminates the application and makes `probe-run` exit with exit-code = 0
pub fn exit() -> ! {
loop {
cortex_m::asm::bkpt();
}
}
| 25.516129 | 87 | 0.663717 |
d5f5b809d814c2377c62bd2ab019f0f608c98914 | 1,057 | use proc_macro2::Span;
use syn::parse::{Error, Parse, ParseStream, Result};
use syn::{Attribute, ItemImpl, ItemTrait, Token};
pub struct Nothing;
impl Parse for Nothing {
fn parse(_input: ParseStream) -> Result<Self> {
Ok(Nothing)
}
}
pub enum Item {
Trait(ItemTrait),
Impl(ItemImpl),
}
impl Parse for Item {
fn parse(input: ParseStream) -> Result<Self> {
let attrs = input.call(Attribute::parse_outer)?;
let lookahead = input.lookahead1();
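// A trait item may start with a visibility modifier, so peek for `pub`
// as well as the `trait` keyword.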
if lookahead.peek(Token![pub]) || lookahead.peek(Token![trait]) {
let mut item: ItemTrait = input.parse()?;
item.attrs = attrs;
Ok(Item::Trait(item))
} else if lookahead.peek(Token![impl]) {
let mut item: ItemImpl = input.parse()?;
if item.trait_.is_none() {
return Err(Error::new(Span::call_site(), "expected a trait impl"));
}
item.attrs = attrs;
Ok(Item::Impl(item))
} else {
Err(lookahead.error())
}
}
}
| 27.815789 | 83 | 0.564806 |
e904e8b5447a490876ac9261c7a7e3ef4826de37 | 693 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z parse-only
struct Foo {
x: isize,
y: isize,
}
fn main() {
let f = Foo {
x: 1,
y: 2,
};
f.x::<isize>;
//~^ ERROR field expressions may not have generic arguments
}
| 26.653846 | 68 | 0.666667 |
0ef1f358f1c3c68b46abcd040b447bed7bcc64cb | 1,243 | use std::sync::{
LockResult,
MutexGuard,
};
use host_lib::{
Assistant,
test_stand::NotConfiguredError,
};
use crate::target::Target;
/// An instance of the test stand
///
/// Used to access all resources that a test case requires.
pub struct TestStand {
_guard: LockResult<MutexGuard<'static, ()>>,
pub target: Target,
pub assistant: Assistant,
}
impl TestStand {
/// Initializes the test stand
///
/// Reads the `test-stand.toml` configuration file and initializes test
/// stand resources, as configured in there.
pub fn new() -> Result<Self, TestStandInitError> {
let test_stand = host_lib::TestStand::new().map_err(TestStandInitError::Inner)?;
Ok(Self {
_guard: test_stand.guard,
target: Target::new(test_stand.target?),
assistant: test_stand.assistant?,
})
}
}
#[derive(Debug)]
pub enum TestStandInitError {
Inner(host_lib::test_stand::TestStandInitError),
NotConfigured(NotConfiguredError),
}
impl From<NotConfiguredError> for TestStandInitError {
fn from(err: NotConfiguredError) -> Self {
Self::NotConfigured(err)
}
}
| 22.6 | 75 | 0.623492 |