hexsha
stringlengths 40
40
| size
int64 4
1.05M
| content
stringlengths 4
1.05M
| avg_line_length
float64 1.33
100
| max_line_length
int64 1
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
03122ed229316df816b6bc7eef0126b4748bab30 | 1,275 | use std::collections::HashMap;
use thruster_core::context::Context;
#[cfg(not(feature = "thruster_async_await"))]
use thruster_core::middleware::{MiddlewareReturnValue};
#[cfg(feature = "thruster_async_await")]
use thruster_core::middleware::{MiddlewareReturnValue};
/// Implemented by contexts that can receive parsed query parameters and that
/// expose the raw request route they are parsed from.
pub trait HasQueryParams {
    /// Stores the parsed query-string key/value pairs on the context.
    fn set_query_params(&mut self, query_params: HashMap<String, String>);
    /// Returns the raw route string (path plus optional `?query` part).
    fn route(&self) -> &str;
}
/// Middleware that parses the query string out of the request route and
/// stores the resulting key/value pairs on the context via `HasQueryParams`,
/// then invokes the next middleware in the chain.
///
/// A query component without a value (e.g. `?flag`) is stored as `"true"`.
/// For duplicate keys, the last occurrence wins.
pub fn query_params<T: 'static + Context + HasQueryParams + Send>(mut context: T, next: impl Fn(T) -> MiddlewareReturnValue<T> + Send + Sync) -> MiddlewareReturnValue<T> {
    let mut params = HashMap::new();
    {
        let route = context.route();
        // The segment after the first '?' (up to any second '?') is the
        // query string; the leading path segment is skipped entirely.
        if let Some(query_string) = route.split('?').nth(1) {
            for pair in query_string.split('&') {
                let mut kv = pair.split('=');
                // `split` always yields at least one piece, so this cannot panic.
                let key = kv.next().unwrap().to_owned();
                let value = match kv.next() {
                    Some(v) => v.to_owned(),
                    None => "true".to_owned(),
                };
                params.insert(key, value);
            }
        }
    }
    context.set_query_params(params);
    next(context)
}
| 31.097561 | 172 | 0.665882 |
76775577143117e989d857fef11121476b60c29d | 3,313 | use std::io::Read;
use serde::de;
use xml::reader::XmlEvent;
use de::ChildDeserializer;
use error::{Error, Result};
/// Serde `SeqAccess` implementation that walks a stream of XML events,
/// yielding sequence elements according to `seq_type`.
pub struct SeqAccess<'a, R: 'a + Read> {
    // Child deserializer positioned at the sequence's events.
    de: ChildDeserializer<'a, R>,
    // Remaining number of elements to yield, if the caller bounded the
    // sequence (e.g. a fixed-size tuple); `None` means unbounded.
    max_size: Option<usize>,
    // Strategy for deciding which events belong to this sequence.
    seq_type: SeqType,
}
/// How members of an XML-backed sequence are recognised.
pub enum SeqType {
    /// Sequence is of elements with the same name.
    ByElementName { expected_name: String },
    /// Sequence is of all elements/text at current depth.
    AllMembers,
}
impl<'a, R: 'a + Read> SeqAccess<'a, R> {
    /// Creates a sequence accessor over `de`, yielding at most `max_size`
    /// elements when a bound is given.
    ///
    /// If the parent deserializer had a pending "map value" flag set, the
    /// sequence is keyed by the repeated element name currently peeked at;
    /// otherwise every element/text node at the current depth is a member.
    pub fn new(mut de: ChildDeserializer<'a, R>, max_size: Option<usize>) -> Self {
        let seq_type = if de.unset_map_value() {
            // NOTE(review): `debug_expect!` presumably asserts the peeked
            // event is a `StartElement` in debug builds — confirm its
            // behavior on mismatch in release builds.
            debug_expect!(de.peek(), Ok(&XmlEvent::StartElement { ref name, .. }) => {
                SeqType::ByElementName { expected_name: name.local_name.clone() }
            })
        } else {
            SeqType::AllMembers
        };
        SeqAccess {
            de,
            max_size,
            seq_type,
        }
    }
}
impl<'de, 'a, R: 'a + Read> de::SeqAccess<'de> for SeqAccess<'a, R> {
    type Error = Error;
    /// Yields the next sequence element, or `Ok(None)` when the sequence is
    /// exhausted (bound reached, enclosing element closed, or end of document).
    fn next_element_seed<T: de::DeserializeSeed<'de>>(
        &mut self,
        seed: T,
    ) -> Result<Option<T::Value>> {
        // Enforce the optional element bound: stop at zero, otherwise
        // decrement before attempting to deserialize another element.
        match self.max_size.as_mut() {
            Some(&mut 0) => {
                return Ok(None);
            },
            Some(max_size) => {
                *max_size -= 1;
            },
            None => {},
        }
        match &self.seq_type {
            SeqType::ByElementName { expected_name } => {
                // Depth of nested elements we have stepped *into* while
                // scanning for the next same-named sibling; only depth-0
                // matches count as sequence members.
                let mut local_depth = 0;
                loop {
                    let next_element = self.de.peek()?;
                    match next_element {
                        XmlEvent::StartElement { name, .. }
                            if &name.local_name == expected_name && local_depth == 0 =>
                        {
                            self.de.set_map_value();
                            return seed.deserialize(&mut self.de).map(Some);
                        }
                        XmlEvent::StartElement { .. } => {
                            // Unrelated child element: skip it and remember
                            // we are one level deeper.
                            self.de.buffered_reader.skip();
                            local_depth += 1;
                        },
                        XmlEvent::EndElement { .. } => {
                            if local_depth == 0 {
                                // Enclosing element closed: sequence is done.
                                return Ok(None);
                            } else {
                                local_depth -= 1;
                                self.de.buffered_reader.skip();
                            }
                        },
                        XmlEvent::EndDocument => {
                            return Ok(None);
                        },
                        _ => {
                            // Text, comments, etc. between members: ignore.
                            self.de.buffered_reader.skip();
                        },
                    }
                }
            },
            SeqType::AllMembers => {
                // Every event at this depth is a member until the enclosing
                // element (or document) ends.
                let next_element = self.de.peek()?;
                match next_element {
                    XmlEvent::EndElement { .. } | XmlEvent::EndDocument => return Ok(None),
                    _ => {
                        return seed.deserialize(&mut self.de).map(Some);
                    },
                }
            },
        }
    }
    /// Serde size hint: the remaining bound, when one was supplied.
    fn size_hint(&self) -> Option<usize> {
        self.max_size
    }
}
| 30.675926 | 91 | 0.4099 |
1e772000579feb50a0ff84d8a35012a3a670bbbf | 2,499 | #[doc = r" Value read from the register"]
// Snapshot of the register's bits taken at read time.
pub struct R {
    bits: u32,
}
#[doc = r" Value to write to the register"]
// Staging buffer for bits that will be committed with `register.set`.
pub struct W {
    bits: u32,
}
// Register accessor API (svd2rust-generated style) for `BUF2_DATA`.
impl super::BUF2_DATA {
    #[doc = r" Modifies the contents of the register"]
    #[inline]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        // Read-modify-write: seed `W` with the current hardware value so
        // fields not touched by `f` are preserved.
        let bits = self.register.get();
        let r = R { bits: bits };
        let mut w = W { bits: bits };
        f(&r, &mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Reads the contents of the register"]
    #[inline]
    pub fn read(&self) -> R {
        R {
            bits: self.register.get(),
        }
    }
    #[doc = r" Writes to the register"]
    #[inline]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        // Unlike `modify`, starts from the reset value, not the current one.
        let mut w = W::reset_value();
        f(&mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Writes the reset value to the register"]
    #[inline]
    pub fn reset(&self) {
        self.write(|w| w)
    }
}
#[doc = r" Value of the field"]
// Read proxy for the 16-bit DATA field.
pub struct DATAR {
    bits: u16,
}
impl DATAR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bits(&self) -> u16 {
        self.bits
    }
}
#[doc = r" Proxy"]
// Write proxy for the DATA field; mutates the parent `W` in place.
pub struct _DATAW<'a> {
    w: &'a mut W,
}
impl<'a> _DATAW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        const MASK: u16 = 0xffff;
        const OFFSET: u8 = 0;
        // Clear the field's bits, then OR in the new (masked) value.
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
impl R {
    #[doc = r" Value of the register as raw bits"]
    #[inline]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    #[doc = "Bits 0:15 - Scan result buffer"]
    #[inline]
    pub fn data(&self) -> DATAR {
        // Extract the low 16 bits of the register snapshot.
        let bits = {
            const MASK: u16 = 0xffff;
            const OFFSET: u8 = 0;
            ((self.bits >> OFFSET) & MASK as u32) as u16
        };
        DATAR { bits }
    }
}
impl W {
    #[doc = r" Reset value of the register"]
    #[inline]
    pub fn reset_value() -> W {
        W { bits: 0 }
    }
    #[doc = r" Writes raw bits to the register"]
    #[inline]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
    #[doc = "Bits 0:15 - Scan result buffer"]
    #[inline]
    pub fn data(&mut self) -> _DATAW {
        // Returns a write proxy borrowing this `W`.
        _DATAW { w: self }
    }
}
| 23.575472 | 59 | 0.493798 |
ace74852f96c63f1cdcff2aa8414d1d7697f727d | 3,483 | use crate::{
chain_spec,
cli::{Cli, Subcommand},
service,
};
use node_fluence_runtime::Block;
use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli};
use sc_service::PartialComponents;
// Static metadata and chain-spec loading for the node's CLI, as required by
// the `SubstrateCli` trait.
impl SubstrateCli for Cli {
    fn impl_name() -> String {
        "Substrate Node".into()
    }
    fn impl_version() -> String {
        // Injected at build time by the Substrate build script.
        env!("SUBSTRATE_CLI_IMPL_VERSION").into()
    }
    fn description() -> String {
        env!("CARGO_PKG_DESCRIPTION").into()
    }
    fn author() -> String {
        env!("CARGO_PKG_AUTHORS").into()
    }
    fn support_url() -> String {
        "support.anonymous.an".into()
    }
    fn copyright_start_year() -> i32 {
        2017
    }
    /// Resolves a chain-spec id: `"dev"` and `""`/`"local"` map to the
    /// built-in specs; anything else is treated as a path to a JSON spec file.
    fn load_spec(&self, id: &str) -> Result<Box<dyn sc_service::ChainSpec>, String> {
        Ok(match id {
            "dev" => Box::new(chain_spec::development_config()?),
            "" | "local" => Box::new(chain_spec::local_testnet_config()?),
            path =>
                Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?),
        })
    }
    fn native_runtime_version(_: &Box<dyn ChainSpec>) -> &'static RuntimeVersion {
        &node_fluence_runtime::VERSION
    }
}
/// Parse and run command line arguments
///
/// Dispatches each known subcommand to the appropriate runner (sync for
/// quick operations, async for ones that need the service's task manager);
/// with no subcommand, runs the full node until exit.
pub fn run() -> sc_cli::Result<()> {
    let cli = Cli::from_args();
    match &cli.subcommand {
        Some(Subcommand::Key(cmd)) => cmd.run(&cli),
        Some(Subcommand::BuildSpec(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.sync_run(|config| cmd.run(config.chain_spec, config.network))
        },
        Some(Subcommand::CheckBlock(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.async_run(|config| {
                // Only the components this command needs are destructured;
                // the task manager is returned so the runner can drive it.
                let PartialComponents { client, task_manager, import_queue, .. } =
                    service::new_partial(&config)?;
                Ok((cmd.run(client, import_queue), task_manager))
            })
        },
        Some(Subcommand::ExportBlocks(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.async_run(|config| {
                let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?;
                Ok((cmd.run(client, config.database), task_manager))
            })
        },
        Some(Subcommand::ExportState(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.async_run(|config| {
                let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?;
                Ok((cmd.run(client, config.chain_spec), task_manager))
            })
        },
        Some(Subcommand::ImportBlocks(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.async_run(|config| {
                let PartialComponents { client, task_manager, import_queue, .. } =
                    service::new_partial(&config)?;
                Ok((cmd.run(client, import_queue), task_manager))
            })
        },
        Some(Subcommand::PurgeChain(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.sync_run(|config| cmd.run(config.database))
        },
        Some(Subcommand::Revert(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.async_run(|config| {
                let PartialComponents { client, task_manager, backend, .. } =
                    service::new_partial(&config)?;
                Ok((cmd.run(client, backend), task_manager))
            })
        },
        Some(Subcommand::Benchmark(cmd)) =>
            // Benchmarking is only compiled in behind a feature flag; give a
            // actionable error when it was built without it.
            if cfg!(feature = "runtime-benchmarks") {
                let runner = cli.create_runner(cmd)?;
                runner.sync_run(|config| cmd.run::<Block, service::ExecutorDispatch>(config))
            } else {
                Err("Benchmarking wasn't enabled when building the node. You can enable it with \
                     `--features runtime-benchmarks`."
                    .into())
            },
        None => {
            // No subcommand: start the full node and run until shutdown.
            let runner = cli.create_runner(&cli.run)?;
            runner.run_node_until_exit(|config| async move {
                service::new_full(config).map_err(sc_cli::Error::Service)
            })
        },
    }
}
| 29.268908 | 88 | 0.660637 |
215a2921315213ad680a802ea98155c5ab34766f | 1,810 | // Semantically, we do not allow e.g., `static X: u8 = 0;` as an associated item.
#![feature(specialization)]
// NOTE(review): this is a rustc compile-fail (UI) test fixture. The `//~`
// annotations are position-sensitive relative to the line directly above
// them, so comments are only added at item boundaries.
fn main() {}
// Subject type for the inherent and trait impls below.
struct S;
// Inherent impl: associated `static` items must be rejected in every form
// (with/without a body, with/without `default`, any visibility).
impl S {
    static IA: u8 = 0;
    //~^ ERROR associated `static` items are not allowed
    static IB: u8;
    //~^ ERROR associated `static` items are not allowed
    //~| ERROR associated constant in `impl` without body
    default static IC: u8 = 0;
    //~^ ERROR associated `static` items are not allowed
    //~| ERROR a static item cannot be `default`
    pub(crate) default static ID: u8;
    //~^ ERROR associated `static` items are not allowed
    //~| ERROR associated constant in `impl` without body
    //~| ERROR a static item cannot be `default`
}
// Trait definition: the same forms must be rejected in trait bodies, where
// an explicit visibility additionally triggers the "unnecessary qualifier" error.
trait T {
    static TA: u8 = 0;
    //~^ ERROR associated `static` items are not allowed
    static TB: u8;
    //~^ ERROR associated `static` items are not allowed
    default static TC: u8 = 0;
    //~^ ERROR associated `static` items are not allowed
    //~| ERROR a static item cannot be `default`
    pub(crate) default static TD: u8;
    //~^ ERROR associated `static` items are not allowed
    //~| ERROR unnecessary visibility qualifier
    //~| ERROR a static item cannot be `default`
}
// Trait impl: same rejections again when implementing the trait.
impl T for S {
    static TA: u8 = 0;
    //~^ ERROR associated `static` items are not allowed
    static TB: u8;
    //~^ ERROR associated `static` items are not allowed
    //~| ERROR associated constant in `impl` without body
    default static TC: u8 = 0;
    //~^ ERROR associated `static` items are not allowed
    //~| ERROR a static item cannot be `default`
    pub default static TD: u8;
    //~^ ERROR associated `static` items are not allowed
    //~| ERROR associated constant in `impl` without body
    //~| ERROR unnecessary visibility qualifier
    //~| ERROR a static item cannot be `default`
}
| 34.807692 | 81 | 0.651381 |
0ac2599ef5507618b5b86d45fee7869167049871 | 23,830 | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This contains a build plan that is created during the Cargo build routine
//! and stored afterwards, which can be later queried, given a list of dirty
//! files, to retrieve a queue of compiler calls to be invoked (including
//! appropriate arguments and env variables).
//! The underlying structure is a dependency graph between simplified units
//! (package id and crate target kind), as opposed to Cargo units (package with
//! a target info, including crate target kind, profile and host/target kind).
//! This will be used for a quick check recompilation and does not aim to
//! reimplement all the intricacies of Cargo.
//! The unit dependency graph in Cargo also distinguishes between compiling the
//! build script and running it and collecting the build script output to modify
//! the subsequent compilations etc. Since build script executions (not building)
//! are not exposed via `Executor` trait in Cargo, we simply coalesce every unit
//! with a same package and crate target kind (e.g. both building and running
//! build scripts).
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::path::{Path, PathBuf};
use std::sync::Mutex;
use cargo::core::compiler::{CompileMode, Context, Kind, Unit};
use cargo::core::profiles::Profile;
use cargo::core::{PackageId, Target, TargetKind};
use cargo::util::ProcessBuilder;
use cargo_metadata;
use log::{error, trace};
use url::Url;
use crate::build::PackageArg;
use crate::build::plan::{BuildKey, BuildGraph, JobQueue, WorkStatus};
use crate::build::rustc::src_path;
use crate::lsp_data::parse_file_path;
/// Main key type by which `Unit`s will be distinguished in the build plan.
/// In Target we're mostly interested in TargetKind (Lib, Bin, ...) and name
/// (e.g. we can have 2 binary targets with different names).
/// Units differing only in profile or host/target kind collapse to one key.
crate type UnitKey = (PackageId, Target, CompileMode);
/// Holds the information how exactly the build will be performed for a given
/// workspace with given, specified features.
#[derive(Debug, Default)]
crate struct CargoPlan {
    /// Stores a full Cargo `Unit` data for a first processed unit with a given key.
    crate units: HashMap<UnitKey, OwnedUnit>,
    /// Main dependency graph between the simplified units.
    crate dep_graph: HashMap<UnitKey, HashSet<UnitKey>>,
    /// Reverse dependency graph that's used to construct a dirty compiler call queue.
    crate rev_dep_graph: HashMap<UnitKey, HashSet<UnitKey>>,
    /// Cached compiler calls used when creating a compiler call queue.
    crate compiler_jobs: HashMap<UnitKey, ProcessBuilder>,
    /// Calculated input files that unit depend on.
    crate input_files: HashMap<UnitKey, Vec<PathBuf>>,
    /// Reverse of `input_files`: canonicalized file path -> units reading it.
    crate file_key_mapping: HashMap<PathBuf, HashSet<UnitKey>>,
    // An object for finding the package which a file belongs to and this inferring
    // a package argument.
    package_map: Option<PackageMap>,
    /// Packages (names) for which this build plan was prepared.
    /// Used to detect if the plan can reused when building certain packages.
    built_packages: HashSet<String>,
}
impl CargoPlan {
    /// Creates an empty plan able to map files to packages in the workspace
    /// rooted at `manifest_path`.
    crate fn with_manifest(manifest_path: &Path) -> CargoPlan {
        CargoPlan {
            package_map: Some(PackageMap::new(manifest_path)),
            ..Default::default()
        }
    }
    /// Like `with_manifest`, but also records which packages this plan was
    /// prepared for (used by `prepare_work` to detect coverage gaps).
    crate fn with_packages(manifest_path: &Path, pkgs: HashSet<String>) -> CargoPlan {
        CargoPlan {
            built_packages: pkgs,
            ..Self::with_manifest(manifest_path)
        }
    }
    /// Returns whether a build plan has cached compiler invocations and dep
    /// graph so it's at all able to return a job queue via `prepare_work`.
    crate fn is_ready(&self) -> bool {
        !self.compiler_jobs.is_empty()
    }
    /// Cache a given compiler invocation in `ProcessBuilder` for a given
    /// `PackageId` and `TargetKind` in `Target`, to be used when processing
    /// cached build plan.
    crate fn cache_compiler_job(
        &mut self,
        id: PackageId,
        target: &Target,
        mode: CompileMode,
        cmd: &ProcessBuilder,
    ) {
        let unit_key = (id, target.clone(), mode);
        self.compiler_jobs.insert(unit_key, cmd.clone());
    }
    /// Records the canonicalized input files a unit depends on, plus the
    /// reverse file -> unit mapping used later for dirty-unit detection.
    crate fn cache_input_files(
        &mut self,
        id: PackageId,
        target: &Target,
        mode: CompileMode,
        input_files: Vec<PathBuf>,
        cwd: Option<&Path>,
    ) {
        let input_files: Vec<_> = input_files
            .iter()
            .filter_map(|file| src_path(cwd, file))
            .filter_map(|file| match std::fs::canonicalize(&file) {
                Ok(file) => Some(file),
                Err(err) => {
                    // Files that cannot be canonicalized (e.g. deleted) are
                    // logged and dropped rather than failing the whole cache.
                    error!("Couldn't canonicalize `{}`: {}", file.display(), err);
                    None
                }
            })
            .collect();
        let unit_key = (id.clone(), target.clone(), mode);
        trace!("Caching these files: {:#?} for {:?} key", &input_files, &unit_key);
        // Create reverse file -> unit mapping (to be used for dirty unit calculation)
        for file in &input_files {
            self.file_key_mapping
                .entry(file.to_path_buf())
                .or_default()
                .insert(unit_key.clone());
        }
        self.input_files.insert(unit_key, input_files);
    }
    /// Emplace a given `Unit`, along with its `Unit` dependencies (recursively)
    /// into the dependency graph as long as the passed `Unit` isn't filtered
    /// out by the `filter` closure.
    crate fn emplace_dep_with_filter<Filter>(
        &mut self,
        unit: &Unit<'_>,
        cx: &Context<'_, '_>,
        filter: &Filter,
    )
    where
        Filter: Fn(&Unit<'_>) -> bool,
    {
        if !filter(unit) {
            return;
        }
        let key = key_from_unit(unit);
        self.units.entry(key.clone()).or_insert_with(|| (*unit).into());
        // Process only those units, which are not yet in the dep graph.
        if self.dep_graph.get(&key).is_some() {
            return;
        }
        // Keep all the additional Unit information for a given unit (It's
        // worth remembering, that the units are only discriminated by a
        // pair of (PackageId, TargetKind), so only first occurrence will be saved.
        // NOTE(review): this `insert` overwrites the entry just created by
        // `entry(...).or_insert_with` above with an equivalent value.
        self.units.insert(key.clone(), (*unit).into());
        // Fetch and insert relevant unit dependencies to the forward dep graph.
        let units = cx.dep_targets(unit);
        let dep_keys: HashSet<UnitKey> = units.iter()
            // We might not want certain deps to be added transitively (e.g.
            // when creating only a sub-dep-graph, limiting the scope).
            .filter(|unit| filter(unit))
            .map(key_from_unit)
            // Units can depend on others with different Targets or Profiles
            // (e.g. different `run_custom_build`) despite having the same UnitKey.
            // We coalesce them here while creating the UnitKey dep graph.
            .filter(|dep| key != *dep)
            .collect();
        self.dep_graph.insert(key.clone(), dep_keys.clone());
        // We also need to track reverse dependencies here, as it's needed
        // to quickly construct a work sub-graph from a set of dirty units.
        self.rev_dep_graph
            .entry(key.clone())
            .or_insert_with(HashSet::new);
        for unit in dep_keys {
            let revs = self.rev_dep_graph.entry(unit).or_insert_with(HashSet::new);
            revs.insert(key.clone());
        }
        // Recursively process other remaining forward dependencies.
        for unit in units {
            self.emplace_dep_with_filter(&unit, cx, filter);
        }
    }
    /// TODO: Improve detecting dirty crate targets for a set of dirty file paths.
    /// This uses a lousy heuristic of checking path prefix for a given crate
    /// target to determine whether a given unit (crate target) is dirty. This
    /// can easily backfire, e.g. when build script is under src/. Any change
    /// to a file under src/ would imply the build script is always dirty, so we
    /// never do work and always offload to Cargo in such case.
    /// Because of that, build scripts are checked separately and only other
    /// crate targets are checked with path prefixes.
    fn fetch_dirty_units<T: AsRef<Path>>(&self, files: &[T]) -> HashSet<UnitKey> {
        let mut result = HashSet::new();
        // Build scripts are matched by exact source-file path...
        let build_scripts: HashMap<&Path, UnitKey> = self
            .units
            .iter()
            .filter(|&(&(_, ref target, _), _)| {
                *target.kind() == TargetKind::CustomBuild && target.src_path().is_path()
            })
            .map(|(key, unit)| (unit.target.src_path().path(), key.clone()))
            .collect();
        // ...while all other targets are matched by their source directory.
        let other_targets: HashMap<UnitKey, &Path> = self
            .units
            .iter()
            .filter(|&(&(_, ref target, _), _)| *target.kind() != TargetKind::CustomBuild)
            .map(|(key, unit)| {
                (
                    key.clone(),
                    unit.target
                        .src_path()
                        .path()
                        .parent()
                        .expect("no parent for src_path"),
                )
            }).collect();
        for modified in files.iter().map(|x| x.as_ref()) {
            if let Some(unit) = build_scripts.get(modified) {
                result.insert(unit.clone());
            } else {
                // Not a build script, so we associate a dirty file with a
                // package by finding longest (most specified) path prefix.
                let matching_prefix_components = |a: &Path, b: &Path| -> usize {
                    assert!(a.is_absolute() && b.is_absolute());
                    a.components().zip(b.components())
                        .skip(1) // Skip RootDir
                        .take_while(|&(x, y)| x == y)
                        .count()
                };
                // Since a package can correspond to many units (e.g. compiled
                // as a regular binary or a test harness for unit tests), we
                // collect every unit having the longest path prefix.
                let max_matching_prefix = other_targets
                    .values()
                    .map(|src_dir| matching_prefix_components(modified, src_dir))
                    .max();
                match max_matching_prefix {
                    Some(0) => error!(
                        "Modified file {} didn't correspond to any buildable unit!",
                        modified.display()
                    ),
                    Some(max) => {
                        let dirty_units = other_targets
                            .iter()
                            .filter(|(_, dir)| max == matching_prefix_components(modified, dir))
                            .map(|(unit, _)| unit);
                        result.extend(dirty_units.cloned());
                    }
                    None => {} // Possible that only build scripts were modified
                }
            }
        }
        result
    }
    /// For a given set of select dirty units, returns a set of all the
    /// dependencies that has to be rebuilt transitively.
    ///
    /// NOTE(review): `transitive` starts as a clone of `dirties`, so the
    /// `continue` below fires for every seeded node and the rev-dep filter
    /// only admits nodes already in `dirties` — as written this returns a set
    /// equal to `dirties`. Confirm whether the seed was meant to be empty.
    fn transitive_dirty_units(&self, dirties: &HashSet<UnitKey>) -> HashSet<UnitKey> {
        let mut transitive = dirties.clone();
        // Walk through a rev dep graph using a stack of nodes to collect
        // transitively every dirty node
        let mut to_process: Vec<_> = dirties.iter().cloned().collect();
        while let Some(top) = to_process.pop() {
            if transitive.get(&top).is_some() {
                continue;
            }
            transitive.insert(top.clone());
            // Process every dirty rev dep of the processed node
            let dirty_rev_deps = self
                .rev_dep_graph
                .get(&top)
                .expect("missing key in rev_dep_graph")
                .iter()
                .filter(|dep| dirties.contains(dep));
            for rev_dep in dirty_rev_deps {
                to_process.push(rev_dep.clone());
            }
        }
        transitive
    }
    /// Creates a dirty reverse dependency graph using a set of given dirty units.
    fn dirty_rev_dep_graph(
        &self,
        dirties: &HashSet<UnitKey>,
    ) -> HashMap<UnitKey, HashSet<UnitKey>> {
        let dirties = self.transitive_dirty_units(dirties);
        trace!("transitive_dirty_units: {:?}", dirties);
        self.rev_dep_graph.iter()
            // Remove nodes that are not dirty
            .filter(|&(unit, _)| dirties.contains(unit))
            // Retain only dirty dependencies of the ones that are dirty
            .map(|(k, deps)| (k.clone(), deps.iter().cloned().filter(|d| dirties.contains(d)).collect()))
            .collect()
    }
    /// Returns a topological ordering of a connected DAG of rev deps. The
    /// output is a stack of units that can be linearly rebuilt, starting from
    /// the last element.
    ///
    /// NOTE(review): `dfs` is given `self.rev_dep_graph` (the full graph),
    /// not the `dirties` subgraph passed in — confirm whether traversal was
    /// meant to be restricted to the dirty subgraph.
    fn topological_sort(&self, dirties: &HashMap<UnitKey, HashSet<UnitKey>>) -> Vec<UnitKey> {
        let mut visited = HashSet::new();
        let mut output = vec![];
        for k in dirties.keys() {
            if !visited.contains(k) {
                dfs(k, &self.rev_dep_graph, &mut visited, &mut output);
            }
        }
        return output;
        // Process graph depth-first recursively. A node needs to be pushed
        // after processing every other before to ensure topological ordering.
        fn dfs(
            unit: &UnitKey,
            graph: &HashMap<UnitKey, HashSet<UnitKey>>,
            visited: &mut HashSet<UnitKey>,
            output: &mut Vec<UnitKey>,
        ) {
            if visited.contains(unit) {
                return;
            } else {
                visited.insert(unit.clone());
                for neighbour in graph.get(unit).into_iter().flat_map(|nodes| nodes) {
                    dfs(neighbour, graph, visited, output);
                }
                output.push(unit.clone());
            }
        }
    }
    /// Given the modified file paths, decides whether the cached plan can
    /// service the rebuild (`WorkStatus::Execute` with a job queue) or
    /// whether Cargo has to be re-run (`WorkStatus::NeedsCargo`).
    crate fn prepare_work<T: AsRef<Path> + fmt::Debug>(
        &self,
        modified: &[T],
    ) -> WorkStatus {
        if !self.is_ready() || self.package_map.is_none() {
            return WorkStatus::NeedsCargo(PackageArg::Default);
        }
        let dirty_packages = self
            .package_map
            .as_ref()
            .unwrap()
            .compute_dirty_packages(modified);
        let needs_more_packages = dirty_packages
            .difference(&self.built_packages)
            .next()
            .is_some();
        let needed_packages = self
            .built_packages
            .union(&dirty_packages)
            .cloned()
            .collect();
        // We modified a file from a packages, that are not included in the
        // cached build plan - run Cargo to recreate the build plan including them
        if needs_more_packages {
            return WorkStatus::NeedsCargo(PackageArg::Packages(needed_packages));
        }
        let dirties = self.fetch_dirty_units(modified);
        trace!(
            "fetch_dirty_units: for files {:?}, these units are dirty: {:?}",
            modified,
            dirties,
        );
        // Build-script changes can alter the whole compilation; defer to Cargo.
        if dirties
            .iter()
            .any(|&(_, ref target, _)| *target.kind() == TargetKind::CustomBuild)
        {
            WorkStatus::NeedsCargo(PackageArg::Packages(needed_packages))
        } else {
            let graph = self.dirty_rev_dep_graph(&dirties);
            trace!("Constructed dirty rev dep graph: {:?}", graph);
            if graph.is_empty() {
                return WorkStatus::NeedsCargo(PackageArg::Default);
            }
            let queue = self.topological_sort(&graph);
            trace!(
                "Topologically sorted dirty graph: {:?} {}",
                queue,
                self.is_ready()
            );
            let jobs: Option<Vec<_>> = queue
                .iter()
                .map(|x| self.compiler_jobs.get(x).cloned())
                .collect();
            // It is possible that we want a job which is not in our cache (compiler_jobs),
            // for example we might be building a workspace with an error in a crate and later
            // crates within the crate that depend on the error-ing one have never been built.
            // In that case we need to build from scratch so that everything is in our cache, or
            // we cope with the error. In the error case, jobs will be None.
            match jobs {
                None => WorkStatus::NeedsCargo(PackageArg::Default),
                Some(jobs) => {
                    assert!(!jobs.is_empty());
                    WorkStatus::Execute(JobQueue::with_commands(jobs))
                }
            }
        }
    }
}
/// Maps paths to packages.
///
/// The point of the PackageMap is detect if additional packages need to be
/// included in the cached build plan. The cache can represent only a subset of
/// the entire workspace, hence why we need to detect if a package was modified
/// that's outside the cached build plan - if so, we need to recreate it,
/// including the new package.
#[derive(Debug)]
struct PackageMap {
    // A map from a manifest directory to the package name.
    package_paths: HashMap<PathBuf, String>,
    // A map from a file's path, to the package it belongs to.
    // Guarded by a mutex so lookups can be cached through a shared reference.
    map_cache: Mutex<HashMap<PathBuf, String>>,
}
impl PackageMap {
    /// Builds the map by discovering workspace members under `manifest_path`.
    fn new(manifest_path: &Path) -> PackageMap {
        PackageMap {
            package_paths: Self::discover_package_paths(manifest_path),
            map_cache: Mutex::new(HashMap::new()),
        }
    }
    // Find each package in the workspace and record the root directory and package name.
    fn discover_package_paths(manifest_path: &Path) -> HashMap<PathBuf, String> {
        trace!("read metadata {:?}", manifest_path);
        let metadata = match cargo_metadata::metadata(Some(manifest_path)) {
            Ok(metadata) => metadata,
            // Metadata failures degrade to an empty map (no package inference).
            Err(_) => return HashMap::new(),
        };
        metadata
            .workspace_members
            .into_iter()
            .map(|wm| {
                // Workspace members are reported as `path+file://...` URLs.
                assert!(wm.url().starts_with("path+"));
                let url = Url::parse(&wm.url()[5..]).expect("Bad URL");
                let path = parse_file_path(&url).expect("URL not a path");
                (path, wm.name().into())
            }).collect()
    }
    /// Given modified set of files, returns a set of corresponding dirty packages.
    fn compute_dirty_packages<T: AsRef<Path> + fmt::Debug>(
        &self,
        modified_files: &[T],
    ) -> HashSet<String> {
        modified_files
            .iter()
            .filter_map(|p| self.map(p.as_ref()))
            .collect()
    }
    // Map a file to the package which it belongs to.
    // We do this by walking up the directory tree from `path` until we get to
    // one of the recorded package root directories.
    fn map(&self, path: &Path) -> Option<String> {
        if self.package_paths.is_empty() {
            return None;
        }
        let mut map_cache = self.map_cache.lock().unwrap();
        if map_cache.contains_key(path) {
            return Some(map_cache[path].clone());
        }
        let result = Self::map_uncached(path, &self.package_paths)?;
        map_cache.insert(path.to_owned(), result.clone());
        Some(result)
    }
    // Recursive helper for `map`: try the path itself, then each ancestor.
    fn map_uncached(path: &Path, package_paths: &HashMap<PathBuf, String>) -> Option<String> {
        if package_paths.is_empty() {
            return None;
        }
        match package_paths.get(path) {
            Some(package) => Some(package.clone()),
            None => Self::map_uncached(path.parent()?, package_paths),
        }
    }
}
/// Derives the simplified `UnitKey` (package id, target, compile mode) used
/// throughout the plan from a full Cargo `Unit`.
fn key_from_unit(unit: &Unit<'_>) -> UnitKey {
    (
        unit.pkg.package_id().clone(),
        unit.target.clone(),
        unit.mode,
    )
}
#[derive(Hash, PartialEq, Eq, Debug, Clone)]
/// An owned version of `cargo::core::Unit`.
crate struct OwnedUnit {
    crate id: PackageId,
    crate target: Target,
    crate profile: Profile,
    crate kind: Kind,
    crate mode: CompileMode,
}
// Detaches a borrowed Cargo `Unit` into an owned copy for long-term storage.
impl<'a> From<Unit<'a>> for OwnedUnit {
    fn from(unit: Unit<'a>) -> OwnedUnit {
        OwnedUnit {
            id: unit.pkg.package_id().to_owned(),
            target: unit.target.clone(),
            profile: unit.profile,
            kind: unit.kind,
            mode: unit.mode,
        }
    }
}
impl BuildKey for OwnedUnit {
    type Key = UnitKey;
    /// The same simplified key as `key_from_unit`, built from owned fields.
    fn key(&self) -> UnitKey {
        (self.id.clone(), self.target.clone(), self.mode)
    }
}
// Generic build-graph facade over `CargoPlan`, mostly delegating to the
// inherent methods above.
impl BuildGraph for CargoPlan {
    type Unit = OwnedUnit;
    fn units(&self) -> Vec<&Self::Unit> {
        self.units.values().collect()
    }
    fn get(&self, key: <Self::Unit as BuildKey>::Key) -> Option<&Self::Unit> {
        self.units.get(&key)
    }
    fn get_mut(&mut self, key: <Self::Unit as BuildKey>::Key) -> Option<&mut Self::Unit> {
        self.units.get_mut(&key)
    }
    /// Forward dependencies of `key`; empty if the key is unknown.
    fn deps(&self, key: <Self::Unit as BuildKey>::Key) -> Vec<&Self::Unit> {
        self.dep_graph
            .get(&key)
            .map(|d| d.iter().map(|d| &self.units[d]).collect())
            .unwrap_or_default()
    }
    /// Inserts `unit` and its `deps`, maintaining both forward and reverse
    /// graphs and the unit store.
    fn add<T: Into<Self::Unit>>(&mut self, unit: T, deps: Vec<T>) {
        let unit = unit.into();
        // Units can depend on others with different Targets or Profiles
        // (e.g. different `run_custom_build`) despite having the same UnitKey.
        // We coalesce them here while creating the UnitKey dep graph.
        // TODO: Are we sure? Can we figure that out?
        let deps = deps.into_iter().map(|d| d.into()).filter(|dep| unit.key() != dep.key());
        for dep in deps {
            self.dep_graph.entry(unit.key()).or_insert_with(HashSet::new).insert(dep.key());
            self.rev_dep_graph.entry(dep.key()).or_insert_with(HashSet::new).insert(unit.key());
            self.units.entry(dep.key()).or_insert(dep);
        }
        // We expect these entries to be present for each unit in the graph
        self.dep_graph.entry(unit.key()).or_insert_with(HashSet::new);
        self.rev_dep_graph.entry(unit.key()).or_insert_with(HashSet::new);
        self.units.entry(unit.key()).or_insert(unit);
    }
    fn dirties<T: AsRef<Path>>(&self, files: &[T]) -> Vec<&Self::Unit> {
        self.fetch_dirty_units(files)
            .iter()
            .map(|key| self.units.get(key).expect("dirties"))
            .collect()
    }
    fn dirties_transitive<T: AsRef<Path>>(&self, files: &[T]) -> Vec<&Self::Unit> {
        let dirties = self.fetch_dirty_units(files);
        self.transitive_dirty_units(&dirties)
            .iter()
            .map(|key| self.units.get(key).expect("dirties_transitive"))
            .collect()
    }
    fn topological_sort(&self, units: Vec<&Self::Unit>) -> Vec<&Self::Unit> {
        let keys = units.into_iter().map(|u| u.key()).collect();
        let graph = self.dirty_rev_dep_graph(&keys);
        // Explicit UFCS call to the inherent method, which this trait method shadows.
        CargoPlan::topological_sort(self, &graph)
            .iter()
            .map(|key| self.units.get(key).expect("topological_sort"))
            .collect()
    }
    fn prepare_work<T: AsRef<Path> + std::fmt::Debug>(&self, files: &[T]) -> WorkStatus {
        CargoPlan::prepare_work(self, files)
    }
}
| 38.250401 | 105 | 0.579815 |
fca4038556db83cbe422f0f79b93be2d37762866 | 5,236 | use piston::input::Input;
use slog::Logger;
use specs;
use specs::{Read, ReadStorage, Write, WriteStorage};
use std::sync::mpsc;
use crate::pk::cell_dweller::ActiveCellDweller;
use crate::pk::input_adapter;
use crate::pk::net::{Destination, NetMarker, SendMessage, SendMessageQueue, Transport};
use crate::pk::types::*;
use super::{ShootGrenadeMessage, WeaponMessage};
use crate::client_state::ClientState;
use crate::fighter::Fighter;
use crate::message::Message;
/// Translates raw piston input events into `ShootEvent`s sent over a channel.
pub struct ShootInputAdapter {
    // Sending half of the channel consumed by `ShootSystem`.
    sender: mpsc::Sender<ShootEvent>,
}
impl ShootInputAdapter {
    /// Creates an adapter that forwards shoot events through `sender`.
    pub fn new(sender: mpsc::Sender<ShootEvent>) -> ShootInputAdapter {
        ShootInputAdapter { sender: sender }
    }
}
impl input_adapter::InputAdapter for ShootInputAdapter {
    /// Emits a `ShootEvent(true)` on space press and `ShootEvent(false)` on
    /// release; all other input is ignored.
    fn handle(&self, input_event: &Input) {
        use piston::input::keyboard::Key;
        use piston::input::{Button, ButtonState};
        if let &Input::Button(button_args) = input_event {
            if let Button::Keyboard(key) = button_args.button {
                let is_down = match button_args.state {
                    ButtonState::Press => true,
                    ButtonState::Release => false,
                };
                match key {
                    // `send` only fails if the receiver was dropped, which
                    // would mean the ShootSystem is gone; treat as fatal.
                    Key::Space => self.sender.send(ShootEvent(is_down)).unwrap(),
                    _ => (),
                }
            }
        }
    }
}
/// Input event carrying the shoot button state: `true` while held down.
pub struct ShootEvent(bool);
/// ECS system that fires grenades for the active cell dweller.
pub struct ShootSystem {
    // Receiving half of the channel fed by `ShootInputAdapter`.
    input_receiver: mpsc::Receiver<ShootEvent>,
    log: Logger,
    // Latest known state of the shoot button (true = held down).
    shoot: bool,
}
impl ShootSystem {
    /// Creates a `ShootSystem` that reads `ShootEvent`s from `input_receiver`
    /// and logs under a child of `parent_log`.
    pub fn new(input_receiver: mpsc::Receiver<ShootEvent>, parent_log: &Logger) -> ShootSystem {
        ShootSystem {
            input_receiver,
            log: parent_log.new(o!()),
            shoot: false,
        }
    }
    /// Drains all pending input events, keeping only the most recent
    /// pressed/released state of the shoot button.
    fn consume_input(&mut self) {
        // `try_recv` returns `Err` both when the channel is empty and when it
        // is disconnected; either way there is nothing left to consume, so
        // the `while let` loop terminates — same behavior as the original
        // explicit `loop`/`match`, written idiomatically.
        while let Ok(ShootEvent(is_down)) = self.input_receiver.try_recv() {
            self.shoot = is_down;
        }
    }
}
impl<'a> specs::System<'a> for ShootSystem {
    type SystemData = (
        Read<'a, TimeDeltaResource>,
        Read<'a, ActiveCellDweller>,
        WriteStorage<'a, Fighter>,
        Read<'a, ClientState>,
        Write<'a, SendMessageQueue<Message>>,
        ReadStorage<'a, NetMarker>,
    );
    /// Per-frame update: drains buffered input, ticks down the active
    /// fighter's shot cooldown, and — when firing is requested and the
    /// cooldown has elapsed — queues a `ShootGrenade` message for the master.
    fn run(&mut self, data: Self::SystemData) {
        self.consume_input();
        let (
            dt,
            active_cell_dweller_resource,
            mut fighters,
            client_state,
            mut send_message_queue,
            net_markers,
        ) = data;
        // Find the active fighter, even if we're not currently trying to shoot;
        // we might need to count down the time until we can next shoot.
        // If there isn't one, then just silently move on.
        let active_cell_dweller_entity = match active_cell_dweller_resource.maybe_entity {
            Some(entity) => entity,
            None => return,
        };
        // (Was `!fighters.get(...).is_some()`; `is_none` is the idiomatic form.)
        if fighters.get(active_cell_dweller_entity).is_none() {
            // This entity hasn't been realised yet;
            // can't do anything else with it this frame.
            // TODO: isn't this was `is_alive` is supposed to achieve?
            // And yet it doesn't seem to...
            return;
        }
        // Assume it is a fighter, because those are the only cell dwellers
        // you're allowed to control in this game.
        let active_fighter = fighters
            .get_mut(active_cell_dweller_entity)
            .expect("Cell dweller should have had a fighter attached!");
        // Count down until we're allowed to shoot next.
        if active_fighter.seconds_until_next_shot > 0.0 {
            active_fighter.seconds_until_next_shot =
                (active_fighter.seconds_until_next_shot - dt.0).max(0.0);
        }
        let still_waiting_to_shoot = active_fighter.seconds_until_next_shot > 0.0;
        if self.shoot && !still_waiting_to_shoot {
            self.shoot = false;
            let fired_by_player_id = client_state
                .player_id
                .expect("There should be a current player.");
            let fired_by_cell_dweller_entity_id = net_markers
                .get(active_cell_dweller_entity)
                .expect("Active cell dweller should have global identity")
                .id;
            // Place the bullet in the same location as the player,
            // relative to the same globe.
            debug!(self.log, "Fire!");
            // Ask the server/master to spawn a grenade.
            // (TODO: really need to decide on termonology around server/master/client/peer/etc.)
            send_message_queue.queue.push_back(SendMessage {
                destination: Destination::Master,
                game_message: Message::Weapon(WeaponMessage::ShootGrenade(ShootGrenadeMessage {
                    fired_by_player_id: fired_by_player_id,
                    fired_by_cell_dweller_entity_id: fired_by_cell_dweller_entity_id,
                })),
                transport: Transport::UDP,
            });
            // Reset time until we can shoot again.
            active_fighter.seconds_until_next_shot = active_fighter.seconds_between_shots;
        }
    }
}
| 33.780645 | 97 | 0.593774 |
234c2865892cb555d1f322e2f401c9b987d778b2 | 264 | pub mod bfs;
// Public submodules of this crate; each declaration pulls in the
// correspondingly named file.
pub mod bound;
pub mod dijsktra;
pub mod ford_fulkerson;
pub mod gcd;
pub mod graph;
pub mod kruskal;
pub mod maxmin;
pub mod mod_calc;
pub mod parser;
pub mod prime;
pub mod rmq;
pub mod seq;
pub mod text;
pub mod union_find;
pub mod warshall_floyd;
| 15.529412 | 23 | 0.757576 |
struct Solution;

impl Solution {
    /// Counts the "special" positions in a matrix: cells containing 1 that
    /// are the only 1 in both their row and their column.
    ///
    /// Panics if `mat` is empty (indexes `mat[0]`), matching the original.
    fn num_special(mat: Vec<Vec<i32>>) -> i32 {
        let width = mat[0].len();

        // First pass: tally how many 1-entries each row and column holds.
        let mut row_ones = vec![0; mat.len()];
        let mut col_ones = vec![0; width];
        for (i, row) in mat.iter().enumerate() {
            for (j, &cell) in row.iter().enumerate() {
                if cell == 1 {
                    row_ones[i] += 1;
                    col_ones[j] += 1;
                }
            }
        }

        // Second pass: a cell is special iff it is 1 and both tallies are 1.
        let mut special = 0;
        for (i, row) in mat.iter().enumerate() {
            for (j, &cell) in row.iter().enumerate() {
                if cell == 1 && row_ones[i] == 1 && col_ones[j] == 1 {
                    special += 1;
                }
            }
        }
        special
    }
}
#[test]
fn test() {
    // `vec_vec_i32!` is a project helper macro (defined elsewhere with
    // #[macro_use]) that builds a Vec<Vec<i32>> from array-like syntax.
    let mat = vec_vec_i32![[1, 0, 0], [0, 0, 1], [1, 0, 0]];
    let res = 1;
    assert_eq!(Solution::num_special(mat), res);
    let mat = vec_vec_i32![[1, 0, 0], [0, 1, 0], [0, 0, 1]];
    let res = 3;
    assert_eq!(Solution::num_special(mat), res);
    let mat = vec_vec_i32![[0, 0, 0, 1], [1, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 0]];
    let res = 2;
    assert_eq!(Solution::num_special(mat), res);
    let mat = vec_vec_i32![
        [0, 0, 0, 0, 0],
        [1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0],
        [0, 0, 1, 0, 0],
        [0, 0, 0, 1, 1]
    ];
    let res = 3;
    assert_eq!(Solution::num_special(mat), res);
}
| 25.92 | 83 | 0.381944 |
1cf30fd09b2bf4fd5810d1693294887c1d8d0793 | 28,664 | //! # Validator Set Update Circuit
//!
//! Prove the validator state transition function for the BLS 12-377 curve.
use crate::gadgets::{g2_to_bits, single_update::SingleUpdate, EpochBits, EpochData};
use bls_gadgets::{BlsVerifyGadget, FpUtils};
use algebra::{
bls12_377::{Bls12_377, G1Projective, G2Projective, Parameters as Bls12_377_Parameters},
bw6_761::Fr,
curves::bls12::Bls12Parameters,
PairingEngine, ProjectiveCurve,
};
use groth16::{Proof, VerifyingKey};
use r1cs_core::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError};
use r1cs_std::{
alloc::AllocationMode,
bls12_377::{Fq2Var, G1Var, G2Var, PairingVar},
bls12_377::{G1PreparedVar, G2PreparedVar},
fields::fp::FpVar,
pairing::PairingVar as _,
prelude::*,
Assignment,
};
use tracing::{debug, info, span, Level};
// Initialize BLS verification gadget
// Shorthand aliases for the concrete curve/field choices used throughout
// this circuit (BLS12-377 inner curve, BW6-761 scalar field Fr).
type BlsGadget = BlsVerifyGadget<Bls12_377, Fr, PairingVar>;
type FrVar = FpVar<Fr>;
type Bool = Boolean<<Bls12_377_Parameters as Bls12Parameters>::Fp>;
#[derive(Clone, Debug)]
/// Contains the initial epoch block, followed by a list of epoch block transitions. The
/// aggregated signature is calculated over all epoch block changes. Providing the hash helper
/// will not constrain the CRH->XOF calculation.
pub struct ValidatorSetUpdate<E: PairingEngine> {
    /// The starting epoch: the trusted state the chain of transitions begins from.
    pub initial_epoch: EpochData<E>,
    /// The number of validators over all the epochs
    pub num_validators: u32,
    /// A list of all the updates for multiple epochs
    pub epochs: Vec<SingleUpdate<E>>,
    /// The aggregated signature of all the validators over all the epoch changes
    pub aggregated_signature: Option<E::G1Projective>,
    /// The optional hash to bits proof data. If provided, the circuit **will not**
    /// constrain the inner CRH->XOF hashes in BW6_761 and instead it will be verified
    /// via the helper's proof which is in BLS12-377.
    pub hash_helper: Option<HashToBitsHelper<E>>,
}
#[derive(Clone, Debug)]
/// The proof and verifying key which will be used to verify the CRH->XOF conversion
/// (delegating that work to a cheaper BLS12-377 inner proof).
pub struct HashToBitsHelper<E: PairingEngine> {
    /// The Groth16 proof satisfying the CRH->XOF conversion
    pub proof: Proof<E>,
    /// The VK produced by the trusted setup
    pub verifying_key: VerifyingKey<E>,
}
impl<E: PairingEngine> ValidatorSetUpdate<E> {
    /// Initializes an empty validator set update. This is used when running the trusted setup.
    #[tracing::instrument(target = "r1cs")]
    pub fn empty(
        num_validators: usize,
        num_epochs: usize,
        maximum_non_signers: usize,
        vk: Option<VerifyingKey<E>>,
    ) -> Self {
        // A hash-to-bits helper is only attached when a verifying key was supplied;
        // the proof itself is a default placeholder at setup time.
        let hash_helper = vk.map(|verifying_key| HashToBitsHelper {
            proof: Proof::<E>::default(),
            verifying_key,
        });

        // Every epoch transition starts from the same blank template.
        let template_update = SingleUpdate::empty(num_validators, maximum_non_signers);

        ValidatorSetUpdate {
            initial_epoch: EpochData::empty(num_validators, maximum_non_signers),
            num_validators: num_validators as u32,
            epochs: vec![template_update; num_epochs],
            aggregated_signature: None,
            hash_helper,
        }
    }
}
impl ConstraintSynthesizer<Fr> for ValidatorSetUpdate<Bls12_377> {
    /// Enforce that the signatures over the epochs have been calculated
    /// correctly, and then compress the public inputs
    #[tracing::instrument(target = "r1cs")]
    fn generate_constraints(self, cs: ConstraintSystemRef<Fr>) -> Result<(), SynthesisError> {
        let span = span!(Level::TRACE, "ValidatorSetUpdate");
        let _enter = span.enter();
        info!("generating constraints");
        // Verify signatures (constrains every epoch transition and the
        // aggregate BLS signature, returning the bits to be compressed).
        let epoch_bits = self.enforce(cs)?;
        // Reuse the constraint system the epoch bits were allocated in.
        let cs = epoch_bits.first_epoch_bits.cs();
        // Compress public inputs (optionally via the hash helper proof).
        epoch_bits.verify(self.hash_helper, cs)?;
        info!("constraints generated");
        Ok(())
    }
}
impl ValidatorSetUpdate<Bls12_377> {
    /// Verify in the constraint system the aggregate BLS
    /// signature after constraining the epoch hashes and aggregate
    /// public keys for each epoch
    #[tracing::instrument(target = "r1cs")]
    fn enforce(
        &self,
        cs: ConstraintSystemRef<<Bls12_377_Parameters as Bls12Parameters>::Fp>,
    ) -> Result<EpochBits, SynthesisError> {
        let span = span!(Level::TRACE, "ValidatorSetUpdate_enforce");
        let _enter = span.enter();
        debug!("converting initial EpochData to_bits");
        // Constrain the initial epoch and get its bits
        let (
            _,
            _,
            first_epoch_bits,
            _,
            first_epoch_index,
            first_epoch_entropy,
            _,
            initial_maximum_non_signers,
            initial_pubkey_vars,
        ) = self.initial_epoch.to_bits(cs)?;

        // Constrain all intermediate epochs, and get the aggregate pubkey and epoch hash
        // from each one, to be used for the batch verification
        debug!("verifying intermediate epochs");
        let (
            last_epoch_bits,
            crh_bits,
            xof_bits,
            prepared_aggregated_public_keys,
            prepared_message_hashes,
        ) = self.verify_intermediate_epochs(
            first_epoch_index,
            first_epoch_entropy,
            initial_pubkey_vars,
            initial_maximum_non_signers,
        )?;

        // Verify the aggregate BLS signature
        debug!("verifying bls signature");
        self.verify_signature(
            &prepared_aggregated_public_keys,
            &prepared_message_hashes,
            first_epoch_bits.cs(),
        )?;

        Ok(EpochBits {
            first_epoch_bits,
            last_epoch_bits,
            crh_bits,
            xof_bits,
        })
    }

    /// Ensure that all epochs's bitmaps have been correctly computed
    /// and generates the witness data necessary for the final BLS Sig
    /// verification and witness compression
    #[allow(clippy::type_complexity)]
    #[tracing::instrument(target = "r1cs")]
    fn verify_intermediate_epochs(
        &self,
        first_epoch_index: FrVar,
        first_epoch_entropy: FrVar,
        initial_pubkey_vars: Vec<G2Var>,
        initial_max_non_signers: FrVar,
    ) -> Result<
        (
            Vec<Bool>,
            Vec<Bool>,
            Vec<Bool>,
            Vec<G2PreparedVar>,
            Vec<G1PreparedVar>,
        ),
        SynthesisError,
    > {
        let span = span!(Level::TRACE, "verify_intermediate_epochs");
        let _enter = span.enter();
        // Constant placeholder group elements substituted for padding
        // ("dummy") epochs so the pairing batch stays well-formed.
        let dummy_pk = G2Var::new_variable_omit_prime_order_check(
            first_epoch_index.cs(),
            || Ok(G2Projective::prime_subgroup_generator()),
            AllocationMode::Constant,
        )?;
        let dummy_message = G1Var::new_variable_omit_prime_order_check(
            first_epoch_index.cs(),
            || Ok(G1Projective::prime_subgroup_generator()),
            AllocationMode::Constant,
        )?;
        // Trivially satisfy entropy circuit logic if the first epoch does not
        // contain entropy. Done to support earlier versions of Celo.
        // Assumes all epochs past a single version will contain entropy
        let entropy_bit = first_epoch_entropy.is_eq_zero()?.not();

        let mut prepared_aggregated_public_keys = vec![];
        let mut prepared_message_hashes = vec![];
        let mut last_epoch_bits = vec![];
        // Rolling state threaded through the epoch chain: each iteration
        // conditionally advances it when the epoch is not a dummy.
        let mut previous_epoch_index = first_epoch_index;
        let mut previous_pubkey_vars = initial_pubkey_vars;
        let mut previous_max_non_signers = initial_max_non_signers;
        let mut previous_epoch_entropy = first_epoch_entropy;
        let mut all_crh_bits = vec![];
        let mut all_xof_bits = vec![];
        for (i, epoch) in self.epochs.iter().enumerate() {
            let span = span!(Level::TRACE, "index", i);
            let _enter = span.enter();
            let constrained_epoch = epoch.constrain(
                &previous_pubkey_vars,
                &previous_epoch_index,
                &previous_epoch_entropy,
                &previous_max_non_signers,
                &entropy_bit,
                self.num_validators,
                self.hash_helper.is_none(), // generate all constraints in BW6_761 if no helper was provided
            )?;

            // If zero, indicates the current epoch is a "dummy" value, and so
            // some values shouldn't be updated in this loop
            let index_bit = constrained_epoch.index.is_eq_zero()?.not();

            // Update the randomness for the next iteration
            previous_epoch_entropy = FrVar::conditionally_select(
                &index_bit,
                &constrained_epoch.epoch_entropy,
                &previous_epoch_entropy,
            )?;
            // Update the pubkeys for the next iteration
            previous_epoch_index = FrVar::conditionally_select(
                &index_bit,
                &constrained_epoch.index,
                &previous_epoch_index,
            )?;
            previous_pubkey_vars = constrained_epoch
                .new_pubkeys
                .iter()
                .zip(previous_pubkey_vars.iter())
                .map(|(new_pk, old_pk)| G2Var::conditionally_select(&index_bit, new_pk, old_pk))
                .collect::<Result<Vec<_>, _>>()?;
            previous_max_non_signers = FrVar::conditionally_select(
                &index_bit,
                &constrained_epoch.new_max_non_signers,
                &previous_max_non_signers,
            )?;
            // For dummy epochs, substitute the constant generator pair so the
            // later pairing check is unaffected by them.
            let aggregate_pk = G2Var::conditionally_select(
                &index_bit,
                &constrained_epoch.aggregate_pk,
                &dummy_pk,
            )?;
            let prepared_aggregate_pk = PairingVar::prepare_g2(&aggregate_pk)?;
            let message_hash = G1Var::conditionally_select(
                &index_bit,
                &constrained_epoch.message_hash,
                &dummy_message,
            )?;
            let prepared_message_hash = PairingVar::prepare_g1(&message_hash)?;

            // Save the aggregated pubkey / message hash pair for the BLS batch verification
            prepared_aggregated_public_keys.push(prepared_aggregate_pk);
            prepared_message_hashes.push(prepared_message_hash);

            // Save the xof/crh and the last epoch's bits for compressing the public inputs
            all_crh_bits.extend_from_slice(&constrained_epoch.crh_bits);
            all_xof_bits.extend_from_slice(&constrained_epoch.xof_bits);
            if i == self.epochs.len() - 1 {
                let last_apk = BlsGadget::enforce_aggregated_all_pubkeys(
                    &previous_pubkey_vars, // These are now the last epoch new pubkeys
                )?;
                // Normalize from projective to affine coordinates (z == 1)
                // before serializing the point to bits.
                let affine_x = last_apk.x.mul_by_inverse(&last_apk.z)?;
                let affine_y = last_apk.y.mul_by_inverse(&last_apk.z)?;
                let last_apk_affine = G2Var::new(affine_x, affine_y, Fq2Var::one());
                let last_apk_bits = g2_to_bits(&last_apk_affine)?;
                last_epoch_bits = constrained_epoch.combined_last_epoch_bits;
                last_epoch_bits.extend_from_slice(&last_apk_bits);
                // make sure the last epoch index is not zero
                index_bit.enforce_equal(&Boolean::Constant(true))?;
            }
            debug!("epoch {} constrained", i);
        }
        debug!("intermediate epochs verified");

        Ok((
            last_epoch_bits,
            all_crh_bits,
            all_xof_bits,
            prepared_aggregated_public_keys,
            prepared_message_hashes,
        ))
    }

    // Verify the aggregate signature against the per-epoch prepared
    // (pubkey, message-hash) pairs collected above.
    #[tracing::instrument(target = "r1cs")]
    fn verify_signature(
        &self,
        pubkeys: &[G2PreparedVar],
        messages: &[G1PreparedVar],
        cs: ConstraintSystemRef<<Bls12_377_Parameters as Bls12Parameters>::Fp>,
    ) -> Result<(), SynthesisError> {
        let aggregated_signature = G1Var::new_variable_omit_prime_order_check(
            cs,
            || self.aggregated_signature.get(),
            AllocationMode::Witness,
        )?;
        BlsGadget::batch_verify_prepared(&pubkeys, &messages, &aggregated_signature)?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    // End-to-end synthesis tests: build small validator sets, run the full
    // circuit, and check (un)satisfiability plus public-input packing.
    use super::*;
    use crate::gadgets::single_update::test_helpers::generate_single_update;
    use bls_gadgets::utils::test_helpers::{
        print_unsatisfied_constraints, run_profile_constraints,
    };

    use algebra::{bls12_377::G1Projective, ProjectiveCurve};
    use bls_crypto::test_helpers::{keygen_batch, keygen_mul, sign_batch, sum};
    use r1cs_core::ConstraintSystem;

    type Curve = Bls12_377;
    // Entropy values are optional byte vectors (None = pre-entropy Celo).
    type Entropy = Option<Vec<u8>>;

    // let's run our tests with 7 validators and 2 faulty ones
    mod epoch_batch_verification {
        use super::*;
        use crate::epoch_block::hash_first_last_epoch_block;
        use crate::gadgets::single_update::test_helpers::generate_dummy_update;
        use crate::{BWField, BWFrParams, EpochBlock};
        use bls_crypto::PublicKey;

        /// Converts the circuit-side `EpochData` into the native
        /// `EpochBlock` representation used for hashing.
        fn epoch_data_to_block(data: &EpochData<Curve>) -> EpochBlock {
            EpochBlock::new(
                data.index.unwrap(),
                data.round.unwrap(),
                data.epoch_entropy.clone(),
                data.parent_entropy.clone(),
                data.maximum_non_signers,
                data.public_keys.len(),
                data.public_keys
                    .iter()
                    .map(|p| PublicKey::from(p.unwrap()))
                    .collect(),
            )
        }

        /// Shared driver: builds `num_epochs` transitions with the given
        /// signer bitmaps and entropy pairs (parent, current), signs each
        /// epoch hash with the previous epoch's validators, optionally
        /// splices in dummy epochs, then synthesizes the circuit and
        /// returns whether it was satisfied.
        #[tracing::instrument(target = "r1cs")]
        fn test_epochs(
            faults: u32,
            num_epochs: usize,
            initial_entropy: Entropy,
            entropy: Vec<(Entropy, Entropy)>,
            bitmaps: Vec<Vec<bool>>,
            include_dummy_epochs: bool,
        ) -> bool {
            let num_validators = 3 * faults + 1;
            let initial_validator_set = keygen_mul::<Curve>(num_validators as usize);
            let initial_epoch = generate_single_update::<Curve>(
                0,
                0,
                initial_entropy,
                None, // parent entropy of initial epoch should be ignored
                faults,
                &initial_validator_set.1,
                &[],
            )
            .epoch_data;

            // Generate validators for each of the epochs
            let validators = keygen_batch::<Curve>(num_epochs, num_validators as usize);
            // Generate `num_epochs` epochs
            let mut epochs = validators
                .1
                .iter()
                .zip(entropy)
                .enumerate()
                .map(
                    |(epoch_index, (epoch_validators, (parent_entropy, entropy)))| {
                        generate_single_update::<Curve>(
                            epoch_index as u16 + 1,
                            0u8,
                            entropy,
                            parent_entropy,
                            faults,
                            epoch_validators,
                            &bitmaps[epoch_index],
                        )
                    },
                )
                .collect::<Vec<_>>();

            // The i-th validator set, signs on the i+1th epoch's G1 hash
            let mut signers = vec![initial_validator_set.0];
            signers.extend_from_slice(&validators.0[..validators.1.len() - 1]);

            // Filter the private keys which had a 1 in the boolean per epoch
            let mut signers_filtered = Vec::new();
            for i in 0..signers.len() {
                let mut epoch_signers_filtered = Vec::new();
                let epoch_signers = &signers[i];
                let epoch_bitmap = &bitmaps[i];
                for (j, epoch_signer) in epoch_signers.iter().enumerate() {
                    if epoch_bitmap[j] {
                        epoch_signers_filtered.push(*epoch_signer);
                    }
                }
                signers_filtered.push(epoch_signers_filtered);
            }

            use crate::gadgets::test_helpers::hash_epoch;
            let epoch_hashes = epochs
                .iter()
                .map(|update| hash_epoch(&update.epoch_data))
                .collect::<Vec<G1Projective>>();

            // dummy sig is the same as the message, since sk is 1.
            let dummy_message = G1Projective::prime_subgroup_generator();
            let dummy_sig = dummy_message;

            let mut asigs = sign_batch::<Bls12_377>(&signers_filtered, &epoch_hashes);

            if include_dummy_epochs {
                // Splice two dummy epochs (and matching dummy signatures)
                // between the 3rd and 4th real epochs.
                epochs = [
                    &epochs[0..3],
                    &[
                        generate_dummy_update(num_validators),
                        generate_dummy_update(num_validators),
                    ],
                    &[epochs[3].clone()],
                ]
                .concat();
                asigs = [&asigs[0..3], &[dummy_sig, dummy_sig], &[asigs[3]]].concat();
            }
            let aggregated_signature = sum(&asigs);

            let valset = ValidatorSetUpdate::<Curve> {
                initial_epoch: initial_epoch.clone(),
                epochs: epochs.clone(),
                num_validators,
                aggregated_signature: Some(aggregated_signature),
                hash_helper: None,
            };

            let cs = ConstraintSystem::<Fr>::new_ref();
            let epoch_bits = valset.enforce(cs.clone()).unwrap();
            epoch_bits.verify(None, cs.clone()).unwrap();

            // The packed hash of (first, last) epoch blocks must equal the
            // circuit's public inputs (skipping the leading constant `1`).
            let hash = hash_first_last_epoch_block(
                &epoch_data_to_block(&initial_epoch),
                &epoch_data_to_block(&epochs[epochs.len() - 1].epoch_data),
            )
            .unwrap();
            let public_inputs = crate::gadgets::pack::<BWField, BWFrParams>(&hash).unwrap();
            assert_eq!(
                cs.borrow().unwrap().instance_assignment[1..].to_vec(),
                public_inputs
            );

            print_unsatisfied_constraints(cs.clone());
            cs.is_satisfied().unwrap()
        }

        #[test]
        fn test_multiple_epochs() {
            run_profile_constraints(test_multiple_epochs_inner);
        }
        #[tracing::instrument(target = "r1cs")]
        fn test_multiple_epochs_inner() {
            let num_faults = 2;
            let num_epochs = 4;
            // no more than `faults` 0s exist in the bitmap
            // (i.e. at most `faults` validators who do not sign on the next validator set)
            let bitmaps = vec![
                vec![true, true, false, true, true, true, true],
                vec![true, true, false, true, true, true, true],
                vec![true, true, true, true, false, false, true],
                vec![true, true, true, true, true, true, true],
            ];
            let initial_entropy = None;
            let entropy = vec![(None, None), (None, None), (None, None), (None, None)];
            let include_dummy_epochs = false;
            assert!(test_epochs(
                num_faults,
                num_epochs,
                initial_entropy,
                entropy,
                bitmaps,
                include_dummy_epochs
            ));
        }

        #[test]
        fn test_multiple_epochs_with_dummy() {
            run_profile_constraints(test_multiple_epochs_with_dummy_inner);
        }
        #[tracing::instrument(target = "r1cs")]
        fn test_multiple_epochs_with_dummy_inner() {
            let num_faults = 2;
            let num_epochs = 4;
            // no more than `faults` 0s exist in the bitmap
            // (i.e. at most `faults` validators who do not sign on the next validator set)
            let bitmaps = vec![
                vec![true, true, false, true, true, true, true],
                vec![true, true, false, true, true, true, true],
                vec![true, true, true, true, false, false, true],
                vec![true, true, true, true, true, true, true],
            ];
            let initial_entropy = None;
            let entropy = vec![(None, None), (None, None), (None, None), (None, None)];
            let include_dummy_epochs = true;
            assert!(test_epochs(
                num_faults,
                num_epochs,
                initial_entropy,
                entropy,
                bitmaps,
                include_dummy_epochs
            ));
        }

        #[test]
        fn test_multiple_epochs_with_entropy() {
            run_profile_constraints(test_multiple_epochs_with_entropy_inner);
        }
        #[tracing::instrument(target = "r1cs")]
        fn test_multiple_epochs_with_entropy_inner() {
            let num_faults = 2;
            let num_epochs = 4;
            // no more than `faults` 0s exist in the bitmap
            // (i.e. at most `faults` validators who do not sign on the next validator set)
            let bitmaps = vec![
                vec![true, true, false, true, true, true, true],
                vec![true, true, false, true, true, true, true],
                vec![true, true, true, true, false, false, true],
                vec![true, true, true, true, true, true, true],
            ];
            let initial_entropy = Some(vec![1u8; EpochData::<Curve>::ENTROPY_BYTES]);
            // Each tuple is (parent_entropy, entropy); parents chain onto the
            // previous epoch's entropy, so this sequence is consistent.
            let entropy = vec![
                (
                    Some(vec![1u8; EpochData::<Curve>::ENTROPY_BYTES]),
                    Some(vec![2u8; EpochData::<Curve>::ENTROPY_BYTES]),
                ),
                (
                    Some(vec![2u8; EpochData::<Curve>::ENTROPY_BYTES]),
                    Some(vec![3u8; EpochData::<Curve>::ENTROPY_BYTES]),
                ),
                (
                    Some(vec![3u8; EpochData::<Curve>::ENTROPY_BYTES]),
                    Some(vec![4u8; EpochData::<Curve>::ENTROPY_BYTES]),
                ),
                (
                    Some(vec![4u8; EpochData::<Curve>::ENTROPY_BYTES]),
                    Some(vec![5u8; EpochData::<Curve>::ENTROPY_BYTES]),
                ),
            ];
            let include_dummy_epochs = false;
            assert!(test_epochs(
                num_faults,
                num_epochs,
                initial_entropy,
                entropy,
                bitmaps,
                include_dummy_epochs
            ));
        }

        #[test]
        fn test_multiple_epochs_with_wrong_entropy() {
            run_profile_constraints(test_multiple_epochs_with_wrong_entropy_inner);
        }
        #[tracing::instrument(target = "r1cs")]
        fn test_multiple_epochs_with_wrong_entropy_inner() {
            let num_faults = 2;
            let num_epochs = 4;
            // no more than `faults` 0s exist in the bitmap
            // (i.e. at most `faults` validators who do not sign on the next validator set)
            let bitmaps = vec![
                vec![true, true, false, true, true, true, true],
                vec![true, true, false, true, true, true, true],
                vec![true, true, true, true, false, false, true],
                vec![true, true, true, true, true, true, true],
            ];
            let initial_entropy = Some(vec![1u8; EpochData::<Curve>::ENTROPY_BYTES]);
            let entropy = vec![
                (
                    Some(vec![1u8; EpochData::<Curve>::ENTROPY_BYTES]),
                    Some(vec![2u8; EpochData::<Curve>::ENTROPY_BYTES]),
                ),
                (
                    Some(vec![2u8; EpochData::<Curve>::ENTROPY_BYTES]),
                    Some(vec![3u8; EpochData::<Curve>::ENTROPY_BYTES]),
                ),
                // parent entropy does not match previous entropy
                (
                    Some(vec![5u8; EpochData::<Curve>::ENTROPY_BYTES]),
                    Some(vec![4u8; EpochData::<Curve>::ENTROPY_BYTES]),
                ),
                (
                    Some(vec![4u8; EpochData::<Curve>::ENTROPY_BYTES]),
                    Some(vec![5u8; EpochData::<Curve>::ENTROPY_BYTES]),
                ),
            ];
            let include_dummy_epochs = false;
            // Broken entropy chain => circuit must be unsatisfied.
            assert!(!test_epochs(
                num_faults,
                num_epochs,
                initial_entropy,
                entropy,
                bitmaps,
                include_dummy_epochs
            ));
        }

        #[test]
        fn test_multiple_epochs_with_wrong_entropy_dummy() {
            run_profile_constraints(test_multiple_epochs_with_wrong_entropy_dummy_inner);
        }
        #[tracing::instrument(target = "r1cs")]
        fn test_multiple_epochs_with_wrong_entropy_dummy_inner() {
            let num_faults = 2;
            let num_epochs = 4;
            // no more than `faults` 0s exist in the bitmap
            // (i.e. at most `faults` validators who do not sign on the next validator set)
            let bitmaps = vec![
                vec![true, true, false, true, true, true, true],
                vec![true, true, false, true, true, true, true],
                vec![true, true, true, true, false, false, true],
                vec![true, true, true, true, true, true, true],
            ];
            let initial_entropy = Some(vec![1u8; EpochData::<Curve>::ENTROPY_BYTES]);
            let entropy = vec![
                (
                    Some(vec![1u8; EpochData::<Curve>::ENTROPY_BYTES]),
                    Some(vec![2u8; EpochData::<Curve>::ENTROPY_BYTES]),
                ),
                (
                    Some(vec![2u8; EpochData::<Curve>::ENTROPY_BYTES]),
                    Some(vec![3u8; EpochData::<Curve>::ENTROPY_BYTES]),
                ),
                (
                    Some(vec![3u8; EpochData::<Curve>::ENTROPY_BYTES]),
                    Some(vec![4u8; EpochData::<Curve>::ENTROPY_BYTES]),
                ),
                // parent entropy does not match previous entropy
                (
                    Some(vec![6u8; EpochData::<Curve>::ENTROPY_BYTES]),
                    Some(vec![5u8; EpochData::<Curve>::ENTROPY_BYTES]),
                ),
            ];
            // dummy blocks inserted just before the last epoch
            // epoch blocks should verify as if the dummy blocks were not there
            let include_dummy_epochs = true;
            assert!(!test_epochs(
                num_faults,
                num_epochs,
                initial_entropy,
                entropy,
                bitmaps,
                include_dummy_epochs
            ));
        }

        #[test]
        fn test_multiple_epochs_with_no_initial_entropy() {
            run_profile_constraints(test_multiple_epochs_with_no_initial_entropy_inner);
        }
        #[tracing::instrument(target = "r1cs")]
        fn test_multiple_epochs_with_no_initial_entropy_inner() {
            let num_faults = 2;
            let num_epochs = 4;
            // no more than `faults` 0s exist in the bitmap
            // (i.e. at most `faults` validators who do not sign on the next validator set)
            let bitmaps = vec![
                vec![true, true, false, true, true, true, true],
                vec![true, true, false, true, true, true, true],
                vec![true, true, true, true, false, false, true],
                vec![true, true, true, true, true, true, true],
            ];
            // all entropy should be ignored
            let initial_entropy = None;
            let entropy = vec![
                (
                    Some(vec![1u8; EpochData::<Curve>::ENTROPY_BYTES]),
                    Some(vec![2u8; EpochData::<Curve>::ENTROPY_BYTES]),
                ),
                (
                    Some(vec![2u8; EpochData::<Curve>::ENTROPY_BYTES]),
                    Some(vec![3u8; EpochData::<Curve>::ENTROPY_BYTES]),
                ),
                // parent entropy does not match previous entropy
                (
                    Some(vec![5u8; EpochData::<Curve>::ENTROPY_BYTES]),
                    Some(vec![4u8; EpochData::<Curve>::ENTROPY_BYTES]),
                ),
                (
                    Some(vec![4u8; EpochData::<Curve>::ENTROPY_BYTES]),
                    Some(vec![5u8; EpochData::<Curve>::ENTROPY_BYTES]),
                ),
            ];
            let include_dummy_epochs = false;
            assert!(test_epochs(
                num_faults,
                num_epochs,
                initial_entropy,
                entropy,
                bitmaps,
                include_dummy_epochs
            ));
        }
    }
}
| 39.265753 | 108 | 0.550272 |
e51966f38035a71bb6e38e47e3f13e8f3b1dcb9f | 151 | fn main() {
let word = "silence";
let definition = webster::definition(word).unwrap();
println!("{} definition: {}", word, definition);
} | 21.571429 | 56 | 0.602649 |
3333a1b225d968392ce5323ed8248b7958134d65 | 1,702 | use super::Bitmap;
use alloc::vec::Vec;
#[derive(Debug, PartialEq, Eq)]
pub struct DirtyState {
    /// Track the addresses of the block in guest memory which are dirty
    dirty_indices: Vec<usize>,
    /// Track which parts of memory have been dirtied; it's used as a filter
    /// to avoid duplicated entries inside `dirty_indices`.
    dirty_bitmap: Bitmap,
    /// How many indices we can store, this is a parameter passed by
    /// [`DirtyState::new`]
    len: usize,
}
impl DirtyState {
    /// Create a new Dirty State. To be compatible with the underlying Bitmap
    /// `len` must be a multiple of the number of bits in a word of memory
    /// (usize)
    pub fn new(len: usize) -> Result<Self, usize> {
        Ok(DirtyState {
            // Worst case every block is dirtied exactly once (the bitmap
            // deduplicates), so `len` slots suffice.
            dirty_indices: Vec::with_capacity(len),
            dirty_bitmap: Bitmap::new(len)?,
            len,
        })
    }
    /// Sign a certain block as dirty
    pub fn dirty(&mut self, block_idx: usize) {
        // if it wasn't dirty
        if !self.dirty_bitmap.get(block_idx) {
            // add the block idx to the indices
            self.dirty_indices.push(block_idx);
            // and set it's bit in the bitmap for deduplication
            self.dirty_bitmap.set(block_idx);
        }
    }
    /// Returns an iterator over the dirtied indices while resetting itself
    /// so that the allocations can be re-used.
    /// NOTE(review): the bitmap bit for an index is cleared lazily as the
    /// iterator yields it; dropping the iterator early removes the indices
    /// but appears to leave their bits set — confirm callers always exhaust it.
    pub fn drain(&mut self) -> impl Iterator<Item=usize> + '_ {
        self.dirty_indices.drain(..).map(|idx| {
            self.dirty_bitmap.reset_wide(idx);
            idx
        })
    }
    /// Return the size with which the dirty state was initialized
    pub fn len(&self) -> usize {
        self.len
    }
} | 30.945455 | 78 | 0.606345 |
79c31eb5ed54e68a65ff610fb189a8f7320a635b | 574 | #![cfg_attr(not(with_main), no_std)]
#![feature(core_intrinsics)]
extern crate core;
use core::intrinsics::discriminant_value;
/// Fieldless enum with a gap in its discriminants: explicit 10 and 20,
/// with B and C implicitly continuing from A (11, 12).
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub enum E {
    A = 10,
    B,
    C,
    D = 20,
}
/// Checks that `discriminant_value` reports the declared discriminants:
/// explicit `= 10` / `= 20` stick, while B and C count up from A.
#[cfg_attr(crux, crux_test)]
pub fn f() {
    unsafe {
        assert_eq!(discriminant_value(&E::A), 10);
        assert_eq!(discriminant_value(&E::B), 11);
        assert_eq!(discriminant_value(&E::C), 12);
        assert_eq!(discriminant_value(&E::D), 20);
    }
}
// Host entry point (only built under the `with_main` cfg); prints `f()`'s unit result.
#[cfg(with_main)] pub fn main() { println!("{:?}", f()); }
| 22.076923 | 67 | 0.585366 |
feb54b3468f6d4a9120af18a0e1304ca183120b3 | 12,012 | use data::*;
use generators::core::*;
use generators::numbers::*;
use std::iter;
use std::marker::PhantomData;
/// See [`vecs`](fn.vecs.html)
#[derive(Debug, Clone)]
pub struct VecGenerator<G> {
inner: G,
mean_length: usize,
}
/// See [`info_pools`](fn.info_pools.html)
#[derive(Debug, Clone)]
pub struct InfoPoolGenerator(usize);
/// See [`collections`](fn.collections.html)
#[derive(Debug, Clone)]
pub struct CollectionGenerator<C, G> {
witness: PhantomData<C>,
inner: G,
mean_length: usize,
}
/// See [`choice`](fn.choice.html)
#[derive(Debug, Clone)]
pub struct ChoiceGenerator<T>(Vec<T>);
/// Generates vectors with items given by `inner`.
///
/// The default mean length is 10; tune it via `VecGenerator::mean_length`.
pub fn vecs<G>(inner: G) -> VecGenerator<G> {
    VecGenerator {
        // Field-init shorthand; previously the redundant `inner: inner`.
        inner,
        mean_length: 10,
    }
}
/// Randomly generates an info-pool of exactly `len` bytes
/// (mostly used for testing generators).
pub fn info_pools(len: usize) -> InfoPoolGenerator {
    InfoPoolGenerator(len)
}
/// Generates a collection of the given type, populated with elements from the
/// item generator. The default mean number of generated items is 16.
///
/// To generate values of BTreeSet<u8>:
///
/// ```
/// use std::collections::BTreeSet;
/// use suppositions::generators::*;
/// let gen = collections::<BTreeSet<_>, _>(u8s());
/// ```
pub fn collections<C, G: Generator>(item: G) -> CollectionGenerator<C, G>
where
    C: Extend<G::Item>,
{
    let witness = PhantomData;
    let mean_length = 16;
    CollectionGenerator {
        witness,
        inner: item,
        mean_length,
    }
}
/// Returns a random item from the array.
/// Note: generating from an empty list yields `Err(DataError::SkipItem)`.
pub fn choice<T>(items: Vec<T>) -> ChoiceGenerator<T> {
    ChoiceGenerator(items)
}
impl<G> VecGenerator<G> {
    /// Specify the mean length of the vector.
    /// Consumes and returns `self`, so calls chain builder-style.
    pub fn mean_length(mut self, mean: usize) -> Self {
        self.mean_length = mean;
        self
    }
}
impl<G: Generator> Generator for VecGenerator<G> {
    type Item = Vec<G::Item>;
    /// Draws elements until a weighted "stop" coin comes up; the coin's
    /// bias is chosen so the expected length equals `mean_length`.
    fn generate<I: InfoSource>(&self, src: &mut I) -> Maybe<Self::Item> {
        let mut result = Vec::new();
        // Probability of stopping after each element; the resulting
        // geometric distribution has mean `mean_length`.
        let p_is_final = 1.0 / (1.0 + self.mean_length as f32);
        trace!("-> VecGenerator::generate");
        // `optional_by` yields Some(item) while the continue-coin is true.
        let opts = optional_by(weighted_coin(1.0 - p_is_final), &self.inner);
        while let Some(item) = src.draw(&opts)? {
            result.push(item)
        }
        trace!("<- VecGenerator::generate");
        Ok(result)
    }
}
impl Generator for InfoPoolGenerator {
    type Item = InfoPool;
    /// Draws exactly `self.0` bytes from the source and wraps them in an
    /// `InfoPool`.
    fn generate<I: InfoSource>(&self, src: &mut I) -> Maybe<Self::Item> {
        // The length is known up front, so preallocate instead of growing.
        let mut result = Vec::with_capacity(self.0);
        let vals = u8s();
        for _ in 0..self.0 {
            let item = vals.generate(src)?;
            result.push(item)
        }
        Ok(InfoPool::of_vec(result))
    }
}
impl<G, C> CollectionGenerator<C, G> {
    /// Specify the mean number of _generated_ items. For collections with
    /// set semantics, this many not be the same as the mean size of the
    /// collection.
    /// Consumes and returns `self`, so calls chain builder-style.
    pub fn mean_length(mut self, mean: usize) -> Self {
        self.mean_length = mean;
        self
    }
}
impl<G: Generator, C: Default + Extend<G::Item>> Generator for CollectionGenerator<C, G> {
    type Item = C;
    /// Builds a collection by drawing items until a weighted "stop" coin
    /// comes up; expected number of draws equals `mean_length`.
    fn generate<I: InfoSource>(&self, src: &mut I) -> Maybe<Self::Item> {
        trace!("-> CollectionGenerator::generate");
        let mut coll: C = Default::default();
        // Probability of stopping after each drawn item.
        let p_is_final = 1.0 / (1.0 + self.mean_length as f32);
        let opts = optional_by(weighted_coin(1.0 - p_is_final), &self.inner);
        while let Some(item) = src.draw(&opts)? {
            // Extend one element at a time so only `Extend` is required of C.
            coll.extend(iter::once(item));
        }
        trace!("<- CollectionGenerator::generate");
        Ok(coll)
    }
}
impl<T: Clone> Generator for ChoiceGenerator<T> {
    type Item = T;
    /// Picks one of the configured options; an empty option list skips the
    /// item with `DataError::SkipItem`.
    fn generate<I: InfoSource>(&self, src: &mut I) -> Maybe<Self::Item> {
        let &ChoiceGenerator(ref options) = self;
        // `is_empty()` replaces the old `len() == 0` (clippy::len_zero).
        if options.is_empty() {
            warn!("Empty instance of ChoiceGenerator");
            return Err(DataError::SkipItem);
        }
        debug_assert!(options.len() <= u32::max_value() as usize);
        trace!("-> ChoiceGenerator::generate");
        // Slow as ... a very slow thing, and result in a non-optimal shrink.
        let off = src.draw(&uptos(usizes(), options.len()))?;
        // these are both very fast.
        // let off = src.draw(&uptos(u32s(), options.len() as u32))? as usize;
        // let off = src.draw(&uptos(u32s().map(|n| (n as usize) << 32), options.len()))?;
        // let v = !u32s().generate(src)?;
        // let off = (v as usize * self.0.len()) >> 32;
        // let v = !src.draw(&u32s())?;
        // let off = (v as usize * options.len()) >> 32;
        let res = options[off].clone();
        trace!("<- ChoiceGenerator::generate");
        Ok(res)
    }
}
// Unit and property-style tests for the collection generators.
#[cfg(test)]
mod tests {
use env_logger;
use generators::collections::*;
use generators::core::tests::*;
use std::collections::LinkedList;
#[test]
fn vecs_should_generate_same_output_given_same_input() {
should_generate_same_output_given_same_input(vecs(booleans()));
}
#[test]
fn vecs_usually_generates_different_output_for_different_inputs() {
usually_generates_different_output_for_different_inputs(vecs(booleans()))
}
// Test helper: wraps an `InfoSource` and counts how many nested `draw`
// calls pass through it, so tests can assert on the number of child draws.
#[derive(Debug)]
struct Tracer<'a, I: 'a> {
inner: &'a mut I,
child_draws: usize,
}
impl<'a, I> Tracer<'a, I> {
fn new(inner: &'a mut I) -> Self {
let child_draws = 0;
Tracer { inner, child_draws }
}
}
impl<'a, I: InfoSource> InfoSource for Tracer<'a, I> {
fn draw_u8(&mut self) -> u8 {
self.inner.draw_u8()
}
fn draw<S: InfoSink>(&mut self, sink: S) -> S::Out
where
Self: Sized,
{
debug!("-> Tracer::draw");
// Count every delegated draw; `draw_u8` deliberately does not count.
self.child_draws += 1;
let res = self.inner.draw(sink);
debug!("<- Tracer::draw");
res
}
}
#[test]
fn vecs_records_at_least_as_many_leaves_as_elements() {
env_logger::try_init().unwrap_or_default();
let nitems = 100;
let gen = vecs(booleans());
for _ in 0..nitems {
let mut src = RngSource::new();
let mut rec = Tracer::new(&mut src);
let val = gen.generate(&mut rec).expect("generate");
// One draw per element plus one terminating "stop" draw.
assert!(
rec.child_draws == val.len() + 1,
"child_draws:{} == val.len:{}",
rec.child_draws,
val.len()
);
}
}
#[test]
fn vec_bools_minimize_to_empty() {
env_logger::try_init().unwrap_or_default();
should_minimize_to(vecs(booleans()), vec![])
}
#[test]
fn vec_bools_can_minimise_with_predicate() {
env_logger::try_init().unwrap_or_default();
should_minimize_to(
vecs(booleans()).filter(|v| v.len() > 2),
vec![false, false, false],
);
}
#[test]
fn info_pools_should_generate_same_output_given_same_input() {
should_generate_same_output_given_same_input(info_pools(8))
}
#[test]
fn info_pools_usually_generates_different_output_for_different_inputs() {
usually_generates_different_output_for_different_inputs(info_pools(8))
}
#[test]
fn info_pools_minimize_to_empty() {
env_logger::try_init().unwrap_or_default();
// We force the generator to output a fixed length.
// This is perhaps not the best idea ever; but it'll do for now.
should_minimize_to(info_pools(8), InfoPool::of_vec(vec![0; 8]))
}
#[test]
fn collections_u64s_minimize_to_empty() {
use std::collections::BTreeSet;
should_minimize_to(collections::<BTreeSet<_>, _>(u8s()), BTreeSet::new());
}
#[test]
fn collections_records_at_least_as_many_leaves_as_elements() {
let nitems = 100;
let gen = collections::<LinkedList<_>, _>(u64s());
for _ in 0..nitems {
let mut src = RngSource::new();
let mut rec = Tracer::new(&mut src);
let val = gen.generate(&mut rec).expect("generate");
assert!(
rec.child_draws == val.len() + 1,
"child_draws:{} == val.len:{}",
rec.child_draws,
val.len()
);
}
}
// Statistical checks that `vecs(..).mean_length(n)` actually produces
// vectors whose average length is close to `n`.
mod vector_lengths {
use env_logger;
use generators::collections::*;
use generators::core::tests::*;
use std::collections::BTreeMap;
#[test]
fn mean_length_can_be_set_as_10() {
mean_length_can_be_set_as(10);
}
#[test]
fn mean_length_can_be_set_as_3() {
mean_length_can_be_set_as(3);
}
#[test]
fn mean_length_can_be_set_as_5() {
mean_length_can_be_set_as(5);
}
#[test]
fn mean_length_can_be_set_as_7() {
mean_length_can_be_set_as(7);
}
#[test]
fn mean_length_can_be_set_as_23() {
mean_length_can_be_set_as(23);
}
// Runs many trials from a fixed (replayable) pool and asserts the
// observed mean length is within 10% of the requested mean.
fn mean_length_can_be_set_as(len: usize) {
env_logger::try_init().unwrap_or_default();
let gen = vecs(u8s()).mean_length(len);
let trials = 1024usize;
let expected = len as f64;
let allowed_error = expected * 0.1;
let mut lengths = BTreeMap::new();
let p = unseeded_of_size(1 << 18);
let mut t = p.replay();
for _ in 0..trials {
let val = gen.generate(&mut t).expect("a trial");
*lengths.entry(val.len()).or_insert(0) += 1;
}
println!("Histogram: {:?}", lengths);
let mean: f64 = lengths
.iter()
.map(|(&l, &n)| (l * n) as f64 / trials as f64)
.sum();
assert!(
mean >= (expected - allowed_error) && mean <= (expected + allowed_error),
"Expected mean of {} trials ({}+/-{}); got {}",
trials,
expected,
allowed_error,
mean
);
}
}
// Same statistical checks, but for the generic `collections` generator.
mod collection_lengths {
use env_logger;
use generators::collections::*;
use generators::core::tests::*;
use std::collections::{BTreeMap, LinkedList};
#[test]
fn mean_length_can_be_set_as_10() {
mean_length_can_be_set_as(10);
}
#[test]
fn mean_length_can_be_set_as_3() {
mean_length_can_be_set_as(3);
}
#[test]
fn mean_length_can_be_set_as_5() {
mean_length_can_be_set_as(5);
}
#[test]
fn mean_length_can_be_set_as_7() {
mean_length_can_be_set_as(7);
}
#[test]
fn mean_length_can_be_set_as_23() {
mean_length_can_be_set_as(23);
}
fn mean_length_can_be_set_as(len: usize) {
env_logger::try_init().unwrap_or_default();
let gen = collections::<LinkedList<_>, _>(u8s()).mean_length(len);
let trials = 1024usize;
let expected = len as f64;
let allowed_error = expected * 0.1;
let mut lengths = BTreeMap::new();
let p = unseeded_of_size(1 << 18);
let mut t = p.replay();
for _ in 0..trials {
let val: LinkedList<u8> = gen.generate(&mut t).expect("a trial");
*lengths.entry(val.len()).or_insert(0) += 1;
}
println!("Histogram: {:?}", lengths);
let mean: f64 = lengths
.iter()
.map(|(&l, &n)| (l * n) as f64 / trials as f64)
.sum();
assert!(
mean >= (expected - allowed_error) && mean <= (expected + allowed_error),
"Expected mean of {} trials ({}+/-{}); got {}",
trials,
expected,
allowed_error,
mean
);
}
}
}
| 30.03 | 90 | 0.549118 |
5612e912a12afdc4813de81f2229fef230a8a16a | 588 |
extern crate iron;
use iron::prelude::*;
use iron::status;
use std::env;
use std::string::String;
/// Entry point: serves the `MESSAGE` environment variable (or a fallback
/// message) in response to every HTTP request on 0.0.0.0:3000.
fn main() {
    /// Iron handler: responds with the value of `MESSAGE`, falling back to
    /// a canned message when the variable is unset or not valid unicode.
    fn messenger(_: &mut Request) -> IronResult<Response> {
        let message = env::var("MESSAGE").unwrap_or_else(|e| {
            // Log why the variable could not be *read* (the old message
            // incorrectly said "print") before falling back.
            println!("Couldn't read env var {}: {}", "MESSAGE", e);
            String::from("a Message from somewhere!")
        });
        Ok(Response::with((status::Ok, message)))
    }
    let _server = Iron::new(messenger).http("0.0.0.0:3000").unwrap();
    println!("On 3000");
}
| 22.615385 | 72 | 0.518707 |
d7a17bd659e1357f6fd3b560052157c41ab93e25 | 25,316 | //! `Again` is a wasm-compatible utility for retrying standard library [`Futures`](https://doc.rust-lang.org/std/future/trait.Future.html) with a `Result` output type
//!
//! A goal of any operation should be a successful outcome. This crate gives operations a better chance at achieving that.
//!
//! # Examples
//!
//! ## Hello world
//!
//! For simple cases, you can use the module level [`retry`](fn.retry.html) fn, which
//! will retry a task every second for 5 seconds with an exponential backoff.
//!
//! ```no_run
//! again::retry(|| reqwest::get("https://api.company.com"));
//! ```
//!
//! ## Conditional retries
//!
//! By default, `again` will retry any failed `Future` if its `Result` output type is an `Err`.
//! You may not want to retry _every_ kind of error. In those cases you may wish to use the [`retry_if`](fn.retry_if.html) fn, which
//! accepts an additional argument to conditionally determine if the error
//! should be retried.
//!
//! ```no_run
//! again::retry_if(
//! || reqwest::get("https://api.company.com"),
//! reqwest::Error::is_status,
//! );
//! ```
//!
//! ## Retry policies
//!
//! Every application has different needs. The default retry behavior in `again`
//! likely will not suit all of them. You can define your own retry behavior
//! with a [`RetryPolicy`](struct.RetryPolicy.html). A RetryPolicy can be configured with a fixed or exponential backoff,
//! jitter, and other common retry options. This objects may be reused
//! across operations. For more information see the [`RetryPolicy`](struct.RetryPolicy.html) docs.
//!
//! ```ignore
//! use again::RetryPolicy;
//! use std::time::Duration;
//!
//! let policy = RetryPolicy::fixed(Duration::from_millis(100))
//! .with_max_retries(10)
//! .with_jitter(false);
//!
//! policy.retry(|| reqwest::get("https://api.company.com"));
//! ```
//!
//! # Logging
//!
//! For visibility on when operations fail and are retried, a `log::trace` message is emitted,
//! logging the `Debug` display of the error and the delay before the next attempt.
//!
//! # wasm features
//!
//! `again` supports [WebAssembly](https://webassembly.org/) targets i.e. `wasm32-unknown-unknown` which should make this
//! crate a good fit for most environments
//!
//! Two cargo features exist to support various wasm runtimes: `wasm-bindgen` and `stdweb`.
//! To enable them add the following to your `Cargo.toml` file.
//!
//! ```toml
//! [dependencies]
//! again = { version = "xxx", features = ["wasm-bindgen"] }
//! ```
#[cfg(feature = "rand")]
use rand::{distributions::OpenClosed01, thread_rng, Rng};
use std::{cmp::min, future::Future, time::Duration};
use wasm_timer::Delay;
/// Retries a fallible `Future` with a default `RetryPolicy`
///
/// ```
/// again::retry(|| async { Ok::<u32, ()>(42) });
/// ```
pub async fn retry<T>(task: T) -> Result<T::Item, T::Error>
where
T: Task,
{
crate::retry_if(task, Always).await
}
/// Retries a fallible `Future` with the default `RetryPolicy`, but only
/// for errors the provided condition deems retryable.
///
/// ```
/// again::retry_if(|| async { Err::<u32, u32>(7) }, |err: &u32| *err != 42);
/// ```
pub async fn retry_if<T, C>(
    task: T,
    condition: C,
) -> Result<T::Item, T::Error>
where
    T: Task,
    C: Condition<T::Error>,
{
    let policy = RetryPolicy::default();
    policy.retry_if(task, condition).await
}
/// Reruns a successful `Future` with the default `RetryPolicy` for as long
/// as the condition produces a new input, collecting every result in order.
///
/// ```
/// again::collect(
///     |i: u32| async move { Ok::<u32, ()>(i + 1) },
///     |r: &u32| if *r != 32 { Some(*r) } else { None },
///     1 as u32,
/// );
/// ```
pub async fn collect<T, C, S>(
    task: T,
    condition: C,
    start_value: S,
) -> Result<Vec<T::Item>, T::Error>
where
    T: TaskWithParameter<S>,
    C: SuccessCondition<T::Item, S>,
{
    let policy = RetryPolicy::default();
    policy.collect(task, condition, start_value).await
}
/// Reruns a `Future` with the default `RetryPolicy` while the success
/// condition yields a new input, collecting every successful result; task
/// errors matching the error condition are retried under the same policy.
///
/// ```
/// again::collect_and_retry(
///     |input: u32| async move { Ok::<u32, u32>(input + 1) },
///     |result: &u32| if *result < 2 { Some(*result) } else { None },
///     |err: &u32| *err > 1,
///     0 as u32,
/// );
/// ```
pub async fn collect_and_retry<T, C, D, S>(
    task: T,
    success_condition: C,
    error_condition: D,
    start_value: S,
) -> Result<Vec<T::Item>, T::Error>
where
    T: TaskWithParameter<S>,
    C: SuccessCondition<T::Item, S>,
    D: Condition<T::Error>,
    S: Clone,
{
    let policy = RetryPolicy::default();
    policy
        .collect_and_retry(task, success_condition, error_condition, start_value)
        .await
}
/// The delay-growth strategy applied between retries.
#[derive(Clone, Copy)]
enum Backoff {
/// Every retry waits the same base delay.
Fixed,
/// Each retry scales the base delay by a factor that grows by `exponent`.
Exponential { exponent: f64 },
}
impl Default for Backoff {
fn default() -> Self {
Backoff::Exponential { exponent: 2.0 }
}
}
impl Backoff {
/// Materializes this strategy into an iterator of delays using the
/// policy's base delay, delay cap, retry budget, and jitter flag.
fn iter(
self,
policy: &RetryPolicy,
) -> BackoffIter {
BackoffIter {
backoff: self,
// Multiplier applied to the base delay; starts at 1.0 so the first
// retry waits exactly `policy.delay`.
current: 1.0,
#[cfg(feature = "rand")]
jitter: policy.jitter,
delay: policy.delay,
max_delay: policy.max_delay,
max_retries: policy.max_retries,
}
}
}
/// Iterator yielding the delay to wait before each successive retry.
/// Exhausts after `max_retries` items.
struct BackoffIter {
backoff: Backoff,
/// Current multiplier for the base delay (advances for exponential backoff).
current: f64,
#[cfg(feature = "rand")]
jitter: bool,
/// Base delay from the policy.
delay: Duration,
/// Optional ceiling applied to every computed delay.
max_delay: Option<Duration>,
/// Remaining retry budget.
max_retries: usize,
}
impl Iterator for BackoffIter {
    type Item = Duration;

    /// Yields the delay before the next retry, or `None` once the retry
    /// budget is exhausted.
    fn next(&mut self) -> Option<Self::Item> {
        // Guard: stop once every allowed retry has been handed out.
        if self.max_retries == 0 {
            return None;
        }
        self.max_retries -= 1;
        // Pick the multiplier for this step; exponential backoff also
        // advances the multiplier for the following step.
        let factor = match self.backoff {
            Backoff::Fixed => self.current,
            Backoff::Exponential { exponent } => {
                let this_step = self.current;
                self.current = this_step * exponent;
                this_step
            }
        };
        let mut delay = self.delay.mul_f64(factor);
        #[cfg(feature = "rand")]
        {
            if self.jitter {
                delay = jitter(delay);
            }
        }
        // Clamp to the configured ceiling, if any.
        if let Some(cap) = self.max_delay {
            delay = min(delay, cap);
        }
        Some(delay)
    }
}
/// A template for configuring retry behavior
///
/// A default is provided, configured
/// to retry a task 5 times with exponential backoff
/// starting with a 1 second delay
#[derive(Clone)]
pub struct RetryPolicy {
/// Delay-growth strategy between retries.
backoff: Backoff,
/// Whether each delay is randomized (requires the `rand` feature).
#[cfg(feature = "rand")]
jitter: bool,
/// Base delay before the first retry; later delays scale from this.
delay: Duration,
/// Upper bound applied to every computed delay, when present.
max_delay: Option<Duration>,
/// Maximum number of retries (not counting the initial attempt).
max_retries: usize,
}
impl Default for RetryPolicy {
    /// Default policy: exponential backoff, one-second initial delay,
    /// five retries, no delay cap, jitter disabled.
    fn default() -> Self {
        Self {
            max_retries: 5,
            max_delay: None,
            delay: Duration::from_secs(1),
            #[cfg(feature = "rand")]
            jitter: false,
            backoff: Backoff::default(),
        }
    }
}
#[cfg(feature = "rand")]
/// Scales `duration` by a random factor sampled from the half-open
/// interval (0, 1], so the jittered delay never exceeds the original.
fn jitter(duration: Duration) -> Duration {
    let factor: f64 = thread_rng().sample(OpenClosed01);
    // `Duration::mul_f64` (already used by `BackoffIter::next`) keeps full
    // nanosecond precision; the previous milliseconds-based arithmetic
    // truncated sub-millisecond delays down to zero.
    duration.mul_f64(factor)
}
impl RetryPolicy {
/// Builds the iterator of delays this policy will use between retries.
fn backoffs(&self) -> impl Iterator<Item = Duration> {
self.backoff.iter(self)
}
/// Configures policy with an exponential
/// backoff delay.
///
/// By default, Futures will be retried 5 times.
///
/// These delays will increase in
/// length over time. You may wish to cap just how long
/// using the [`with_max_delay`](struct.Policy.html#method.with_max_delay) fn
///
/// By default an exponential backoff exponent of 2 will be used. This
/// can be modified using the
/// [`with_backoff_exponent`](struct.RetryPolicy.html#method.with_backoff_exponent) fn.
pub fn exponential(delay: Duration) -> Self {
Self {
backoff: Backoff::Exponential { exponent: 2.0f64 },
delay,
..Self::default()
}
}
/// Configures policy with a fixed
/// backoff delay.
///
/// By default, Futures will be retried 5 times.
///
/// Each retry waits the same fixed delay. You may wish to configure how many
/// times a Future will be retried using the [`with_max_retries`](struct.RetryPolicy.html#method.with_max_retries) fn
pub fn fixed(delay: Duration) -> Self {
Self {
backoff: Backoff::Fixed,
delay,
..Self::default()
}
}
/// Set the exponential backoff exponent to be used
///
/// If not using an exponential backoff, this call will be ignored.
pub fn with_backoff_exponent(
mut self,
exp: f64,
) -> Self {
if let Backoff::Exponential { ref mut exponent } = self.backoff {
*exponent = exp;
}
self
}
/// Configures randomness to the delay between retries.
///
/// This is useful for services that have many clients which might all retry at the same time to avoid
/// the ["thundering herd" problem](https://en.wikipedia.org/wiki/Thundering_herd_problem)
#[cfg(feature = "rand")]
pub fn with_jitter(
mut self,
jitter: bool,
) -> Self {
self.jitter = jitter;
self
}
/// Limits the maximum length of delay between retries
pub fn with_max_delay(
mut self,
max: Duration,
) -> Self {
self.max_delay = Some(max);
self
}
/// Limits the maximum number of attempts a Future will be tried
pub fn with_max_retries(
mut self,
max: usize,
) -> Self {
self.max_retries = max;
self
}
/// Retries a fallible `Future` with this policy's configuration
pub async fn retry<T>(
&self,
task: T,
) -> Result<T::Item, T::Error>
where
T: Task,
{
// `Always` treats every error as retryable.
self.retry_if(task, Always).await
}
/// Reruns and collects the results of a successful `Future` with this policy's
/// configuration under a certain provided condition
pub async fn collect<T, C, S>(
&self,
task: T,
condition: C,
start_value: S,
) -> Result<Vec<T::Item>, T::Error>
where
T: TaskWithParameter<S>,
C: SuccessCondition<T::Item, S>,
{
let mut backoffs = self.backoffs();
let mut condition = condition;
let mut task = task;
let mut results = vec![];
let mut input = start_value;
loop {
match task.call(input).await {
Ok(result) => {
// Ask the condition for the next input *before* moving the
// result into the collected output.
let maybe_new_input = condition.retry_with(&result);
results.push(result);
if let Some(new_input) = maybe_new_input {
if let Some(delay) = backoffs.next() {
#[cfg(feature = "log")]
{
log::trace!(
"task succeeded and condition is met. will run again in {:?}",
delay
);
}
let _ = Delay::new(delay).await;
input = new_input;
continue;
}
}
return Ok(results);
}
// Unlike `collect_and_retry`, a task error aborts immediately.
Err(err) => return Err(err),
}
}
}
/// Reruns and collects the results of a `Future`, if successful, with this policy's
/// configuration under a certain provided success condition. Also retries the `Future`, if
/// not successful under the same policy configuration and the provided error condition.
pub async fn collect_and_retry<T, C, D, S>(
&self,
task: T,
success_condition: C,
error_condition: D,
start_value: S,
) -> Result<Vec<T::Item>, T::Error>
where
T: TaskWithParameter<S>,
C: SuccessCondition<T::Item, S>,
D: Condition<T::Error>,
S: Clone,
{
// Success-driven reruns and error-driven retries each get their own
// backoff budget.
let mut success_backoffs = self.backoffs();
let mut error_backoffs = self.backoffs();
let mut success_condition = success_condition;
let mut error_condition = error_condition;
let mut task = task;
let mut results = vec![];
let mut input = start_value.clone();
// Tracks the most recent input so a failed call can be re-attempted
// with the same value.
let mut last_result = start_value;
loop {
match task.call(input).await {
Ok(result) => {
let maybe_new_input = success_condition.retry_with(&result);
results.push(result);
if let Some(new_input) = maybe_new_input {
if let Some(delay) = success_backoffs.next() {
#[cfg(feature = "log")]
{
log::trace!(
"task succeeded and condition is met. will run again in {:?}",
delay
);
}
let _ = Delay::new(delay).await;
input = new_input.clone();
last_result = new_input;
continue;
}
}
return Ok(results);
}
Err(err) => {
if error_condition.is_retryable(&err) {
if let Some(delay) = error_backoffs.next() {
#[cfg(feature = "log")]
{
log::trace!(
"task failed with error {:?}. will try again in {:?}",
err,
delay
);
}
let _ = Delay::new(delay).await;
// Retry with the same input that just failed.
input = last_result.clone();
continue;
}
}
return Err(err);
}
}
}
}
/// Retries a fallible `Future` with this policy's configuration under certain provided conditions
pub async fn retry_if<T, C>(
&self,
task: T,
condition: C,
) -> Result<T::Item, T::Error>
where
T: Task,
C: Condition<T::Error>,
{
let mut backoffs = self.backoffs();
let mut task = task;
let mut condition = condition;
loop {
match task.call().await {
Ok(result) => return Ok(result),
Err(err) => {
// Retry only while the condition approves and the backoff
// budget is not yet exhausted.
if condition.is_retryable(&err) {
if let Some(delay) = backoffs.next() {
#[cfg(feature = "log")]
{
log::trace!(
"task failed with error {:?}. will try again in {:?}",
err,
delay
);
}
let _ = Delay::new(delay).await;
continue;
}
}
return Err(err);
}
}
}
}
}
/// A type to determine if a failed Future should be retried
///
/// An implementation is provided for `Fn(&Err) -> bool` allowing you
/// to use a simple closure or fn handles
pub trait Condition<E> {
/// Return true if a Future error is worth retrying
fn is_retryable(
&mut self,
error: &E,
) -> bool;
}
/// Condition that retries unconditionally: every error is retryable.
struct Always;
impl<E> Condition<E> for Always {
#[inline]
fn is_retryable(
&mut self,
_: &E,
) -> bool {
true
}
}
// Any `FnMut(&E) -> bool` closure or fn pointer can serve as a `Condition`.
impl<F, E> Condition<E> for F
where
F: FnMut(&E) -> bool,
{
fn is_retryable(
&mut self,
error: &E,
) -> bool {
self(error)
}
}
/// A type to determine if a successful Future should be retried
///
/// An implementation is provided for `Fn(&Result) -> Option<S>`, where S
/// represents the next input value, allowing you to use a simple closure
/// or fn handles
pub trait SuccessCondition<R, S> {
/// Return `Some(next_input)` if the result warrants running the task
/// again with that input, or `None` to stop
fn retry_with(
&mut self,
result: &R,
) -> Option<S>;
}
// Any `Fn(&R) -> Option<S>` closure or fn pointer can serve as a
// `SuccessCondition`.
impl<F, R, S> SuccessCondition<R, S> for F
where
F: Fn(&R) -> Option<S>,
{
fn retry_with(
&mut self,
result: &R,
) -> Option<S> {
self(result)
}
}
/// A unit of work to be retried, that accepts a parameter
///
/// An implementation is provided for `FnMut() -> Future`
pub trait TaskWithParameter<P> {
/// The `Ok` variant of a `Futures` associated Output type
type Item;
/// The `Err` variant of `Futures` associated Output type
type Error: std::fmt::Debug;
/// The resulting `Future` type
type Fut: Future<Output = Result<Self::Item, Self::Error>>;
/// Call the operation, which results in a `Future`
fn call(
&mut self,
parameter: P,
) -> Self::Fut;
}
// Any `FnMut(P) -> Future` closure or fn pointer is a `TaskWithParameter`.
impl<F, Fut, I, P, E> TaskWithParameter<P> for F
where
F: FnMut(P) -> Fut,
Fut: Future<Output = Result<I, E>>,
E: std::fmt::Debug,
{
type Item = I;
type Error = E;
type Fut = Fut;
fn call(
&mut self,
p: P,
) -> Self::Fut {
self(p)
}
}
/// A unit of work to be retried
///
/// An implementation is provided for `FnMut() -> Future`
pub trait Task {
/// The `Ok` variant of a `Futures` associated Output type
type Item;
/// The `Err` variant of `Futures` associated Output type
type Error: std::fmt::Debug;
/// The resulting `Future` type
type Fut: Future<Output = Result<Self::Item, Self::Error>>;
/// Call the operation, which results in a `Future`
fn call(&mut self) -> Self::Fut;
}
// Any `FnMut() -> Future` closure or fn pointer is a `Task`.
impl<F, Fut, I, E> Task for F
where
F: FnMut() -> Fut,
Fut: Future<Output = Result<I, E>>,
E: std::fmt::Debug,
{
type Item = I;
type Error = E;
type Fut = Fut;
fn call(&mut self) -> Self::Fut {
self()
}
}
// Tests for the retry policies, conditions and collection helpers.
#[cfg(test)]
mod tests {
use super::*;
use approx::assert_relative_eq;
use std::error::Error;
#[test]
fn retry_policy_is_send() {
fn test(_: impl Send) {}
test(RetryPolicy::default())
}
#[test]
#[cfg(feature = "rand")]
fn jitter_adds_variance_to_durations() {
assert!(jitter(Duration::from_secs(1)) != Duration::from_secs(1));
}
#[test]
fn backoff_default() {
if let Backoff::Exponential { exponent } = Backoff::default() {
assert_relative_eq!(exponent, 2.0);
} else {
panic!("Default backoff expected to be exponential!");
}
}
#[test]
fn fixed_backoff() {
// Every yielded delay is the configured base delay.
let mut iter = RetryPolicy::fixed(Duration::from_secs(1)).backoffs();
assert_eq!(iter.next(), Some(Duration::from_secs(1)));
assert_eq!(iter.next(), Some(Duration::from_secs(1)));
assert_eq!(iter.next(), Some(Duration::from_secs(1)));
assert_eq!(iter.next(), Some(Duration::from_secs(1)));
}
#[test]
fn exponential_backoff() {
// Delays double with the default exponent of 2.
let mut iter = RetryPolicy::exponential(Duration::from_secs(1)).backoffs();
assert_relative_eq!(iter.next().unwrap().as_secs_f64(), 1.0);
assert_relative_eq!(iter.next().unwrap().as_secs_f64(), 2.0);
assert_relative_eq!(iter.next().unwrap().as_secs_f64(), 4.0);
assert_relative_eq!(iter.next().unwrap().as_secs_f64(), 8.0);
}
#[test]
fn exponential_backoff_factor() {
let mut iter = RetryPolicy::exponential(Duration::from_secs(1))
.with_backoff_exponent(1.5)
.backoffs();
assert_relative_eq!(iter.next().unwrap().as_secs_f64(), 1.0);
assert_relative_eq!(iter.next().unwrap().as_secs_f64(), 1.5);
assert_relative_eq!(iter.next().unwrap().as_secs_f64(), 2.25);
assert_relative_eq!(iter.next().unwrap().as_secs_f64(), 3.375);
}
#[test]
fn always_is_always_retryable() {
assert!(Always.is_retryable(&()))
}
#[test]
fn closures_impl_condition() {
fn test(_: impl Condition<()>) {}
#[allow(clippy::trivially_copy_pass_by_ref)]
fn foo(_err: &()) -> bool {
true
}
test(foo);
test(|_err: &()| true);
}
#[test]
fn closures_impl_task() {
fn test(_: impl Task) {}
async fn foo() -> Result<u32, ()> {
Ok(42)
}
test(foo);
test(|| async { Ok::<u32, ()>(42) });
}
#[test]
fn retried_futures_are_send_when_tasks_are_send() {
fn test(_: impl Send) {}
test(RetryPolicy::default().retry(|| async { Ok::<u32, ()>(42) }))
}
#[tokio::test]
async fn collect_retries_when_condition_is_met() -> Result<(), Box<dyn Error>> {
let result = RetryPolicy::fixed(Duration::from_millis(1))
.collect(
|input: u32| async move { Ok::<u32, ()>(input + 1) },
|result: &u32| if *result < 2 { Some(*result) } else { None },
0 as u32,
)
.await;
assert_eq!(result, Ok(vec![1, 2]));
Ok(())
}
#[tokio::test]
async fn collect_does_not_retry_when_condition_is_not_met() -> Result<(), Box<dyn Error>> {
let result = RetryPolicy::fixed(Duration::from_millis(1))
.collect(
|input: u32| async move { Ok::<u32, ()>(input + 1) },
|result: &u32| if *result < 1 { Some(*result) } else { None },
0 as u32,
)
.await;
assert_eq!(result, Ok(vec![1]));
Ok(())
}
#[tokio::test]
async fn collect_and_retry_retries_when_success_condition_is_met() -> Result<(), Box<dyn Error>>
{
let result = RetryPolicy::fixed(Duration::from_millis(1))
.collect_and_retry(
|input: u32| async move { Ok::<u32, u32>(input + 1) },
|result: &u32| if *result < 2 { Some(*result) } else { None },
|err: &u32| *err > 1,
0 as u32,
)
.await;
assert_eq!(result, Ok(vec![1, 2]));
Ok(())
}
#[tokio::test]
async fn collect_and_retry_does_not_retry_when_success_condition_is_not_met(
) -> Result<(), Box<dyn Error>> {
let result = RetryPolicy::fixed(Duration::from_millis(1))
.collect_and_retry(
|input: u32| async move { Ok::<u32, u32>(input + 1) },
|result: &u32| if *result < 1 { Some(*result) } else { None },
|err: &u32| *err > 1,
0 as u32,
)
.await;
assert_eq!(result, Ok(vec![1]));
Ok(())
}
#[tokio::test]
async fn collect_and_retry_retries_when_error_condition_is_met() -> Result<(), Box<dyn Error>> {
let mut task_ran = 0;
let _ = RetryPolicy::fixed(Duration::from_millis(1))
.collect_and_retry(
|_input: u32| {
task_ran += 1;
async move { Err::<u32, u32>(0) }
},
|result: &u32| if *result < 2 { Some(*result) } else { None },
|err: &u32| *err == 0,
0 as u32,
)
.await;
// Default for retry policy is 5, so we end up with the task being
// retries 5 times and being run 6 times.
assert_eq!(task_ran, 6);
Ok(())
}
#[tokio::test]
async fn collect_and_retry_does_not_retry_when_error_condition_is_not_met(
) -> Result<(), Box<dyn Error>> {
let result = RetryPolicy::fixed(Duration::from_millis(1))
.collect_and_retry(
|input: u32| async move { Err::<u32, u32>(input + 1) },
|result: &u32| if *result < 1 { Some(*result) } else { None },
|err: &u32| *err > 1,
0 as u32,
)
.await;
assert_eq!(result, Err(1));
Ok(())
}
#[tokio::test]
async fn ok_futures_yield_ok() -> Result<(), Box<dyn Error>> {
let result = RetryPolicy::default()
.retry(|| async { Ok::<u32, ()>(42) })
.await;
assert_eq!(result, Ok(42));
Ok(())
}
#[tokio::test]
async fn failed_futures_yield_err() -> Result<(), Box<dyn Error>> {
let result = RetryPolicy::fixed(Duration::from_millis(1))
.retry(|| async { Err::<u32, ()>(()) })
.await;
assert_eq!(result, Err(()));
Ok(())
}
}
| 30.391357 | 166 | 0.52769 |
e4f5eb1b2c66ff0bdd82caaa52684b0a4bb9f0f1 | 8,947 | #![feature(
negative_impls, // !Send is much cleaner than `PhantomData<Rc>`
untagged_unions, // I want to avoid ManuallyDrop in unions
const_fn_trait_bound, // So generics + const fn are unstable, huh?
generic_associated_types, // Finally!
const_trait_impl,
ptr_metadata
)]
#![allow(
clippy::missing_safety_doc, // Entirely internal code
)]
#![cfg_attr(not(feature = "std"), no_std)]
//! The implementation of [`GcContext`](zerogc::GcContext) that is
//! shared among both thread-safe and thread-unsafe code.
/*
* NOTE: Allocation is still needed for internals
*
* Uses:
* 1. `Box` for each handle
* 2. `Vec` for listing buckets of handles
* 3. `Arc` and `Box` for boxing context state
*
* TODO: Should we drop these uses entirely?
*/
extern crate alloc;
use core::mem::ManuallyDrop;
use core::fmt::{self, Debug, Formatter};
use alloc::boxed::Box;
use alloc::vec::Vec;
use zerogc::prelude::*;
pub mod state;
#[macro_use]
pub mod utils;
pub mod collector;
pub mod handle;
use crate::collector::{RawCollectorImpl};
pub use crate::collector::{WeakCollectorRef, CollectorRef, CollectorId};
pub use crate::state::{CollectionManager, RawContext};
/// The lifecycle state of a garbage-collection context.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ContextState {
/// The context is active.
///
/// Its contents are potentially being mutated,
/// so the `shadow_stack` doesn't necessarily
/// reflect the actual set of thread roots.
///
/// New objects could be allocated that are not
/// actually being tracked in the `shadow_stack`.
Active,
/// The context is waiting at a safepoint
/// for a collection to complete.
///
/// The mutating thread is blocked for the
/// duration of the safepoint (until collection completes).
///
/// Therefore, its `shadow_stack` is guaranteed to reflect
/// the actual set of thread roots.
SafePoint {
/// The id of the collection we are waiting for
collection_id: u64
},
/// The context is frozen.
/// Allocation or mutation can't happen
/// but the mutator thread isn't actually blocked.
///
/// Unlike a safepoint, this is explicitly unfrozen at the
/// user's discretion.
///
/// Because no allocation or mutation can happen,
/// its shadow_stack stack is guaranteed to
/// accurately reflect the roots of the context.
#[cfg_attr(not(feature = "sync"), allow(unused))] // TODO: Implement frozen for simple contexts?
Frozen,
}
impl ContextState {
    #[cfg_attr(not(feature = "sync"), allow(unused))] // TODO: Implement frozen for simple contexts?
    fn is_frozen(&self) -> bool {
        // Plain equality works because `ContextState` derives `PartialEq`.
        *self == ContextState::Frozen
    }
}
/*
* These form a stack of contexts,
* which all share ownership of a pointer to the RawContext,
* The raw context is implicitly bound to a single thread
* and manages the state of all the contexts.
*
* https://llvm.org/docs/GarbageCollection.html#the-shadow-stack-gc
* Essentially these objects maintain a shadow stack
*
* The pointer to the RawContext must be Arc, since the
* collector maintains a weak reference to it.
* I use double indirection with a `Rc` because I want
* `recurse_context` to avoid the cost of atomic operations.
*
* SimpleCollectorContexts mirror the application stack.
* They can be stack allocated inside `recurse_context`.
* All we would need to do is internally track ownership of the original
* context. The sub-collector in `recurse_context` is very clearly
* restricted to the lifetime of the closure
* which is a subset of the parent's lifetime.
*
* We still couldn't be Send, since we use interior mutability
* inside of RawContext that is not thread-safe.
*/
// TODO: Rename to remove 'Simple' from name
/// A garbage-collection context bound to a single thread; see the module
/// comment above for how contexts mirror the application stack.
pub struct CollectorContext<C: RawCollectorImpl> {
/// Pointer to the shared raw context state. Owned (and freed) only by
/// the root context; sub-contexts borrow the same pointer.
raw: *mut C::RawContext,
/// Whether we are the root context
///
/// Only the root actually owns the `Arc`
/// and is responsible for dropping it
root: bool
}
impl<C: RawCollectorImpl> CollectorContext<C> {
/// Registers a brand-new root context with the collector. The returned
/// context owns the boxed raw context and frees it on drop.
pub(crate) unsafe fn register_root(collector: &CollectorRef<C>) -> Self {
CollectorContext {
raw: Box::into_raw(ManuallyDrop::into_inner(
C::RawContext::register_new(collector)
)),
root: true, // We are responsible for unregistering
}
}
/// Returns a reference to the collector this context belongs to.
#[inline]
pub fn collector(&self) -> &C {
unsafe { (*self.raw).collector() }
}
/// Pushes `value` onto the shadow stack for the duration of `func`,
/// popping it again afterwards. The link lives on the machine stack,
/// so `func` must not outlive this frame (enforced by the closure).
#[inline(always)]
unsafe fn with_shadow_stack<R, T: Trace>(
&self, value: *mut &mut T, func: impl FnOnce() -> R
) -> R {
let old_link = (*(*self.raw).shadow_stack_ptr()).last;
let new_link = ShadowStackLink {
element: C::as_dyn_trace_pointer(value),
prev: old_link
};
(*(*self.raw).shadow_stack_ptr()).last = &new_link;
let result = func();
// The link we pushed must still be on top; anything else means the
// stack discipline was violated.
debug_assert_eq!(
(*(*self.raw).shadow_stack_ptr()).last,
&new_link
);
(*(*self.raw).shadow_stack_ptr()).last = new_link.prev;
result
}
// Slow path of `unchecked_safepoint`: roots `element` and runs a
// collection safepoint. Marked #[cold] to keep the fast path small.
#[cold]
unsafe fn trigger_basic_safepoint<T: Trace>(&self, element: &mut &mut T) {
self.with_shadow_stack(element, || {
(*self.raw).trigger_safepoint();
})
}
}
impl<C: RawCollectorImpl> Drop for CollectorContext<C> {
#[inline]
fn drop(&mut self) {
// Only the root context owns the raw context. Sub-contexts created by
// `recurse_context` share the same pointer and must not free it.
if self.root {
unsafe {
C::Manager::free_context(self.collector(), self.raw);
}
}
}
}
unsafe impl<C: RawCollectorImpl> GcContext for CollectorContext<C> {
type System = CollectorRef<C>;
type Id = CollectorId<C>;
/// Potentially runs a collection, rooting `value` for its duration.
/// Fast path: skip entirely when the collector doesn't want to collect.
#[inline]
unsafe fn unchecked_safepoint<T: Trace>(&self, value: &mut &mut T) {
debug_assert_eq!((*self.raw).state(), ContextState::Active);
if (*self.raw).collector().should_collect() {
self.trigger_basic_safepoint(value);
}
debug_assert_eq!((*self.raw).state(), ContextState::Active);
}
unsafe fn freeze(&mut self) {
(*self.raw).collector().manager().freeze_context(&*self.raw);
}
unsafe fn unfreeze(&mut self) {
(*self.raw).collector().manager().unfreeze_context(&*self.raw);
}
/// Runs `func` with a sub-context that shares our raw context pointer,
/// keeping `value` rooted on the shadow stack for the duration.
#[inline]
unsafe fn recurse_context<T, F, R>(&self, value: &mut &mut T, func: F) -> R
where T: Trace, F: for<'gc> FnOnce(&'gc mut Self, &'gc mut T) -> R {
debug_assert_eq!((*self.raw).state(), ContextState::Active);
self.with_shadow_stack(value, || {
let mut sub_context = ManuallyDrop::new(CollectorContext {
/*
* safe to copy because we wont drop it
* Lifetime is guaranteed to be restricted to
* the closure.
*/
raw: self.raw,
root: false /* don't drop our pointer!!! */
});
let result = func(&mut *sub_context, value);
debug_assert!(!sub_context.root);
// No need to run drop code on context.....
core::mem::forget(sub_context);
debug_assert_eq!((*self.raw).state(), ContextState::Active);
result
})
}
#[inline]
fn system(&self) -> &'_ Self::System {
unsafe { (&*self.raw).collector_ref() }
}
#[inline]
fn id(&self) -> Self::Id {
unsafe { (&*self.raw).collector() }.id()
}
}
/// It's not safe for a context to be sent across threads.
///
/// We use (thread-unsafe) interior mutability to maintain the
/// shadow stack. Since we could potentially be cloned via `safepoint_recurse!`,
/// implementing `Send` would allow another thread to obtain a
/// reference to our internal `&RefCell`. Further mutation/access
/// would be undefined behavior.
impl<C: RawCollectorImpl> !Send for CollectorContext<C> {}
//
// Root tracking
//
#[repr(C)]
#[derive(Debug)]
/// One node in the intrusive linked list that forms the shadow stack.
/// Nodes live on the machine stack of the frame that pushed them.
pub(crate) struct ShadowStackLink<T> {
pub element: T,
/// The previous link in the chain,
/// or NULL if there isn't any
pub prev: *const ShadowStackLink<T>
}
impl<C: RawCollectorImpl> Debug for ShadowStack<C> {
// Prints only the address of the last link; the stack contents are not
// traversed (doing so would require unsafe pointer walks).
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("ShadowStack")
.field("last", &format_args!("{:p}", self.last))
.finish()
}
}
/// The head of the per-context shadow stack of GC roots
/// (see <https://llvm.org/docs/GarbageCollection.html#the-shadow-stack-gc>).
#[derive(Clone)]
pub struct ShadowStack<C: RawCollectorImpl> {
/// The last element in the shadow stack,
/// or NULL if it's empty
pub(crate) last: *const ShadowStackLink<C::DynTracePtr>
}
impl<C: RawCollectorImpl> ShadowStack<C> {
/// Collects the stack into a Vec ordered oldest-first (the links are
/// chained newest-first, hence the extra `reverse()`).
unsafe fn as_vec(&self) -> Vec<C::DynTracePtr> {
let mut result: Vec<_> = self.reverse_iter().collect();
result.reverse();
result
}
/// Iterates the stack starting from the most recently pushed link and
/// following `prev` pointers back to the oldest.
#[inline]
pub unsafe fn reverse_iter(&self) -> impl Iterator<Item=C::DynTracePtr> + '_ {
core::iter::successors(
self.last.as_ref(),
|link| link.prev.as_ref()
).map(|link| link.element)
}
}
| 32.416667 | 100 | 0.624008 |
7964be89a5da49524b5556ecf8d91e2d6032d4be | 484 | use super::*;
impl Command<Opacity> {
    /// Append the `set` keyword; transitions the type-state so a value
    /// must be supplied next (`Opacity<With>`).
    pub fn set(self) -> Command<Opacity<With>> {
        self.push_str("set").transmute()
    }
    /// Append the `plus` keyword; a value must be supplied next.
    pub fn plus(self) -> Command<Opacity<With>> {
        self.push_str("plus").transmute()
    }
    /// Append the `minus` keyword; a value must be supplied next.
    pub fn minus(self) -> Command<Opacity<With>> {
        self.push_str("minus").transmute()
    }
}
impl Command<Opacity<With>> {
    /// Append the numeric opacity value, completing the command
    /// (type-state moves to `Valid`).
    pub fn value(self, value: f32) -> Command<Valid> {
        self.push_str(value.to_string()).transmute()
    }
}
| 22 | 54 | 0.588843 |
8a48d14be4d120bc803e6ff048fc6161b3f90714 | 5,175 | // Copyright 2019-2021 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use crate::{
hooks::{InvokeError, InvokeMessage, InvokeResolver},
runtime::Runtime,
Config, Invoke, PackageInfo, Window,
};
pub use anyhow::Result;
use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
use std::sync::Arc;
mod app;
mod cli;
mod clipboard;
mod dialog;
mod event;
#[allow(unused_imports)]
mod file_system;
mod global_shortcut;
mod http;
mod notification;
mod operating_system;
mod path;
mod process;
mod shell;
mod window;
/// The context passed to the invoke handler.
pub struct InvokeContext<R: Runtime> {
  pub window: Window<R>,         // window the invoke message came from
  pub config: Arc<Config>,       // shared application configuration
  pub package_info: PackageInfo, // application package metadata
}
#[cfg(test)]
impl<R: Runtime> Clone for InvokeContext<R> {
  // Manual impl: deriving `Clone` would add an unwanted `R: Clone` bound.
  // Only compiled for tests (`#[cfg(test)]`).
  fn clone(&self) -> Self {
    Self {
      window: self.window.clone(),
      config: self.config.clone(),
      package_info: self.package_info.clone(),
    }
  }
}
/// The response for a JS `invoke` call.
pub struct InvokeResponse {
  // The serialized payload on success, or the error to report back to JS.
  json: Result<JsonValue>,
}
impl<T: Serialize> From<T> for InvokeResponse {
fn from(value: T) -> Self {
Self {
json: serde_json::to_value(value).map_err(Into::into),
}
}
}
/// The API module a JS `invoke` payload addresses, together with its
/// module-specific command. Deserializes from
/// `{ "module": "...", "message": { ... } }` (see the serde attributes).
#[derive(Deserialize)]
#[serde(tag = "module", content = "message")]
enum Module {
  App(app::Cmd),
  Process(process::Cmd),
  Fs(file_system::Cmd),
  Os(operating_system::Cmd),
  Path(path::Cmd),
  // Boxed, unlike the other variants -- presumably to keep the enum small
  // because window commands are large. NOTE(review): confirm.
  Window(Box<window::Cmd>),
  Shell(shell::Cmd),
  Event(event::Cmd),
  Dialog(dialog::Cmd),
  Cli(cli::Cmd),
  Notification(notification::Cmd),
  Http(http::Cmd),
  GlobalShortcut(global_shortcut::Cmd),
  Clipboard(clipboard::Cmd),
}
impl Module {
  /// Dispatch the command to its module implementation and resolve the
  /// JS invoke promise with the result.
  ///
  /// Every arm has the same shape: run the command against an
  /// `InvokeContext`, unwrap the `InvokeResponse` into its JSON payload,
  /// and convert `anyhow` errors into `InvokeError`s. The `Window` and
  /// `Http` commands are async and are additionally awaited.
  fn run<R: Runtime>(
    self,
    window: Window<R>,
    resolver: InvokeResolver<R>,
    config: Arc<Config>,
    package_info: PackageInfo,
  ) {
    let context = InvokeContext {
      window,
      config,
      package_info,
    };
    match self {
      Self::App(cmd) => resolver.respond_async(async move {
        cmd
          .run(context)
          .and_then(|r| r.json)
          .map_err(InvokeError::from_anyhow)
      }),
      Self::Process(cmd) => resolver.respond_async(async move {
        cmd
          .run(context)
          .and_then(|r| r.json)
          .map_err(InvokeError::from_anyhow)
      }),
      Self::Fs(cmd) => resolver.respond_async(async move {
        cmd
          .run(context)
          .and_then(|r| r.json)
          .map_err(InvokeError::from_anyhow)
      }),
      Self::Path(cmd) => resolver.respond_async(async move {
        cmd
          .run(context)
          .and_then(|r| r.json)
          .map_err(InvokeError::from_anyhow)
      }),
      Self::Os(cmd) => resolver.respond_async(async move {
        cmd
          .run(context)
          .and_then(|r| r.json)
          .map_err(InvokeError::from_anyhow)
      }),
      // async command: must be awaited before unwrapping
      Self::Window(cmd) => resolver.respond_async(async move {
        cmd
          .run(context)
          .await
          .and_then(|r| r.json)
          .map_err(InvokeError::from_anyhow)
      }),
      Self::Shell(cmd) => resolver.respond_async(async move {
        cmd
          .run(context)
          .and_then(|r| r.json)
          .map_err(InvokeError::from_anyhow)
      }),
      Self::Event(cmd) => resolver.respond_async(async move {
        cmd
          .run(context)
          .and_then(|r| r.json)
          .map_err(InvokeError::from_anyhow)
      }),
      Self::Dialog(cmd) => resolver.respond_async(async move {
        cmd
          .run(context)
          .and_then(|r| r.json)
          .map_err(InvokeError::from_anyhow)
      }),
      Self::Cli(cmd) => resolver.respond_async(async move {
        cmd
          .run(context)
          .and_then(|r| r.json)
          .map_err(InvokeError::from_anyhow)
      }),
      Self::Notification(cmd) => resolver.respond_async(async move {
        cmd
          .run(context)
          .and_then(|r| r.json)
          .map_err(InvokeError::from_anyhow)
      }),
      // async command: must be awaited before unwrapping
      Self::Http(cmd) => resolver.respond_async(async move {
        cmd
          .run(context)
          .await
          .and_then(|r| r.json)
          .map_err(InvokeError::from_anyhow)
      }),
      Self::GlobalShortcut(cmd) => resolver.respond_async(async move {
        cmd
          .run(context)
          .and_then(|r| r.json)
          .map_err(InvokeError::from_anyhow)
      }),
      Self::Clipboard(cmd) => resolver.respond_async(async move {
        cmd
          .run(context)
          .and_then(|r| r.json)
          .map_err(InvokeError::from_anyhow)
      }),
    }
  }
}
/// Decode an invoke payload into a [`Module`] command and run it,
/// rejecting the promise if the payload does not deserialize.
pub(crate) fn handle<R: Runtime>(
  module: String,
  invoke: Invoke<R>,
  config: Arc<Config>,
  package_info: &PackageInfo,
) {
  let Invoke { message, resolver } = invoke;
  let InvokeMessage { mut payload, window, .. } = message;
  // Tag the payload with the module name so the adjacently-tagged
  // `Module` enum (`tag = "module"`, `content = "message"`) can decode it.
  if let JsonValue::Object(ref mut map) = payload {
    map.insert("module".to_string(), JsonValue::String(module));
  }
  match serde_json::from_value::<Module>(payload) {
    Ok(command) => command.run(window, resolver, config, package_info.clone()),
    Err(error) => resolver.reject(error.to_string()),
  }
}
| 24.879808 | 77 | 0.595556 |
d79ec4c0182e87e079e4338a1fc1281a1f2d179b | 12,475 | use std::fmt;
use std::io::Write;
use std::{cmp, collections::HashMap, fs};
use anyhow::{anyhow, Result};
use console::{style, Style, Term};
use itertools::Itertools;
use similar::{ChangeTag, DiffableStr, TextDiff};
use textwrap::indent;
use crate::lint_message::{LintMessage, LintSeverity};
use crate::path::get_display_path;
static CONTEXT_LINES: usize = 3; // lines of context shown above and below the flagged line
/// Whether a renderer actually emitted any lint output.
pub enum PrintedLintErrors {
    Yes,
    No,
}
/// Render each lint as a single `path:line:col:SEVERITY description [CODE]`
/// line -- a compact, grep-friendly format. Missing fields render as
/// empty segments.
pub fn render_lint_messages_oneline(
    stdout: &mut impl Write,
    lint_messages: &HashMap<Option<String>, Vec<LintMessage>>,
) -> Result<PrintedLintErrors> {
    let mut printed = false;
    let current_dir = std::env::current_dir()?;
    for lint_message in lint_messages.values().flatten() {
        printed = true;
        let display_path = match &lint_message.path {
            // A `None` path means the failure isn't tied to a file.
            None => "[General linter failure]".to_string(),
            Some(path) => {
                // Try to render the path relative to user's current working directory.
                // But if we fail to relativize the path, just print what the linter
                // gave us directly.
                get_display_path(path, &current_dir)
            }
        };
        let line_number = match lint_message.line {
            None => "".to_string(),
            Some(line) => format!("{}", line),
        };
        let column = match lint_message.char {
            None => "".to_string(),
            Some(char) => format!("{}", char),
        };
        let description = match &lint_message.description {
            None => "",
            Some(desc) => desc.as_str(),
        };
        // Multi-line descriptions are flattened onto a single line.
        let description = description.lines().join(" ");
        let severity = lint_message.severity.label();
        writeln!(
            stdout,
            "{}:{}:{}:{} {} [{}]",
            display_path, line_number, column, severity, description, lint_message.code
        )?;
    }
    if printed {
        Ok(PrintedLintErrors::Yes)
    } else {
        Ok(PrintedLintErrors::No)
    }
}
/// Render every lint message as one JSON object per line (JSON Lines),
/// reporting whether anything was written.
pub fn render_lint_messages_json(
    stdout: &mut impl Write,
    lint_messages: &HashMap<Option<String>, Vec<LintMessage>>,
) -> Result<PrintedLintErrors> {
    let mut wrote_any = false;
    for message in lint_messages.values().flatten() {
        let line = serde_json::to_string(message)?;
        writeln!(stdout, "{}", line)?;
        wrote_any = true;
    }
    Ok(if wrote_any {
        PrintedLintErrors::Yes
    } else {
        PrintedLintErrors::No
    })
}
/// The default human-readable renderer: groups lints by file, shows a
/// wrapped description, and either a patch diff (when the linter supplied
/// original/replacement text) or a source-context snippet.
pub fn render_lint_messages(
    stdout: &mut impl Write,
    lint_messages: &HashMap<Option<String>, Vec<LintMessage>>,
) -> Result<PrintedLintErrors> {
    if lint_messages.is_empty() {
        writeln!(stdout, "{} No lint issues.", style("ok").green())?;
        return Ok(PrintedLintErrors::No);
    }
    // Wrap descriptions at 78 columns, indented 4 spaces under the summary.
    let wrap_78_indent_4 = textwrap::Options::new(78)
        .initial_indent(spaces(4))
        .subsequent_indent(spaces(4));
    // Always render messages in sorted order.
    let mut paths: Vec<&Option<String>> = lint_messages.keys().collect();
    paths.sort();
    let current_dir = std::env::current_dir()?;
    for path in paths {
        let lint_messages = lint_messages.get(path).unwrap();
        stdout.write_all(b"\n\n")?;
        match path {
            // The `None` key collects failures not tied to any file.
            None => write!(stdout, ">>> General linter failure:\n\n")?,
            Some(path) => {
                // Try to render the path relative to user's current working directory.
                // But if we fail to relativize the path, just print what the linter
                // gave us directly.
                let path_to_print = get_display_path(path, &current_dir);
                write!(
                    stdout,
                    "{} Lint for {}:\n\n",
                    style(">>>").bold(),
                    style(path_to_print).underlined()
                )?;
            }
        }
        for lint_message in lint_messages {
            write_summary_line(stdout, lint_message)?;
            // Write the description.
            if let Some(description) = &lint_message.description {
                for line in textwrap::wrap(description, &wrap_78_indent_4) {
                    writeln!(stdout, "{}", line)?;
                }
            }
            // If we have original and replacement, show the diff.
            if let (Some(original), Some(replacement)) =
                (&lint_message.original, &lint_message.replacement)
            {
                write_context_diff(stdout, original, replacement)?;
            } else if let (Some(highlight_line), Some(path)) = (&lint_message.line, path) {
                // Otherwise, write the context code snippet.
                write_context(stdout, path, highlight_line)?;
            }
        }
    }
    Ok(PrintedLintErrors::Yes)
}
// Write formatted context lines, with a styled indicator for which line the lint is about.
fn write_context(stdout: &mut impl Write, path: &str, highlight_line: &usize) -> Result<()> {
    stdout.write_all(b"\n")?;
    match fs::read_to_string(path) {
        Ok(file) => {
            let lines = file.tokenize_lines();
            // Lint lines are 1-based; convert to a 0-based index into `lines`.
            let highlight_idx = highlight_line.saturating_sub(1);
            let max_idx = lines.len().saturating_sub(1);
            let start_idx = highlight_idx.saturating_sub(CONTEXT_LINES);
            let end_idx = cmp::min(max_idx, highlight_idx + CONTEXT_LINES);
            // Hoisted out of the loop (both are loop-invariant): pad every
            // line number to the width of the largest printable one.
            let max_line_number = max_idx + 1;
            let max_pad = max_line_number.to_string().len();
            for cur_idx in start_idx..=end_idx {
                // `start_idx..=end_idx` is clamped to the file, so this lookup
                // only fails if the linter reported a line past EOF.
                let line = lines.get(cur_idx).ok_or_else(|| {
                    anyhow!(
                        "line {} reported by the linter was not found in '{}'",
                        cur_idx + 1,
                        path
                    )
                })?;
                let line_number = cur_idx + 1;
                // Write `123 | my failing line content
                if cur_idx == highlight_idx {
                    // Highlight the actually failing line with a chevron + different color
                    write!(
                        stdout,
                        " >>> {:>width$} |{}",
                        style(line_number).dim(),
                        style(line).yellow(),
                        width = max_pad
                    )?;
                } else {
                    write!(
                        stdout,
                        " {:>width$} |{}",
                        style(line_number).dim(),
                        line,
                        width = max_pad
                    )?;
                }
            }
        }
        Err(e) => {
            let msg = textwrap::indent(
                &format!(
                    "Could not retrieve source context: {}\n\
                    This is typically a linter bug.",
                    e
                ),
                spaces(8),
            );
            write!(stdout, "{}", style(msg).red())?;
        }
    }
    stdout.write_all(b"\n")?;
    Ok(())
}
// Write the context, computing and styling a diff from the original to the suggested replacement.
fn write_context_diff(stdout: &mut impl Write, original: &str, replacement: &str) -> Result<()> {
    writeln!(
        stdout,
        "\n {}",
        style("You can run `lintrunner -a` to apply this patch.").cyan()
    )?;
    stdout.write_all(b"\n")?;
    let diff = TextDiff::from_lines(original, replacement);
    let mut max_line_number = 1;
    // First pass: find the largest line number we'll print so both gutter
    // columns can be padded to a uniform width.
    // NOTE(review): the enumerate() here is unused (index discarded) and
    // could be dropped.
    for (_, group) in diff.grouped_ops(3).iter().enumerate() {
        for op in group {
            for change in diff.iter_inline_changes(op) {
                let old_line = change.old_index().unwrap_or(0) + 1;
                let new_line = change.new_index().unwrap_or(0) + 1;
                max_line_number = cmp::max(max_line_number, old_line);
                max_line_number = cmp::max(max_line_number, new_line);
            }
        }
    }
    let max_pad = max_line_number.to_string().len();
    // Second pass: render each hunk (3 lines of context), separating hunks
    // with a dashed rule.
    for (idx, group) in diff.grouped_ops(3).iter().enumerate() {
        if idx > 0 {
            writeln!(stdout, "{:-^1$}", "-", 80)?;
        }
        for op in group {
            for change in diff.iter_inline_changes(op) {
                let (sign, s) = match change.tag() {
                    ChangeTag::Delete => ("-", Style::new().red()),
                    ChangeTag::Insert => ("+", Style::new().green()),
                    ChangeTag::Equal => (" ", Style::new().dim()),
                };
                let changeset = Changeset {
                    max_pad,
                    old: change.old_index(),
                    new: change.new_index(),
                };
                write!(
                    stdout,
                    " {} |{}",
                    style(changeset).dim(),
                    s.apply_to(sign).bold()
                )?;
                // Emphasized spans are the intra-line tokens that changed;
                // they get extra underline/background styling.
                for (emphasized, value) in change.iter_strings_lossy() {
                    if emphasized {
                        write!(stdout, "{}", s.apply_to(value).underlined().on_black())?;
                    } else {
                        write!(stdout, "{}", s.apply_to(value))?;
                    }
                }
                if change.missing_newline() {
                    stdout.write_all(b"\n")?;
                }
            }
        }
    }
    stdout.write_all(b"\n")?;
    Ok(())
}
// Write: ` Error (LINTER) prefer-using-this-over-that\n`
fn write_summary_line(stdout: &mut impl Write, lint_message: &LintMessage) -> Result<()> {
    // Errors get a red badge; every non-fatal severity gets yellow.
    let error_style = match lint_message.severity {
        LintSeverity::Error => Style::new().on_red().bold(),
        LintSeverity::Warning | LintSeverity::Advice | LintSeverity::Disabled => {
            Style::new().on_yellow().bold()
        }
    };
    writeln!(
        stdout,
        " {} ({}) {}",
        error_style.apply_to(lint_message.severity.label()),
        lint_message.code,
        style(&lint_message.name).underlined(),
    )?;
    Ok(())
}
/// A `'static` byte slice of `len` spaces, carved out of one constant
/// 255-byte buffer (255 is the maximum a `u8` length can request).
fn bspaces(len: u8) -> &'static [u8] {
    const SPACES: [u8; 255] = [b' '; 255];
    &SPACES[..usize::from(len)]
}
/// Short 'static strs of spaces.
fn spaces(len: u8) -> &'static str {
    let bytes = bspaces(len);
    // SAFETY: `SPACES` is valid UTF-8 since it is all spaces.
    unsafe { std::str::from_utf8_unchecked(bytes) }
}
/// The two gutter columns (old line number, new line number) printed to the
/// left of each diff line.
struct Changeset {
    // The length of the largest line number we'll be printing.
    max_pad: usize,
    // 0-based indices into the old/new text; `None` when the line exists on
    // only one side of the diff.
    old: Option<usize>,
    new: Option<usize>,
}
impl fmt::Display for Changeset {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // We want things to get formatted like:
        //    1234 1235
        // with each column right-padded to `max_pad`.
        match (self.old, self.new) {
            (Some(old), Some(new)) => {
                // +1 because we want to print the line number, not the vector index.
                let old = old + 1;
                let new = new + 1;
                write!(
                    f,
                    "{:>left_pad$} {:>right_pad$}",
                    old,
                    new,
                    left_pad = self.max_pad,
                    right_pad = self.max_pad,
                )
            }
            // In cases where old/new are missing, do an approximation:
            // '1234 '
            //      ^ blank column mirroring the printed side
            (Some(old), None) => {
                // Fix: this arm previously printed the raw 0-based index,
                // off by one relative to every other arm and the width
                // pre-pass in `write_context_diff`.
                let old = old + 1;
                write!(f, "{:>width$} {:width$}", old, " ", width = self.max_pad)
            }
            (None, Some(new)) => {
                let new = new + 1;
                write!(f, "{:width$} {:>width$}", " ", new, width = self.max_pad)
            }
            (None, None) => unreachable!(),
        }
    }
}
// Formats an optional 0-based index as a 1-based, left-aligned line number,
// or a blank placeholder when absent.
// NOTE(review): appears unused within this file -- possibly dead code left
// over from an earlier diff renderer; confirm before removing.
struct Line(Option<usize>);
impl fmt::Display for Line {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.0 {
            None => write!(f, " "),
            Some(idx) => write!(f, "{:<4}", idx + 1),
        }
    }
}
/// Pretty-print an error and its full cause chain to stderr, with the
/// `error:` / `caused_by:` labels styled and continuation lines indented
/// to align under the message.
pub fn print_error(err: &anyhow::Error) -> std::io::Result<()> {
    let mut stderr = Term::stderr();
    let mut chain = err.chain();
    // The first element of anyhow's chain is the error itself; the rest
    // are its sources.
    if let Some(error) = chain.next() {
        write!(stderr, "{} ", style("error:").red().bold())?;
        let indented = indent(&format!("{}", error), spaces(7));
        writeln!(stderr, "{}", indented)?;
        for cause in chain {
            write!(stderr, "{} ", style("caused_by:").red().bold())?;
            write!(stderr, " ")?;
            let indented = indent(&format!("{}", cause), spaces(11));
            writeln!(stderr, "{}", indented)?;
        }
    }
    Ok(())
}
| 33.625337 | 100 | 0.503006 |
790b46a761f3770fc6438a3d0e7f102e591c3927 | 1,982 | use super::future::ResponseFuture;
use crate::semaphore::Semaphore;
use tower_service::Service;
use futures_core::ready;
use std::task::{Context, Poll};
/// Enforces a limit on the concurrent number of requests the underlying
/// service can handle.
#[derive(Debug, Clone)]
pub struct ConcurrencyLimit<T> {
    inner: T,             // the wrapped service
    semaphore: Semaphore, // tracks the in-flight request permits
}
impl<T> ConcurrencyLimit<T> {
    /// Create a new concurrency limiter.
    ///
    /// `max` is the maximum number of requests allowed in-flight at once.
    pub fn new(inner: T, max: usize) -> Self {
        ConcurrencyLimit {
            inner,
            semaphore: Semaphore::new(max),
        }
    }
    /// Get a reference to the inner service
    pub fn get_ref(&self) -> &T {
        &self.inner
    }
    /// Get a mutable reference to the inner service
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.inner
    }
    /// Consume `self`, returning the inner service
    pub fn into_inner(self) -> T {
        self.inner
    }
}
impl<S, Request> Service<Request> for ConcurrencyLimit<S>
where
    S: Service<Request>,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = ResponseFuture<S::Future>;
    /// Ready only once a semaphore permit is available *and* the inner
    /// service is ready.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // First, poll the semaphore...
        ready!(self.semaphore.poll_acquire(cx));
        // ...and if it's ready, poll the inner service.
        self.inner.poll_ready(cx)
    }
    /// Panics if called without a preceding `poll_ready` that returned
    /// `Ready` (the permit acquired there is consumed here).
    fn call(&mut self, request: Request) -> Self::Future {
        // Take the permit
        let permit = self
            .semaphore
            .take_permit()
            .expect("max requests in-flight; poll_ready must be called first");
        // Call the inner service
        let future = self.inner.call(request);
        // The permit travels with the response future (presumably released
        // when the future completes -- see ResponseFuture).
        ResponseFuture::new(future, permit)
    }
}
#[cfg(feature = "load")]
impl<S> crate::load::Load for ConcurrencyLimit<S>
where
    S: crate::load::Load,
{
    type Metric = S::Metric;
    // Load measurement is delegated unchanged to the inner service.
    fn load(&self) -> Self::Metric {
        self.inner.load()
    }
}
690b5861c2239d94b4db607c2b6f7b196e057b6b | 22,366 | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Selection over an array of receivers
//!
//! This module contains the implementation machinery necessary for selecting
//! over a number of receivers. One large goal of this module is to provide an
//! efficient interface to selecting over any receiver of any type.
//!
//! This is achieved through an architecture of a "receiver set" in which
//! receivers are added to a set and then the entire set is waited on at once.
//! The set can be waited on multiple times to prevent re-adding each receiver
//! to the set.
//!
//! Usage of this module is currently encouraged to go through the use of the
//! `select!` macro. This macro allows naturally binding of variables to the
//! received values of receivers in a much more natural syntax then usage of the
//! `Select` structure directly.
//!
//! # Example
//!
//! ```rust
//! let (tx1, rx1) = channel();
//! let (tx2, rx2) = channel();
//!
//! tx1.send(1i);
//! tx2.send(2i);
//!
//! select! {
//! val = rx1.recv() => {
//! assert_eq!(val, 1i);
//! },
//! val = rx2.recv() => {
//! assert_eq!(val, 2i);
//! }
//! }
//! ```
#![allow(dead_code)]
#![experimental = "This implementation, while likely sufficient, is unsafe and \
likely to be error prone. At some point in the future this \
module will likely be replaced, and it is currently \
unknown how much API breakage that will cause. The ability \
to select over a number of channels will remain forever, \
but no guarantees beyond this are being made"]
use core::prelude::*;
use core::cell::Cell;
use core::kinds::marker;
use core::mem;
use core::uint;
use comm::Receiver;
use comm::blocking::{mod, SignalToken};
/// The "receiver set" of the select interface. This structure is used to manage
/// a set of receivers which are being selected over.
pub struct Select {
    head: *mut Handle<'static, ()>, // first handle in the intrusive list (null when empty)
    tail: *mut Handle<'static, ()>, // last handle in the intrusive list (null when empty)
    next_id: Cell<uint>,            // id handed to the next `handle()` call (starts at 1)
    marker1: marker::NoSend,        // a `Select` must stay on its constructing thread
}
/// A handle to a receiver which is currently a member of a `Select` set of
/// receivers. This handle is used to keep the receiver in the set as well as
/// interact with the underlying receiver.
pub struct Handle<'rx, T:'rx> {
    /// The ID of this handle, used to compare against the return value of
    /// `Select::wait()`
    id: uint,
    /// The set this handle was created from (and may be linked into).
    selector: &'rx Select,
    // Intrusive doubly-linked-list pointers, type-erased to 'static
    // (see the transmutes in `add`/`remove`).
    next: *mut Handle<'static, ()>,
    prev: *mut Handle<'static, ()>,
    /// Whether this handle is currently linked into the selector's list.
    added: bool,
    packet: &'rx (Packet+'rx),
    // due to our fun transmutes, we be sure to place this at the end. (nothing
    // previous relies on T)
    rx: &'rx Receiver<T>,
}
/// Raw-pointer cursor for iterating the intrusive list of handles.
struct Packets { cur: *mut Handle<'static, ()> }
#[doc(hidden)]
#[deriving(PartialEq)]
pub enum StartResult {
    /// The signal token was installed; the packet will wake the selector.
    Installed,
    /// The packet declined to block (it is immediately ready), so the
    /// selection must be torn down.
    Abort,
}
#[doc(hidden)]
pub trait Packet {
    /// Non-blocking readiness check used in the preflight phase of `wait`.
    fn can_recv(&self) -> bool;
    /// Install `token` so the packet can wake the blocked selector.
    fn start_selection(&self, token: SignalToken) -> StartResult;
    /// Tear down a previously-started selection; returns `true` if the
    /// packet is ready to receive.
    fn abort_selection(&self) -> bool;
}
impl Select {
    /// Creates a new selection structure. This set is initially empty and
    /// `wait` will panic!() if called.
    ///
    /// Usage of this struct directly can sometimes be burdensome, and usage is
    /// rather much easier through the `select!` macro.
    pub fn new() -> Select {
        Select {
            marker1: marker::NoSend,
            head: 0 as *mut Handle<'static, ()>,
            tail: 0 as *mut Handle<'static, ()>,
            next_id: Cell::new(1),
        }
    }
    /// Creates a new handle into this receiver set for a new receiver. Note
    /// that this does *not* add the receiver to the receiver set, for that you
    /// must call the `add` method on the handle itself.
    pub fn handle<'a, T: Send>(&'a self, rx: &'a Receiver<T>) -> Handle<'a, T> {
        let id = self.next_id.get();
        self.next_id.set(id + 1);
        Handle {
            id: id,
            selector: self,
            next: 0 as *mut Handle<'static, ()>,
            prev: 0 as *mut Handle<'static, ()>,
            added: false,
            rx: rx,
            packet: rx,
        }
    }
    /// Waits for an event on this receiver set. The returned value is *not* an
    /// index, but rather an id. This id can be queried against any active
    /// `Handle` structures (each one has an `id` method). The handle with
    /// the matching `id` will have some sort of event available on it. The
    /// event could either be that data is available or the corresponding
    /// channel has been closed.
    pub fn wait(&self) -> uint {
        self.wait2(true)
    }
    /// Helper method for skipping the preflight checks during testing
    fn wait2(&self, do_preflight_checks: bool) -> uint {
        // Note that this is currently an inefficient implementation. We in
        // theory have knowledge about all receivers in the set ahead of time,
        // so this method shouldn't really have to iterate over all of them yet
        // again. The idea with this "receiver set" interface is to get the
        // interface right this time around, and later this implementation can
        // be optimized.
        //
        // This implementation can be summarized by:
        //
        //      fn select(receivers) {
        //          if any receiver ready { return ready index }
        //          deschedule {
        //              block on all receivers
        //          }
        //          unblock on all receivers
        //          return ready index
        //      }
        //
        // Most notably, the iterations over all of the receivers shouldn't be
        // necessary.
        unsafe {
            // Stage 1: preflight checks. Look for any packets ready to receive
            if do_preflight_checks {
                for handle in self.iter() {
                    if (*handle).packet.can_recv() {
                        return (*handle).id();
                    }
                }
            }
            // Stage 2: begin the blocking process
            //
            // Create a number of signal tokens, and install each one
            // sequentially until one fails. If one fails, then abort the
            // selection on the already-installed tokens.
            let (wait_token, signal_token) = blocking::tokens();
            for (i, handle) in self.iter().enumerate() {
                match (*handle).packet.start_selection(signal_token.clone()) {
                    StartResult::Installed => {}
                    StartResult::Abort => {
                        // Go back and abort the already-begun selections
                        for handle in self.iter().take(i) {
                            (*handle).packet.abort_selection();
                        }
                        return (*handle).id;
                    }
                }
            }
            // Stage 3: no messages available, actually block
            wait_token.wait();
            // Stage 4: there *must* be message available; find it.
            //
            // Abort the selection process on each receiver. If the abort
            // process returns `true`, then that means that the receiver is
            // ready to receive some data. Note that this also means that the
            // receiver may have yet to have fully read the `to_wake` field and
            // woken us up (although the wakeup is guaranteed to fail).
            //
            // This situation happens in the window of where a sender invokes
            // increment(), sees -1, and then decides to wake up the task. After
            // all this is done, the sending thread will set `selecting` to
            // `false`. Until this is done, we cannot return. If we were to
            // return, then a sender could wake up a receiver which has gone
            // back to sleep after this call to `select`.
            //
            // Note that it is a "fairly small window" in which an increment()
            // views that it should wake a thread up until the `selecting` bit
            // is set to false. For now, the implementation currently just spins
            // in a yield loop. This is very distasteful, but this
            // implementation is already nowhere near what it should ideally be.
            // A rewrite should focus on avoiding a yield loop, and for now this
            // implementation is tying us over to a more efficient "don't
            // iterate over everything every time" implementation.
            let mut ready_id = uint::MAX;
            for handle in self.iter() {
                if (*handle).packet.abort_selection() {
                    ready_id = (*handle).id;
                }
            }
            // We must have found a ready receiver
            assert!(ready_id != uint::MAX);
            return ready_id;
        }
    }
    /// Iterate over the raw handle pointers currently linked into the set.
    fn iter(&self) -> Packets { Packets { cur: self.head } }
}
impl<'rx, T: Send> Handle<'rx, T> {
    /// Retrieve the id of this handle.
    #[inline]
    pub fn id(&self) -> uint { self.id }
    /// Receive a value on the underlying receiver. Has the same semantics as
    /// `Receiver.recv`
    pub fn recv(&mut self) -> T { self.rx.recv() }
    /// Block to receive a value on the underlying receiver, returning `Some` on
    /// success or `None` if the channel disconnects. This function has the same
    /// semantics as `Receiver.recv_opt`
    pub fn recv_opt(&mut self) -> Result<T, ()> { self.rx.recv_opt() }
    /// Adds this handle to the receiver set that the handle was created from. This
    /// method can be called multiple times, but it has no effect if `add` was
    /// called previously.
    ///
    /// This method is unsafe because it requires that the `Handle` is not moved
    /// while it is added to the `Select` set.
    pub unsafe fn add(&mut self) {
        if self.added { return }
        let selector: &mut Select = mem::transmute(&*self.selector);
        let me: *mut Handle<'static, ()> = mem::transmute(&*self);
        // Append ourselves to the tail of the selector's intrusive list.
        if selector.head.is_null() {
            selector.head = me;
            selector.tail = me;
        } else {
            (*me).prev = selector.tail;
            assert!((*me).next.is_null());
            (*selector.tail).next = me;
            selector.tail = me;
        }
        self.added = true;
    }
    /// Removes this handle from the `Select` set. This method is unsafe because
    /// it has no guarantee that the `Handle` was not moved since `add` was
    /// called.
    pub unsafe fn remove(&mut self) {
        if !self.added { return }
        let selector: &mut Select = mem::transmute(&*self.selector);
        let me: *mut Handle<'static, ()> = mem::transmute(&*self);
        // Unlink from the doubly-linked list, patching head/tail as needed.
        if self.prev.is_null() {
            assert_eq!(selector.head, me);
            selector.head = self.next;
        } else {
            (*self.prev).next = self.next;
        }
        if self.next.is_null() {
            assert_eq!(selector.tail, me);
            selector.tail = self.prev;
        } else {
            (*self.next).prev = self.prev;
        }
        self.next = 0 as *mut Handle<'static, ()>;
        self.prev = 0 as *mut Handle<'static, ()>;
        self.added = false;
    }
}
#[unsafe_destructor]
impl Drop for Select {
    fn drop(&mut self) {
        // Every handle must already have unlinked itself (its own Drop does
        // this) before the set is dropped.
        assert!(self.head.is_null());
        assert!(self.tail.is_null());
    }
}
#[unsafe_destructor]
impl<'rx, T: Send> Drop for Handle<'rx, T> {
    fn drop(&mut self) {
        // Unlink from the set so the selector never holds a dangling pointer.
        unsafe { self.remove() }
    }
}
impl Iterator<*mut Handle<'static, ()>> for Packets {
    // Walks the intrusive list via the raw `next` pointers; relies on the
    // handles staying pinned while added (see `Handle::add`).
    fn next(&mut self) -> Option<*mut Handle<'static, ()>> {
        if self.cur.is_null() {
            None
        } else {
            let ret = Some(self.cur);
            unsafe { self.cur = (*self.cur).next; }
            ret
        }
    }
}
#[cfg(test)]
#[allow(unused_imports)]
mod test {
use prelude::*;
use super::*;
// Don't use the libstd version so we can pull in the right Select structure
// (std::comm points at the wrong one)
macro_rules! select {
(
$($name:pat = $rx:ident.$meth:ident() => $code:expr),+
) => ({
use comm::Select;
let sel = Select::new();
$( let mut $rx = sel.handle(&$rx); )+
unsafe {
$( $rx.add(); )+
}
let ret = sel.wait();
$( if ret == $rx.id() { let $name = $rx.$meth(); $code } else )+
{ unreachable!() }
})
}
test! { fn smoke() {
let (tx1, rx1) = channel::<int>();
let (tx2, rx2) = channel::<int>();
tx1.send(1);
select! {
foo = rx1.recv() => { assert_eq!(foo, 1); },
_bar = rx2.recv() => { panic!() }
}
tx2.send(2);
select! {
_foo = rx1.recv() => { panic!() },
bar = rx2.recv() => { assert_eq!(bar, 2) }
}
drop(tx1);
select! {
foo = rx1.recv_opt() => { assert_eq!(foo, Err(())); },
_bar = rx2.recv() => { panic!() }
}
drop(tx2);
select! {
bar = rx2.recv_opt() => { assert_eq!(bar, Err(())); }
}
} }
test! { fn smoke2() {
let (_tx1, rx1) = channel::<int>();
let (_tx2, rx2) = channel::<int>();
let (_tx3, rx3) = channel::<int>();
let (_tx4, rx4) = channel::<int>();
let (tx5, rx5) = channel::<int>();
tx5.send(4);
select! {
_foo = rx1.recv() => { panic!("1") },
_foo = rx2.recv() => { panic!("2") },
_foo = rx3.recv() => { panic!("3") },
_foo = rx4.recv() => { panic!("4") },
foo = rx5.recv() => { assert_eq!(foo, 4); }
}
} }
test! { fn closed() {
let (_tx1, rx1) = channel::<int>();
let (tx2, rx2) = channel::<int>();
drop(tx2);
select! {
_a1 = rx1.recv_opt() => { panic!() },
a2 = rx2.recv_opt() => { assert_eq!(a2, Err(())); }
}
} }
test! { fn unblocks() {
let (tx1, rx1) = channel::<int>();
let (_tx2, rx2) = channel::<int>();
let (tx3, rx3) = channel::<int>();
spawn(move|| {
for _ in range(0u, 20) { Thread::yield_now(); }
tx1.send(1);
rx3.recv();
for _ in range(0u, 20) { Thread::yield_now(); }
});
select! {
a = rx1.recv() => { assert_eq!(a, 1); },
_b = rx2.recv() => { panic!() }
}
tx3.send(1);
select! {
a = rx1.recv_opt() => { assert_eq!(a, Err(())); },
_b = rx2.recv() => { panic!() }
}
} }
test! { fn both_ready() {
let (tx1, rx1) = channel::<int>();
let (tx2, rx2) = channel::<int>();
let (tx3, rx3) = channel::<()>();
spawn(move|| {
for _ in range(0u, 20) { Thread::yield_now(); }
tx1.send(1);
tx2.send(2);
rx3.recv();
});
select! {
a = rx1.recv() => { assert_eq!(a, 1); },
a = rx2.recv() => { assert_eq!(a, 2); }
}
select! {
a = rx1.recv() => { assert_eq!(a, 1); },
a = rx2.recv() => { assert_eq!(a, 2); }
}
assert_eq!(rx1.try_recv(), Err(Empty));
assert_eq!(rx2.try_recv(), Err(Empty));
tx3.send(());
} }
test! { fn stress() {
static AMT: int = 10000;
let (tx1, rx1) = channel::<int>();
let (tx2, rx2) = channel::<int>();
let (tx3, rx3) = channel::<()>();
spawn(move|| {
for i in range(0, AMT) {
if i % 2 == 0 {
tx1.send(i);
} else {
tx2.send(i);
}
rx3.recv();
}
});
for i in range(0, AMT) {
select! {
i1 = rx1.recv() => { assert!(i % 2 == 0 && i == i1); },
i2 = rx2.recv() => { assert!(i % 2 == 1 && i == i2); }
}
tx3.send(());
}
} }
test! { fn cloning() {
let (tx1, rx1) = channel::<int>();
let (_tx2, rx2) = channel::<int>();
let (tx3, rx3) = channel::<()>();
spawn(move|| {
rx3.recv();
tx1.clone();
assert_eq!(rx3.try_recv(), Err(Empty));
tx1.send(2);
rx3.recv();
});
tx3.send(());
select! {
_i1 = rx1.recv() => {},
_i2 = rx2.recv() => panic!()
}
tx3.send(());
} }
test! { fn cloning2() {
let (tx1, rx1) = channel::<int>();
let (_tx2, rx2) = channel::<int>();
let (tx3, rx3) = channel::<()>();
spawn(move|| {
rx3.recv();
tx1.clone();
assert_eq!(rx3.try_recv(), Err(Empty));
tx1.send(2);
rx3.recv();
});
tx3.send(());
select! {
_i1 = rx1.recv() => {},
_i2 = rx2.recv() => panic!()
}
tx3.send(());
} }
test! { fn cloning3() {
let (tx1, rx1) = channel::<()>();
let (tx2, rx2) = channel::<()>();
let (tx3, rx3) = channel::<()>();
spawn(move|| {
let s = Select::new();
let mut h1 = s.handle(&rx1);
let mut h2 = s.handle(&rx2);
unsafe { h2.add(); }
unsafe { h1.add(); }
assert_eq!(s.wait(), h2.id);
tx3.send(());
});
for _ in range(0u, 1000) { Thread::yield_now(); }
drop(tx1.clone());
tx2.send(());
rx3.recv();
} }
test! { fn preflight1() {
let (tx, rx) = channel();
tx.send(());
select! {
() = rx.recv() => {}
}
} }
test! { fn preflight2() {
let (tx, rx) = channel();
tx.send(());
tx.send(());
select! {
() = rx.recv() => {}
}
} }
test! { fn preflight3() {
let (tx, rx) = channel();
drop(tx.clone());
tx.send(());
select! {
() = rx.recv() => {}
}
} }
test! { fn preflight4() {
let (tx, rx) = channel();
tx.send(());
let s = Select::new();
let mut h = s.handle(&rx);
unsafe { h.add(); }
assert_eq!(s.wait2(false), h.id);
} }
test! { fn preflight5() {
let (tx, rx) = channel();
tx.send(());
tx.send(());
let s = Select::new();
let mut h = s.handle(&rx);
unsafe { h.add(); }
assert_eq!(s.wait2(false), h.id);
} }
test! { fn preflight6() {
let (tx, rx) = channel();
drop(tx.clone());
tx.send(());
let s = Select::new();
let mut h = s.handle(&rx);
unsafe { h.add(); }
assert_eq!(s.wait2(false), h.id);
} }
test! { fn preflight7() {
let (tx, rx) = channel::<()>();
drop(tx);
let s = Select::new();
let mut h = s.handle(&rx);
unsafe { h.add(); }
assert_eq!(s.wait2(false), h.id);
} }
test! { fn preflight8() {
let (tx, rx) = channel();
tx.send(());
drop(tx);
rx.recv();
let s = Select::new();
let mut h = s.handle(&rx);
unsafe { h.add(); }
assert_eq!(s.wait2(false), h.id);
} }
test! { fn preflight9() {
let (tx, rx) = channel();
drop(tx.clone());
tx.send(());
drop(tx);
rx.recv();
let s = Select::new();
let mut h = s.handle(&rx);
unsafe { h.add(); }
assert_eq!(s.wait2(false), h.id);
} }
test! { fn oneshot_data_waiting() {
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
spawn(move|| {
select! {
() = rx1.recv() => {}
}
tx2.send(());
});
for _ in range(0u, 100) { Thread::yield_now() }
tx1.send(());
rx2.recv();
} }
test! { fn stream_data_waiting() {
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
tx1.send(());
tx1.send(());
rx1.recv();
rx1.recv();
spawn(move|| {
select! {
() = rx1.recv() => {}
}
tx2.send(());
});
for _ in range(0u, 100) { Thread::yield_now() }
tx1.send(());
rx2.recv();
} }
test! { fn shared_data_waiting() {
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
drop(tx1.clone());
tx1.send(());
rx1.recv();
spawn(move|| {
select! {
() = rx1.recv() => {}
}
tx2.send(());
});
for _ in range(0u, 100) { Thread::yield_now() }
tx1.send(());
rx2.recv();
} }
test! { fn sync1() {
let (tx, rx) = sync_channel::<int>(1);
tx.send(1);
select! {
n = rx.recv() => { assert_eq!(n, 1); }
}
} }
test! { fn sync2() {
let (tx, rx) = sync_channel::<int>(0);
spawn(move|| {
for _ in range(0u, 100) { Thread::yield_now() }
tx.send(1);
});
select! {
n = rx.recv() => { assert_eq!(n, 1); }
}
} }
test! { fn sync3() {
let (tx1, rx1) = sync_channel::<int>(0);
let (tx2, rx2): (Sender<int>, Receiver<int>) = channel();
spawn(move|| { tx1.send(1); });
spawn(move|| { tx2.send(2); });
select! {
n = rx1.recv() => {
assert_eq!(n, 1);
assert_eq!(rx2.recv(), 2);
},
n = rx2.recv() => {
assert_eq!(n, 2);
assert_eq!(rx1.recv(), 1);
}
}
} }
}
| 30.977839 | 83 | 0.486363 |
5d1b2c0d571e7c18a7592ed23178b28fa0e878ab | 3,068 | // WARNING: This file was autogenerated by jni-bindgen. Any changes to this file may be lost!!!
#[cfg(any(feature = "all", feature = "javax-security-auth-AuthPermission"))]
__jni_bindgen! {
/// public final class [AuthPermission](https://developer.android.com/reference/javax/security/auth/AuthPermission.html)
///
/// Required feature: javax-security-auth-AuthPermission
public final class AuthPermission ("javax/security/auth/AuthPermission") extends crate::java::security::BasicPermission {
/// [AuthPermission](https://developer.android.com/reference/javax/security/auth/AuthPermission.html#AuthPermission(java.lang.String))
///
/// Required features: "java-lang-String"
#[cfg(any(feature = "all", all(feature = "java-lang-String")))]
pub fn new_String<'env>(__jni_env: &'env __jni_bindgen::Env, arg0: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::java::lang::String>>) -> __jni_bindgen::std::result::Result<__jni_bindgen::Local<'env, crate::javax::security::auth::AuthPermission>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// class.path == "javax/security/auth/AuthPermission", java.flags == PUBLIC, .name == "<init>", .descriptor == "(Ljava/lang/String;)V"
unsafe {
let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0.into())];
let (__jni_class, __jni_method) = __jni_env.require_class_method("javax/security/auth/AuthPermission\0", "<init>\0", "(Ljava/lang/String;)V\0");
__jni_env.new_object_a(__jni_class, __jni_method, __jni_args.as_ptr())
}
}
/// [AuthPermission](https://developer.android.com/reference/javax/security/auth/AuthPermission.html#AuthPermission(java.lang.String,%20java.lang.String))
///
/// Required features: "java-lang-String"
#[cfg(any(feature = "all", all(feature = "java-lang-String")))]
pub fn new_String_String<'env>(__jni_env: &'env __jni_bindgen::Env, arg0: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::java::lang::String>>, arg1: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::java::lang::String>>) -> __jni_bindgen::std::result::Result<__jni_bindgen::Local<'env, crate::javax::security::auth::AuthPermission>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// class.path == "javax/security/auth/AuthPermission", java.flags == PUBLIC, .name == "<init>", .descriptor == "(Ljava/lang/String;Ljava/lang/String;)V"
unsafe {
let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0.into()), __jni_bindgen::AsJValue::as_jvalue(&arg1.into())];
let (__jni_class, __jni_method) = __jni_env.require_class_method("javax/security/auth/AuthPermission\0", "<init>\0", "(Ljava/lang/String;Ljava/lang/String;)V\0");
__jni_env.new_object_a(__jni_class, __jni_method, __jni_args.as_ptr())
}
}
}
}
| 80.736842 | 477 | 0.673729 |
5b32d1e6c1940d90f8cbee20a89569b9377e63cb | 16,552 | #![allow(clippy::not_unsafe_ptr_arg_deref)]
mod hnsw;
mod types;
#[macro_use]
extern crate redis_module;
#[macro_use]
extern crate redismodule_cmd;
#[macro_use]
extern crate lazy_static;
extern crate num;
extern crate ordered_float;
extern crate owning_ref;
use hnsw::{Index, Node};
use redis_module::{Context, RedisError, RedisResult, RedisValue};
use redismodule_cmd::{rediscmd_doc, ArgType, Collection, Command};
use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet};
use std::sync::{Arc, RwLock};
use types::*;
// Key-space prefix for every redis key (indices and nodes) owned by this module.
static PREFIX: &str = "hnsw";
// Shared handle to an in-memory index; cloned out of the global cache by each handler.
type IndexArc = Arc<RwLock<IndexT>>;
type IndexT = Index<f32, f32>;
lazy_static! {
    // Process-wide cache of live indices keyed by the full redis key name
    // ("hnsw.<name>"). Handlers consult this first and rebuild from redis on a miss.
    static ref INDICES: Arc<RwLock<HashMap<String, IndexArc>>> =
        Arc::new(RwLock::new(HashMap::new()));
}
// Command schemas, one per redis command exposed below. `command!` records the
// name, description, and argument spec (kind, type, arity, default); each
// handler parses its raw `args` against the matching schema.
thread_local! {
    // `hnsw.new` — index name plus dimensionality and the HNSW tuning knobs.
    #[rediscmd_doc(clean)]
    static NEW_INDEX_CMD: Command = command!{
        name: "hnsw.new",
        desc: "Create a new HNSW index.",
        args: [
            ["name", "Name of the index.", ArgType::Arg, String, Collection::Unit, None],
            ["dim", "Dimensionality of the data.", ArgType::Kwarg, u64, Collection::Unit, None],
            [
                "m",
                "Parameter for the number of neighbors to select for each node.",
                ArgType::Kwarg, u64, Collection::Unit, Some(Box::new(5_u64))
            ],
            [
                "efcon",
                "Parameter for the size of the dynamic candidate list.",
                ArgType::Kwarg, u64, Collection::Unit, Some(Box::new(200_u64))
            ],
        ],
    };
    // `hnsw.get` — fetch an index by name.
    #[rediscmd_doc]
    static GET_INDEX_CMD: Command = command!{
        name: "hnsw.get",
        desc: "Retrieve an HNSW index.",
        args: [
            ["name", "Name of the index.", ArgType::Arg, String, Collection::Unit, None],
        ],
    };
    // `hnsw.del` — delete an index (and, in the handler, all of its nodes).
    #[rediscmd_doc]
    static DEL_INDEX_CMD: Command = command!{
        name: "hnsw.del",
        desc: "Delete an HNSW index.",
        args: [
            ["name", "Name of the index.", ArgType::Arg, String, Collection::Unit, None],
        ],
    };
    // `hnsw.node.add` — insert a data vector under `index`/`node`.
    #[rediscmd_doc]
    static ADD_NODE_CMD: Command = command!{
        name: "hnsw.node.add",
        desc: "Add a node to the index.",
        args: [
            ["index", "name of the index", ArgType::Arg, String, Collection::Unit, None],
            ["node", "name of the node", ArgType::Arg, String, Collection::Unit, None],
            [
                "data",
                "Dimensionality followed by a space separated vector of data. Total entries must match `DIM` of index",
                ArgType::Kwarg, f64, Collection::Vec, None
            ],
        ],
    };
    // `hnsw.node.get` — fetch a single node.
    #[rediscmd_doc]
    static GET_NODE_CMD: Command = command!{
        name: "hnsw.node.get",
        desc: "Retrieve a node from the index.",
        args: [
            ["index", "name of the index", ArgType::Arg, String, Collection::Unit, None],
            ["node", "name of the node", ArgType::Arg, String, Collection::Unit, None],
        ],
    };
    // `hnsw.node.del` — remove a single node.
    #[rediscmd_doc]
    static DEL_NODE_CMD: Command = command!{
        name: "hnsw.node.del",
        desc: "Delete a node from the index.",
        args: [
            ["index", "name of the index", ArgType::Arg, String, Collection::Unit, None],
            ["node", "name of the node", ArgType::Arg, String, Collection::Unit, None],
        ],
    };
    // `hnsw.search` — k-nearest-neighbor query against an index.
    #[rediscmd_doc]
    static SEARCH_CMD: Command = command!{
        name: "hnsw.search",
        desc: "Search the index for the K nearest elements to the query.",
        args: [
            ["index", "name of the index", ArgType::Arg, String, Collection::Unit, None],
            [
                "k",
                "number of nearest neighbors to return",
                ArgType::Kwarg, u64, Collection::Unit, Some(Box::new(5_u64))
            ],
            [
                "query",
                "Dimensionality followed by a space separated vector of data. Total entries must match `DIM` of index",
                ArgType::Kwarg, f64, Collection::Vec, None
            ],
        ],
    };
}
/// Handler for `hnsw.new`: create an HNSW index named `hnsw.<name>`.
///
/// Fails if a value already exists under that key. On success the index is
/// persisted to redis and registered in the global `INDICES` cache.
fn new_index(ctx: &Context, args: Vec<String>) -> RedisResult {
    ctx.auto_memory();
    let mut parsed = NEW_INDEX_CMD.with(|cmd| cmd.parse_args(args))?;
    // NOTE(review): the `unwrap()`s assume `parse_args` rejects missing
    // required arguments before we get here — TODO confirm in redismodule_cmd.
    let name_suffix = parsed.remove("name").unwrap().as_string()?;
    let index_name = format!("{}.{}", PREFIX, name_suffix);
    let data_dim = parsed.remove("dim").unwrap().as_u64()? as usize;
    let m = parsed.remove("m").unwrap().as_u64()? as usize;
    let ef_construction = parsed.remove("efcon").unwrap().as_u64()? as usize;
    // write to redis
    let key = ctx.open_key_writable(&index_name);
    match key.get_value::<IndexRedis>(&HNSW_INDEX_REDIS_TYPE)? {
        Some(_) => {
            return Err(RedisError::String(format!(
                "Index: {} already exists",
                &index_name
            )));
        }
        None => {
            // create index
            let index = Index::new(
                &index_name,
                Box::new(hnsw::metrics::euclidean),
                data_dim,
                m,
                ef_construction,
            );
            ctx.log_debug(format!("{:?}", index).as_str());
            key.set_value::<IndexRedis>(&HNSW_INDEX_REDIS_TYPE, index.clone().into())?;
            // Add index to global hashmap
            INDICES
                .write()
                .unwrap()
                .insert(index_name, Arc::new(RwLock::new(index)));
        }
    }
    Ok("OK".into())
}
/// Handler for `hnsw.get`: return a serializable snapshot of an index.
fn get_index(ctx: &Context, args: Vec<String>) -> RedisResult {
    ctx.auto_memory();
    let mut parsed = GET_INDEX_CMD.with(|cmd| cmd.parse_args(args))?;
    let name_suffix = parsed.remove("name").unwrap().as_string()?;
    let index_name = format!("{}.{}", PREFIX, name_suffix);
    let index = load_index(ctx, &index_name)?;
    // Non-blocking read: errors instead of waiting if the index is write-locked.
    let index = index.try_read().map_err(|e| e.to_string())?;
    ctx.log_debug(format!("Index: {:?}", index).as_str());
    ctx.log_debug(format!("Layers: {:?}", index.layers.len()).as_str());
    ctx.log_debug(format!("Nodes: {:?}", index.nodes.len()).as_str());
    // Reply with the flattened redis representation of the index.
    let index_redis: IndexRedis = index.clone().into();
    Ok(index_redis.into())
}
/// Handler for `hnsw.del`: remove an index from the cache and from redis,
/// deleting every node key it owns along the way.
fn delete_index(ctx: &Context, args: Vec<String>) -> RedisResult {
    ctx.auto_memory();
    let mut parsed = DEL_INDEX_CMD.with(|cmd| cmd.parse_args(args))?;
    let name_suffix = parsed.remove("name").unwrap().as_string()?;
    let index_name = format!("{}.{}", PREFIX, name_suffix);
    // get index from global hashmap
    // (ensures the index is cached so `remove` below can find it)
    load_index(ctx, &index_name)?;
    let mut indices = INDICES.write().unwrap();
    let index = indices
        .remove(&index_name)
        .ok_or_else(|| format!("Index: {} does not exist", name_suffix))?;
    let index = index.try_read().map_err(|e| e.to_string())?;
    // Delete every node key belonging to this index first.
    // NOTE(review): a failure mid-loop leaves the deletion partially applied.
    for (node_name, _) in index.nodes.iter() {
        delete_node_redis(ctx, &node_name)?;
    }
    // get index from redis
    ctx.log_debug(format!("deleting index: {}", &index_name).as_str());
    let rkey = ctx.open_key_writable(&index_name);
    match rkey.get_value::<IndexRedis>(&HNSW_INDEX_REDIS_TYPE)? {
        Some(_) => rkey.delete()?,
        None => {
            return Err(RedisError::String(format!(
                "Index: {} does not exist",
                name_suffix
            )));
        }
    };
    Ok(1_usize.into())
}
/// Fetch an index from the in-process cache, lazily rebuilding it from its
/// redis representation on a cache miss.
///
/// Returns a cheap `Arc` clone; callers lock it for reading or writing.
fn load_index(ctx: &Context, index_name: &str) -> Result<IndexArc, RedisError> {
    let mut indices = INDICES.write().unwrap();
    // check if index is in global hashmap
    let index = match indices.entry(index_name.to_string()) {
        Entry::Occupied(o) => o.into_mut(),
        // if index isn't present, load it from redis
        Entry::Vacant(v) => {
            // get index from redis
            ctx.log_debug(format!("get key: {}", &index_name).as_str());
            let rkey = ctx.open_key(&index_name);
            let index_redis = rkey
                .get_value::<IndexRedis>(&HNSW_INDEX_REDIS_TYPE)?
                .ok_or_else(|| format!("Index: {} does not exist", index_name))?;
            // Rebuild the full in-memory graph from the flattened form.
            let index = make_index(ctx, index_redis)?;
            v.insert(Arc::new(RwLock::new(index)))
        }
    };
    Ok(index.clone())
}
/// Rebuild a full in-memory `Index` from its flattened redis form.
///
/// Works in several passes: (1) create every node from its stored data,
/// (2) wire each node's per-layer neighbor lists back up as weak references,
/// (3) rebuild the layer sets, and (4) restore the enterpoint.
fn make_index(ctx: &Context, ir: &IndexRedis) -> Result<IndexT, RedisError> {
    let mut index: IndexT = ir.clone().into();
    index.nodes = HashMap::with_capacity(ir.node_count);
    // Pass 1: materialize every node before any cross-references are resolved.
    for node_name in &ir.nodes {
        let key = ctx.open_key(&node_name);
        let nr = key
            .get_value::<NodeRedis>(&HNSW_NODE_REDIS_TYPE)?
            .ok_or_else(|| format!("Node: {} does not exist", node_name))?;
        let node = Node::new(node_name, &nr.data, index.m_max_0);
        index.nodes.insert(node_name.to_owned(), node);
    }
    // reconstruct nodes
    // Pass 2: neighbor lists are stored by name; resolve each name to a weak
    // reference into the node map built above.
    for node_name in &ir.nodes {
        let target = index.nodes.get(node_name).unwrap();
        let key = ctx.open_key(&node_name);
        let nr = key
            .get_value::<NodeRedis>(&HNSW_NODE_REDIS_TYPE)?
            .ok_or_else(|| format!("Node: {} does not exist", node_name))?;
        for layer in &nr.neighbors {
            let mut node_layer = Vec::with_capacity(layer.len());
            for neighbor in layer {
                let nn = index
                    .nodes
                    .get(neighbor)
                    .ok_or_else(|| format!("Node: {} does not exist", neighbor))?;
                node_layer.push(nn.downgrade());
            }
            target.write().neighbors.push(node_layer);
        }
    }
    // reconstruct layers
    for layer in &ir.layers {
        let mut node_layer = HashSet::with_capacity(layer.len());
        for node_name in layer {
            let node = index
                .nodes
                .get(node_name)
                .ok_or_else(|| format!("Node: {} does not exist", node_name))?;
            node_layer.insert(node.downgrade());
        }
        index.layers.push(node_layer);
    }
    // set enterpoint
    index.enterpoint = match &ir.enterpoint {
        Some(node_name) => {
            let node = index
                .nodes
                .get(node_name)
                .ok_or_else(|| format!("Node: {} does not exist", node_name))?;
            Some(node.downgrade())
        }
        None => None,
    };
    Ok(index)
}
/// Persist the in-memory index back to its redis key.
///
/// Errors if the key no longer holds an index value (e.g. it was deleted
/// out from under us).
fn update_index(ctx: &Context, index_name: &str, index: &IndexT) -> Result<(), RedisError> {
    let key = ctx.open_key_writable(index_name);
    match key.get_value::<IndexRedis>(&HNSW_INDEX_REDIS_TYPE)? {
        Some(_) => {
            ctx.log_debug(format!("update index: {}", index_name).as_str());
            key.set_value::<IndexRedis>(&HNSW_INDEX_REDIS_TYPE, index.clone().into())?;
        }
        None => {
            return Err(RedisError::String(format!(
                "Index: {} does not exist",
                index_name
            )));
        }
    }
    Ok(())
}
/// Handler for `hnsw.node.add`: insert a data vector into an index under the
/// key `hnsw.<index>.<node>`, then persist the node and the updated index.
fn add_node(ctx: &Context, args: Vec<String>) -> RedisResult {
    ctx.auto_memory();
    let mut parsed = ADD_NODE_CMD.with(|cmd| cmd.parse_args(args))?;
    let index_suffix = parsed.remove("index").unwrap().as_string()?;
    let node_suffix = parsed.remove("node").unwrap().as_string()?;
    let index_name = format!("{}.{}", PREFIX, index_suffix);
    let node_name = format!("{}.{}.{}", PREFIX, index_suffix, node_suffix);
    // The command layer only yields f64; the index stores f32.
    let dataf64 = parsed.remove("data").unwrap().as_f64vec()?;
    let data = dataf64.iter().map(|d| *d as f32).collect::<Vec<f32>>();
    let index = load_index(ctx, &index_name)?;
    let mut index = index.try_write().map_err(|e| e.to_string())?;
    // Callback invoked by the insertion for every neighbor node it rewires,
    // so the rewired neighbors are persisted too.
    // NOTE(review): the `unwrap()` here aborts the command on a redis write
    // failure mid-insert — TODO consider propagating instead.
    let up = |name: String, node: Node<f32>| {
        write_node(ctx, &name, (&node).into()).unwrap();
    };
    ctx.log_debug(format!("Adding node: {} to Index: {}", &node_name, &index_name).as_str());
    index
        .add_node(&node_name, &data, up)
        .map_err(|e| e.error_string())?;
    // write node to redis
    let node = index.nodes.get(&node_name).unwrap();
    write_node(ctx, &node_name, node.into())?;
    // update index in redis
    update_index(ctx, &index_name, &index)?;
    Ok("OK".into())
}
/// Handler for `hnsw.node.del`: remove a node from the graph and delete its
/// redis key, persisting any neighbors rewired by the removal.
fn delete_node(ctx: &Context, args: Vec<String>) -> RedisResult {
    ctx.auto_memory();
    let mut parsed = DEL_NODE_CMD.with(|cmd| cmd.parse_args(args))?;
    let index_suffix = parsed.remove("index").unwrap().as_string()?;
    let node_suffix = parsed.remove("node").unwrap().as_string()?;
    let index_name = format!("{}.{}", PREFIX, index_suffix);
    let node_name = format!("{}.{}.{}", PREFIX, index_suffix, node_suffix);
    let index = load_index(ctx, &index_name)?;
    let mut index = index.try_write().map_err(|e| e.to_string())?;
    let node = index.nodes.get(&node_name).unwrap();
    // Refuse to delete while some other holder still has a strong reference
    // to the node (count > 1: the map itself holds one).
    if Arc::strong_count(&node.0) > 1 {
        return Err(format!(
            "{} is being accessed, unable to delete. Try again later",
            &node_name
        )
        .into());
    }
    // Persist every neighbor the removal rewires.
    let up = |name: String, node: Node<f32>| {
        write_node(ctx, &name, (&node).into()).unwrap();
    };
    index
        .delete_node(&node_name, up)
        .map_err(|e| e.error_string())?;
    delete_node_redis(ctx, &node_name)?;
    // update index in redis
    update_index(ctx, &index_name, &index)?;
    Ok(1_usize.into())
}
/// Delete a node's backing redis key; errors if the key holds no node value.
fn delete_node_redis(ctx: &Context, node_name: &str) -> Result<(), RedisError> {
    ctx.log_debug(format!("del key: {}", node_name).as_str());
    let rkey = ctx.open_key_writable(node_name);
    match rkey.get_value::<NodeRedis>(&HNSW_NODE_REDIS_TYPE)? {
        Some(_) => rkey.delete()?,
        None => {
            return Err(RedisError::String(format!(
                "Node: {} does not exist",
                node_name
            )));
        }
    };
    Ok(())
}
/// Handler for `hnsw.node.get`: reply with the stored node value for
/// `hnsw.<index>.<node>`, or an error if it does not exist.
fn get_node(ctx: &Context, args: Vec<String>) -> RedisResult {
    ctx.auto_memory();
    let mut parsed = GET_NODE_CMD.with(|cmd| cmd.parse_args(args))?;
    let index_suffix = parsed.remove("index").unwrap().as_string()?;
    let node_suffix = parsed.remove("node").unwrap().as_string()?;
    let node_name = format!("{}.{}.{}", PREFIX, index_suffix, node_suffix);
    ctx.log_debug(format!("get key: {}", node_name).as_str());
    let key = ctx.open_key(&node_name);
    let value = key
        .get_value::<NodeRedis>(&HNSW_NODE_REDIS_TYPE)?
        .ok_or_else(|| format!("Node: {} does not exist", &node_name))?;
    Ok(value.into())
}
/// Upsert a node under `key`: update the stored value's data and neighbor
/// lists in place if the key already holds a node, otherwise create it.
///
/// Returns the key name as the reply value.
///
/// (The explicit `'a` lifetime the original declared was redundant — plain
/// elision produces the identical signature; clippy::needless_lifetimes.)
fn write_node(ctx: &Context, key: &str, node: NodeRedis) -> RedisResult {
    ctx.log_debug(format!("set key: {}", key).as_str());
    let rkey = ctx.open_key_writable(key);
    match rkey.get_value::<NodeRedis>(&HNSW_NODE_REDIS_TYPE)? {
        Some(value) => {
            // Mutate the existing value rather than replacing the key.
            value.data = node.data;
            value.neighbors = node.neighbors;
        }
        None => {
            rkey.set_value(&HNSW_NODE_REDIS_TYPE, node)?;
        }
    }
    Ok(key.into())
}
/// Handler for `hnsw.search`: find the `k` nearest neighbors to a query
/// vector. The reply is the result count followed by one entry per hit.
fn search_knn(ctx: &Context, args: Vec<String>) -> RedisResult {
    ctx.auto_memory();
    let mut parsed = SEARCH_CMD.with(|cmd| cmd.parse_args(args))?;
    let index_suffix = parsed.remove("index").unwrap().as_string()?;
    let k = parsed.remove("k").unwrap().as_u64()? as usize;
    // The command layer only yields f64; the index stores f32.
    let dataf64 = parsed.remove("query").unwrap().as_f64vec()?;
    let data = dataf64.iter().map(|&d| d as f32).collect::<Vec<f32>>();
    let index_name = format!("{}.{}", PREFIX, index_suffix);
    let index = load_index(ctx, &index_name)?;
    // Non-blocking read: errors instead of waiting if the index is write-locked.
    let index = index.try_read().map_err(|e| e.to_string())?;
    ctx.log_debug(
        format!(
            "Searching for {} nearest nodes in Index: {}",
            k, &index_name
        )
        .as_str(),
    );
    match index.search_knn(&data, k) {
        Ok(res) => {
            // Reply layout: count first, then one element per result.
            // Size the buffer up front instead of growing it push by push.
            let mut reply: Vec<RedisValue> = Vec::with_capacity(res.len() + 1);
            reply.push(res.len().into());
            for r in &res {
                let sr: SearchResultRedis = r.into();
                reply.push(sr.into());
            }
            Ok(reply.into())
        }
        Err(e) => Err(e.error_string().into()),
    }
}
// Module registration: declares the two custom data types and wires each
// redis command name to its handler, with ACL flags and key positions.
redis_module! {
    name: "hnsw",
    version: 1,
    data_types: [
        HNSW_INDEX_REDIS_TYPE,
        HNSW_NODE_REDIS_TYPE,
    ],
    commands: [
        ["hnsw.new", new_index, "write", 0, 0, 0],
        ["hnsw.get", get_index, "readonly", 0, 0, 0],
        ["hnsw.del", delete_index, "write", 0, 0, 0],
        ["hnsw.search", search_knn, "readonly", 0, 0, 0],
        ["hnsw.node.add", add_node, "write", 0, 0, 0],
        ["hnsw.node.get", get_node, "readonly", 0, 0, 0],
        ["hnsw.node.del", delete_node, "write", 0, 0, 0],
    ],
}
| 32.139806 | 119 | 0.563376 |
eddd0e51b317dd366afa6c7c85cbbdf7d608268b | 2,213 | //! Wrapper around integer types, used as indices within `IntervalMap` and `IntervalSet`.
use core::fmt::Display;
/// Trait for index types: used in the inner representation of [IntervalMap](../struct.IntervalMap.html) and
/// [IntervalSet](../set/struct.IntervalSet.html).
///
/// Implemented for `u8`, `u16`, `u32` and `u64`,
/// `u32` is used by default ([DefaultIx](type.DefaultIx.html)).
///
/// `IntervalMap` or `IntervalSet` can store up to `Ix::MAX - 1` elements
/// (for example `IntervalMap<_, _, u8>` can store up to 255 items).
///
/// Using smaller index types saves memory and slightly reduces running time.
pub trait IndexType: Copy + Display + Sized + Eq + Ord {
    /// Undefined index. There can be no indices higher than MAX.
    const MAX: Self;

    /// Converts index into `usize`.
    fn get(self) -> usize;

    /// Creates a new index. Returns error if the `element_num` is too big.
    fn new(element_num: usize) -> Result<Self, &'static str>;

    /// Returns `true` if the index is defined (i.e. not the `MAX` sentinel).
    #[inline(always)]
    fn defined(self) -> bool {
        self != Self::MAX
    }
}

// Error message for a failed `IndexType::new`, specialized per index type
// (the u64 variant cannot suggest a wider type to fall back to).
macro_rules! index_error {
    (u64) => {
        "Failed to insert a new element into IntervalMap/Set: number of elements is too large for u64."
    };
    ($name:ident) => {
        concat!(
            "Failed to insert a new element into IntervalMap/Set: number of elements is too large for ",
            stringify!($name),
            ", try using u64.")
    };
}

// Implements `IndexType` for an unsigned integer type.
macro_rules! impl_index {
    ($type:ident) => {
        impl IndexType for $type {
            const MAX: Self = core::$type::MAX;

            #[inline(always)]
            fn get(self) -> usize {
                // Indices are created through `new`, which takes a `usize`,
                // so any stored index fits back into `usize` losslessly.
                self as usize
            }

            #[inline]
            fn new(element_num: usize) -> Result<Self, &'static str> {
                // Compare in u64 *before* casting so an out-of-range input is
                // rejected rather than silently truncated by `as`. (The
                // previous implementation cast first, so e.g. `u8::new(300)`
                // wrapped to `Ok(44)` instead of returning an error.)
                // `usize as u64` is lossless on all supported platforms.
                if element_num as u64 >= core::$type::MAX as u64 {
                    Err(index_error!($type))
                } else {
                    Ok(element_num as $type)
                }
            }
        }
    };
}

impl_index!(u8);
impl_index!(u16);
impl_index!(u32);
impl_index!(u64);

/// Default index type.
pub type DefaultIx = u32;
| 30.315068 | 108 | 0.576141 |
fc727c53c83ddc144dc8f34c67cfad8378a43352 | 7,827 | //! Futures and other types that allow asynchronous interaction with channels.
use std::{
future::Future,
pin::Pin,
task::{Context, Poll, Waker},
any::Any,
};
use crate::*;
use futures::{Stream, stream::FusedStream, future::FusedFuture, Sink};
/// `Signal` implementation for the async futures below: firing wakes the
/// stored task and records the fact in the flag, so a future that is dropped
/// after being signalled can pass the wake-up on (see `RecvFut`'s `Drop`).
struct AsyncSignal(Waker, AtomicBool);
impl Signal for AsyncSignal {
    fn as_any(&self) -> &(dyn Any + 'static) { self }
    fn fire(&self) {
        // Set the "fired" flag before waking so a racing drop observes it.
        self.1.store(true, Ordering::SeqCst);
        self.0.wake_by_ref()
    }
}
// TODO: Wtf happens with timeout races? Futures can still receive items when not being actively polled...
// Is this okay? I guess it must be? How do other async channel crates handle it?
impl<T: Unpin> Sender<T> {
    /// Asynchronously send a value into the channel, returning an error if the channel receiver has
    /// been dropped. If the channel is bounded and is full, this method will yield to the async runtime.
    pub fn send_async(&self, item: T) -> SendFuture<T> {
        SendFuture {
            shared: &self.shared,
            // `Err(item)` means "still holding the value"; the first poll
            // hands it to the channel.
            hook: Some(Err(item)),
        }
    }
    /// Use this channel as an asynchronous item sink.
    pub fn sink(&self) -> SendSink<T> {
        SendSink(SendFuture {
            shared: &self.shared,
            // No pending item yet; `start_send` installs one.
            hook: None,
        })
    }
}
/// A future that sends a value into a channel.
pub struct SendFuture<'a, T: Unpin> {
    shared: &'a Shared<T>,
    // Only none after dropping
    // `Err(item)`: the value has not yet been handed to the channel.
    // `Ok(hook)`: the value is parked in a send slot awaiting a receiver.
    hook: Option<Result<Arc<Hook<T, AsyncSignal>>, T>>,
}
impl<'a, T: Unpin> Drop for SendFuture<'a, T> {
    fn drop(&mut self) {
        // If we were parked in the pending-senders queue, unregister our hook
        // so the channel does not try to wake a dead future. Signal addresses
        // are compared instead of `Arc::ptr_eq` (see the note in `RecvFut`).
        if let Some(Ok(hook)) = self.hook.take() {
            let hook: Arc<Hook<T, dyn Signal>> = hook;
            wait_lock(&self.shared.chan).sending
                .as_mut()
                .unwrap().1
                .retain(|s| s.signal().as_any() as *const _ != hook.signal().as_any() as *const _);
        }
    }
}
impl<'a, T: Unpin> Future for SendFuture<'a, T> {
    type Output = Result<(), SendError<T>>;
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        if let Some(Ok(hook)) = self.hook.as_ref() {
            // Already parked: the send completes once a receiver empties our slot.
            return if hook.is_empty() {
                Poll::Ready(Ok(()))
            } else if self.shared.is_disconnected() {
                // Receiver went away: try to recover the unsent item so it
                // can be returned inside the error.
                match self.hook.take().unwrap() {
                    Err(item) => Poll::Ready(Err(SendError(item))),
                    Ok(hook) => match hook.try_take() {
                        Some(item) => Poll::Ready(Err(SendError(item))),
                        // The item was taken just before disconnection, so
                        // the send actually succeeded.
                        None => Poll::Ready(Ok(())),
                    },
                }
            } else {
                // NOTE(review): the parked hook keeps the waker from the
                // first poll; a later poll with a different waker does not
                // refresh it — TODO confirm against upstream flume.
                Poll::Pending
            };
        } else {
            // First poll: attempt the send now, parking a hook that wakes
            // this task if the channel is full.
            self.shared.send(
                // item
                match self.hook.take().unwrap() {
                    Err(item) => item,
                    Ok(_) => return Poll::Ready(Ok(())),
                },
                // should_block
                true,
                // make_signal
                |msg| Hook::slot(Some(msg), AsyncSignal(cx.waker().clone(), AtomicBool::new(false))),
                // do_block
                |hook| {
                    self.hook = Some(Ok(hook));
                    Poll::Pending
                }
            )
            .map(|r| r.map_err(|err| match err {
                TrySendTimeoutError::Disconnected(msg) => SendError(msg),
                // We asked to block, so timeout errors are impossible here.
                _ => unreachable!(),
            }))
        }
    }
}
impl<'a, T: Unpin> FusedFuture for SendFuture<'a, T> {
    // Once the channel is disconnected no send can ever succeed again.
    fn is_terminated(&self) -> bool {
        self.shared.is_disconnected()
    }
}
/// A sink that allows sending values into a channel.
pub struct SendSink<'a, T: Unpin>(SendFuture<'a, T>);
impl<'a, T: Unpin> Sink<T> for SendSink<'a, T> {
    type Error = SendError<T>;
    // Readiness is "the previous send future has completed".
    fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Pin::new(&mut self.0).poll(cx)
    }
    // Replace the inner future with a fresh one carrying the new item; it is
    // driven by the subsequent poll_ready/poll_flush calls.
    fn start_send(mut self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> {
        self.0 = SendFuture {
            shared: &self.0.shared,
            hook: Some(Err(item)),
        };
        Ok(())
    }
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Pin::new(&mut self.0).poll(cx) // TODO: A different strategy here?
    }
    fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Pin::new(&mut self.0).poll(cx) // TODO: A different strategy here?
    }
}
impl<T> Receiver<T> {
    /// Asynchronously wait for an incoming value from the channel associated with this receiver,
    /// returning an error if all channel senders have been dropped.
    pub fn recv_async(&self) -> impl Future<Output=Result<T, RecvError>> + '_ {
        RecvFut::new(&self.shared)
    }
    /// Use this channel as an asynchronous stream of items.
    // Both methods return the same `RecvFut`, which implements both `Future`
    // and `Stream`.
    pub fn stream(&self) -> impl Stream<Item=T> + '_ {
        RecvFut::new(&self.shared)
    }
}
/// Future (and stream) returned by `Receiver::recv_async`/`Receiver::stream`.
struct RecvFut<'a, T> {
    shared: &'a Shared<T>,
    // `Some` once this future has parked itself in the channel's waiting list.
    hook: Option<Arc<Hook<T, AsyncSignal>>>,
}
impl<'a, T> RecvFut<'a, T> {
    fn new(shared: &'a Shared<T>) -> Self {
        Self {
            shared,
            hook: None,
        }
    }
}
impl<'a, T> Drop for RecvFut<'a, T> {
    fn drop(&mut self) {
        // Unregister ourselves from the waiting list so the channel does not
        // wake a dead future.
        if let Some(hook) = self.hook.take() {
            let hook: Arc<Hook<T, dyn Signal>> = hook;
            let mut chan = wait_lock(&self.shared.chan);
            // We'd like to use `Arc::ptr_eq` here but it doesn't seem to work consistently with wide pointers?
            chan.waiting.retain(|s| s.signal().as_any() as *const _ != hook.signal().as_any() as *const _);
            if hook.signal().as_any().downcast_ref::<AsyncSignal>().unwrap().1.load(Ordering::SeqCst) {
                // If this signal has been fired, but we're being dropped (and so not listening to it),
                // pass the signal on to another receiver
                chan.try_wake_receiver_if_pending();
            }
        }
    }
}
impl<'a, T> Future for RecvFut<'a, T> {
    type Output = Result<T, RecvError>;
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        if self.hook.is_some() {
            // We were woken after parking; try to drain the channel directly.
            if let Ok(msg) = self.shared.recv_sync(None) {
                Poll::Ready(Ok(msg))
            } else if self.shared.is_disconnected() {
                Poll::Ready(Err(RecvError::Disconnected))
            } else {
                // NOTE(review): the parked hook keeps the waker from the
                // first poll; a later poll with a different waker does not
                // refresh it — TODO confirm against upstream flume.
                Poll::Pending
            }
        } else {
            // First poll: attempt a receive now, parking a trigger hook that
            // wakes this task when an item (or disconnection) arrives.
            self.shared.recv(
                // should_block
                true,
                // make_signal
                || Hook::trigger(AsyncSignal(cx.waker().clone(), AtomicBool::new(false))),
                // do_block
                |hook| {
                    self.hook = Some(hook);
                    Poll::Pending
                }
            )
            .map(|r| r.map_err(|err| match err {
                TryRecvTimeoutError::Disconnected => RecvError::Disconnected,
                // We asked to block, so timeout errors are impossible here.
                _ => unreachable!(),
            }))
        }
    }
}
impl<'a, T> FusedFuture for RecvFut<'a, T> {
    // Once disconnected no further receive can succeed.
    fn is_terminated(&self) -> bool {
        self.shared.is_disconnected()
    }
}
impl<'a, T> Stream for RecvFut<'a, T> {
    type Item = T;
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match self.as_mut().poll(cx) {
            Poll::Pending => return Poll::Pending,
            Poll::Ready(item) => {
                // Replace the recv future for every item we receive
                *self = RecvFut::new(self.shared);
                // A disconnect error ends the stream (`ok()` -> `None`).
                Poll::Ready(item.ok())
            },
        }
    }
}
impl<'a, T> FusedStream for RecvFut<'a, T> {
    fn is_terminated(&self) -> bool {
        self.shared.is_disconnected()
    }
}
| 32.342975 | 111 | 0.521145 |
7a15416bf83d8aee3122f7ed3ea5512228fb42f0 | 515 | use crate::factory::GUIFactory;
use crate::buttons::Button;
use crate::progress_bar::ProgressBar;
/// Client side of the abstract-factory pattern: owns one widget of each kind
/// behind trait objects, so the factory chooses the concrete implementations.
pub struct Application {
    button: Box<dyn Button>,
    progress_bar: Box<dyn ProgressBar>,
}
impl Application {
pub fn create(factory: Box<dyn GUIFactory>) -> Application {
Application {
button: factory.create_button(),
progress_bar: factory.create_progress_bar(),
}
}
pub fn draw(&self) {
self.button.draw();
self.progress_bar.draw();
}
} | 23.409091 | 64 | 0.63301 |
03b9809d05d2e330aa0f2490e34dce48dde1df76 | 2,197 | use lazy_static::lazy_static;
use rdev::{listen, simulate, Button, Event, EventType, Key};
use serial_test::serial;
use std::error::Error;
use std::iter::Iterator;
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Mutex;
use std::thread;
use std::time::Duration;
lazy_static! {
    // Global channel bridging the rdev listener callback (which runs on its
    // own thread) with the test body. Both halves sit behind mutexes because
    // a `lazy_static` value must be `Sync`.
    static ref EVENT_CHANNEL: (Mutex<Sender<Event>>, Mutex<Receiver<Event>>) = {
        let (send, recv) = channel();
        (Mutex::new(send), Mutex::new(recv))
    };
}
/// Callback handed to `rdev::listen`: forwards every captured event into the
/// shared `EVENT_CHANNEL` for the test body to inspect.
fn send_event(event: Event) {
    let sender = EVENT_CHANNEL.0.lock().expect("Failed to unlock Mutex");
    sender
        .send(event)
        .expect("Receiving end of EVENT_CHANNEL was closed");
}
/// Spawn a global input listener, then simulate each event in `events` and
/// assert it is observed by the listener within one second.
///
/// The listener thread is detached (never joined) because `listen` blocks
/// forever; the initial sleep gives the OS time to register the hook.
fn sim_then_listen(events: &mut dyn Iterator<Item = EventType>) -> Result<(), Box<dyn Error>> {
    // spawn new thread because listen blocks
    let _listener = thread::spawn(move || {
        listen(send_event).expect("Could not listen");
    });
    let second = Duration::from_millis(1000);
    thread::sleep(second);
    let recv = EVENT_CHANNEL.1.lock()?;
    for event in events {
        simulate(&event)?;
        let recieved_event = recv.recv_timeout(second).expect("No events to recieve");
        assert_eq!(recieved_event.event_type, event);
    }
    Ok(())
}
#[test]
#[serial]
fn test_listen_and_simulate() -> Result<(), Box<dyn Error>> {
    // wait for user input from keyboard to stop
    // (i.e. the return/enter keypress to run test command)
    thread::sleep(Duration::from_millis(50));
    // Key, button, and wheel events to round-trip through simulate/listen.
    let events = vec![
        //TODO: fix sending shift keypress events on linux
        //EventType::KeyPress(Key::ShiftLeft),
        EventType::KeyPress(Key::KeyS),
        EventType::KeyRelease(Key::KeyS),
        EventType::ButtonPress(Button::Right),
        EventType::ButtonRelease(Button::Right),
        EventType::Wheel {
            delta_x: 0,
            delta_y: 1,
        },
        EventType::Wheel {
            delta_x: 0,
            delta_y: -1,
        },
    ]
    .into_iter();
    // Sweep the cursor along the diagonal to exercise mouse-move events too.
    let click_events = (0..480).map(|pixel| EventType::MouseMove {
        x: pixel as f64,
        y: pixel as f64,
    });
    let mut events = events.chain(click_events);
    sim_then_listen(&mut events)
}
| 29.293333 | 95 | 0.612198 |
67717579d57ecabc8db36236c01a8ea85b46af38 | 178 | use crate::constants::MAX_LOCKOUT_HISTORY;
// Fixed-size array of `MAX_LOCKOUT_HISTORY + 1` stake totals — presumably one
// bucket per vote lockout depth plus one; confirm against the producers.
pub type BlockCommitmentArray = [u64; MAX_LOCKOUT_HISTORY + 1];
// Alias for the SDK's commitment level enum.
pub type Commitment = solana_sdk::commitment_config::CommitmentLevel;
| 35.6 | 69 | 0.814607 |
56f703ee718a0ffc0d1b82d2ed05c17fed32a884 | 29,415 | use crate::avm1::Object as Avm1Object;
use crate::avm2::{
Activation as Avm2Activation, Error as Avm2Error, Object as Avm2Object,
StageObject as Avm2StageObject, TObject as Avm2TObject, Value as Avm2Value,
};
use crate::backend::ui::MouseCursor;
use crate::context::{RenderContext, UpdateContext};
use crate::display_object::avm1_button::{ButtonState, ButtonTracking};
use crate::display_object::container::{dispatch_added_event, dispatch_removed_event};
use crate::display_object::{DisplayObjectBase, MovieClip, TDisplayObject};
use crate::events::{ClipEvent, ClipEventResult};
use crate::prelude::*;
use crate::tag_utils::{SwfMovie, SwfSlice};
use crate::types::{Degrees, Percent};
use crate::vminterface::Instantiator;
use gc_arena::{Collect, GcCell, MutationContext};
use std::sync::Arc;
/// An AVM2 (ActionScript 3 `SimpleButton`) button display object.
/// `Copy` is cheap: all state lives behind the garbage-collected cell.
#[derive(Clone, Debug, Collect, Copy)]
#[collect(no_drop)]
pub struct Avm2Button<'gc>(GcCell<'gc, Avm2ButtonData<'gc>>);
#[derive(Clone, Debug, Collect)]
#[collect(no_drop)]
pub struct Avm2ButtonData<'gc> {
    /// Display-object state common to all display objects.
    base: DisplayObjectBase<'gc>,
    /// SWF-defined data (records, sounds) shared by all instances of this button.
    static_data: GcCell<'gc, ButtonStatic>,
    /// The current button state to render.
    state: ButtonState,
    /// The display object tree to render when the button is in the UP state.
    up_state: Option<DisplayObject<'gc>>,
    /// The display object tree to render when the button is in the OVER state.
    over_state: Option<DisplayObject<'gc>>,
    /// The display object tree to render when the button is in the DOWN state.
    down_state: Option<DisplayObject<'gc>>,
    /// The display object tree to use for mouse hit checks.
    hit_area: Option<DisplayObject<'gc>>,
    /// The current tracking mode of this button.
    tracking: ButtonTracking,
    /// The class of this button.
    ///
    /// If not specified in `SymbolClass`, this will be
    /// `flash.display.SimpleButton`.
    class: Avm2Object<'gc>,
    /// The AVM2 representation of this button.
    object: Option<Avm2Object<'gc>>,
    /// If this button needs to have it's child states constructed, or not.
    ///
    /// All buttons start out unconstructed and have this flag set `true`.
    /// This flag is consumed during frame construction.
    needs_frame_construction: bool,
    /// If this button needs to have it's AVM2 side initialized, or not.
    ///
    /// All buttons start out not needing AVM2 initialization.
    needs_avm2_initialization: bool,
    /// Whether this button currently holds focus.
    has_focus: bool,
    /// Whether this button is enabled (`true` on construction).
    enabled: bool,
    /// Whether the hand cursor is used on hover — presumably mirrors the AVM2
    /// `useHandCursor` property; confirm against the property bindings.
    use_hand_cursor: bool,
    /// Skip the next `run_frame` call.
    ///
    /// This flag exists due to a really odd feature of buttons: they run their
    /// children for one frame before parents can run. Then they go back to the
    /// normal AVM2 execution order for future frames.
    skip_current_frame: bool,
}
impl<'gc> Avm2Button<'gc> {
    /// Construct an `Avm2Button` from a SWF `DefineButton`-family tag.
    ///
    /// All state trees start unconstructed (`needs_frame_construction` is
    /// `true`), sounds are attached later via `set_sounds`, and the class
    /// defaults to `SimpleButton` until a `SymbolClass` overrides it.
    pub fn from_swf_tag(
        button: &swf::Button,
        source_movie: &SwfSlice,
        context: &mut UpdateContext<'_, 'gc, '_>,
    ) -> Self {
        let static_data = ButtonStatic {
            swf: source_movie.movie.clone(),
            id: button.id,
            records: button.records.clone(),
            // Filled in by `set_sounds` when a DefineButtonSound tag arrives.
            up_to_over_sound: None,
            over_to_down_sound: None,
            down_to_over_sound: None,
            over_to_up_sound: None,
        };
        Avm2Button(GcCell::allocate(
            context.gc_context,
            Avm2ButtonData {
                base: Default::default(),
                static_data: GcCell::allocate(context.gc_context, static_data),
                state: self::ButtonState::Up,
                hit_area: None,
                up_state: None,
                over_state: None,
                down_state: None,
                class: context.avm2.classes().simplebutton,
                object: None,
                needs_frame_construction: true,
                needs_avm2_initialization: false,
                tracking: if button.is_track_as_menu {
                    ButtonTracking::Menu
                } else {
                    ButtonTracking::Push
                },
                has_focus: false,
                enabled: true,
                use_hand_cursor: true,
                skip_current_frame: false,
            },
        ))
    }
    /// Construct a button with no records at all, backed by an empty movie
    /// at the current SWF version (id 0, push tracking).
    pub fn empty_button(context: &mut UpdateContext<'_, 'gc, '_>) -> Self {
        let movie = Arc::new(SwfMovie::empty(context.swf.version()));
        let button_record = swf::Button {
            id: 0,
            is_track_as_menu: false,
            records: Vec::new(),
            actions: Vec::new(),
        };
        Self::from_swf_tag(&button_record, &movie.into(), context)
    }
pub fn set_sounds(self, gc_context: MutationContext<'gc, '_>, sounds: swf::ButtonSounds) {
let button = self.0.write(gc_context);
let mut static_data = button.static_data.write(gc_context);
static_data.up_to_over_sound = sounds.up_to_over_sound;
static_data.over_to_down_sound = sounds.over_to_down_sound;
static_data.down_to_over_sound = sounds.down_to_over_sound;
static_data.over_to_up_sound = sounds.over_to_up_sound;
}
/// Handles the ancient DefineButtonCxform SWF tag.
/// Set the color transform for all children of each state.
pub fn set_colors(
self,
gc_context: MutationContext<'gc, '_>,
color_transforms: &[swf::ColorTransform],
) {
let button = self.0.write(gc_context);
let mut static_data = button.static_data.write(gc_context);
// This tag isn't documented well in SWF19. It is only used in very old SWF<=2 content.
// It applies color transforms to every character in a button, in sequence(?).
for (record, color_transform) in static_data.records.iter_mut().zip(color_transforms.iter())
{
record.color_transform = color_transform.clone();
}
}
    /// Construct a given state of the button and return its containing
    /// DisplayObject.
    ///
    /// In contrast to AVM1Button, the AVM2 variety constructs all of its
    /// children once and stores them in four named slots, either on their own
    /// (if they are singular) or in `Sprite`s created specifically to store
    /// button children. This means that, for example, a child that exists in
    /// multiple states in the SWF will actually be instantiated multiple
    /// times.
    ///
    /// If the boolean parameter is `true`, then the caller of this function
    /// should signal events on all children of the returned display object.
    fn create_state(
        self,
        context: &mut UpdateContext<'_, 'gc, '_>,
        swf_state: swf::ButtonState,
    ) -> (DisplayObject<'gc>, bool) {
        let movie = self
            .movie()
            .expect("All SWF-defined buttons should have movies");
        let empty_slice = SwfSlice::empty(movie.clone());
        let sprite_class = context.avm2.classes().sprite;
        let mut children = Vec::new();
        let static_data = self.0.read().static_data;
        // Instantiate every record that participates in the requested state.
        for record in static_data.read().records.iter() {
            if record.states.contains(swf_state) {
                match context
                    .library
                    .library_for_movie_mut(movie.clone())
                    .instantiate_by_id(record.id, context.gc_context)
                {
                    Ok(child) => {
                        child.set_matrix(context.gc_context, &record.matrix.into());
                        child.set_depth(context.gc_context, record.depth.into());
                        // The hit-test state is never rendered, so its color
                        // transform is irrelevant and skipped.
                        if swf_state != swf::ButtonState::HIT_TEST {
                            child.set_color_transform(
                                context.gc_context,
                                &record.color_transform.clone().into(),
                            );
                        }
                        children.push((child, record.depth));
                    }
                    Err(error) => {
                        // A missing/invalid character is logged and skipped,
                        // not fatal.
                        log::error!(
                            "Button ID {}: could not instantiate child ID {}: {}",
                            static_data.read().id,
                            record.id,
                            error
                        );
                    }
                };
            }
        }
        if children.len() == 1 {
            // A single child becomes the state object directly; the caller
            // does NOT need to fire events for it (`false`).
            let child = children.first().cloned().unwrap().0;
            child.set_parent(context.gc_context, Some(self.into()));
            child.post_instantiation(context, child, None, Instantiator::Movie, false);
            child.construct_frame(context);
            (child, false)
        } else {
            // Zero or many children: wrap them in a synthetic Sprite.
            let state_sprite = MovieClip::new(empty_slice, context.gc_context);
            state_sprite.set_avm2_class(context.gc_context, Some(sprite_class));
            state_sprite.set_parent(context.gc_context, Some(self.into()));
            state_sprite.construct_frame(context);
            for (child, depth) in children {
                // `parent` returns `null` for these grandchildren during construction time, even though
                // `stage` and `root` will be defined. Set the parent temporarily to the button itself so
                // that `parent` is `null` (`DisplayObject::avm2_parent` checks that the parent is a container),
                // and then properly set the parent to the state Sprite afterwards.
                state_sprite.replace_at_depth(context, child, depth.into());
                child.set_parent(context.gc_context, Some(self.into()));
                child.post_instantiation(context, child, None, Instantiator::Movie, false);
                child.construct_frame(context);
                child.set_parent(context.gc_context, Some(state_sprite.into()));
            }
            (state_sprite.into(), true)
        }
    }
/// Get the rendered state of the button.
pub fn state(self) -> ButtonState {
self.0.read().state
}
    /// Change the rendered state of the button.
    ///
    /// Detaches all four state children from this button, then re-attaches
    /// only the child corresponding to the new state.
    pub fn set_state(self, context: &mut UpdateContext<'_, 'gc, '_>, state: ButtonState) {
        // The write guard here is a temporary and is released before the
        // read guard below is taken.
        self.0.write(context.gc_context).state = state;
        let button = self.0.read();
        if let Some(state) = button.up_state {
            state.set_parent(context.gc_context, None);
        }
        if let Some(state) = button.over_state {
            state.set_parent(context.gc_context, None);
        }
        if let Some(state) = button.down_state {
            state.set_parent(context.gc_context, None);
        }
        if let Some(state) = button.hit_area {
            state.set_parent(context.gc_context, None);
        }
        // Re-parent only the child for the newly selected state.
        if let Some(state) = self.get_state_child(state.into()) {
            state.set_parent(context.gc_context, Some(self.into()));
        }
    }
/// Get the display object that represents a particular button state.
pub fn get_state_child(self, state: swf::ButtonState) -> Option<DisplayObject<'gc>> {
match state {
swf::ButtonState::UP => self.0.read().up_state,
swf::ButtonState::OVER => self.0.read().over_state,
swf::ButtonState::DOWN => self.0.read().down_state,
swf::ButtonState::HIT_TEST => self.0.read().hit_area,
_ => None,
}
}
    /// Set the display object that represents a particular button state.
    ///
    /// Handles the full swap protocol: unhooking the new child from its old
    /// parent, unloading the replaced child, and (when the affected state is
    /// the currently rendered one) dispatching added/removed events and
    /// running the new child's frame lifecycle.
    pub fn set_state_child(
        self,
        context: &mut UpdateContext<'_, 'gc, '_>,
        state: swf::ButtonState,
        child: Option<DisplayObject<'gc>>,
    ) {
        // Captured before any re-parenting so the `added` event below can
        // report whether the child was already on stage.
        let child_was_on_stage = child.map(|c| c.is_on_stage(context)).unwrap_or(false);
        let old_state_child = self.get_state_child(state);
        let is_cur_state = swf::ButtonState::from(self.0.read().state) == state;
        match state {
            swf::ButtonState::UP => self.0.write(context.gc_context).up_state = child,
            swf::ButtonState::OVER => self.0.write(context.gc_context).over_state = child,
            swf::ButtonState::DOWN => self.0.write(context.gc_context).down_state = child,
            swf::ButtonState::HIT_TEST => self.0.write(context.gc_context).hit_area = child,
            _ => (),
        }
        if let Some(child) = child {
            // Detach the incoming child from any previous container parent.
            if let Some(mut parent) = child.parent().and_then(|parent| parent.as_container()) {
                parent.remove_child(context, child, Lists::all());
            }
            // Only the currently displayed state is actually parented to us.
            if is_cur_state {
                child.set_parent(context.gc_context, Some(self.into()));
            }
        }
        if let Some(old_state_child) = old_state_child {
            old_state_child.unload(context);
            old_state_child.set_parent(context.gc_context, None);
        }
        if is_cur_state {
            // Event order: `added` for the new child, then `removed` for the
            // old one, then frameConstructed on the new child.
            if let Some(child) = child {
                dispatch_added_event(self.into(), child, child_was_on_stage, context);
            }
            if let Some(old_state_child) = old_state_child {
                dispatch_removed_event(old_state_child, context);
            }
            if let Some(child) = child {
                child.frame_constructed(context);
            }
        }
        // The new child always gets a frame run, but frame scripts and
        // exitFrame only fire if it is the visible state.
        if let Some(child) = child {
            child.run_frame_avm2(context);
        }
        if is_cur_state {
            if let Some(child) = child {
                child.run_frame_scripts(context);
                child.exit_frame(context);
            }
        }
    }
pub fn enabled(self) -> bool {
self.0.read().enabled
}
pub fn set_enabled(self, context: &mut UpdateContext<'_, 'gc, '_>, enabled: bool) {
self.0.write(context.gc_context).enabled = enabled;
if !enabled {
self.set_state(context, ButtonState::Up);
}
}
pub fn use_hand_cursor(self) -> bool {
self.0.read().use_hand_cursor
}
pub fn set_use_hand_cursor(
self,
context: &mut UpdateContext<'_, 'gc, '_>,
use_hand_cursor: bool,
) {
self.0.write(context.gc_context).use_hand_cursor = use_hand_cursor;
}
pub fn button_tracking(self) -> ButtonTracking {
self.0.read().tracking
}
pub fn set_button_tracking(
self,
context: &mut UpdateContext<'_, 'gc, '_>,
tracking: ButtonTracking,
) {
self.0.write(context.gc_context).tracking = tracking;
}
pub fn set_avm2_class(self, mc: MutationContext<'gc, '_>, class: Avm2Object<'gc>) {
self.0.write(mc).class = class;
}
}
impl<'gc> TDisplayObject<'gc> for Avm2Button<'gc> {
impl_display_object!(base);
fn id(&self) -> CharacterId {
self.0.read().static_data.read().id
}
fn movie(&self) -> Option<Arc<SwfMovie>> {
Some(self.0.read().static_data.read().swf.clone())
}
    fn post_instantiation(
        &self,
        context: &mut UpdateContext<'_, 'gc, '_>,
        _display_object: DisplayObject<'gc>,
        _init_object: Option<Avm1Object<'gc>>,
        _instantiated_by: Instantiator,
        run_frame: bool,
    ) {
        // Buttons receive an auto-generated instance name like any other
        // AVM2 display object, and always start rendering the Up state.
        self.set_default_instance_name(context);
        if run_frame {
            self.run_frame_avm2(context);
        }
        self.set_state(context, ButtonState::Up);
    }
fn construct_frame(&self, context: &mut UpdateContext<'_, 'gc, '_>) {
let hit_area = self.0.read().hit_area;
if let Some(hit_area) = hit_area {
hit_area.construct_frame(context);
}
let up_state = self.0.read().up_state;
if let Some(up_state) = up_state {
up_state.construct_frame(context);
}
let down_state = self.0.read().up_state;
if let Some(down_state) = down_state {
down_state.construct_frame(context);
}
let over_state = self.0.read().over_state;
if let Some(over_state) = over_state {
over_state.construct_frame(context);
}
let needs_avm2_construction = self.0.read().object.is_none();
let class = self.0.read().class;
if needs_avm2_construction {
let mut activation = Avm2Activation::from_nothing(context.reborrow());
match Avm2StageObject::for_display_object(&mut activation, (*self).into(), class) {
Ok(object) => self.0.write(context.gc_context).object = Some(object.into()),
Err(e) => log::error!("Got {} when constructing AVM2 side of button", e),
};
}
let needs_frame_construction = self.0.read().needs_frame_construction;
if needs_frame_construction {
let (up_state, up_should_fire) = self.create_state(context, swf::ButtonState::UP);
let (over_state, over_should_fire) = self.create_state(context, swf::ButtonState::OVER);
let (down_state, down_should_fire) = self.create_state(context, swf::ButtonState::DOWN);
let (hit_area, hit_should_fire) =
self.create_state(context, swf::ButtonState::HIT_TEST);
let mut write = self.0.write(context.gc_context);
write.up_state = Some(up_state);
write.over_state = Some(over_state);
write.down_state = Some(down_state);
write.hit_area = Some(hit_area);
write.skip_current_frame = true;
write.needs_frame_construction = false;
drop(write);
if up_should_fire {
up_state.post_instantiation(context, up_state, None, Instantiator::Movie, false);
if let Some(up_container) = up_state.as_container() {
for (_depth, child) in up_container.iter_depth_list() {
dispatch_added_event((*self).into(), child, false, context);
}
}
}
if over_should_fire {
over_state.post_instantiation(
context,
over_state,
None,
Instantiator::Movie,
false,
);
if let Some(over_container) = over_state.as_container() {
for (_depth, child) in over_container.iter_depth_list() {
dispatch_added_event((*self).into(), child, false, context);
}
}
}
if down_should_fire {
down_state.post_instantiation(
context,
down_state,
None,
Instantiator::Movie,
false,
);
if let Some(down_container) = down_state.as_container() {
for (_depth, child) in down_container.iter_depth_list() {
dispatch_added_event((*self).into(), child, false, context);
}
}
}
if hit_should_fire {
hit_area.post_instantiation(context, hit_area, None, Instantiator::Movie, false);
if let Some(hit_container) = hit_area.as_container() {
for (_depth, child) in hit_container.iter_depth_list() {
dispatch_added_event((*self).into(), child, false, context);
}
}
}
if needs_avm2_construction {
self.0.write(context.gc_context).needs_avm2_initialization = true;
self.frame_constructed(context);
self.set_state(context, ButtonState::Over);
//NOTE: Yes, we do have to run these in a different order from the
//regular run_frame method.
up_state.run_frame_avm2(context);
over_state.run_frame_avm2(context);
down_state.run_frame_avm2(context);
hit_area.run_frame_avm2(context);
up_state.run_frame_scripts(context);
over_state.run_frame_scripts(context);
down_state.run_frame_scripts(context);
hit_area.run_frame_scripts(context);
self.exit_frame(context);
}
} else if self.0.read().needs_avm2_initialization {
self.0.write(context.gc_context).needs_avm2_initialization = false;
let avm2_object = self.0.read().object;
if let Some(avm2_object) = avm2_object {
let mut constr_thing = || {
let mut activation = Avm2Activation::from_nothing(context.reborrow());
class.call_native_init(Some(avm2_object), &[], &mut activation, Some(class))?;
Ok(())
};
let result: Result<(), Avm2Error> = constr_thing();
if let Err(e) = result {
log::error!("Got {} when constructing AVM2 side of button", e);
}
}
}
}
    fn run_frame_avm2(&self, context: &mut UpdateContext<'_, 'gc, '_>) {
        // Children were already run once during frame construction; skip
        // exactly one tick afterwards (flag set in `construct_frame`).
        if self.0.read().skip_current_frame {
            self.0.write(context.gc_context).skip_current_frame = false;
            return;
        }
        // Run each state child in turn. Each field is re-read immediately
        // before its call (not snapshotted up front), and each read guard is
        // dropped before recursing. Order: hit, up, down, over.
        let hit_area = self.0.read().hit_area;
        if let Some(hit_area) = hit_area {
            hit_area.run_frame_avm2(context);
        }
        let up_state = self.0.read().up_state;
        if let Some(up_state) = up_state {
            up_state.run_frame_avm2(context);
        }
        let down_state = self.0.read().down_state;
        if let Some(down_state) = down_state {
            down_state.run_frame_avm2(context);
        }
        let over_state = self.0.read().over_state;
        if let Some(over_state) = over_state {
            over_state.run_frame_avm2(context);
        }
    }
    fn run_frame_scripts(self, context: &mut UpdateContext<'_, 'gc, '_>) {
        // Run frame scripts on each state child. As in `run_frame_avm2`,
        // every field is re-read right before its call so a script that
        // replaces a later state slot is picked up. Order: hit, up, down,
        // over.
        let hit_area = self.0.read().hit_area;
        if let Some(hit_area) = hit_area {
            hit_area.run_frame_scripts(context);
        }
        let up_state = self.0.read().up_state;
        if let Some(up_state) = up_state {
            up_state.run_frame_scripts(context);
        }
        let down_state = self.0.read().down_state;
        if let Some(down_state) = down_state {
            down_state.run_frame_scripts(context);
        }
        let over_state = self.0.read().over_state;
        if let Some(over_state) = over_state {
            over_state.run_frame_scripts(context);
        }
    }
fn render_self(&self, context: &mut RenderContext<'_, 'gc>) {
let state = self.0.read().state;
let current_state = self.get_state_child(state.into());
if let Some(state) = current_state {
state.render(context);
}
}
fn self_bounds(&self) -> BoundingBox {
// No inherent bounds; contains child DisplayObjects.
BoundingBox::default()
}
fn hit_test_shape(
&self,
context: &mut UpdateContext<'_, 'gc, '_>,
point: (Twips, Twips),
options: HitTestOptions,
) -> bool {
if !options.contains(HitTestOptions::SKIP_INVISIBLE) || self.visible() {
let state = self.0.read().state;
if let Some(child) = self.get_state_child(state.into()) {
// hit_area is not actually a child, so transform point into local space before passing it down.
let point = self.global_to_local(point);
if child.hit_test_shape(context, point, options) {
return true;
}
}
}
false
}
fn mouse_pick(
&self,
context: &mut UpdateContext<'_, 'gc, '_>,
point: (Twips, Twips),
require_button_mode: bool,
) -> Option<DisplayObject<'gc>> {
// The button is hovered if the mouse is over any child nodes.
if self.visible() {
let state = self.0.read().state;
let state_child = self.get_state_child(state.into());
if let Some(state_child) = state_child {
let mouse_pick = state_child.mouse_pick(context, point, require_button_mode);
if mouse_pick.is_some() {
return mouse_pick;
}
}
let hit_area = self.0.read().hit_area;
if let Some(hit_area) = hit_area {
// hit_area is not actually a child, so transform point into local space before passing it down.
let point = self.global_to_local(point);
if hit_area.hit_test_shape(context, point, HitTestOptions::MOUSE_PICK) {
return Some((*self).into());
}
}
}
None
}
fn mouse_cursor(&self) -> MouseCursor {
if self.use_hand_cursor() {
MouseCursor::Hand
} else {
MouseCursor::Arrow
}
}
fn object2(&self) -> Avm2Value<'gc> {
self.0
.read()
.object
.map(Avm2Value::from)
.unwrap_or(Avm2Value::Undefined)
}
fn set_object2(&mut self, mc: MutationContext<'gc, '_>, to: Avm2Object<'gc>) {
self.0.write(mc).object = Some(to);
}
    // Downcast support: this display object *is* an AVM2 button.
    fn as_avm2_button(&self) -> Option<Self> {
        Some(*self)
    }
fn allow_as_mask(&self) -> bool {
let state = self.0.read().state;
let current_state = self.get_state_child(state.into());
if let Some(current_state) = current_state.and_then(|cs| cs.as_container()) {
current_state.is_empty()
} else {
false
}
}
    /// Executes and propagates the given clip event.
    /// Events execute inside-out; the deepest child will react first, followed by its parent, and
    /// so forth.
    fn handle_clip_event(
        &self,
        context: &mut UpdateContext<'_, 'gc, '_>,
        event: ClipEvent,
    ) -> ClipEventResult {
        if !self.visible() {
            return ClipEventResult::NotHandled;
        }
        // A disabled button still sees KeyPress events, nothing else.
        if !self.enabled() && !matches!(event, ClipEvent::KeyPress { .. }) {
            return ClipEventResult::NotHandled;
        }
        // Give the current state's child first crack at propagating events.
        if event.propagates() {
            let state = self.0.read().state;
            let current_state = self.get_state_child(state.into());
            if let Some(current_state) = current_state {
                if current_state.handle_clip_event(context, event) == ClipEventResult::Handled {
                    return ClipEventResult::Handled;
                }
            }
        }
        // NOTE(review): `handled` is always `NotHandled` here, so a state
        // change never marks the event handled — confirm this is intended.
        let handled = ClipEventResult::NotHandled;
        let write = self.0.write(context.gc_context);
        // Translate the clip event to a button event, based on how the button state changes.
        let static_data = write.static_data;
        let static_data = static_data.read();
        let (new_state, sound) = match event {
            ClipEvent::DragOut => (ButtonState::Over, None),
            ClipEvent::DragOver => (ButtonState::Down, None),
            ClipEvent::Press => (ButtonState::Down, static_data.over_to_down_sound.as_ref()),
            ClipEvent::Release => (ButtonState::Over, static_data.down_to_over_sound.as_ref()),
            ClipEvent::ReleaseOutside => (ButtonState::Up, static_data.over_to_up_sound.as_ref()),
            ClipEvent::RollOut => (ButtonState::Up, static_data.over_to_up_sound.as_ref()),
            ClipEvent::RollOver => (ButtonState::Over, static_data.up_to_over_sound.as_ref()),
            _ => return ClipEventResult::NotHandled,
        };
        write.play_sound(context, sound);
        if write.state != new_state {
            // The write guard must be released before `set_state` re-borrows
            // the cell.
            drop(write);
            self.set_state(context, new_state);
        }
        handled
    }
    // Buttons can always receive keyboard focus.
    fn is_focusable(&self) -> bool {
        true
    }
fn on_focus_changed(&self, gc_context: MutationContext<'gc, '_>, focused: bool) {
self.0.write(gc_context).has_focus = focused;
}
    fn unload(&self, context: &mut UpdateContext<'_, 'gc, '_>) {
        // Give up keyboard focus if we currently hold it.
        let had_focus = self.0.read().has_focus;
        if had_focus {
            let tracker = context.focus_tracker;
            tracker.set(None, context);
        }
        // Break any mask link in whichever direction it exists.
        if let Some(node) = self.maskee() {
            node.set_masker(context.gc_context, None, true);
        } else if let Some(node) = self.masker() {
            node.set_maskee(context.gc_context, None, true);
        }
        self.set_removed(context.gc_context, true);
    }
}
impl<'gc> Avm2ButtonData<'gc> {
    /// Play a state-transition sound, if both the sound entry and its
    /// character exist in the library. Playback failures are ignored.
    fn play_sound(
        &self,
        context: &mut UpdateContext<'_, 'gc, '_>,
        sound: Option<&swf::ButtonSound>,
    ) {
        if let Some((id, sound_info)) = sound {
            let handle = context
                .library
                .library_for_movie_mut(self.movie())
                .get_sound(*id);
            if let Some(handle) = handle {
                let _ = context.start_sound(handle, sound_info, None, None);
            }
        }
    }
    /// The SWF movie this button's character was defined in.
    fn movie(&self) -> Arc<SwfMovie> {
        let shared = self.static_data.read();
        shared.swf.clone()
    }
}
/// Static data shared between all instances of a button.
#[allow(dead_code)]
#[derive(Clone, Debug, Collect)]
#[collect(require_static)]
struct ButtonStatic {
    /// The movie this button character was defined in.
    swf: Arc<SwfMovie>,
    /// The character ID of this button.
    id: CharacterId,
    /// The button records describing each state's children.
    records: Vec<swf::ButtonRecord>,
    /// The sounds to play on state changes for this button.
    /// These start out `None` and are filled in later by a
    /// `DefineButtonSound` tag (see `Avm2Button::set_sounds`).
    up_to_over_sound: Option<swf::ButtonSound>,
    over_to_down_sound: Option<swf::ButtonSound>,
    down_to_over_sound: Option<swf::ButtonSound>,
    over_to_up_sound: Option<swf::ButtonSound>,
}
| 35.959658 | 112 | 0.574027 |
61ec0974f6c43d62aeb6d72247e819f80cc260e5 | 1,583 | use std::env;
use std::process;
use rs9cc::tokenizer;
/// Entry point: compiles an expression of the form `N(+N|-N)*` given as
/// `argv[1]` into x86-64 assembly (Intel syntax) on stdout, accumulating
/// the result in `rax`. Errors are printed to stderr and exit with code 1.
fn main() {
    let args: Vec<String> = env::args().collect();
    // Exactly one expression argument is required.
    if args.len() != 2 {
        eprintln!("引数の個数が正しくありません");
        process::exit(1);
    }
    // Assembly prologue.
    println!(".intel_syntax noprefix");
    println!(".global main");
    println!("main:");
    let mut tokenizer = tokenizer::Tokenizer::new(&args[1]);
    // The expression must start with a number; it seeds `rax`.
    let fst_num_op = tokenizer.expect_number();
    if fst_num_op.is_none() {
        eprintln!(
            "{}",
            tokenizer.error_at_cur("最初のトークンが数字ではありません")
        );
        process::exit(1)
    }
    let fst_num = fst_num_op.unwrap();
    println!(" mov rax, {}", fst_num);
    // Remaining input is a sequence of `+N` / `-N` terms.
    while !tokenizer.expect_eof() {
        if tokenizer.expect_op("+") {
            let num_op = tokenizer.expect_number();
            if num_op.is_none() {
                eprintln!("{}", tokenizer.error_at_cur("予期しない文字列です"));
                process::exit(1);
            }
            let num = num_op.unwrap();
            println!(" add rax, {}", num);
            continue;
        } else if tokenizer.expect_op("-") {
            let num_op = tokenizer.expect_number();
            if num_op.is_none() {
                eprintln!("{}", tokenizer.error_at_cur("予期しない文字列です"));
                process::exit(1);
            }
            let num = num_op.unwrap();
            println!(" sub rax, {}", num);
            continue;
        } else {
            // Anything other than `+` or `-` here is a syntax error.
            eprintln!("{}", tokenizer.error_at_cur("予期しない文字列です"));
            process::exit(1);
        }
    }
    // `main` returns the accumulated value in rax.
    println!(" ret");
    process::exit(0);
}
| 25.95082 | 70 | 0.502843 |
f819983d32913db0b6677485b2d6ab613f0c396a | 9,899 | //! An example of hooking up stdin/stdout to either a TCP or UDP stream.
//!
//! This example will connect to a socket address specified in the argument list
//! and then forward all data read on stdin to the server, printing out all data
//! received on stdout. An optional `--udp` argument can be passed to specify
//! that the connection should be made over UDP instead of TCP, translating each
//! line entered on stdin to a UDP packet to be sent to the remote address.
//!
//! Note that this is not currently optimized for performance, especially
//! around buffer management. Rather it's intended to show an example of
//! working with a client.
//!
//! This example can be quite useful when interacting with the other examples in
//! this repository! Many of them recommend running this as a simple "hook up
//! stdin/stdout to a server" to get up and running.
extern crate futures;
extern crate tokio_core;
extern crate tokio_io;
extern crate bytes;
use std::env;
use std::io::{self, Read, Write};
use std::net::SocketAddr;
use std::thread;
use futures::sync::mpsc;
use futures::{Sink, Future, Stream};
use tokio_core::reactor::Core;
/// Entry point: parses `[--udp] <addr>`, spawns a blocking stdin-reader
/// thread, then pumps the socket's incoming bytes to stdout on the event
/// loop.
fn main() {
    // Determine if we're going to run in TCP or UDP mode
    let mut args = env::args().skip(1).collect::<Vec<_>>();
    let tcp = match args.iter().position(|a| a == "--udp") {
        Some(i) => {
            args.remove(i);
            false
        }
        None => true,
    };
    // Parse what address we're going to connect to
    let addr = args.first().unwrap_or_else(|| {
        panic!("this program requires at least one argument")
    });
    let addr = addr.parse::<SocketAddr>().unwrap();
    // Create the event loop and initiate the connection to the remote server
    let mut core = Core::new().unwrap();
    let handle = core.handle();
    // Right now Tokio doesn't support a handle to stdin running on the event
    // loop, so we farm out that work to a separate thread. This thread will
    // read data (with blocking I/O) from stdin and then send it to the event
    // loop over a standard futures channel.
    let (stdin_tx, stdin_rx) = mpsc::channel(0);
    thread::spawn(|| read_stdin(stdin_tx));
    let stdin_rx = stdin_rx.map_err(|_| panic!()); // errors not possible on rx
    // Now that we've got our stdin read we either set up our TCP connection or
    // our UDP connection to get a stream of bytes we're going to emit to
    // stdout.
    let stdout = if tcp {
        tcp::connect(&addr, &handle, Box::new(stdin_rx))
    } else {
        udp::connect(&addr, &handle, Box::new(stdin_rx))
    };
    // And now with our stream of bytes to write to stdout, we execute that in
    // the event loop! Note that this is doing blocking I/O to emit data to
    // stdout, and in general it's a no-no to do that sort of work on the event
    // loop. In this case, though, we know it's ok as the event loop isn't
    // otherwise running anything useful.
    let mut out = io::stdout();
    core.run(stdout.for_each(|chunk| {
        out.write_all(&chunk)
    })).unwrap();
}
mod tcp {
    use std::io::{self, Read, Write};
    use std::net::{SocketAddr, Shutdown};
    use bytes::{BufMut, BytesMut};
    use futures::prelude::*;
    use tokio_core::net::TcpStream;
    use tokio_core::reactor::Handle;
    use tokio_io::{AsyncRead, AsyncWrite};
    use tokio_io::codec::{Encoder, Decoder};
    /// Connect to `addr` over TCP, forward everything from `stdin` into the
    /// socket, and return the socket's incoming bytes as a stream.
    pub fn connect(addr: &SocketAddr,
                   handle: &Handle,
                   stdin: Box<Stream<Item = Vec<u8>, Error = io::Error>>)
                   -> Box<Stream<Item = BytesMut, Error = io::Error>>
    {
        let tcp = TcpStream::connect(addr, handle);
        let handle = handle.clone();
        // After the TCP connection has been established, we set up our client
        // to start forwarding data.
        //
        // First we use the `Io::framed` method with a simple implementation of
        // a `Codec` (listed below) that just ships bytes around. We then split
        // that in two to work with the stream and sink separately.
        //
        // Half of the work we're going to do is to take all data we receive on
        // `stdin` and send that along the TCP stream (`sink`). The second half
        // is to take all the data we receive (`stream`) and then write that to
        // stdout. We'll be passing this handle back out from this method.
        //
        // You'll also note that we *spawn* the work to read stdin and write it
        // to the TCP stream. This is done to ensure that happens concurrently
        // with us reading data from the stream.
        Box::new(tcp.map(move |stream| {
            let stream = CloseWithShutdown(stream);
            let (sink, stream) = stream.framed(Bytes).split();
            let copy_stdin = stdin.forward(sink)
                .then(|result| {
                    if let Err(e) = result {
                        panic!("failed to write to socket: {}", e)
                    }
                    Ok(())
                });
            handle.spawn(copy_stdin);
            stream
        }).flatten_stream())
    }
    /// A small adapter to layer over our TCP stream which uses the `shutdown`
    /// syscall when the writer side is shut down. This'll allow us to correctly
    /// inform the remote end that we're done writing.
    struct CloseWithShutdown(TcpStream);
    impl Read for CloseWithShutdown {
        fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
            self.0.read(buf)
        }
    }
    impl AsyncRead for CloseWithShutdown {}
    impl Write for CloseWithShutdown {
        fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
            self.0.write(buf)
        }
        fn flush(&mut self) -> io::Result<()> {
            self.0.flush()
        }
    }
    impl AsyncWrite for CloseWithShutdown {
        // Half-close: shut down only the write side so the peer sees EOF
        // while we keep reading.
        fn shutdown(&mut self) -> Poll<(), io::Error> {
            self.0.shutdown(Shutdown::Write)?;
            Ok(().into())
        }
    }
    /// A simple `Codec` implementation that just ships bytes around.
    ///
    /// This type is used for "framing" a TCP stream of bytes but it's really
    /// just a convenient method for us to work with streams/sinks for now.
    /// This'll just take any data read and interpret it as a "frame" and
    /// conversely just shove data into the output location without looking at
    /// it.
    struct Bytes;
    impl Decoder for Bytes {
        type Item = BytesMut;
        type Error = io::Error;
        // Drain the entire read buffer as a single frame.
        fn decode(&mut self, buf: &mut BytesMut) -> io::Result<Option<BytesMut>> {
            if buf.len() > 0 {
                let len = buf.len();
                Ok(Some(buf.split_to(len)))
            } else {
                Ok(None)
            }
        }
        fn decode_eof(&mut self, buf: &mut BytesMut) -> io::Result<Option<BytesMut>> {
            self.decode(buf)
        }
    }
    impl Encoder for Bytes {
        type Item = Vec<u8>;
        type Error = io::Error;
        fn encode(&mut self, data: Vec<u8>, buf: &mut BytesMut) -> io::Result<()> {
            buf.put(&data[..]);
            Ok(())
        }
    }
}
mod udp {
    use std::io;
    use std::net::SocketAddr;
    use bytes::BytesMut;
    use futures::{Future, Stream};
    use tokio_core::net::{UdpCodec, UdpSocket};
    use tokio_core::reactor::Handle;
    /// Bind a local UDP socket, forward each `stdin` chunk to `addr` as one
    /// datagram, and return a stream of datagrams received from `addr`.
    pub fn connect(&addr: &SocketAddr,
                   handle: &Handle,
                   stdin: Box<Stream<Item = Vec<u8>, Error = io::Error>>)
                   -> Box<Stream<Item = BytesMut, Error = io::Error>>
    {
        // We'll bind our UDP socket to a local IP/port, but for now we
        // basically let the OS pick both of those.
        let addr_to_bind = if addr.ip().is_ipv4() {
            "0.0.0.0:0".parse().unwrap()
        } else {
            "[::]:0".parse().unwrap()
        };
        let udp = UdpSocket::bind(&addr_to_bind, handle)
            .expect("failed to bind socket");
        // Like above with TCP we use an instance of `UdpCodec` to transform
        // this UDP socket into a framed sink/stream which operates over
        // discrete values. In this case we're working with *pairs* of socket
        // addresses and byte buffers.
        let (sink, stream) = udp.framed(Bytes).split();
        // All bytes from `stdin` will go to the `addr` specified in our
        // argument list. Like with TCP this is spawned concurrently
        handle.spawn(stdin.map(move |chunk| {
            (addr, chunk)
        }).forward(sink).then(|result| {
            if let Err(e) = result {
                panic!("failed to write to socket: {}", e)
            }
            Ok(())
        }));
        // With UDP we could receive data from any source, so filter out
        // anything coming from a different address
        Box::new(stream.filter_map(move |(src, chunk)| {
            if src == addr {
                Some(chunk.into())
            } else {
                None
            }
        }))
    }
    /// Pass-through UDP codec pairing each datagram with its peer address.
    struct Bytes;
    impl UdpCodec for Bytes {
        type In = (SocketAddr, Vec<u8>);
        type Out = (SocketAddr, Vec<u8>);
        fn decode(&mut self, addr: &SocketAddr, buf: &[u8]) -> io::Result<Self::In> {
            Ok((*addr, buf.to_vec()))
        }
        // Returns the destination address for the outgoing datagram.
        fn encode(&mut self, (addr, buf): Self::Out, into: &mut Vec<u8>) -> SocketAddr {
            into.extend(buf);
            addr
        }
    }
}
// Blocking helper run on its own thread: reads stdin in up-to-1 KB chunks
// and forwards each chunk through the futures channel. Stops on EOF, on a
// read error, or once the receiving end has been dropped.
fn read_stdin(mut tx: mpsc::Sender<Vec<u8>>) {
    let mut input = io::stdin();
    loop {
        let mut chunk = vec![0; 1024];
        let read = match input.read(&mut chunk) {
            Ok(0) | Err(_) => break,
            Ok(n) => n,
        };
        chunk.truncate(read);
        // `send` consumes the sender and yields it back on success.
        match tx.send(chunk).wait() {
            Ok(sender) => tx = sender,
            Err(_) => break,
        }
    }
}
| 34.855634 | 88 | 0.577129 |
fc4654f4e730f268306de41c4dd61209e66999d8 | 64 | mod cpu_6502;
mod cpu_bus;
// Emulator entry point — not wired up yet; panics via `unimplemented!`.
fn main() {
    unimplemented!();
}
| 9.142857 | 21 | 0.625 |
fb7dbe674884a74b3c29ec7d6bb0f77b7399f2ae | 3,352 | use crate::{error, Context, Descriptions, Error};
use locspan::Location;
use treeldr::{Caused, Documentation, Id, MaybeSet, Name, Vocabulary, WithCauses};
/// Layout variant definition.
///
/// NOTE(review): the original comment said "field", but `Build::build`
/// below produces a `treeldr::layout::Variant`, so this models an enum
/// layout's variant — an optional name plus an optional payload layout.
#[derive(Clone)]
pub struct Definition<F> {
    /// Identifier of the variant's node.
    id: Id,
    /// Optional variant name, with provenance causes.
    name: MaybeSet<Name, F>,
    /// Optional payload layout (referenced by `Id`), with provenance causes.
    layout: MaybeSet<Id, F>,
}
impl<F> Definition<F> {
    /// Creates an empty definition (no name, no layout) for the node `id`.
    pub fn new(id: Id) -> Self {
        Self {
            id,
            name: MaybeSet::default(),
            layout: MaybeSet::default(),
        }
    }
    /// Returns the name and its causes, if one has been set.
    pub fn name(&self) -> Option<&WithCauses<Name, F>> {
        self.name.with_causes()
    }
    /// Sets the name, failing with a mismatch error if a *different* name
    /// was already set for this node.
    pub fn set_name(&mut self, name: Name, cause: Option<Location<F>>) -> Result<(), Error<F>>
    where
        F: Ord + Clone,
    {
        self.name
            .try_set(name, cause, |expected, found, because, causes| {
                Error::new(
                    error::LayoutFieldMismatchName {
                        id: self.id,
                        expected,
                        found,
                        because: because.preferred().cloned(),
                    }
                    .into(),
                    causes.preferred().cloned(),
                )
            })
    }
    /// Unconditionally replaces the (possibly unset) name.
    pub fn replace_name(&mut self, name: MaybeSet<Name, F>) {
        self.name = name
    }
    /// Build a default name for this layout variant.
    ///
    /// Tries, in order: the file-name component of the node's IRI, then the
    /// name of the payload layout (if any). Returns `Ok(None)` when neither
    /// yields a usable name.
    pub fn default_name<D: Descriptions<F>>(
        &self,
        context: &Context<F, D>,
        vocabulary: &Vocabulary,
        cause: Option<Location<F>>,
    ) -> Result<Option<Caused<Name, F>>, Error<F>>
    where
        F: Clone,
    {
        if let Id::Iri(iri) = self.id {
            if let Some(name) = iri.iri(vocabulary).unwrap().path().file_name() {
                if let Ok(name) = Name::new(name) {
                    return Ok(Some(Caused::new(name, cause)));
                }
            }
        }
        if let Some(layout_id) = self.layout.with_causes() {
            let layout = context
                .require_layout(*layout_id.inner(), layout_id.causes().preferred().cloned())?;
            if let Some(name) = layout.name() {
                return Ok(Some(Caused::new(name.inner().clone(), cause)));
            }
        }
        Ok(None)
    }
    /// Returns the (possibly unset) payload layout reference.
    pub fn layout(&self) -> &MaybeSet<Id, F> {
        &self.layout
    }
    /// Sets the payload layout, failing with a mismatch error if a
    /// *different* layout was already set for this node.
    pub fn set_layout(&mut self, layout_ref: Id, cause: Option<Location<F>>) -> Result<(), Error<F>>
    where
        F: Ord + Clone,
    {
        self.layout
            .try_set(layout_ref, cause, |expected, found, because, causes| {
                Error::new(
                    error::LayoutFieldMismatchLayout {
                        id: self.id,
                        expected,
                        found,
                        because: because.preferred().cloned(),
                    }
                    .into(),
                    causes.preferred().cloned(),
                )
            })
    }
    /// Unconditionally replaces the (possibly unset) payload layout.
    pub fn replace_layout(&mut self, layout: MaybeSet<Id, F>) {
        self.layout = layout
    }
}
/// Final building step turning a variant [`Definition`] into a
/// `treeldr::layout::Variant`.
pub trait Build<F> {
    /// Returns the variant name, or an error if none was ever set (names
    /// are mandatory at build time).
    fn require_name(&self) -> Result<WithCauses<Name, F>, Error<F>>;
    /// Builds the final variant, resolving the payload layout `Id` through
    /// the allocated node table.
    fn build(
        &self,
        label: Option<String>,
        doc: Documentation,
        nodes: &super::super::context::allocated::Nodes<F>,
    ) -> Result<treeldr::layout::Variant<F>, Error<F>>;
}
impl<F: Ord + Clone> Build<F> for WithCauses<Definition<F>, F> {
    /// Returns the variant name, or `LayoutVariantMissingName` if unset.
    fn require_name(&self) -> Result<WithCauses<Name, F>, Error<F>> {
        self.name.clone().ok_or_else(|| {
            Caused::new(
                error::LayoutVariantMissingName(self.id).into(),
                self.causes().preferred().cloned(),
            )
        })
    }
    fn build(
        &self,
        label: Option<String>,
        doc: Documentation,
        nodes: &super::super::context::allocated::Nodes<F>,
    ) -> Result<treeldr::layout::Variant<F>, Error<F>> {
        let name = self.require_name()?;
        // Resolve the payload layout id (if any) into the allocated layout
        // reference; propagates an error if the layout is missing.
        let layout = self
            .layout
            .clone()
            .try_map_with_causes(|layout_id, causes| {
                Ok(**nodes.require_layout(layout_id, causes.preferred().cloned())?)
            })?;
        Ok(treeldr::layout::Variant::new(name, layout, label, doc))
    }
}
| 23.117241 | 97 | 0.62321 |
72b0a7d640a4869c1286b506fe7b921ccc9cd886 | 80,858 | #![allow(non_camel_case_types)]
#![allow(non_upper_case_globals)]
use super::debuginfo::{
DIBuilder, DIDescriptor, DIFile, DILexicalBlock, DISubprogram, DIType,
DIBasicType, DIDerivedType, DICompositeType, DIScope, DIVariable,
DIGlobalVariableExpression, DIArray, DISubrange, DITemplateTypeParameter, DIEnumerator,
DINameSpace, DIFlags, DISPFlags, DebugEmissionKind,
};
use libc::{c_uint, c_int, size_t, c_char};
use libc::{c_ulonglong, c_void};
use std::marker::PhantomData;
use super::RustString;
// C-level boolean as used by the LLVM-C API (LLVMBool): nonzero = true.
pub type Bool = c_uint;
pub const True: Bool = 1 as Bool;
pub const False: Bool = 0 as Bool;
/// LLVMRustResult — success/failure code returned by the Rust-side C++
/// wrappers; discriminants must stay in sync with the C++ definition.
#[derive(Copy, Clone, PartialEq)]
#[repr(C)]
#[allow(dead_code)] // Variants constructed by C++.
pub enum LLVMRustResult {
    Success,
    Failure,
}
// Consts for the LLVM CallConv type, pre-cast to usize.

/// LLVM CallingConv::ID. Should we wrap this?
/// Discriminants must match LLVM's `llvm::CallingConv` numbering exactly,
/// since values are passed through the FFI unchanged.
#[derive(Copy, Clone, PartialEq, Debug)]
#[repr(C)]
pub enum CallConv {
    CCallConv = 0,
    FastCallConv = 8,
    ColdCallConv = 9,
    X86StdcallCallConv = 64,
    X86FastcallCallConv = 65,
    ArmAapcsCallConv = 67,
    Msp430Intr = 69,
    X86_ThisCall = 70,
    PtxKernel = 71,
    X86_64_SysV = 78,
    X86_64_Win64 = 79,
    X86_VectorCall = 80,
    X86_Intr = 83,
    AmdGpuKernel = 91,
}
/// LLVMRustLinkage — symbol linkage kinds; must mirror the C++
/// `LLVMRustLinkage` enum value-for-value.
#[derive(PartialEq)]
#[repr(C)]
pub enum Linkage {
    ExternalLinkage = 0,
    AvailableExternallyLinkage = 1,
    LinkOnceAnyLinkage = 2,
    LinkOnceODRLinkage = 3,
    WeakAnyLinkage = 4,
    WeakODRLinkage = 5,
    AppendingLinkage = 6,
    InternalLinkage = 7,
    PrivateLinkage = 8,
    ExternalWeakLinkage = 9,
    CommonLinkage = 10,
}
// LLVMRustVisibility — symbol visibility; must mirror the C++ enum.
#[repr(C)]
pub enum Visibility {
    Default = 0,
    Hidden = 1,
    Protected = 2,
}
/// LLVMDLLStorageClass — Windows DLL import/export storage class.
#[derive(Copy, Clone)]
#[repr(C)]
pub enum DLLStorageClass {
    #[allow(dead_code)]
    Default = 0,
    DllImport = 1, // Function to be imported from DLL.
    #[allow(dead_code)]
    DllExport = 2, // Function to be accessible from DLL.
}
/// Matches LLVMRustAttribute in rustllvm.h
/// Semantically a subset of the C++ enum llvm::Attribute::AttrKind,
/// though it is not ABI compatible (since it's a C++ enum).
/// Discriminants are this wrapper enum's own numbering and must stay in
/// sync with rustllvm.h, not with AttrKind itself.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub enum Attribute {
    AlwaysInline    = 0,
    ByVal           = 1,
    Cold            = 2,
    InlineHint      = 3,
    MinSize         = 4,
    Naked           = 5,
    NoAlias         = 6,
    NoCapture       = 7,
    NoInline        = 8,
    NonNull         = 9,
    NoRedZone       = 10,
    NoReturn        = 11,
    NoUnwind        = 12,
    OptimizeForSize = 13,
    ReadOnly        = 14,
    SExt            = 15,
    StructRet       = 16,
    UWTable         = 17,
    ZExt            = 18,
    InReg           = 19,
    SanitizeThread  = 20,
    SanitizeAddress = 21,
    SanitizeMemory  = 22,
    NonLazyBind     = 23,
    OptimizeNone    = 24,
    ReturnsTwice    = 25,
}
/// LLVMIntPredicate — integer comparison predicates for `icmp`;
/// discriminants match LLVM's `CmpInst::Predicate` numbering.
#[derive(Copy, Clone)]
#[repr(C)]
pub enum IntPredicate {
    IntEQ = 32,
    IntNE = 33,
    IntUGT = 34,
    IntUGE = 35,
    IntULT = 36,
    IntULE = 37,
    IntSGT = 38,
    IntSGE = 39,
    IntSLT = 40,
    IntSLE = 41,
}
impl IntPredicate {
    /// Maps the backend-agnostic predicate from `rustc_codegen_ssa` onto
    /// this FFI-level enum, one variant to one variant.
    pub fn from_generic(intpre: rustc_codegen_ssa::common::IntPredicate) -> Self {
        use rustc_codegen_ssa::common::IntPredicate as Generic;
        match intpre {
            Generic::IntEQ => IntPredicate::IntEQ,
            Generic::IntNE => IntPredicate::IntNE,
            Generic::IntUGT => IntPredicate::IntUGT,
            Generic::IntUGE => IntPredicate::IntUGE,
            Generic::IntULT => IntPredicate::IntULT,
            Generic::IntULE => IntPredicate::IntULE,
            Generic::IntSGT => IntPredicate::IntSGT,
            Generic::IntSGE => IntPredicate::IntSGE,
            Generic::IntSLT => IntPredicate::IntSLT,
            Generic::IntSLE => IntPredicate::IntSLE,
        }
    }
}
/// LLVMRealPredicate — floating-point comparison predicates for `fcmp`
/// (O* = ordered, U* = unordered); discriminants match LLVM's numbering.
#[derive(Copy, Clone)]
#[repr(C)]
pub enum RealPredicate {
    RealPredicateFalse = 0,
    RealOEQ = 1,
    RealOGT = 2,
    RealOGE = 3,
    RealOLT = 4,
    RealOLE = 5,
    RealONE = 6,
    RealORD = 7,
    RealUNO = 8,
    RealUEQ = 9,
    RealUGT = 10,
    RealUGE = 11,
    RealULT = 12,
    RealULE = 13,
    RealUNE = 14,
    RealPredicateTrue = 15,
}
impl RealPredicate {
pub fn from_generic(realpred: rustc_codegen_ssa::common::RealPredicate) -> Self {
match realpred {
rustc_codegen_ssa::common::RealPredicate::RealPredicateFalse =>
RealPredicate::RealPredicateFalse,
rustc_codegen_ssa::common::RealPredicate::RealOEQ => RealPredicate::RealOEQ,
rustc_codegen_ssa::common::RealPredicate::RealOGT => RealPredicate::RealOGT,
rustc_codegen_ssa::common::RealPredicate::RealOGE => RealPredicate::RealOGE,
rustc_codegen_ssa::common::RealPredicate::RealOLT => RealPredicate::RealOLT,
rustc_codegen_ssa::common::RealPredicate::RealOLE => RealPredicate::RealOLE,
rustc_codegen_ssa::common::RealPredicate::RealONE => RealPredicate::RealONE,
rustc_codegen_ssa::common::RealPredicate::RealORD => RealPredicate::RealORD,
rustc_codegen_ssa::common::RealPredicate::RealUNO => RealPredicate::RealUNO,
rustc_codegen_ssa::common::RealPredicate::RealUEQ => RealPredicate::RealUEQ,
rustc_codegen_ssa::common::RealPredicate::RealUGT => RealPredicate::RealUGT,
rustc_codegen_ssa::common::RealPredicate::RealUGE => RealPredicate::RealUGE,
rustc_codegen_ssa::common::RealPredicate::RealULT => RealPredicate::RealULT,
rustc_codegen_ssa::common::RealPredicate::RealULE => RealPredicate::RealULE,
rustc_codegen_ssa::common::RealPredicate::RealUNE => RealPredicate::RealUNE,
rustc_codegen_ssa::common::RealPredicate::RealPredicateTrue =>
RealPredicate::RealPredicateTrue
}
}
}
/// LLVMTypeKind — LLVM type identifiers; discriminants must match the
/// LLVM-C `LLVMTypeKind` enum.
#[derive(Copy, Clone, PartialEq, Debug)]
#[repr(C)]
pub enum TypeKind {
    Void = 0,
    Half = 1,
    Float = 2,
    Double = 3,
    X86_FP80 = 4,
    FP128 = 5,
    PPC_FP128 = 6,
    Label = 7,
    Integer = 8,
    Function = 9,
    Struct = 10,
    Array = 11,
    Pointer = 12,
    Vector = 13,
    Metadata = 14,
    X86_MMX = 15,
    Token = 16,
}
impl TypeKind {
pub fn to_generic(self) -> rustc_codegen_ssa::common::TypeKind {
match self {
TypeKind::Void => rustc_codegen_ssa::common::TypeKind::Void,
TypeKind::Half => rustc_codegen_ssa::common::TypeKind::Half,
TypeKind::Float => rustc_codegen_ssa::common::TypeKind::Float,
TypeKind::Double => rustc_codegen_ssa::common::TypeKind::Double,
TypeKind::X86_FP80 => rustc_codegen_ssa::common::TypeKind::X86_FP80,
TypeKind::FP128 => rustc_codegen_ssa::common::TypeKind::FP128,
TypeKind::PPC_FP128 => rustc_codegen_ssa::common::TypeKind::PPC_FP128,
TypeKind::Label => rustc_codegen_ssa::common::TypeKind::Label,
TypeKind::Integer => rustc_codegen_ssa::common::TypeKind::Integer,
TypeKind::Function => rustc_codegen_ssa::common::TypeKind::Function,
TypeKind::Struct => rustc_codegen_ssa::common::TypeKind::Struct,
TypeKind::Array => rustc_codegen_ssa::common::TypeKind::Array,
TypeKind::Pointer => rustc_codegen_ssa::common::TypeKind::Pointer,
TypeKind::Vector => rustc_codegen_ssa::common::TypeKind::Vector,
TypeKind::Metadata => rustc_codegen_ssa::common::TypeKind::Metadata,
TypeKind::X86_MMX => rustc_codegen_ssa::common::TypeKind::X86_MMX,
TypeKind::Token => rustc_codegen_ssa::common::TypeKind::Token,
}
}
}
/// LLVMAtomicRmwBinOp — operations for the `atomicrmw` instruction;
/// discriminants must match the LLVM-C `LLVMAtomicRMWBinOp` enum.
#[derive(Copy, Clone)]
#[repr(C)]
pub enum AtomicRmwBinOp {
    AtomicXchg = 0,
    AtomicAdd = 1,
    AtomicSub = 2,
    AtomicAnd = 3,
    AtomicNand = 4,
    AtomicOr = 5,
    AtomicXor = 6,
    AtomicMax = 7,
    AtomicMin = 8,
    AtomicUMax = 9,
    AtomicUMin = 10,
}
impl AtomicRmwBinOp {
pub fn from_generic(op: rustc_codegen_ssa::common::AtomicRmwBinOp) -> Self {
match op {
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin
}
}
}
/// LLVMAtomicOrdering — memory orderings for atomic operations;
/// discriminants must match the LLVM-C `LLVMAtomicOrdering` enum
/// (note the gap at 3, reserved for Consume).
#[derive(Copy, Clone)]
#[repr(C)]
pub enum AtomicOrdering {
    #[allow(dead_code)]
    NotAtomic = 0,
    Unordered = 1,
    Monotonic = 2,
    // Consume = 3,  // Not specified yet.
    Acquire = 4,
    Release = 5,
    AcquireRelease = 6,
    SequentiallyConsistent = 7,
}
impl AtomicOrdering {
    /// Maps the backend-agnostic atomic ordering onto this FFI-level enum,
    /// one variant to one variant.
    pub fn from_generic(ao: rustc_codegen_ssa::common::AtomicOrdering) -> Self {
        use rustc_codegen_ssa::common::AtomicOrdering as Generic;
        match ao {
            Generic::NotAtomic => AtomicOrdering::NotAtomic,
            Generic::Unordered => AtomicOrdering::Unordered,
            Generic::Monotonic => AtomicOrdering::Monotonic,
            Generic::Acquire => AtomicOrdering::Acquire,
            Generic::Release => AtomicOrdering::Release,
            Generic::AcquireRelease => AtomicOrdering::AcquireRelease,
            Generic::SequentiallyConsistent => AtomicOrdering::SequentiallyConsistent,
        }
    }
}
/// LLVMRustSynchronizationScope — synchronization scope for fences and
/// atomics; must mirror the C++ `LLVMRustSynchronizationScope` enum.
#[derive(Copy, Clone)]
#[repr(C)]
pub enum SynchronizationScope {
    // FIXME: figure out if this variant is needed at all.
    #[allow(dead_code)]
    Other,
    SingleThread,
    CrossThread,
}
impl SynchronizationScope {
    /// Maps the backend-agnostic synchronization scope onto this FFI-level
    /// enum, one variant to one variant.
    pub fn from_generic(sc: rustc_codegen_ssa::common::SynchronizationScope) -> Self {
        use rustc_codegen_ssa::common::SynchronizationScope as Generic;
        match sc {
            Generic::Other => SynchronizationScope::Other,
            Generic::SingleThread => SynchronizationScope::SingleThread,
            Generic::CrossThread => SynchronizationScope::CrossThread,
        }
    }
}
/// LLVMRustFileType — output kind for target-machine code emission;
/// must mirror the C++ `LLVMRustFileType` enum.
#[derive(Copy, Clone)]
#[repr(C)]
pub enum FileType {
    // FIXME: figure out if this variant is needed at all.
    #[allow(dead_code)]
    Other,
    AssemblyFile,
    ObjectFile,
}
/// LLVMMetadataType — well-known metadata kind IDs (as passed to
/// `LLVMSetMetadata`); discriminants must match LLVM's fixed kind IDs.
#[derive(Copy, Clone)]
#[repr(C)]
pub enum MetadataType {
    MD_dbg = 0,
    MD_tbaa = 1,
    MD_prof = 2,
    MD_fpmath = 3,
    MD_range = 4,
    MD_tbaa_struct = 5,
    MD_invariant_load = 6,
    MD_alias_scope = 7,
    MD_noalias = 8,
    MD_nontemporal = 9,
    MD_mem_parallel_loop_access = 10,
    MD_nonnull = 11,
}
/// LLVMRustAsmDialect — inline-assembly syntax flavor;
/// must mirror the C++ `LLVMRustAsmDialect` enum.
#[derive(Copy, Clone)]
#[repr(C)]
pub enum AsmDialect {
    // FIXME: figure out if this variant is needed at all.
    #[allow(dead_code)]
    Other,
    Att,
    Intel,
}
impl AsmDialect {
    /// Maps the AST-level asm dialect onto this FFI-level enum.
    pub fn from_generic(asm: syntax::ast::AsmDialect) -> Self {
        use syntax::ast::AsmDialect as Generic;
        match asm {
            Generic::Att => AsmDialect::Att,
            Generic::Intel => AsmDialect::Intel,
        }
    }
}
/// LLVMRustCodeGenOptLevel — codegen optimization level passed to the
/// target machine; must mirror the C++ `LLVMRustCodeGenOptLevel` enum.
#[derive(Copy, Clone, PartialEq)]
#[repr(C)]
pub enum CodeGenOptLevel {
    // FIXME: figure out if this variant is needed at all.
    #[allow(dead_code)]
    Other,
    None,
    Less,
    Default,
    Aggressive,
}
/// LLVMRelocMode — relocation model for code generation;
/// variant order must mirror the corresponding C++ enum.
#[derive(Copy, Clone, PartialEq)]
#[repr(C)]
pub enum RelocMode {
    Default,
    Static,
    PIC,
    DynamicNoPic,
    ROPI,
    RWPI,
    ROPI_RWPI,
}
/// LLVMRustCodeModel — code model for code generation;
/// variant order must mirror the C++ `LLVMRustCodeModel` enum.
#[derive(Copy, Clone)]
#[repr(C)]
pub enum CodeModel {
    // FIXME: figure out if this variant is needed at all.
    #[allow(dead_code)]
    Other,
    Small,
    Kernel,
    Medium,
    Large,
    None,
}
/// LLVMRustDiagnosticKind — classification of diagnostics delivered via
/// the diagnostic handler; must mirror the C++ enum (values come from C++).
#[derive(Copy, Clone)]
#[repr(C)]
#[allow(dead_code)] // Variants constructed by C++.
pub enum DiagnosticKind {
    Other,
    InlineAsm,
    StackSize,
    DebugMetadataVersion,
    SampleProfile,
    OptimizationRemark,
    OptimizationRemarkMissed,
    OptimizationRemarkAnalysis,
    OptimizationRemarkAnalysisFPCommute,
    OptimizationRemarkAnalysisAliasing,
    OptimizationRemarkOther,
    OptimizationFailure,
    PGOProfile,
    Linker,
}
/// LLVMRustArchiveKind — static-archive format flavor;
/// must mirror the C++ `LLVMRustArchiveKind` enum.
#[derive(Copy, Clone)]
#[repr(C)]
pub enum ArchiveKind {
    // FIXME: figure out if this variant is needed at all.
    #[allow(dead_code)]
    Other,
    K_GNU,
    K_BSD,
    K_COFF,
}
/// LLVMRustPassKind — granularity of an LLVM pass;
/// must mirror the C++ enum (values come from C++).
#[derive(Copy, Clone, PartialEq, Debug)]
#[repr(C)]
#[allow(dead_code)] // Variants constructed by C++.
pub enum PassKind {
    Other,
    Function,
    Module,
}
/// LLVMRustThinLTOData — opaque ThinLTO analysis state owned by C++.
extern { pub type ThinLTOData; }

/// LLVMRustThinLTOBuffer — opaque serialized-module buffer owned by C++.
extern { pub type ThinLTOBuffer; }

// LLVMRustModuleNameCallback — callback receiving (payload, module id,
// module name) for each imported ThinLTO module.
pub type ThinLTOModuleNameCallback =
    unsafe extern "C" fn(*mut c_void, *const c_char, *const c_char);

/// LLVMRustThinLTOModule — borrowed view of a module's bitcode:
/// identifier string plus a (pointer, length) byte slice.
#[repr(C)]
pub struct ThinLTOModule {
    pub identifier: *const c_char,
    pub data: *const u8,
    pub len: usize,
}
/// LLVMThreadLocalMode — TLS model for thread-local globals;
/// variant order must mirror the LLVM-C `LLVMThreadLocalMode` enum.
#[derive(Copy, Clone)]
#[repr(C)]
pub enum ThreadLocalMode {
    NotThreadLocal,
    GeneralDynamic,
    LocalDynamic,
    InitialExec,
    LocalExec
}
// Unsized marker type used to make wrapper structs unconstructible in Rust.
extern { type Opaque; }

// Opaque payload carrying an invariant lifetime `'a` (the `&'a mut &'a ()`
// PhantomData forces invariance), used by the borrowed FFI wrappers below.
#[repr(C)]
struct InvariantOpaque<'a> {
    _marker: PhantomData<&'a mut &'a ()>,
    _opaque: Opaque,
}
// Opaque pointer types: Rust-side stand-ins for C++ objects that are only
// ever handled by reference. `extern type`s are unsized and unconstructible;
// the `InvariantOpaque<'a>` structs additionally tie the handle to a
// lifetime so it cannot outlive its owner.
extern { pub type Module; }
extern { pub type Context; }
extern { pub type Type; }
extern { pub type Value; }
extern { pub type ConstantInt; }
extern { pub type Metadata; }
extern { pub type BasicBlock; }
#[repr(C)]
pub struct Builder<'a>(InvariantOpaque<'a>);
extern { pub type MemoryBuffer; }
#[repr(C)]
pub struct PassManager<'a>(InvariantOpaque<'a>);
extern { pub type PassManagerBuilder; }
extern { pub type ObjectFile; }
#[repr(C)]
pub struct SectionIterator<'a>(InvariantOpaque<'a>);
extern { pub type Pass; }
extern { pub type TargetMachine; }
extern { pub type Archive; }
#[repr(C)]
pub struct ArchiveIterator<'a>(InvariantOpaque<'a>);
#[repr(C)]
pub struct ArchiveChild<'a>(InvariantOpaque<'a>);
extern { pub type Twine; }
extern { pub type DiagnosticInfo; }
extern { pub type SMDiagnostic; }
#[repr(C)]
pub struct RustArchiveMember<'a>(InvariantOpaque<'a>);
#[repr(C)]
pub struct OperandBundleDef<'a>(InvariantOpaque<'a>);
#[repr(C)]
pub struct Linker<'a>(InvariantOpaque<'a>);

// Callback signatures registered with LLVM for diagnostics and inline-asm
// errors; the `*mut c_void` / `*const c_void` is the user payload.
pub type DiagnosticHandler = unsafe extern "C" fn(&DiagnosticInfo, *mut c_void);
pub type InlineAsmDiagHandler = unsafe extern "C" fn(&SMDiagnostic, *const c_void, c_uint);
/// Types and flags for LLVM's DWARF debug-info builder (`DIBuilder`).
/// The type aliases reflect the DI* class hierarchy on the C++ side;
/// on the Rust side they are all just `Metadata` handles.
pub mod debuginfo {
    use super::{InvariantOpaque, Metadata};

    /// Opaque, lifetime-bound handle to an `llvm::DIBuilder`.
    #[repr(C)]
    pub struct DIBuilder<'a>(InvariantOpaque<'a>);

    pub type DIDescriptor = Metadata;
    pub type DIScope = DIDescriptor;
    pub type DIFile = DIScope;
    pub type DILexicalBlock = DIScope;
    pub type DISubprogram = DIScope;
    pub type DINameSpace = DIScope;
    pub type DIType = DIDescriptor;
    pub type DIBasicType = DIType;
    pub type DIDerivedType = DIType;
    pub type DICompositeType = DIDerivedType;
    pub type DIVariable = DIDescriptor;
    pub type DIGlobalVariableExpression = DIDescriptor;
    pub type DIArray = DIDescriptor;
    pub type DISubrange = DIDescriptor;
    pub type DIEnumerator = DIDescriptor;
    pub type DITemplateTypeParameter = DIDescriptor;

    // These values **must** match with LLVMRustDIFlags!!
    bitflags! {
        #[repr(transparent)]
        #[derive(Default)]
        pub struct DIFlags: u32 {
            const FlagZero                = 0;
            const FlagPrivate             = 1;
            const FlagProtected           = 2;
            const FlagPublic              = 3;
            const FlagFwdDecl             = (1 << 2);
            const FlagAppleBlock          = (1 << 3);
            const FlagBlockByrefStruct    = (1 << 4);
            const FlagVirtual             = (1 << 5);
            const FlagArtificial          = (1 << 6);
            const FlagExplicit            = (1 << 7);
            const FlagPrototyped          = (1 << 8);
            const FlagObjcClassComplete   = (1 << 9);
            const FlagObjectPointer       = (1 << 10);
            const FlagVector              = (1 << 11);
            const FlagStaticMember        = (1 << 12);
            const FlagLValueReference     = (1 << 13);
            const FlagRValueReference     = (1 << 14);
            const FlagExternalTypeRef     = (1 << 15);
            const FlagIntroducedVirtual   = (1 << 18);
            const FlagBitField            = (1 << 19);
            const FlagNoReturn            = (1 << 20);
        }
    }

    // These values **must** match with LLVMRustDISPFlags!!
    bitflags! {
        #[repr(transparent)]
        #[derive(Default)]
        pub struct DISPFlags: u32 {
            const SPFlagZero              = 0;
            const SPFlagVirtual           = 1;
            const SPFlagPureVirtual       = 2;
            const SPFlagLocalToUnit       = (1 << 2);
            const SPFlagDefinition        = (1 << 3);
            const SPFlagOptimized         = (1 << 4);
            const SPFlagMainSubprogram    = (1 << 5);
        }
    }

    /// LLVMRustDebugEmissionKind — how much debug info to emit;
    /// must mirror the C++ enum.
    #[derive(Copy, Clone)]
    #[repr(C)]
    pub enum DebugEmissionKind {
        NoDebug,
        FullDebug,
        LineTablesOnly,
    }

    impl DebugEmissionKind {
        /// Maps the compiler's `-C debuginfo` level to the LLVM emission kind.
        pub fn from_generic(kind: rustc::session::config::DebugInfo) -> Self {
            use rustc::session::config::DebugInfo;
            match kind {
                DebugInfo::None => DebugEmissionKind::NoDebug,
                DebugInfo::Limited => DebugEmissionKind::LineTablesOnly,
                DebugInfo::Full => DebugEmissionKind::FullDebug,
            }
        }
    }
}
extern { pub type ModuleBuffer; }
extern "C" {
pub fn LLVMRustInstallFatalErrorHandler();
// Create and destroy contexts.
pub fn LLVMRustContextCreate(shouldDiscardNames: bool) -> &'static mut Context;
pub fn LLVMContextDispose(C: &'static mut Context);
pub fn LLVMGetMDKindIDInContext(C: &Context, Name: *const c_char, SLen: c_uint) -> c_uint;
// Create modules.
pub fn LLVMModuleCreateWithNameInContext(ModuleID: *const c_char, C: &Context) -> &Module;
pub fn LLVMGetModuleContext(M: &Module) -> &Context;
pub fn LLVMCloneModule(M: &Module) -> &Module;
/// Data layout. See Module::getDataLayout.
pub fn LLVMGetDataLayout(M: &Module) -> *const c_char;
pub fn LLVMSetDataLayout(M: &Module, Triple: *const c_char);
/// See Module::setModuleInlineAsm.
pub fn LLVMSetModuleInlineAsm(M: &Module, Asm: *const c_char);
pub fn LLVMRustAppendModuleInlineAsm(M: &Module, Asm: *const c_char);
/// See llvm::LLVMTypeKind::getTypeID.
pub fn LLVMRustGetTypeKind(Ty: &Type) -> TypeKind;
// Operations on integer types
pub fn LLVMInt1TypeInContext(C: &Context) -> &Type;
pub fn LLVMInt8TypeInContext(C: &Context) -> &Type;
pub fn LLVMInt16TypeInContext(C: &Context) -> &Type;
pub fn LLVMInt32TypeInContext(C: &Context) -> &Type;
pub fn LLVMInt64TypeInContext(C: &Context) -> &Type;
pub fn LLVMIntTypeInContext(C: &Context, NumBits: c_uint) -> &Type;
pub fn LLVMGetIntTypeWidth(IntegerTy: &Type) -> c_uint;
// Operations on real types
pub fn LLVMFloatTypeInContext(C: &Context) -> &Type;
pub fn LLVMDoubleTypeInContext(C: &Context) -> &Type;
// Operations on function types
pub fn LLVMFunctionType(ReturnType: &'a Type,
ParamTypes: *const &'a Type,
ParamCount: c_uint,
IsVarArg: Bool)
-> &'a Type;
pub fn LLVMCountParamTypes(FunctionTy: &Type) -> c_uint;
pub fn LLVMGetParamTypes(FunctionTy: &'a Type, Dest: *mut &'a Type);
// Operations on struct types
pub fn LLVMStructTypeInContext(C: &'a Context,
ElementTypes: *const &'a Type,
ElementCount: c_uint,
Packed: Bool)
-> &'a Type;
// Operations on array, pointer, and vector types (sequence types)
pub fn LLVMRustArrayType(ElementType: &Type, ElementCount: u64) -> &Type;
pub fn LLVMPointerType(ElementType: &Type, AddressSpace: c_uint) -> &Type;
pub fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type;
pub fn LLVMGetElementType(Ty: &Type) -> &Type;
pub fn LLVMGetVectorSize(VectorTy: &Type) -> c_uint;
// Operations on other types
pub fn LLVMVoidTypeInContext(C: &Context) -> &Type;
pub fn LLVMX86MMXTypeInContext(C: &Context) -> &Type;
pub fn LLVMRustMetadataTypeInContext(C: &Context) -> &Type;
// Operations on all values
pub fn LLVMTypeOf(Val: &Value) -> &Type;
pub fn LLVMGetValueName(Val: &Value) -> *const c_char;
pub fn LLVMSetValueName(Val: &Value, Name: *const c_char);
pub fn LLVMReplaceAllUsesWith(OldVal: &'a Value, NewVal: &'a Value);
pub fn LLVMSetMetadata(Val: &'a Value, KindID: c_uint, Node: &'a Value);
// Operations on constants of any type
pub fn LLVMConstNull(Ty: &Type) -> &Value;
pub fn LLVMGetUndef(Ty: &Type) -> &Value;
// Operations on metadata
pub fn LLVMMDStringInContext(C: &Context, Str: *const c_char, SLen: c_uint) -> &Value;
pub fn LLVMMDNodeInContext(C: &'a Context, Vals: *const &'a Value, Count: c_uint) -> &'a Value;
pub fn LLVMAddNamedMetadataOperand(M: &'a Module, Name: *const c_char, Val: &'a Value);
// Operations on scalar constants
pub fn LLVMConstInt(IntTy: &Type, N: c_ulonglong, SignExtend: Bool) -> &Value;
pub fn LLVMConstIntOfArbitraryPrecision(IntTy: &Type, Wn: c_uint, Ws: *const u64) -> &Value;
pub fn LLVMConstReal(RealTy: &Type, N: f64) -> &Value;
pub fn LLVMConstIntGetZExtValue(ConstantVal: &ConstantInt) -> c_ulonglong;
pub fn LLVMRustConstInt128Get(ConstantVal: &ConstantInt, SExt: bool,
high: &mut u64, low: &mut u64) -> bool;
// Operations on composite constants
pub fn LLVMConstStringInContext(C: &Context,
Str: *const c_char,
Length: c_uint,
DontNullTerminate: Bool)
-> &Value;
pub fn LLVMConstStructInContext(C: &'a Context,
ConstantVals: *const &'a Value,
Count: c_uint,
Packed: Bool)
-> &'a Value;
pub fn LLVMConstArray(ElementTy: &'a Type,
ConstantVals: *const &'a Value,
Length: c_uint)
-> &'a Value;
pub fn LLVMConstVector(ScalarConstantVals: *const &Value, Size: c_uint) -> &Value;
// Constant expressions
pub fn LLVMConstInBoundsGEP(
ConstantVal: &'a Value,
ConstantIndices: *const &'a Value,
NumIndices: c_uint,
) -> &'a Value;
pub fn LLVMConstZExt(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstPtrToInt(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstIntToPtr(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstBitCast(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstPointerCast(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstExtractValue(AggConstant: &Value,
IdxList: *const c_uint,
NumIdx: c_uint)
-> &Value;
// Operations on global variables, functions, and aliases (globals)
pub fn LLVMIsDeclaration(Global: &Value) -> Bool;
pub fn LLVMRustGetLinkage(Global: &Value) -> Linkage;
pub fn LLVMRustSetLinkage(Global: &Value, RustLinkage: Linkage);
pub fn LLVMSetSection(Global: &Value, Section: *const c_char);
pub fn LLVMRustGetVisibility(Global: &Value) -> Visibility;
pub fn LLVMRustSetVisibility(Global: &Value, Viz: Visibility);
pub fn LLVMGetAlignment(Global: &Value) -> c_uint;
pub fn LLVMSetAlignment(Global: &Value, Bytes: c_uint);
pub fn LLVMSetDLLStorageClass(V: &Value, C: DLLStorageClass);
// Operations on global variables
pub fn LLVMIsAGlobalVariable(GlobalVar: &Value) -> Option<&Value>;
pub fn LLVMAddGlobal(M: &'a Module, Ty: &'a Type, Name: *const c_char) -> &'a Value;
pub fn LLVMGetNamedGlobal(M: &Module, Name: *const c_char) -> Option<&Value>;
pub fn LLVMRustGetOrInsertGlobal(M: &'a Module, Name: *const c_char, T: &'a Type) -> &'a Value;
pub fn LLVMRustInsertPrivateGlobal(M: &'a Module, T: &'a Type) -> &'a Value;
pub fn LLVMGetFirstGlobal(M: &Module) -> Option<&Value>;
pub fn LLVMGetNextGlobal(GlobalVar: &Value) -> Option<&Value>;
pub fn LLVMDeleteGlobal(GlobalVar: &Value);
pub fn LLVMGetInitializer(GlobalVar: &Value) -> Option<&Value>;
pub fn LLVMSetInitializer(GlobalVar: &'a Value, ConstantVal: &'a Value);
pub fn LLVMSetThreadLocal(GlobalVar: &Value, IsThreadLocal: Bool);
pub fn LLVMSetThreadLocalMode(GlobalVar: &Value, Mode: ThreadLocalMode);
pub fn LLVMIsGlobalConstant(GlobalVar: &Value) -> Bool;
pub fn LLVMSetGlobalConstant(GlobalVar: &Value, IsConstant: Bool);
pub fn LLVMRustGetNamedValue(M: &Module, Name: *const c_char) -> Option<&Value>;
pub fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool);
// Operations on functions
pub fn LLVMRustGetOrInsertFunction(M: &'a Module,
Name: *const c_char,
FunctionTy: &'a Type)
-> &'a Value;
pub fn LLVMSetFunctionCallConv(Fn: &Value, CC: c_uint);
pub fn LLVMRustAddAlignmentAttr(Fn: &Value, index: c_uint, bytes: u32);
pub fn LLVMRustAddDereferenceableAttr(Fn: &Value, index: c_uint, bytes: u64);
pub fn LLVMRustAddDereferenceableOrNullAttr(Fn: &Value, index: c_uint, bytes: u64);
pub fn LLVMRustAddByValAttr(Fn: &Value, index: c_uint, ty: &Type);
pub fn LLVMRustAddFunctionAttribute(Fn: &Value, index: c_uint, attr: Attribute);
pub fn LLVMRustAddFunctionAttrStringValue(Fn: &Value,
index: c_uint,
Name: *const c_char,
Value: *const c_char);
pub fn LLVMRustRemoveFunctionAttributes(Fn: &Value, index: c_uint, attr: Attribute);
// Operations on parameters
pub fn LLVMIsAArgument(Val: &Value) -> Option<&Value>;
pub fn LLVMCountParams(Fn: &Value) -> c_uint;
pub fn LLVMGetParam(Fn: &Value, Index: c_uint) -> &Value;
// Operations on basic blocks
pub fn LLVMGetBasicBlockParent(BB: &BasicBlock) -> &Value;
pub fn LLVMAppendBasicBlockInContext(C: &'a Context,
Fn: &'a Value,
Name: *const c_char)
-> &'a BasicBlock;
pub fn LLVMDeleteBasicBlock(BB: &BasicBlock);
// Operations on instructions
pub fn LLVMIsAInstruction(Val: &Value) -> Option<&Value>;
pub fn LLVMGetFirstBasicBlock(Fn: &Value) -> &BasicBlock;
// Operations on call sites
pub fn LLVMSetInstructionCallConv(Instr: &Value, CC: c_uint);
pub fn LLVMRustAddCallSiteAttribute(Instr: &Value, index: c_uint, attr: Attribute);
pub fn LLVMRustAddAlignmentCallSiteAttr(Instr: &Value, index: c_uint, bytes: u32);
pub fn LLVMRustAddDereferenceableCallSiteAttr(Instr: &Value, index: c_uint, bytes: u64);
pub fn LLVMRustAddDereferenceableOrNullCallSiteAttr(Instr: &Value,
index: c_uint,
bytes: u64);
pub fn LLVMRustAddByValCallSiteAttr(Instr: &Value, index: c_uint, ty: &Type);
// Operations on load/store instructions (only)
pub fn LLVMSetVolatile(MemoryAccessInst: &Value, volatile: Bool);
// Operations on phi nodes
pub fn LLVMAddIncoming(PhiNode: &'a Value,
IncomingValues: *const &'a Value,
IncomingBlocks: *const &'a BasicBlock,
Count: c_uint);
// Instruction builders
pub fn LLVMCreateBuilderInContext(C: &'a Context) -> &'a mut Builder<'a>;
pub fn LLVMPositionBuilderAtEnd(Builder: &Builder<'a>, Block: &'a BasicBlock);
pub fn LLVMGetInsertBlock(Builder: &Builder<'a>) -> &'a BasicBlock;
pub fn LLVMDisposeBuilder(Builder: &'a mut Builder<'a>);
// Metadata
pub fn LLVMSetCurrentDebugLocation(Builder: &Builder<'a>, L: Option<&'a Value>);
pub fn LLVMGetCurrentDebugLocation(Builder: &Builder<'a>) -> &'a Value;
pub fn LLVMSetInstDebugLocation(Builder: &Builder<'a>, Inst: &'a Value);
// Terminators
pub fn LLVMBuildRetVoid(B: &Builder<'a>) -> &'a Value;
pub fn LLVMBuildRet(B: &Builder<'a>, V: &'a Value) -> &'a Value;
pub fn LLVMBuildBr(B: &Builder<'a>, Dest: &'a BasicBlock) -> &'a Value;
pub fn LLVMBuildCondBr(B: &Builder<'a>,
If: &'a Value,
Then: &'a BasicBlock,
Else: &'a BasicBlock)
-> &'a Value;
pub fn LLVMBuildSwitch(B: &Builder<'a>,
V: &'a Value,
Else: &'a BasicBlock,
NumCases: c_uint)
-> &'a Value;
pub fn LLVMRustBuildInvoke(B: &Builder<'a>,
Fn: &'a Value,
Args: *const &'a Value,
NumArgs: c_uint,
Then: &'a BasicBlock,
Catch: &'a BasicBlock,
Bundle: Option<&OperandBundleDef<'a>>,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildLandingPad(B: &Builder<'a>,
Ty: &'a Type,
PersFn: &'a Value,
NumClauses: c_uint,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildResume(B: &Builder<'a>, Exn: &'a Value) -> &'a Value;
pub fn LLVMBuildUnreachable(B: &Builder<'a>) -> &'a Value;
pub fn LLVMRustBuildCleanupPad(B: &Builder<'a>,
ParentPad: Option<&'a Value>,
ArgCnt: c_uint,
Args: *const &'a Value,
Name: *const c_char)
-> Option<&'a Value>;
pub fn LLVMRustBuildCleanupRet(B: &Builder<'a>,
CleanupPad: &'a Value,
UnwindBB: Option<&'a BasicBlock>)
-> Option<&'a Value>;
pub fn LLVMRustBuildCatchPad(B: &Builder<'a>,
ParentPad: &'a Value,
ArgCnt: c_uint,
Args: *const &'a Value,
Name: *const c_char)
-> Option<&'a Value>;
pub fn LLVMRustBuildCatchRet(
B: &Builder<'a>,
Pad: &'a Value,
BB: &'a BasicBlock,
) -> Option<&'a Value>;
pub fn LLVMRustBuildCatchSwitch(Builder: &Builder<'a>,
ParentPad: Option<&'a Value>,
BB: Option<&'a BasicBlock>,
NumHandlers: c_uint,
Name: *const c_char)
-> Option<&'a Value>;
pub fn LLVMRustAddHandler(CatchSwitch: &'a Value, Handler: &'a BasicBlock);
pub fn LLVMSetPersonalityFn(Func: &'a Value, Pers: &'a Value);
// Add a case to the switch instruction
pub fn LLVMAddCase(Switch: &'a Value, OnVal: &'a Value, Dest: &'a BasicBlock);
// Add a clause to the landing pad instruction
pub fn LLVMAddClause(LandingPad: &'a Value, ClauseVal: &'a Value);
// Set the cleanup on a landing pad instruction
pub fn LLVMSetCleanup(LandingPad: &Value, Val: Bool);
// Arithmetic
pub fn LLVMBuildAdd(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFAdd(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildSub(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFSub(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildMul(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFMul(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildUDiv(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildExactUDiv(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildSDiv(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildExactSDiv(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFDiv(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildURem(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildSRem(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFRem(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildShl(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildLShr(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildAShr(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildNSWAdd(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildNUWAdd(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildNSWSub(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildNUWSub(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildNSWMul(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildNUWMul(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildAnd(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildOr(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildXor(B: &Builder<'a>,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildNeg(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
pub fn LLVMBuildFNeg(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
pub fn LLVMBuildNot(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
pub fn LLVMRustSetHasUnsafeAlgebra(Instr: &Value);
// Memory
pub fn LLVMBuildAlloca(B: &Builder<'a>, Ty: &'a Type, Name: *const c_char) -> &'a Value;
pub fn LLVMBuildArrayAlloca(B: &Builder<'a>,
Ty: &'a Type,
Val: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildLoad(B: &Builder<'a>, PointerVal: &'a Value, Name: *const c_char) -> &'a Value;
pub fn LLVMBuildStore(B: &Builder<'a>, Val: &'a Value, Ptr: &'a Value) -> &'a Value;
pub fn LLVMBuildGEP(B: &Builder<'a>,
Pointer: &'a Value,
Indices: *const &'a Value,
NumIndices: c_uint,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildInBoundsGEP(B: &Builder<'a>,
Pointer: &'a Value,
Indices: *const &'a Value,
NumIndices: c_uint,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildStructGEP(B: &Builder<'a>,
Pointer: &'a Value,
Idx: c_uint,
Name: *const c_char)
-> &'a Value;
// Casts
pub fn LLVMBuildTrunc(B: &Builder<'a>,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildZExt(B: &Builder<'a>,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildSExt(B: &Builder<'a>,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFPToUI(B: &Builder<'a>,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFPToSI(B: &Builder<'a>,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildUIToFP(B: &Builder<'a>,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildSIToFP(B: &Builder<'a>,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFPTrunc(B: &Builder<'a>,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFPExt(B: &Builder<'a>,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildPtrToInt(B: &Builder<'a>,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildIntToPtr(B: &Builder<'a>,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildBitCast(B: &Builder<'a>,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildPointerCast(B: &Builder<'a>,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMRustBuildIntCast(B: &Builder<'a>,
Val: &'a Value,
DestTy: &'a Type,
IsSized: bool)
-> &'a Value;
// Comparisons
pub fn LLVMBuildICmp(B: &Builder<'a>,
Op: c_uint,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFCmp(B: &Builder<'a>,
Op: c_uint,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
// Miscellaneous instructions
pub fn LLVMBuildPhi(B: &Builder<'a>, Ty: &'a Type, Name: *const c_char) -> &'a Value;
pub fn LLVMRustBuildCall(B: &Builder<'a>,
Fn: &'a Value,
Args: *const &'a Value,
NumArgs: c_uint,
Bundle: Option<&OperandBundleDef<'a>>,
Name: *const c_char)
-> &'a Value;
pub fn LLVMRustBuildMemCpy(B: &Builder<'a>,
Dst: &'a Value,
DstAlign: c_uint,
Src: &'a Value,
SrcAlign: c_uint,
Size: &'a Value,
IsVolatile: bool)
-> &'a Value;
pub fn LLVMRustBuildMemMove(B: &Builder<'a>,
Dst: &'a Value,
DstAlign: c_uint,
Src: &'a Value,
SrcAlign: c_uint,
Size: &'a Value,
IsVolatile: bool)
-> &'a Value;
pub fn LLVMBuildSelect(B: &Builder<'a>,
If: &'a Value,
Then: &'a Value,
Else: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildVAArg(B: &Builder<'a>,
list: &'a Value,
Ty: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildExtractElement(B: &Builder<'a>,
VecVal: &'a Value,
Index: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildInsertElement(B: &Builder<'a>,
VecVal: &'a Value,
EltVal: &'a Value,
Index: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildShuffleVector(B: &Builder<'a>,
V1: &'a Value,
V2: &'a Value,
Mask: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildExtractValue(B: &Builder<'a>,
AggVal: &'a Value,
Index: c_uint,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildInsertValue(B: &Builder<'a>,
AggVal: &'a Value,
EltVal: &'a Value,
Index: c_uint,
Name: *const c_char)
-> &'a Value;
pub fn LLVMRustBuildVectorReduceFAdd(B: &Builder<'a>,
Acc: &'a Value,
Src: &'a Value)
-> &'a Value;
pub fn LLVMRustBuildVectorReduceFMul(B: &Builder<'a>,
Acc: &'a Value,
Src: &'a Value)
-> &'a Value;
pub fn LLVMRustBuildVectorReduceAdd(B: &Builder<'a>,
Src: &'a Value)
-> &'a Value;
pub fn LLVMRustBuildVectorReduceMul(B: &Builder<'a>,
Src: &'a Value)
-> &'a Value;
pub fn LLVMRustBuildVectorReduceAnd(B: &Builder<'a>,
Src: &'a Value)
-> &'a Value;
pub fn LLVMRustBuildVectorReduceOr(B: &Builder<'a>,
Src: &'a Value)
-> &'a Value;
pub fn LLVMRustBuildVectorReduceXor(B: &Builder<'a>,
Src: &'a Value)
-> &'a Value;
pub fn LLVMRustBuildVectorReduceMin(B: &Builder<'a>,
Src: &'a Value,
IsSigned: bool)
-> &'a Value;
pub fn LLVMRustBuildVectorReduceMax(B: &Builder<'a>,
Src: &'a Value,
IsSigned: bool)
-> &'a Value;
pub fn LLVMRustBuildVectorReduceFMin(B: &Builder<'a>,
Src: &'a Value,
IsNaN: bool)
-> &'a Value;
pub fn LLVMRustBuildVectorReduceFMax(B: &Builder<'a>,
Src: &'a Value,
IsNaN: bool)
-> &'a Value;
pub fn LLVMRustBuildMinNum(
B: &Builder<'a>,
LHS: &'a Value,
LHS: &'a Value,
) -> &'a Value;
pub fn LLVMRustBuildMaxNum(
B: &Builder<'a>,
LHS: &'a Value,
LHS: &'a Value,
) -> &'a Value;
// Atomic Operations
pub fn LLVMRustBuildAtomicLoad(B: &Builder<'a>,
PointerVal: &'a Value,
Name: *const c_char,
Order: AtomicOrdering)
-> &'a Value;
pub fn LLVMRustBuildAtomicStore(B: &Builder<'a>,
Val: &'a Value,
Ptr: &'a Value,
Order: AtomicOrdering)
-> &'a Value;
pub fn LLVMRustBuildAtomicCmpXchg(B: &Builder<'a>,
LHS: &'a Value,
CMP: &'a Value,
RHS: &'a Value,
Order: AtomicOrdering,
FailureOrder: AtomicOrdering,
Weak: Bool)
-> &'a Value;
pub fn LLVMBuildAtomicRMW(B: &Builder<'a>,
Op: AtomicRmwBinOp,
LHS: &'a Value,
RHS: &'a Value,
Order: AtomicOrdering,
SingleThreaded: Bool)
-> &'a Value;
pub fn LLVMRustBuildAtomicFence(B: &Builder<'_>,
Order: AtomicOrdering,
Scope: SynchronizationScope);
/// Writes a module to the specified path. Returns 0 on success.
pub fn LLVMWriteBitcodeToFile(M: &Module, Path: *const c_char) -> c_int;
/// Creates a pass manager.
pub fn LLVMCreatePassManager() -> &'a mut PassManager<'a>;
/// Creates a function-by-function pass manager
pub fn LLVMCreateFunctionPassManagerForModule(M: &'a Module) -> &'a mut PassManager<'a>;
/// Disposes a pass manager.
pub fn LLVMDisposePassManager(PM: &'a mut PassManager<'a>);
/// Runs a pass manager on a module.
pub fn LLVMRunPassManager(PM: &PassManager<'a>, M: &'a Module) -> Bool;
pub fn LLVMInitializePasses();
pub fn LLVMPassManagerBuilderCreate() -> &'static mut PassManagerBuilder;
pub fn LLVMPassManagerBuilderDispose(PMB: &'static mut PassManagerBuilder);
pub fn LLVMPassManagerBuilderSetSizeLevel(PMB: &PassManagerBuilder, Value: Bool);
pub fn LLVMPassManagerBuilderSetDisableUnrollLoops(PMB: &PassManagerBuilder, Value: Bool);
pub fn LLVMPassManagerBuilderUseInlinerWithThreshold(PMB: &PassManagerBuilder,
threshold: c_uint);
pub fn LLVMPassManagerBuilderPopulateModulePassManager(PMB: &PassManagerBuilder,
PM: &PassManager<'_>);
pub fn LLVMPassManagerBuilderPopulateFunctionPassManager(PMB: &PassManagerBuilder,
PM: &PassManager<'_>);
pub fn LLVMPassManagerBuilderPopulateLTOPassManager(PMB: &PassManagerBuilder,
PM: &PassManager<'_>,
Internalize: Bool,
RunInliner: Bool);
pub fn LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
PMB: &PassManagerBuilder,
PM: &PassManager<'_>);
// Stuff that's in rustllvm/ because it's not upstream yet.
/// Opens an object file.
pub fn LLVMCreateObjectFile(
MemBuf: &'static mut MemoryBuffer,
) -> Option<&'static mut ObjectFile>;
/// Closes an object file.
pub fn LLVMDisposeObjectFile(ObjFile: &'static mut ObjectFile);
/// Enumerates the sections in an object file.
pub fn LLVMGetSections(ObjFile: &'a ObjectFile) -> &'a mut SectionIterator<'a>;
/// Destroys a section iterator.
pub fn LLVMDisposeSectionIterator(SI: &'a mut SectionIterator<'a>);
/// Returns `true` if the section iterator is at the end of the section
/// list:
pub fn LLVMIsSectionIteratorAtEnd(ObjFile: &'a ObjectFile, SI: &SectionIterator<'a>) -> Bool;
/// Moves the section iterator to point to the next section.
pub fn LLVMMoveToNextSection(SI: &SectionIterator<'_>);
/// Returns the current section size.
pub fn LLVMGetSectionSize(SI: &SectionIterator<'_>) -> c_ulonglong;
/// Returns the current section contents as a string buffer.
pub fn LLVMGetSectionContents(SI: &SectionIterator<'_>) -> *const c_char;
/// Reads the given file and returns it as a memory buffer. Use
/// LLVMDisposeMemoryBuffer() to get rid of it.
pub fn LLVMRustCreateMemoryBufferWithContentsOfFile(
Path: *const c_char,
) -> Option<&'static mut MemoryBuffer>;
    /// Enables LLVM's multithreaded mode; the returned `Bool` reports success.
    /// (FFI binding — exact semantics are defined by the LLVM-C API.)
    pub fn LLVMStartMultithreaded() -> Bool;
    /// Returns a string describing the last error caused by an LLVMRust* call.
    /// NOTE(review): ownership/lifetime of the returned string is not visible
    /// here — check rustllvm before assuming it may be freed.
    pub fn LLVMRustGetLastError() -> *const c_char;
    /// Print the pass timings since static dtors aren't picking them up.
    pub fn LLVMRustPrintPassTimings();
pub fn LLVMStructCreateNamed(C: &Context, Name: *const c_char) -> &Type;
pub fn LLVMStructSetBody(StructTy: &'a Type,
ElementTypes: *const &'a Type,
ElementCount: c_uint,
Packed: Bool);
/// Prepares inline assembly.
pub fn LLVMRustInlineAsm(Ty: &Type,
AsmString: *const c_char,
Constraints: *const c_char,
SideEffects: Bool,
AlignStack: Bool,
Dialect: AsmDialect)
-> &Value;
pub fn LLVMRustInlineAsmVerify(Ty: &Type,
Constraints: *const c_char)
-> bool;
pub fn LLVMRustDebugMetadataVersion() -> u32;
pub fn LLVMRustVersionMajor() -> u32;
pub fn LLVMRustVersionMinor() -> u32;
pub fn LLVMRustAddModuleFlag(M: &Module, name: *const c_char, value: u32);
pub fn LLVMRustMetadataAsValue(C: &'a Context, MD: &'a Metadata) -> &'a Value;
pub fn LLVMRustDIBuilderCreate(M: &'a Module) -> &'a mut DIBuilder<'a>;
pub fn LLVMRustDIBuilderDispose(Builder: &'a mut DIBuilder<'a>);
pub fn LLVMRustDIBuilderFinalize(Builder: &DIBuilder<'_>);
pub fn LLVMRustDIBuilderCreateCompileUnit(Builder: &DIBuilder<'a>,
Lang: c_uint,
File: &'a DIFile,
Producer: *const c_char,
isOptimized: bool,
Flags: *const c_char,
RuntimeVer: c_uint,
SplitName: *const c_char,
kind: DebugEmissionKind)
-> &'a DIDescriptor;
pub fn LLVMRustDIBuilderCreateFile(Builder: &DIBuilder<'a>,
Filename: *const c_char,
Directory: *const c_char)
-> &'a DIFile;
pub fn LLVMRustDIBuilderCreateSubroutineType(Builder: &DIBuilder<'a>,
File: &'a DIFile,
ParameterTypes: &'a DIArray)
-> &'a DICompositeType;
pub fn LLVMRustDIBuilderCreateFunction(Builder: &DIBuilder<'a>,
Scope: &'a DIDescriptor,
Name: *const c_char,
LinkageName: *const c_char,
File: &'a DIFile,
LineNo: c_uint,
Ty: &'a DIType,
ScopeLine: c_uint,
Flags: DIFlags,
SPFlags: DISPFlags,
Fn: &'a Value,
TParam: &'a DIArray,
Decl: Option<&'a DIDescriptor>)
-> &'a DISubprogram;
pub fn LLVMRustDIBuilderCreateBasicType(Builder: &DIBuilder<'a>,
Name: *const c_char,
SizeInBits: u64,
AlignInBits: u32,
Encoding: c_uint)
-> &'a DIBasicType;
pub fn LLVMRustDIBuilderCreatePointerType(Builder: &DIBuilder<'a>,
PointeeTy: &'a DIType,
SizeInBits: u64,
AlignInBits: u32,
Name: *const c_char)
-> &'a DIDerivedType;
pub fn LLVMRustDIBuilderCreateStructType(Builder: &DIBuilder<'a>,
Scope: Option<&'a DIDescriptor>,
Name: *const c_char,
File: &'a DIFile,
LineNumber: c_uint,
SizeInBits: u64,
AlignInBits: u32,
Flags: DIFlags,
DerivedFrom: Option<&'a DIType>,
Elements: &'a DIArray,
RunTimeLang: c_uint,
VTableHolder: Option<&'a DIType>,
UniqueId: *const c_char)
-> &'a DICompositeType;
pub fn LLVMRustDIBuilderCreateMemberType(Builder: &DIBuilder<'a>,
Scope: &'a DIDescriptor,
Name: *const c_char,
File: &'a DIFile,
LineNo: c_uint,
SizeInBits: u64,
AlignInBits: u32,
OffsetInBits: u64,
Flags: DIFlags,
Ty: &'a DIType)
-> &'a DIDerivedType;
pub fn LLVMRustDIBuilderCreateVariantMemberType(Builder: &DIBuilder<'a>,
Scope: &'a DIScope,
Name: *const c_char,
File: &'a DIFile,
LineNumber: c_uint,
SizeInBits: u64,
AlignInBits: u32,
OffsetInBits: u64,
Discriminant: Option<&'a Value>,
Flags: DIFlags,
Ty: &'a DIType)
-> &'a DIType;
pub fn LLVMRustDIBuilderCreateLexicalBlock(Builder: &DIBuilder<'a>,
Scope: &'a DIScope,
File: &'a DIFile,
Line: c_uint,
Col: c_uint)
-> &'a DILexicalBlock;
pub fn LLVMRustDIBuilderCreateLexicalBlockFile(Builder: &DIBuilder<'a>,
Scope: &'a DIScope,
File: &'a DIFile)
-> &'a DILexicalBlock;
pub fn LLVMRustDIBuilderCreateStaticVariable(Builder: &DIBuilder<'a>,
Context: Option<&'a DIScope>,
Name: *const c_char,
LinkageName: *const c_char,
File: &'a DIFile,
LineNo: c_uint,
Ty: &'a DIType,
isLocalToUnit: bool,
Val: &'a Value,
Decl: Option<&'a DIDescriptor>,
AlignInBits: u32)
-> &'a DIGlobalVariableExpression;
pub fn LLVMRustDIBuilderCreateVariable(Builder: &DIBuilder<'a>,
Tag: c_uint,
Scope: &'a DIDescriptor,
Name: *const c_char,
File: &'a DIFile,
LineNo: c_uint,
Ty: &'a DIType,
AlwaysPreserve: bool,
Flags: DIFlags,
ArgNo: c_uint,
AlignInBits: u32)
-> &'a DIVariable;
pub fn LLVMRustDIBuilderCreateArrayType(Builder: &DIBuilder<'a>,
Size: u64,
AlignInBits: u32,
Ty: &'a DIType,
Subscripts: &'a DIArray)
-> &'a DIType;
pub fn LLVMRustDIBuilderGetOrCreateSubrange(Builder: &DIBuilder<'a>,
Lo: i64,
Count: i64)
-> &'a DISubrange;
pub fn LLVMRustDIBuilderGetOrCreateArray(Builder: &DIBuilder<'a>,
Ptr: *const Option<&'a DIDescriptor>,
Count: c_uint)
-> &'a DIArray;
pub fn LLVMRustDIBuilderInsertDeclareAtEnd(Builder: &DIBuilder<'a>,
Val: &'a Value,
VarInfo: &'a DIVariable,
AddrOps: *const i64,
AddrOpsCount: c_uint,
DL: &'a Value,
InsertAtEnd: &'a BasicBlock)
-> &'a Value;
pub fn LLVMRustDIBuilderCreateEnumerator(Builder: &DIBuilder<'a>,
Name: *const c_char,
Val: u64)
-> &'a DIEnumerator;
pub fn LLVMRustDIBuilderCreateEnumerationType(Builder: &DIBuilder<'a>,
Scope: &'a DIScope,
Name: *const c_char,
File: &'a DIFile,
LineNumber: c_uint,
SizeInBits: u64,
AlignInBits: u32,
Elements: &'a DIArray,
ClassType: &'a DIType,
IsScoped: bool)
-> &'a DIType;
pub fn LLVMRustDIBuilderCreateUnionType(Builder: &DIBuilder<'a>,
Scope: &'a DIScope,
Name: *const c_char,
File: &'a DIFile,
LineNumber: c_uint,
SizeInBits: u64,
AlignInBits: u32,
Flags: DIFlags,
Elements: Option<&'a DIArray>,
RunTimeLang: c_uint,
UniqueId: *const c_char)
-> &'a DIType;
pub fn LLVMRustDIBuilderCreateVariantPart(Builder: &DIBuilder<'a>,
Scope: &'a DIScope,
Name: *const c_char,
File: &'a DIFile,
LineNo: c_uint,
SizeInBits: u64,
AlignInBits: u32,
Flags: DIFlags,
Discriminator: Option<&'a DIDerivedType>,
Elements: &'a DIArray,
UniqueId: *const c_char)
-> &'a DIDerivedType;
pub fn LLVMSetUnnamedAddr(GlobalVar: &Value, UnnamedAddr: Bool);
pub fn LLVMRustDIBuilderCreateTemplateTypeParameter(Builder: &DIBuilder<'a>,
Scope: Option<&'a DIScope>,
Name: *const c_char,
Ty: &'a DIType,
File: &'a DIFile,
LineNo: c_uint,
ColumnNo: c_uint)
-> &'a DITemplateTypeParameter;
pub fn LLVMRustDIBuilderCreateNameSpace(Builder: &DIBuilder<'a>,
Scope: Option<&'a DIScope>,
Name: *const c_char,
File: &'a DIFile,
LineNo: c_uint)
-> &'a DINameSpace;
pub fn LLVMRustDICompositeTypeReplaceArrays(Builder: &DIBuilder<'a>,
CompositeType: &'a DIType,
Elements: Option<&'a DIArray>,
Params: Option<&'a DIArray>);
pub fn LLVMRustDIBuilderCreateDebugLocation(Context: &'a Context,
Line: c_uint,
Column: c_uint,
Scope: &'a DIScope,
InlinedAt: Option<&'a Metadata>)
-> &'a Value;
pub fn LLVMRustDIBuilderCreateOpDeref() -> i64;
pub fn LLVMRustDIBuilderCreateOpPlusUconst() -> i64;
#[allow(improper_ctypes)]
pub fn LLVMRustWriteTypeToString(Type: &Type, s: &RustString);
#[allow(improper_ctypes)]
pub fn LLVMRustWriteValueToString(value_ref: &Value, s: &RustString);
pub fn LLVMIsAConstantInt(value_ref: &Value) -> Option<&ConstantInt>;
pub fn LLVMRustPassKind(Pass: &Pass) -> PassKind;
pub fn LLVMRustFindAndCreatePass(Pass: *const c_char) -> Option<&'static mut Pass>;
pub fn LLVMRustAddPass(PM: &PassManager<'_>, Pass: &'static mut Pass);
pub fn LLVMRustAddLastExtensionPasses(PMB: &PassManagerBuilder,
Passes: *const &'static mut Pass,
NumPasses: size_t);
pub fn LLVMRustHasFeature(T: &TargetMachine, s: *const c_char) -> bool;
pub fn LLVMRustPrintTargetCPUs(T: &TargetMachine);
pub fn LLVMRustPrintTargetFeatures(T: &TargetMachine);
pub fn LLVMRustGetHostCPUName(len: *mut usize) -> *const c_char;
pub fn LLVMRustCreateTargetMachine(Triple: *const c_char,
CPU: *const c_char,
Features: *const c_char,
Model: CodeModel,
Reloc: RelocMode,
Level: CodeGenOptLevel,
UseSoftFP: bool,
PositionIndependentExecutable: bool,
FunctionSections: bool,
DataSections: bool,
TrapUnreachable: bool,
Singlethread: bool,
AsmComments: bool,
EmitStackSizeSection: bool)
-> Option<&'static mut TargetMachine>;
pub fn LLVMRustDisposeTargetMachine(T: &'static mut TargetMachine);
pub fn LLVMRustAddAnalysisPasses(T: &'a TargetMachine, PM: &PassManager<'a>, M: &'a Module);
pub fn LLVMRustAddBuilderLibraryInfo(PMB: &'a PassManagerBuilder,
M: &'a Module,
DisableSimplifyLibCalls: bool);
pub fn LLVMRustConfigurePassManagerBuilder(PMB: &PassManagerBuilder,
OptLevel: CodeGenOptLevel,
MergeFunctions: bool,
SLPVectorize: bool,
LoopVectorize: bool,
PrepareForThinLTO: bool,
PGOGenPath: *const c_char,
PGOUsePath: *const c_char);
pub fn LLVMRustAddLibraryInfo(PM: &PassManager<'a>,
M: &'a Module,
DisableSimplifyLibCalls: bool);
pub fn LLVMRustRunFunctionPassManager(PM: &PassManager<'a>, M: &'a Module);
pub fn LLVMRustWriteOutputFile(T: &'a TargetMachine,
PM: &PassManager<'a>,
M: &'a Module,
Output: *const c_char,
FileType: FileType)
-> LLVMRustResult;
pub fn LLVMRustPrintModule(PM: &PassManager<'a>,
M: &'a Module,
Output: *const c_char,
Demangle: extern fn(*const c_char,
size_t,
*mut c_char,
size_t) -> size_t,
) -> LLVMRustResult;
pub fn LLVMRustSetLLVMOptions(Argc: c_int, Argv: *const *const c_char);
pub fn LLVMRustPrintPasses();
pub fn LLVMRustGetInstructionCount(M: &Module) -> u32;
pub fn LLVMRustSetNormalizedTarget(M: &Module, triple: *const c_char);
pub fn LLVMRustAddAlwaysInlinePass(P: &PassManagerBuilder, AddLifetimes: bool);
pub fn LLVMRustRunRestrictionPass(M: &Module, syms: *const *const c_char, len: size_t);
pub fn LLVMRustMarkAllFunctionsNounwind(M: &Module);
pub fn LLVMRustOpenArchive(path: *const c_char) -> Option<&'static mut Archive>;
pub fn LLVMRustArchiveIteratorNew(AR: &'a Archive) -> &'a mut ArchiveIterator<'a>;
pub fn LLVMRustArchiveIteratorNext(
AIR: &ArchiveIterator<'a>,
) -> Option<&'a mut ArchiveChild<'a>>;
pub fn LLVMRustArchiveChildName(ACR: &ArchiveChild<'_>, size: &mut size_t) -> *const c_char;
pub fn LLVMRustArchiveChildData(ACR: &ArchiveChild<'_>, size: &mut size_t) -> *const c_char;
pub fn LLVMRustArchiveChildFree(ACR: &'a mut ArchiveChild<'a>);
pub fn LLVMRustArchiveIteratorFree(AIR: &'a mut ArchiveIterator<'a>);
pub fn LLVMRustDestroyArchive(AR: &'static mut Archive);
#[allow(improper_ctypes)]
pub fn LLVMRustGetSectionName(SI: &SectionIterator<'_>,
data: &mut Option<std::ptr::NonNull<c_char>>) -> size_t;
#[allow(improper_ctypes)]
pub fn LLVMRustWriteTwineToString(T: &Twine, s: &RustString);
pub fn LLVMContextSetDiagnosticHandler(C: &Context,
Handler: DiagnosticHandler,
DiagnosticContext: *mut c_void);
#[allow(improper_ctypes)]
pub fn LLVMRustUnpackOptimizationDiagnostic(DI: &'a DiagnosticInfo,
pass_name_out: &RustString,
function_out: &mut Option<&'a Value>,
loc_line_out: &mut c_uint,
loc_column_out: &mut c_uint,
loc_filename_out: &RustString,
message_out: &RustString);
pub fn LLVMRustUnpackInlineAsmDiagnostic(DI: &'a DiagnosticInfo,
cookie_out: &mut c_uint,
message_out: &mut Option<&'a Twine>,
instruction_out: &mut Option<&'a Value>);
#[allow(improper_ctypes)]
pub fn LLVMRustWriteDiagnosticInfoToString(DI: &DiagnosticInfo, s: &RustString);
pub fn LLVMRustGetDiagInfoKind(DI: &DiagnosticInfo) -> DiagnosticKind;
pub fn LLVMRustSetInlineAsmDiagnosticHandler(C: &Context,
H: InlineAsmDiagHandler,
CX: *mut c_void);
#[allow(improper_ctypes)]
pub fn LLVMRustWriteSMDiagnosticToString(d: &SMDiagnostic, s: &RustString);
pub fn LLVMRustWriteArchive(Dst: *const c_char,
NumMembers: size_t,
Members: *const &RustArchiveMember<'_>,
WriteSymbtab: bool,
Kind: ArchiveKind)
-> LLVMRustResult;
pub fn LLVMRustArchiveMemberNew(Filename: *const c_char,
Name: *const c_char,
Child: Option<&ArchiveChild<'a>>)
-> &'a mut RustArchiveMember<'a>;
pub fn LLVMRustArchiveMemberFree(Member: &'a mut RustArchiveMember<'a>);
pub fn LLVMRustSetDataLayoutFromTargetMachine(M: &'a Module, TM: &'a TargetMachine);
pub fn LLVMRustBuildOperandBundleDef(Name: *const c_char,
Inputs: *const &'a Value,
NumInputs: c_uint)
-> &'a mut OperandBundleDef<'a>;
pub fn LLVMRustFreeOperandBundleDef(Bundle: &'a mut OperandBundleDef<'a>);
pub fn LLVMRustPositionBuilderAtStart(B: &Builder<'a>, BB: &'a BasicBlock);
pub fn LLVMRustSetComdat(M: &'a Module, V: &'a Value, Name: *const c_char);
    /// Removes `V` from its comdat group (counterpart of `LLVMRustSetComdat`).
    pub fn LLVMRustUnsetComdat(V: &Value);
    /// Sets the module's PIE level (semantics defined in rustllvm).
    pub fn LLVMRustSetModulePIELevel(M: &Module);
    /// Serializes the module into an owned buffer; read it via the
    /// ptr/len accessors below and release it with `LLVMRustModuleBufferFree`.
    pub fn LLVMRustModuleBufferCreate(M: &Module) -> &'static mut ModuleBuffer;
    pub fn LLVMRustModuleBufferPtr(p: &ModuleBuffer) -> *const u8;
    pub fn LLVMRustModuleBufferLen(p: &ModuleBuffer) -> usize;
    pub fn LLVMRustModuleBufferFree(p: &'static mut ModuleBuffer);
    /// A rough cost metric for the module — presumably used for LTO
    /// partitioning decisions; confirm in rustllvm.
    pub fn LLVMRustModuleCost(M: &Module) -> u64;
    /// Serializes the module for ThinLTO; read via the ptr/len accessors
    /// below and release with `LLVMRustThinLTOBufferFree`.
    pub fn LLVMRustThinLTOBufferCreate(M: &Module) -> &'static mut ThinLTOBuffer;
    pub fn LLVMRustThinLTOBufferFree(M: &'static mut ThinLTOBuffer);
    pub fn LLVMRustThinLTOBufferPtr(M: &ThinLTOBuffer) -> *const c_char;
    pub fn LLVMRustThinLTOBufferLen(M: &ThinLTOBuffer) -> size_t;
pub fn LLVMRustCreateThinLTOData(
Modules: *const ThinLTOModule,
NumModules: c_uint,
PreservedSymbols: *const *const c_char,
PreservedSymbolsLen: c_uint,
) -> Option<&'static mut ThinLTOData>;
pub fn LLVMRustPrepareThinLTORename(
Data: &ThinLTOData,
Module: &Module,
) -> bool;
pub fn LLVMRustPrepareThinLTOResolveWeak(
Data: &ThinLTOData,
Module: &Module,
) -> bool;
pub fn LLVMRustPrepareThinLTOInternalize(
Data: &ThinLTOData,
Module: &Module,
) -> bool;
pub fn LLVMRustPrepareThinLTOImport(
Data: &ThinLTOData,
Module: &Module,
) -> bool;
pub fn LLVMRustGetThinLTOModuleImports(
Data: *const ThinLTOData,
ModuleNameCallback: ThinLTOModuleNameCallback,
CallbackPayload: *mut c_void,
);
pub fn LLVMRustFreeThinLTOData(Data: &'static mut ThinLTOData);
pub fn LLVMRustParseBitcodeForLTO(
Context: &Context,
Data: *const u8,
len: usize,
Identifier: *const c_char,
) -> Option<&Module>;
pub fn LLVMRustThinLTOGetDICompileUnit(M: &Module,
CU1: &mut *mut c_void,
CU2: &mut *mut c_void);
pub fn LLVMRustThinLTOPatchDICompileUnit(M: &Module, CU: *mut c_void);
pub fn LLVMRustLinkerNew(M: &'a Module) -> &'a mut Linker<'a>;
pub fn LLVMRustLinkerAdd(linker: &Linker<'_>,
bytecode: *const c_char,
bytecode_len: usize) -> bool;
pub fn LLVMRustLinkerFree(linker: &'a mut Linker<'a>);
}
| 43.355496 | 99 | 0.482611 |
//! Simple shape viewer
//!
//! - Drag the mouse to rotate the model.
//! - Drag and drop json files into the window to switch models.
//! - Right-click to move the light to the camera's position.
//! - Enter "P" on the keyboard to switch between parallel projection and perspective projection of the camera.
//! - Enter "L" on the keyboard to switch the point light source/uniform light source of the light.
//!
//! A model json file can be generated by `serde_json`. See the examples of `truck-modeling`!
use std::io::Read;
use std::sync::Arc;
use truck_meshalgo::prelude::*;
use truck_modeling::*;
use truck_platform::*;
use truck_rendimpl::*;
use wgpu::*;
use winit::{dpi::*, event::*, event_loop::ControlFlow};
mod app;
use app::*;
const PUNCHED_CUBE_BYTES: &[u8] = include_bytes!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/../resources/shape/punched-cube.json",
));
/// How the loaded shape is presented (cycled with the space key).
enum RenderMode {
    /// Shaded surface only.
    NaiveSurface,
    /// Wireframe only.
    NaiveWireFrame,
    /// Black occluding surface plus white wireframe, so edges hidden behind
    /// the surface are eliminated.
    HiddenLineEliminate,
    /// Shaded surface with a black wireframe drawn on top.
    SurfaceAndWireFrame,
}
/// Application state for the shape viewer.
struct MyApp {
    // Scene tied to the window; holds the camera, lights and drawn objects.
    scene: WindowScene,
    // Used to (re)create GPU instances when a new JSON file is dropped.
    creator: InstanceCreator,
    // `true` while the left mouse button is held, i.e. drag-rotation active.
    rotate_flag: bool,
    // Cursor position at the previous `cursor_moved` event.
    prev_cursor: Vector2,
    // Shaded-surface instance of the current model.
    instance: PolygonInstance,
    // Wireframe instance of the current model.
    wireframe: WireFrameInstance,
    // Currently selected drawing mode.
    render_mode: RenderMode,
}
impl MyApp {
fn create_camera() -> Camera {
let matrix = Matrix4::look_at_rh(
Point3::new(1.5, 1.5, 1.5),
Point3::origin(),
Vector3::unit_y(),
);
Camera::perspective_camera(
matrix.invert().unwrap(),
Rad(std::f64::consts::PI / 4.0),
0.1,
40.0,
)
}
fn update_render_mode(&mut self) {
match self.render_mode {
RenderMode::NaiveSurface => {
self.instance.instance_state_mut().material = Material {
albedo: Vector4::new(1.0, 1.0, 1.0, 1.0),
reflectance: 0.5,
roughness: 0.1,
ambient_ratio: 0.02,
background_ratio: 0.0,
alpha_blend: false,
};
self.scene.update_bind_group(&self.instance);
self.scene.set_visibility(&self.instance, true);
self.scene.set_visibility(&self.wireframe, false);
}
RenderMode::NaiveWireFrame => {
self.wireframe.instance_state_mut().color = Vector4::new(1.0, 1.0, 1.0, 1.0);
self.scene.update_bind_group(&self.wireframe);
self.scene.set_visibility(&self.instance, false);
self.scene.set_visibility(&self.wireframe, true);
}
RenderMode::HiddenLineEliminate => {
self.instance.instance_state_mut().material = Material {
albedo: Vector4::new(0.0, 0.0, 0.0, 1.0),
reflectance: 0.0,
roughness: 0.0,
ambient_ratio: 1.0,
background_ratio: 0.0,
alpha_blend: false,
};
self.wireframe.instance_state_mut().color = Vector4::new(1.0, 1.0, 1.0, 1.0);
self.scene.update_bind_group(&self.instance);
self.scene.update_bind_group(&self.wireframe);
self.scene.set_visibility(&self.instance, true);
self.scene.set_visibility(&self.wireframe, true);
}
RenderMode::SurfaceAndWireFrame => {
self.instance.instance_state_mut().material = Material {
albedo: Vector4::new(1.0, 1.0, 1.0, 1.0),
reflectance: 0.5,
roughness: 0.1,
ambient_ratio: 0.02,
background_ratio: 0.0,
alpha_blend: false,
};
self.wireframe.instance_state_mut().color = Vector4::new(0.0, 0.0, 0.0, 1.0);
self.scene.update_bind_group(&self.instance);
self.scene.update_bind_group(&self.wireframe);
self.scene.set_visibility(&self.instance, true);
self.scene.set_visibility(&self.wireframe, true);
}
}
}
fn load_shape<R: Read>(
creator: &InstanceCreator,
reader: R,
) -> (PolygonInstance, WireFrameInstance) {
let solid = Solid::extract(serde_json::from_reader(reader).unwrap()).unwrap();
let mut bdd_box = BoundingBox::new();
solid
.boundaries()
.iter()
.flatten()
.flat_map(truck_modeling::Face::boundaries)
.flatten()
.for_each(|edge| {
let curve = edge.oriented_curve();
bdd_box += match curve {
Curve::BSplineCurve(curve) => {
let bdb = curve.roughly_bounding_box();
vec![*bdb.max(), *bdb.min()].into_iter().collect()
}
Curve::NURBSCurve(curve) => curve.roughly_bounding_box(),
Curve::IntersectionCurve(_) => BoundingBox::new(),
};
});
let (size, center) = (bdd_box.size(), bdd_box.center());
let mat = Matrix4::from_translation(center.to_vec()) * Matrix4::from_scale(size);
let mesh_solid = solid.triangulation(size * 0.005).unwrap();
let curves = mesh_solid
.edge_iter()
.map(|edge| edge.get_curve())
.collect::<Vec<_>>();
let polygon_state = PolygonState {
matrix: mat.invert().unwrap(),
..Default::default()
};
let wire_state = WireFrameState {
matrix: mat.invert().unwrap(),
..Default::default()
};
(
creator.create_instance(&mesh_solid.to_polygon(), &polygon_state),
creator.create_instance(&curves, &wire_state),
)
}
}
impl App for MyApp {
    /// Builds the scene (black background, perspective camera, a single
    /// point light), loads the embedded punched-cube model, and shows it.
    fn init(window: Arc<winit::window::Window>) -> MyApp {
        let sample_count = 4;
        let scene_desc = WindowSceneDescriptor {
            studio: StudioConfig {
                background: Color::BLACK,
                camera: MyApp::create_camera(),
                lights: vec![Light {
                    position: Point3::new(1.0, 1.0, 1.0),
                    color: Vector3::new(1.0, 1.0, 1.0),
                    light_type: LightType::Point,
                }],
            },
            backend_buffer: BackendBufferConfig {
                sample_count,
                ..Default::default()
            },
        };
        let mut scene =
            app::block_on(async move { WindowScene::from_window(window, &scene_desc).await });
        let creator = scene.instance_creator();
        let (instance, wireframe) = Self::load_shape(&creator, PUNCHED_CUBE_BYTES);
        scene.add_object(&instance);
        scene.add_object(&wireframe);
        let mut app = MyApp {
            scene,
            creator,
            instance,
            wireframe,
            rotate_flag: false,
            prev_cursor: Vector2::zero(),
            render_mode: RenderMode::NaiveSurface,
        };
        // Apply the initial render mode (materials + visibility).
        app.update_render_mode();
        app
    }
    fn app_title<'a>() -> Option<&'a str> { Some("simple shape viewer") }
    /// Loads a JSON shape file dropped onto the window, replacing the
    /// currently displayed model.
    fn dropped_file(&mut self, path: std::path::PathBuf) -> ControlFlow {
        // NOTE(review): `unwrap` aborts the app if the file can't be opened.
        let file = std::fs::File::open(path).unwrap();
        self.scene.clear_objects();
        let (instance, wireframe) = Self::load_shape(&self.creator, file);
        self.scene.add_object(&instance);
        self.scene.add_object(&wireframe);
        self.instance = instance;
        self.wireframe = wireframe;
        self.update_render_mode();
        Self::default_control_flow()
    }
    /// Left button toggles drag-rotation; right button moves the light to
    /// the camera position (normalized to a direction for uniform lights).
    fn mouse_input(&mut self, state: ElementState, button: MouseButton) -> ControlFlow {
        match button {
            MouseButton::Left => {
                self.rotate_flag = state == ElementState::Pressed;
            }
            MouseButton::Right => {
                let (light, camera) = {
                    let desc = self.scene.studio_config_mut();
                    (&mut desc.lights[0], &desc.camera)
                };
                match light.light_type {
                    LightType::Point => {
                        light.position = camera.position();
                    }
                    LightType::Uniform => {
                        light.position = camera.position();
                        // A uniform light only needs a direction: normalize.
                        let strength = light.position.to_vec().magnitude();
                        light.position /= strength;
                    }
                }
            }
            _ => {}
        }
        Self::default_control_flow()
    }
    /// Scroll wheel dollies the camera along its view direction.
    fn mouse_wheel(&mut self, delta: MouseScrollDelta, _: TouchPhase) -> ControlFlow {
        match delta {
            MouseScrollDelta::LineDelta(_, y) => {
                let camera = &mut self.scene.studio_config_mut().camera;
                let trans_vec = camera.eye_direction() * 0.2 * y as f64;
                camera.matrix = Matrix4::from_translation(trans_vec) * camera.matrix;
            }
            // Pixel-precision deltas (touchpads) are ignored here.
            MouseScrollDelta::PixelDelta(_) => {}
        };
        Self::default_control_flow()
    }
    /// While the left button is held, rotates the camera around the origin
    /// based on the cursor movement since the previous event.
    fn cursor_moved(&mut self, position: PhysicalPosition<f64>) -> ControlFlow {
        let position = Vector2::new(position.x, position.y);
        if self.rotate_flag {
            let matrix = &mut self.scene.studio_config_mut().camera.matrix;
            let dir2d = position - self.prev_cursor;
            if dir2d.so_small() {
                return Self::default_control_flow();
            }
            // Rotation axis lies in the camera plane, perpendicular to the
            // drag direction; built from the camera's right/up basis vectors.
            let mut axis = dir2d[1] * matrix[0].truncate();
            axis += dir2d[0] * matrix[1].truncate();
            axis /= axis.magnitude();
            let angle = dir2d.magnitude() * 0.01;
            let mat = Matrix4::from_axis_angle(axis, Rad(angle));
            *matrix = mat.invert().unwrap() * *matrix;
        }
        self.prev_cursor = position;
        Self::default_control_flow()
    }
    /// Key bindings: `P` toggles parallel/perspective projection, `L`
    /// toggles point/uniform light, space cycles through the render modes.
    fn keyboard_input(&mut self, input: KeyboardInput, _: bool) -> ControlFlow {
        // React on key press only, and only to recognized keycodes.
        if input.state != ElementState::Pressed {
            return Self::default_control_flow();
        }
        let keycode = match input.virtual_keycode {
            Some(keycode) => keycode,
            None => return Self::default_control_flow(),
        };
        match keycode {
            VirtualKeyCode::P => {
                let camera = &mut self.scene.studio_config_mut().camera;
                // Swap projection type, keeping the current camera matrix.
                *camera = match camera.projection_type() {
                    ProjectionType::Parallel => Camera::perspective_camera(
                        camera.matrix,
                        Rad(std::f64::consts::PI / 4.0),
                        0.1,
                        40.0,
                    ),
                    ProjectionType::Perspective => {
                        Camera::parallel_camera(camera.matrix, 1.0, 0.1, 40.0)
                    }
                };
            }
            VirtualKeyCode::L => {
                let (light, camera) = {
                    let desc = self.scene.studio_config_mut();
                    (&mut desc.lights[0], &desc.camera)
                };
                // Swap light type; uniform lights store a unit direction,
                // point lights store the camera position.
                *light = match light.light_type {
                    LightType::Point => {
                        let mut vec = camera.position();
                        vec /= vec.to_vec().magnitude();
                        Light {
                            position: vec,
                            color: Vector3::new(1.0, 1.0, 1.0),
                            light_type: LightType::Uniform,
                        }
                    }
                    LightType::Uniform => {
                        let position = camera.position();
                        Light {
                            position,
                            color: Vector3::new(1.0, 1.0, 1.0),
                            light_type: LightType::Point,
                        }
                    }
                };
            }
            VirtualKeyCode::Space => {
                // Cycle: surface -> surface+wire -> wire -> hidden-line -> ...
                self.render_mode = match self.render_mode {
                    RenderMode::NaiveSurface => RenderMode::SurfaceAndWireFrame,
                    RenderMode::SurfaceAndWireFrame => RenderMode::NaiveWireFrame,
                    RenderMode::NaiveWireFrame => RenderMode::HiddenLineEliminate,
                    RenderMode::HiddenLineEliminate => RenderMode::NaiveSurface,
                };
                self.update_render_mode();
            }
            _ => {}
        }
        Self::default_control_flow()
    }
    /// Renders one frame.
    fn render(&mut self) { self.scene.render_frame(); }
}
/// Entry point: launch the shape-viewer application loop.
fn main() {
    MyApp::run();
}
| 38.56535 | 111 | 0.509221 |
//! Feature resolver.
//!
//! This is a new feature resolver that runs independently of the main
//! dependency resolver. It has several options which can enable new feature
//! resolution behavior.
//!
//! One of its key characteristics is that it can avoid unifying features for
//! shared dependencies in some situations. See `FeatureOpts` for the
//! different behaviors that can be enabled. If no extra options are enabled,
//! then it should behave exactly the same as the dependency resolver's
//! feature resolution.
//!
//! The preferred way to engage this new resolver is via
//! `resolve_ws_with_opts`.
//!
//! This does not *replace* feature resolution in the dependency resolver, but
//! instead acts as a second pass which can *narrow* the features selected in
//! the dependency resolver. The dependency resolver still needs to do its own
//! feature resolution in order to avoid selecting optional dependencies that
//! are never enabled. The dependency resolver could, in theory, just assume
//! all optional dependencies on all packages are enabled (and remove all
//! knowledge of features), but that could introduce new requirements that
//! might change old behavior or cause conflicts. Maybe some day in the future
//! we could experiment with that, but it seems unlikely to work or be all
//! that helpful.
//!
//! There are many assumptions made about the dependency resolver. This
//! feature resolver assumes validation has already been done on the feature
//! maps, and doesn't do any validation itself. It assumes dev-dependencies
//! within a dependency have been removed. There are probably other
//! assumptions that I am forgetting.
use crate::core::compiler::{CompileKind, CompileTarget, RustcTargetData};
use crate::core::dependency::{ArtifactTarget, DepKind, Dependency};
use crate::core::resolver::types::FeaturesSet;
use crate::core::resolver::{Resolve, ResolveBehavior};
use crate::core::{FeatureValue, PackageId, PackageIdSpec, PackageSet, Workspace};
use crate::util::interning::InternedString;
use crate::util::CargoResult;
use anyhow::bail;
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use std::rc::Rc;
/// The key used in various places to store features for a particular dependency.
/// The actual discrimination happens with the `FeaturesFor` type.
type PackageFeaturesKey = (PackageId, FeaturesFor);
/// Map of activated features.
///
/// The key is a `PackageFeaturesKey`, i.e. `(PackageId, FeaturesFor)`; the
/// `FeaturesFor` part distinguishes features activated for host use
/// (build dependency or proc-macro) from the other dependency kinds.
type ActivateMap = HashMap<PackageFeaturesKey, BTreeSet<InternedString>>;
/// Set of all activated features for all packages in the resolve graph.
pub struct ResolvedFeatures {
    /// Features that are activated, keyed by package and the kind of
    /// dependency (`FeaturesFor`) they were activated for.
    activated_features: ActivateMap,
    /// Optional dependencies that should be built.
    ///
    /// The value is the `name_in_toml` of the dependencies.
    activated_dependencies: ActivateMap,
    /// The options this feature resolution was computed with.
    opts: FeatureOpts,
}
/// Options for how the feature resolver works.
#[derive(Default)]
pub struct FeatureOpts {
/// Build deps and proc-macros will not share share features with other dep kinds,
/// and so won't artifact targets.
/// In other terms, if true, features associated with certain kinds of dependencies
/// will only be unified together.
/// If false, there is only one namespace for features, unifying all features across
/// all dependencies, no matter what kind.
decouple_host_deps: bool,
/// Dev dep features will not be activated unless needed.
decouple_dev_deps: bool,
/// Targets that are not in use will not activate features.
ignore_inactive_targets: bool,
/// If enabled, compare against old resolver (for testing).
compare: bool,
}
/// Flag to indicate if Cargo is building *any* dev units (tests, examples, etc.).
///
/// This disables decoupling of dev dependencies. It may be possible to relax
/// this in the future, but it will require significant changes to how unit
/// dependencies are computed, and can result in longer build times with
/// `cargo test` because the lib may need to be built 3 times instead of
/// twice.
#[derive(Copy, Clone, PartialEq)]
pub enum HasDevUnits {
    Yes,
    No,
}
/// Flag to indicate that target-specific filtering should be disabled.
#[derive(Copy, Clone, PartialEq)]
pub enum ForceAllTargets {
    Yes,
    No,
}
/// Flag to indicate if features are requested for a build dependency or not.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)]
pub enum FeaturesFor {
    /// If `Some(target)` is present, we represent an artifact target.
    /// Otherwise any other normal or dev dependency.
    NormalOrDevOrArtifactTarget(Option<CompileTarget>),
    /// Build dependency or proc-macro.
    HostDep,
}
impl Default for FeaturesFor {
fn default() -> Self {
FeaturesFor::NormalOrDevOrArtifactTarget(None)
}
}
impl std::fmt::Display for FeaturesFor {
    /// Renders the key for log output: nothing for a plain normal/dev
    /// dependency, the rustc target triple for an artifact target, and
    /// `host` for a build dependency or proc-macro.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::NormalOrDevOrArtifactTarget(None) => Ok(()),
            Self::NormalOrDevOrArtifactTarget(Some(target)) => {
                f.write_str(&target.rustc_target())
            }
            Self::HostDep => f.write_str("host"),
        }
    }
}
impl FeaturesFor {
    /// Maps a plain "is this for the host?" flag onto a feature key.
    pub fn from_for_host(for_host: bool) -> FeaturesFor {
        match for_host {
            true => FeaturesFor::HostDep,
            false => FeaturesFor::NormalOrDevOrArtifactTarget(None),
        }
    }
    /// Like [`FeaturesFor::from_for_host`], except that a present artifact
    /// target takes precedence over the host flag.
    pub fn from_for_host_or_artifact_target(
        for_host: bool,
        artifact_target: Option<CompileTarget>,
    ) -> FeaturesFor {
        if let Some(target) = artifact_target {
            FeaturesFor::NormalOrDevOrArtifactTarget(Some(target))
        } else {
            Self::from_for_host(for_host)
        }
    }
    /// Collapses the key to the default when host decoupling is disabled,
    /// so that all dependency kinds share one feature namespace.
    fn apply_opts(self, opts: &FeatureOpts) -> Self {
        if opts.decouple_host_deps {
            self
        } else {
            Self::default()
        }
    }
}
impl FeatureOpts {
    /// Builds the feature options from the workspace's resolver behavior and
    /// unstable `-Zfeatures` flags, then applies overrides for dev units and
    /// forced targets.
    pub fn new(
        ws: &Workspace<'_>,
        has_dev_units: HasDevUnits,
        force_all_targets: ForceAllTargets,
    ) -> CargoResult<FeatureOpts> {
        let mut opts = FeatureOpts::default();
        let unstable_flags = ws.config().cli_unstable();
        // Applies a list of `-Zfeatures` flag values to `opts`, failing on
        // any value it does not recognize.
        let mut enable = |feat_opts: &Vec<String>| {
            for opt in feat_opts {
                match opt.as_ref() {
                    "build_dep" | "host_dep" => opts.decouple_host_deps = true,
                    "dev_dep" => opts.decouple_dev_deps = true,
                    "itarget" => opts.ignore_inactive_targets = true,
                    "all" => {
                        opts.decouple_host_deps = true;
                        opts.decouple_dev_deps = true;
                        opts.ignore_inactive_targets = true;
                    }
                    "compare" => opts.compare = true,
                    "ws" => unimplemented!(),
                    s => bail!("-Zfeatures flag `{}` is not supported", s),
                }
            }
            Ok(())
        };
        if let Some(feat_opts) = unstable_flags.features.as_ref() {
            enable(feat_opts)?;
        }
        match ws.resolve_behavior() {
            ResolveBehavior::V1 => {}
            ResolveBehavior::V2 => {
                // The V2 resolver implies all decoupling options; "all" is a
                // known-good flag, so this `unwrap` cannot fail.
                enable(&vec!["all".to_string()]).unwrap();
            }
        }
        if let HasDevUnits::Yes = has_dev_units {
            // Dev deps cannot be decoupled when they are in use.
            opts.decouple_dev_deps = false;
        }
        if let ForceAllTargets::Yes = force_all_targets {
            // Every target is being built, so none may be filtered out.
            opts.ignore_inactive_targets = false;
        }
        Ok(opts)
    }
    /// Creates a new FeatureOpts for the given behavior.
    pub fn new_behavior(behavior: ResolveBehavior, has_dev_units: HasDevUnits) -> FeatureOpts {
        match behavior {
            ResolveBehavior::V1 => FeatureOpts::default(),
            ResolveBehavior::V2 => FeatureOpts {
                decouple_host_deps: true,
                // Dev-dep decoupling is only sound when no dev units build.
                decouple_dev_deps: has_dev_units == HasDevUnits::No,
                ignore_inactive_targets: true,
                compare: false,
            },
        }
    }
}
/// Features flags requested for a package.
///
/// This should be cheap and fast to clone, it is used in the resolver for
/// various caches.
///
/// This is split into enum variants because the resolver needs to handle
/// features coming from different places (command-line and dependency
/// declarations), but those different places have different constraints on
/// which syntax is allowed. This helps ensure that every place dealing with
/// features is properly handling those syntax restrictions.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub enum RequestedFeatures {
    /// Features requested on the command-line with flags.
    CliFeatures(CliFeatures),
    /// Features specified in a dependency declaration.
    DepFeatures {
        /// The `features` dependency field.
        features: FeaturesSet,
        /// The `default-features` dependency field.
        uses_default_features: bool,
    },
}
/// Features specified on the command-line.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct CliFeatures {
    /// Features from the `--features` flag.
    /// `Rc` keeps clones cheap for resolver caches.
    pub features: Rc<BTreeSet<FeatureValue>>,
    /// The `--all-features` flag.
    pub all_features: bool,
    /// Inverse of `--no-default-features` flag.
    pub uses_default_features: bool,
}
impl CliFeatures {
    /// Creates a new CliFeatures from the given command-line flags.
    ///
    /// Performs early syntax validation: explicit `dep:` syntax and features
    /// containing more than one `/` are rejected.
    pub fn from_command_line(
        features: &[String],
        all_features: bool,
        uses_default_features: bool,
    ) -> CargoResult<CliFeatures> {
        let features = Rc::new(CliFeatures::split_features(features));
        // Some early validation to ensure correct syntax.
        for feature in features.iter() {
            match feature {
                FeatureValue::Dep { .. } => {
                    bail!(
                        "feature `{}` is not allowed to use explicit `dep:` syntax",
                        feature
                    );
                }
                FeatureValue::DepFeature { dep_feature, .. } if dep_feature.contains('/') => {
                    bail!("multiple slashes in feature `{}` is not allowed", feature);
                }
                // Maybe call validate_feature_name here once it is an error?
                FeatureValue::Feature(_) | FeatureValue::DepFeature { .. } => {}
            }
        }
        Ok(CliFeatures {
            features,
            all_features,
            uses_default_features,
        })
    }
    /// Creates a new CliFeatures with the given `all_features` setting and
    /// no explicitly listed features.
    pub fn new_all(all_features: bool) -> CliFeatures {
        CliFeatures {
            all_features,
            features: Rc::new(BTreeSet::new()),
            uses_default_features: true,
        }
    }
    /// Splits a list of raw `--features` values (whitespace- or
    /// comma-separated) into individual, non-empty `FeatureValue`s.
    fn split_features(features: &[String]) -> BTreeSet<FeatureValue> {
        let mut split = BTreeSet::new();
        for raw in features {
            for piece in raw.split_whitespace().flat_map(|s| s.split(',')) {
                if !piece.is_empty() {
                    split.insert(FeatureValue::new(InternedString::new(piece)));
                }
            }
        }
        split
    }
}
impl ResolvedFeatures {
    /// Returns the list of features that are enabled for the given package.
    ///
    /// Panics if the package/`features_for` combination was never activated;
    /// use [`ResolvedFeatures::activated_features_unverified`] when that is
    /// not guaranteed.
    pub fn activated_features(
        &self,
        pkg_id: PackageId,
        features_for: FeaturesFor,
    ) -> Vec<InternedString> {
        self.activated_features_int(pkg_id, features_for)
            .expect("activated_features for invalid package")
    }
    /// Returns if the given dependency should be included.
    ///
    /// This handles dependencies disabled via `cfg` expressions and optional
    /// dependencies which are not enabled.
    pub fn is_dep_activated(
        &self,
        pkg_id: PackageId,
        features_for: FeaturesFor,
        dep_name: InternedString,
    ) -> bool {
        // Normalize the key the same way it was normalized when stored.
        let key = features_for.apply_opts(&self.opts);
        self.activated_dependencies
            .get(&(pkg_id, key))
            .map(|deps| deps.contains(&dep_name))
            .unwrap_or(false)
    }
    /// Variant of `activated_features` that returns `None` if this is
    /// not a valid pkg_id/is_build combination. Used in places which do
    /// not know which packages are activated (like `cargo clean`).
    pub fn activated_features_unverified(
        &self,
        pkg_id: PackageId,
        features_for: FeaturesFor,
    ) -> Option<Vec<InternedString>> {
        self.activated_features_int(pkg_id, features_for).ok()
    }
    /// Shared lookup for the public accessors above; errors when the
    /// package/key pair is unknown to this resolve.
    fn activated_features_int(
        &self,
        pkg_id: PackageId,
        features_for: FeaturesFor,
    ) -> CargoResult<Vec<InternedString>> {
        let fk = features_for.apply_opts(&self.opts);
        if let Some(fs) = self.activated_features.get(&(pkg_id, fk)) {
            Ok(fs.iter().cloned().collect())
        } else {
            bail!("features did not find {:?} {:?}", pkg_id, fk)
        }
    }
    /// Compares the result against the original resolver behavior.
    ///
    /// Used by `cargo fix --edition` to display any differences.
    pub fn compare_legacy(&self, legacy: &ResolvedFeatures) -> DiffMap {
        self.activated_features
            .iter()
            .filter_map(|((pkg_id, for_host), new_features)| {
                let old_features = legacy
                    .activated_features
                    .get(&(*pkg_id, *for_host))
                    // The new features may have for_host entries where the old one does not.
                    .or_else(|| {
                        legacy
                            .activated_features
                            .get(&(*pkg_id, FeaturesFor::default()))
                    })
                    .map(|feats| feats.iter().cloned().collect())
                    // `unwrap_or_default` replaces `unwrap_or_else(|| BTreeSet::new())`:
                    // same empty set, without the redundant closure.
                    .unwrap_or_default();
                // The new resolver should never add features.
                assert_eq!(new_features.difference(&old_features).next(), None);
                let removed_features: BTreeSet<_> =
                    old_features.difference(new_features).cloned().collect();
                if removed_features.is_empty() {
                    None
                } else {
                    Some(((*pkg_id, *for_host), removed_features))
                }
            })
            .collect()
    }
}
/// Map of differences.
///
/// Key is `(pkg_id, for_host)`. Value is a set of features or dependencies removed.
pub type DiffMap = BTreeMap<PackageFeaturesKey, BTreeSet<InternedString>>;
/// Walks the resolve graph and computes which features are activated for
/// each package, storing the intermediate state of the algorithm.
pub struct FeatureResolver<'a, 'cfg> {
    /// The workspace whose members' features are being resolved.
    ws: &'a Workspace<'cfg>,
    /// Per-target compiler data, used for platform (`cfg`) activation checks.
    target_data: &'a RustcTargetData<'cfg>,
    /// The platforms to build for, requested by the user.
    requested_targets: &'a [CompileKind],
    /// The dependency resolve graph being traversed.
    resolve: &'a Resolve,
    /// Used to look up downloaded packages (e.g. to check `proc_macro`).
    package_set: &'a PackageSet<'cfg>,
    /// Options that change how the feature resolver operates.
    opts: FeatureOpts,
    /// Map of features activated for each package.
    activated_features: ActivateMap,
    /// Map of optional dependencies activated for each package.
    activated_dependencies: ActivateMap,
    /// Keeps track of which packages have had its dependencies processed.
    /// Used to avoid cycles, and to speed up processing.
    processed_deps: HashSet<PackageFeaturesKey>,
    /// If this is `true`, then a non-default `feature_key` needs to be tracked while
    /// traversing the graph.
    ///
    /// This is only here to avoid calling `is_proc_macro` when all feature
    /// options are disabled (because `is_proc_macro` can trigger downloads).
    /// This has to be separate from `FeatureOpts.decouple_host_deps` because
    /// `for_host` tracking is also needed for `itarget` to work properly.
    track_for_host: bool,
    /// `dep_name?/feat_name` features that will be activated if `dep_name` is
    /// ever activated.
    ///
    /// The key is the `(package, for_host, dep_name)` of the package whose
    /// dependency will trigger the addition of new features. The value is the
    /// set of features to activate.
    deferred_weak_dependencies:
        HashMap<(PackageId, FeaturesFor, InternedString), HashSet<InternedString>>,
}
impl<'a, 'cfg> FeatureResolver<'a, 'cfg> {
    /// Runs the resolution algorithm and returns a new `ResolvedFeatures`
    /// with the result.
    pub fn resolve(
        ws: &Workspace<'cfg>,
        target_data: &RustcTargetData<'cfg>,
        resolve: &Resolve,
        package_set: &'a PackageSet<'cfg>,
        cli_features: &CliFeatures,
        specs: &[PackageIdSpec],
        requested_targets: &[CompileKind],
        opts: FeatureOpts,
    ) -> CargoResult<ResolvedFeatures> {
        use crate::util::profile;
        let _p = profile::start("resolve features");
        // Only track `for_host` when an option that needs it is enabled;
        // tracking it requires `is_proc_macro`, which can trigger downloads.
        let track_for_host = opts.decouple_host_deps || opts.ignore_inactive_targets;
        let mut r = FeatureResolver {
            ws,
            target_data,
            requested_targets,
            resolve,
            package_set,
            opts,
            activated_features: HashMap::new(),
            activated_dependencies: HashMap::new(),
            processed_deps: HashSet::new(),
            track_for_host,
            deferred_weak_dependencies: HashMap::new(),
        };
        r.do_resolve(specs, cli_features)?;
        log::debug!("features={:#?}", r.activated_features);
        if r.opts.compare {
            r.compare();
        }
        Ok(ResolvedFeatures {
            activated_features: r.activated_features,
            activated_dependencies: r.activated_dependencies,
            opts: r.opts,
        })
    }
    /// Performs the process of resolving all features for the resolve graph.
    fn do_resolve(
        &mut self,
        specs: &[PackageIdSpec],
        cli_features: &CliFeatures,
    ) -> CargoResult<()> {
        let member_features = self.ws.members_with_features(specs, cli_features)?;
        for (member, cli_features) in &member_features {
            let fvs = self.fvs_from_requested(member.package_id(), cli_features);
            let fk = if self.track_for_host && self.is_proc_macro(member.package_id()) {
                // Also activate for normal dependencies. This is needed if the
                // proc-macro includes other targets (like binaries or tests),
                // or running in `cargo test`. Note that in a workspace, if
                // the proc-macro is selected on the command like (like with
                // `--workspace`), this forces feature unification with normal
                // dependencies. This is part of the bigger problem where
                // features depend on which packages are built.
                self.activate_pkg(member.package_id(), FeaturesFor::default(), &fvs)?;
                FeaturesFor::HostDep
            } else {
                FeaturesFor::default()
            };
            self.activate_pkg(member.package_id(), fk, &fvs)?;
        }
        Ok(())
    }
    /// Activates the given feature values (`fvs`) on `pkg_id`, then walks its
    /// non-optional dependencies (optional ones are activated on demand by
    /// `activate_fv`).
    fn activate_pkg(
        &mut self,
        pkg_id: PackageId,
        fk: FeaturesFor,
        fvs: &[FeatureValue],
    ) -> CargoResult<()> {
        log::trace!("activate_pkg {} {}", pkg_id.name(), fk);
        // Add an empty entry to ensure everything is covered. This is intended for
        // finding bugs where the resolver missed something it should have visited.
        // Remove this in the future if `activated_features` uses an empty default.
        self.activated_features
            .entry((pkg_id, fk.apply_opts(&self.opts)))
            .or_insert_with(BTreeSet::new);
        for fv in fvs {
            self.activate_fv(pkg_id, fk, fv)?;
        }
        if !self.processed_deps.insert((pkg_id, fk)) {
            // Already processed dependencies. There's no need to process them
            // again. This is primarily to avoid cycles, but also helps speed
            // things up.
            //
            // This is safe because if another package comes along and adds a
            // feature on this package, it will immediately add it (in
            // `activate_fv`), and recurse as necessary right then and there.
            // For example, consider we've already processed our dependencies,
            // and another package comes along and enables one of our optional
            // dependencies, it will do so immediately in the
            // `FeatureValue::DepFeature` branch, and then immediately
            // recurse into that optional dependency. This also holds true for
            // features that enable other features.
            return Ok(());
        }
        for (dep_pkg_id, deps) in self.deps(pkg_id, fk) {
            for (dep, dep_fk) in deps {
                if dep.is_optional() {
                    // Optional dependencies are enabled in `activate_fv` when
                    // a feature enables it.
                    continue;
                }
                // Recurse into the dependency.
                let fvs = self.fvs_from_dependency(dep_pkg_id, dep);
                self.activate_pkg(dep_pkg_id, dep_fk, &fvs)?;
            }
        }
        Ok(())
    }
    /// Activate a single FeatureValue for a package.
    fn activate_fv(
        &mut self,
        pkg_id: PackageId,
        fk: FeaturesFor,
        fv: &FeatureValue,
    ) -> CargoResult<()> {
        log::trace!("activate_fv {} {} {}", pkg_id.name(), fk, fv);
        // Dispatch on the three feature syntaxes: `feat`, `dep:name`,
        // and `dep_name[?]/feat_name`.
        match fv {
            FeatureValue::Feature(f) => {
                self.activate_rec(pkg_id, fk, *f)?;
            }
            FeatureValue::Dep { dep_name } => {
                self.activate_dependency(pkg_id, fk, *dep_name)?;
            }
            FeatureValue::DepFeature {
                dep_name,
                dep_feature,
                weak,
            } => {
                self.activate_dep_feature(pkg_id, fk, *dep_name, *dep_feature, *weak)?;
            }
        }
        Ok(())
    }
    /// Activate the given feature for the given package, and then recursively
    /// activate any other features that feature enables.
    fn activate_rec(
        &mut self,
        pkg_id: PackageId,
        fk: FeaturesFor,
        feature_to_enable: InternedString,
    ) -> CargoResult<()> {
        log::trace!(
            "activate_rec {} {} feat={}",
            pkg_id.name(),
            fk,
            feature_to_enable
        );
        let enabled = self
            .activated_features
            .entry((pkg_id, fk.apply_opts(&self.opts)))
            .or_insert_with(BTreeSet::new);
        if !enabled.insert(feature_to_enable) {
            // Already enabled.
            return Ok(());
        }
        let summary = self.resolve.summary(pkg_id);
        let feature_map = summary.features();
        let fvs = match feature_map.get(&feature_to_enable) {
            Some(fvs) => fvs,
            None => {
                // TODO: this should only happen for optional dependencies.
                // Other cases should be validated by Summary's `build_feature_map`.
                // Figure out some way to validate this assumption.
                log::debug!(
                    "pkg {:?} does not define feature {}",
                    pkg_id,
                    feature_to_enable
                );
                return Ok(());
            }
        };
        for fv in fvs {
            self.activate_fv(pkg_id, fk, fv)?;
        }
        Ok(())
    }
    /// Activate a dependency (`dep:dep_name` syntax).
    fn activate_dependency(
        &mut self,
        pkg_id: PackageId,
        fk: FeaturesFor,
        dep_name: InternedString,
    ) -> CargoResult<()> {
        // Mark this dependency as activated.
        let save_decoupled = fk.apply_opts(&self.opts);
        self.activated_dependencies
            .entry((pkg_id, save_decoupled))
            .or_default()
            .insert(dep_name);
        // Check for any deferred features.
        let to_enable = self
            .deferred_weak_dependencies
            .remove(&(pkg_id, fk, dep_name));
        // Activate the optional dep.
        for (dep_pkg_id, deps) in self.deps(pkg_id, fk) {
            for (dep, dep_fk) in deps {
                if dep.name_in_toml() != dep_name {
                    continue;
                }
                // Flush features deferred by earlier weak (`dep?/feat`)
                // references now that the dependency is known to be active.
                if let Some(to_enable) = &to_enable {
                    for dep_feature in to_enable {
                        log::trace!(
                            "activate deferred {} {} -> {}/{}",
                            pkg_id.name(),
                            fk,
                            dep_name,
                            dep_feature
                        );
                        let fv = FeatureValue::new(*dep_feature);
                        self.activate_fv(dep_pkg_id, dep_fk, &fv)?;
                    }
                }
                let fvs = self.fvs_from_dependency(dep_pkg_id, dep);
                self.activate_pkg(dep_pkg_id, dep_fk, &fvs)?;
            }
        }
        Ok(())
    }
    /// Activate a feature within a dependency (`dep_name/feat_name` syntax).
    fn activate_dep_feature(
        &mut self,
        pkg_id: PackageId,
        fk: FeaturesFor,
        dep_name: InternedString,
        dep_feature: InternedString,
        weak: bool,
    ) -> CargoResult<()> {
        for (dep_pkg_id, deps) in self.deps(pkg_id, fk) {
            for (dep, dep_fk) in deps {
                if dep.name_in_toml() != dep_name {
                    continue;
                }
                if dep.is_optional() {
                    let save_for_host = fk.apply_opts(&self.opts);
                    if weak
                        && !self
                            .activated_dependencies
                            .get(&(pkg_id, save_for_host))
                            .map(|deps| deps.contains(&dep_name))
                            .unwrap_or(false)
                    {
                        // This is weak, but not yet activated. Defer in case
                        // something comes along later and enables it.
                        log::trace!(
                            "deferring feature {} {} -> {}/{}",
                            pkg_id.name(),
                            fk,
                            dep_name,
                            dep_feature
                        );
                        self.deferred_weak_dependencies
                            .entry((pkg_id, fk, dep_name))
                            .or_default()
                            .insert(dep_feature);
                        continue;
                    }
                    // Activate the dependency on self.
                    let fv = FeatureValue::Dep { dep_name };
                    self.activate_fv(pkg_id, fk, &fv)?;
                    if !weak {
                        // The old behavior before weak dependencies were
                        // added is to also enable a feature of the same
                        // name.
                        self.activate_rec(pkg_id, fk, dep_name)?;
                    }
                }
                // Activate the feature on the dependency.
                let fv = FeatureValue::new(dep_feature);
                self.activate_fv(dep_pkg_id, dep_fk, &fv)?;
            }
        }
        Ok(())
    }
    /// Returns Vec of FeatureValues from a Dependency definition.
    fn fvs_from_dependency(&self, dep_id: PackageId, dep: &Dependency) -> Vec<FeatureValue> {
        let summary = self.resolve.summary(dep_id);
        let feature_map = summary.features();
        let mut result: Vec<FeatureValue> = dep
            .features()
            .iter()
            .map(|f| FeatureValue::new(*f))
            .collect();
        let default = InternedString::new("default");
        // `default` is only added when the dependency actually defines it.
        if dep.uses_default_features() && feature_map.contains_key(&default) {
            result.push(FeatureValue::Feature(default));
        }
        result
    }
    /// Returns Vec of FeatureValues from a set of command-line features.
    fn fvs_from_requested(
        &self,
        pkg_id: PackageId,
        cli_features: &CliFeatures,
    ) -> Vec<FeatureValue> {
        let summary = self.resolve.summary(pkg_id);
        let feature_map = summary.features();
        let mut result: Vec<FeatureValue> = cli_features.features.iter().cloned().collect();
        let default = InternedString::new("default");
        if cli_features.uses_default_features && feature_map.contains_key(&default) {
            result.push(FeatureValue::Feature(default));
        }
        if cli_features.all_features {
            result.extend(feature_map.keys().map(|k| FeatureValue::Feature(*k)))
        }
        result
    }
    /// Returns the dependencies for a package, filtering out inactive targets.
    fn deps(
        &self,
        pkg_id: PackageId,
        fk: FeaturesFor,
    ) -> Vec<(PackageId, Vec<(&'a Dependency, FeaturesFor)>)> {
        // Helper for determining if a platform is activated.
        let platform_activated = |dep: &Dependency| -> bool {
            // We always count platforms as activated if the target stems from an artifact
            // dependency's target specification. This triggers in conjunction with
            // `[target.'cfg(…)'.dependencies]` manifest sections.
            match (dep.is_build(), fk) {
                (true, _) | (_, FeaturesFor::HostDep) => {
                    // We always care about build-dependencies, and they are always
                    // Host. If we are computing dependencies "for a build script",
                    // even normal dependencies are host-only.
                    self.target_data
                        .dep_platform_activated(dep, CompileKind::Host)
                }
                (_, FeaturesFor::NormalOrDevOrArtifactTarget(None)) => self
                    .requested_targets
                    .iter()
                    .any(|kind| self.target_data.dep_platform_activated(dep, *kind)),
                (_, FeaturesFor::NormalOrDevOrArtifactTarget(Some(target))) => self
                    .target_data
                    .dep_platform_activated(dep, CompileKind::Target(target)),
            }
        };
        self.resolve
            .deps(pkg_id)
            .map(|(dep_id, deps)| {
                let deps = deps
                    .iter()
                    .filter(|dep| {
                        // Drop deps whose platform is inactive (when enabled)
                        // and dev-deps when dev decoupling is on.
                        if dep.platform().is_some()
                            && self.opts.ignore_inactive_targets
                            && !platform_activated(dep)
                        {
                            return false;
                        }
                        if self.opts.decouple_dev_deps && dep.kind() == DepKind::Development {
                            return false;
                        }
                        true
                    })
                    .flat_map(|dep| {
                        // Each `dep`endency can be built for multiple targets. For one, it
                        // may be a library target which is built as initially configured
                        // by `fk`. If it appears as build dependency, it must be built
                        // for the host.
                        //
                        // It may also be an artifact dependency,
                        // which could be built either
                        //
                        //  - for a specified (aka 'forced') target, specified by
                        //    `dep = { …, target = <triple>` }`
                        //  - as an artifact for use in build dependencies that should
                        //    build for whichever `--target`s are specified
                        //  - like a library would be built
                        //
                        // Generally, the logic for choosing a target for dependencies is
                        // unaltered and used to determine how to build non-artifacts,
                        // artifacts without target specification and no library,
                        // or an artifacts library.
                        //
                        // All this may result in a dependency being built multiple times
                        // for various targets which are either specified in the manifest
                        // or on the cargo command-line.
                        let lib_fk = if fk == FeaturesFor::default() {
                            (self.track_for_host && (dep.is_build() || self.is_proc_macro(dep_id)))
                                .then(|| FeaturesFor::HostDep)
                                .unwrap_or_default()
                        } else {
                            fk
                        };
                        // `artifact_target_keys` are produced to fulfil the needs of artifacts that have a target specification.
                        let artifact_target_keys = dep.artifact().map(|artifact| {
                            (
                                artifact.is_lib(),
                                artifact.target().map(|target| match target {
                                    ArtifactTarget::Force(target) => {
                                        vec![FeaturesFor::NormalOrDevOrArtifactTarget(Some(target))]
                                    }
                                    // `--target` selections become artifact targets;
                                    // plain host builds contribute no key here.
                                    ArtifactTarget::BuildDependencyAssumeTarget => self
                                        .requested_targets
                                        .iter()
                                        .filter_map(|kind| match kind {
                                            CompileKind::Host => None,
                                            CompileKind::Target(target) => {
                                                Some(FeaturesFor::NormalOrDevOrArtifactTarget(
                                                    Some(*target),
                                                ))
                                            }
                                        })
                                        .collect(),
                                }),
                            )
                        });
                        let dep_fks = match artifact_target_keys {
                            // The artifact is also a library and does specify custom
                            // targets.
                            // The library's feature key needs to be used alongside
                            // the keys artifact targets.
                            Some((is_lib, Some(mut dep_fks))) if is_lib => {
                                dep_fks.push(lib_fk);
                                dep_fks
                            }
                            // The artifact is not a library, but does specify
                            // custom targets.
                            // Use only these targets feature keys.
                            Some((_, Some(dep_fks))) => dep_fks,
                            // There is no artifact in the current dependency
                            // or there is no target specified on the artifact.
                            // Use the standard feature key without any alteration.
                            Some((_, None)) | None => vec![lib_fk],
                        };
                        dep_fks.into_iter().map(move |dep_fk| (dep, dep_fk))
                    })
                    .collect::<Vec<_>>();
                (dep_id, deps)
            })
            .filter(|(_id, deps)| !deps.is_empty())
            .collect()
    }
    /// Compare the activated features to the resolver. Used for testing.
    fn compare(&self) {
        let mut found = false;
        for ((pkg_id, dep_kind), features) in &self.activated_features {
            let r_features = self.resolve.features(*pkg_id);
            if !r_features.iter().eq(features.iter()) {
                crate::drop_eprintln!(
                    self.ws.config(),
                    "{}/{:?} features mismatch\nresolve: {:?}\nnew: {:?}\n",
                    pkg_id,
                    dep_kind,
                    r_features,
                    features
                );
                found = true;
            }
        }
        if found {
            panic!("feature mismatch");
        }
    }
    /// Whether the package is a proc-macro. May trigger a package download
    /// (panics if the package has not been downloaded).
    fn is_proc_macro(&self, package_id: PackageId) -> bool {
        self.package_set
            .get_one(package_id)
            .expect("packages downloaded")
            .proc_macro()
    }
}
| 40.364035 | 129 | 0.542486 |
624ae83c80d3acfd7705bbb70fe0a3f783e96982 | 3,847 | use crate::grid::GridResult;
use opengl_graphics::{Texture, TextureSettings};
use regex::Regex;
use std::path::PathBuf;
/// Loads image files from disk into OpenGL textures, with optional
/// include/exclude regex filters and a cap on how many images to load.
pub struct ImageLoader {
    /// Regex patterns; any path matching one of these is skipped.
    must_not_match: Vec<String>,
    /// Regex patterns; a path must match every one of these to be loaded.
    must_match: Vec<String>,
    /// Maximum number of images to load (`None` = unlimited).
    max_count: Option<usize>,
    //images: Receiver<image::ImageBuffer>,
}
impl ImageLoader {
pub fn new() -> ImageLoader {
ImageLoader {
must_not_match: Vec::new(),
must_match: Vec::new(),
max_count: None,
}
}
pub fn filter(&mut self, filter: &str) {
self.must_not_match.push(filter.to_owned());
}
pub fn only(&mut self, only: &str) {
self.must_match.push(only.to_owned());
}
pub fn max(&mut self, max: usize) {
self.max_count = Some(max);
}
pub fn load_all(&self, path: PathBuf) -> GridResult<(Vec<String>, Vec<Texture>)> {
let files = path
.read_dir()?
.filter(Result::is_ok)
.map(|f| f.unwrap().path())
.collect();
self.load_files(files)
}
pub fn load_files(&self, files: Vec<PathBuf>) -> GridResult<(Vec<String>, Vec<Texture>)> {
let mut loaded_files = Vec::new();
let mut images = Vec::new();
let mut count = 0;
let must_not_match: Vec<Regex> = self
.must_not_match
.iter()
.map(|f| Regex::new(f).expect(format!("Regex error for 'filter': {}", f).as_str()))
.collect();
let must_match: Vec<Regex> = self
.must_match
.iter()
.map(|f| Regex::new(f).expect(format!("Regex error for 'only': {}", f).as_str()))
.collect();
'fileloop: for file in files {
// Is there a way to do this more concisely?
if let Some(max) = self.max_count {
if count >= max {
break;
}
}
//println!("{:?}", &file);
// refactor to resize(ctx, image, max_x, max_y)
if file.is_dir() {
continue;
}
let filestr = file
.to_str()
.expect("Unable to convert image filename to str");
for regex in &must_match {
if !regex.is_match(&filestr) {
continue 'fileloop;
}
}
for regex in &must_not_match {
//println!("{}, {:?}", &filestr, regex);
if regex.is_match(&filestr) {
continue 'fileloop;
}
}
let image = self.load(&file);
match image {
Ok(i) => {
count += 1;
loaded_files.push(file.to_str().unwrap().to_owned());
images.push(i);
}
Err(err) => eprintln!("{}: {}", file.display(), err),
}
}
Ok((loaded_files, images))
}
fn load(&self, file: &PathBuf) -> GridResult<Texture> {
let contents = std::fs::read(file).expect("Unable to read file");
let img = image::load_from_memory(&contents)?;
let img = match img {
image::DynamicImage::ImageRgba8(img) => img,
x => x.to_rgba(),
};
// TODO: Uncomment once full size display is working
// Resize to reduce GPU memory consumption
// let scale = f32::min(
// 200.0 as f32 / img.width() as f32,
// 200.0 as f32 / img.height() as f32,
// );
// let img = image::imageops::resize(
// &img,
// (img.width() as f32 * scale) as u32,
// (img.height() as f32 * scale) as u32,
// image::imageops::FilterType::Gaussian,
// );
Ok(Texture::from_image(&img, &TextureSettings::new()))
}
}
| 32.327731 | 95 | 0.483234 |
db29ff847ba12534bfa9c51ad9ed9a52f7b3bccf | 4,608 | //! Creation and usage of unnamed pipes.
//!
//! The distinction between named and unnamed pipes is concisely expressed by their names: where named pipes have names, unnamed pipes have handles. This can both be useful or problematic, depending on the use case. Unnamed pipes work best when a child process is used. With the fork model on Unix-like systems, the handle can be transferred to the child process thanks to the cloned address space; on Windows, inheritable handles can be used.
//!
//! Another way to use unnamed pipes is to use a named pipe and/or a Unix domain socket to establish an unnamed pipe connection. It just so happens that this crate supports all three.
impmod! {unnamed_pipe,
UnnamedPipeReader as UnnamedPipeReaderImpl,
UnnamedPipeWriter as UnnamedPipeWriterImpl,
pipe as pipe_impl,
}
use std::{
fmt::{self, Formatter},
io::{self, Read, Write},
};
/// Creates a new pipe with the default creation settings and returns the handles to its writing end and reading end.
///
/// Note the tuple order: the writing end comes first, then the reading end.
///
/// The platform-specific builders in the `os` module of the crate might be more helpful if a configuration process for the pipe is needed.
#[inline]
pub fn pipe() -> io::Result<(UnnamedPipeWriter, UnnamedPipeReader)> {
    // Delegates to the platform-specific implementation chosen by `impmod!`.
    pipe_impl()
}
/// A handle to the reading end of an unnamed pipe, created by the [`pipe`] function together with the [writing end].
///
/// The core functionality is exposed in a file-like [`Read`] interface. On Windows, the [`ShareHandle`] and [`As-`][`AsRawHandle`]/[`Into-`][`IntoRawHandle`]/[`FromRawHandle`] traits are also implemented, along with [`As-`][`AsRawFd`]/[`Into-`][`IntoRawFd`]/[`FromRawFd`] on Unix.
///
/// [`pipe`]: fn.pipe.html " "
/// [writing end]: struct.UnnamedPipeWriter.html " "
/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html " "
/// [`ShareHandle`]: ../os/windows/trait.ShareHandle.html " "
/// [`AsRawHandle`]: https://doc.rust-lang.org/std/os/windows/io/trait.AsRawHandle.html " "
/// [`IntoRawHandle`]: https://doc.rust-lang.org/std/os/windows/io/trait.IntoRawHandle.html " "
/// [`FromRawHandle`]: https://doc.rust-lang.org/std/os/windows/io/trait.FromRawHandle.html " "
/// [`AsRawFd`]: https://doc.rust-lang.org/std/os/unix/io/trait.AsRawFd.html " "
/// [`IntoRawFd`]: https://doc.rust-lang.org/std/os/unix/io/trait.IntoRawFd.html " "
/// [`FromRawFd`]: https://doc.rust-lang.org/std/os/unix/io/trait.FromRawFd.html " "
pub struct UnnamedPipeReader {
    // pub(crate) to allow the platform specific builders to create the public-facing pipe types
    /// The platform-specific reader implementation this wrapper delegates to.
    pub(crate) inner: UnnamedPipeReaderImpl,
}
impl Read for UnnamedPipeReader {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
}
impl fmt::Debug for UnnamedPipeReader {
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.inner, f)
}
}
impl_handle_manip!(UnnamedPipeReader);
/// A handle to the writing end of an unnamed pipe, created by the [`pipe`] function together with the [reading end].
///
/// The core functionality is exposed in a file-like [`Write`] interface. On Windows, the [`ShareHandle`] and [`As-`][`AsRawHandle`]/[`Into-`][`IntoRawHandle`]/[`FromRawHandle`] traits are also implemented, along with [`As-`][`AsRawFd`]/[`Into-`][`IntoRawFd`]/[`FromRawFd`] on Unix.
///
/// [`pipe`]: fn.pipe.html " "
/// [reading end]: struct.UnnamedPipeReader.html " "
/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html " "
/// [`ShareHandle`]: ../os/windows/trait.ShareHandle.html " "
/// [`AsRawHandle`]: https://doc.rust-lang.org/std/os/windows/io/trait.AsRawHandle.html " "
/// [`IntoRawHandle`]: https://doc.rust-lang.org/std/os/windows/io/trait.IntoRawHandle.html " "
/// [`FromRawHandle`]: https://doc.rust-lang.org/std/os/windows/io/trait.FromRawHandle.html " "
/// [`AsRawFd`]: https://doc.rust-lang.org/std/os/unix/io/trait.AsRawFd.html " "
/// [`IntoRawFd`]: https://doc.rust-lang.org/std/os/unix/io/trait.IntoRawFd.html " "
/// [`FromRawFd`]: https://doc.rust-lang.org/std/os/unix/io/trait.FromRawFd.html " "
pub struct UnnamedPipeWriter {
    /// The platform-specific writer implementation this wrapper delegates to.
    pub(crate) inner: UnnamedPipeWriterImpl,
}
impl Write for UnnamedPipeWriter {
#[inline]
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
self.inner.write(data)
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl fmt::Debug for UnnamedPipeWriter {
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.inner, f)
}
}
impl_handle_manip!(UnnamedPipeWriter);
| 50.637363 | 443 | 0.685981 |
26b8de6389699ecdd3b750509e7ddfe4996c5e83 | 1,611 | #[doc = "Register `WDOGMIS` reader"]
pub struct R(crate::R<WDOGMIS_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<WDOGMIS_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
// Conversion from the generic reader value into this register's reader.
impl From<crate::R<WDOGMIS_SPEC>> for R {
    #[inline(always)]
    fn from(reader: crate::R<WDOGMIS_SPEC>) -> Self {
        R(reader)
    }
}
#[doc = "Field `INTERRUPT` reader - Masked Interrupt Status"]
// Single-bit field reader (bit 0 of WDOGMIS), wrapping the generic
// boolean FieldReader.
pub struct INTERRUPT_R(crate::FieldReader<bool, bool>);
impl INTERRUPT_R {
    // Crate-internal constructor used by `R::interrupt` below.
    #[inline(always)]
    pub(crate) fn new(bits: bool) -> Self {
        INTERRUPT_R(crate::FieldReader::new(bits))
    }
}
// Deref exposes the generic field-reader helpers on the field type.
impl core::ops::Deref for INTERRUPT_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl R {
    #[doc = "Bit 0 - Masked Interrupt Status"]
    #[inline(always)]
    pub fn interrupt(&self) -> INTERRUPT_R {
        // Extract bit 0 of the raw 32-bit register value.
        INTERRUPT_R::new((self.bits & 0x01) != 0)
    }
}
#[doc = "Interrupt status\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [wdogmis](index.html) module"]
// Zero-sized marker type describing the WDOGMIS register.
pub struct WDOGMIS_SPEC;
// The register is accessed as a 32-bit value.
impl crate::RegisterSpec for WDOGMIS_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [wdogmis::R](R) reader structure"]
// Only `Readable` is implemented in this file; no `Writable` impl appears,
// consistent with a read-only status register.
impl crate::Readable for WDOGMIS_SPEC {
    type Reader = R;
}
#[doc = "`reset()` method sets WDOGMIS to value 0"]
impl crate::Resettable for WDOGMIS_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        // Hardware reset value of the register.
        0
    }
}
| 29.833333 | 226 | 0.630043 |
22249040048c4899b900d34550d6d7aeab6e215e | 84,114 | /* tag::catalog[]
Title:: Integration tests for ic-rosetta-api
Goal:: Among others, demonstrate that we pass rosetta-cli verification tests
Runbook::
. Setup a ledger canister with prefunded accounts
. Run ic-rosetta-api
. Check that the ledger canister can be accessed through ic-rosetta-api
. Verify that balances reported by rosetta-api match balances in the ledger
. Run more specific tests (account_derive, make a transaction)
. Run rosetta-cli check:construction test scenarios
. Run rosetta-cli check:data test scenarios
end::catalog[] */
use assert_json_diff::{assert_json_eq, assert_json_include};
use ic_nns_common::pb::v1::NeuronId;
use ic_nns_governance::pb::v1::neuron::DissolveState;
use ic_rosetta_api::models::{ConstructionPayloadsResponse, NeuronState, Object, PublicKey};
use ic_rosetta_api::time::Seconds;
use ledger_canister::{
protobuf::TipOfChainRequest, AccountBalanceArgs, AccountIdentifier, ArchiveOptions,
BlockHeight, Certification, LedgerCanisterInitPayload, Operation, Subaccount, TipOfChainRes,
Tokens, DEFAULT_TRANSFER_FEE,
};
use canister_test::{Canister, RemoteTestRuntime, Runtime};
use dfn_protobuf::protobuf;
use ed25519_dalek::Signer;
use ic_canister_client::Sender;
use ic_fondue::{ic_instance::InternetComputer, ic_manager::IcHandle};
use ic_nns_constants::{GOVERNANCE_CANISTER_ID, LEDGER_CANISTER_ID, REGISTRY_CANISTER_ID};
use ic_nns_governance::governance::compute_neuron_staking_subaccount;
use ic_nns_governance::pb::v1::{Governance, NetworkEconomics, Neuron};
use ic_nns_test_utils::itest_helpers::{set_up_governance_canister, set_up_ledger_canister};
use ic_registry_subnet_type::SubnetType;
use ic_rosetta_api::convert::{
from_hex, from_model_account_identifier, neuron_account_from_public_key,
neuron_subaccount_bytes_from_public_key, to_hex, to_model_account_identifier,
};
use ic_rosetta_api::request_types::{
AddHotKey, Disburse, MergeMaturity, PublicKeyOrPrincipal, Request, RequestResult,
SetDissolveTimestamp, Spawn, Stake, StartDissolve, Status, StopDissolve,
};
use ic_rosetta_test_utils::{
acc_id, assert_canister_error, assert_ic_error, do_multiple_txn, do_txn, make_user,
prepare_txn, rosetta_api_serv::RosettaApiHandle, send_icpts, sign_txn, to_public_key,
EdKeypair, RequestInfo,
};
use ic_types::{messages::Blob, CanisterId, PrincipalId};
use lazy_static::lazy_static;
use serde_json::{json, Value};
use slog::info;
use slog::Logger;
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use url::Url;
lazy_static! {
    // Custom transfer fee (in e8s) configured on the standalone test
    // ledger; the governance-facing ledger uses DEFAULT_TRANSFER_FEE.
    static ref FEE: Tokens = Tokens::from_e8s(1_000);
}
/// Test topology: a single fast node on one System subnet.
pub fn config() -> InternetComputer {
    InternetComputer::new().add_fast_single_node_subnet(SubnetType::System)
    //.add_subnet(Subnet::new(SubnetType::System).add_nodes(2))
}
/// No changes to the IC environment
///
/// End-to-end exercise of ic-rosetta-api: installs the governance canister
/// and two ledger canisters (one whitelisting governance with the default
/// transfer fee, one with the custom `FEE`), starts rosetta-api against
/// them, and runs transfer, neuron-management and staking scenarios plus
/// the external rosetta-cli check:construction / check:data suites.
pub fn test_everything(handle: IcHandle, ctx: &ic_fondue::pot::Context) {
    // The minting account is the governance canister's default account.
    let minting_address = AccountIdentifier::new(GOVERNANCE_CANISTER_ID.get(), None);
    let (acc_a, kp_a, _pk_a, _pid_a) = make_user(100);
    let kp_a = Arc::new(kp_a);
    let (acc_b, kp_b, _pk_b, _pid_b) = make_user(101);
    let kp_b = Arc::new(kp_b);
    let mut ledger_balances = HashMap::new();
    let acc1 = hex2addr("35548ec29e9d85305850e87a2d2642fe7214ff4bb36334070deafc3345c3b127");
    let acc2 = hex2addr("42a3eb61d549dc9fe6429ce2361ec60a569b8befe43eb15a3fc5c88516711bc5");
    let acc3 = hex2addr("eaf407f7fa3770edb621ce920f6c83cefb63df333044d1cdcd2a300ceb85cb1c");
    let acc4 = hex2addr("ba5b33d11f93033ba45b0a0136d4f7f6310ee482cfb1cfebdb4cea55f4aeda17");
    let acc5 = hex2addr("776ab0ef12a63f5b1bd605f202b1b5cefeaf5791c0241c773fc8e76a6c4a8b40");
    let acc6 = hex2addr("88bf52d6380bf2ed7b5fd4010afd145dc351cbf386def9b9be017bbeb640a919");
    let acc7 = hex2addr("92c9c807da64528240f65ec29b58c839bf2374e9c1c38b7661da65fd8710124e");
    ledger_balances.insert(acc1, Tokens::from_e8s(100_000_000_001));
    ledger_balances.insert(acc2, Tokens::from_e8s(100_000_000_002));
    ledger_balances.insert(acc3, Tokens::from_e8s(100_000_000_003));
    ledger_balances.insert(acc4, Tokens::from_e8s(100_000_000_004));
    ledger_balances.insert(acc5, Tokens::from_e8s(100_000_000_005));
    ledger_balances.insert(acc6, Tokens::from_e8s(100_000_000_006));
    ledger_balances.insert(acc7, Tokens::from_e8s(100_000_000_007));
    ledger_balances.insert(acc_a, Tokens::from_e8s(200_000_000_000));
    ledger_balances.insert(acc_b, Tokens::new(1000, 0).unwrap());
    let one_year_from_now = 60 * 60 * 24 * 365
        + std::time::SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
    // Register the prepopulated neurons (and their funding accounts) that
    // the per-scenario neuron tests below will look up by name.
    let mut neuron_tests = NeuronTestsSetup::new(2000, ctx.logger.clone());
    neuron_tests.add(
        &mut ledger_balances,
        "Test disburse",
        rand::random(),
        |neuron| neuron.dissolve_state = Some(DissolveState::WhenDissolvedTimestampSeconds(0)),
    );
    neuron_tests.add(
        &mut ledger_balances,
        "Test raw JSON disburse",
        rand::random(),
        |neuron| neuron.dissolve_state = Some(DissolveState::WhenDissolvedTimestampSeconds(0)),
    );
    neuron_tests.add(
        &mut ledger_balances,
        "Test disburse to custom recipient",
        rand::random(),
        |neuron| neuron.dissolve_state = Some(DissolveState::WhenDissolvedTimestampSeconds(0)),
    );
    neuron_tests.add(
        &mut ledger_balances,
        "Test disburse before neuron is dissolved (fail)",
        rand::random(),
        |neuron| {
            neuron.dissolve_state = Some(DissolveState::WhenDissolvedTimestampSeconds(
                one_year_from_now,
            ))
        },
    );
    neuron_tests.add(
        &mut ledger_balances,
        "Test disburse an amount",
        rand::random(),
        |neuron| neuron.dissolve_state = Some(DissolveState::WhenDissolvedTimestampSeconds(0)),
    );
    neuron_tests.add(
        &mut ledger_balances,
        "Test disburse an amount full stake",
        rand::random(),
        |neuron| neuron.dissolve_state = Some(DissolveState::WhenDissolvedTimestampSeconds(0)),
    );
    neuron_tests.add(
        &mut ledger_balances,
        "Test disburse more than staked amount (fail)",
        rand::random(),
        |neuron| neuron.dissolve_state = Some(DissolveState::WhenDissolvedTimestampSeconds(0)),
    );
    neuron_tests.add(
        &mut ledger_balances,
        "Test set dissolve timestamp to a prior timestamp (fail)",
        rand::random(),
        |neuron| {
            neuron.dissolve_state = Some(DissolveState::WhenDissolvedTimestampSeconds(
                one_year_from_now,
            ))
        },
    );
    neuron_tests.add(
        &mut ledger_balances,
        "Test set dissolve timestamp to 5000 seconds from now",
        rand::random(),
        |neuron| neuron.dissolve_state = Some(DissolveState::WhenDissolvedTimestampSeconds(0)),
    );
    neuron_tests.add(
        &mut ledger_balances,
        "Test start dissolving neuron",
        rand::random(),
        |neuron| {
            neuron.dissolve_state = Some(DissolveState::DissolveDelaySeconds(one_year_from_now))
        },
    );
    neuron_tests.add(
        &mut ledger_balances,
        "Test re-start dissolving a dissolved neuron",
        rand::random(),
        |neuron| {
            neuron.dissolve_state = Some(DissolveState::DissolveDelaySeconds(one_year_from_now))
        },
    );
    neuron_tests.add(
        &mut ledger_balances,
        "Test add hot key",
        rand::random(),
        |_| {},
    );
    neuron_tests.add(
        &mut ledger_balances,
        "Test start dissolving neuron before delay has been set",
        rand::random(),
        |neuron| {
            neuron.dissolve_state = None;
        },
    );
    neuron_tests.add(
        &mut ledger_balances,
        "Test spawn neuron with enough maturity",
        rand::random(),
        |neuron| {
            neuron.dissolve_state = None;
            neuron.maturity_e8s_equivalent = 500_000_000;
        },
    );
    neuron_tests.add(
        &mut ledger_balances,
        "Test spawn neuron with not enough maturity",
        rand::random(),
        |neuron| {
            neuron.dissolve_state = None;
            neuron.maturity_e8s_equivalent = 4_000;
        },
    );
    neuron_tests.add(
        &mut ledger_balances,
        "Test merge all neuron maturity",
        rand::random(),
        |neuron| {
            neuron.dissolve_state = None;
            neuron.maturity_e8s_equivalent = 420_000_000;
        },
    );
    neuron_tests.add(
        &mut ledger_balances,
        "Test merge partial neuron maturity",
        rand::random(),
        |neuron| {
            neuron.dissolve_state = None;
            neuron.maturity_e8s_equivalent = 420_000_000;
        },
    );
    neuron_tests.add(
        &mut ledger_balances,
        "Test merge neuron maturity invalid",
        rand::random(),
        |neuron| {
            neuron.dissolve_state = None;
            neuron.maturity_e8s_equivalent = 420_000_000;
        },
    );
    let archive_options = ArchiveOptions {
        trigger_threshold: 8,
        num_blocks_to_archive: 4,
        node_max_memory_size_bytes: Some(1024 + 512), // about 10 blocks
        max_message_size_bytes: Some(2 * 1024 * 1024),
        controller_id: CanisterId::from_u64(876),
    };
    // Two ledgers: the governance-facing one keeps DEFAULT_TRANSFER_FEE,
    // the standalone one uses the custom FEE exercised by the fee tests.
    let ledger_canister_for_governance_payload = LedgerCanisterInitPayload::builder()
        .minting_account(minting_address)
        .initial_values(ledger_balances.clone())
        .archive_options(archive_options.clone())
        .send_whitelist(std::iter::once(GOVERNANCE_CANISTER_ID).collect())
        .transfer_fee(DEFAULT_TRANSFER_FEE)
        .build()
        .unwrap();
    let ledger_canister_payload = LedgerCanisterInitPayload::builder()
        .minting_account(minting_address)
        .initial_values(ledger_balances)
        .archive_options(archive_options)
        .send_whitelist(std::iter::once(GOVERNANCE_CANISTER_ID).collect())
        .transfer_fee(*FEE)
        .build()
        .unwrap();
    let (neurons, mut neuron_tests) = neuron_tests.neurons();
    let governance_canister_init = Governance {
        economics: Some(NetworkEconomics::with_default_values()),
        wait_for_quiet_threshold_seconds: 60 * 60 * 24 * 2, // 2 days
        short_voting_period_seconds: 60 * 60 * 12, // 12 hours
        neurons,
        ..Default::default()
    };
    let rt = tokio::runtime::Runtime::new().expect("Could not create tokio runtime.");
    rt.block_on(async move {
        let endpoint = handle.public_api_endpoints.first().expect("no endpoints");
        endpoint.assert_ready(ctx).await;
        let node_url = endpoint.url.clone();
        //let ic_agent = assert_create_agent(node_url.as_str()).await;
        let agent = ic_canister_client::Agent::new(
            node_url.clone(),
            Sender::from_keypair(&ic_test_identity::TEST_IDENTITY_KEYPAIR),
        );
        let root_key = agent.root_key().await.unwrap().unwrap();
        let remote_runtime = Runtime::Remote(RemoteTestRuntime { agent });
        // Reserve the registry canister to ensure that the governance
        // and ledger canisters have the right canister ID.
        let dummy_canister = remote_runtime.create_canister_max_cycles_with_retries().await.unwrap();
        assert_eq!(dummy_canister.canister_id(), REGISTRY_CANISTER_ID);
        info!(&ctx.logger, "Installing governance canister");
        let governance_future = set_up_governance_canister(&remote_runtime, governance_canister_init);
        let governance = governance_future.await;
        info!(&ctx.logger, "Governance canister installed");
        assert_eq!(governance.canister_id(), GOVERNANCE_CANISTER_ID);
        info!(&ctx.logger, "Installing ledger canister for governance");
        let ledger_for_governance_future = set_up_ledger_canister(&remote_runtime, ledger_canister_for_governance_payload);
        info!(&ctx.logger, "Installing ledger canister");
        let ledger_future = set_up_ledger_canister(&remote_runtime, ledger_canister_payload);
        let ledger_for_governance = ledger_for_governance_future.await;
        info!(&ctx.logger, "Ledger canister installed");
        assert_eq!(ledger_for_governance.canister_id(), LEDGER_CANISTER_ID);
        let ledger = ledger_future.await;
        info!(&ctx.logger, "Ledger canister installed");
        let balance = get_balance(&ledger, acc1).await;
        assert_eq!(balance, Tokens::from_e8s(100_000_000_001));
        let (_cert, tip_idx) = get_tip(&ledger).await;
        info!(&ctx.logger, "Starting rosetta-api");
        let mut rosetta_api_serv = RosettaApiHandle::start(
            node_url.clone(),
            8099,
            ledger.canister_id(),
            governance.canister_id(),
            workspace_path(),
            Some(&root_key),
        )
        .await;
        rosetta_api_serv.wait_for_tip_sync(tip_idx).await.unwrap();
        // smoke test first
        let net_status = rosetta_api_serv.network_status().await.unwrap().unwrap();
        assert_eq!(net_status.current_block_identifier.index as u64, tip_idx);
        let b = rosetta_api_serv.wait_for_block_at(6).await.unwrap();
        assert_eq!(b.block_identifier.index, 6);
        let br = rosetta_api_serv.block_at(6).await.unwrap().unwrap();
        assert_eq!(br.block.unwrap().block_identifier.index as u64, 6);
        let bal_resp = rosetta_api_serv.balance(acc1).await.unwrap().unwrap();
        assert_eq!(
            Tokens::from_e8s(bal_resp.balances[0].value.parse().unwrap()),
            Tokens::from_e8s(100_000_000_001)
        );
        info!(&ctx.logger, "Test metadata suggested fee");
        let metadata = rosetta_api_serv.construction_metadata(None, None).await
            .unwrap().unwrap();
        assert_eq!(metadata.suggested_fee.unwrap()[0].value, format!("{}", FEE.get_e8s()));
        // Some more advanced tests
        info!(&ctx.logger, "Test derive endpoint");
        test_derive(&rosetta_api_serv).await;
        info!(&ctx.logger, "Test make transaction");
        test_make_transaction(&rosetta_api_serv, &ledger, acc_a, Arc::clone(&kp_a)).await;
        info!(&ctx.logger, "Test wrong key");
        test_wrong_key(&rosetta_api_serv, acc_a, Arc::clone(&kp_a)).await;
        info!(&ctx.logger, "Test no funds");
        test_no_funds(&rosetta_api_serv, Arc::clone(&kp_a)).await;
        info!(&ctx.logger, "Test configurable ingress window");
        test_ingress_window(&rosetta_api_serv, Arc::clone(&kp_a)).await;
        info!(&ctx.logger, "Test multiple transfers");
        test_multiple_transfers(&rosetta_api_serv, &ledger, acc_b, Arc::clone(&kp_b)).await;
        info!(&ctx.logger, "Test multiple transfers (fail)");
        test_multiple_transfers_fail(&rosetta_api_serv, &ledger, acc_b, Arc::clone(&kp_b)).await;
        // Rosetta-cli tests
        let cli_json = PathBuf::from(format!("{}/rosetta_cli.json", workspace_path()));
        let cli_ros = PathBuf::from(format!("{}/rosetta_workflows.ros", workspace_path()));
        let conf = rosetta_api_serv.generate_rosetta_cli_config(&cli_json, &cli_ros);
        info!(&ctx.logger, "Running rosetta-cli check:construction");
        rosetta_cli_construction_check(&conf);
        info!(&ctx.logger, "check:construction finished successfully");
        info!(&ctx.logger, "Running rosetta-cli check:data");
        rosetta_cli_data_check(&conf);
        info!(&ctx.logger, "check:data finished successfully");
        // Finish up. (calling stop is optional because it would be called on drop, but
        // this way it's more explicit what is happening)
        rosetta_api_serv.stop();
        let (_cert, tip_idx) = get_tip(&ledger).await;
        info!(&ctx.logger, "Starting rosetta-api again to see if it properly fetches blocks in batches from all the archives");
        let mut rosetta_api_serv = RosettaApiHandle::start(
            node_url.clone(),
            8101,
            ledger.canister_id(),
            governance.canister_id(),
            workspace_path(),
            Some(&root_key),
        ).await;
        rosetta_api_serv.wait_for_tip_sync(tip_idx).await.unwrap();
        let net_status = rosetta_api_serv.network_status().await.unwrap().unwrap();
        assert_eq!(net_status.current_block_identifier.index as u64, tip_idx, "Newly started rosetta-api did not fetch all the blocks from the ledger properly");
        rosetta_api_serv.stop();
        // this test starts rosetta-api with wrong canister id
        // theoretically it can run together with the previous rosetta_api
        // but we stopped the previous one to be on the safe side and
        // avoid potential problems unrelated to this test
        info!(
            &ctx.logger,
            "Test wrong canister id (expected rosetta-api sync errors in logs)"
        );
        test_wrong_canister_id(node_url.clone(), None).await;
        info!(&ctx.logger, "Test wrong canister id finished");
        let (_cert, tip_idx) = get_tip(&ledger_for_governance).await;
        info!(&ctx.logger, "Starting rosetta-api with default fee");
        let mut rosetta_api_serv = RosettaApiHandle::start(
            node_url,
            8100,
            ledger_for_governance.canister_id(),
            governance.canister_id(),
            workspace_path(),
            Some(&root_key),
        )
        .await;
        rosetta_api_serv.wait_for_tip_sync(tip_idx).await.unwrap();
        info!(&ctx.logger, "Neuron management tests");
        // Test against prepopulated neurons
        let NeuronInfo {account_id, key_pair, neuron_subaccount_identifier, neuron, ..} = neuron_tests.get_neuron_for_test("Test disburse");
        test_disburse(&rosetta_api_serv, &ledger_for_governance, account_id, key_pair.into(), neuron_subaccount_identifier, None, None, &neuron).await.unwrap();
        // Test against prepopulated neurons (raw)
        let NeuronInfo {account_id, key_pair, neuron_subaccount_identifier, neuron, ..} = neuron_tests.get_neuron_for_test("Test raw JSON disburse");
        test_disburse_raw(&rosetta_api_serv, &ledger_for_governance, account_id, key_pair.into(), neuron_subaccount_identifier, None, None, &neuron).await.unwrap();
        let NeuronInfo {account_id, key_pair, neuron_subaccount_identifier, neuron, ..} = neuron_tests.get_neuron_for_test("Test disburse to custom recipient");
        let (recipient, _, _, _) = make_user(102);
        test_disburse(&rosetta_api_serv, &ledger_for_governance, account_id, key_pair.into(), neuron_subaccount_identifier, None, Some(recipient), &neuron).await.unwrap();
        let NeuronInfo {account_id, key_pair, neuron_subaccount_identifier, neuron, ..} = neuron_tests.get_neuron_for_test("Test disburse before neuron is dissolved (fail)");
        test_disburse(&rosetta_api_serv, &ledger_for_governance, account_id, key_pair.into(), neuron_subaccount_identifier, None, None, &neuron).await.unwrap_err();
        let NeuronInfo {account_id, key_pair, neuron_subaccount_identifier, neuron, ..} = neuron_tests.get_neuron_for_test("Test disburse an amount");
        test_disburse(&rosetta_api_serv, &ledger_for_governance, account_id, key_pair.into(), neuron_subaccount_identifier, Some(Tokens::new(5, 0).unwrap()), None, &neuron).await.unwrap();
        let NeuronInfo {account_id, key_pair, neuron_subaccount_identifier, neuron, ..} = neuron_tests.get_neuron_for_test("Test disburse an amount full stake");
        test_disburse(&rosetta_api_serv, &ledger_for_governance, account_id, key_pair.into(), neuron_subaccount_identifier, Some(Tokens::new(10, 0).unwrap()), None, &neuron).await.unwrap();
        let NeuronInfo {account_id, key_pair, neuron_subaccount_identifier, neuron, ..} = neuron_tests.get_neuron_for_test("Test disburse more than staked amount (fail)");
        test_disburse(&rosetta_api_serv, &ledger_for_governance, account_id, key_pair.into(), neuron_subaccount_identifier, Some(Tokens::new(11, 0).unwrap()), None, &neuron).await.unwrap_err();
        let NeuronInfo {account_id, key_pair, neuron_subaccount_identifier, ..} = neuron_tests.get_neuron_for_test("Test set dissolve timestamp to a prior timestamp (fail)");
        test_set_dissolve_timestamp_in_the_past_fail(&rosetta_api_serv, account_id, key_pair.into(), neuron_subaccount_identifier).await;
        let NeuronInfo {account_id, key_pair, neuron_subaccount_identifier, ..} = neuron_tests.get_neuron_for_test("Test set dissolve timestamp to 5000 seconds from now");
        let timestamp = Seconds::from(std::time::SystemTime::now() + Duration::from_secs(5000));
        let key_pair = Arc::new(key_pair);
        test_set_dissolve_timestamp(&rosetta_api_serv, account_id, key_pair.clone(), timestamp, neuron_subaccount_identifier).await;
        info!(&ctx.logger, "Test set dissolve timestamp to 5 seconds from now again");
        test_set_dissolve_timestamp(&rosetta_api_serv, account_id, key_pair.clone(), timestamp, neuron_subaccount_identifier).await;
        // Note that this is an incorrect usage, but no error is returned.
        // we would like this to fail, but to make the above case work we have to swallow these errors.
        info!(&ctx.logger, "Test set dissolve timestamp to less than it's currently set to (we would like this to fail)");
        test_set_dissolve_timestamp(&rosetta_api_serv, account_id, key_pair.clone(), timestamp, neuron_subaccount_identifier).await;
        info!(&ctx.logger, "Test set dissolve timestamp to a prior timestamp (fail)");
        test_set_dissolve_timestamp_in_the_past_fail(&rosetta_api_serv, account_id, key_pair.clone(), neuron_subaccount_identifier).await;
        let NeuronInfo {account_id, key_pair, public_key, neuron_subaccount_identifier, neuron_account, ..} = neuron_tests.get_neuron_for_test("Test start dissolving neuron");
        let key_pair = Arc::new(key_pair);
        test_start_dissolve(&rosetta_api_serv, account_id, key_pair.clone(), neuron_subaccount_identifier).await.unwrap();
        let neuron_info = rosetta_api_serv.account_balance_neuron(neuron_account, None, Some((public_key, neuron_subaccount_identifier)), false).await.unwrap().unwrap().metadata.unwrap();
        assert_eq!(neuron_info.state, NeuronState::Dissolving);
        info!(&ctx.logger, "Test start dissolving neuron again");
        test_start_dissolve(&rosetta_api_serv, account_id, key_pair.clone(), neuron_subaccount_identifier).await.unwrap();
        info!(&ctx.logger, "Test stop dissolving neuron");
        test_stop_dissolve(&rosetta_api_serv, account_id, key_pair.clone(), neuron_subaccount_identifier).await.unwrap();
        info!(&ctx.logger, "Test stop dissolving neuron again");
        test_stop_dissolve(&rosetta_api_serv, account_id, key_pair.clone(), neuron_subaccount_identifier).await.unwrap();
        info!(&ctx.logger, "Test restart dissolving neuron");
        test_start_dissolve(&rosetta_api_serv, account_id, key_pair.clone(), neuron_subaccount_identifier).await.unwrap();
        let NeuronInfo {account_id, key_pair, neuron_subaccount_identifier, ..} = neuron_tests.get_neuron_for_test("Test re-start dissolving a dissolved neuron");
        let key_pair = Arc::new(key_pair);
        test_start_dissolve(&rosetta_api_serv, account_id, key_pair.clone(), neuron_subaccount_identifier).await.unwrap();
        info!(&ctx.logger, "Test stop dissolving a dissolved neuron");
        test_stop_dissolve(&rosetta_api_serv, account_id, key_pair.clone(), neuron_subaccount_identifier).await.unwrap();
        let NeuronInfo {account_id, key_pair, neuron_subaccount_identifier, ..} = neuron_tests.get_neuron_for_test("Test add hot key");
        let key_pair = Arc::new(key_pair);
        test_add_hot_key(&rosetta_api_serv, account_id, key_pair.clone(), neuron_subaccount_identifier).await.unwrap();
        test_start_dissolve(&rosetta_api_serv, account_id, key_pair, neuron_subaccount_identifier).await.unwrap();
        let NeuronInfo {account_id, key_pair, neuron_subaccount_identifier, ..} = neuron_tests.get_neuron_for_test("Test start dissolving neuron before delay has been set");
        // Note that this is an incorrect usage, but no error is returned.
        // Start and Stop operations never fail, even when they have no affect.
        test_start_dissolve(&rosetta_api_serv, account_id, key_pair.into(), neuron_subaccount_identifier).await.unwrap();
        let neuron_info= neuron_tests.get_neuron_for_test("Test spawn neuron with enough maturity");
        test_spawn(&rosetta_api_serv, &ledger_for_governance, neuron_info).await;
        let neuron_info= neuron_tests.get_neuron_for_test("Test spawn neuron with not enough maturity");
        test_spawn_invalid(&rosetta_api_serv, neuron_info).await;
        let neuron_info= neuron_tests.get_neuron_for_test("Test merge all neuron maturity");
        test_merge_maturity_all(&rosetta_api_serv, &ledger_for_governance, neuron_info).await;
        let neuron_info= neuron_tests.get_neuron_for_test("Test merge partial neuron maturity");
        test_merge_maturity_partial(&rosetta_api_serv, &ledger_for_governance, neuron_info).await;
        let neuron_info= neuron_tests.get_neuron_for_test("Test merge neuron maturity invalid");
        test_merge_maturity_invalid(&rosetta_api_serv, neuron_info).await;
        info!(&ctx.logger, "Test staking");
        let _ = test_staking(&rosetta_api_serv, acc_b, Arc::clone(&kp_b)).await;
        info!(&ctx.logger, "Test staking (raw JSON)");
        let _ = test_staking_raw(&rosetta_api_serv, acc_b, Arc::clone(&kp_b)).await;
        info!(&ctx.logger, "Test staking failure");
        test_staking_failure(&rosetta_api_serv, acc_b, Arc::clone(&kp_b)).await;
        info!(&ctx.logger, "Test staking flow");
        test_staking_flow(&rosetta_api_serv, &ledger_for_governance, acc_b, Arc::clone(&kp_b), Seconds(one_year_from_now)).await;
        info!(&ctx.logger, "Test staking flow two txns");
        test_staking_flow_two_txns(&rosetta_api_serv, &ledger_for_governance, acc_b, Arc::clone(&kp_b), Seconds(one_year_from_now)).await;
        rosetta_api_serv.stop();
    });
}
/// Parses a hex-encoded ledger account identifier (test fixture helper).
///
/// Panics with a descriptive message if `a` is not valid — a bare
/// `unwrap()` previously gave no context about which fixture was broken.
fn hex2addr(a: &str) -> AccountIdentifier {
    AccountIdentifier::from_hex(a).expect("invalid hex account identifier in test fixture")
}
/// Queries the ledger canister's protobuf endpoint for `acc`'s balance.
///
/// Panics with context if the query fails (previously an uninformative
/// `unwrap()`); failure here means the test environment itself is broken.
async fn get_balance(ledger: &Canister<'_>, acc: AccountIdentifier) -> Tokens {
    let reply: Result<Tokens, String> = ledger
        .query_("account_balance_pb", protobuf, AccountBalanceArgs::new(acc))
        .await;
    reply.expect("failed to query account balance from the ledger canister")
}
/// Fetches the ledger's certified chain tip: `(certification, tip index)`.
///
/// Panics with context if the query fails (previously an uninformative
/// `unwrap()`).
async fn get_tip(ledger: &Canister<'_>) -> (Certification, BlockHeight) {
    let reply: Result<TipOfChainRes, String> = ledger
        .query_("tip_of_chain_pb", protobuf, TipOfChainRequest {})
        .await;
    let res = reply.expect("failed to query tip of chain from the ledger canister");
    (res.certification, res.tip_index)
}
// Check that derive endpoint of rosetta-api returns correct account address
async fn test_derive(ros: &RosettaApiHandle) {
    // Covers both flavors: plain ledger accounts and governance neuron
    // (sub)accounts derived from the same public-key material.
    test_derive_ledger_address(ros).await;
    test_derive_neuron_address(ros).await;
}
/// Checks that `construction/derive` maps a public key to the hex ledger
/// account address we compute locally for the same key.
async fn test_derive_ledger_address(ros: &RosettaApiHandle) {
    let (expected_account, _kp, pk, _pid) = make_user(5);
    let response = ros.construction_derive(pk).await.unwrap().unwrap();
    let derived_address = response.account_identifier.unwrap().address;
    assert_eq!(
        expected_account.to_hex(),
        derived_address,
        "Account id derived via construction/derive is different than expected"
    );
}
/// Checks that the neuron-derive endpoint produces the governance
/// subaccount we recompute by hand:
/// SHA-256(len("neuron-stake") · "neuron-stake" · principal · nonce=0u64).
async fn test_derive_neuron_address(ros: &RosettaApiHandle) {
    let (_acc, _kp, pk, pid) = make_user(6);
    let derived = ros.neuron_derive(pk).await.unwrap().unwrap();
    let account_id = derived.account_identifier.unwrap();
    // Recompute the expected staking subaccount locally.
    let domain: &[u8] = b"neuron-stake";
    let mut hasher = ic_crypto_sha::Sha256::new();
    hasher.write(&[domain.len() as u8]);
    hasher.write(domain);
    hasher.write(pid.as_slice());
    hasher.write(&[0u8; 8]);
    let subaccount_bytes = hasher.finish();
    let expected = AccountIdentifier::new(
        GOVERNANCE_CANISTER_ID.get(),
        Some(Subaccount(subaccount_bytes)),
    );
    assert_eq!(account_id, to_model_account_identifier(&expected));
}
// Make a transaction through rosetta-api and verify that it landed on the
// blockchain
async fn test_make_transaction(
    ros: &RosettaApiHandle,
    ledger: &Canister<'_>,
    acc: AccountIdentifier,
    key_pair: Arc<EdKeypair>,
) {
    let src_balance_before = get_balance(ledger, acc).await;
    let (dst_acc, _kp, _pk, _pid) = make_user(1050);
    let dst_balance_before = Tokens::from_e8s(
        ros.balance(dst_acc).await.unwrap().unwrap().balances[0]
            .value
            .parse()
            .unwrap(),
    );
    let amount = Tokens::from_e8s(1000);
    // The transfer is expected to land exactly one block past the tip.
    let tip_idx = ros
        .network_status()
        .await
        .unwrap()
        .unwrap()
        .current_block_identifier
        .index as u64;
    let expected_idx = tip_idx + 1;
    let t = Operation::Transfer {
        from: acc,
        to: dst_acc,
        amount,
        fee: *FEE,
    };
    let (tid, results, _fee) = do_txn(
        ros,
        key_pair,
        t.clone(),
        false,
        Some(one_day_from_now_nanos()),
        None,
    )
    .await
    .expect("Error during transfer operation.");
    if let Some(h) = results.last_block_index() {
        assert_eq!(h, expected_idx);
    }
    // The new block must contain exactly our transaction.
    let block = ros.wait_for_block_at(expected_idx).await.unwrap();
    assert_eq!(block.transactions.len(), 1);
    let t = block.transactions.first().unwrap();
    assert_eq!(t.transaction_identifier, tid);
    // Sender pays amount + fee; recipient receives only the amount.
    check_balance(
        ros,
        ledger,
        &acc,
        ((src_balance_before - amount).unwrap() - *FEE).unwrap(),
    )
    .await;
    check_balance(
        ros,
        ledger,
        &dst_acc,
        (dst_balance_before + amount).unwrap(),
    )
    .await;
}
/// Asserts that both rosetta-api and the ledger canister agree that `acc`
/// holds exactly `expected_balance`.
async fn check_balance(
    ros: &RosettaApiHandle,
    ledger: &Canister<'_>,
    acc: &AccountIdentifier,
    expected_balance: Tokens,
) {
    // View 1: rosetta-api's /account/balance endpoint.
    let rosetta_response = ros.balance(*acc).await.unwrap().unwrap();
    let e8s: u64 = rosetta_response.balances[0].value.parse().unwrap();
    let balance = Tokens::from_e8s(e8s);
    assert_eq!(expected_balance, balance);
    // View 2: the ledger canister itself must agree.
    let balance_from_ledger = get_balance(ledger, *acc).await;
    assert_eq!(balance_from_ledger, balance);
}
// Sign a transaction with wrong key and check if it gets rejected
async fn test_wrong_key(ros: &RosettaApiHandle, acc: AccountIdentifier, key_pair: Arc<EdKeypair>) {
    let (_acc, mismatched_kp, _wrong_pk, _pid) = make_user(1052);
    let transfer = Operation::Transfer {
        from: acc,
        to: acc_id(1051),
        amount: Tokens::from_e8s(100),
        fee: *FEE,
    };
    // Prepare the payloads with the legitimate key pair...
    let (payloads, _fee) = prepare_txn(ros, transfer, key_pair, false, None, None)
        .await
        .unwrap();
    // ...then sign them with an unrelated key pair.
    let signed = sign_txn(ros, &[Arc::new(mismatched_kp)], payloads)
        .await
        .unwrap()
        .signed_transaction()
        .unwrap();
    let err = ros.construction_submit(signed).await.unwrap().unwrap_err();
    assert_ic_error(&err, 740, 403, "does not match the public key");
}
/// Exercises the "not enough funds" error path: funds one user, lets it
/// spend part of the balance, then attempts transfers (overdraft and from
/// an empty account) that the ledger canister must reject.
async fn test_no_funds(ros: &RosettaApiHandle, funding_key_pair: Arc<EdKeypair>) {
    let (acc1, keypair1, _, _) = make_user(9275456);
    let keypair1 = Arc::new(keypair1);
    let acc2 = acc_id(598620493);
    // charge up user1
    let (_, bh, _) = send_icpts(
        ros,
        funding_key_pair,
        acc1,
        (Tokens::from_e8s(10_000) + *FEE).unwrap(),
    )
    .await
    .unwrap();
    ros.wait_for_tip_sync(bh.unwrap()).await.unwrap();
    // Transfer some funds from user1 to user2
    let (_, bh, _) = send_icpts(ros, Arc::clone(&keypair1), acc2, Tokens::from_e8s(1000))
        .await
        .unwrap();
    ros.wait_for_tip_sync(bh.unwrap()).await.unwrap();
    // Try to transfer more. This should fail with an error from the canister.
    let err = send_icpts(ros, keypair1, acc2, Tokens::from_e8s(10_000))
        .await
        .unwrap_err();
    assert_canister_error(&err, 750, "account doesn't have enough funds");
    // and now try to make a transfer from an empty account
    let (_, empty_acc_kp, _, _) = make_user(434561);
    let err = send_icpts(ros, Arc::new(empty_acc_kp), acc2, Tokens::from_e8s(100))
        .await
        .unwrap_err();
    assert_canister_error(&err, 750, "account doesn't have enough funds");
}
/// Verifies transaction deduplication within the ingress window: a
/// transfer re-submitted with the same `created_at` timestamp is rejected
/// by the ledger as a duplicate.
async fn test_ingress_window(ros: &RosettaApiHandle, funding_key_pair: Arc<EdKeypair>) {
    let (acc1, _keypair1, _, _) = make_user(42);
    let now = ic_types::time::current_time();
    let expiry = now + Duration::from_secs(24 * 60 * 60);
    // charge up user1
    let (_, bh, _) = ic_rosetta_test_utils::send_icpts_with_window(
        ros,
        Arc::clone(&funding_key_pair),
        acc1,
        Tokens::from_e8s(10_000),
        Some(expiry.as_nanos_since_unix_epoch()),
        Some(now.as_nanos_since_unix_epoch()),
    )
    .await
    .unwrap();
    ros.wait_for_tip_sync(bh.unwrap()).await.unwrap();
    // do the same transaction again; this should be rejected
    // note that we pass the same created_at value to get the same
    // transaction
    let err = ic_rosetta_test_utils::send_icpts_with_window(
        ros,
        funding_key_pair,
        acc1,
        Tokens::from_e8s(10_000),
        None,
        Some(now.as_nanos_since_unix_epoch()),
    )
    .await
    .unwrap_err();
    assert_canister_error(&err, 750, "transaction is a duplicate");
}
/// Points rosetta-api at a non-existent canister id and checks that a
/// transfer attempt fails with the IC's "canister does not exist" error.
async fn test_wrong_canister_id(node_url: Url, root_key_blob: Option<&Blob>) {
    let (_acc1, kp, _pk, pid) = make_user(1);
    // Reuse a freshly generated principal as a (non-existent) canister id.
    let bogus_canister_id = CanisterId::new(pid).unwrap();
    let ros = RosettaApiHandle::start(
        node_url,
        8101,
        bogus_canister_id,
        bogus_canister_id,
        workspace_path(),
        root_key_blob,
    )
    .await;
    let err = send_icpts(&ros, Arc::new(kp), acc_id(2), Tokens::from_e8s(1000))
        .await
        .unwrap_err();
    assert_ic_error(&err, 740, 404, "Requested canister does not exist");
}
/// Test doing multiple transfers in a single submit call
///
/// Chains three transfers (acc -> dst1 -> dst2 -> dst3) in one signed
/// submission and checks that all three land — each in its own ledger
/// block — under a single Rosetta transaction identifier.
async fn test_multiple_transfers(
    ros: &RosettaApiHandle,
    ledger: &Canister<'_>,
    acc: AccountIdentifier,
    key_pair: Arc<EdKeypair>,
) {
    let (dst_acc1, dst_acc1_kp, _pk, _pid) = make_user(1100);
    let (dst_acc2, dst_acc2_kp, _pk, _pid) = make_user(1101);
    let (dst_acc3, _kp, _pk, _pid) = make_user(1102);
    let amount1 = Tokens::new(3, 0).unwrap();
    let amount2 = Tokens::new(2, 0).unwrap();
    let amount3 = Tokens::new(1, 0).unwrap();
    let tip_idx = ros
        .network_status()
        .await
        .unwrap()
        .unwrap()
        .current_block_identifier
        .index as u64;
    // Three transfers -> three new ledger blocks past the current tip.
    let expected_idx = tip_idx + 3;
    let (tid, results, _fee) = do_multiple_txn(
        ros,
        &[
            RequestInfo {
                request: Request::Transfer(Operation::Transfer {
                    from: acc,
                    to: dst_acc1,
                    amount: amount1,
                    fee: *FEE,
                }),
                sender_keypair: Arc::clone(&key_pair),
            },
            RequestInfo {
                request: Request::Transfer(Operation::Transfer {
                    from: dst_acc1,
                    to: dst_acc2,
                    amount: amount2,
                    fee: *FEE,
                }),
                sender_keypair: Arc::new(dst_acc1_kp),
            },
            RequestInfo {
                request: Request::Transfer(Operation::Transfer {
                    from: dst_acc2,
                    to: dst_acc3,
                    amount: amount3,
                    fee: *FEE,
                }),
                sender_keypair: Arc::new(dst_acc2_kp),
            },
        ],
        false,
        Some(one_day_from_now_nanos()),
        None,
    )
    .await
    .unwrap();
    if let Some(h) = results.last_block_index() {
        assert_eq!(h, expected_idx);
    }
    let block = ros.wait_for_block_at(expected_idx).await.unwrap();
    assert_eq!(block.transactions.len(), 1);
    let t = block.transactions.first().unwrap();
    assert_eq!(t.transaction_identifier, tid);
    // Intermediate accounts keep what they received minus what they sent
    // onward and the per-transfer fee.
    check_balance(
        ros,
        ledger,
        &dst_acc1,
        ((amount1 - amount2).unwrap() - *FEE).unwrap(),
    )
    .await;
    check_balance(
        ros,
        ledger,
        &dst_acc2,
        ((amount2 - amount3).unwrap() - *FEE).unwrap(),
    )
    .await;
    check_balance(ros, ledger, &dst_acc3, amount3).await;
}
/// Test part of a multiple transfer failing. This is not atomic.
///
/// The second transfer in the batch asks for 100_000 ICP, far more than the
/// debit account holds, so submission fails with canister error 750 after
/// the first transfer has already landed — demonstrating that a batch is
/// NOT rolled back. Only one new block (the first transfer) is produced and
/// the downstream accounts stay empty.
async fn test_multiple_transfers_fail(
    ros: &RosettaApiHandle,
    ledger: &Canister<'_>,
    acc: AccountIdentifier,
    key_pair: Arc<EdKeypair>,
) {
    let (dst_acc1, dst_acc1_kp, _pk, _pid) = make_user(1200);
    let (dst_acc2, dst_acc2_kp, _pk, _pid) = make_user(1201);
    let (dst_acc3, _kp, _pk, _pid) = make_user(1202);
    let amount1 = Tokens::new(3, 0).unwrap();
    let amount2 = Tokens::new(2, 0).unwrap();
    // Deliberately more than `acc` can afford, to trigger the failure.
    let amount3 = Tokens::new(100_000, 0).unwrap();
    let tip_idx = ros
        .network_status()
        .await
        .unwrap()
        .unwrap()
        .current_block_identifier
        .index as u64;
    // Only the first transfer is expected to commit.
    let expected_idx = tip_idx + 1;
    let err = do_multiple_txn(
        ros,
        &[
            RequestInfo {
                request: Request::Transfer(Operation::Transfer {
                    from: acc,
                    to: dst_acc1,
                    amount: amount1,
                    fee: *FEE,
                }),
                sender_keypair: Arc::clone(&key_pair),
            },
            RequestInfo {
                request: Request::Transfer(Operation::Transfer {
                    from: acc,
                    to: dst_acc3,
                    amount: amount3,
                    fee: *FEE,
                }),
                sender_keypair: Arc::new(dst_acc2_kp),
            },
            RequestInfo {
                request: Request::Transfer(Operation::Transfer {
                    from: dst_acc1,
                    to: dst_acc2,
                    amount: amount2,
                    fee: *FEE,
                }),
                sender_keypair: Arc::new(dst_acc1_kp),
            },
        ],
        false,
        Some(one_day_from_now_nanos()),
        None,
    )
    .await
    .unwrap_err();
    assert_canister_error(&err, 750, "debit account doesn't have enough funds");
    let block = ros.wait_for_block_at(expected_idx).await.unwrap();
    assert_eq!(block.transactions.len(), 1);
    // First hop committed; the rest of the batch never executed.
    check_balance(ros, ledger, &dst_acc1, amount1).await;
    check_balance(ros, ledger, &dst_acc2, Tokens::ZERO).await;
    check_balance(ros, ledger, &dst_acc3, Tokens::ZERO).await;
}
/// Stake a neuron through the high-level request API:
/// fund a fresh user, transfer the stake to the neuron's (sub)account, then
/// issue a Stake request. Verifies a NeuronId is returned and that the
/// neuron is reported as Dissolved via all three `account_balance_neuron`
/// lookup variants (by id, by public key, by public key with verification).
///
/// Returns the staking account and its key pair for reuse in later tests.
async fn test_staking(
    ros: &RosettaApiHandle,
    acc: AccountIdentifier,
    key_pair: Arc<EdKeypair>,
) -> (AccountIdentifier, Arc<EdKeypair>) {
    let (dst_acc, dst_acc_kp, dst_acc_pk, _pid) = make_user(1300);
    let dst_acc_kp = Arc::new(dst_acc_kp);
    let neuron_index = 2;
    let staked_amount = Tokens::new(10, 0).unwrap();
    // Compute the neuron's ledger account from the controller's public key
    // and the neuron index. Could use /construction/derive for this.
    let neuron_account =
        neuron_account_from_public_key(&GOVERNANCE_CANISTER_ID, &dst_acc_pk, neuron_index).unwrap();
    let neuron_account = from_model_account_identifier(&neuron_account).unwrap();
    let (_tid, results, _fee) = do_multiple_txn(
        ros,
        &[
            RequestInfo {
                request: Request::Transfer(Operation::Transfer {
                    from: acc,
                    to: dst_acc,
                    amount: (staked_amount + DEFAULT_TRANSFER_FEE).unwrap(),
                    fee: DEFAULT_TRANSFER_FEE,
                }),
                sender_keypair: Arc::clone(&key_pair),
            },
            RequestInfo {
                request: Request::Transfer(Operation::Transfer {
                    from: dst_acc,
                    to: neuron_account,
                    amount: staked_amount,
                    fee: DEFAULT_TRANSFER_FEE,
                }),
                sender_keypair: Arc::clone(&dst_acc_kp),
            },
            RequestInfo {
                request: Request::Stake(Stake {
                    account: dst_acc,
                    neuron_index,
                }),
                sender_keypair: Arc::clone(&dst_acc_kp),
            },
        ],
        false,
        Some(one_day_from_now_nanos()),
        None,
    )
    .await
    .unwrap();
    let neuron_id = results.operations.last().unwrap().neuron_id;
    assert!(
        neuron_id.is_some(),
        "NeuronId should have been returned here"
    );
    // Block height is the last block observed.
    // In this case the transfer to neuron_account.
    assert!(results.last_block_index().is_some());
    // Look up the neuron three ways; each must report the Dissolved state.
    let neuron_info = ros
        .account_balance_neuron(neuron_account, neuron_id, None, false)
        .await
        .unwrap()
        .unwrap()
        .metadata
        .unwrap();
    assert_eq!(neuron_info.state, NeuronState::Dissolved);
    let neuron_info = ros
        .account_balance_neuron(
            neuron_account,
            None,
            Some((dst_acc_pk.clone(), neuron_index)),
            false,
        )
        .await
        .unwrap()
        .unwrap()
        .metadata
        .unwrap();
    assert_eq!(neuron_info.state, NeuronState::Dissolved);
    let neuron_info = ros
        .account_balance_neuron(neuron_account, None, Some((dst_acc_pk, neuron_index)), true)
        .await
        .unwrap()
        .unwrap()
        .metadata
        .unwrap();
    assert_eq!(neuron_info.state, NeuronState::Dissolved);
    // Return staked account.
    (dst_acc, dst_acc_kp)
}
/// Stake a neuron by walking the raw Rosetta construction flow end to end:
/// derive -> preprocess -> metadata -> payloads -> parse(unsigned) ->
/// combine -> parse(signed) -> hash -> submit.
///
/// Builds the same fund/transfer/stake sequence as `test_staking`, but as
/// hand-written JSON operations, and asserts on the intermediate responses
/// of every construction endpoint.
///
/// Returns the staking account and its key pair for reuse in later tests.
async fn test_staking_raw(
    ros: &RosettaApiHandle,
    acc: AccountIdentifier,
    key_pair: Arc<EdKeypair>,
) -> (AccountIdentifier, Arc<EdKeypair>) {
    let (dst_acc, dst_acc_kp, dst_acc_pk, _pid) = make_user(1300);
    let dst_acc_kp = Arc::new(dst_acc_kp);
    let neuron_index = 2;
    // Could use /construction/derive for this.
    let neuron_account =
        neuron_account_from_public_key(&GOVERNANCE_CANISTER_ID, &dst_acc_pk, neuron_index).unwrap();
    let neuron_account = from_model_account_identifier(&neuron_account).unwrap();
    // Key pairs as Json.
    let pk1 = serde_json::to_value(to_public_key(&key_pair)).unwrap();
    let pk2 = serde_json::to_value(to_public_key(&dst_acc_kp)).unwrap();
    // Call /construction/derive.
    let req_derive = json!({
        "network_identifier": &ros.network_id(),
        "public_key": pk1,
        "metadata": {
            "account_type": "ledger"
        }
    });
    let res_derive = raw_construction(ros, "derive", req_derive).await;
    let address = res_derive
        .get("account_identifier")
        .unwrap()
        .get("address")
        .unwrap();
    // derive must reproduce the funding account address for pk1.
    assert_eq!(&acc.to_hex(), address); // 52bef...
    // acc => 52bef...
    // dest_acc => 1e31da...
    // neuron_account => 79ec2...
    // Call /construction/preprocess
    // Operations 0-2: fund dst_acc (10.0001 ICP out, in, plus 0.0001 fee).
    // Operations 3-5: move the 10 ICP stake onto the neuron subaccount.
    // Operation 6: the STAKE itself, keyed by neuron_index.
    let operations = json!([
        {
            "operation_identifier": {
                "index": 0
            },
            "type": "TRANSACTION",
            "account": {
                "address": &acc
            },
            "amount": {
                "value": "-1000010000",
                "currency": {
                    "symbol": "ICP",
                    "decimals": 8
                }
            },
        },
        {
            "operation_identifier": {
                "index": 1
            },
            "type": "TRANSACTION",
            "account": {
                "address": &dst_acc
            },
            "amount": {
                "value": "1000010000",
                "currency": {
                    "symbol": "ICP",
                    "decimals": 8
                }
            },
        },
        {
            "operation_identifier": {
                "index": 2
            },
            "type": "FEE",
            "account": {
                "address": &acc
            },
            "amount": {
                "value": "-10000",
                "currency": {
                    "symbol": "ICP",
                    "decimals": 8
                }
            },
        },
        {
            "operation_identifier": {
                "index": 3
            },
            "type": "TRANSACTION",
            "account": {
                "address": &dst_acc
            },
            "amount": {
                "value": "-1000000000",
                "currency": {
                    "symbol": "ICP",
                    "decimals": 8
                }
            },
        },
        {
            "operation_identifier": {
                "index": 4
            },
            "type": "TRANSACTION",
            "account": {
                "address": &neuron_account
            },
            "amount": {
                "value": "1000000000",
                "currency": {
                    "symbol": "ICP",
                    "decimals": 8
                }
            },
        },
        {
            "operation_identifier": {
                "index": 5
            },
            "type": "FEE",
            "account": {
                "address": &dst_acc
            },
            "amount": {
                "value": "-10000",
                "currency": {
                    "symbol": "ICP",
                    "decimals": 8
                }
            },
        },
        {
            "operation_identifier": {
                "index": 6
            },
            "type": "STAKE",
            "account": {
                "address": &dst_acc
            },
            "metadata": {
                "neuron_index": &neuron_index
            }
        }
    ]);
    let req_preprocess = json!({
        "network_identifier": &ros.network_id(),
        "operations": operations,
        "metadata": {},
    });
    let res_preprocess = raw_construction(ros, "preprocess", req_preprocess).await;
    let options = res_preprocess.get("options");
    // The 7 operations collapse into 3 request types: two transfers + stake.
    assert_json_eq!(
        json!({
            "request_types": [
                "TRANSACTION",
                "TRANSACTION",
                {"STAKE": {"neuron_index": 2}}
            ]
        }),
        options.unwrap()
    );
    // Call /construction/metadata
    let req_metadata = json!({
        "network_identifier": &ros.network_id(),
        "options": options,
        "public_keys": [pk1]
    });
    let res_metadata = raw_construction(ros, "metadata", req_metadata).await;
    assert_json_eq!(
        json!([
            {
                "currency": {"symbol": "ICP", "decimals": 8},
                "value": format!("{}", DEFAULT_TRANSFER_FEE.get_e8s())
            }
        ]),
        res_metadata.get("suggested_fee").unwrap()
    );
    // NB: metadata response will have to be added to payloads request.
    // Call /construction/payloads
    let req_payloads = json!({
        "network_identifier": &ros.network_id(),
        "operations": operations,
        "metadata": res_metadata,
        "public_keys": [pk1,pk2]
    });
    let res_payloads = raw_construction(ros, "payloads", req_payloads).await;
    let unsigned_transaction: &Value = res_payloads.get("unsigned_transaction").unwrap();
    let payloads = res_payloads.get("payloads").unwrap();
    let payloads = payloads.as_array().unwrap();
    // Call /construction/parse (unsigned).
    let req_parse = json!({
        "network_identifier": &ros.network_id(),
        "signed": false,
        "transaction": &unsigned_transaction
    });
    let _res_parse = raw_construction(ros, "parse", req_parse).await;
    // Call /construction/combine.
    // Payloads 0-1 belong to the funding transfer (signed by pk1/key_pair);
    // payloads 2-5 cover the stake transfer and STAKE (signed by dst_acc_kp).
    let signatures = json!([
        {
            "signing_payload": payloads[0],
            "public_key": pk1,
            "signature_type": "ed25519",
            "hex_bytes": sign(&payloads[0], &key_pair)
        },{
            "signing_payload": payloads[1],
            "public_key": pk1,
            "signature_type": "ed25519",
            "hex_bytes": sign(&payloads[1], &key_pair)
        },{
            "signing_payload": payloads[2],
            "public_key": pk2,
            "signature_type": "ed25519",
            "hex_bytes": sign(&payloads[2], &dst_acc_kp)
        },{
            "signing_payload": payloads[3],
            "public_key": pk2,
            "signature_type": "ed25519",
            "hex_bytes": sign(&payloads[3], &dst_acc_kp)
        },{
            "signing_payload": payloads[4],
            "public_key": pk2,
            "signature_type": "ed25519",
            "hex_bytes": sign(&payloads[4], &dst_acc_kp)
        },{
            "signing_payload": payloads[5],
            "public_key": pk2,
            "signature_type": "ed25519",
            "hex_bytes": sign(&payloads[5], &dst_acc_kp)
        },
    ]);
    let req_combine = json!({
        "network_identifier": &ros.network_id(),
        "unsigned_transaction": &unsigned_transaction,
        "signatures": signatures
    });
    let res_combine = raw_construction(ros, "combine", req_combine).await;
    // Call /construction/parse (signed).
    let signed_transaction: &Value = res_combine.get("signed_transaction").unwrap();
    let req_parse = json!({
        "network_identifier": &ros.network_id(),
        "signed": true,
        "transaction": &signed_transaction
    });
    let _res_parse = raw_construction(ros, "parse", req_parse).await;
    // Call /construction/hash.
    let req_hash = json!({
        "network_identifier": &ros.network_id(),
        "signed_transaction": &signed_transaction
    });
    let _res_hash = raw_construction(ros, "hash", req_hash).await;
    // Call /construction/submit.
    let req_submit = json!({
        "network_identifier": &ros.network_id(),
        "signed_transaction": &signed_transaction
    });
    let res_submit = raw_construction(ros, "submit", req_submit).await;
    // Check proper state after staking.
    let operations = res_submit
        .get("metadata")
        .unwrap()
        .get("operations")
        .unwrap()
        .as_array()
        .unwrap();
    assert_eq!(
        7,
        operations.len(),
        "Expecting 7 operations for the staking transactions."
    );
    for op in operations.iter() {
        assert_eq!(
            op.get("status").unwrap(),
            "COMPLETED",
            "Operation didn't complete."
        );
    }
    assert_json_include!(
        actual: &operations[0],
        expected: json!({
            "amount": {"currency": {"decimals": 8, "symbol": "ICP"}, "value": "-1000010000"},
            "operation_identifier": {"index": 0},
            "status": "COMPLETED",
            "type": "TRANSACTION"
        })
    );
    // The final (STAKE) operation must carry the created neuron's id.
    let last_neuron_id = operations
        .last()
        .unwrap()
        .get("metadata")
        .expect("Expecting metadata in response")
        .get("neuron_id");
    assert!(
        last_neuron_id.is_some(),
        "NeuronId should have been returned here"
    );
    let neuron_id = last_neuron_id.unwrap().as_u64();
    // Block height is the last block observed.
    // In this case the transfer to neuron_account.
    let last_block_idx = operations
        .iter()
        .rev()
        .find_map(|r| r.get("metadata").and_then(|r| r.get("block_index")));
    assert!(last_block_idx.is_some());
    let neuron_info = ros
        .account_balance_neuron(neuron_account, neuron_id, None, false)
        .await
        .unwrap()
        .unwrap()
        .metadata
        .unwrap();
    assert_eq!(neuron_info.state, NeuronState::Dissolved);
    // Return staked account.
    (dst_acc, dst_acc_kp)
}
/// POST a JSON request to the given raw `/construction/<operation>` endpoint
/// and decode the response body as a JSON object.
///
/// Panics (fails the test) if the HTTP status is not a success or the body
/// is not valid JSON.
async fn raw_construction(ros: &RosettaApiHandle, operation: &str, req: Value) -> Object {
    let body = req.to_string();
    let response = ros
        .raw_construction_endpoint(operation, body.as_bytes())
        .await
        .unwrap();
    // `.1` is the HTTP status, `.0` the raw response bytes.
    assert!(response.1.is_success(), "Result should be a success");
    serde_json::from_slice(&response.0).unwrap()
}
/// Ed25519-sign the `hex_bytes` field of a Rosetta signing payload and
/// return the signature hex-encoded as a JSON string value.
fn sign(payload: &Value, keypair: &Arc<EdKeypair>) -> Value {
    let payload_hex: &str = payload.get("hex_bytes").unwrap().as_str().unwrap();
    let message = from_hex(payload_hex).unwrap();
    let signature_bytes = keypair.sign(&message).to_bytes();
    json!(to_hex(&signature_bytes))
}
/// Attempt to stake one e8 below the minimum neuron stake and verify the
/// governance canister rejects the claim with error 750
/// ("Account does not have enough funds to stake a neuron").
async fn test_staking_failure(
    ros: &RosettaApiHandle,
    acc: AccountIdentifier,
    key_pair: Arc<EdKeypair>,
) {
    let (dst_acc, dst_acc_kp, dst_acc_pk, _pid) = make_user(1301);
    let dst_acc_kp = Arc::new(dst_acc_kp);
    let neuron_index = 2;
    // This is just below the minimum (NetworkEconomics.neuron_minimum_stake_e8s).
    let staked_amount = (Tokens::new(1, 0).unwrap() - Tokens::from_e8s(1)).unwrap();
    // Could use /construction/derive for this.
    let neuron_account =
        neuron_account_from_public_key(&GOVERNANCE_CANISTER_ID, &dst_acc_pk, neuron_index).unwrap();
    let neuron_account = from_model_account_identifier(&neuron_account).unwrap();
    let err = do_multiple_txn(
        ros,
        &[
            RequestInfo {
                request: Request::Transfer(Operation::Transfer {
                    from: acc,
                    to: dst_acc,
                    amount: (staked_amount + DEFAULT_TRANSFER_FEE).unwrap(),
                    fee: DEFAULT_TRANSFER_FEE,
                }),
                sender_keypair: Arc::clone(&key_pair),
            },
            RequestInfo {
                request: Request::Transfer(Operation::Transfer {
                    from: dst_acc,
                    to: neuron_account,
                    amount: staked_amount,
                    fee: DEFAULT_TRANSFER_FEE,
                }),
                sender_keypair: Arc::clone(&dst_acc_kp),
            },
            RequestInfo {
                request: Request::Stake(Stake {
                    account: dst_acc,
                    neuron_index,
                }),
                sender_keypair: Arc::clone(&dst_acc_kp),
            },
        ],
        false,
        Some(one_day_from_now_nanos()),
        None,
    )
    .await
    .unwrap_err();
    assert_canister_error(
        &err,
        750,
        "Could not claim neuron: InsufficientFunds: Account does not have enough funds to stake a neuron",
    );
}
/// Submit a StartDissolve request for the given neuron and check that the
/// result is a non-transfer operation of the StartDissolve type.
/// Returns the raw submission result so callers can also assert failures.
async fn test_start_dissolve(
    ros: &RosettaApiHandle,
    account: AccountIdentifier,
    key_pair: Arc<EdKeypair>,
    neuron_index: u64,
) -> Result<(), ic_rosetta_api::models::Error> {
    do_multiple_txn(
        ros,
        &[RequestInfo {
            request: Request::StartDissolve(StartDissolve {
                account,
                neuron_index,
            }),
            sender_keypair: Arc::clone(&key_pair),
        }],
        false,
        Some(one_day_from_now_nanos()),
        None,
    )
    .await
    .map(|(tx_id, results, _)| {
        // Neuron management ops are not ledger transfers.
        assert!(!tx_id.is_transfer());
        assert!(matches!(
            results.operations.first().unwrap(),
            RequestResult {
                _type: Request::StartDissolve(_),
                ..
            }
        ));
    })
}
/// Submit a StopDissolve request for the given neuron and check that the
/// result is a non-transfer operation of the StopDissolve type.
/// Returns the raw submission result so callers can also assert failures.
async fn test_stop_dissolve(
    ros: &RosettaApiHandle,
    account: AccountIdentifier,
    key_pair: Arc<EdKeypair>,
    neuron_index: u64,
) -> Result<(), ic_rosetta_api::models::Error> {
    do_multiple_txn(
        ros,
        &[RequestInfo {
            request: Request::StopDissolve(StopDissolve {
                account,
                neuron_index,
            }),
            sender_keypair: Arc::clone(&key_pair),
        }],
        false,
        Some(one_day_from_now_nanos()),
        None,
    )
    .await
    .map(|(tx_id, results, _)| {
        // Neuron management ops are not ledger transfers.
        assert!(!tx_id.is_transfer());
        assert!(matches!(
            results.operations.first().unwrap(),
            RequestResult {
                _type: Request::StopDissolve(_),
                ..
            }
        ));
    })
}
/// Setting a dissolve timestamp in the past must be rejected by governance
/// with error 750 ("The dissolve delay must be set to a future time.").
async fn test_set_dissolve_timestamp_in_the_past_fail(
    ros: &RosettaApiHandle,
    acc: AccountIdentifier,
    key_pair: Arc<EdKeypair>,
    neuron_index: u64,
) {
    // ~28 hours before now — clearly in the past.
    let err = set_dissolve_timestamp(
        ros,
        acc,
        key_pair,
        Seconds::from(std::time::SystemTime::now() - Duration::from_secs(100000)),
        neuron_index,
    )
    .await;
    assert_canister_error(
        &err.unwrap_err(),
        750,
        "The dissolve delay must be set to a future time.",
    );
}
/// Happy-path wrapper: setting a (future) dissolve timestamp must succeed.
async fn test_set_dissolve_timestamp(
    ros: &RosettaApiHandle,
    acc: AccountIdentifier,
    key_pair: Arc<EdKeypair>,
    delay_secs: Seconds,
    neuron_index: u64,
) {
    set_dissolve_timestamp(ros, acc, key_pair, delay_secs, neuron_index)
        .await
        .unwrap();
}
/// Submit a SetDissolveTimestamp request for the given neuron and verify the
/// returned operation type on success. Returns the submission result so that
/// both success and failure paths can be asserted by callers.
async fn set_dissolve_timestamp(
    ros: &RosettaApiHandle,
    acc: AccountIdentifier,
    key_pair: Arc<EdKeypair>,
    timestamp: Seconds,
    neuron_index: u64,
) -> Result<(), ic_rosetta_api::models::Error> {
    do_multiple_txn(
        ros,
        &[RequestInfo {
            request: Request::SetDissolveTimestamp(SetDissolveTimestamp {
                account: acc,
                neuron_index,
                timestamp,
            }),
            sender_keypair: Arc::clone(&key_pair),
        }],
        false,
        Some(one_day_from_now_nanos()),
        None,
    )
    .await
    .map(|(tx_id, results, _)| {
        // Neuron management ops are not ledger transfers.
        assert!(!tx_id.is_transfer());
        assert!(matches!(
            results.operations.first().unwrap(),
            RequestResult {
                _type: Request::SetDissolveTimestamp(_),
                ..
            }
        ));
    })
}
/// Add a hotkey to the neuron twice: first identified by public key, then by
/// principal. The second submission uses the other ingress-window argument of
/// `do_multiple_txn` and must succeed; the first attempt's result is returned
/// so the caller can assert on it.
async fn test_add_hot_key(
    ros: &RosettaApiHandle,
    acc: AccountIdentifier,
    key_pair: Arc<EdKeypair>,
    neuron_index: u64,
) -> Result<(), ic_rosetta_api::models::Error> {
    // Fresh identity whose pk/principal will be registered as the hotkey.
    let (_, _, pk, pid) = make_user(1400);
    let r = do_multiple_txn(
        ros,
        &[RequestInfo {
            request: Request::AddHotKey(AddHotKey {
                account: acc,
                neuron_index,
                key: PublicKeyOrPrincipal::PublicKey(pk),
            }),
            sender_keypair: Arc::clone(&key_pair),
        }],
        false,
        Some(
            (ic_types::time::current_time() + Duration::from_secs(24 * 60 * 60))
                .as_nanos_since_unix_epoch(),
        ),
        None,
    )
    .await
    .map(|(tx_id, results, _)| {
        assert!(!tx_id.is_transfer());
        assert!(matches!(
            results.operations.first().unwrap(),
            RequestResult {
                _type: Request::AddHotKey(_),
                ..
            }
        ));
    });
    // Same operation, this time addressing the hotkey by principal and
    // passing the deadline via the other ingress parameter.
    do_multiple_txn(
        ros,
        &[RequestInfo {
            request: Request::AddHotKey(AddHotKey {
                account: acc,
                neuron_index,
                key: PublicKeyOrPrincipal::Principal(pid),
            }),
            sender_keypair: Arc::clone(&key_pair),
        }],
        false,
        None,
        Some(
            (ic_types::time::current_time() + Duration::from_secs(24 * 60 * 60))
                .as_nanos_since_unix_epoch(),
        ),
    )
    .await
    .map(|(tx_id, results, _)| {
        assert!(!tx_id.is_transfer());
        assert!(matches!(
            results.operations.first().unwrap(),
            RequestResult {
                _type: Request::AddHotKey(_),
                ..
            }
        ));
    })
    .unwrap_or_else(|e| panic!("{:?}", e));
    r
}
#[allow(clippy::too_many_arguments)]
/// Disburse a (dissolved) neuron through the request API and verify the
/// recipient's ledger balance grows by the disbursed amount minus the
/// default transfer fee. `amount: None` disburses the full cached stake;
/// `recipient: None` pays out to the controller's account `acc`.
async fn test_disburse(
    ros: &RosettaApiHandle,
    ledger: &Canister<'_>,
    acc: AccountIdentifier,
    key_pair: Arc<EdKeypair>,
    neuron_index: u64,
    amount: Option<Tokens>,
    recipient: Option<AccountIdentifier>,
    neuron: &Neuron,
) -> Result<(), ic_rosetta_api::models::Error> {
    let pre_disburse = get_balance(ledger, acc).await;
    let (_, tip_idx) = get_tip(ledger).await;
    let res = do_multiple_txn(
        ros,
        &[RequestInfo {
            request: Request::Disburse(Disburse {
                account: acc,
                amount,
                recipient,
                neuron_index,
            }),
            sender_keypair: Arc::clone(&key_pair),
        }],
        false,
        Some(one_day_from_now_nanos()),
        None,
    )
    .await
    .map(|(tx_id, results, _)| {
        assert!(!tx_id.is_transfer());
        assert!(matches!(
            results.operations.first().unwrap(),
            RequestResult {
                _type: Request::Disburse(_),
                status: Status::Completed,
                ..
            }
        ));
        results
    })?;
    let amount = amount.unwrap_or_else(|| Tokens::from_e8s(neuron.cached_neuron_stake_e8s));
    // Disbursing produces exactly one new ledger block.
    let expected_idx = tip_idx + 1;
    if let Some(h) = res.last_block_index() {
        assert_eq!(h, expected_idx);
    }
    let _ = ros.wait_for_block_at(expected_idx).await.unwrap();
    // governance assumes the default fee for disburse and that's why this check uses the
    // DEFAULT_TRANSFER_FEE.
    check_balance(
        ros,
        ledger,
        &recipient.unwrap_or(acc),
        ((pre_disburse + amount).unwrap() - DEFAULT_TRANSFER_FEE).unwrap(),
    )
    .await;
    Ok(())
}
#[allow(clippy::too_many_arguments)]
/// Disburse a neuron via the raw construction endpoints
/// (metadata -> payloads -> sign -> hash -> submit), then verify the hash
/// endpoint agrees with submit, every returned operation is COMPLETED, and
/// the recipient balance grows by the amount minus the default fee.
async fn test_disburse_raw(
    ros: &RosettaApiHandle,
    ledger: &Canister<'_>,
    acc: AccountIdentifier,
    key_pair: Arc<EdKeypair>,
    neuron_index: u64,
    amount: Option<Tokens>,
    recipient: Option<AccountIdentifier>,
    neuron: &Neuron,
) -> Result<(), ic_rosetta_api::models::Error> {
    let pre_disburse = get_balance(ledger, acc).await;
    let (_, tip_idx) = get_tip(ledger).await;
    // Single hand-written DISBURSE operation (full stake, default recipient).
    let req = json!({
        "network_identifier": &ros.network_id(),
        "operations": [
            {
                "operation_identifier": {
                    "index": 0
                },
                "type": "DISBURSE",
                "account": {
                    "address": &acc
                },
                "metadata": {
                    "neuron_index": &neuron_index
                }
            }
        ]
    });
    let req = req.to_string();
    let metadata: Object = serde_json::from_slice(
        &ros.raw_construction_endpoint("metadata", req.as_bytes())
            .await
            .unwrap()
            .0,
    )
    .unwrap();
    // Fold the metadata response and the signing key into the payloads request.
    let mut req: Object = serde_json::from_str(&req).unwrap();
    req.insert("metadata".to_string(), metadata.into());
    req.insert(
        "public_keys".to_string(),
        serde_json::to_value(vec![to_public_key(&key_pair)]).unwrap(),
    );
    let payloads: ConstructionPayloadsResponse = serde_json::from_slice(
        &ros.raw_construction_endpoint("payloads", &serde_json::to_vec_pretty(&req).unwrap())
            .await
            .unwrap()
            .0,
    )
    .unwrap();
    let signed = sign_txn(ros, &[key_pair.clone()], payloads).await.unwrap();
    let hash_res = ros
        .construction_hash(signed.signed_transaction.clone())
        .await
        .unwrap()?;
    let submit_res = ros
        .construction_submit(signed.signed_transaction().unwrap())
        .await
        .unwrap()?;
    // /construction/hash must predict the id that /construction/submit reports.
    assert_eq!(
        hash_res.transaction_identifier,
        submit_res.transaction_identifier
    );
    for op in submit_res.metadata.operations.iter() {
        assert_eq!(
            op.status.as_ref().expect("Expecting status to be set."),
            "COMPLETED",
            "Operation didn't complete."
        );
    }
    let amount = amount.unwrap_or_else(|| Tokens::from_e8s(neuron.cached_neuron_stake_e8s));
    // Disbursing produces exactly one new ledger block.
    let expected_idx = tip_idx + 1;
    let _ = ros.wait_for_block_at(expected_idx).await.unwrap();
    // governance assumes the default fee for disburse and that's why this check uses the
    // DEFAULT_TRANSFER_FEE.
    check_balance(
        ros,
        ledger,
        &recipient.unwrap_or(acc),
        ((pre_disburse + amount).unwrap() - DEFAULT_TRANSFER_FEE).unwrap(),
    )
    .await;
    Ok(())
}
/// Full staking lifecycle in a single submit: fund, move stake to the
/// neuron account, Stake, SetDissolveTimestamp, StartDissolve, StopDissolve.
/// Only the two transfers create ledger blocks (tip + 2); the funding
/// account ends up lighter by the stake plus two transfer fees.
async fn test_staking_flow(
    ros: &RosettaApiHandle,
    ledger: &Canister<'_>,
    test_account: AccountIdentifier,
    test_key_pair: Arc<EdKeypair>,
    timestamp: Seconds,
) {
    let (_, tip_idx) = get_tip(ledger).await;
    let balance_before = get_balance(ledger, test_account).await;
    let (dst_acc, dst_acc_kp, dst_acc_pk, _pid) = make_user(1400);
    let dst_acc_kp = Arc::new(dst_acc_kp);
    let staked_amount = Tokens::new(1, 0).unwrap();
    let neuron_index = 1;
    // Could use /neuron/derive for this.
    let neuron_account =
        neuron_account_from_public_key(&GOVERNANCE_CANISTER_ID, &dst_acc_pk, neuron_index).unwrap();
    let neuron_account = from_model_account_identifier(&neuron_account).unwrap();
    let (_tid, res, _fee) = do_multiple_txn(
        ros,
        &[
            RequestInfo {
                request: Request::Transfer(Operation::Transfer {
                    from: test_account,
                    to: dst_acc,
                    amount: (staked_amount + DEFAULT_TRANSFER_FEE).unwrap(),
                    fee: DEFAULT_TRANSFER_FEE,
                }),
                sender_keypair: Arc::clone(&test_key_pair),
            },
            RequestInfo {
                request: Request::Transfer(Operation::Transfer {
                    from: dst_acc,
                    to: neuron_account,
                    amount: staked_amount,
                    fee: DEFAULT_TRANSFER_FEE,
                }),
                sender_keypair: Arc::clone(&dst_acc_kp),
            },
            RequestInfo {
                request: Request::Stake(Stake {
                    account: dst_acc,
                    neuron_index,
                }),
                sender_keypair: Arc::clone(&dst_acc_kp),
            },
            RequestInfo {
                request: Request::SetDissolveTimestamp(SetDissolveTimestamp {
                    account: dst_acc,
                    neuron_index,
                    timestamp,
                }),
                sender_keypair: Arc::clone(&dst_acc_kp),
            },
            RequestInfo {
                request: Request::StartDissolve(StartDissolve {
                    account: dst_acc,
                    neuron_index,
                }),
                sender_keypair: Arc::clone(&dst_acc_kp),
            },
            RequestInfo {
                request: Request::StopDissolve(StopDissolve {
                    account: dst_acc,
                    neuron_index,
                }),
                sender_keypair: Arc::clone(&dst_acc_kp),
            },
        ],
        false,
        None,
        None,
    )
    .await
    .unwrap();
    // Two ledger transfers => two new blocks; neuron ops add none.
    let expected_idx = tip_idx + 2;
    if let Some(h) = res.last_block_index() {
        assert_eq!(h, expected_idx);
    }
    let _ = ros.wait_for_block_at(expected_idx).await.unwrap();
    check_balance(
        ros,
        ledger,
        &test_account,
        (((balance_before - staked_amount).unwrap() - DEFAULT_TRANSFER_FEE).unwrap()
            - DEFAULT_TRANSFER_FEE)
            .unwrap(),
    )
    .await;
}
/// Same lifecycle as `test_staking_flow`, but split across two submit calls:
/// first the two ledger transfers, then the neuron management operations
/// (Stake, SetDissolveTimestamp, StartDissolve, StopDissolve). The balance
/// and block-height expectations are identical.
async fn test_staking_flow_two_txns(
    ros: &RosettaApiHandle,
    ledger: &Canister<'_>,
    test_account: AccountIdentifier,
    test_key_pair: Arc<EdKeypair>,
    timestamp: Seconds,
) {
    let (_, tip_idx) = get_tip(ledger).await;
    let balance_before = get_balance(ledger, test_account).await;
    let (dst_acc, dst_acc_kp, dst_acc_pk, _pid) = make_user(1401);
    let dst_acc_kp = Arc::new(dst_acc_kp);
    let staked_amount = Tokens::new(1, 0).unwrap();
    let neuron_index = 1;
    // Could use /neuron/derive for this.
    let neuron_account =
        neuron_account_from_public_key(&GOVERNANCE_CANISTER_ID, &dst_acc_pk, neuron_index).unwrap();
    let neuron_account = from_model_account_identifier(&neuron_account).unwrap();
    // First submit: the two ledger transfers (funding + stake placement).
    let (_tid, _bh, _fee) = do_multiple_txn(
        ros,
        &[
            RequestInfo {
                request: Request::Transfer(Operation::Transfer {
                    from: test_account,
                    to: dst_acc,
                    amount: (staked_amount + DEFAULT_TRANSFER_FEE).unwrap(),
                    fee: DEFAULT_TRANSFER_FEE,
                }),
                sender_keypair: Arc::clone(&test_key_pair),
            },
            RequestInfo {
                request: Request::Transfer(Operation::Transfer {
                    from: dst_acc,
                    to: neuron_account,
                    amount: staked_amount,
                    fee: DEFAULT_TRANSFER_FEE,
                }),
                sender_keypair: Arc::clone(&dst_acc_kp),
            },
        ],
        false,
        None,
        None,
    )
    .await
    .unwrap();
    // Second submit: neuron management only — no new ledger blocks.
    let (_tid, res, _fee) = do_multiple_txn(
        ros,
        &[
            RequestInfo {
                request: Request::Stake(Stake {
                    account: dst_acc,
                    neuron_index,
                }),
                sender_keypair: Arc::clone(&dst_acc_kp),
            },
            RequestInfo {
                request: Request::SetDissolveTimestamp(SetDissolveTimestamp {
                    account: dst_acc,
                    neuron_index,
                    timestamp,
                }),
                sender_keypair: Arc::clone(&dst_acc_kp),
            },
            RequestInfo {
                request: Request::StartDissolve(StartDissolve {
                    account: dst_acc,
                    neuron_index,
                }),
                sender_keypair: Arc::clone(&dst_acc_kp),
            },
            RequestInfo {
                request: Request::StopDissolve(StopDissolve {
                    account: dst_acc,
                    neuron_index,
                }),
                sender_keypair: Arc::clone(&dst_acc_kp),
            },
        ],
        false,
        None,
        None,
    )
    .await
    .unwrap();
    let expected_idx = tip_idx + 2;
    if let Some(h) = res.last_block_index() {
        assert_eq!(h, expected_idx);
    }
    let _ = ros.wait_for_block_at(expected_idx).await.unwrap();
    check_balance(
        ros,
        ledger,
        &test_account,
        (((balance_before - staked_amount).unwrap() - DEFAULT_TRANSFER_FEE).unwrap()
            - DEFAULT_TRANSFER_FEE)
            .unwrap(),
    )
    .await;
}
/// Spawn a new neuron from an existing neuron's maturity and verify that the
/// parent's balance is untouched while the spawned neuron's account receives
/// the maturity (the test neuron is expected to carry 500_000_000 e8s of
/// maturity — presumably set during neuron setup; confirm against the setup
/// code if this changes).
async fn test_spawn(ros: &RosettaApiHandle, ledger: &Canister<'_>, neuron_info: NeuronInfo) {
    let (_, tip_idx) = get_tip(ledger).await;
    let acc = neuron_info.account_id;
    let neuron_index = neuron_info.neuron_subaccount_identifier;
    let key_pair: Arc<EdKeypair> = neuron_info.key_pair.into();
    let neuron_acc = neuron_info.neuron_account;
    let balance_main_before = get_balance(ledger, neuron_acc).await;
    assert_ne!(
        balance_main_before.get_e8s(),
        0,
        "Neuron balance shouldn't be 0."
    );
    // the nonce used to generate spawned neuron.
    let spawned_neuron_index: u64 = 4321;
    let res = do_multiple_txn(
        ros,
        &[RequestInfo {
            request: Request::Spawn(Spawn {
                account: acc,
                spawned_neuron_index,
                controller: Option::None, // use default (same) controller.
                neuron_index,
            }),
            sender_keypair: Arc::clone(&key_pair),
        }],
        false,
        Some(one_day_from_now_nanos()),
        None,
    )
    .await
    .map(|(tx_id, results, _)| {
        assert!(!tx_id.is_transfer());
        assert!(matches!(
            results.operations.first().unwrap(),
            RequestResult {
                _type: Request::Spawn(_),
                status: Status::Completed,
                ..
            }
        ));
        results
    });
    // Check spawn results.
    // We expect one transaction to happen.
    let expected_idx = tip_idx + 1;
    if let Some(h) = res.unwrap().last_block_index() {
        assert_eq!(h, expected_idx);
    }
    // Wait for Rosetta sync.
    ros.wait_for_tip_sync(expected_idx).await.unwrap();
    let balance_main_after = get_balance(ledger, neuron_acc).await;
    assert_eq!(
        balance_main_before.get_e8s(),
        balance_main_after.get_e8s(),
        "Neuron balance shouldn't change during spawn."
    );
    // Verify that maturity got transferred to the spawned neuron.
    let subaccount =
        compute_neuron_staking_subaccount(neuron_info.principal_id, spawned_neuron_index);
    let spawned_neuron = AccountIdentifier::new(GOVERNANCE_CANISTER_ID.get(), Some(subaccount));
    let balance_sub = get_balance(ledger, spawned_neuron).await;
    assert_eq!(
        500_000_000,
        balance_sub.get_e8s(),
        "Expecting all maturity to be transferred to the spawned neuron."
    );
    // We should get the same results with Rosetta call (step not required though).
    check_balance(ros, ledger, &spawned_neuron, Tokens::from_e8s(500_000_000)).await;
}
/// Spawning from a neuron without enough maturity must fail with Rosetta
/// error 770 / "Operation failed".
async fn test_spawn_invalid(ros: &RosettaApiHandle, neuron_info: NeuronInfo) {
    let acc = neuron_info.account_id;
    let neuron_index = neuron_info.neuron_subaccount_identifier;
    let key_pair: Arc<EdKeypair> = neuron_info.key_pair.into();
    // the nonce used to generate spawned neuron.
    let spawned_neuron_index: u64 = 5678;
    let res = do_multiple_txn(
        ros,
        &[RequestInfo {
            request: Request::Spawn(Spawn {
                account: acc,
                spawned_neuron_index,
                controller: Option::None, // use default (same) controller.
                neuron_index,
            }),
            sender_keypair: Arc::clone(&key_pair),
        }],
        false,
        Some(one_day_from_now_nanos()),
        None,
    )
    .await
    .map(|(tx_id, results, _)| {
        assert!(!tx_id.is_transfer());
        assert!(matches!(
            results.operations.first().unwrap(),
            RequestResult {
                _type: Request::Spawn(_),
                status: Status::Completed,
                ..
            }
        ));
        results
    });
    assert!(
        res.is_err(),
        "Error expected while trying to spawn a neuron with no enough maturity"
    );
    let err = res.unwrap_err();
    assert_eq!(err.code, 770);
    assert_eq!(err.message, "Operation failed".to_string());
}
/// Merge 100% of the neuron's maturity into its stake.
async fn test_merge_maturity_all(
    ros: &RosettaApiHandle,
    ledger: &Canister<'_>,
    neuron_info: NeuronInfo,
) {
    test_merge_maturity(ros, ledger, neuron_info, None).await;
}
/// Merge a partial (14%) share of the neuron's maturity into its stake.
async fn test_merge_maturity_partial(
    ros: &RosettaApiHandle,
    ledger: &Canister<'_>,
    neuron_info: NeuronInfo,
) {
    test_merge_maturity(ros, ledger, neuron_info, Some(14)).await;
}
/// Merge `percent` (default 100) of the neuron's maturity into its stake and
/// verify the neuron account's balance grows by exactly that share of the
/// expected maturity.
async fn test_merge_maturity(
    ros: &RosettaApiHandle,
    ledger: &Canister<'_>,
    neuron_info: NeuronInfo,
    percent: Option<u32>,
) {
    let (_, tip_idx) = get_tip(ledger).await;
    let acc = neuron_info.account_id;
    let neuron_index = neuron_info.neuron_subaccount_identifier;
    let key_pair: Arc<EdKeypair> = neuron_info.key_pair.into();
    let neuron_acc = neuron_info.neuron_account;
    let balance_before = get_balance(ledger, neuron_acc).await;
    assert_ne!(
        balance_before.get_e8s(),
        0,
        "Neuron balance shouldn't be 0."
    );
    let res = do_multiple_txn(
        ros,
        &[RequestInfo {
            request: Request::MergeMaturity(MergeMaturity {
                account: acc,
                percentage_to_merge: percent.unwrap_or(100),
                neuron_index,
            }),
            sender_keypair: Arc::clone(&key_pair),
        }],
        false,
        Some(one_day_from_now_nanos()),
        None,
    )
    .await
    .map(|(tx_id, results, _)| {
        assert!(!tx_id.is_transfer());
        assert!(matches!(
            results.operations.first().unwrap(),
            RequestResult {
                _type: Request::MergeMaturity(_),
                status: Status::Completed,
                ..
            }
        ));
        results
    })
    .expect("failed to merge neuron maturity");
    // Check merge maturity results.
    // We expect one transaction to happen.
    let expected_idx = tip_idx + 1;
    if let Some(h) = res.last_block_index() {
        assert_eq!(h, expected_idx);
    }
    // Wait for Rosetta sync.
    ros.wait_for_tip_sync(expected_idx).await.unwrap()
async fn test_merge_maturity_invalid(ros: &RosettaApiHandle, neuron_info: NeuronInfo) {
let acc = neuron_info.account_id;
let neuron_index = neuron_info.neuron_subaccount_identifier;
let key_pair: Arc<EdKeypair> = neuron_info.key_pair.into();
let res = do_multiple_txn(
ros,
&[RequestInfo {
request: Request::MergeMaturity(MergeMaturity {
account: acc,
percentage_to_merge: 104,
neuron_index,
}),
sender_keypair: Arc::clone(&key_pair),
}],
false,
Some(one_day_from_now_nanos()),
None,
)
.await;
assert!(
res.is_err(),
"Error expected while trying to merge neuron maturity with an invalid percentage"
);
}
fn rosetta_cli_construction_check(conf_file: &str) {
let output = std::process::Command::new("timeout")
.args(&[
"300s",
"rosetta-cli",
"check:construction",
"--configuration-file",
conf_file,
])
//.stdout(std::process::Stdio::inherit())
//.stderr(std::process::Stdio::inherit())
.output()
.expect("failed to execute rosetta-cli");
assert!(
output.status.success(),
"rosetta-cli construction check did not finish successfully: {},/\
\n\n--------------------------\nstdout: {}, \
\n\n--------------------------\nstderr: {}",
output.status,
String::from_utf8(output.stdout).unwrap(),
String::from_utf8(output.stderr).unwrap()
);
}
fn rosetta_cli_data_check(conf_file: &str) {
let output = std::process::Command::new("timeout")
.args(&[
"300s",
"rosetta-cli",
"check:data",
"--configuration-file",
conf_file,
])
//.stdout(std::process::Stdio::inherit())
//.stderr(std::process::Stdio::inherit())
.output()
.expect("failed to execute rosetta-cli");
assert!(
output.status.success(),
"rosetta-cli data check did not finish successfully: {},/\
\n\n--------------------------\nstdout: {}, \
\n\n--------------------------\nstderr: {}",
output.status,
String::from_utf8(output.stdout).unwrap(),
String::from_utf8(output.stderr).unwrap()
);
}
fn workspace_path() -> String {
match std::env::var("CI_PROJECT_DIR") {
Ok(dir) => format!("{}/rs/tests/rosetta_workspace", dir),
Err(_) => "rosetta_workspace".to_string(),
}
}
/// Ingress-expiry helper: nanoseconds since the Unix epoch for the instant
/// 24 hours from now (per the IC's clock).
fn one_day_from_now_nanos() -> u64 {
    const ONE_DAY: Duration = Duration::from_secs(24 * 60 * 60);
    (ic_types::time::current_time() + ONE_DAY).as_nanos_since_unix_epoch()
}
/// Bundle of identity material and governance state for one test neuron.
#[allow(dead_code)]
#[derive(Debug)]
struct NeuronInfo {
    /// Ledger account of the controlling identity (request sender).
    account_id: AccountIdentifier,
    /// Keypair used to sign requests for this neuron.
    key_pair: EdKeypair,
    public_key: PublicKey,
    principal_id: PrincipalId,
    /// The `neuron_index` from which the governance subaccount is derived.
    neuron_subaccount_identifier: u64,
    /// Governance-side neuron state used to prepopulate the canister.
    neuron: Neuron,
    /// Ledger account holding the neuron's stake (funded in
    /// `NeuronTestsSetup::add`).
    neuron_account: ledger_canister::AccountIdentifier,
}
/// Accumulates per-test neuron fixtures during environment setup; consumed by
/// `neurons()`, which converts it into a `NeuronTests`.
struct NeuronTestsSetup {
    /// Test name -> neuron fixture.
    info: HashMap<String, NeuronInfo>,
    /// Monotonically increasing seed feeding `make_user` and neuron ids.
    seed: u64,
    logger: Logger,
}
/// Runtime registry of neuron fixtures, consumed one test at a time via
/// `get_neuron_for_test`.
struct NeuronTests {
    info: HashMap<String, NeuronInfo>,
    logger: Logger,
}
impl Drop for NeuronTests {
    /// Fails the run if any registered neuron fixture was never consumed,
    /// which indicates a test that was set up but never executed.
    fn drop(&mut self) {
        if self.info.is_empty() {
            return;
        }
        let unconsumed: Vec<&String> = self.info.keys().collect();
        // NOTE: panicking in `drop` aborts the process if we are already
        // unwinding from another panic; acceptable in test-only code.
        // Message fixes vs. the original: "where" -> "were", and the actual
        // consuming method is `get_neuron_for_test` (there is no `run_test`).
        panic!(
            "Some NeuronTests were never run: {:#?}\n You must consume every test with `NeuronTests::get_neuron_for_test`",
            unconsumed
        );
    }
}
impl NeuronTests {
    /// Removes and returns the fixture registered under `test_name`, logging
    /// the test name as it starts.
    ///
    /// Panics when no fixture with that name was registered via
    /// `NeuronTestsSetup::add`.
    fn get_neuron_for_test(&mut self, test_name: &str) -> NeuronInfo {
        info!(self.logger, "{}", test_name);
        match self.info.remove(test_name) {
            Some(neuron_info) => neuron_info,
            None => panic!(
                "No test `{}` was setup!\n Use `NeuronTestsSetup::add` to setup neuron tests.",
                test_name
            ),
        }
    }
}
impl NeuronTestsSetup {
    /// Creates an empty setup. `seed` is spread out (x100_000) so identities
    /// and neuron ids from different setups cannot collide.
    fn new(seed: u64, logger: Logger) -> NeuronTestsSetup {
        NeuronTestsSetup {
            info: HashMap::default(),
            seed: seed * 100_000,
            logger,
        }
    }
    /// This method is used to setup a mature neuron.
    /// The default `Neuron` can be modified in the setup closure.
    ///
    /// This would be much nicer if we took a test closure to run against the
    /// neuron, upon calling `NeuronTests.test()`.
    /// That is not ergonomic, until async_closures are on stable.
    fn add(
        &mut self,
        ledger_balances: &mut HashMap<AccountIdentifier, Tokens>,
        test_name: &str,
        neuron_subaccount_identifier: u64,
        setup: impl FnOnce(&mut Neuron),
    ) {
        // Fresh deterministic identity for this fixture.
        let (account_id, key_pair, public_key, principal_id) = make_user(self.seed);
        // Backdate creation by one year so the neuron has already aged.
        let created_timestamp_seconds = (SystemTime::now().duration_since(UNIX_EPOCH).unwrap()
            - Duration::from_secs(60 * 60 * 24 * 365))
        .as_secs();
        let mut neuron = Neuron {
            id: Some(NeuronId { id: self.seed }),
            account: neuron_subaccount_bytes_from_public_key(
                &public_key,
                neuron_subaccount_identifier,
            )
            .unwrap()
            .to_vec(),
            controller: Some(principal_id),
            created_timestamp_seconds,
            aging_since_timestamp_seconds: created_timestamp_seconds + 10,
            // Dissolve timestamp 0: the neuron reads as already dissolved.
            dissolve_state: Some(DissolveState::WhenDissolvedTimestampSeconds(0)),
            cached_neuron_stake_e8s: Tokens::new(10, 0).unwrap().get_e8s(),
            kyc_verified: true,
            ..Default::default()
        };
        // Let the caller customize the default neuron before registration.
        setup(&mut neuron);
        let neuron_account = neuron_account_from_public_key(
            &GOVERNANCE_CANISTER_ID,
            &public_key,
            neuron_subaccount_identifier,
        )
        .unwrap();
        let neuron_account = from_model_account_identifier(&neuron_account).unwrap();
        // Fund the neuron's governance subaccount with its stake on the ledger.
        ledger_balances.insert(
            neuron_account,
            Tokens::from_e8s(neuron.cached_neuron_stake_e8s),
        );
        assert!(
            self.info
                .insert(
                    test_name.into(),
                    NeuronInfo {
                        account_id,
                        key_pair,
                        public_key,
                        principal_id,
                        neuron_subaccount_identifier,
                        neuron,
                        neuron_account,
                    },
                )
                .is_none(),
            "You added the same test twice"
        );
        // Next fixture gets a distinct identity / neuron id.
        self.seed += 1;
    }
    /// Returns a map for prepopulating governance (neuron id -> `Neuron`),
    /// plus a `NeuronTests` handle for retrieving each fixture by test name.
    ///
    /// NOTE(review): an earlier version of this apparently returned a `Vec`;
    /// the old "use `Vec.pop()`" guidance does not apply to the `HashMap`
    /// actually returned here.
    fn neurons(self) -> (HashMap<u64, Neuron>, NeuronTests) {
        let NeuronTestsSetup { info, logger, .. } = self;
        let neurons = info
            .values()
            .map(|NeuronInfo { neuron, .. }| (neuron.id.clone().unwrap().id, neuron.clone()))
            .collect();
        (neurons, NeuronTests { info, logger })
    }
}
| 33.807878 | 193 | 0.59587 |
2379b7d8822865fa78919b5b0a43a327471fde95 | 21,635 | // Copyright (c) 2018 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Listening server for receiving client connections which speak SrvProtocol.
//!
//! The server runs in a separate thread and dispatches operational commands to the main thread
//! over an unbounded mpsc channel, `MgrSender`, to `MgrReceiver`. These commands are wrapped in
//! a [`ctl_gateway.CtlRequest`] if they are transactional.
//!
//! Replies to transactional messages are sent back to the CtlGateway thread over an unbounded
//! mpsc channel, [`CtlSender`], to [`CtlReceiver`]. A new mpsc pair is created for each
//! transactional request where the sending half is given to a [`ctl_gateway.CtlRequest`].
use std::cell::RefCell;
use std::error;
use std::fmt;
use std::io;
use std::net::SocketAddr;
use std::rc::Rc;
use std::thread;
use std::time::Duration;
use crate::hcore::crypto;
use crate::protocol;
use crate::protocol::codec::*;
use crate::protocol::net::{self, ErrCode, NetErr, NetResult};
use futures::future::{self, Either};
use futures::prelude::*;
use futures::sync::mpsc;
use prometheus::{HistogramTimer, HistogramVec, IntCounterVec};
use prost;
use tokio::net::TcpListener;
use tokio_codec::Framed;
use tokio_core::reactor;
use super::{CtlRequest, REQ_TIMEOUT};
use crate::manager::{commands, ManagerState};
// Prometheus metrics for the control-gateway RPC surface, labelled by the
// protocol message name (e.g. "SvcLoad").
lazy_static! {
    /// Total number of RPC calls received, per message type.
    static ref RPC_CALLS: IntCounterVec = register_int_counter_vec!(
        "hab_sup_rpc_call_total",
        "Total number of RPC calls",
        &["name"]
    )
    .unwrap();
    /// Latency histogram for RPC calls, per message type.
    static ref RPC_CALL_DURATION: HistogramVec = register_histogram_vec!(
        "hab_sup_rpc_call_request_duration_seconds",
        "The latency for RPC calls",
        &["name"]
    )
    .unwrap();
}
/// Sending half of an mpsc unbounded channel used for sending replies for a transactional message
/// from the main thread back to the CtlGateway. This half is stored in a
/// [`ctl_gateway.CtlRequest`] in the main thread.
pub type CtlSender = mpsc::UnboundedSender<SrvMessage>;
/// Receiving half of an mpsc unbounded channel used for sending replies for a transactional
/// message from the main thread back to the CtlGateway. This half is stored in the CtlGateway on
/// its thread.
pub type CtlReceiver = mpsc::UnboundedReceiver<SrvMessage>;
/// Sender from the CtlGateway to the Manager to dispatch control commands for clients.
pub type MgrSender = mpsc::UnboundedSender<CtlCommand>;
/// Receiver on the Manager for the sender on the CtlGateway to receive control commands.
pub type MgrReceiver = mpsc::UnboundedReceiver<CtlCommand>;
/// Errors that can occur while serving a single client connection.
#[derive(Debug)]
pub enum HandlerError {
    /// A protobuf payload failed to decode.
    Decode(prost::DecodeError),
    /// The underlying socket I/O failed (or the connection was aborted).
    Io(io::Error),
    /// A protocol-level error.
    NetErr(NetErr),
    /// Failed to forward a command over the manager channel.
    SendError(mpsc::SendError<CtlCommand>),
}
impl error::Error for HandlerError {
    // Delegates to the wrapped error's description.
    // NOTE(review): `Error::description` has been deprecated (since Rust 1.42)
    // in favor of `Display`; kept here to match the crate's existing style.
    fn description(&self) -> &str {
        match *self {
            HandlerError::Decode(ref err) => err.description(),
            HandlerError::Io(ref err) => err.description(),
            HandlerError::NetErr(ref err) => err.description(),
            HandlerError::SendError(ref err) => err.description(),
        }
    }
}
impl fmt::Display for HandlerError {
    /// Delegates to the wrapped error's `Display` implementation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Write straight to the formatter instead of building an intermediate
        // `String` with `format!` — identical output, no extra allocation.
        match *self {
            HandlerError::Decode(ref err) => write!(f, "{}", err),
            HandlerError::Io(ref err) => write!(f, "{}", err),
            HandlerError::NetErr(ref err) => write!(f, "{}", err),
            HandlerError::SendError(ref err) => write!(f, "{}", err),
        }
    }
}
// Conversions so `?` and `map_err(HandlerError::from)` can lift each
// underlying error type into `HandlerError`.
impl From<NetErr> for HandlerError {
    fn from(err: NetErr) -> Self {
        HandlerError::NetErr(err)
    }
}
impl From<io::Error> for HandlerError {
    fn from(err: io::Error) -> Self {
        HandlerError::Io(err)
    }
}
impl From<prost::DecodeError> for HandlerError {
    fn from(err: prost::DecodeError) -> Self {
        HandlerError::Decode(err)
    }
}
impl From<mpsc::SendError<CtlCommand>> for HandlerError {
    fn from(err: mpsc::SendError<CtlCommand>) -> Self {
        HandlerError::SendError(err)
    }
}
/// A wrapper around a [`ctl_gateway.CtlRequest`] and a closure for the main thread to execute.
pub struct CtlCommand {
    /// Reply context (transaction + reply channel) for the originating client.
    pub req: CtlRequest,
    // JW: This needs to be an `FnOnce<Box>` and not an `Fn<Box>` but right now there is no support
    // for boxing an FnOnce in stable Rust. There is a new type called `FnBox` which exists only on
    // nightly right now which accomplishes this but it won't stabilize because the Rust core team
    // feels that they should just get `Box<FnOnce>` working. We'll need to clone the `CtlRequest`
    // argument passed to this closure until `FnOnce<Box>` stabilizes.
    //
    // https://github.com/rust-lang/rust/issues/28796
    fun: Box<dyn Fn(&ManagerState, &mut CtlRequest) -> NetResult<()> + Send>,
}
impl CtlCommand {
    /// Wraps `fun` (to be executed later on the manager thread) together with
    /// a reply context built from the optional sender half and transaction.
    pub fn new<F>(tx: Option<CtlSender>, txn: Option<SrvTxn>, fun: F) -> Self
    where
        F: Fn(&ManagerState, &mut CtlRequest) -> NetResult<()> + Send + 'static,
    {
        let req = CtlRequest::new(tx, txn);
        CtlCommand {
            req,
            fun: Box::new(fun),
        }
    }
    /// Run the contained closure with the given [`manager.ManagerState`].
    pub fn run(&mut self, state: &ManagerState) -> NetResult<()> {
        (self.fun)(state, &mut self.req)
    }
}
/// Server's client representation. Each new connection will allocate a new Client.
struct Client {
    /// Reactor handle, used to create per-connection timeout futures.
    handle: reactor::Handle,
    /// Shared gateway state (secret key + channel to the manager thread).
    state: Rc<RefCell<SrvState>>,
}
impl Client {
/// Serve the client from the given framed socket stream.
pub fn serve(self, socket: SrvStream) -> Box<dyn Future<Item = (), Error = HandlerError>> {
let mgr_tx = self.state.borrow().mgr_tx.clone();
Box::new(
self.handshake(socket)
.and_then(|socket| SrvHandler::new(socket, mgr_tx)),
)
}
/// Initiate a handshake with the connected client before allowing future requests. A failed
/// handshake will close the connection.
fn handshake(
&self,
socket: SrvStream,
) -> Box<dyn Future<Item = SrvStream, Error = HandlerError>> {
let secret_key = self.state.borrow().secret_key.to_string();
let handshake = socket
.into_future()
.map_err(|(err, _)| HandlerError::from(err))
.and_then(move |(m, io)| {
m.map_or_else(
|| {
Err(HandlerError::from(io::Error::from(
io::ErrorKind::UnexpectedEof,
)))
},
move |m| {
if m.message_id() != "Handshake" {
debug!("No handshake");
return Err(HandlerError::from(io::Error::from(
io::ErrorKind::ConnectionAborted,
)));
}
if !m.is_transaction() {
return Err(HandlerError::from(io::Error::from(
io::ErrorKind::ConnectionAborted,
)));
}
match m.parse::<protocol::ctl::Handshake>() {
Ok(decoded) => {
trace!("Received handshake, {:?}", decoded);
let decoded_key = decoded.secret_key.unwrap_or_default();
Ok((m, crypto::secure_eq(decoded_key, secret_key), io))
}
Err(err) => {
warn!("Handshake error, {:?}", err);
Err(HandlerError::from(io::Error::from(
io::ErrorKind::ConnectionAborted,
)))
}
}
},
)
})
.and_then(|(msg, success, socket)| {
let mut reply = if success {
SrvMessage::from(net::ok())
} else {
SrvMessage::from(net::err(ErrCode::Unauthorized, "secret key mismatch"))
};
reply.reply_for(msg.transaction().unwrap(), true);
socket
.send(reply)
.map_err(HandlerError::from)
.and_then(move |io| Ok((io, success)))
});
Box::new(
handshake
.select2(self.timeout(REQ_TIMEOUT))
.then(|res| match res {
Ok(Either::A(((io, true), _to))) => future::ok(io),
Ok(Either::A(((_, false), _to))) => future::err(HandlerError::from(
io::Error::new(io::ErrorKind::ConnectionAborted, "handshake failed"),
)),
Ok(Either::B((_to, _hs))) => future::err(HandlerError::from(io::Error::new(
io::ErrorKind::TimedOut,
"client timed out",
))),
Err(Either::A((err, _))) => future::err(err),
Err(Either::B((err, _))) => future::err(HandlerError::from(err)),
}),
)
}
/// Generate a new timeout future with the given duration in milliseconds.
fn timeout(&self, millis: u64) -> reactor::Timeout {
reactor::Timeout::new(Duration::from_millis(millis), &self.handle)
.expect("failed to generate timeout future")
}
}
/// A `Future` that will resolve into a stream of one or more `SrvMessage` replies.
#[must_use = "futures do nothing unless polled"]
struct SrvHandler {
    /// Framed client socket.
    io: SrvStream,
    /// Where we are in the receive -> dispatch -> reply cycle.
    state: SrvHandlerState,
    /// Channel for dispatching the decoded command to the manager thread.
    mgr_tx: MgrSender,
    /// Receives reply messages from the manager for this request.
    rx: CtlReceiver,
    /// Sender half handed to the manager (inside a `CtlCommand`) for replies.
    tx: CtlSender,
    /// Prometheus timer measuring the in-flight RPC's duration.
    timer: Option<HistogramTimer>,
}
impl SrvHandler {
    /// Builds a handler for one request on the given framed socket, wiring up
    /// the reply channel whose sender half is handed to the manager thread.
    fn new(io: SrvStream, mgr_tx: MgrSender) -> Self {
        let (tx, rx) = mpsc::unbounded();
        // Field-init shorthand (the original spelled out `io: io`, etc. —
        // clippy's `redundant_field_names`).
        SrvHandler {
            io,
            state: SrvHandlerState::Receiving,
            mgr_tx,
            rx,
            tx,
            timer: None,
        }
    }
}
impl Future for SrvHandler {
type Item = ();
type Error = HandlerError;
fn poll(&mut self) -> Poll<(), Self::Error> {
loop {
match self.state {
SrvHandlerState::Receiving => match try_ready!(self.io.poll()) {
Some(msg) => {
let label_values = &[msg.message_id()];
RPC_CALLS.with_label_values(label_values).inc();
let timer = RPC_CALL_DURATION
.with_label_values(label_values)
.start_timer();
self.timer = Some(timer);
trace!("OnMessage, {}", msg.message_id());
let cmd = match msg.message_id() {
"SvcGetDefaultCfg" => {
let m = msg
.parse::<protocol::ctl::SvcGetDefaultCfg>()
.map_err(HandlerError::from)?;
CtlCommand::new(
Some(self.tx.clone()),
msg.transaction(),
move |state, req| commands::service_cfg(state, req, m.clone()),
)
}
"SvcFilePut" => {
let m = msg
.parse::<protocol::ctl::SvcFilePut>()
.map_err(HandlerError::from)?;
CtlCommand::new(
Some(self.tx.clone()),
msg.transaction(),
move |state, req| {
commands::service_file_put(state, req, m.clone())
},
)
}
"SvcSetCfg" => {
let m = msg
.parse::<protocol::ctl::SvcSetCfg>()
.map_err(HandlerError::from)?;
CtlCommand::new(
Some(self.tx.clone()),
msg.transaction(),
move |state, req| {
commands::service_cfg_set(state, req, m.clone())
},
)
}
"SvcValidateCfg" => {
let m = msg
.parse::<protocol::ctl::SvcValidateCfg>()
.map_err(HandlerError::from)?;
CtlCommand::new(
Some(self.tx.clone()),
msg.transaction(),
move |state, req| {
commands::service_cfg_validate(state, req, m.clone())
},
)
}
"SvcLoad" => {
let m = msg
.parse::<protocol::ctl::SvcLoad>()
.map_err(HandlerError::from)?;
CtlCommand::new(
Some(self.tx.clone()),
msg.transaction(),
move |state, req| commands::service_load(state, req, m.clone()),
)
}
"SvcUnload" => {
let m = msg
.parse::<protocol::ctl::SvcUnload>()
.map_err(HandlerError::from)?;
CtlCommand::new(
Some(self.tx.clone()),
msg.transaction(),
move |state, req| {
commands::service_unload(state, req, m.clone())
},
)
}
"SvcStart" => {
let m = msg
.parse::<protocol::ctl::SvcStart>()
.map_err(HandlerError::from)?;
CtlCommand::new(
Some(self.tx.clone()),
msg.transaction(),
move |state, req| {
commands::service_start(state, req, m.clone())
},
)
}
"SvcStop" => {
let m = msg
.parse::<protocol::ctl::SvcStop>()
.map_err(HandlerError::from)?;
CtlCommand::new(
Some(self.tx.clone()),
msg.transaction(),
move |state, req| commands::service_stop(state, req, m.clone()),
)
}
"SvcStatus" => {
let m = msg
.parse::<protocol::ctl::SvcStatus>()
.map_err(HandlerError::from)?;
CtlCommand::new(
Some(self.tx.clone()),
msg.transaction(),
move |state, req| {
commands::service_status(state, req, m.clone())
},
)
}
"SupDepart" => {
let m = msg
.parse::<protocol::ctl::SupDepart>()
.map_err(HandlerError::from)?;
CtlCommand::new(
Some(self.tx.clone()),
msg.transaction(),
move |state, req| {
commands::supervisor_depart(state, req, m.clone())
},
)
}
_ => {
warn!("Unhandled message, {}", msg.message_id());
break;
}
};
match self.mgr_tx.start_send(cmd) {
Ok(AsyncSink::Ready) => {
self.state = SrvHandlerState::Sending;
continue;
}
Ok(AsyncSink::NotReady(_)) => return Ok(Async::NotReady),
Err(err) => {
warn!("ManagerReceiver err, {:?}", err);
return Err(HandlerError::from(err));
}
}
}
None => break,
},
SrvHandlerState::Sending => match self.rx.poll() {
Ok(Async::Ready(Some(msg))) => {
trace!("MgrSender -> SrvHandler, {:?}", msg);
if msg.is_complete() {
self.state = SrvHandlerState::Sent;
}
try_nb!(self.io.start_send(msg));
try_ready!(self.io.poll_complete());
continue;
}
Ok(Async::Ready(None)) => self.state = SrvHandlerState::Sent,
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(()) => break,
},
SrvHandlerState::Sent => {
if let Some(timer) = self.timer.take() {
timer.observe_duration();
}
trace!("OnMessage complete");
break;
}
}
}
Ok(Async::Ready(()))
}
}
/// Per-request state machine driven by `SrvHandler`'s `poll`.
enum SrvHandlerState {
    /// Handler is Receiving/Waiting for message from client.
    Receiving,
    /// Handler has sent a request to the Manager and is streaming replies back to the client
    /// socket.
    Sending,
    /// All messages have been sent to the client and the Handler is now flushing the connection.
    Sent,
}
/// State shared across all client connections of the gateway.
struct SrvState {
    /// Shared secret each client must present during the handshake.
    secret_key: String,
    /// Channel used to forward decoded commands to the manager thread.
    mgr_tx: MgrSender,
}
/// Start a new thread which will run the CtlGateway server.
///
/// New connections will be authenticated using `secret_key`. Messages from the main thread
/// will be sent over the channel `mgr_tx`.
pub fn run(listen_addr: SocketAddr, secret_key: String, mgr_tx: MgrSender) {
    thread::Builder::new()
        .name("ctl-gateway".to_string())
        .spawn(move || {
            let mut core = reactor::Core::new().unwrap();
            let handle = core.handle();
            let listener = TcpListener::bind(&listen_addr).unwrap();
            // Shared across all connections accepted below; the reactor is
            // single-threaded, so `Rc<RefCell<_>>` suffices. Field-init
            // shorthand replaces the original's `secret_key: secret_key`.
            let state = Rc::new(RefCell::new(SrvState { secret_key, mgr_tx }));
            let clients = listener.incoming().map(|socket| {
                let addr = socket.peer_addr().unwrap();
                let io = Framed::new(socket, SrvCodec::new());
                (
                    Client {
                        handle: handle.clone(),
                        state: state.clone(),
                    }
                    .serve(io),
                    addr,
                )
            });
            // Spawn each client's future on the reactor and log its outcome
            // when the connection terminates.
            let server = clients.for_each(|(client, addr)| {
                handle.spawn(client.then(move |res| {
                    debug!("DISCONNECTED from {:?} with result {:?}", addr, res);
                    future::ok(())
                }));
                Ok(())
            });
            core.run(server)
        })
        .expect("ctl-gateway thread start failure");
}
| 41.685934 | 100 | 0.452785 |
b9253d43f2197099df86c4330119473be5198f5b | 3,732 | // Copyright 2017 Remi Bernotavicius
use std::io::{self, Result};
use std::mem;
use crate::emulator_common::disassembler::{
Disassembler, InstructionPrinter, InstructionPrinterFactory, MemoryAccessor,
SimpleMemoryAccessor,
};
pub use crate::lr35902_emulator::opcodes::opcode_gen::{
dispatch_lr35902_instruction, get_lr35902_instruction, LR35902InstructionSet,
};
#[cfg(test)]
use crate::emulator_common::disassembler::do_disassembler_test;
/// `InstructionPrinter` implementation for the LR35902 (Game Boy CPU).
pub struct LR35902InstructionPrinter<'a> {
    /// Destination for disassembly text.
    stream_out: &'a mut dyn io::Write,
    /// Deferred write error, returned (and reset) by `print_instruction`;
    /// presumably recorded by the generated dispatch callbacks — see
    /// `opcode_gen`.
    error: Result<()>,
}
// Generated opcode tables / dispatch functions.
mod opcode_gen;
/// Zero-sized factory producing `LR35902InstructionPrinter`s.
#[derive(Copy, Clone)]
pub struct LR35902InstructionPrinterFactory;
impl<'a> InstructionPrinterFactory<'a> for LR35902InstructionPrinterFactory {
    type Output = LR35902InstructionPrinter<'a>;
    /// Creates a printer that writes disassembly to `stream_out` with no
    /// pending error.
    fn new(&self, stream_out: &'a mut dyn io::Write) -> LR35902InstructionPrinter<'a> {
        // Tail expression instead of `return ...;` (clippy `needless_return`),
        // plus field-init shorthand.
        LR35902InstructionPrinter {
            stream_out,
            error: Ok(()),
        }
    }
}
impl<'a> InstructionPrinter<'a> for LR35902InstructionPrinter<'a> {
    /// Dispatches one instruction from `stream` to the generated printer
    /// callbacks, then returns whatever error they recorded in `self.error`.
    fn print_instruction(&mut self, stream: &[u8], _address: u16) -> Result<()> {
        dispatch_lr35902_instruction(stream, self);
        // Hand back the recorded error (if any) and reset the slot to Ok.
        mem::replace(&mut self.error, Ok(()))
    }
    /// Reads and returns the raw bytes of the next instruction, if any.
    fn get_instruction<R: io::Read>(&self, stream: R) -> Option<Vec<u8>> {
        get_lr35902_instruction(stream)
    }
}
/// Convenience constructor: a `Disassembler` wired up with the LR35902
/// printer factory, reading from `ma` and writing to `stream_out`.
pub fn create_disassembler<'a>(
    ma: &'a dyn MemoryAccessor,
    stream_out: &'a mut dyn io::Write,
) -> Disassembler<'a, LR35902InstructionPrinterFactory> {
    Disassembler::new(ma, LR35902InstructionPrinterFactory, stream_out)
}
/// Disassembles `rom` to stdout, optionally printing the raw opcode bytes.
///
/// NOTE(review): `rom.len() as u16` silently truncates for images of 64 KiB
/// or more; callers presumably pass <64 KiB ROMs — confirm.
pub fn disassemble_lr35902_rom(rom: &[u8], include_opcodes: bool) -> Result<()> {
    let stdout = &mut io::stdout();
    let mut ma = SimpleMemoryAccessor::new();
    // `u8` is `Copy`, so `copy_from_slice` (a straight memcpy) is the right
    // call here rather than the original's `clone_from_slice`.
    ma.memory[0..rom.len()].copy_from_slice(rom);
    let mut disassembler = create_disassembler(&ma, stdout);
    disassembler.disassemble(0u16..rom.len() as u16, include_opcodes)
}
// Golden test: disassembles a fixed byte sequence and compares against the
// expected listing (address, raw opcode bytes, mnemonic per line).
#[test]
fn disassembler_lr35902_test() {
    do_disassembler_test(
        LR35902InstructionPrinterFactory,
        &[
            0xcd, 0xd6, 0x35, 0x21, 0x2d, 0xd7, 0xcb, 0xae, 0xcd, 0x29, 0x24, 0x21, 0x26, 0xd1,
            0xcb, 0xee, 0xcb, 0xf6, 0xaf, 0xea, 0x6b, 0xcd, 0xcd, 0xaf, 0x20, 0xcd, 0xaf, 0x20,
            0xcd, 0xba, 0x20, 0xfa, 0x36, 0xd7, 0xcb, 0x77, 0xc4, 0x9e, 0x03, 0xfa, 0xc5, 0xcf,
            0xa7, 0xc2, 0xb5, 0x05, 0xcd, 0x4d, 0x0f, 0x06, 0x07, 0x21, 0x88, 0x69, 0xcd, 0xd6,
            0x35,
        ],
        "\
         0000000 cd d6 35 CALL $35d6\n\
         0000003 21 2d d7 LXI  H #$d72d\n\
         0000006 cb ae    RES  5 M\n\
         0000008 cd 29 24 CALL $2429\n\
         000000b 21 26 d1 LXI  H #$d126\n\
         000000e cb ee    SET  5 M\n\
         0000010 cb f6    SET  6 M\n\
         0000012 af       XRA  A\n\
         0000013 ea 6b cd STA  $cd6b\n\
         0000016 cd af 20 CALL $20af\n\
         0000019 cd af 20 CALL $20af\n\
         000001c cd ba 20 CALL $20ba\n\
         000001f fa 36 d7 LDAD $d736\n\
         0000022 cb 77    BIT  6 A\n\
         0000024 c4 9e 03 CNZ  $39e\n\
         0000027 fa c5 cf LDAD $cfc5\n\
         000002a a7       ANA  A\n\
         000002b c2 b5 05 JNZ  $5b5\n\
         000002e cd 4d 0f CALL $f4d\n\
         0000031 06 07    MVI  B #$07\n\
         0000033 21 88 69 LXI  H #$6988\n\
         0000036 cd d6 35 CALL $35d6\n\
         ",
    );
}
// Unimplemented opcodes must render as a bare byte followed by "-" rather
// than panicking or printing a bogus mnemonic.
#[test]
fn disassembler_lr35902_prints_not_implemented_instructions_correctly() {
    do_disassembler_test(
        LR35902InstructionPrinterFactory,
        &[0xd3, 0xe3, 0xe4, 0xf4],
        "\
         0000000 d3       -\n\
         0000001 e3       -\n\
         0000002 e4       -\n\
         0000003 f4       -\n\
         ",
    );
}
| 33.026549 | 95 | 0.616559 |
f50a023e1b71e8fa728d21a1f31efeb538e84364 | 2,089 | #[doc = "Reader of register DIEPTXF6"]
pub type R = crate::R<u32, super::DIEPTXF6>;
#[doc = "Writer for register DIEPTXF6"]
pub type W = crate::W<u32, super::DIEPTXF6>;
#[doc = "Register DIEPTXF6 `reset()`'s with value 0x0100_062a"]
impl crate::ResetValue for super::DIEPTXF6 {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0x0100_062a
}
}
#[doc = "Reader of field `INEPnTxFStAddr`"]
pub type INEPNTXFSTADDR_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `INEPnTxFStAddr`"]
pub struct INEPNTXFSTADDR_W<'a> {
w: &'a mut W,
}
impl<'a> INEPNTXFSTADDR_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !0xffff) | ((value as u32) & 0xffff);
self.w
}
}
#[doc = "Reader of field `INEPnTxFDep`"]
pub type INEPNTXFDEP_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `INEPnTxFDep`"]
pub struct INEPNTXFDEP_W<'a> {
w: &'a mut W,
}
impl<'a> INEPNTXFDEP_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !(0xffff << 16)) | (((value as u32) & 0xffff) << 16);
self.w
}
}
impl R {
#[doc = "Bits 0:15 - IN Endpoint FIFOn Transmit RAM Start Address"]
#[inline(always)]
pub fn inepn_tx_fst_addr(&self) -> INEPNTXFSTADDR_R {
INEPNTXFSTADDR_R::new((self.bits & 0xffff) as u16)
}
#[doc = "Bits 16:31 - IN Endpoint TxFIFO Depth"]
#[inline(always)]
pub fn inepn_tx_fdep(&self) -> INEPNTXFDEP_R {
INEPNTXFDEP_R::new(((self.bits >> 16) & 0xffff) as u16)
}
}
impl W {
#[doc = "Bits 0:15 - IN Endpoint FIFOn Transmit RAM Start Address"]
#[inline(always)]
pub fn inepn_tx_fst_addr(&mut self) -> INEPNTXFSTADDR_W {
INEPNTXFSTADDR_W { w: self }
}
#[doc = "Bits 16:31 - IN Endpoint TxFIFO Depth"]
#[inline(always)]
pub fn inepn_tx_fdep(&mut self) -> INEPNTXFDEP_W {
INEPNTXFDEP_W { w: self }
}
}
| 32.138462 | 90 | 0.608425 |
ebfdc7d32fe1e446ae7bdba2bd76e7c249e1f16c | 158,058 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_associate_created_artifact_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::AssociateCreatedArtifactOutput,
crate::error::AssociateCreatedArtifactError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::AssociateCreatedArtifactError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::AssociateCreatedArtifactError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::AssociateCreatedArtifactError {
meta: generic,
kind: crate::error::AssociateCreatedArtifactErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AssociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"DryRunOperation" => crate::error::AssociateCreatedArtifactError {
meta: generic,
kind: crate::error::AssociateCreatedArtifactErrorKind::DryRunOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::dry_run_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_dry_run_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AssociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"HomeRegionNotSetException" => {
crate::error::AssociateCreatedArtifactError {
meta: generic,
kind: crate::error::AssociateCreatedArtifactErrorKind::HomeRegionNotSetException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::home_region_not_set_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_home_region_not_set_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::AssociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalServerError" => crate::error::AssociateCreatedArtifactError {
meta: generic,
kind: crate::error::AssociateCreatedArtifactErrorKind::InternalServerError({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_error::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_errorjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AssociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::AssociateCreatedArtifactError {
meta: generic,
kind: crate::error::AssociateCreatedArtifactErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AssociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::AssociateCreatedArtifactError {
meta: generic,
kind: crate::error::AssociateCreatedArtifactErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::AssociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ServiceUnavailableException" => {
crate::error::AssociateCreatedArtifactError {
meta: generic,
kind: crate::error::AssociateCreatedArtifactErrorKind::ServiceUnavailableException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_unavailable_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_unavailable_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::AssociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"ThrottlingException" => crate::error::AssociateCreatedArtifactError {
meta: generic,
kind: crate::error::AssociateCreatedArtifactErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AssociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"UnauthorizedOperation" => crate::error::AssociateCreatedArtifactError {
meta: generic,
kind: crate::error::AssociateCreatedArtifactErrorKind::UnauthorizedOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::unauthorized_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_unauthorized_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AssociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::AssociateCreatedArtifactError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_associate_created_artifact_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::AssociateCreatedArtifactOutput,
crate::error::AssociateCreatedArtifactError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::associate_created_artifact_output::Builder::default();
let _ = response;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_associate_discovered_resource_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::AssociateDiscoveredResourceOutput,
crate::error::AssociateDiscoveredResourceError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::AssociateDiscoveredResourceError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::AssociateDiscoveredResourceError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::AssociateDiscoveredResourceError {
meta: generic,
kind: crate::error::AssociateDiscoveredResourceErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AssociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"DryRunOperation" => crate::error::AssociateDiscoveredResourceError {
meta: generic,
kind: crate::error::AssociateDiscoveredResourceErrorKind::DryRunOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::dry_run_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_dry_run_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AssociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"HomeRegionNotSetException" => {
crate::error::AssociateDiscoveredResourceError {
meta: generic,
kind: crate::error::AssociateDiscoveredResourceErrorKind::HomeRegionNotSetException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::home_region_not_set_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_home_region_not_set_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::AssociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"InternalServerError" => crate::error::AssociateDiscoveredResourceError {
meta: generic,
kind: crate::error::AssociateDiscoveredResourceErrorKind::InternalServerError({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_error::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_errorjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AssociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::AssociateDiscoveredResourceError {
meta: generic,
kind: crate::error::AssociateDiscoveredResourceErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AssociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"PolicyErrorException" => crate::error::AssociateDiscoveredResourceError {
meta: generic,
kind: crate::error::AssociateDiscoveredResourceErrorKind::PolicyErrorException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::policy_error_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_policy_error_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AssociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::AssociateDiscoveredResourceError {
meta: generic,
kind: crate::error::AssociateDiscoveredResourceErrorKind::ResourceNotFoundException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::AssociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"ServiceUnavailableException" => crate::error::AssociateDiscoveredResourceError {
meta: generic,
kind: crate::error::AssociateDiscoveredResourceErrorKind::ServiceUnavailableException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_unavailable_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_unavailable_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::AssociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
},
"ThrottlingException" => crate::error::AssociateDiscoveredResourceError {
meta: generic,
kind: crate::error::AssociateDiscoveredResourceErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AssociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"UnauthorizedOperation" => crate::error::AssociateDiscoveredResourceError {
meta: generic,
kind: crate::error::AssociateDiscoveredResourceErrorKind::UnauthorizedOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::unauthorized_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_unauthorized_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AssociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::AssociateDiscoveredResourceError::generic(generic),
})
}
/// Builds the (empty) output for `AssociateDiscoveredResource`.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_associate_discovered_resource_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::AssociateDiscoveredResourceOutput,
    crate::error::AssociateDiscoveredResourceError,
> {
    // The operation's output shape carries no members, so the HTTP body
    // is intentionally ignored.
    let _ = response;
    let builder = crate::output::associate_discovered_resource_output::Builder::default();
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_progress_update_stream_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::CreateProgressUpdateStreamOutput,
crate::error::CreateProgressUpdateStreamError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::CreateProgressUpdateStreamError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::CreateProgressUpdateStreamError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::CreateProgressUpdateStreamError {
meta: generic,
kind: crate::error::CreateProgressUpdateStreamErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateProgressUpdateStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"DryRunOperation" => crate::error::CreateProgressUpdateStreamError {
meta: generic,
kind: crate::error::CreateProgressUpdateStreamErrorKind::DryRunOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::dry_run_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_dry_run_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateProgressUpdateStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"HomeRegionNotSetException" => {
crate::error::CreateProgressUpdateStreamError {
meta: generic,
kind: crate::error::CreateProgressUpdateStreamErrorKind::HomeRegionNotSetException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::home_region_not_set_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_home_region_not_set_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::CreateProgressUpdateStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"InternalServerError" => crate::error::CreateProgressUpdateStreamError {
meta: generic,
kind: crate::error::CreateProgressUpdateStreamErrorKind::InternalServerError({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_error::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_errorjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateProgressUpdateStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::CreateProgressUpdateStreamError {
meta: generic,
kind: crate::error::CreateProgressUpdateStreamErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateProgressUpdateStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ServiceUnavailableException" => {
crate::error::CreateProgressUpdateStreamError {
meta: generic,
kind:
crate::error::CreateProgressUpdateStreamErrorKind::ServiceUnavailableException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_unavailable_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_unavailable_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::CreateProgressUpdateStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"ThrottlingException" => crate::error::CreateProgressUpdateStreamError {
meta: generic,
kind: crate::error::CreateProgressUpdateStreamErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateProgressUpdateStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"UnauthorizedOperation" => crate::error::CreateProgressUpdateStreamError {
meta: generic,
kind: crate::error::CreateProgressUpdateStreamErrorKind::UnauthorizedOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::unauthorized_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_unauthorized_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateProgressUpdateStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::CreateProgressUpdateStreamError::generic(generic),
})
}
/// Builds the (empty) output for `CreateProgressUpdateStream`.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_progress_update_stream_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::CreateProgressUpdateStreamOutput,
    crate::error::CreateProgressUpdateStreamError,
> {
    // The operation's output shape carries no members, so the HTTP body
    // is intentionally ignored.
    let _ = response;
    let builder = crate::output::create_progress_update_stream_output::Builder::default();
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_progress_update_stream_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DeleteProgressUpdateStreamOutput,
crate::error::DeleteProgressUpdateStreamError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DeleteProgressUpdateStreamError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::DeleteProgressUpdateStreamError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::DeleteProgressUpdateStreamError {
meta: generic,
kind: crate::error::DeleteProgressUpdateStreamErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteProgressUpdateStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"DryRunOperation" => crate::error::DeleteProgressUpdateStreamError {
meta: generic,
kind: crate::error::DeleteProgressUpdateStreamErrorKind::DryRunOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::dry_run_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_dry_run_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteProgressUpdateStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"HomeRegionNotSetException" => {
crate::error::DeleteProgressUpdateStreamError {
meta: generic,
kind: crate::error::DeleteProgressUpdateStreamErrorKind::HomeRegionNotSetException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::home_region_not_set_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_home_region_not_set_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DeleteProgressUpdateStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"InternalServerError" => crate::error::DeleteProgressUpdateStreamError {
meta: generic,
kind: crate::error::DeleteProgressUpdateStreamErrorKind::InternalServerError({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_error::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_errorjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteProgressUpdateStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DeleteProgressUpdateStreamError {
meta: generic,
kind: crate::error::DeleteProgressUpdateStreamErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteProgressUpdateStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::DeleteProgressUpdateStreamError {
meta: generic,
kind: crate::error::DeleteProgressUpdateStreamErrorKind::ResourceNotFoundException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DeleteProgressUpdateStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"ServiceUnavailableException" => {
crate::error::DeleteProgressUpdateStreamError {
meta: generic,
kind:
crate::error::DeleteProgressUpdateStreamErrorKind::ServiceUnavailableException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_unavailable_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_unavailable_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DeleteProgressUpdateStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"ThrottlingException" => crate::error::DeleteProgressUpdateStreamError {
meta: generic,
kind: crate::error::DeleteProgressUpdateStreamErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteProgressUpdateStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"UnauthorizedOperation" => crate::error::DeleteProgressUpdateStreamError {
meta: generic,
kind: crate::error::DeleteProgressUpdateStreamErrorKind::UnauthorizedOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::unauthorized_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_unauthorized_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteProgressUpdateStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DeleteProgressUpdateStreamError::generic(generic),
})
}
/// Builds the (empty) output for `DeleteProgressUpdateStream`.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_progress_update_stream_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::DeleteProgressUpdateStreamOutput,
    crate::error::DeleteProgressUpdateStreamError,
> {
    // The operation's output shape carries no members, so the HTTP body
    // is intentionally ignored.
    let _ = response;
    let builder = crate::output::delete_progress_update_stream_output::Builder::default();
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_application_state_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeApplicationStateOutput,
crate::error::DescribeApplicationStateError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DescribeApplicationStateError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::DescribeApplicationStateError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::DescribeApplicationStateError {
meta: generic,
kind: crate::error::DescribeApplicationStateErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"HomeRegionNotSetException" => {
crate::error::DescribeApplicationStateError {
meta: generic,
kind: crate::error::DescribeApplicationStateErrorKind::HomeRegionNotSetException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::home_region_not_set_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_home_region_not_set_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalServerError" => crate::error::DescribeApplicationStateError {
meta: generic,
kind: crate::error::DescribeApplicationStateErrorKind::InternalServerError({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_error::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_errorjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DescribeApplicationStateError {
meta: generic,
kind: crate::error::DescribeApplicationStateErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"PolicyErrorException" => crate::error::DescribeApplicationStateError {
meta: generic,
kind: crate::error::DescribeApplicationStateErrorKind::PolicyErrorException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::policy_error_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_policy_error_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::DescribeApplicationStateError {
meta: generic,
kind: crate::error::DescribeApplicationStateErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ServiceUnavailableException" => {
crate::error::DescribeApplicationStateError {
meta: generic,
kind: crate::error::DescribeApplicationStateErrorKind::ServiceUnavailableException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_unavailable_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_unavailable_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"ThrottlingException" => crate::error::DescribeApplicationStateError {
meta: generic,
kind: crate::error::DescribeApplicationStateErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeApplicationStateError::generic(generic),
})
}
/// Deserializes a successful `DescribeApplicationState` response body into
/// the operation's output type.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_application_state_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::DescribeApplicationStateOutput,
    crate::error::DescribeApplicationStateError,
> {
    // Overlay the JSON payload onto a default-initialized output builder;
    // any deserialization failure is reported as an unhandled error.
    let builder = crate::output::describe_application_state_output::Builder::default();
    let builder = crate::json_deser::deser_operation_describe_application_state(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::DescribeApplicationStateError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_migration_task_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeMigrationTaskOutput,
crate::error::DescribeMigrationTaskError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DescribeMigrationTaskError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DescribeMigrationTaskError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::DescribeMigrationTaskError {
meta: generic,
kind: crate::error::DescribeMigrationTaskErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeMigrationTaskError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"HomeRegionNotSetException" => {
crate::error::DescribeMigrationTaskError {
meta: generic,
kind: crate::error::DescribeMigrationTaskErrorKind::HomeRegionNotSetException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::home_region_not_set_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_home_region_not_set_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeMigrationTaskError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalServerError" => crate::error::DescribeMigrationTaskError {
meta: generic,
kind: crate::error::DescribeMigrationTaskErrorKind::InternalServerError({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_error::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_errorjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeMigrationTaskError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DescribeMigrationTaskError {
meta: generic,
kind: crate::error::DescribeMigrationTaskErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeMigrationTaskError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::DescribeMigrationTaskError {
meta: generic,
kind: crate::error::DescribeMigrationTaskErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeMigrationTaskError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ServiceUnavailableException" => {
crate::error::DescribeMigrationTaskError {
meta: generic,
kind: crate::error::DescribeMigrationTaskErrorKind::ServiceUnavailableException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_unavailable_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_unavailable_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeMigrationTaskError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ThrottlingException" => crate::error::DescribeMigrationTaskError {
meta: generic,
kind: crate::error::DescribeMigrationTaskErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeMigrationTaskError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeMigrationTaskError::generic(generic),
})
}
/// Deserializes a successful `DescribeMigrationTask` response body into
/// the operation's output type.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_migration_task_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::DescribeMigrationTaskOutput,
    crate::error::DescribeMigrationTaskError,
> {
    // Overlay the JSON payload onto a default-initialized output builder;
    // any deserialization failure is reported as an unhandled error.
    let builder = crate::output::describe_migration_task_output::Builder::default();
    let builder = crate::json_deser::deser_operation_describe_migration_task(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::DescribeMigrationTaskError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disassociate_created_artifact_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DisassociateCreatedArtifactOutput,
crate::error::DisassociateCreatedArtifactError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DisassociateCreatedArtifactError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::DisassociateCreatedArtifactError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::DisassociateCreatedArtifactError {
meta: generic,
kind: crate::error::DisassociateCreatedArtifactErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"DryRunOperation" => crate::error::DisassociateCreatedArtifactError {
meta: generic,
kind: crate::error::DisassociateCreatedArtifactErrorKind::DryRunOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::dry_run_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_dry_run_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"HomeRegionNotSetException" => {
crate::error::DisassociateCreatedArtifactError {
meta: generic,
kind: crate::error::DisassociateCreatedArtifactErrorKind::HomeRegionNotSetException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::home_region_not_set_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_home_region_not_set_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DisassociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"InternalServerError" => crate::error::DisassociateCreatedArtifactError {
meta: generic,
kind: crate::error::DisassociateCreatedArtifactErrorKind::InternalServerError({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_error::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_errorjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DisassociateCreatedArtifactError {
meta: generic,
kind: crate::error::DisassociateCreatedArtifactErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::DisassociateCreatedArtifactError {
meta: generic,
kind: crate::error::DisassociateCreatedArtifactErrorKind::ResourceNotFoundException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DisassociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"ServiceUnavailableException" => crate::error::DisassociateCreatedArtifactError {
meta: generic,
kind: crate::error::DisassociateCreatedArtifactErrorKind::ServiceUnavailableException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_unavailable_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_unavailable_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DisassociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
},
"ThrottlingException" => crate::error::DisassociateCreatedArtifactError {
meta: generic,
kind: crate::error::DisassociateCreatedArtifactErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"UnauthorizedOperation" => crate::error::DisassociateCreatedArtifactError {
meta: generic,
kind: crate::error::DisassociateCreatedArtifactErrorKind::UnauthorizedOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::unauthorized_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_unauthorized_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateCreatedArtifactError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DisassociateCreatedArtifactError::generic(generic),
})
}
/// Builds the success output for `DisassociateCreatedArtifact`.
///
/// The operation models no output fields, so the response body is ignored
/// and a default-constructed output is returned.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disassociate_created_artifact_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::DisassociateCreatedArtifactOutput,
    crate::error::DisassociateCreatedArtifactError,
> {
    // Explicitly discard the response: nothing in it maps to output fields.
    let _ = response;
    let builder = crate::output::disassociate_created_artifact_output::Builder::default();
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disassociate_discovered_resource_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DisassociateDiscoveredResourceOutput,
crate::error::DisassociateDiscoveredResourceError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DisassociateDiscoveredResourceError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DisassociateDiscoveredResourceError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::DisassociateDiscoveredResourceError {
meta: generic,
kind: crate::error::DisassociateDiscoveredResourceErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"DryRunOperation" => crate::error::DisassociateDiscoveredResourceError {
meta: generic,
kind: crate::error::DisassociateDiscoveredResourceErrorKind::DryRunOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::dry_run_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_dry_run_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"HomeRegionNotSetException" => crate::error::DisassociateDiscoveredResourceError {
meta: generic,
kind: crate::error::DisassociateDiscoveredResourceErrorKind::HomeRegionNotSetException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::home_region_not_set_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_home_region_not_set_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DisassociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
},
"InternalServerError" => crate::error::DisassociateDiscoveredResourceError {
meta: generic,
kind: crate::error::DisassociateDiscoveredResourceErrorKind::InternalServerError({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_error::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_errorjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DisassociateDiscoveredResourceError {
meta: generic,
kind: crate::error::DisassociateDiscoveredResourceErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => crate::error::DisassociateDiscoveredResourceError {
meta: generic,
kind: crate::error::DisassociateDiscoveredResourceErrorKind::ResourceNotFoundException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DisassociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
},
"ServiceUnavailableException" => crate::error::DisassociateDiscoveredResourceError {
meta: generic,
kind:
crate::error::DisassociateDiscoveredResourceErrorKind::ServiceUnavailableException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_unavailable_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_unavailable_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DisassociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
},
"ThrottlingException" => crate::error::DisassociateDiscoveredResourceError {
meta: generic,
kind: crate::error::DisassociateDiscoveredResourceErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"UnauthorizedOperation" => crate::error::DisassociateDiscoveredResourceError {
meta: generic,
kind: crate::error::DisassociateDiscoveredResourceErrorKind::UnauthorizedOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::unauthorized_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_unauthorized_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateDiscoveredResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DisassociateDiscoveredResourceError::generic(generic),
})
}
/// Builds the success output for `DisassociateDiscoveredResource`.
///
/// The operation models no output fields, so the response body is ignored
/// and a default-constructed output is returned.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disassociate_discovered_resource_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::DisassociateDiscoveredResourceOutput,
    crate::error::DisassociateDiscoveredResourceError,
> {
    // Explicitly discard the response: nothing in it maps to output fields.
    let _ = response;
    let builder = crate::output::disassociate_discovered_resource_output::Builder::default();
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_import_migration_task_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ImportMigrationTaskOutput,
crate::error::ImportMigrationTaskError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::ImportMigrationTaskError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::ImportMigrationTaskError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::ImportMigrationTaskError {
meta: generic,
kind: crate::error::ImportMigrationTaskErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ImportMigrationTaskError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"DryRunOperation" => crate::error::ImportMigrationTaskError {
meta: generic,
kind: crate::error::ImportMigrationTaskErrorKind::DryRunOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::dry_run_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_dry_run_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ImportMigrationTaskError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"HomeRegionNotSetException" => {
crate::error::ImportMigrationTaskError {
meta: generic,
kind: crate::error::ImportMigrationTaskErrorKind::HomeRegionNotSetException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::home_region_not_set_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_home_region_not_set_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ImportMigrationTaskError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalServerError" => crate::error::ImportMigrationTaskError {
meta: generic,
kind: crate::error::ImportMigrationTaskErrorKind::InternalServerError({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_error::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_errorjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ImportMigrationTaskError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::ImportMigrationTaskError {
meta: generic,
kind: crate::error::ImportMigrationTaskErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ImportMigrationTaskError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::ImportMigrationTaskError {
meta: generic,
kind: crate::error::ImportMigrationTaskErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ImportMigrationTaskError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ServiceUnavailableException" => {
crate::error::ImportMigrationTaskError {
meta: generic,
kind: crate::error::ImportMigrationTaskErrorKind::ServiceUnavailableException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_unavailable_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_unavailable_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ImportMigrationTaskError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ThrottlingException" => crate::error::ImportMigrationTaskError {
meta: generic,
kind: crate::error::ImportMigrationTaskErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ImportMigrationTaskError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"UnauthorizedOperation" => crate::error::ImportMigrationTaskError {
meta: generic,
kind: crate::error::ImportMigrationTaskErrorKind::UnauthorizedOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::unauthorized_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_unauthorized_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ImportMigrationTaskError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ImportMigrationTaskError::generic(generic),
})
}
/// Builds the success output for `ImportMigrationTask`.
///
/// The operation models no output fields, so the response body is ignored
/// and a default-constructed output is returned.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_import_migration_task_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::ImportMigrationTaskOutput,
    crate::error::ImportMigrationTaskError,
> {
    // Explicitly discard the response: nothing in it maps to output fields.
    let _ = response;
    let builder = crate::output::import_migration_task_output::Builder::default();
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_application_states_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListApplicationStatesOutput,
crate::error::ListApplicationStatesError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::ListApplicationStatesError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::ListApplicationStatesError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::ListApplicationStatesError {
meta: generic,
kind: crate::error::ListApplicationStatesErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListApplicationStatesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"HomeRegionNotSetException" => {
crate::error::ListApplicationStatesError {
meta: generic,
kind: crate::error::ListApplicationStatesErrorKind::HomeRegionNotSetException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::home_region_not_set_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_home_region_not_set_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ListApplicationStatesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalServerError" => crate::error::ListApplicationStatesError {
meta: generic,
kind: crate::error::ListApplicationStatesErrorKind::InternalServerError({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_error::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_errorjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListApplicationStatesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::ListApplicationStatesError {
meta: generic,
kind: crate::error::ListApplicationStatesErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListApplicationStatesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ServiceUnavailableException" => {
crate::error::ListApplicationStatesError {
meta: generic,
kind: crate::error::ListApplicationStatesErrorKind::ServiceUnavailableException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_unavailable_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_unavailable_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ListApplicationStatesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ThrottlingException" => crate::error::ListApplicationStatesError {
meta: generic,
kind: crate::error::ListApplicationStatesErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListApplicationStatesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListApplicationStatesError::generic(generic),
})
}
/// Deserializes the success payload for `ListApplicationStates`.
///
/// The JSON body is decoded into the output builder; a malformed body
/// surfaces as an "unhandled" operation error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_application_states_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::ListApplicationStatesOutput,
    crate::error::ListApplicationStatesError,
> {
    let builder = crate::output::list_application_states_output::Builder::default();
    let populated = crate::json_deser::deser_operation_list_application_states(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::ListApplicationStatesError::unhandled)?;
    Ok(populated.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_created_artifacts_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListCreatedArtifactsOutput,
crate::error::ListCreatedArtifactsError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::ListCreatedArtifactsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::ListCreatedArtifactsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::ListCreatedArtifactsError {
meta: generic,
kind: crate::error::ListCreatedArtifactsErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListCreatedArtifactsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"HomeRegionNotSetException" => {
crate::error::ListCreatedArtifactsError {
meta: generic,
kind: crate::error::ListCreatedArtifactsErrorKind::HomeRegionNotSetException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::home_region_not_set_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_home_region_not_set_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ListCreatedArtifactsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalServerError" => crate::error::ListCreatedArtifactsError {
meta: generic,
kind: crate::error::ListCreatedArtifactsErrorKind::InternalServerError({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_error::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_errorjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListCreatedArtifactsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::ListCreatedArtifactsError {
meta: generic,
kind: crate::error::ListCreatedArtifactsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListCreatedArtifactsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::ListCreatedArtifactsError {
meta: generic,
kind: crate::error::ListCreatedArtifactsErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ListCreatedArtifactsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ServiceUnavailableException" => {
crate::error::ListCreatedArtifactsError {
meta: generic,
kind: crate::error::ListCreatedArtifactsErrorKind::ServiceUnavailableException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_unavailable_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_unavailable_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ListCreatedArtifactsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ThrottlingException" => crate::error::ListCreatedArtifactsError {
meta: generic,
kind: crate::error::ListCreatedArtifactsErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListCreatedArtifactsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListCreatedArtifactsError::generic(generic),
})
}
/// Deserializes the success payload for `ListCreatedArtifacts`.
///
/// The JSON body is decoded into the output builder; a malformed body
/// surfaces as an "unhandled" operation error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_created_artifacts_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::ListCreatedArtifactsOutput,
    crate::error::ListCreatedArtifactsError,
> {
    let builder = crate::output::list_created_artifacts_output::Builder::default();
    let populated = crate::json_deser::deser_operation_list_created_artifacts(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::ListCreatedArtifactsError::unhandled)?;
    Ok(populated.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_discovered_resources_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListDiscoveredResourcesOutput,
crate::error::ListDiscoveredResourcesError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::ListDiscoveredResourcesError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::ListDiscoveredResourcesError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::ListDiscoveredResourcesError {
meta: generic,
kind: crate::error::ListDiscoveredResourcesErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListDiscoveredResourcesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"HomeRegionNotSetException" => {
crate::error::ListDiscoveredResourcesError {
meta: generic,
kind: crate::error::ListDiscoveredResourcesErrorKind::HomeRegionNotSetException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::home_region_not_set_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_home_region_not_set_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ListDiscoveredResourcesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalServerError" => crate::error::ListDiscoveredResourcesError {
meta: generic,
kind: crate::error::ListDiscoveredResourcesErrorKind::InternalServerError({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_error::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_errorjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListDiscoveredResourcesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::ListDiscoveredResourcesError {
meta: generic,
kind: crate::error::ListDiscoveredResourcesErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListDiscoveredResourcesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::ListDiscoveredResourcesError {
meta: generic,
kind: crate::error::ListDiscoveredResourcesErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ListDiscoveredResourcesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ServiceUnavailableException" => {
crate::error::ListDiscoveredResourcesError {
meta: generic,
kind: crate::error::ListDiscoveredResourcesErrorKind::ServiceUnavailableException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_unavailable_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_unavailable_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ListDiscoveredResourcesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"ThrottlingException" => crate::error::ListDiscoveredResourcesError {
meta: generic,
kind: crate::error::ListDiscoveredResourcesErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListDiscoveredResourcesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListDiscoveredResourcesError::generic(generic),
})
}
/// Deserializes the success payload for `ListDiscoveredResources`.
///
/// The JSON body is decoded into the output builder; a malformed body
/// surfaces as an "unhandled" operation error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_discovered_resources_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::ListDiscoveredResourcesOutput,
    crate::error::ListDiscoveredResourcesError,
> {
    let builder = crate::output::list_discovered_resources_output::Builder::default();
    let populated = crate::json_deser::deser_operation_list_discovered_resources(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::ListDiscoveredResourcesError::unhandled)?;
    Ok(populated.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_migration_tasks_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListMigrationTasksOutput,
crate::error::ListMigrationTasksError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::ListMigrationTasksError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::ListMigrationTasksError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::ListMigrationTasksError {
meta: generic,
kind: crate::error::ListMigrationTasksErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListMigrationTasksError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"HomeRegionNotSetException" => {
crate::error::ListMigrationTasksError {
meta: generic,
kind: crate::error::ListMigrationTasksErrorKind::HomeRegionNotSetException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::home_region_not_set_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_home_region_not_set_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ListMigrationTasksError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalServerError" => crate::error::ListMigrationTasksError {
meta: generic,
kind: crate::error::ListMigrationTasksErrorKind::InternalServerError({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_error::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_errorjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListMigrationTasksError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::ListMigrationTasksError {
meta: generic,
kind: crate::error::ListMigrationTasksErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListMigrationTasksError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"PolicyErrorException" => crate::error::ListMigrationTasksError {
meta: generic,
kind: crate::error::ListMigrationTasksErrorKind::PolicyErrorException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::policy_error_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_policy_error_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListMigrationTasksError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::ListMigrationTasksError {
meta: generic,
kind: crate::error::ListMigrationTasksErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ListMigrationTasksError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ServiceUnavailableException" => {
crate::error::ListMigrationTasksError {
meta: generic,
kind: crate::error::ListMigrationTasksErrorKind::ServiceUnavailableException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_unavailable_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_unavailable_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ListMigrationTasksError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ThrottlingException" => crate::error::ListMigrationTasksError {
meta: generic,
kind: crate::error::ListMigrationTasksErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListMigrationTasksError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListMigrationTasksError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_migration_tasks_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListMigrationTasksOutput,
crate::error::ListMigrationTasksError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::list_migration_tasks_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_list_migration_tasks(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListMigrationTasksError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_progress_update_streams_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListProgressUpdateStreamsOutput,
crate::error::ListProgressUpdateStreamsError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::ListProgressUpdateStreamsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::ListProgressUpdateStreamsError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::ListProgressUpdateStreamsError {
meta: generic,
kind: crate::error::ListProgressUpdateStreamsErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListProgressUpdateStreamsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"HomeRegionNotSetException" => {
crate::error::ListProgressUpdateStreamsError {
meta: generic,
kind: crate::error::ListProgressUpdateStreamsErrorKind::HomeRegionNotSetException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::home_region_not_set_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_home_region_not_set_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ListProgressUpdateStreamsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"InternalServerError" => crate::error::ListProgressUpdateStreamsError {
meta: generic,
kind: crate::error::ListProgressUpdateStreamsErrorKind::InternalServerError({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_error::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_errorjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListProgressUpdateStreamsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::ListProgressUpdateStreamsError {
meta: generic,
kind: crate::error::ListProgressUpdateStreamsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListProgressUpdateStreamsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ServiceUnavailableException" => {
crate::error::ListProgressUpdateStreamsError {
meta: generic,
kind: crate::error::ListProgressUpdateStreamsErrorKind::ServiceUnavailableException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_unavailable_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_unavailable_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ListProgressUpdateStreamsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"ThrottlingException" => crate::error::ListProgressUpdateStreamsError {
meta: generic,
kind: crate::error::ListProgressUpdateStreamsErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListProgressUpdateStreamsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListProgressUpdateStreamsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_progress_update_streams_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListProgressUpdateStreamsOutput,
crate::error::ListProgressUpdateStreamsError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::list_progress_update_streams_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_list_progress_update_streams(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListProgressUpdateStreamsError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_notify_application_state_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::NotifyApplicationStateOutput,
crate::error::NotifyApplicationStateError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::NotifyApplicationStateError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::NotifyApplicationStateError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::NotifyApplicationStateError {
meta: generic,
kind: crate::error::NotifyApplicationStateErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::NotifyApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"DryRunOperation" => crate::error::NotifyApplicationStateError {
meta: generic,
kind: crate::error::NotifyApplicationStateErrorKind::DryRunOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::dry_run_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_dry_run_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::NotifyApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"HomeRegionNotSetException" => {
crate::error::NotifyApplicationStateError {
meta: generic,
kind: crate::error::NotifyApplicationStateErrorKind::HomeRegionNotSetException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::home_region_not_set_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_home_region_not_set_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::NotifyApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalServerError" => crate::error::NotifyApplicationStateError {
meta: generic,
kind: crate::error::NotifyApplicationStateErrorKind::InternalServerError({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_error::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_errorjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::NotifyApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::NotifyApplicationStateError {
meta: generic,
kind: crate::error::NotifyApplicationStateErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::NotifyApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"PolicyErrorException" => crate::error::NotifyApplicationStateError {
meta: generic,
kind: crate::error::NotifyApplicationStateErrorKind::PolicyErrorException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::policy_error_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_policy_error_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::NotifyApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::NotifyApplicationStateError {
meta: generic,
kind: crate::error::NotifyApplicationStateErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::NotifyApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ServiceUnavailableException" => {
crate::error::NotifyApplicationStateError {
meta: generic,
kind: crate::error::NotifyApplicationStateErrorKind::ServiceUnavailableException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_unavailable_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_unavailable_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::NotifyApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ThrottlingException" => crate::error::NotifyApplicationStateError {
meta: generic,
kind: crate::error::NotifyApplicationStateErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::NotifyApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"UnauthorizedOperation" => crate::error::NotifyApplicationStateError {
meta: generic,
kind: crate::error::NotifyApplicationStateErrorKind::UnauthorizedOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::unauthorized_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_unauthorized_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::NotifyApplicationStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::NotifyApplicationStateError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_notify_application_state_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::NotifyApplicationStateOutput,
crate::error::NotifyApplicationStateError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::notify_application_state_output::Builder::default();
let _ = response;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_notify_migration_task_state_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::NotifyMigrationTaskStateOutput,
crate::error::NotifyMigrationTaskStateError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::NotifyMigrationTaskStateError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::NotifyMigrationTaskStateError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::NotifyMigrationTaskStateError {
meta: generic,
kind: crate::error::NotifyMigrationTaskStateErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::NotifyMigrationTaskStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"DryRunOperation" => crate::error::NotifyMigrationTaskStateError {
meta: generic,
kind: crate::error::NotifyMigrationTaskStateErrorKind::DryRunOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::dry_run_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_dry_run_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::NotifyMigrationTaskStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"HomeRegionNotSetException" => {
crate::error::NotifyMigrationTaskStateError {
meta: generic,
kind: crate::error::NotifyMigrationTaskStateErrorKind::HomeRegionNotSetException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::home_region_not_set_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_home_region_not_set_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::NotifyMigrationTaskStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalServerError" => crate::error::NotifyMigrationTaskStateError {
meta: generic,
kind: crate::error::NotifyMigrationTaskStateErrorKind::InternalServerError({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_error::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_errorjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::NotifyMigrationTaskStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::NotifyMigrationTaskStateError {
meta: generic,
kind: crate::error::NotifyMigrationTaskStateErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::NotifyMigrationTaskStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::NotifyMigrationTaskStateError {
meta: generic,
kind: crate::error::NotifyMigrationTaskStateErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::NotifyMigrationTaskStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ServiceUnavailableException" => {
crate::error::NotifyMigrationTaskStateError {
meta: generic,
kind: crate::error::NotifyMigrationTaskStateErrorKind::ServiceUnavailableException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_unavailable_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_unavailable_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::NotifyMigrationTaskStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"ThrottlingException" => crate::error::NotifyMigrationTaskStateError {
meta: generic,
kind: crate::error::NotifyMigrationTaskStateErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::NotifyMigrationTaskStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"UnauthorizedOperation" => crate::error::NotifyMigrationTaskStateError {
meta: generic,
kind: crate::error::NotifyMigrationTaskStateErrorKind::UnauthorizedOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::unauthorized_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_unauthorized_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::NotifyMigrationTaskStateError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::NotifyMigrationTaskStateError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_notify_migration_task_state_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::NotifyMigrationTaskStateOutput,
crate::error::NotifyMigrationTaskStateError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::notify_migration_task_state_output::Builder::default();
let _ = response;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_resource_attributes_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::PutResourceAttributesOutput,
crate::error::PutResourceAttributesError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::PutResourceAttributesError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::PutResourceAttributesError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::PutResourceAttributesError {
meta: generic,
kind: crate::error::PutResourceAttributesErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::PutResourceAttributesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"DryRunOperation" => crate::error::PutResourceAttributesError {
meta: generic,
kind: crate::error::PutResourceAttributesErrorKind::DryRunOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::dry_run_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_dry_run_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::PutResourceAttributesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"HomeRegionNotSetException" => {
crate::error::PutResourceAttributesError {
meta: generic,
kind: crate::error::PutResourceAttributesErrorKind::HomeRegionNotSetException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::home_region_not_set_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_home_region_not_set_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::PutResourceAttributesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalServerError" => crate::error::PutResourceAttributesError {
meta: generic,
kind: crate::error::PutResourceAttributesErrorKind::InternalServerError({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_error::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_errorjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::PutResourceAttributesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::PutResourceAttributesError {
meta: generic,
kind: crate::error::PutResourceAttributesErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::PutResourceAttributesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::PutResourceAttributesError {
meta: generic,
kind: crate::error::PutResourceAttributesErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::PutResourceAttributesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ServiceUnavailableException" => {
crate::error::PutResourceAttributesError {
meta: generic,
kind: crate::error::PutResourceAttributesErrorKind::ServiceUnavailableException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_unavailable_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_unavailable_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::PutResourceAttributesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ThrottlingException" => crate::error::PutResourceAttributesError {
meta: generic,
kind: crate::error::PutResourceAttributesErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::PutResourceAttributesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"UnauthorizedOperation" => crate::error::PutResourceAttributesError {
meta: generic,
kind: crate::error::PutResourceAttributesErrorKind::UnauthorizedOperation({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::unauthorized_operation::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_unauthorized_operationjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::PutResourceAttributesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::PutResourceAttributesError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_resource_attributes_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::PutResourceAttributesOutput,
crate::error::PutResourceAttributesError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::put_resource_attributes_output::Builder::default();
let _ = response;
output.build()
})
}
| 43.953838 | 215 | 0.499083 |
fbf48feb0d70ea9e0b7cce6b85203fd337f78997 | 697 | /*
* Copyright 2020 sukawasatoru
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
pub(crate) mod data;
pub mod feature;
pub mod model;
pub mod prelude;
pub mod server;
pub mod util;
| 30.304348 | 75 | 0.738881 |
1e396e433023e3a770198863346512a5594b4db3 | 9,253 | use std::{mem, io, time};
use log::debug;
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use frame::CanFrame;
use filter::CanFilter;
use util::{set_socket_option, set_socket_option_mult, system_time_from_timespec, timeval_from_duration};
use errors::CanSocketOpenError;
use constants::*;
/// A socket for a CAN device.
///
/// Will be closed upon deallocation. To close manually, use std::drop::Drop.
/// Internally this is just a wrapped file-descriptor.
#[derive(Debug)]
pub struct CanSocket {
fd: libc::c_int,
}
/// A CAN address struct for binding a socket
#[derive(Debug)]
#[repr(C)]
struct CanAddr {
af_can: libc::c_short,
if_index: libc::c_int,
rx_id: libc::c_uint, // transport protocol class address information (e.g. ISOTP)
tx_id: libc::c_uint,
}
impl CanSocket {
    /// Open a named CAN device.
    ///
    /// Usually the more common case, opens a socket can device by name, such
    /// as "vcan0" or "socan0".
    pub fn open(ifname: &str) -> Result<CanSocket, CanSocketOpenError> {
        match nix::net::if_::if_nametoindex(ifname) {
            Ok(ifindex) => CanSocket::open_interface(ifindex),
            Err(e) => Err(CanSocketOpenError::from(e)),
        }
    }

    /// Open CAN device by interface number.
    ///
    /// Opens a CAN device by kernel interface number.
    fn open_interface(if_index: libc::c_uint) -> Result<CanSocket, CanSocketOpenError> {
        match CanSocket::open_socket() {
            Ok(fd) => CanSocket::bind_socket(if_index, fd),
            Err(e) => Err(e),
        }
    }

    /// Create a new raw CAN socket via `socket(PF_CAN, SOCK_RAW, CAN_RAW)`,
    /// translating a `-1` return value into the last OS error.
    fn open_socket() -> Result<i32, CanSocketOpenError> {
        let fd: i32;
        unsafe {
            fd = libc::socket(libc::PF_CAN, libc::SOCK_RAW, CAN_RAW);
        }
        if fd == -1 {
            return Err(CanSocketOpenError::from(io::Error::last_os_error()));
        }
        Ok(fd)
    }

    /// Bind the already-open socket `fd` to the CAN interface `if_index`.
    ///
    /// `rx_id`/`tx_id` are zeroed — they are only used by transport
    /// protocols such as ISOTP. On failure the fd is closed before the error
    /// is returned so the descriptor is not leaked.
    fn bind_socket(if_index: libc::c_uint, fd: i32) -> Result<CanSocket, CanSocketOpenError> {
        let socketaddr = CanAddr {
            af_can: libc::AF_CAN as libc::c_short,
            if_index: if_index as libc::c_int,
            rx_id: 0,
            tx_id: 0,
        };
        let r: i32;
        unsafe {
            let p = &socketaddr as *const CanAddr;
            r = libc::bind(fd,
                           p as *const libc::sockaddr,
                           mem::size_of::<CanAddr>() as u32
            );
        }
        if r == -1 {
            let e = io::Error::last_os_error();
            // clean up resource if failure to open
            unsafe { libc::close(fd); }
            return Err(CanSocketOpenError::from(e));
        }
        Ok(CanSocket { fd: fd })
    }

    /// Close the underlying file descriptor via `close(2)`.
    ///
    /// NOTE(review): `self.fd` is not invalidated here, and `Drop` calls
    /// `close()` again — after a successful manual close, dropping the
    /// socket will issue a second `close(2)` on a stale descriptor.
    /// Confirm this double-close is acceptable for callers.
    pub fn close(&mut self) -> io::Result<()> {
        let r: i32;
        unsafe {
            r = libc::close(self.fd);
        }
        if r == -1 {
            return Err(io::Error::last_os_error());
        }
        Ok(())
    }

    /// Blocking read a single can frame with timestamp
    ///
    /// Note that reading a frame and retrieving the timestamp requires two
    /// consecutive syscalls.
    pub fn read(&self) -> io::Result<(CanFrame, time::SystemTime)> {
        let frame = self.read_socket()?;
        let ts = self.socket_timestamp()?;
        Ok((frame, ts))
    }

    /// Retrieve the kernel timestamp of the last received frame via `ioctl`.
    ///
    /// NOTE(review): the buffer passed here is a `libc::timespec`, but the
    /// classic `SIOCGSTAMP` ioctl fills a `timeval` (the timespec variant is
    /// `SIOCGSTAMPNS`) — confirm the `SIOCGSTAMP` constant in `constants`
    /// matches this struct layout.
    fn socket_timestamp(&self) -> io::Result<time::SystemTime> {
        let mut ts = mem::MaybeUninit::<libc::timespec>::uninit();
        let r = unsafe {
            libc::ioctl(self.fd,
                        SIOCGSTAMP as libc::c_ulong,
                        ts.as_mut_ptr())
        };
        if r == -1 {
            return Err(io::Error::last_os_error());
        }
        // The ioctl returned success, so the struct has been filled in.
        let ts = unsafe { ts.assume_init() };
        Ok(system_time_from_timespec(ts))
    }

    /// Blocking read a single can frame.
    fn read_socket(&self) -> io::Result<CanFrame> {
        let mut frame = CanFrame::empty();
        let r = unsafe {
            let frame_ptr = &mut frame as *mut CanFrame;
            libc::read(self.fd, frame_ptr as *mut libc::c_void, mem::size_of::<CanFrame>())
        };
        // Anything other than a full-frame read (including -1) is an error;
        // raw CAN sockets transfer whole frames per read.
        if r as usize != mem::size_of::<CanFrame>() {
            return Err(io::Error::last_os_error());
        }
        Ok(frame)
    }

    /// Write a single can frame.
    ///
    /// Note that this function can fail with an `EAGAIN` error or similar.
    /// Use `write_frame_insist` if you need to be sure that the message got
    /// sent or failed.
    pub fn write(&self, frame: &CanFrame) -> io::Result<()> {
        let r = unsafe {
            let frame_ptr = frame as *const CanFrame;
            libc::write(self.fd, frame_ptr as *const libc::c_void, mem::size_of::<CanFrame>())
        };
        // A partial write (or -1) is reported as the last OS error.
        if r as usize != mem::size_of::<CanFrame>() {
            return Err(io::Error::last_os_error());
        }
        Ok(())
    }

    /// Change socket to non-blocking mode
    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
        // retrieve current file status flags
        let old_flags = unsafe { libc::fcntl(self.fd, libc::F_GETFL) };
        if old_flags == -1 {
            return Err(io::Error::last_os_error());
        }
        // set or clear O_NONBLOCK while preserving all other flags
        let new_flags = if nonblocking {
            old_flags | libc::O_NONBLOCK
        } else {
            old_flags & !libc::O_NONBLOCK
        };
        let r = unsafe { libc::fcntl(self.fd, libc::F_SETFL, new_flags) };
        if r != 0 {
            return Err(io::Error::last_os_error());
        }
        Ok(())
    }

    /// Set the read timeout on the socket
    ///
    /// For convenience, the result value can be checked using
    /// `ShouldRetry::should_retry` when a timeout is set.
    pub fn set_read_timeout(&self, duration: time::Duration) -> io::Result<()> {
        set_socket_option(
            self.fd,
            libc::SOL_SOCKET,
            libc::SO_RCVTIMEO,
            &timeval_from_duration(duration)
        )
    }

    /// Set the write timeout on the socket
    pub fn set_write_timeout(&self, duration: time::Duration) -> io::Result<()> {
        set_socket_option(
            self.fd,
            libc::SOL_SOCKET,
            libc::SO_SNDTIMEO,
            &timeval_from_duration(duration)
        )
    }

    /// Sets filters on the socket.
    ///
    /// CAN packages received by SocketCAN are matched against these filters,
    /// only matching packets are returned by the interface.
    ///
    /// See `CanFilter` for details on how filtering works. By default, all
    /// single filter matching all incoming frames is installed.
    pub fn set_filters(&self, filters: &[CanFilter]) -> io::Result<()> {
        set_socket_option_mult(self.fd, SOL_CAN_RAW, CAN_RAW_FILTER, filters)
    }

    /// Sets the error mask on the socket.
    ///
    /// By default (`ERR_MASK_NONE`) no error conditions are reported as
    /// special error frames by the socket. Enabling error conditions by
    /// setting `ERR_MASK_ALL` or another non-empty error mask causes the
    /// socket to receive notification about the specified conditions.
    #[inline]
    pub fn set_error_mask(&self, mask: u32) -> io::Result<()> {
        set_socket_option(self.fd, SOL_CAN_RAW, CAN_RAW_ERR_FILTER, &mask)
    }

    /// Enable or disable loopback.
    ///
    /// By default, loopback is enabled, causing other applications that open
    /// the same CAN bus to see frames emitted by different applications on
    /// the same system.
    #[inline]
    pub fn set_loopback(&self, enabled: bool) -> io::Result<()> {
        let loopback: libc::c_int = match enabled {
            true => 1,
            false => 0,
        };
        set_socket_option(self.fd, SOL_CAN_RAW, CAN_RAW_LOOPBACK, &loopback)
    }

    /// Enable or disable receiving of own frames.
    ///
    /// When loopback is enabled, this settings controls if CAN frames sent
    /// are received back immediately by sender. Default is off.
    pub fn set_recv_own_msgs(&self, enabled: bool) -> io::Result<()> {
        let recv_own_msgs: libc::c_int = match enabled {
            true => 1,
            false => 0,
        };
        set_socket_option(self.fd, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS, &recv_own_msgs)
    }

    /// Enable or disable join filters.
    ///
    /// By default a frame is accepted if it matches any of the filters set
    /// with `set_filters`. If join filters is enabled, a frame has to match
    /// _all_ filters to be accepted.
    pub fn set_join_filters(&self, enabled: bool) -> io::Result<()> {
        let join_filters: libc::c_int = match enabled {
            true => 1,
            false => 0,
        };
        set_socket_option(self.fd, SOL_CAN_RAW, CAN_RAW_JOIN_FILTERS, &join_filters)
    }
}
impl AsRawFd for CanSocket {
    /// Borrow the raw file descriptor without transferring ownership; the
    /// socket still closes it on drop.
    fn as_raw_fd(&self) -> RawFd {
        self.fd
    }
}
impl FromRawFd for CanSocket {
    /// Construct a `CanSocket` from an existing raw file descriptor.
    ///
    /// The returned socket takes ownership of `fd`: its `Drop` impl will
    /// `close(2)` the descriptor. The caller must ensure `fd` is a valid,
    /// open descriptor that is not closed elsewhere.
    unsafe fn from_raw_fd(fd: RawFd) -> CanSocket {
        // Field-init shorthand replaces the redundant `fd: fd`.
        CanSocket { fd }
    }
}
impl IntoRawFd for CanSocket {
fn into_raw_fd(self) -> RawFd {
self.fd
}
}
impl Drop for CanSocket {
fn drop(&mut self) {
match self.close() {
Ok(_) => debug!("Socket dropped (fd: {})", self.fd),
Err(e) => debug!("Error dropping socket {}", e),
};
}
} | 30.740864 | 104 | 0.57149 |
f448a6f9ecf729c9a10e82a2c188f3668c62920d | 9,015 | use std::collections::HashMap;
use na::{Isometry2, Vector2};
use rapier::counters::Counters;
use rapier::dynamics::{
IntegrationParameters, JointParams, JointSet, RigidBodyHandle, RigidBodySet,
};
use rapier::geometry::{Collider, ColliderSet};
use std::f32;
use wrapped2d::b2;
use wrapped2d::dynamics::joints::{PrismaticJointDef, RevoluteJointDef, WeldJointDef};
use wrapped2d::user_data::NoUserData;
/// Convert a nalgebra 2-vector into a Box2d vector (plain component copy).
fn na_vec_to_b2_vec(v: Vector2<f32>) -> b2::Vec2 {
    let (x, y) = (v.x, v.y);
    b2::Vec2 { x, y }
}
/// Convert a Box2d vector into a nalgebra 2-vector (plain component copy).
fn b2_vec_to_na_vec(v: b2::Vec2) -> Vector2<f32> {
    let b2::Vec2 { x, y } = v;
    Vector2::new(x, y)
}
/// Convert a Box2d transform (position + rotation) into a nalgebra isometry.
fn b2_transform_to_na_isometry(v: b2::Transform) -> Isometry2<f32> {
    let translation = b2_vec_to_na_vec(v.pos);
    let rotation = v.rot.angle();
    Isometry2::new(translation, rotation)
}
/// A Box2d mirror of a rapier physics scene, used to run the same setup
/// through the Box2d engine (e.g. for cross-engine comparison).
pub struct Box2dWorld {
    // The wrapped Box2d world.
    world: b2::World<NoUserData>,
    // Maps each rapier rigid-body handle to the Box2d body created for it.
    rapier2box2d: HashMap<RigidBodyHandle, b2::BodyHandle>,
}
impl Box2dWorld {
    /// Build a Box2d world mirroring the given rapier sets: bodies,
    /// colliders and joints are translated one-to-one. Continuous physics
    /// (CCD) is disabled for the Box2d side.
    pub fn from_rapier(
        gravity: Vector2<f32>,
        bodies: &RigidBodySet,
        colliders: &ColliderSet,
        joints: &JointSet,
    ) -> Self {
        let mut world = b2::World::new(&na_vec_to_b2_vec(gravity));
        world.set_continuous_physics(false);
        let mut res = Box2dWorld {
            world,
            rapier2box2d: HashMap::new(),
        };
        // Order matters: colliders and joints reference the body handles
        // recorded by `insert_bodies`.
        res.insert_bodies(bodies);
        res.insert_colliders(colliders);
        res.insert_joints(joints);
        res
    }

    /// Create one Box2d body per rapier body and record the handle mapping.
    /// Non-dynamic rapier bodies become Box2d static bodies; damping is
    /// currently hard-wired to zero (see the commented-out code below).
    fn insert_bodies(&mut self, bodies: &RigidBodySet) {
        for (handle, body) in bodies.iter() {
            let body_type = if !body.is_dynamic() {
                b2::BodyType::Static
            } else {
                b2::BodyType::Dynamic
            };
            let linear_damping = 0.0;
            let angular_damping = 0.0;
            // if let Some(rb) = body.downcast_ref::<RigidBody<f32>>() {
            //     linear_damping = rb.linear_damping();
            //     angular_damping = rb.angular_damping();
            // } else {
            //     linear_damping = 0.0;
            //     angular_damping = 0.0;
            // }
            let def = b2::BodyDef {
                body_type,
                position: na_vec_to_b2_vec(body.position().translation.vector),
                angle: body.position().rotation.angle(),
                linear_velocity: na_vec_to_b2_vec(*body.linvel()),
                angular_velocity: body.angvel(),
                linear_damping,
                angular_damping,
                ..b2::BodyDef::new()
            };
            let b2_handle = self.world.create_body(&def);
            self.rapier2box2d.insert(handle, b2_handle);
            // Collider.
            let mut b2_body = self.world.body_mut(b2_handle);
            b2_body.set_bullet(false /* collider.is_ccd_enabled() */);
        }
    }

    /// Attach each rapier collider as a fixture on its parent's Box2d body.
    fn insert_colliders(&mut self, colliders: &ColliderSet) {
        for (_, collider) in colliders.iter() {
            let b2_body_handle = self.rapier2box2d[&collider.parent()];
            let mut b2_body = self.world.body_mut(b2_body_handle);
            Self::create_fixture(&collider, &mut *b2_body);
        }
    }

    /// Translate rapier joints into their Box2d equivalents:
    /// ball -> revolute, fixed -> weld, prismatic -> prismatic.
    /// Limits/motors are only carried over for prismatic joints.
    fn insert_joints(&mut self, joints: &JointSet) {
        for joint in joints.iter() {
            let body_a = self.rapier2box2d[&joint.1.body1];
            let body_b = self.rapier2box2d[&joint.1.body2];
            match &joint.1.params {
                JointParams::BallJoint(params) => {
                    let def = RevoluteJointDef {
                        body_a,
                        body_b,
                        collide_connected: true,
                        local_anchor_a: na_vec_to_b2_vec(params.local_anchor1.coords),
                        local_anchor_b: na_vec_to_b2_vec(params.local_anchor2.coords),
                        reference_angle: 0.0,
                        enable_limit: false,
                        lower_angle: 0.0,
                        upper_angle: 0.0,
                        enable_motor: false,
                        motor_speed: 0.0,
                        max_motor_torque: 0.0,
                    };
                    self.world.create_joint(&def);
                }
                JointParams::FixedJoint(params) => {
                    let def = WeldJointDef {
                        body_a,
                        body_b,
                        collide_connected: true,
                        local_anchor_a: na_vec_to_b2_vec(params.local_anchor1.translation.vector),
                        local_anchor_b: na_vec_to_b2_vec(params.local_anchor2.translation.vector),
                        reference_angle: 0.0,
                        frequency: 0.0,
                        damping_ratio: 0.0,
                    };
                    self.world.create_joint(&def);
                }
                JointParams::PrismaticJoint(params) => {
                    let def = PrismaticJointDef {
                        body_a,
                        body_b,
                        collide_connected: true,
                        local_anchor_a: na_vec_to_b2_vec(params.local_anchor1.coords),
                        local_anchor_b: na_vec_to_b2_vec(params.local_anchor2.coords),
                        local_axis_a: na_vec_to_b2_vec(params.local_axis1().into_inner()),
                        reference_angle: 0.0,
                        enable_limit: params.limits_enabled,
                        lower_translation: params.limits[0],
                        upper_translation: params.limits[1],
                        enable_motor: false,
                        max_motor_force: 0.0,
                        motor_speed: 0.0,
                    };
                    self.world.create_joint(&def);
                }
            }
        }
    }

    /// Create a Box2d fixture on `body` matching the rapier collider's
    /// shape and material. Supported shapes: ball, cuboid, heightfield;
    /// anything else is reported on stderr and skipped.
    fn create_fixture(collider: &Collider, body: &mut b2::MetaBody<NoUserData>) {
        let center = na_vec_to_b2_vec(collider.position_wrt_parent().translation.vector);
        let mut fixture_def = b2::FixtureDef::new();
        fixture_def.restitution = collider.restitution;
        fixture_def.friction = collider.friction;
        fixture_def.density = collider.density();
        fixture_def.is_sensor = collider.is_sensor();
        fixture_def.filter = b2::Filter::new();
        let shape = collider.shape();
        if let Some(b) = shape.as_ball() {
            let mut b2_shape = b2::CircleShape::new();
            b2_shape.set_radius(b.radius);
            b2_shape.set_position(center);
            body.create_fixture(&b2_shape, &mut fixture_def);
        } else if let Some(c) = shape.as_cuboid() {
            let b2_shape = b2::PolygonShape::new_box(c.half_extents.x, c.half_extents.y);
            body.create_fixture(&b2_shape, &mut fixture_def);
        //        } else if let Some(polygon) = shape.as_polygon() {
        //            let points: Vec<_> = poly
        //                .vertices()
        //                .iter()
        //                .map(|p| collider.position_wrt_parent() * p)
        //                .map(|p| na_vec_to_b2_vec(p.coords))
        //                .collect();
        //            let b2_shape = b2::PolygonShape::new_with(&points);
        //            body.create_fixture(&b2_shape, &mut fixture_def);
        } else if let Some(heightfield) = shape.as_heightfield() {
            // Chain the heightfield segments into one polyline: first
            // segment contributes both endpoints, every following segment
            // appends its end point.
            let mut segments = heightfield.segments();
            let seg1 = segments.next().unwrap();
            let mut vertices = vec![
                na_vec_to_b2_vec(seg1.a.coords),
                na_vec_to_b2_vec(seg1.b.coords),
            ];
            // TODO: this will not handle holes properly.
            segments.for_each(|seg| {
                vertices.push(na_vec_to_b2_vec(seg.b.coords));
            });
            let b2_shape = b2::ChainShape::new_chain(&vertices);
            body.create_fixture(&b2_shape, &mut fixture_def);
        } else {
            eprintln!("Creating a shape unknown to the Box2d backend.");
        }
    }

    /// Advance the Box2d simulation by one step, timing it via `counters`.
    /// The velocity/position iteration counts mirror rapier's parameters.
    pub fn step(&mut self, counters: &mut Counters, params: &IntegrationParameters) {
        //        self.world.set_continuous_physics(world.integration_parameters.max_ccd_substeps != 0);
        counters.step_started();
        self.world.step(
            params.dt(),
            params.max_velocity_iterations as i32,
            params.max_position_iterations as i32,
        );
        counters.step_completed();
    }

    /// Copy the Box2d body poses back into the rapier sets so both engines
    /// can be compared/rendered from the same data.
    pub fn sync(&self, bodies: &mut RigidBodySet, colliders: &mut ColliderSet) {
        for (handle, body) in bodies.iter_mut() {
            if let Some(pb2_handle) = self.rapier2box2d.get(&handle) {
                let b2_body = self.world.body(*pb2_handle);
                let pos = b2_transform_to_na_isometry(b2_body.transform().clone());
                body.set_position(pos, false);
                for coll_handle in body.colliders() {
                    let collider = &mut colliders[*coll_handle];
                    collider.set_position_debug(pos * collider.position_wrt_parent());
                }
            }
        }
    }
}
| 38.199153 | 104 | 0.533444 |
f98bc476aafec872d40e0c85e67d20e92b795cb7 | 41,864 | //! Partitioning Codegen Units for Incremental Compilation
//! ======================================================
//!
//! The task of this module is to take the complete set of monomorphizations of
//! a crate and produce a set of codegen units from it, where a codegen unit
//! is a named set of (mono-item, linkage) pairs. That is, this module
//! decides which monomorphization appears in which codegen units with which
//! linkage. The following paragraphs describe some of the background on the
//! partitioning scheme.
//!
//! The most important opportunity for saving on compilation time with
//! incremental compilation is to avoid re-codegenning and re-optimizing code.
//! Since the unit of codegen and optimization for LLVM is "modules" or, how
//! we call them "codegen units", the particulars of how much time can be saved
//! by incremental compilation are tightly linked to how the output program is
//! partitioned into these codegen units prior to passing it to LLVM --
//! especially because we have to treat codegen units as opaque entities once
//! they are created: There is no way for us to incrementally update an existing
//! LLVM module and so we have to build any such module from scratch if it was
//! affected by some change in the source code.
//!
//! From that point of view it would make sense to maximize the number of
//! codegen units by, for example, putting each function into its own module.
//! That way only those modules would have to be re-compiled that were actually
//! affected by some change, minimizing the number of functions that could have
//! been re-used but just happened to be located in a module that is
//! re-compiled.
//!
//! However, since LLVM optimization does not work across module boundaries,
//! using such a highly granular partitioning would lead to very slow runtime
//! code since it would effectively prohibit inlining and other inter-procedure
//! optimizations. We want to avoid that as much as possible.
//!
//! Thus we end up with a trade-off: The bigger the codegen units, the better
//! LLVM's optimizer can do its work, but also the smaller the compilation time
//! reduction we get from incremental compilation.
//!
//! Ideally, we would create a partitioning such that there are few big codegen
//! units with few interdependencies between them. For now though, we use the
//! following heuristic to determine the partitioning:
//!
//! - There are two codegen units for every source-level module:
//! - One for "stable", that is non-generic, code
//! - One for more "volatile" code, i.e., monomorphized instances of functions
//! defined in that module
//!
//! In order to see why this heuristic makes sense, let's take a look at when a
//! codegen unit can get invalidated:
//!
//! 1. The most straightforward case is when the BODY of a function or global
//! changes. Then any codegen unit containing the code for that item has to be
//! re-compiled. Note that this includes all codegen units where the function
//! has been inlined.
//!
//! 2. The next case is when the SIGNATURE of a function or global changes. In
//! this case, all codegen units containing a REFERENCE to that item have to be
//! re-compiled. This is a superset of case 1.
//!
//! 3. The final and most subtle case is when a REFERENCE to a generic function
//! is added or removed somewhere. Even though the definition of the function
//! might be unchanged, a new REFERENCE might introduce a new monomorphized
//! instance of this function which has to be placed and compiled somewhere.
//! Conversely, when removing a REFERENCE, it might have been the last one with
//! that particular set of generic arguments and thus we have to remove it.
//!
//! From the above we see that just using one codegen unit per source-level
//! module is not such a good idea, since just adding a REFERENCE to some
//! generic item somewhere else would invalidate everything within the module
//! containing the generic item. The heuristic above reduces this detrimental
//! side-effect of references a little by at least not touching the non-generic
//! code of the module.
//!
//! A Note on Inlining
//! ------------------
//! As briefly mentioned above, in order for LLVM to be able to inline a
//! function call, the body of the function has to be available in the LLVM
//! module where the call is made. This has a few consequences for partitioning:
//!
//! - The partitioning algorithm has to take care of placing functions into all
//! codegen units where they should be available for inlining. It also has to
//! decide on the correct linkage for these functions.
//!
//! - The partitioning algorithm has to know which functions are likely to get
//! inlined, so it can distribute function instantiations accordingly. Since
//! there is no way of knowing for sure which functions LLVM will decide to
//! inline in the end, we apply a heuristic here: Only functions marked with
//! `#[inline]` are considered for inlining by the partitioner. The current
//! implementation will not try to determine if a function is likely to be
//! inlined by looking at the functions definition.
//!
//! Note though that as a side-effect of creating a codegen units per
//! source-level module, functions from the same module will be available for
//! inlining, even when they are not marked #[inline].
use std::collections::hash_map::Entry;
use std::cmp;
use std::sync::Arc;
use syntax::ast::NodeId;
use syntax::symbol::InternedString;
use rustc::dep_graph::{WorkProductId, WorkProduct, DepNode, DepConstructor};
use rustc::hir::CodegenFnAttrFlags;
use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE, CRATE_DEF_INDEX};
use rustc::hir::map::DefPathData;
use rustc::mir::mono::{Linkage, Visibility, CodegenUnitNameBuilder};
use rustc::middle::exported_symbols::SymbolExportLevel;
use rustc::ty::{self, TyCtxt, InstanceDef};
use rustc::ty::item_path::characteristic_def_id_of_type;
use rustc::ty::query::Providers;
use rustc::util::common::time;
use rustc::util::nodemap::{DefIdSet, FxHashMap, FxHashSet};
use rustc::mir::mono::MonoItem;
use crate::monomorphize::collector::InliningMap;
use crate::monomorphize::collector::{self, MonoItemCollectionMode};
use crate::monomorphize::item::{MonoItemExt, InstantiationMode};
pub use rustc::mir::mono::CodegenUnit;
/// Strategy used by `partition` to decide how many codegen units to create.
pub enum PartitioningStrategy {
    /// Generates one codegen unit per source-level module.
    PerModule,
    /// Partition the whole crate into a fixed number of codegen units.
    FixedUnitCount(usize)
}
/// Convenience extension methods over a `CodegenUnit`, expressed as a trait
/// so they can be provided on top of the single `as_codegen_unit` accessor.
pub trait CodegenUnitExt<'tcx> {
    /// Access the underlying codegen unit; everything else is derived.
    fn as_codegen_unit(&self) -> &CodegenUnit<'tcx>;

    /// Whether this CGU contains the given mono item.
    fn contains_item(&self, item: &MonoItem<'tcx>) -> bool {
        self.items().contains_key(item)
    }

    fn name<'a>(&'a self) -> &'a InternedString
        where 'tcx: 'a,
    {
        &self.as_codegen_unit().name()
    }

    fn items(&self) -> &FxHashMap<MonoItem<'tcx>, (Linkage, Visibility)> {
        &self.as_codegen_unit().items()
    }

    /// Stable id for the incremental-compilation work product of this CGU,
    /// derived from the CGU name.
    fn work_product_id(&self) -> WorkProductId {
        WorkProductId::from_cgu_name(&self.name().as_str())
    }

    /// Look up this CGU's work product from the previous compilation
    /// session; panics if the dep-graph has no entry for it.
    fn work_product(&self, tcx: TyCtxt<'_, '_, '_>) -> WorkProduct {
        tcx.dep_graph
           .previous_work_product(&work_product_id)
           .unwrap_or_else(|| {
               panic!("Could not find work-product for CGU `{}`", self.name())
           })
    }

    /// The CGU's items in a deterministic order: local items sorted by their
    /// `NodeId` (source order, which the codegen tests rely on), then by
    /// symbol name as a tie-breaker.
    fn items_in_deterministic_order<'a>(&self,
                                        tcx: TyCtxt<'a, 'tcx, 'tcx>)
                                        -> Vec<(MonoItem<'tcx>,
                                                (Linkage, Visibility))> {
        // The codegen tests rely on items being process in the same order as
        // they appear in the file, so for local items, we sort by node_id first
        #[derive(PartialEq, Eq, PartialOrd, Ord)]
        pub struct ItemSortKey(Option<NodeId>, ty::SymbolName);

        fn item_sort_key<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                   item: MonoItem<'tcx>) -> ItemSortKey {
            ItemSortKey(match item {
                MonoItem::Fn(ref instance) => {
                    match instance.def {
                        // We only want to take NodeIds of user-defined
                        // instances into account. The others don't matter for
                        // the codegen tests and can even make item order
                        // unstable.
                        InstanceDef::Item(def_id) => {
                            tcx.hir().as_local_node_id(def_id)
                        }
                        InstanceDef::VtableShim(..) |
                        InstanceDef::Intrinsic(..) |
                        InstanceDef::FnPtrShim(..) |
                        InstanceDef::Virtual(..) |
                        InstanceDef::ClosureOnceShim { .. } |
                        InstanceDef::DropGlue(..) |
                        InstanceDef::CloneShim(..) => {
                            None
                        }
                    }
                }
                MonoItem::Static(def_id) => {
                    tcx.hir().as_local_node_id(def_id)
                }
                MonoItem::GlobalAsm(node_id) => {
                    Some(node_id)
                }
            }, item.symbol_name(tcx))
        }

        let mut items: Vec<_> = self.items().iter().map(|(&i, &l)| (i, l)).collect();
        items.sort_by_cached_key(|&(i, _)| item_sort_key(tcx, i));
        items
    }

    /// The dep-graph node representing compilation of this CGU.
    fn codegen_dep_node(&self, tcx: TyCtxt<'_, 'tcx, 'tcx>) -> DepNode {
        DepNode::new(tcx, DepConstructor::CompileCodegenUnit(self.name().clone()))
    }
}
// The trivial impl: a `CodegenUnit` is its own `as_codegen_unit`.
impl<'tcx> CodegenUnitExt<'tcx> for CodegenUnit<'tcx> {
    fn as_codegen_unit(&self) -> &CodegenUnit<'tcx> {
        self
    }
}
/// Anything we can't find a proper codegen unit for goes into this.
fn fallback_cgu_name(name_builder: &mut CodegenUnitNameBuilder<'_, '_, '_>) -> InternedString {
    name_builder.build_cgu_name(LOCAL_CRATE, &["fallback"], Some("cgu"))
}
/// Partition the crate's mono items into codegen units.
///
/// Phases, in order: place root items into per-module CGUs, optionally merge
/// down to a fixed CGU count, distribute inlined copies using the inlining
/// map, internalize symbols where legal, and finally sort by CGU name so the
/// result is deterministic.
pub fn partition<'a, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                              mono_items: I,
                              strategy: PartitioningStrategy,
                              inlining_map: &InliningMap<'tcx>)
                              -> Vec<CodegenUnit<'tcx>>
    where I: Iterator<Item = MonoItem<'tcx>>
{
    // In the first step, we place all regular monomorphizations into their
    // respective 'home' codegen unit. Regular monomorphizations are all
    // functions and statics defined in the local crate.
    let mut initial_partitioning = place_root_mono_items(tcx, mono_items);

    initial_partitioning.codegen_units.iter_mut().for_each(|cgu| cgu.estimate_size(&tcx));

    debug_dump(tcx, "INITIAL PARTITIONING:", initial_partitioning.codegen_units.iter());

    // If the partitioning should produce a fixed count of codegen units, merge
    // until that count is reached.
    if let PartitioningStrategy::FixedUnitCount(count) = strategy {
        merge_codegen_units(tcx, &mut initial_partitioning, count);

        debug_dump(tcx, "POST MERGING:", initial_partitioning.codegen_units.iter());
    }

    // In the next step, we use the inlining map to determine which additional
    // monomorphizations have to go into each codegen unit. These additional
    // monomorphizations can be drop-glue, functions from external crates, and
    // local functions the definition of which is marked with #[inline].
    let mut post_inlining = place_inlined_mono_items(initial_partitioning,
                                                     inlining_map);

    post_inlining.codegen_units.iter_mut().for_each(|cgu| cgu.estimate_size(&tcx));

    debug_dump(tcx, "POST INLINING:", post_inlining.codegen_units.iter());

    // Next we try to make as many symbols "internal" as possible, so LLVM has
    // more freedom to optimize.
    if !tcx.sess.opts.cg.link_dead_code {
        internalize_symbols(tcx, &mut post_inlining, inlining_map);
    }

    // Finally, sort by codegen unit name, so that we get deterministic results
    let PostInliningPartitioning {
        codegen_units: mut result,
        mono_item_placements: _,
        internalization_candidates: _,
    } = post_inlining;

    result.sort_by(|cgu1, cgu2| {
        cgu1.name().cmp(cgu2.name())
    });

    result
}
/// Intermediate result of the first partitioning phase, before inlined
/// copies have been distributed across CGUs.
struct PreInliningPartitioning<'tcx> {
    // CGUs containing only root (non-inlined) mono items so far.
    codegen_units: Vec<CodegenUnit<'tcx>>,
    // All root mono items placed by `place_root_mono_items`.
    roots: FxHashSet<MonoItem<'tcx>>,
    // Items that may legally be given internal linkage later.
    internalization_candidates: FxHashSet<MonoItem<'tcx>>,
}
/// For symbol internalization, we need to know whether a symbol/mono-item is
/// accessed from outside the codegen unit it is defined in. This type is used
/// to keep track of that.
#[derive(Clone, PartialEq, Eq, Debug)]
enum MonoItemPlacement {
    /// The item ended up in exactly one CGU (named here).
    SingleCgu { cgu_name: InternedString },
    /// The item was copied into more than one CGU.
    MultipleCgus,
}
/// Result of distributing inlined mono items; input to internalization.
struct PostInliningPartitioning<'tcx> {
    // CGUs including the inlined copies.
    codegen_units: Vec<CodegenUnit<'tcx>>,
    // Where each mono item ended up (one CGU or several).
    mono_item_placements: FxHashMap<MonoItem<'tcx>, MonoItemPlacement>,
    // Items that may legally be given internal linkage.
    internalization_candidates: FxHashSet<MonoItem<'tcx>>,
}
/// Place every globally-shared mono item into its "home" CGU (derived from
/// its characteristic `DefId`), computing linkage/visibility as it goes and
/// collecting internalization candidates. `LocalCopy` items are skipped here;
/// they are distributed later by `place_inlined_mono_items`.
fn place_root_mono_items<'a, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                      mono_items: I)
                                      -> PreInliningPartitioning<'tcx>
    where I: Iterator<Item = MonoItem<'tcx>>
{
    let mut roots = FxHashSet::default();
    let mut codegen_units = FxHashMap::default();
    let is_incremental_build = tcx.sess.opts.incremental.is_some();
    let mut internalization_candidates = FxHashSet::default();

    // Determine if monomorphizations instantiated in this crate will be made
    // available to downstream crates. This depends on whether we are in
    // share-generics mode and whether the current crate can even have
    // downstream crates.
    let export_generics = tcx.sess.opts.share_generics() &&
                          tcx.local_crate_exports_generics();

    let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
    let cgu_name_cache = &mut FxHashMap::default();

    for mono_item in mono_items {
        match mono_item.instantiation_mode(tcx) {
            InstantiationMode::GloballyShared { .. } => {}
            InstantiationMode::LocalCopy => continue,
        }

        let characteristic_def_id = characteristic_def_id_of_mono_item(tcx, mono_item);
        // In incremental builds, generic instantiations are "volatile" and go
        // into a separate CGU so edits to generic code invalidate less.
        let is_volatile = is_incremental_build &&
                          mono_item.is_generic_fn();

        let codegen_unit_name = match characteristic_def_id {
            Some(def_id) => compute_codegen_unit_name(tcx,
                                                      cgu_name_builder,
                                                      def_id,
                                                      is_volatile,
                                                      cgu_name_cache),
            None => fallback_cgu_name(cgu_name_builder),
        };

        let codegen_unit = codegen_units.entry(codegen_unit_name.clone())
            .or_insert_with(|| CodegenUnit::new(codegen_unit_name.clone()));

        let mut can_be_internalized = true;
        let (linkage, visibility) = mono_item_linkage_and_visibility(
            tcx,
            &mono_item,
            &mut can_be_internalized,
            export_generics,
        );
        if visibility == Visibility::Hidden && can_be_internalized {
            internalization_candidates.insert(mono_item);
        }

        codegen_unit.items_mut().insert(mono_item, (linkage, visibility));
        roots.insert(mono_item);
    }

    // always ensure we have at least one CGU; otherwise, if we have a
    // crate with just types (for example), we could wind up with no CGU
    if codegen_units.is_empty() {
        let codegen_unit_name = fallback_cgu_name(cgu_name_builder);
        codegen_units.insert(codegen_unit_name.clone(),
                             CodegenUnit::new(codegen_unit_name.clone()));
    }

    PreInliningPartitioning {
        codegen_units: codegen_units.into_iter()
                                    .map(|(_, codegen_unit)| codegen_unit)
                                    .collect(),
        roots,
        internalization_candidates,
    }
}
/// Compute the (linkage, visibility) pair for a mono item. An explicit
/// linkage attribute on the item wins outright (with default visibility);
/// otherwise linkage is `External` and only the visibility is computed.
fn mono_item_linkage_and_visibility(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    mono_item: &MonoItem<'tcx>,
    can_be_internalized: &mut bool,
    export_generics: bool,
) -> (Linkage, Visibility) {
    match mono_item.explicit_linkage(tcx) {
        Some(explicit_linkage) => (explicit_linkage, Visibility::Default),
        None => {
            let vis = mono_item_visibility(
                tcx,
                mono_item,
                can_be_internalized,
                export_generics,
            );
            (Linkage::External, vis)
        }
    }
}
/// Compute the symbol visibility for a mono item (and record, via
/// `can_be_internalized`, whether it must be kept out of the
/// internalization pass).
fn mono_item_visibility(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    mono_item: &MonoItem<'tcx>,
    can_be_internalized: &mut bool,
    export_generics: bool,
) -> Visibility {
    let instance = match mono_item {
        // This is pretty complicated, go below
        MonoItem::Fn(instance) => instance,

        // Misc handling for generics and such, but otherwise
        MonoItem::Static(def_id) => {
            return if tcx.is_reachable_non_generic(*def_id) {
                *can_be_internalized = false;
                default_visibility(tcx, *def_id, false)
            } else {
                Visibility::Hidden
            };
        }
        MonoItem::GlobalAsm(node_id) => {
            let def_id = tcx.hir().local_def_id(*node_id);
            return if tcx.is_reachable_non_generic(def_id) {
                *can_be_internalized = false;
                default_visibility(tcx, def_id, false)
            } else {
                Visibility::Hidden
            };
        }
    };

    let def_id = match instance.def {
        InstanceDef::Item(def_id) => def_id,

        // These are all compiler glue and such, never exported, always hidden.
        InstanceDef::VtableShim(..) |
        InstanceDef::FnPtrShim(..) |
        InstanceDef::Virtual(..) |
        InstanceDef::Intrinsic(..) |
        InstanceDef::ClosureOnceShim { .. } |
        InstanceDef::DropGlue(..) |
        InstanceDef::CloneShim(..) => {
            return Visibility::Hidden
        }
    };

    // The `start_fn` lang item is actually a monomorphized instance of a
    // function in the standard library, used for the `main` function. We don't
    // want to export it so we tag it with `Hidden` visibility but this symbol
    // is only referenced from the actual `main` symbol which we unfortunately
    // don't know anything about during partitioning/collection. As a result we
    // forcibly keep this symbol out of the `internalization_candidates` set.
    //
    // FIXME: eventually we don't want to always force this symbol to have
    //        hidden visibility, it should indeed be a candidate for
    //        internalization, but we have to understand that it's referenced
    //        from the `main` symbol we'll generate later.
    //
    //        This may be fixable with a new `InstanceDef` perhaps? Unsure!
    if tcx.lang_items().start_fn() == Some(def_id) {
        *can_be_internalized = false;
        return Visibility::Hidden
    }

    let is_generic = instance.substs.non_erasable_generics().next().is_some();

    // Upstream `DefId` instances get different handling than local ones
    if !def_id.is_local() {
        return if export_generics && is_generic {
            // If it is an upstream monomorphization
            // and we export generics, we must make
            // it available to downstream crates.
            *can_be_internalized = false;
            default_visibility(tcx, def_id, true)
        } else {
            Visibility::Hidden
        }
    }

    if is_generic {
        if export_generics {
            if tcx.is_unreachable_local_definition(def_id) {
                // This instance cannot be used
                // from another crate.
                Visibility::Hidden
            } else {
                // This instance might be useful in
                // a downstream crate.
                *can_be_internalized = false;
                default_visibility(tcx, def_id, true)
            }
        } else {
            // We are not exporting generics or
            // the definition is not reachable
            // for downstream crates, we can
            // internalize its instantiations.
            Visibility::Hidden
        }
    } else {
        // If this isn't a generic function then we mark this a `Default` if
        // this is a reachable item, meaning that it's a symbol other crates may
        // access when they link to us.
        if tcx.is_reachable_non_generic(def_id) {
            *can_be_internalized = false;
            debug_assert!(!is_generic);
            return default_visibility(tcx, def_id, false)
        }

        // If this isn't reachable then we're gonna tag this with `Hidden`
        // visibility. In some situations though we'll want to prevent this
        // symbol from being internalized.
        //
        // There's two categories of items here:
        //
        // * First is weak lang items. These are basically mechanisms for
        //   libcore to forward-reference symbols defined later in crates like
        //   the standard library or `#[panic_handler]` definitions. The
        //   definition of these weak lang items needs to be referenceable by
        //   libcore, so we're no longer a candidate for internalization.
        //   Removal of these functions can't be done by LLVM but rather must be
        //   done by the linker as it's a non-local decision.
        //
        // * Second is "std internal symbols". Currently this is primarily used
        //   for allocator symbols. Allocators are a little weird in their
        //   implementation, but the idea is that the compiler, at the last
        //   minute, defines an allocator with an injected object file. The
        //   `alloc` crate references these symbols (`__rust_alloc`) and the
        //   definition doesn't get hooked up until a linked crate artifact is
        //   generated.
        //
        //   The symbols synthesized by the compiler (`__rust_alloc`) are thin
        //   veneers around the actual implementation, some other symbol which
        //   implements the same ABI. These symbols (things like `__rg_alloc`,
        //   `__rdl_alloc`, `__rde_alloc`, etc), are all tagged with "std
        //   internal symbols".
        //
        //   The std-internal symbols here **should not show up in a dll as an
        //   exported interface**, so they return `false` from
        //   `is_reachable_non_generic` above and we'll give them `Hidden`
        //   visibility below. Like the weak lang items, though, we can't let
        //   LLVM internalize them as this decision is left up to the linker to
        //   omit them, so prevent them from being internalized.
        let attrs = tcx.codegen_fn_attrs(def_id);
        if attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) {
            *can_be_internalized = false;
        }

        Visibility::Hidden
    }
}
/// Visibility for a symbol that is nominally exported: `Default` only when
/// the target does not default to hidden visibility, or when the local,
/// non-generic item has export level C; `Hidden` otherwise.
fn default_visibility(tcx: TyCtxt<'_, '_, '_>, id: DefId, is_generic: bool) -> Visibility {
    if !tcx.sess.target.target.options.default_hidden_visibility {
        return Visibility::Default
    }

    // Generic functions never have export level C, and things with export
    // level C don't get instantiated in downstream crates — so generics and
    // non-local items can always be hidden.
    if is_generic || !id.is_local() {
        return Visibility::Hidden
    }

    // C-export level items remain at `Default`, all other internal
    // items become `Hidden`
    if let Some(SymbolExportLevel::C) = tcx.reachable_non_generics(id.krate).get(&id) {
        Visibility::Default
    } else {
        Visibility::Hidden
    }
}
/// Reduces the partitioning down to at most `target_cgu_count` codegen units
/// by repeatedly folding the smallest unit into the second-smallest, then
/// renames the surviving units with stable, index-based (`cguN`) names.
fn merge_codegen_units<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>,
                             initial_partitioning: &mut PreInliningPartitioning<'tcx>,
                             target_cgu_count: usize) {
    assert!(target_cgu_count >= 1);
    let codegen_units = &mut initial_partitioning.codegen_units;
    // Note that at this point in time the `codegen_units` here may not be in a
    // deterministic order (but we know they're deterministically the same set).
    // We want this merging to produce a deterministic ordering of codegen units
    // from the input.
    //
    // Due to basically how we've implemented the merging below (merge the two
    // smallest into each other) we're sure to start off with a deterministic
    // order (sorted by name). This'll mean that if two cgus have the same size
    // the stable sort below will keep everything nice and deterministic.
    codegen_units.sort_by_key(|cgu| *cgu.name());
    // Merge the two smallest codegen units until the target size is reached.
    while codegen_units.len() > target_cgu_count {
        // Sort small cgus to the back
        codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate()));
        let mut smallest = codegen_units.pop().unwrap();
        let second_smallest = codegen_units.last_mut().unwrap();
        // The merged unit's size estimate is the sum of both halves.
        second_smallest.modify_size_estimate(smallest.size_estimate());
        for (k, v) in smallest.items_mut().drain() {
            second_smallest.items_mut().insert(k, v);
        }
    }
    // Give every surviving unit a fresh, deterministic numbered name.
    let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
    for (index, cgu) in codegen_units.iter_mut().enumerate() {
        cgu.set_name(numbered_codegen_unit_name(cgu_name_builder, index));
    }
}
/// Builds the post-inlining partitioning: for every codegen unit, pulls in
/// CGU-private (`Internal`) copies of all items reachable through inlining
/// from its roots, and records in which CGU(s) each mono item ended up
/// (needed later by `internalize_symbols`).
fn place_inlined_mono_items<'tcx>(initial_partitioning: PreInliningPartitioning<'tcx>,
                                  inlining_map: &InliningMap<'tcx>)
                                  -> PostInliningPartitioning<'tcx> {
    let mut new_partitioning = Vec::new();
    let mut mono_item_placements = FxHashMap::default();
    let PreInliningPartitioning {
        codegen_units: initial_cgus,
        roots,
        internalization_candidates,
    } = initial_partitioning;
    // With a single CGU there is no point tracking placements below.
    let single_codegen_unit = initial_cgus.len() == 1;
    for old_codegen_unit in initial_cgus {
        // Collect all items that need to be available in this codegen unit
        let mut reachable = FxHashSet::default();
        for root in old_codegen_unit.items().keys() {
            follow_inlining(*root, inlining_map, &mut reachable);
        }
        let mut new_codegen_unit = CodegenUnit::new(old_codegen_unit.name().clone());
        // Add all monomorphizations that are not already there
        for mono_item in reachable {
            if let Some(linkage) = old_codegen_unit.items().get(&mono_item) {
                // This is a root, just copy it over
                new_codegen_unit.items_mut().insert(mono_item, *linkage);
            } else {
                if roots.contains(&mono_item) {
                    bug!("GloballyShared mono-item inlined into other CGU: \
                          {:?}", mono_item);
                }
                // This is a cgu-private copy
                new_codegen_unit.items_mut().insert(
                    mono_item,
                    (Linkage::Internal, Visibility::Default),
                );
            }
            if !single_codegen_unit {
                // If there is more than one codegen unit, we need to keep track
                // in which codegen units each monomorphization is placed:
                match mono_item_placements.entry(mono_item) {
                    Entry::Occupied(e) => {
                        let placement = e.into_mut();
                        debug_assert!(match *placement {
                            MonoItemPlacement::SingleCgu { ref cgu_name } => {
                                *cgu_name != *new_codegen_unit.name()
                            }
                            MonoItemPlacement::MultipleCgus => true,
                        });
                        *placement = MonoItemPlacement::MultipleCgus;
                    }
                    Entry::Vacant(e) => {
                        e.insert(MonoItemPlacement::SingleCgu {
                            cgu_name: new_codegen_unit.name().clone()
                        });
                    }
                }
            }
        }
        new_partitioning.push(new_codegen_unit);
    }
    return PostInliningPartitioning {
        codegen_units: new_partitioning,
        mono_item_placements,
        internalization_candidates,
    };
    // Transitively adds `mono_item` and every inlining candidate reachable
    // from it to `visited`.
    fn follow_inlining<'tcx>(mono_item: MonoItem<'tcx>,
                             inlining_map: &InliningMap<'tcx>,
                             visited: &mut FxHashSet<MonoItem<'tcx>>) {
        if !visited.insert(mono_item) {
            return;
        }
        inlining_map.with_inlining_candidates(mono_item, |target| {
            follow_inlining(target, inlining_map, visited);
        });
    }
}
/// Flips eligible internalization candidates to `Internal` linkage, but only
/// those that are never accessed from a codegen unit other than the one that
/// defines them (otherwise linking would fail to resolve the symbol).
fn internalize_symbols<'a, 'tcx>(_tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                 partitioning: &mut PostInliningPartitioning<'tcx>,
                                 inlining_map: &InliningMap<'tcx>) {
    if partitioning.codegen_units.len() == 1 {
        // Fast path for when there is only one codegen unit. In this case we
        // can internalize all candidates, since there is nowhere else they
        // could be accessed from.
        for cgu in &mut partitioning.codegen_units {
            for candidate in &partitioning.internalization_candidates {
                cgu.items_mut().insert(*candidate,
                                       (Linkage::Internal, Visibility::Default));
            }
        }
        return;
    }
    // Build a map from every monomorphization to all the monomorphizations that
    // reference it.
    let mut accessor_map: FxHashMap<MonoItem<'tcx>, Vec<MonoItem<'tcx>>> = Default::default();
    inlining_map.iter_accesses(|accessor, accessees| {
        for accessee in accessees {
            accessor_map.entry(*accessee)
                        .or_default()
                        .push(accessor);
        }
    });
    let mono_item_placements = &partitioning.mono_item_placements;
    // For each internalization candidates in each codegen unit, check if it is
    // accessed from outside its defining codegen unit.
    for cgu in &mut partitioning.codegen_units {
        let home_cgu = MonoItemPlacement::SingleCgu {
            cgu_name: cgu.name().clone()
        };
        for (accessee, linkage_and_visibility) in cgu.items_mut() {
            if !partitioning.internalization_candidates.contains(accessee) {
                // This item is no candidate for internalizing, so skip it.
                continue
            }
            debug_assert_eq!(mono_item_placements[accessee], home_cgu);
            if let Some(accessors) = accessor_map.get(accessee) {
                if accessors.iter()
                            .filter_map(|accessor| {
                                // Some accessors might not have been
                                // instantiated. We can safely ignore those.
                                mono_item_placements.get(accessor)
                            })
                            .any(|placement| *placement != home_cgu) {
                    // Found an accessor from another CGU, so skip to the next
                    // item without marking this one as internal.
                    continue
                }
            }
            // If we got here, we did not find any accesses from other CGUs,
            // so it's fine to make this monomorphization internal.
            *linkage_and_visibility = (Linkage::Internal, Visibility::Default);
        }
    }
}
/// Picks the `DefId` whose module "characterizes" `mono_item` for the purpose
/// of grouping items into per-module codegen units: methods are grouped with
/// their self-type where that type yields a characteristic `DefId`, otherwise
/// with their own definition. Returns `None` for compiler-generated shims
/// that have no natural home module.
fn characteristic_def_id_of_mono_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                                mono_item: MonoItem<'tcx>)
                                                -> Option<DefId> {
    match mono_item {
        MonoItem::Fn(instance) => {
            let def_id = match instance.def {
                ty::InstanceDef::Item(def_id) => def_id,
                ty::InstanceDef::VtableShim(..) |
                ty::InstanceDef::FnPtrShim(..) |
                ty::InstanceDef::ClosureOnceShim { .. } |
                ty::InstanceDef::Intrinsic(..) |
                ty::InstanceDef::DropGlue(..) |
                ty::InstanceDef::Virtual(..) |
                ty::InstanceDef::CloneShim(..) => return None
            };
            // If this is a method, we want to put it into the same module as
            // its self-type. If the self-type does not provide a characteristic
            // DefId, we use the location of the impl after all.
            if tcx.trait_of_item(def_id).is_some() {
                let self_ty = instance.substs.type_at(0);
                // This is an implementation of a trait method.
                return characteristic_def_id_of_type(self_ty).or(Some(def_id));
            }
            if let Some(impl_def_id) = tcx.impl_of_method(def_id) {
                // This is a method within an inherent impl, find out what the
                // self-type is:
                let impl_self_ty = tcx.subst_and_normalize_erasing_regions(
                    instance.substs,
                    ty::ParamEnv::reveal_all(),
                    &tcx.type_of(impl_def_id),
                );
                if let Some(def_id) = characteristic_def_id_of_type(impl_self_ty) {
                    return Some(def_id);
                }
            }
            Some(def_id)
        }
        MonoItem::Static(def_id) => Some(def_id),
        MonoItem::GlobalAsm(node_id) => Some(tcx.hir().local_def_id(node_id)),
    }
}
/// Memoization cache for `compute_codegen_unit_name`, keyed by the chosen
/// module `DefId` and the `volatile` flag.
type CguNameCache = FxHashMap<(DefId, bool), InternedString>;
/// Computes the codegen-unit name for `def_id` under per-module partitioning:
/// the innermost enclosing module that is not itself nested inside a function
/// (falling back to the crate root), with an optional `volatile` suffix.
/// Results are memoized in `cache`.
fn compute_codegen_unit_name(tcx: TyCtxt<'_, '_, '_>,
                             name_builder: &mut CodegenUnitNameBuilder<'_, '_, '_>,
                             def_id: DefId,
                             volatile: bool,
                             cache: &mut CguNameCache)
                             -> InternedString {
    // Find the innermost module that is not nested within a function
    let mut current_def_id = def_id;
    let mut cgu_def_id = None;
    // Walk backwards from the item we want to find the module for:
    loop {
        let def_key = tcx.def_key(current_def_id);
        match def_key.disambiguated_data.data {
            DefPathData::Module(..) => {
                if cgu_def_id.is_none() {
                    cgu_def_id = Some(current_def_id);
                }
            }
            DefPathData::CrateRoot { .. } => {
                if cgu_def_id.is_none() {
                    // If we have not found a module yet, take the crate root.
                    cgu_def_id = Some(DefId {
                        krate: def_id.krate,
                        index: CRATE_DEF_INDEX,
                    });
                }
                break
            }
            _ => {
                // If we encounter something that is not a module, throw away
                // any module that we've found so far because we now know that
                // it is nested within something else.
                cgu_def_id = None;
            }
        }
        current_def_id.index = def_key.parent.unwrap();
    }
    let cgu_def_id = cgu_def_id.unwrap();
    cache.entry((cgu_def_id, volatile)).or_insert_with(|| {
        let def_path = tcx.def_path(cgu_def_id);
        let components = def_path
            .data
            .iter()
            .map(|part| part.data.as_interned_str());
        let volatile_suffix = if volatile {
            Some("volatile")
        } else {
            None
        };
        name_builder.build_cgu_name(def_path.krate, components, volatile_suffix)
    }).clone()
}
/// Produces the name for the `index`-th numbered codegen unit of the local
/// crate; used after merging discards the original module-based names.
fn numbered_codegen_unit_name(name_builder: &mut CodegenUnitNameBuilder<'_, '_, '_>,
                              index: usize)
                              -> InternedString {
    name_builder.build_cgu_name_no_mangle(LOCAL_CRATE, &["cgu"], Some(index))
}
/// In debug builds, logs `label` followed by every codegen unit in `cgus`,
/// listing each mono item with its linkage and the trailing hash portion of
/// its symbol name. Does nothing in release builds.
fn debug_dump<'a, 'b, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                               label: &str,
                               cgus: I)
    where I: Iterator<Item=&'b CodegenUnit<'tcx>>,
          'tcx: 'a + 'b
{
    // Skip all the work entirely in release builds.
    if !cfg!(debug_assertions) {
        return;
    }
    debug!("{}", label);
    for cgu in cgus {
        debug!("CodegenUnit {}:", cgu.name());
        for (mono_item, linkage) in cgu.items() {
            let symbol_name = mono_item.symbol_name(tcx).as_str();
            // Print only the trailing hash of the symbol to keep lines short.
            let symbol_hash = match symbol_name.rfind('h') {
                Some(i) => &symbol_name[i..],
                None => "<no hash>",
            };
            debug!(" - {} [{:?}] [{}]",
                   mono_item.to_string(tcx, true),
                   linkage,
                   symbol_hash);
        }
        debug!("");
    }
}
/// Query provider: collects all mono items of the local crate and partitions
/// them into codegen units (per-module when compiling incrementally, otherwise
/// a fixed unit count). Also implements the `-Z print-mono-items` test output.
fn collect_and_partition_mono_items<'a, 'tcx>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    cnum: CrateNum,
) -> (Arc<DefIdSet>, Arc<Vec<Arc<CodegenUnit<'tcx>>>>)
{
    assert_eq!(cnum, LOCAL_CRATE);
    // `-Z print-mono-items=<mode>` selects the collection mode explicitly;
    // otherwise `-C link-dead-code` forces eager collection.
    let collection_mode = match tcx.sess.opts.debugging_opts.print_mono_items {
        Some(ref s) => {
            let mode_string = s.to_lowercase();
            let mode_string = mode_string.trim();
            if mode_string == "eager" {
                MonoItemCollectionMode::Eager
            } else {
                if mode_string != "lazy" {
                    let message = format!("Unknown codegen-item collection mode '{}'. \
                                           Falling back to 'lazy' mode.",
                                          mode_string);
                    tcx.sess.warn(&message);
                }
                MonoItemCollectionMode::Lazy
            }
        }
        None => {
            if tcx.sess.opts.cg.link_dead_code {
                MonoItemCollectionMode::Eager
            } else {
                MonoItemCollectionMode::Lazy
            }
        }
    };
    let (items, inlining_map) =
        time(tcx.sess, "monomorphization collection", || {
            collector::collect_crate_mono_items(tcx, collection_mode)
        });
    tcx.sess.abort_if_errors();
    crate::monomorphize::assert_symbols_are_distinct(tcx, items.iter());
    let strategy = if tcx.sess.opts.incremental.is_some() {
        PartitioningStrategy::PerModule
    } else {
        PartitioningStrategy::FixedUnitCount(tcx.sess.codegen_units())
    };
    let codegen_units = time(tcx.sess, "codegen unit partitioning", || {
        partition(
            tcx,
            items.iter().cloned(),
            strategy,
            &inlining_map
        )
            .into_iter()
            .map(Arc::new)
            .collect::<Vec<_>>()
    });
    // The set of DefIds that actually got codegened (functions and statics).
    let mono_items: DefIdSet = items.iter().filter_map(|mono_item| {
        match *mono_item {
            MonoItem::Fn(ref instance) => Some(instance.def_id()),
            MonoItem::Static(def_id) => Some(def_id),
            _ => None,
        }
    }).collect();
    if tcx.sess.opts.debugging_opts.print_mono_items.is_some() {
        // Emit one sorted `MONO_ITEM` line per item, annotated with the CGUs
        // it was placed in and the abbreviated linkage in each.
        let mut item_to_cgus: FxHashMap<_, Vec<_>> = Default::default();
        for cgu in &codegen_units {
            for (&mono_item, &linkage) in cgu.items() {
                item_to_cgus.entry(mono_item)
                            .or_default()
                            .push((cgu.name().clone(), linkage));
            }
        }
        let mut item_keys: Vec<_> = items
            .iter()
            .map(|i| {
                let mut output = i.to_string(tcx, false);
                output.push_str(" @@");
                let mut empty = Vec::new();
                let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty);
                cgus.sort_by_key(|(name, _)| *name);
                cgus.dedup();
                for &(ref cgu_name, (linkage, _)) in cgus.iter() {
                    output.push_str(" ");
                    output.push_str(&cgu_name.as_str());
                    let linkage_abbrev = match linkage {
                        Linkage::External => "External",
                        Linkage::AvailableExternally => "Available",
                        Linkage::LinkOnceAny => "OnceAny",
                        Linkage::LinkOnceODR => "OnceODR",
                        Linkage::WeakAny => "WeakAny",
                        Linkage::WeakODR => "WeakODR",
                        Linkage::Appending => "Appending",
                        Linkage::Internal => "Internal",
                        Linkage::Private => "Private",
                        Linkage::ExternalWeak => "ExternalWeak",
                        Linkage::Common => "Common",
                    };
                    output.push_str("[");
                    output.push_str(linkage_abbrev);
                    output.push_str("]");
                }
                output
            })
            .collect();
        item_keys.sort();
        for item in item_keys {
            println!("MONO_ITEM {}", item);
        }
    }
    (Arc::new(mono_items), Arc::new(codegen_units))
}
/// Wires the mono-item collection/partitioning providers into the query
/// system.
pub fn provide(providers: &mut Providers<'_>) {
    providers.collect_and_partition_mono_items =
        collect_and_partition_mono_items;
    // An item counts as "codegened" iff it appears in the collected set.
    providers.is_codegened_item = |tcx, def_id| {
        let (all_mono_items, _) =
            tcx.collect_and_partition_mono_items(LOCAL_CRATE);
        all_mono_items.contains(&def_id)
    };
    // Looks up a codegen unit by name; panics if no unit has that name.
    providers.codegen_unit = |tcx, name| {
        let (_, all) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
        all.iter()
            .find(|cgu| *cgu.name() == name)
            .cloned()
            .unwrap_or_else(|| panic!("failed to find cgu with name {:?}", name))
    };
}
use std::collections::HashMap;
use std::env;
use std::fs;
use std::io::{self, BufRead, BufReader, Read, Write};
use std::iter::Peekable;
use std::mem;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::str::Chars;
use std::thread;
use crate::config::{Color, Config, EmitMode, FileName, NewlineStyle, ReportTactic};
use crate::formatting::{ReportedErrors, SourceFile};
use crate::rustfmt_diff::{make_diff, print_diff, DiffLine, Mismatch, ModifiedChunk, OutputWriter};
use crate::source_file;
use crate::{is_nightly_channel, FormatReport, FormatReportFormatterBuilder, Input, Session};
use rustfmt_config_proc_macro::nightly_only_test;
mod configuration_snippet;
mod mod_resolver;
mod parser;
const DIFF_CONTEXT_SIZE: usize = 3;
/// A list of files on which we want to skip testing.
///
/// Entries are matched with `is_subpath`, so each one matches any test file
/// whose path contains these components.
const FILE_SKIP_LIST: &[&str] = &[
    // We want to make sure that the `skip_children` is correctly working,
    // so we do not want to test this file directly.
    "configs/skip_children/foo/mod.rs",
    "issue-3434/no_entry.rs",
    "issue-3665/sub_mod.rs",
    // Testing for issue-3779
    "issue-3779/ice.rs",
    // These files and directory are a part of modules defined inside `cfg_if!`.
    "cfg_if/mod.rs",
    "cfg_if/detect",
    "issue-3253/foo.rs",
    "issue-3253/bar.rs",
    "issue-3253/paths",
    // These files and directory are a part of modules defined inside `cfg_attr(..)`.
    "cfg_mod/dir",
    "cfg_mod/bar.rs",
    "cfg_mod/foo.rs",
    "cfg_mod/wasm32.rs",
    "skip/foo.rs",
];
/// Initializes `env_logger` for tests. `is_test(true)` keeps log output
/// captured by the test harness; the `Err` from a second initialization is
/// deliberately ignored so every test can call this.
fn init_log() {
    let _ = env_logger::builder().is_test(true).try_init();
}
/// Knobs controlling how the formatting tests are executed.
struct TestSetting {
    /// The size of the stack of the thread that run tests.
    stack_size: usize,
}

impl Default for TestSetting {
    fn default() -> Self {
        // 8 MB — formatting deeply nested sources recurses heavily.
        const DEFAULT_STACK_SIZE: usize = 8_388_608;
        TestSetting {
            stack_size: DEFAULT_STACK_SIZE,
        }
    }
}
/// Runs `f` on a dedicated thread configured per `test_setting` (notably its
/// stack size) and blocks until it finishes, propagating any panic.
fn run_test_with<F>(test_setting: &TestSetting, f: F)
where
    F: FnOnce(),
    F: Send + 'static,
{
    let handle = thread::Builder::new()
        .stack_size(test_setting.stack_size)
        .spawn(f)
        .expect("Failed to create a test thread");
    handle.join().expect("Failed to join a test thread")
}
/// Returns `true` if `subpath`'s components appear as a contiguous run of
/// `path`'s components (e.g. `"b/c"` is a subpath of `"a/b/c"`).
///
/// Fix: the previous implementation `zip`ped a window of `path` with
/// `subpath`, and `zip` stops at the shorter iterator — so a mere *prefix* of
/// `subpath` matching a suffix of `path` was wrongly accepted (e.g.
/// `is_subpath("a/b", "b/c")` returned `true`). All of `subpath` must match.
fn is_subpath<P>(path: &Path, subpath: &P) -> bool
where
    P: AsRef<Path>,
{
    let needle: Vec<_> = subpath.as_ref().components().collect();
    // An empty needle matches any non-empty path (and never an empty one),
    // mirroring the previous behavior for this degenerate case.
    if needle.is_empty() {
        return path.components().next().is_some();
    }
    let haystack: Vec<_> = path.components().collect();
    // `windows(n)` yields nothing when the haystack is shorter than the
    // needle, so short paths correctly fail to match.
    haystack
        .windows(needle.len())
        .any(|window| window == needle.as_slice())
}
/// Returns `true` if `path` matches any entry of `FILE_SKIP_LIST` and should
/// therefore be excluded from testing.
fn is_file_skip(path: &Path) -> bool {
    for file_path in FILE_SKIP_LIST {
        if is_subpath(path, file_path) {
            return true;
        }
    }
    false
}
// Returns a `Vec` containing `PathBuf`s of files with an `rs` extension in the
// given path. The `recursive` argument controls if files from subdirectories
// are also returned.
/// Returns the `.rs` files under `path` that are not on the skip list; when
/// `recursive` is set, subdirectories are searched as well.
///
/// Panics if a directory cannot be read or an entry cannot be accessed.
fn get_test_files(path: &Path, recursive: bool) -> Vec<PathBuf> {
    let mut files = vec![];
    if path.is_dir() {
        // `unwrap_or_else` avoids building the panic message on the success
        // path (clippy::expect_fun_call); `display()` also renders non-UTF-8
        // paths instead of panicking inside `to_str().unwrap()`.
        let entries = fs::read_dir(path)
            .unwrap_or_else(|_| panic!("couldn't read directory {}", path.display()));
        for entry in entries {
            let entry = entry.expect("couldn't get `DirEntry`");
            let path = entry.path();
            if path.is_dir() && recursive {
                files.append(&mut get_test_files(&path, recursive));
            } else if path.extension().map_or(false, |f| f == "rs") && !is_file_skip(&path) {
                files.push(path);
            }
        }
    }
    files
}
/// Asserts that every `.rs` file directly inside `path` exercises the config
/// option `config_name` via a leading `// rustfmt-<config_name>` comment.
///
/// Panics if the directory or a file cannot be read, or if a file lacks the
/// expected comment.
fn verify_config_used(path: &Path, config_name: &str) {
    // `unwrap_or_else` avoids building the panic message on the success path
    // (clippy::expect_fun_call), matching the style used for file opening
    // below.
    let entries = fs::read_dir(path)
        .unwrap_or_else(|_| panic!("couldn't read {} directory", path.display()));
    for entry in entries {
        let entry = entry.expect("couldn't get directory entry");
        let path = entry.path();
        if path.extension().map_or(false, |f| f == "rs") {
            // check if "// rustfmt-<config_name>:" appears in the file.
            let filebuf = BufReader::new(
                fs::File::open(&path)
                    .unwrap_or_else(|_| panic!("couldn't read file {}", path.display())),
            );
            assert!(
                filebuf
                    .lines()
                    .map(Result::unwrap)
                    .take_while(|l| l.starts_with("//"))
                    .any(|l| l.starts_with(&format!("// rustfmt-{}", config_name))),
                "config option file {} does not contain expected config name",
                path.display()
            );
        }
    }
}
/// Each subdirectory of the `configs` test trees is named after a config
/// option; checks that the tests inside actually exercise that option.
#[test]
fn verify_config_test_names() {
    init_log();
    for path in &[
        Path::new("tests/source/configs"),
        Path::new("tests/target/configs"),
    ] {
        for entry in fs::read_dir(path).expect("couldn't read configs directory") {
            let entry = entry.expect("couldn't get directory entry");
            let path = entry.path();
            if path.is_dir() {
                let config_name = path.file_name().unwrap().to_str().unwrap();
                // Make sure that config name is used in the files in the directory.
                verify_config_used(&path, config_name);
            }
        }
    }
}
// This writes to the terminal using the same approach (via `term::stdout` or
// `println!`) that is used by `rustfmt::rustfmt_diff::print_diff`. Writing
// using only one or the other will cause the output order to differ when
// `print_diff` selects the approach not used.
/// Writes `msg` to the terminal via `OutputWriter` so its ordering matches
/// `print_diff`'s output (see the comment above).
fn write_message(msg: &str) {
    let mut writer = OutputWriter::new(Color::Auto);
    writer.writeln(msg, None);
}
// Integration tests. The files in `tests/source` are formatted and compared
// to their equivalent in `tests/target`. The target file and config can be
// overridden by annotations in the source file. The input and output must match
// exactly.
/// Formats every file under `tests/source` and compares against the
/// corresponding `tests/target` file (see the comment above for override
/// rules).
#[test]
fn system_tests() {
    init_log();
    run_test_with(&TestSetting::default(), || {
        // Get all files in the tests/source directory.
        let files = get_test_files(Path::new("tests/source"), true);
        let (_reports, count, fails) = check_files(files, &None);
        // Display results.
        println!("Ran {} system tests.", count);
        assert_eq!(fails, 0, "{} system tests failed", fails);
        // Guard against silently running far fewer tests than expected
        // (e.g. when the test files are not found).
        assert!(
            count >= 300,
            "Expected a minimum of {} system tests to be executed",
            300
        )
    });
}
// Do the same for tests/coverage-source directory.
// The only difference is the coverage mode.
/// Like `system_tests`, but runs the files in `tests/coverage/source` in
/// coverage mode.
#[test]
fn coverage_tests() {
    init_log();
    let files = get_test_files(Path::new("tests/coverage/source"), true);
    let (_reports, count, fails) = check_files(files, &None);
    println!("Ran {} tests in coverage mode.", count);
    assert_eq!(fails, 0, "{} tests failed", fails);
}
/// Checks the Checkstyle (XML) emit mode against a known-good output file.
#[test]
fn checkstyle_test() {
    init_log();
    let filename = "tests/writemode/source/fn-single-line.rs";
    let expected_filename = "tests/writemode/target/checkstyle.xml";
    assert_output(Path::new(filename), Path::new(expected_filename));
}
/// Checks the JSON emit mode against a known-good output file.
#[test]
fn json_test() {
    init_log();
    let filename = "tests/writemode/source/json.rs";
    let expected_filename = "tests/writemode/target/output.json";
    assert_output(Path::new(filename), Path::new(expected_filename));
}
/// Exercises `EmitMode::ModifiedLines`: parses the emitted chunk headers
/// (`<orig line> <lines removed> <lines added>`, followed by the added lines)
/// and checks the parsed chunks against the expected ones.
#[test]
fn modified_test() {
    init_log();
    use std::io::BufRead;
    // Test "modified" output
    let filename = "tests/writemode/source/modified.rs";
    let mut data = Vec::new();
    let mut config = Config::default();
    config
        .set()
        .emit_mode(crate::config::EmitMode::ModifiedLines);
    {
        let mut session = Session::new(config, Some(&mut data));
        session.format(Input::File(filename.into())).unwrap();
    }
    let mut lines = data.lines();
    let mut chunks = Vec::new();
    while let Some(Ok(header)) = lines.next() {
        // Parse the header line
        let values: Vec<_> = header
            .split(' ')
            .map(|s| s.parse::<u32>().unwrap())
            .collect();
        assert_eq!(values.len(), 3);
        let line_number_orig = values[0];
        let lines_removed = values[1];
        let num_added = values[2];
        // The header is followed by exactly `num_added` replacement lines.
        let mut added_lines = Vec::new();
        for _ in 0..num_added {
            added_lines.push(lines.next().unwrap().unwrap());
        }
        chunks.push(ModifiedChunk {
            line_number_orig,
            lines_removed,
            lines: added_lines,
        });
    }
    assert_eq!(
        chunks,
        vec![
            ModifiedChunk {
                line_number_orig: 4,
                lines_removed: 4,
                lines: vec!["fn blah() {}".into()],
            },
            ModifiedChunk {
                line_number_orig: 9,
                lines_removed: 6,
                lines: vec!["#[cfg(a, b)]".into(), "fn main() {}".into()],
            },
        ],
    );
}
/// Helper function for comparing the results of rustfmt
/// to a known output file generated by one of the write modes.
/// The config is derived from `source`'s significant comments; panics with a
/// printed diff when the output differs from `expected_filename`.
fn assert_output(source: &Path, expected_filename: &Path) {
    let config = read_config(source);
    let (_, source_file, _) = format_file(source, config.clone());
    // Populate output by writing to a vec.
    let mut out = vec![];
    let _ = source_file::write_all_files(&source_file, &mut out, &config);
    let output = String::from_utf8(out).unwrap();
    let mut expected_file = fs::File::open(&expected_filename).expect("couldn't open target");
    let mut expected_text = String::new();
    expected_file
        .read_to_string(&mut expected_text)
        .expect("Failed reading target");
    let compare = make_diff(&expected_text, &output, DIFF_CONTEXT_SIZE);
    if !compare.is_empty() {
        let mut failures = HashMap::new();
        failures.insert(source.to_owned(), compare);
        print_mismatches_default_message(failures);
        panic!("Text does not match expected output");
    }
}
/// Helper for comparing the result of formatting `source` as stdin-style
/// `Input::Text` (with the given `emit_mode`) against `expected_filename`.
///
/// `has_diff` is the expected value of `ReportedErrors::has_diff` after the
/// session (emit modes that report diffs set it). Panics with a printed diff
/// on any mismatch.
fn assert_stdin_output(
    source: &Path,
    expected_filename: &Path,
    emit_mode: EmitMode,
    has_diff: bool,
) {
    let mut config = Config::default();
    // Pin the newline style so expectations don't depend on the host OS.
    config.set().newline_style(NewlineStyle::Unix);
    config.set().emit_mode(emit_mode);
    let mut source_file = fs::File::open(&source).expect("couldn't open source");
    let mut source_text = String::new();
    source_file
        .read_to_string(&mut source_text)
        .expect("Failed reading target");
    let input = Input::Text(source_text);
    // Populate output by writing to a vec.
    let mut buf: Vec<u8> = vec![];
    {
        let mut session = Session::new(config, Some(&mut buf));
        session.format(input).unwrap();
        // Field shorthand (fixes clippy::redundant_field_names).
        let errors = ReportedErrors {
            has_diff,
            ..Default::default()
        };
        assert_eq!(session.errors, errors);
    }
    let mut expected_file = fs::File::open(&expected_filename).expect("couldn't open target");
    let mut expected_text = String::new();
    expected_file
        .read_to_string(&mut expected_text)
        .expect("Failed reading target");
    let output = String::from_utf8(buf).unwrap();
    let compare = make_diff(&expected_text, &output, DIFF_CONTEXT_SIZE);
    if !compare.is_empty() {
        let mut failures = HashMap::new();
        failures.insert(source.to_owned(), compare);
        print_mismatches_default_message(failures);
        panic!("Text does not match expected output");
    }
}
// Idempotence tests. Files in tests/target are checked to be unaltered by
// rustfmt.
#[nightly_only_test]
#[test]
fn idempotence_tests() {
    init_log();
    run_test_with(&TestSetting::default(), || {
        // Get all files in the tests/target directory.
        let files = get_test_files(Path::new("tests/target"), true);
        let (_reports, count, fails) = check_files(files, &None);
        // Display results.
        println!("Ran {} idempotent tests.", count);
        assert_eq!(fails, 0, "{} idempotent tests failed", fails);
        // Guard against silently running far fewer tests than expected
        // (e.g. when the test files are not found).
        assert!(
            count >= 400,
            "Expected a minimum of {} idempotent tests to be executed",
            400
        )
    });
}
// Run rustfmt on itself. This operation must be idempotent. We also check that
// no warnings are emitted.
// Issue-3443: these tests require nightly
/// Formats rustfmt's own sources (library, test support, and the binaries'
/// `main.rs` files) and requires zero failures and zero warnings.
#[nightly_only_test]
#[test]
fn self_tests() {
    init_log();
    let mut files = get_test_files(Path::new("tests"), false);
    let bin_directories = vec!["cargo-fmt", "git-rustfmt", "bin", "format-diff"];
    for dir in bin_directories {
        let mut path = PathBuf::from("src");
        path.push(dir);
        path.push("main.rs");
        files.push(path);
    }
    files.push(PathBuf::from("src/lib.rs"));
    // Use the project's own rustfmt.toml rather than per-file configs.
    let (reports, count, fails) = check_files(files, &Some(PathBuf::from("rustfmt.toml")));
    let mut warnings = 0;
    // Display results.
    println!("Ran {} self tests.", count);
    assert_eq!(fails, 0, "{} self tests failed", fails);
    for format_report in reports {
        println!(
            "{}",
            FormatReportFormatterBuilder::new(&format_report).build()
        );
        warnings += format_report.warning_count();
    }
    assert_eq!(
        warnings, 0,
        "Rustfmt's code generated {} warnings",
        warnings
    );
}
/// Regression test for issue #4656: formatting `lib.rs` must also discover
/// and format a module file referenced through `cfg_if!`.
#[test]
fn format_files_find_new_files_via_cfg_if() {
    init_log();
    run_test_with(&TestSetting::default(), || {
        // To repro issue-4656, it is necessary that these files are parsed
        // as a part of the same session (hence this separate test runner).
        let files = vec![
            Path::new("tests/source/issue-4656/lib2.rs"),
            Path::new("tests/source/issue-4656/lib.rs"),
        ];
        let config = Config::default();
        let mut session = Session::<io::Stdout>::new(config, None);
        let mut write_result = HashMap::new();
        for file in files {
            assert!(file.exists());
            let result = session.format(Input::File(file.into())).unwrap();
            assert!(!session.has_formatting_errors());
            assert!(!result.has_warnings());
            // Take ownership of the formatted files produced by this pass.
            let mut source_file = SourceFile::new();
            mem::swap(&mut session.source_file, &mut source_file);
            for (filename, text) in source_file {
                if let FileName::Real(ref filename) = filename {
                    write_result.insert(filename.to_owned(), text);
                }
            }
        }
        assert_eq!(
            3,
            write_result.len(),
            "Should have uncovered an extra file (format_me_please.rs) via lib.rs"
        );
        assert!(handle_result(write_result, None).is_ok());
    });
}
/// Formats a trivial program supplied as `Input::Text` and checks the exact
/// bytes emitted to the session's output (newline differs per platform).
#[test]
fn stdin_formatting_smoke_test() {
    init_log();
    let input = Input::Text("fn main () {}".to_owned());
    let mut config = Config::default();
    config.set().emit_mode(EmitMode::Stdout);
    let mut buf: Vec<u8> = vec![];
    {
        let mut session = Session::new(config, Some(&mut buf));
        session.format(input).unwrap();
        assert!(session.has_no_errors());
    }
    #[cfg(not(windows))]
    assert_eq!(buf, "<stdin>:\n\nfn main() {}\n".as_bytes());
    #[cfg(windows)]
    assert_eq!(buf, "<stdin>:\n\nfn main() {}\r\n".as_bytes());
}
/// Parser panics on unbalanced input must be caught and surfaced as parsing
/// errors rather than crashing the session (issue #3239).
#[test]
fn stdin_parser_panic_caught() {
    init_log();
    // See issue #3239.
    for text in ["{", "}"].iter().cloned().map(String::from) {
        let mut buf = vec![];
        let mut session = Session::new(Default::default(), Some(&mut buf));
        let _ = session.format(Input::Text(text));
        assert!(session.has_parsing_errors());
    }
}
/// Ensures that `EmitMode::ModifiedLines` works with input from `stdin`. Useful
/// when embedding Rustfmt (e.g. inside RLS).
#[test]
fn stdin_works_with_modified_lines() {
    init_log();
    let input = "\nfn\n some( )\n{\n}\nfn main () {}\n";
    // "1 6 2" = chunk header: starts at original line 1, removes 6 lines,
    // adds the 2 lines that follow.
    let output = "1 6 2\nfn some() {}\nfn main() {}\n";
    let input = Input::Text(input.to_owned());
    let mut config = Config::default();
    config.set().newline_style(NewlineStyle::Unix);
    config.set().emit_mode(EmitMode::ModifiedLines);
    let mut buf: Vec<u8> = vec![];
    {
        let mut session = Session::new(config, Some(&mut buf));
        session.format(input).unwrap();
        // A rewritten chunk is reported as a diff.
        let errors = ReportedErrors {
            has_diff: true,
            ..Default::default()
        };
        assert_eq!(session.errors, errors);
    }
    assert_eq!(buf, output.as_bytes());
}
/// Ensures that `EmitMode::Json` works with input from `stdin`.
/// The expected payload lives in `tests/writemode/target/stdin.json`.
#[test]
fn stdin_works_with_json() {
    init_log();
    assert_stdin_output(
        Path::new("tests/writemode/source/stdin.rs"),
        Path::new("tests/writemode/target/stdin.json"),
        EmitMode::Json,
        true,
    );
}
/// Ensures that `EmitMode::Checkstyle` works with input from `stdin`.
/// The expected payload lives in `tests/writemode/target/stdin.xml`.
#[test]
fn stdin_works_with_checkstyle() {
    init_log();
    assert_stdin_output(
        Path::new("tests/writemode/source/stdin.rs"),
        Path::new("tests/writemode/target/stdin.xml"),
        EmitMode::Checkstyle,
        false,
    );
}
/// Spawns the rustfmt binary with a config that disables all formatting and
/// checks that stdin passes through byte-for-byte with an empty stderr.
#[test]
fn stdin_disable_all_formatting_test() {
    init_log();
    let input = String::from("fn main() { println!(\"This should not be formatted.\"); }");
    let mut child = Command::new(rustfmt().to_str().unwrap())
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .arg("--config-path=./tests/config/disable_all_formatting.toml")
        .spawn()
        .expect("failed to execute child");
    {
        let stdin = child.stdin.as_mut().expect("failed to get stdin");
        stdin
            .write_all(input.as_bytes())
            .expect("failed to write stdin");
    }
    let output = child.wait_with_output().expect("failed to wait on child");
    assert!(output.status.success());
    assert!(output.stderr.is_empty());
    assert_eq!(input, String::from_utf8(output.stdout).unwrap());
}
/// Pins the current (known-imperfect) behavior of issue #5172:
/// `format_generated_files = false` does not yet apply to stdin input, so the
/// `//@generated` file is still formatted. See the inline N.B. below.
#[test]
fn stdin_generated_files_issue_5172() {
    init_log();
    let input = Input::Text("//@generated\nfn main() {}".to_owned());
    let mut config = Config::default();
    config.set().emit_mode(EmitMode::Stdout);
    config.set().format_generated_files(false);
    config.set().newline_style(NewlineStyle::Unix);
    let mut buf: Vec<u8> = vec![];
    {
        let mut session = Session::new(config, Some(&mut buf));
        session.format(input).unwrap();
        assert!(session.has_no_errors());
    }
    // N.B. this should be changed once `format_generated_files` is supported with stdin
    assert_eq!(
        String::from_utf8(buf).unwrap(),
        "<stdin>:\n\n//@generated\nfn main() {}\n",
    );
}
/// An over-long line (a 239-char fn name) must surface as a formatting error
/// when `error_on_line_overflow` is enabled.
#[test]
fn format_lines_errors_are_reported() {
    init_log();
    let long_identifier = String::from_utf8(vec![b'a'; 239]).unwrap();
    let input = Input::Text(format!("fn {}() {{}}", long_identifier));
    let mut config = Config::default();
    config.set().error_on_line_overflow(true);
    let mut session = Session::<io::Stdout>::new(config, None);
    session.format(input).unwrap();
    assert!(session.has_formatting_errors());
}
/// Like `format_lines_errors_are_reported`, but the overflow is produced with
/// `hard_tabs(true)` and a shorter (97-char) identifier on an indented line.
#[test]
fn format_lines_errors_are_reported_with_tabs() {
    init_log();
    let long_identifier = String::from_utf8(vec![b'a'; 97]).unwrap();
    let input = Input::Text(format!("fn a() {{\n\t{}\n}}", long_identifier));
    let mut config = Config::default();
    config.set().error_on_line_overflow(true);
    config.set().hard_tabs(true);
    let mut session = Session::<io::Stdout>::new(config, None);
    session.format(input).unwrap();
    assert!(session.has_formatting_errors());
}
/// For each file, run rustfmt and collect the output.
/// Returns the collected reports plus the number of files checked and the
/// number of failures. Files marked `unstable` are skipped off-nightly.
fn check_files(files: Vec<PathBuf>, opt_config: &Option<PathBuf>) -> (Vec<FormatReport>, u32, u32) {
    let mut count = 0;
    let mut fails = 0;
    let mut reports = vec![];
    for file_name in files {
        let sig_comments = read_significant_comments(&file_name);
        if sig_comments.contains_key("unstable") && !is_nightly_channel!() {
            debug!(
                "Skipping '{}' because it requires unstable \
                 features which are only available on nightly...",
                file_name.display()
            );
            continue;
        }
        debug!("Testing '{}'...", file_name.display());
        match idempotent_check(&file_name, opt_config) {
            // Warnings also count as failures.
            Ok(ref report) if report.has_warnings() => {
                print!("{}", FormatReportFormatterBuilder::new(report).build());
                fails += 1;
            }
            Ok(report) => reports.push(report),
            Err(err) => {
                if let IdempotentCheckError::Mismatch(msg) = err {
                    print_mismatches_default_message(msg);
                }
                fails += 1;
            }
        }
        count += 1;
    }
    (reports, count, fails)
}
/// Prints every file's diffs with the default "Mismatch at file:line:" header
/// and restores the terminal color afterwards.
fn print_mismatches_default_message(result: HashMap<PathBuf, Vec<Mismatch>>) {
    for (file_name, diff) in result {
        let mismatch_msg_formatter =
            |line_num| format!("\nMismatch at {}:{}:", file_name.display(), line_num);
        print_diff(diff, &mismatch_msg_formatter, &Default::default());
    }
    // `print_diff` may leave colors active; reset the terminal if we have one.
    if let Some(mut t) = term::stdout() {
        t.reset().unwrap_or(());
    }
}
/// Prints every file's diffs using a caller-supplied header formatter (takes
/// the 1-based line number) and restores the terminal color afterwards.
fn print_mismatches<T: Fn(u32) -> String>(
    result: HashMap<PathBuf, Vec<Mismatch>>,
    mismatch_msg_formatter: T,
) {
    for (_file_name, diff) in result {
        print_diff(diff, &mismatch_msg_formatter, &Default::default());
    }
    // `print_diff` may leave colors active; reset the terminal if we have one.
    if let Some(mut t) = term::stdout() {
        t.reset().unwrap_or(());
    }
}
/// Builds the `Config` for a test file: from its significant comments if any
/// are present, otherwise from a sibling `<file>.toml`; remaining significant
/// comments (except `target`/`config`/`unstable`) are applied as overrides.
fn read_config(filename: &Path) -> Config {
    let sig_comments = read_significant_comments(filename);
    // Look for a config file. If there is a 'config' property in the significant comments, use
    // that. Otherwise, if there are no significant comments at all, look for a config file with
    // the same name as the test file.
    let mut config = if !sig_comments.is_empty() {
        get_config(sig_comments.get("config").map(Path::new))
    } else {
        get_config(filename.with_extension("toml").file_name().map(Path::new))
    };
    for (key, val) in &sig_comments {
        if key != "target" && key != "config" && key != "unstable" {
            config.override_value(key, val);
            if config.is_default(key) {
                warn!("Default value {} used explicitly for {}", val, key);
            }
        }
    }
    // Don't generate warnings for to-do items.
    config.set().report_todo(ReportTactic::Never);
    config
}
/// Formats `filepath` with `config` and returns whether parse errors
/// occurred, the formatted source files, and the format report.
fn format_file<P: Into<PathBuf>>(filepath: P, config: Config) -> (bool, SourceFile, FormatReport) {
    let input = Input::File(filepath.into());
    let mut session = Session::<io::Stdout>::new(config, None);
    let report = session.format(input).unwrap();
    let parsing_errors = session.has_parsing_errors();
    // Move the formatted files out of the session without cloning them.
    let mut source_file = SourceFile::new();
    mem::swap(&mut session.source_file, &mut source_file);
    (parsing_errors, source_file, report)
}
/// Ways in which an idempotence check can fail.
enum IdempotentCheckError {
    /// The formatted output differed from the expected text; maps each
    /// differing file to its diff hunks.
    Mismatch(HashMap<PathBuf, Vec<Mismatch>>),
    /// The input failed to parse, so no comparison was possible.
    Parse,
}
/// Formats `filename` and compares the result against the expected target
/// file(s). Returns the `FormatReport` on success, `Parse` when the input
/// does not parse, or `Mismatch` when output and expectation differ.
fn idempotent_check(
    filename: &PathBuf,
    opt_config: &Option<PathBuf>,
) -> Result<FormatReport, IdempotentCheckError> {
    let sig_comments = read_significant_comments(filename);
    // An explicitly supplied config file takes precedence over comments.
    let config = match opt_config {
        Some(config_file_path) => {
            Config::from_toml_path(config_file_path).expect("`rustfmt.toml` not found")
        }
        None => read_config(filename),
    };
    let (parsing_errors, source_file, format_report) = format_file(filename, config);
    if parsing_errors {
        return Err(IdempotentCheckError::Parse);
    }
    // Only real (on-disk) files are compared against targets.
    let write_result: HashMap<_, _> = source_file
        .into_iter()
        .filter_map(|(filename, text)| match filename {
            FileName::Real(ref path) => Some((path.to_owned(), text)),
            _ => None,
        })
        .collect();
    let target = sig_comments.get("target").map(String::as_str);
    handle_result(write_result, target).map(|_| format_report)
}
// Reads test config file using the supplied (optional) file name. If there's no file name or the
// file doesn't exist, just return the default config. Otherwise, the file must be read
// successfully.
/// Loads the config named by `config_file` from `tests/config/`.
///
/// Returns the default config when no name is given or the file does not
/// exist; otherwise the file must be read and parsed successfully.
fn get_config(config_file: Option<&Path>) -> Config {
    let config_file_name = match config_file {
        None => return Default::default(),
        Some(file_name) => {
            let full_path = Path::new("tests/config/").join(file_name);
            if !full_path.exists() {
                return Default::default();
            }
            full_path
        }
    };
    // `fs::read_to_string` replaces the manual File::open + read_to_string.
    let def_config = fs::read_to_string(&config_file_name).expect("couldn't read config");
    Config::from_toml(&def_config, Path::new("tests/config/")).expect("invalid TOML")
}
// Reads significant comments of the form: `// rustfmt-key: value` into a hash map.
/// Collects `// rustfmt-key: value` significant comments from `file_name`
/// into a map. Later occurrences of a key override earlier ones.
fn read_significant_comments(file_name: &Path) -> HashMap<String, String> {
    let file = fs::File::open(file_name)
        .unwrap_or_else(|_| panic!("couldn't read file {}", file_name.display()));
    let reader = BufReader::new(file);
    let regex =
        regex::Regex::new(r"^\s*//\s*rustfmt-([^:]+):\s*(\S+)").expect("failed creating pattern 1");
    // Matches lines containing significant comments or whitespace.
    let line_regex = regex::Regex::new(r"(^\s*$)|(^\s*//\s*rustfmt-[^:]+:\s*\S+)")
        .expect("failed creating pattern 2");
    let mut significant = HashMap::new();
    for line in reader.lines() {
        let line = line.expect("failed getting line");
        if !line_regex.is_match(&line) {
            continue;
        }
        if let Some(capture) = regex.captures(&line) {
            let key = capture
                .get(1)
                .expect("couldn't unwrap capture")
                .as_str()
                .to_owned();
            let value = capture
                .get(2)
                .expect("couldn't unwrap capture")
                .as_str()
                .to_owned();
            significant.insert(key, value);
        }
    }
    significant
}
// Compares output to input.
// TODO: needs a better name, more explanation.
/// Compares each formatted file against its expected target file, collecting
/// diffs for every mismatch. LF/CRLF differences are ignored (Windows).
fn handle_result(
    result: HashMap<PathBuf, String>,
    target: Option<&str>,
) -> Result<(), IdempotentCheckError> {
    let mut failures = HashMap::new();
    for (file_name, fmt_text) in result {
        // Files under tests/source are compared to their twin in tests/target.
        let target = get_target(&file_name, target);
        let open_error = format!("couldn't open target {:?}", target);
        let mut f = fs::File::open(&target).expect(&open_error);
        let read_error = format!("failed reading target {:?}", target);
        let mut text = String::new();
        f.read_to_string(&mut text).expect(&read_error);
        if string_eq_ignore_newline_repr(&fmt_text, &text) {
            continue;
        }
        let diff = make_diff(&text, &fmt_text, DIFF_CONTEXT_SIZE);
        assert!(
            !diff.is_empty(),
            "Empty diff? Maybe due to a missing a newline at the end of a file?"
        );
        failures.insert(file_name, diff);
    }
    if failures.is_empty() {
        Ok(())
    } else {
        Err(IdempotentCheckError::Mismatch(failures))
    }
}
// Maps source file paths to their target paths.
/// Maps a path under `tests/source/...` to its counterpart under
/// `tests/target/...`. Paths with no `source` component (idempotence and
/// self checks) are returned unchanged. When `target` is given, it replaces
/// the file name of the mapped path.
fn get_target(file_name: &Path, target: Option<&str>) -> PathBuf {
    match file_name
        .components()
        .position(|c| c.as_os_str() == "source")
    {
        Some(n) => {
            let mapped: PathBuf = file_name
                .components()
                .enumerate()
                .map(|(i, c)| {
                    if i == n {
                        std::ffi::OsStr::new("target")
                    } else {
                        c.as_os_str()
                    }
                })
                .collect();
            match target {
                Some(replace_name) => mapped.with_file_name(replace_name),
                None => mapped,
            }
        }
        // This is either an idempotence check or a self check.
        None => file_name.to_owned(),
    }
}
#[test]
fn rustfmt_diff_make_diff_tests() {
    init_log();
    // One changed line ("b" -> "e") with a 3-line context window around it.
    let diff = make_diff("a\nb\nc\nd", "a\ne\nc\nd", 3);
    assert_eq!(
        diff,
        vec![Mismatch {
            line_number: 1,
            line_number_orig: 1,
            lines: vec![
                DiffLine::Context("a".into()),
                DiffLine::Resulting("b".into()),
                DiffLine::Expected("e".into()),
                DiffLine::Context("c".into()),
                DiffLine::Context("d".into()),
            ],
        }]
    );
}
#[test]
fn rustfmt_diff_no_diff_test() {
    init_log();
    // Identical inputs must produce an empty mismatch list.
    let diff = make_diff("a\nb\nc\nd", "a\nb\nc\nd", 3);
    assert_eq!(diff, vec![]);
}
// Compare two strings for equality, treating "\r\n" as equivalent to "\n".
fn string_eq_ignore_newline_repr(left: &str, right: &str) -> bool {
    CharsIgnoreNewlineRepr(left.chars().peekable())
        .eq(CharsIgnoreNewlineRepr(right.chars().peekable()))
}
/// Char iterator adapter that collapses a "\r\n" pair into a single '\n';
/// a lone '\r' is passed through unchanged.
struct CharsIgnoreNewlineRepr<'a>(Peekable<Chars<'a>>);
impl<'a> Iterator for CharsIgnoreNewlineRepr<'a> {
    type Item = char;
    fn next(&mut self) -> Option<char> {
        let c = self.0.next()?;
        if c != '\r' {
            return Some(c);
        }
        match self.0.peek() {
            Some('\n') => {
                // Consume the '\n' and yield it in place of the pair.
                self.0.next();
                Some('\n')
            }
            _ => Some('\r'),
        }
    }
}
#[test]
fn string_eq_ignore_newline_repr_test() {
    init_log();
    assert!(string_eq_ignore_newline_repr("", ""));
    assert!(!string_eq_ignore_newline_repr("", "abc"));
    assert!(!string_eq_ignore_newline_repr("abc", ""));
    // "\r\n" and "\n" compare equal; a lone "\r" only matches itself.
    assert!(string_eq_ignore_newline_repr("a\nb\nc\rd", "a\nb\r\nc\rd"));
    assert!(string_eq_ignore_newline_repr("a\r\n\r\n\r\nb", "a\n\n\nb"));
    assert!(!string_eq_ignore_newline_repr("a\r\nbcd", "a\nbcdefghijk"));
}
/// A temporary file that is removed from disk when the value is dropped.
struct TempFile {
    path: PathBuf,
}
/// Creates a temporary Rust source file containing `fn main() {}` in the
/// test directory (`RUSTFMT_TEST_DIR`, falling back to the current dir).
fn make_temp_file(file_name: &'static str) -> TempFile {
    use std::env::var;
    // Used in the Rust build system.
    let target_dir = var("RUSTFMT_TEST_DIR").unwrap_or_else(|_| ".".to_owned());
    let path = Path::new(&target_dir).join(file_name);
    // `fs::write` replaces the manual File::create + write_all dance.
    std::fs::write(&path, "fn main() {}\n").expect("couldn't write temp file");
    TempFile { path }
}
impl Drop for TempFile {
    fn drop(&mut self) {
        use std::fs::remove_file;
        // Don't panic in `drop`: a panic while already unwinding from a
        // failed test aborts the whole process and hides the real failure.
        if let Err(e) = remove_file(&self.path) {
            eprintln!("couldn't delete temp file {}: {}", self.path.display(), e);
        }
    }
}
/// Locates the `rustfmt` binary next to the test executable.
///
/// # Panics
///
/// Panics when no binary is found (run `cargo build` first).
fn rustfmt() -> PathBuf {
    let mut me = env::current_exe().expect("failed to get current executable");
    // Chop off the test name.
    me.pop();
    // Chop off `deps`.
    me.pop();
    // If we run `cargo test --release`, we might only have a release build.
    // NOTE: `cfg!(release)` was always false -- `release` is not a built-in
    // cfg; release builds are detected by the absence of `debug_assertions`.
    if !cfg!(debug_assertions) {
        // `../release/`
        me.pop();
        me.push("release");
    }
    me.push("rustfmt");
    assert!(
        me.is_file() || me.with_extension("exe").is_file(),
        "{}",
        if !cfg!(debug_assertions) {
            "no rustfmt bin, try running `cargo build --release` before testing"
        } else {
            "no rustfmt bin, try running `cargo build` before testing"
        }
    );
    me
}
#[test]
fn verify_check_works() {
    init_log();
    let temp_file = make_temp_file("temp_check.rs");
    // Smoke test: `rustfmt --check <file>` must at least run to completion.
    Command::new(rustfmt().to_str().unwrap())
        .arg("--check")
        .arg(temp_file.path.to_str().unwrap())
        .status()
        .expect("run with check option failed");
}
#[test]
fn verify_check_works_with_stdin() {
    init_log();
    let mut child = Command::new(rustfmt().to_str().unwrap())
        .arg("--check")
        .stdin(Stdio::piped())
        .stderr(Stdio::piped())
        .spawn()
        .expect("run with check option failed");
    {
        // Feed an already-formatted program via stdin.
        let stdin = child.stdin.as_mut().expect("Failed to open stdin");
        stdin
            .write_all("fn main() {}\n".as_bytes())
            .expect("Failed to write to rustfmt --check");
    }
    let output = child
        .wait_with_output()
        .expect("Failed to wait on rustfmt child");
    assert!(output.status.success());
}
#[test]
fn verify_check_l_works_with_stdin() {
    init_log();
    let mut child = Command::new(rustfmt().to_str().unwrap())
        .arg("--check")
        .arg("-l")
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .spawn()
        .expect("run with check option failed");
    {
        // Feed a badly formatted program so `-l` has something to report.
        let stdin = child.stdin.as_mut().expect("Failed to open stdin");
        stdin
            .write_all("fn main()\n{}\n".as_bytes())
            .expect("Failed to write to rustfmt --check");
    }
    let output = child
        .wait_with_output()
        .expect("Failed to wait on rustfmt child");
    assert!(output.status.success());
    // With stdin input the name reported by `-l` is `<stdin>`.
    assert_eq!(std::str::from_utf8(&output.stdout).unwrap(), "<stdin>\n");
}
| 32.097209 | 100 | 0.590542 |
cc15e5358e3cad488ca834c95a74ca46b72fdbbf | 378 | mod with_bitstring;
use std::convert::TryInto;
use std::sync::Arc;
use proptest::strategy::Just;
use proptest::test_runner::TestCaseResult;
use proptest::{prop_assert, prop_assert_eq};
use liblumen_alloc::erts::process::Process;
use liblumen_alloc::erts::term::prelude::*;
use crate::erlang::binary_part_3::result;
// `without_bitstring_errors_badarg` in integration tests
| 23.625 | 57 | 0.777778 |
e8c74c97d84377ac3653341cbc4fb0172178571c | 6,514 | use kurbo::Affine;
use std::ops::{
Add, AddAssign, Div, DivAssign, Mul, MulAssign, Sub, SubAssign,
};
/// Your typical 2D vector.
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Vector {
    /// Horizontal component.
    pub x: f64,
    /// Vertical component.
    pub y: f64,
}
impl Vector {
    /// Create a new [`Vector`].
    ///
    /// # Panics
    ///
    /// This will panic if `x` or `y` aren't finite.
    pub fn new(x: f64, y: f64) -> Self {
        assert!(x.is_finite(), "Can't create a vector with {}", x);
        assert!(y.is_finite(), "Can't create a vector with {}", y);
        Vector::new_unchecked(x, y)
    }
    /// Create a [`Vector`] without checking that the components are finite.
    pub const fn new_unchecked(x: f64, y: f64) -> Self { Vector { x, y } }
    /// Build a vector from polar coordinates (radius and angle in radians).
    pub fn from_r_theta(radius: f64, angle: f64) -> Self {
        Vector::new(radius * angle.cos(), radius * angle.sin())
    }
    /// The zero vector.
    pub const fn zero() -> Vector { Vector::new_unchecked(0.0, 0.0) }
    /// The unit vector along the x axis.
    pub const fn x_axis() -> Vector { Vector::new_unchecked(1.0, 0.0) }
    /// The unit vector along the y axis.
    pub const fn y_axis() -> Vector { Vector::new_unchecked(0.0, 1.0) }
    /// Euclidean length (magnitude) of the vector.
    pub fn length(self) -> f64 { self.x.hypot(self.y) }
    /// Angle of the vector in radians, measured from the positive x axis.
    pub fn angle(self) -> f64 { f64::atan2(self.y, self.x) }
    /// A vector with the same direction and length 1; the zero vector is
    /// returned unchanged to avoid dividing by zero.
    pub fn unit_vector(self) -> Vector {
        let magnitude = self.length();
        if magnitude == 0.0 {
            Vector::zero()
        } else {
            self / magnitude
        }
    }
    /// Classifies the turn taken going `first` -> `second` -> `third`.
    pub fn orientation(
        first: Vector,
        second: Vector,
        third: Vector,
    ) -> Orientation {
        let value = (second.y - first.y) * (third.x - second.x)
            - (second.x - first.x) * (third.y - second.y);
        if value > 0.0 {
            Orientation::Clockwise
        } else if value < 0.0 {
            Orientation::Anticlockwise
        } else {
            Orientation::Collinear
        }
    }
    /// Dot product of two vectors.
    pub fn dot(left: Vector, right: Vector) -> f64 {
        left.x * right.x + left.y * right.y
    }
    /// 2D cross product (the z component of the 3D cross product).
    pub fn cross(left: Vector, right: Vector) -> f64 {
        left.x * right.y - right.x * left.y
    }
    /// Linear interpolation: `progress` 0.0 gives `start`, 1.0 gives `end`.
    pub fn lerp(start: Vector, end: Vector, progress: f64) -> Vector {
        start + (end - start) * progress
    }
    /// Centre of the circle passing through the three points, or `None`
    /// when the points are collinear.
    pub fn centre_of_three_points(
        first: Vector,
        second: Vector,
        third: Vector,
    ) -> Option<Vector> {
        let temp = Vector::dot(second, second);
        let bc = (Vector::dot(first, first) - temp) / 2.0;
        let cd = (temp - third.x * third.x - third.y * third.y) / 2.0;
        let determinant = (first.x - second.x) * (second.y - third.y)
            - (second.x - third.x) * (first.y - second.y);
        if determinant == 0.0 {
            // the points are collinear
            return None;
        }
        let x = (bc * (second.y - third.y) - cd * (first.y - second.y))
            / determinant;
        let y = ((first.x - second.x) * cd - (second.x - third.x) * bc)
            / determinant;
        Some(Vector::new(x, y))
    }
    /// The vector rotated by `angle` radians about the origin.
    pub fn rotated(self, angle: f64) -> Vector {
        Vector::from_r_theta(self.length(), self.angle() + angle)
    }
}
// Component-wise addition and subtraction.
impl Add for Vector {
    type Output = Vector;
    fn add(self, other: Vector) -> Vector {
        Vector::new(self.x + other.x, self.y + other.y)
    }
}
impl AddAssign for Vector {
    fn add_assign(&mut self, other: Vector) { *self = *self + other; }
}
impl Sub for Vector {
    type Output = Vector;
    fn sub(self, other: Vector) -> Vector {
        Vector::new(self.x - other.x, self.y - other.y)
    }
}
impl SubAssign for Vector {
    fn sub_assign(&mut self, other: Vector) { *self = *self - other; }
}
// Scalar multiplication, supported in both `vector * scalar` and
// `scalar * vector` order.
impl Mul<f64> for Vector {
    type Output = Vector;
    fn mul(self, other: f64) -> Vector {
        Vector::new(self.x * other, self.y * other)
    }
}
impl Mul<Vector> for f64 {
    type Output = Vector;
    fn mul(self, other: Vector) -> Vector { other * self }
}
impl MulAssign<f64> for Vector {
    fn mul_assign(&mut self, other: f64) { *self = *self * other; }
}
// Applies a `kurbo::Affine` transform by round-tripping through
// `kurbo::Point`.
impl Mul<Vector> for Affine {
    type Output = Vector;
    fn mul(self, other: Vector) -> Vector {
        use kurbo::Point;
        let temporary = Point::new(other.x, other.y);
        let Point { x, y } = self * temporary;
        Vector::new(x, y)
    }
}
impl MulAssign<Affine> for Vector {
    fn mul_assign(&mut self, other: Affine) { *self = other.mul(*self); }
}
// Scalar division. Panics unless the divisor `is_normal()` (i.e. it is not
// zero, NaN, infinite, or subnormal).
impl Div<f64> for Vector {
    type Output = Vector;
    fn div(self, other: f64) -> Vector {
        assert!(other.is_normal(), "Unable to divide by {}", other);
        Vector::new_unchecked(self.x / other, self.y / other)
    }
}
impl DivAssign<f64> for Vector {
    fn div_assign(&mut self, other: f64) { *self = *self / other; }
}
/// How something may be oriented.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Orientation {
    /// The three points turn clockwise.
    Clockwise,
    /// The three points turn anticlockwise.
    Anticlockwise,
    /// The three points lie on one line.
    Collinear,
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn add_two_vectors() {
        let left = Vector::new(1.0, 2.0);
        let right = Vector::new(-20.0, 2.5);
        let expected = Vector::new(-19.0, 4.5);
        let got = left + right;
        assert_eq!(got, expected);
    }
    #[test]
    fn subtract_two_vectors() {
        let left = Vector::new(1.0, 2.0);
        let right = Vector::new(-20.0, 2.5);
        let expected = Vector::new(1.0 - -20.0, 2.0 - 2.5);
        let got = left - right;
        assert_eq!(got, expected);
    }
    #[test]
    fn multiply_by_two() {
        let left = Vector::new(-20.0, 2.5);
        let expected = Vector::new(-20.0 * 2.0, 2.5 * 2.0);
        let got = left * 2.0;
        assert_eq!(got, expected);
    }
    #[test]
    fn divide_by_two() {
        let left = Vector::new(-20.0, 2.5);
        let expected = Vector::new(-20.0 / 2.0, 2.5 / 2.0);
        let got = left / 2.0;
        assert_eq!(got, expected);
    }
    // `Div` asserts `is_normal()`, so dividing by zero must panic.
    #[test]
    #[should_panic(expected = "divide by 0")]
    fn divide_by_zero() {
        let left = Vector::new(-20.0, 2.5);
        let _ = left / 0.0;
    }
    #[test]
    fn find_centre_of_three_points() {
        // Three points on the unit circle centred at the origin.
        let a = Vector::new(1.0, 0.0);
        let b = Vector::new(-1.0, 0.0);
        let c = Vector::new(0.0, 1.0);
        let centre = Vector::centre_of_three_points(a, b, c).unwrap();
        assert_eq!(centre, Vector::zero());
    }
    #[test]
    fn find_a_quarter_of_the_way_between_points() {
        let start = Vector::new(0.0, 0.0);
        let end = Vector::new(40.0, 8.0);
        let expected = Vector::new(10.0, 2.0);
        let got = Vector::lerp(start, end, 0.25);
        assert_eq!(got, expected);
    }
}
| 24.674242 | 74 | 0.538532 |
d6dc33c640f0c45e327509334071634c655804b2 | 2,237 | use seed::{prelude::*, *};
use web_sys;
// ------ ------
// Before Mount
// ------ ------
/// Mounts the app onto the `main` element, taking it over entirely.
fn before_mount(_: Url) -> BeforeMount {
    BeforeMount::new()
        .mount_point("main")
        .mount_type(MountType::Takeover)
}
// ------ ------
// Model
// ------ ------
/// Application state: just a click counter.
struct Model {
    clicks: u32,
}
// ------ ------
// After Mount
// ------ ------
/// Builds the initial model; URL handling is disabled here.
fn after_mount(_: Url, _: &mut impl Orders<Msg, GMsg>) -> AfterMount<Model> {
    let model = Model { clicks: 0 };
    AfterMount::new(model).url_handling(UrlHandling::None)
}
// ------ ------
// Routes
// ------ ------
/// Maps every URL change to a `Msg::UrlChanged` message.
fn routes(url: Url) -> Option<Msg> {
    Some(Msg::UrlChanged(url))
}
// ------ ------
// Window Events
// ------ ------
/// Subscribes to window-level keydown events.
fn window_events(_: &Model) -> Vec<EventHandler<Msg>> {
    vec![keyboard_ev(Ev::KeyDown, Msg::KeyPressed)]
}
// ------ ------
// Sink
// ------ ------
/// Global (cross-app) messages.
#[derive(Clone, Copy)]
enum GMsg {
    SayHello,
}
/// Handles global messages; currently just logs a greeting.
fn sink(g_msg: GMsg, _: &mut Model, _: &mut impl Orders<Msg, GMsg>) {
    match g_msg {
        GMsg::SayHello => log!("Hello!"),
    }
}
// ------ ------
// Update
// ------ ------
/// App-local messages.
enum Msg {
    Clicked,
    UrlChanged(Url),
    KeyPressed(web_sys::KeyboardEvent),
    SayHello,
}
/// Advances the model in response to a message.
fn update(msg: Msg, model: &mut Model, orders: &mut impl Orders<Msg, GMsg>) {
    match msg {
        Msg::Clicked => model.clicks += 1,
        Msg::UrlChanged(url) => {
            log!(url);
            // Only log; skip the re-render.
            orders.skip();
        }
        Msg::KeyPressed(event) => {
            log!(event.key());
            orders.skip();
        }
        Msg::SayHello => {
            // Forward to the global message sink.
            orders.send_g_msg(GMsg::SayHello);
        }
    }
}
// ------ ------
// View
// ------ ------
/// Renders the click counter button and the "Say hello" button.
fn view(model: &Model) -> impl View<Msg> {
    vec![
        button![
            format!("Clicked: {}", model.clicks),
            ev(Ev::Click, |_| Msg::Clicked),
        ],
        button!["Say hello", ev(Ev::Click, |_| Msg::SayHello),],
    ]
}
// ------ ------
// Start
// ------ ------
/// Wasm entry point: wires all hooks together and starts the app.
#[wasm_bindgen(start)]
pub fn render() {
    App::builder(update, view)
        .before_mount(before_mount)
        .after_mount(after_mount)
        .routes(routes)
        .window_events(window_events)
        .sink(sink)
        .build_and_start();
}
| 18.957627 | 77 | 0.474743 |
f5eaa9ce4cc1cbdb41641ee28a58a98accce5cc0 | 9,750 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::prelude::v1::*;
use std::sync::{Arc, SgxMutex as Mutex};
use crate::task_file_manager::TaskFileManager;
use teaclave_proto::teaclave_scheduler_service::*;
use teaclave_rpc::endpoint::Endpoint;
use teaclave_types::*;
use teaclave_worker::Worker;
use anyhow::Result;
use uuid::Uuid;
// Base directory for per-task working files on the agent.
static WORKER_BASE_DIR: &str = "/tmp/teaclave_agent/";
/// Execution agent: pulls staged tasks from the scheduler, runs them with a
/// worker, and reports results back.
#[derive(Clone)]
pub(crate) struct TeaclaveExecutionService {
    // Worker used to execute staged functions.
    worker: Arc<Worker>,
    // Shared RPC client to the scheduler service.
    scheduler_client: Arc<Mutex<TeaclaveSchedulerClient>>,
    // Base path handed to `TaskFileManager` for fusion files.
    fusion_base: PathBuf,
}
impl TeaclaveExecutionService {
pub(crate) fn new(
scheduler_service_endpoint: Endpoint,
fusion_base: impl AsRef<Path>,
) -> Result<Self> {
let mut i = 0;
let channel = loop {
match scheduler_service_endpoint.connect() {
Ok(channel) => break channel,
Err(_) => {
anyhow::ensure!(i < 10, "failed to connect to scheduler service");
log::debug!("Failed to connect to scheduler service, retry {}", i);
i += 1;
}
}
std::thread::sleep(std::time::Duration::from_secs(3));
};
let scheduler_client = Arc::new(Mutex::new(TeaclaveSchedulerClient::new(channel)?));
Ok(TeaclaveExecutionService {
worker: Arc::new(Worker::default()),
scheduler_client,
fusion_base: fusion_base.as_ref().to_owned(),
})
}
pub(crate) fn start(&mut self) -> Result<()> {
loop {
std::thread::sleep(std::time::Duration::from_secs(3));
let staged_task = match self.pull_task() {
Ok(staged_task) => staged_task,
Err(e) => {
log::debug!("PullTask Error: {:?}", e);
continue;
}
};
let result = self.invoke_task(&staged_task);
match result {
Ok(_) => log::debug!(
"InvokeTask: {:?}, {:?}, success",
staged_task.user_id,
staged_task.function_id
),
Err(_) => log::debug!(
"InvokeTask: {:?}, {:?}, failure",
staged_task.user_id,
staged_task.function_id
),
}
log::debug!("InvokeTask result: {:?}", result);
match self.update_task_result(&staged_task.task_id, result) {
Ok(_) => (),
Err(e) => {
log::error!("UpdateResult Error: {:?}", e);
continue;
}
}
}
}
fn pull_task(&mut self) -> Result<StagedTask> {
let request = PullTaskRequest {};
let response = self
.scheduler_client
.clone()
.lock()
.map_err(|_| anyhow::anyhow!("Cannot lock scheduler client"))?
.pull_task(request)?;
log::debug!("pull_stask response: {:?}", response);
Ok(response.staged_task)
}
fn invoke_task(&mut self, task: &StagedTask) -> Result<TaskOutputs> {
self.update_task_status(&task.task_id, TaskStatus::Running)?;
let file_mgr = TaskFileManager::new(
WORKER_BASE_DIR,
&self.fusion_base,
&task.task_id,
&task.input_data,
&task.output_data,
)?;
let invocation = prepare_task(&task, &file_mgr)?;
log::debug!("Invoke function: {:?}", invocation);
let worker = Worker::default();
let summary = worker.invoke_function(invocation)?;
let outputs_tag = finalize_task(&file_mgr)?;
let task_outputs = TaskOutputs::new(summary.as_bytes(), outputs_tag);
Ok(task_outputs)
}
fn update_task_result(
&mut self,
task_id: &Uuid,
task_result: Result<TaskOutputs>,
) -> Result<()> {
let request = UpdateTaskResultRequest::new(*task_id, task_result);
let _response = self
.scheduler_client
.clone()
.lock()
.map_err(|_| anyhow::anyhow!("Cannot lock scheduler client"))?
.update_task_result(request)?;
Ok(())
}
fn update_task_status(&mut self, task_id: &Uuid, task_status: TaskStatus) -> Result<()> {
let request = UpdateTaskStatusRequest::new(task_id.to_owned(), task_status);
let _response = self
.scheduler_client
.clone()
.lock()
.map_err(|_| anyhow::anyhow!("Cannot lock scheduler client"))?
.update_task_status(request)?;
Ok(())
}
}
/// Stages the task's input/output files and assembles a `StagedFunction`
/// ready for execution by a worker.
fn prepare_task(task: &StagedTask, file_mgr: &TaskFileManager) -> Result<StagedFunction> {
    let input_files = file_mgr.prepare_staged_inputs()?;
    let output_files = file_mgr.prepare_staged_outputs()?;
    let staged_function = StagedFunctionBuilder::new()
        .executor_type(task.executor_type)
        .executor(task.executor)
        .name(&task.function_name)
        .arguments(task.function_arguments.clone())
        .payload(task.function_payload.clone())
        .input_files(input_files)
        .output_files(output_files)
        .runtime_name("default")
        .build();
    Ok(staged_function)
}
/// Uploads the task's outputs, returning the auth tag of each output file.
fn finalize_task(file_mgr: &TaskFileManager) -> Result<HashMap<String, FileAuthTag>> {
    file_mgr.upload_outputs()
}
#[cfg(feature = "enclave_unit_test")]
pub mod tests {
    use super::*;
    use serde_json::json;
    use std::format;
    use teaclave_crypto::*;
    use url::Url;
    use uuid::Uuid;
    // End-to-end check of the builtin echo function: the summary returned by
    // the worker must be the echoed message.
    pub fn test_invoke_echo() {
        let task_id = Uuid::new_v4();
        let function_arguments =
            FunctionArguments::from_json(json!({"message": "Hello, Teaclave!"})).unwrap();
        let staged_task = StagedTaskBuilder::new()
            .task_id(task_id)
            .executor(Executor::Builtin)
            .function_name("builtin-echo")
            .function_arguments(function_arguments)
            .build();
        let file_mgr = TaskFileManager::new(
            WORKER_BASE_DIR,
            "/tmp/fusion_base",
            &staged_task.task_id,
            &staged_task.input_data,
            &staged_task.output_data,
        )
        .unwrap();
        let invocation = prepare_task(&staged_task, &file_mgr).unwrap();
        let worker = Worker::default();
        let result = worker.invoke_function(invocation);
        if result.is_ok() {
            finalize_task(&file_mgr).unwrap();
        }
        assert_eq!(result.unwrap(), "Hello, Teaclave!");
    }
    // End-to-end check of builtin GBDT training against encrypted fixtures.
    pub fn test_invoke_gbdt_train() {
        let task_id = Uuid::new_v4();
        let function_arguments = FunctionArguments::from_json(json!({
            "feature_size": 4,
            "max_depth": 4,
            "iterations": 100,
            "shrinkage": 0.1,
            "feature_sample_ratio": 1.0,
            "data_sample_ratio": 1.0,
            "min_leaf_size": 1,
            "loss": "LAD",
            "training_optimization_level": 2,
        }))
        .unwrap();
        let fixture_dir = format!(
            "file:///{}/fixtures/functions/gbdt_training",
            env!("TEACLAVE_TEST_INSTALL_DIR")
        );
        let input_url = Url::parse(&format!("{}/train.enc", fixture_dir)).unwrap();
        let output_url = Url::parse(&format!(
            "{}/model-{}.enc.out",
            fixture_dir,
            task_id.to_string()
        ))
        .unwrap();
        // Fixed all-zero key so the fixture's CMAC below stays valid.
        let crypto = TeaclaveFile128Key::new(&[0; 16]).unwrap();
        let input_cmac = FileAuthTag::from_hex("881adca6b0524472da0a9d0bb02b9af9").unwrap();
        let training_input_data = FunctionInputFile::new(input_url, input_cmac, crypto);
        let model_output_data = FunctionOutputFile::new(output_url, crypto);
        let input_data = hashmap!("training_data" => training_input_data);
        let output_data = hashmap!("trained_model" => model_output_data);
        let staged_task = StagedTaskBuilder::new()
            .task_id(task_id)
            .executor(Executor::Builtin)
            .function_name("builtin-gbdt-train")
            .function_arguments(function_arguments)
            .input_data(input_data)
            .output_data(output_data)
            .build();
        let file_mgr = TaskFileManager::new(
            WORKER_BASE_DIR,
            "/tmp/fusion_base",
            &staged_task.task_id,
            &staged_task.input_data,
            &staged_task.output_data,
        )
        .unwrap();
        let invocation = prepare_task(&staged_task, &file_mgr).unwrap();
        let worker = Worker::default();
        let result = worker.invoke_function(invocation);
        if result.is_ok() {
            finalize_task(&file_mgr).unwrap();
        }
        log::debug!("summary: {:?}", result);
        assert!(result.is_ok());
    }
}
| 33.972125 | 93 | 0.575897 |
180154fec1d29ebb9237b057711c31c1da6624ea | 710 | // move_semantics4.rs
// Refactor this code so that instead of having `vec0` and creating the vector
// in `fn main`, we create it within `fn fill_vec` and transfer the
// freshly created vector from fill_vec to its caller.
// Execute `rustlings hint move_semantics4` for hints!
fn main() {
let vec0 = fill_vec();
let mut vec1 = fill_vec();
println!("{} has length {} content `{:?}`", "vec1", vec1.len(), vec1);
vec1.push(88);
println!("{} has length {} content `{:?}`", "vec1", vec1.len(), vec1);
}
// `fill_vec()` no longer takes `vec: Vec<i32>` as argument
/// Creates and returns the vector, transferring ownership to the caller.
fn fill_vec() -> Vec<i32> {
    // The `vec!` macro replaces the manual new + three pushes.
    vec![22, 44, 66]
}
| 24.482759 | 78 | 0.616901 |
e96a011eeff01662f4d77315c2e634b3371b224c | 4,784 | #[cfg(test)]
mod test {
use crate::components::cylinder::*;
use crate::components::position::*;
use crate::helpers::size_helpers::*;
use legion::*;
#[test]
#[should_panic]
fn no_cylinder_component() {
let mut world = World::default();
let cylinder_entity: Entity = world.push((Position {
x: 1.0f32,
y: 1.0f32,
},));
set_cylinder_size(&mut world, &cylinder_entity, 2.0f32, 3.0f32);
if let Some(entry) = world.entry(cylinder_entity) {
let cylinder: Cylinder = *entry.get_component::<Cylinder>().unwrap();
assert_approx_eq!(cylinder.radius, 2.0f32);
assert_approx_eq!(cylinder.height, 3.0f32);
}
}
#[test]
fn reset_radius_and_height() {
let mut world = World::default();
let cylinder_entity: Entity = world.push((Cylinder {
radius: 1.0f32,
height: 1.0f32,
},));
set_cylinder_size(&mut world, &cylinder_entity, 2.0f32, 3.0f32);
if let Some(entry) = world.entry(cylinder_entity) {
let cylinder: Cylinder = *entry.get_component::<Cylinder>().unwrap();
assert_approx_eq!(cylinder.radius, 2.0f32);
assert_approx_eq!(cylinder.height, 3.0f32);
}
}
#[test]
#[should_panic]
fn negative_radius() {
let mut world = World::default();
let cylinder_entity: Entity = world.push((Cylinder {
radius: 1.0f32,
height: 1.0f32,
},));
set_cylinder_size(&mut world, &cylinder_entity, -2.0f32, 3.0f32);
if let Some(entry) = world.entry(cylinder_entity) {
let cylinder: Cylinder = *entry.get_component::<Cylinder>().unwrap();
assert_approx_eq!(cylinder.radius, -2.0f32);
assert_approx_eq!(cylinder.height, 3.0f32);
}
}
#[test]
#[should_panic]
fn negative_height() {
let mut world = World::default();
let cylinder_entity: Entity = world.push((Cylinder {
radius: 1.0f32,
height: 1.0f32,
},));
set_cylinder_size(&mut world, &cylinder_entity, 2.0f32, -3.0f32);
if let Some(entry) = world.entry(cylinder_entity) {
let cylinder: Cylinder = *entry.get_component::<Cylinder>().unwrap();
assert_approx_eq!(cylinder.radius, 2.0f32);
assert_approx_eq!(cylinder.height, -3.0f32);
}
}
#[test]
#[should_panic]
fn negative_radius_and_height() {
let mut world = World::default();
let cylinder_entity: Entity = world.push((Cylinder {
radius: 1.0f32,
height: 1.0f32,
},));
set_cylinder_size(&mut world, &cylinder_entity, -2.0f32, -3.0f32);
if let Some(entry) = world.entry(cylinder_entity) {
let cylinder: Cylinder = *entry.get_component::<Cylinder>().unwrap();
assert_approx_eq!(cylinder.radius, -2.0f32);
assert_approx_eq!(cylinder.height, -3.0f32);
}
}
#[test]
#[should_panic]
fn zero_radius() {
let mut world = World::default();
let cylinder_entity: Entity = world.push((Cylinder {
radius: 1.0f32,
height: 1.0f32,
},));
set_cylinder_size(&mut world, &cylinder_entity, 0.0f32, 3.0f32);
if let Some(entry) = world.entry(cylinder_entity) {
let cylinder: Cylinder = *entry.get_component::<Cylinder>().unwrap();
assert_approx_eq!(cylinder.radius, 0.0f32);
assert_approx_eq!(cylinder.height, 3.0f32);
}
}
#[test]
#[should_panic]
fn zero_height() {
let mut world = World::default();
let cylinder_entity: Entity = world.push((Cylinder {
radius: 1.0f32,
height: 1.0f32,
},));
set_cylinder_size(&mut world, &cylinder_entity, 2.0f32, 0.0f32);
if let Some(entry) = world.entry(cylinder_entity) {
let cylinder: Cylinder = *entry.get_component::<Cylinder>().unwrap();
assert_approx_eq!(cylinder.radius, 2.0f32);
assert_approx_eq!(cylinder.height, 0.0f32);
}
}
#[test]
#[should_panic]
fn zero_radius_and_height() {
let mut world = World::default();
let cylinder_entity: Entity = world.push((Cylinder {
radius: 1.0f32,
height: 1.0f32,
},));
set_cylinder_size(&mut world, &cylinder_entity, 0.0f32, 0.0f32);
if let Some(entry) = world.entry(cylinder_entity) {
let cylinder: Cylinder = *entry.get_component::<Cylinder>().unwrap();
assert_approx_eq!(cylinder.radius, 0.0f32);
assert_approx_eq!(cylinder.height, 0.0f32);
}
}
}
| 33.454545 | 81 | 0.571906 |
21a020e1a52ae762cb70d88d78ef502112d3a86c | 8,498 | use crate::difi_partition::*;
use crate::dual_file::DualFile;
use crate::error::*;
use crate::ivfc_level::IvfcLevel;
use crate::misc::*;
use crate::random_access_file::*;
use crate::signed_file::*;
use crate::sub_file::SubFile;
use byte_struct::*;
use log::*;
use std::rc::Rc;
#[derive(ByteStruct)]
#[byte_struct_le]
/// On-disk DIFF header, read/written (little-endian) at offset 0 of the
/// 0x100-byte header region that starts at file offset 0x100.
struct DiffHeader {
    magic: [u8; 4],   // b"DIFF"
    version: u32,     // 0x30000
    secondary_table_offset: u64,
    primary_table_offset: u64,
    table_size: u64,
    partition_offset: u64,
    partition_size: u64,
    active_table: u8, // selects primary/secondary table (byte at 0x30)
    padding: [u8; 3],
    sha: [u8; 0x20],  // hash over the active table (bytes 0x34..0x54)
    unique_id: u64,
}
/// DIFF container format that contains one DIFI partition.
pub struct Diff {
    // Length of the backing file this container was opened from.
    parent_len: usize,
    // (Optionally signed) view of the 0x100-byte header at offset 0x100.
    header_file: Rc<dyn RandomAccessFile>,
    // Primary/secondary table pair, selected by the header's active byte.
    table_upper: Rc<DualFile>,
    // Hash-verified (IVFC) view over the selected table.
    table_lower: Rc<IvfcLevel>,
    // The single DIFI partition stored in the container.
    partition: Rc<DifiPartition>,
    // Unique ID copied from the header.
    unique_id: u64,
}
/// Byte offsets and lengths computed when laying out a DIFF container.
struct DiffInfo {
    secondary_table_offset: usize,
    primary_table_offset: usize,
    table_len: usize,
    partition_offset: usize,
    partition_len: usize,
    // Total container size (end of the partition).
    end: usize,
}
impl Diff {
fn calculate_info(param: &DifiPartitionParam) -> DiffInfo {
let (descriptor_len, partition_len) = DifiPartition::calculate_size(param);
let partition_align = param.get_align();
let secondary_table_offset = 0x200;
let table_len = descriptor_len;
let primary_table_offset = align_up(secondary_table_offset + table_len, 8);
let partition_offset = align_up(primary_table_offset + table_len, partition_align);
let end = partition_offset + partition_len;
DiffInfo {
secondary_table_offset,
primary_table_offset,
table_len,
partition_offset,
partition_len,
end,
}
}
pub fn calculate_size(param: &DifiPartitionParam) -> usize {
Diff::calculate_info(param).end
}
pub fn format(
file: Rc<dyn RandomAccessFile>,
signer: Option<(Box<dyn Signer>, [u8; 16])>,
param: &DifiPartitionParam,
unique_id: u64,
) -> Result<(), Error> {
file.write(0, &[0; 0x200])?;
let header_file_bare = Rc::new(SubFile::new(file.clone(), 0x100, 0x100)?);
let header_file: Rc<dyn RandomAccessFile> = match signer {
None => header_file_bare,
Some((signer, key)) => Rc::new(SignedFile::new_unverified(
Rc::new(SubFile::new(file.clone(), 0, 0x10)?),
header_file_bare,
signer,
key,
)?),
};
let info = Diff::calculate_info(param);
let header = DiffHeader {
magic: *b"DIFF",
version: 0x30000,
secondary_table_offset: info.secondary_table_offset as u64,
primary_table_offset: info.primary_table_offset as u64,
table_size: info.table_len as u64,
partition_offset: info.partition_offset as u64,
partition_size: info.partition_len as u64,
active_table: 1,
padding: [0; 3],
sha: [0; 0x20],
unique_id,
};
write_struct(header_file.as_ref(), 0, header)?;
let table = Rc::new(IvfcLevel::new(
Rc::new(SubFile::new(header_file.clone(), 0x34, 0x20)?),
Rc::new(SubFile::new(
file.clone(),
info.secondary_table_offset,
info.table_len,
)?),
info.table_len,
)?);
DifiPartition::format(table.as_ref(), param)?;
table.commit()?;
header_file.commit()?;
Ok(())
}
/// Opens an existing DIFF container stored in `file`.
///
/// When `signer` is provided, the header region is accessed through a
/// `SignedFile` backed by the first 0x10 bytes of `file`, which (unlike
/// `format`) verifies the existing signature via `SignedFile::new`.
///
/// # Errors
/// Returns `Error::MagicMismatch` when the header does not read
/// `b"DIFF"` / version 0x30000; propagates any I/O or signature error.
pub fn new(
    file: Rc<dyn RandomAccessFile>,
    signer: Option<(Box<dyn Signer>, [u8; 16])>,
) -> Result<Diff, Error> {
    let parent_len = file.len();
    let header_file_bare = Rc::new(SubFile::new(file.clone(), 0x100, 0x100)?);
    let header_file: Rc<dyn RandomAccessFile> = match signer {
        None => header_file_bare,
        Some((signer, key)) => Rc::new(SignedFile::new(
            Rc::new(SubFile::new(file.clone(), 0, 0x10)?),
            header_file_bare,
            signer,
            key,
        )?),
    };
    let header: DiffHeader = read_struct(header_file.as_ref(), 0)?;
    if header.magic != *b"DIFF" || header.version != 0x30000 {
        error!(
            "Unexpected DIFF magic {:?} {:X}",
            header.magic, header.version
        );
        return make_error(Error::MagicMismatch);
    }
    // `active_table` (header offset 0x30) selects between the two table
    // copies; `sha` (offset 0x34) is the master hash over the active table.
    let table_selector = Rc::new(SubFile::new(header_file.clone(), 0x30, 1)?);
    let table_hash = Rc::new(SubFile::new(header_file.clone(), 0x34, 0x20)?);
    let table_pair: [Rc<dyn RandomAccessFile>; 2] = [
        Rc::new(SubFile::new(
            file.clone(),
            header.primary_table_offset as usize,
            header.table_size as usize,
        )?),
        Rc::new(SubFile::new(
            file.clone(),
            header.secondary_table_offset as usize,
            header.table_size as usize,
        )?),
    ];
    // DualFile picks the active copy; IvfcLevel layers hash verification on top.
    let table_upper = Rc::new(DualFile::new(table_selector, table_pair)?);
    let table_lower = Rc::new(IvfcLevel::new(
        table_hash,
        table_upper.clone(),
        header.table_size as usize,
    )?);
    let partition = Rc::new(SubFile::new(
        file.clone(),
        header.partition_offset as usize,
        header.partition_size as usize,
    )?);
    let partition = Rc::new(DifiPartition::new(table_lower.clone(), partition)?);
    Ok(Diff {
        parent_len,
        header_file,
        table_upper,
        table_lower,
        partition,
        unique_id: header.unique_id,
    })
}
/// Length in bytes of the underlying (parent) file this DIFF was opened on.
pub fn parent_len(&self) -> usize {
    self.parent_len
}
/// Flushes all pending writes, bottom-up: partition data first, then the
/// table hash levels, and finally the (possibly signed) header.
pub fn commit(&self) -> Result<(), Error> {
    self.partition.commit()?;
    self.table_lower.commit()?;
    self.table_upper.commit()?;
    self.header_file.commit()
}
/// The DIFI partition stored inside this DIFF container.
pub fn partition(&self) -> &Rc<DifiPartition> {
    &self.partition
}
/// The `unique_id` field read from / written to the DIFF header.
pub fn unique_id(&self) -> u64 {
    self.unique_id
}
}
#[cfg(test)]
mod test {
    use crate::diff::*;
    use crate::memory_file::MemoryFile;
    use crate::signed_file::test::SimpleSigner;

    /// The on-disk DIFF header must be exactly 0x5C bytes.
    #[test]
    fn struct_size() {
        assert_eq!(DiffHeader::BYTE_LEN, 0x5C);
    }

    /// Checks `Diff::calculate_size` against a table of known
    /// `(data_len, expected_total_len)` pairs recorded in extdiffsize.txt.
    #[test]
    fn format_size() {
        let sample = include_str!("extdiffsize.txt");
        for line in sample.split('\n') {
            if line.is_empty() {
                continue;
            }
            // Each line is "<data_len> <expected_total_len>".
            let lr: Vec<_> = line.split(' ').collect();
            let left = lr[0].parse::<usize>().unwrap();
            let right = lr[1].parse::<usize>().unwrap();
            let param = DifiPartitionParam {
                dpfs_level2_block_len: 128,
                dpfs_level3_block_len: 4096,
                ivfc_level1_block_len: 512,
                ivfc_level2_block_len: 512,
                ivfc_level3_block_len: 4096,
                ivfc_level4_block_len: 4096,
                data_len: left,
                external_ivfc_level4: true,
            };
            // NOTE(review): the previous text contained HTML-entity mojibake
            // (`&param` rendered as `¶m`); restored to valid Rust here.
            assert_eq!(Diff::calculate_size(&param), right);
        }
    }

    /// Round-trip fuzzing: format a signed DIFF in memory, fill it with
    /// random data, then let the shared fuzzer compare random
    /// read/write/commit/reopen sequences against a plain in-memory file.
    #[test]
    fn fuzz() {
        use rand::distributions::Standard;
        use rand::prelude::*;
        let mut rng = rand::thread_rng();
        for _ in 0..10 {
            let signer = Box::new(SimpleSigner::new());
            let key = rng.gen();
            let param = DifiPartitionParam::random();
            let len = param.data_len;
            let parent_len = Diff::calculate_size(&param);
            let parent = Rc::new(MemoryFile::new(vec![0; parent_len]));
            Diff::format(parent.clone(), Some((signer.clone(), key)), &param, 0).unwrap();
            let diff = Diff::new(parent.clone(), Some((signer.clone(), key))).unwrap();
            let init: Vec<u8> = rng.sample_iter(&Standard).take(len).collect();
            diff.partition().write(0, &init).unwrap();
            let plain = MemoryFile::new(init);
            crate::random_access_file::fuzzer(
                diff,
                |diff| diff.partition().as_ref(),
                |diff| diff.commit().unwrap(),
                || Diff::new(parent.clone(), Some((signer.clone(), key))).unwrap(),
                plain,
            );
        }
    }
}
| 30.6787 | 91 | 0.547541 |
f4c9c46900ededee4e2b67d3457d2ece7f4a6e69 | 2,707 | #[doc = "Reader of register PDAC_W0_0_36"]
// svd2rust-generated accessors for the PDAC_W0_0_36 register (32-bit word).
pub type R = crate::R<u32, super::PDAC_W0_0_36>;
#[doc = "Writer for register PDAC_W0_0_36"]
pub type W = crate::W<u32, super::PDAC_W0_0_36>;
#[doc = "Register PDAC_W0_0_36 `reset()`'s with value 0"]
impl crate::ResetValue for super::PDAC_W0_0_36 {
    type Type = u32;
    // The register resets to all-zero.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `D0ACP`"]
pub type D0ACP_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `D0ACP`"]
pub struct D0ACP_W<'a> {
    w: &'a mut W,
}
impl<'a> D0ACP_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    // Field occupies bits 0..=2; only the low 3 bits of `value` are used.
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x07) | ((value as u32) & 0x07);
        self.w
    }
}
#[doc = "Reader of field `D1ACP`"]
pub type D1ACP_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `D1ACP`"]
pub struct D1ACP_W<'a> {
    w: &'a mut W,
}
impl<'a> D1ACP_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    // Field occupies bits 3..=5.
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x07 << 3)) | (((value as u32) & 0x07) << 3);
        self.w
    }
}
#[doc = "Reader of field `D2ACP`"]
pub type D2ACP_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `D2ACP`"]
pub struct D2ACP_W<'a> {
    w: &'a mut W,
}
impl<'a> D2ACP_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    // Field occupies bits 6..=8 (crosses the first byte boundary).
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x07 << 6)) | (((value as u32) & 0x07) << 6);
        self.w
    }
}
#[doc = "Reader of field `EALO`"]
pub type EALO_R = crate::R<u8, u8>;
// Read accessors: each extracts its field from the cached register value.
impl R {
    #[doc = "Bits 0:2 - Domain 0 access control policy"]
    #[inline(always)]
    pub fn d0acp(&self) -> D0ACP_R {
        D0ACP_R::new((self.bits & 0x07) as u8)
    }
    #[doc = "Bits 3:5 - Domain 1 access control policy"]
    #[inline(always)]
    pub fn d1acp(&self) -> D1ACP_R {
        D1ACP_R::new(((self.bits >> 3) & 0x07) as u8)
    }
    #[doc = "Bits 6:8 - Domain 2 access control policy"]
    #[inline(always)]
    pub fn d2acp(&self) -> D2ACP_R {
        D2ACP_R::new(((self.bits >> 6) & 0x07) as u8)
    }
    #[doc = "Bits 24:27 - Excessive Access Lock Owner"]
    // Read-only field: `impl W` below exposes no EALO writer proxy.
    #[inline(always)]
    pub fn ealo(&self) -> EALO_R {
        EALO_R::new(((self.bits >> 24) & 0x0f) as u8)
    }
}
// Write accessors: each returns a proxy that masks the new field value in.
impl W {
    #[doc = "Bits 0:2 - Domain 0 access control policy"]
    #[inline(always)]
    pub fn d0acp(&mut self) -> D0ACP_W {
        D0ACP_W { w: self }
    }
    #[doc = "Bits 3:5 - Domain 1 access control policy"]
    #[inline(always)]
    pub fn d1acp(&mut self) -> D1ACP_W {
        D1ACP_W { w: self }
    }
    #[doc = "Bits 6:8 - Domain 2 access control policy"]
    #[inline(always)]
    pub fn d2acp(&mut self) -> D2ACP_W {
        D2ACP_W { w: self }
    }
}
| 28.197917 | 80 | 0.587736 |
621323d96ad19fb8a14586bb3a199ea316c812a9 | 2,467 | /* tag::catalog[]
end::catalog[] */
use crate::types::*;
use crate::util::*;
use ic_fondue::{ic_manager::IcHandle, prod_tests::ic::InternetComputer};
use ic_registry_subnet_type::SubnetType;
use ic_utils::interfaces::ManagementCanister;
/// Topology under test: one fast single-node system subnet plus one fast
/// single-node application subnet.
pub fn config() -> InternetComputer {
    let ic = InternetComputer::new();
    let ic = ic.add_fast_single_node_subnet(SubnetType::System);
    ic.add_fast_single_node_subnet(SubnetType::Application)
}
/// Tests that query replies can be larger than update replies.
///
/// Installs a canister whose `hi` method appends a 3 MiB reply. Calling it
/// as a query must succeed, while calling the same method as an update must
/// be rejected with `CanisterError` (reply exceeds the update reply limit).
pub fn query_reply_sizes(handle: IcHandle, ctx: &ic_fondue::pot::Context) {
    // A wasm that exports a query function that has a 3MiB reply.
    let wasm = wabt::wat2wasm(
        r#"(module
(import "ic0" "msg_reply" (func $msg_reply))
(import "ic0" "msg_reply_data_append"
(func $msg_reply_data_append (param i32) (param i32)))
(func $hi
(call $msg_reply_data_append (i32.const 0) (i32.const 3145728))
(call $msg_reply))
(memory $memory 48)
(export "memory" (memory $memory))
(export "canister_query hi" (func $hi)))"#,
    )
    .unwrap();
    let mut rng = ctx.rng.clone();
    let rt = tokio::runtime::Runtime::new().expect("Could not create tokio runtime.");
    rt.block_on({
        async move {
            // Pick an application-subnet node and wait until it is ready.
            let endpoint = get_random_application_node_endpoint(&handle, &mut rng);
            endpoint.assert_ready(ctx).await;
            let agent = assert_create_agent(endpoint.url.as_str()).await;
            let mgr = ManagementCanister::create(&agent);
            // Create and install the test canister.
            let canister_id = mgr
                .create_canister()
                .as_provisional_create_with_amount(None)
                .call_and_wait(delay())
                .await
                .expect("Couldn't create canister with provisional API.")
                .0;
            mgr.install_code(&canister_id, &wasm)
                .call_and_wait(delay())
                .await
                .unwrap();
            // Calling the query function as a query succeeds.
            agent.query(&canister_id, "hi").call().await.unwrap();
            // Calling the query function as an update fails because the reply
            // is too big.
            let res = agent
                .update(&canister_id, "hi")
                .call_and_wait(delay())
                .await;
            assert_reject(res, RejectCode::CanisterError);
        }
    })
}
| 36.820896 | 86 | 0.580867 |
2920df043f6d99c90d43004fd71369e6b6fdf6f9 | 5,383 | use cardano_storage::{chain_state, tag, Error};
use exe_common::genesisdata;
use exe_common::network::BlockRef;
use std::sync::Arc;
use iron;
use iron::status;
use iron::{IronResult, Request, Response};
use router::Router;
use super::super::config::Networks;
use super::common;
use std::str::FromStr;
/// Iron handler serving the UTxO set filtered by address for one network.
pub struct Handler {
    // Shared registry of configured networks, looked up per request.
    networks: Arc<Networks>,
}
impl Handler {
    /// Creates a handler that serves UTxO queries over `networks`.
    pub fn new(networks: Arc<Networks>) -> Self {
        // Field-init shorthand (was `networks: networks`).
        Handler { networks }
    }

    /// Registers this handler on `GET :network/utxos/:address`.
    pub fn route(self, router: &mut Router) -> &mut Router {
        router.get(":network/utxos/:address", self, "utxos")
    }
}
/// JSON shape of a single unspent transaction output returned to clients.
#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct Utxo {
    // Id of the transaction that created this output.
    txid: cardano::tx::TxId,
    // Index of the output within that transaction.
    index: u32,
    // Address owning the output (matches the filter address).
    address: cardano::address::ExtendedAddr,
    // Value carried by the output.
    coin: cardano::coin::Coin,
}
impl iron::Handler for Handler {
fn handle(&self, req: &mut Request) -> IronResult<Response> {
let (_, net) = match common::get_network(req, &self.networks) {
None => {
return Ok(Response::with(status::BadRequest));
}
Some(x) => x,
};
let params = req.extensions.get::<router::Router>().unwrap();
let address = params.find("address").unwrap();
let genesis_str = genesisdata::data::get_genesis_data(&net.config.genesis_prev).unwrap();
let genesis_data = genesisdata::parse::parse(genesis_str.as_bytes());
let storage = net.storage.read().unwrap();
let tip = match net.storage.read().unwrap().get_block_from_tag(&tag::HEAD) {
Err(Error::NoSuchTag) => {
return Ok(Response::with((status::NotFound, "No Tip To Serve")));
}
Err(err) => {
error!("error while reading block: {:?}", err);
return Ok(Response::with(status::InternalServerError));
}
Ok(block) => {
let header = block.header();
BlockRef {
hash: header.compute_hash(),
parent: header.previous_header(),
date: header.blockdate(),
}
}
};
let chain_state =
chain_state::restore_chain_state(&storage, &genesis_data, &tip.hash).unwrap();
let filter_address = match cardano::address::ExtendedAddr::from_str(&address) {
Ok(addr) => addr,
Err(_) => return Ok(Response::with((status::BadRequest, "Invalid address"))),
};
let utxos = utxos_by_address(&chain_state.utxos, &filter_address);
let serialized_data = serde_json::to_string(&utxos).unwrap();
let mut response = Response::with((status::Ok, serialized_data));
response.headers.set(iron::headers::ContentType::json());
Ok(response)
}
}
/// Collects every UTxO in `utxos` owned by `address` into the JSON-ready
/// `Utxo` representation.
fn utxos_by_address(
    utxos: &cardano::block::Utxos,
    address: &cardano::address::ExtendedAddr,
) -> Vec<Utxo> {
    utxos
        .iter()
        .filter(|(_, txout)| txout.address == *address)
        .map(|(pointer, txout)| Utxo {
            txid: pointer.id,
            index: pointer.index,
            address: txout.address.clone(),
            coin: txout.value,
        })
        .collect()
}
#[cfg(test)]
mod tests {
    use super::*;
    use cardano::address::ExtendedAddr;
    use cardano::tx::TxOut;
    use cardano::tx::TxoPointer;
    use std::collections::BTreeMap;
    // Base58-encoded extended address used as the filter target in the tests.
    static BASE58_ADDRESS: &str = "DdzFFzCqrhsjcfsReoiHddtt3ih6YusHbNXMTAjCvi5vakqk6sHkXDbMkaYgAbZyiy6hNK4761cF33AaCog93vbwgXGEXKgmA52dhrhJ";
    // All-zero input hashed to produce a deterministic transaction id.
    static BYTES: [u8; 32] = [0u8; 32];
    /// A UTxO owned by the filter address must appear in the result.
    #[test]
    fn filter_existent_address() {
        let mut utxos = BTreeMap::<TxoPointer, TxOut>::new();
        let filter_address = ExtendedAddr::from_str(&BASE58_ADDRESS).unwrap();
        let txid = cardano::hash::Blake2b256::new(&BYTES);
        utxos.insert(
            TxoPointer { id: txid, index: 0 },
            TxOut {
                address: filter_address.clone(),
                value: cardano::coin::Coin::new(1000).unwrap(),
            },
        );
        let res = utxos_by_address(&utxos, &filter_address);
        assert!(res.contains(&Utxo {
            txid: txid,
            index: 0,
            address: ExtendedAddr::from_str(&BASE58_ADDRESS).unwrap(),
            coin: cardano::coin::Coin::new(1000).unwrap(),
        }));
    }
    /// A UTxO owned by a different address must not appear in the result.
    #[test]
    fn filter_inexistent_address() {
        let mut utxos = BTreeMap::<TxoPointer, TxOut>::new();
        let filter_address = ExtendedAddr::from_str(&BASE58_ADDRESS).unwrap();
        let txid = cardano::hash::Blake2b256::new(&BYTES);
        let different_address = "DdzFFzCqrhtD4c7dNAyVG29R64GapneLWUbVTECYywUsc6baB7FatGkTGcLWNj3hZnhXJ1ZD43ZBooiUVnVEGQSmEjrxdAP7YUk8dQze";
        utxos.insert(
            TxoPointer { id: txid, index: 1 },
            TxOut {
                address: ExtendedAddr::from_str(&different_address).unwrap(),
                value: cardano::coin::Coin::new(1000).unwrap(),
            },
        );
        let res = utxos_by_address(&utxos, &filter_address);
        assert!(!res.contains(&Utxo {
            txid: txid,
            index: 0,
            address: ExtendedAddr::from_str(&BASE58_ADDRESS).unwrap(),
            coin: cardano::coin::Coin::new(1000).unwrap(),
        }));
    }
}
| 30.585227 | 141 | 0.567342 |
ab54ca89225084e092d47b228aa5c9697cdb2c77 | 1,622 | // Copyright 2019 The vault713 Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod foreign;
pub mod owner;
pub mod types;
pub use self::foreign::Foreign;
use self::foreign::ForeignCheckMiddlewareFn;
pub use self::owner::Owner;
pub use self::types::*;
use crate::wallet::types::{
NodeVersionInfo, Slate, CURRENT_SLATE_VERSION, GRIN_BLOCK_HEADER_VERSION,
};
use crate::wallet::ErrorKind;
use failure::Error;
/// Version-compatibility middleware for foreign API calls.
///
/// Coinbase building is always permitted. For everything else, the call is
/// rejected with `ErrorKind::Compatibility` when the slate is older than
/// `CURRENT_SLATE_VERSION`, or when its block header version does not match
/// what the node reports (defaulting to 1 when no node info is available).
pub fn check_middleware(
    name: ForeignCheckMiddlewareFn,
    node_version_info: Option<NodeVersionInfo>,
    slate: Option<&Slate>,
) -> Result<(), Error> {
    // Allow coinbases to be built regardless of version information.
    if let ForeignCheckMiddlewareFn::BuildCoinbase = name {
        return Ok(());
    }
    // Default to block header version 1 when the node did not report one.
    let bhv = node_version_info.map_or(1, |n| n.block_header_version);
    if let Some(s) = slate {
        let slate_bhv = s.version_info.block_header_version;
        let incompatible = s.version_info.version < CURRENT_SLATE_VERSION
            || (bhv == 1 && slate_bhv != 1)
            || (bhv > 1 && slate_bhv < GRIN_BLOCK_HEADER_VERSION);
        if incompatible {
            return Err(ErrorKind::Compatibility.into());
        }
    }
    Ok(())
}
| 30.037037 | 84 | 0.713317 |
5d9ab0873a977fae48c70d7bcbc9ccacf4ec27c7 | 8,846 | use crate::peer_store::Behaviour;
use crate::protocol::Protocol;
use crate::protocol_service::ProtocolService;
use crate::transport::TransportOutput;
use crate::Network;
use crate::PeerId;
use ckb_time::now_ms;
use futures::future::{self, Future};
use futures::stream::FuturesUnordered;
use futures::Stream;
use libp2p::core::Multiaddr;
use libp2p::core::SwarmController;
use libp2p::core::{upgrade, MuxedTransport};
use libp2p::{self, ping};
use log::{trace, warn};
use std::boxed::Box;
use std::io::{Error as IoError, ErrorKind as IoErrorKind};
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::timer::{Interval, Timeout};
/// Periodically pings connected peers and records round-trip behaviour.
pub struct PingService {
    // Interval between successive ping rounds.
    ping_interval: Duration,
    // How long to wait for a pong before treating the ping as failed.
    ping_timeout: Duration,
}
impl PingService {
    /// Creates a ping service that pings every `ping_interval` and treats a
    /// pong taking longer than `ping_timeout` as a failure.
    pub fn new(ping_interval: Duration, ping_timeout: Duration) -> Self {
        Self {
            ping_interval,
            ping_timeout,
        }
    }

    /// Wraps a libp2p ping upgrade output into our `Protocol` enum, tagging
    /// it with the peer it belongs to.
    fn ping_to_protocol<T>(peer_id: PeerId, output: ping::PingOutput) -> Protocol<T> {
        match output {
            ping::PingOutput::Pinger { pinger, processing } => {
                Protocol::Ping(pinger, processing, peer_id)
            }
            ping::PingOutput::Ponger(processing) => Protocol::Pong(processing, peer_id),
        }
    }
}
impl<T: Send> ProtocolService<T> for PingService {
    type Output = ping::PingOutput;

    /// Tags a raw ping upgrade output with the peer it came from.
    fn convert_to_protocol(
        peer_id: Arc<PeerId>,
        _addr: &Multiaddr,
        output: Self::Output,
    ) -> Protocol<T> {
        Self::ping_to_protocol(PeerId::clone(&peer_id), output)
    }

    /// Drives a single ping/pong exchange to completion.
    fn handle(
        &self,
        network: Arc<Network>,
        protocol: Protocol<T>,
    ) -> Box<Future<Item = (), Error = IoError> + Send> {
        match protocol {
            // Remote pinged us: just run the responder future.
            Protocol::Pong(processing, _peer_id) => {
                Box::new(processing) as Box<Future<Item = _, Error = _> + Send>
            }
            Protocol::Ping(pinger, processing, peer_id) => {
                match network.get_peer_pinger(&peer_id) {
                    Some(pinger_loader) => {
                        // ping and store pinger
                        Box::new(pinger_loader.tie_or_passthrough(pinger, processing))
                            as Box<Future<Item = _, Error = _> + Send>
                    }
                    // The peer went away before we could attach its pinger.
                    None => Box::new(future::err(IoError::new(
                        IoErrorKind::Other,
                        "ping protocol can't find peer",
                    ))) as Box<Future<Item = _, Error = _> + Send>,
                }
            }
            // Other protocol variants are not handled by the ping service.
            _ => Box::new(future::ok(())) as Box<Future<Item = _, Error = _> + Send>,
        }
    }

    /// Periodically pings all known peers.
    ///
    /// Builds a future that, every `ping_interval` (first round after 5 s),
    /// dials each peer's first known address over the ping-upgraded
    /// transport, waits at most `ping_timeout` for the pong, then records
    /// either the round-trip time (`Behaviour::Ping`) or a failure
    /// (`Behaviour::FailedToPing`, dropping the peer).
    fn start_protocol<SwarmTran, Tran, TranOut>(
        &self,
        network: Arc<Network>,
        swarm_controller: SwarmController<
            SwarmTran,
            Box<Future<Item = (), Error = IoError> + Send>,
        >,
        transport: Tran,
    ) -> Box<Future<Item = (), Error = IoError> + Send>
    where
        SwarmTran: MuxedTransport<Output = Protocol<T>> + Clone + Send + 'static,
        SwarmTran::MultiaddrFuture: Send + 'static,
        SwarmTran::Dial: Send,
        SwarmTran::Listener: Send,
        SwarmTran::ListenerUpgrade: Send,
        SwarmTran::Incoming: Send,
        SwarmTran::IncomingUpgrade: Send,
        Tran: MuxedTransport<Output = TransportOutput<TranOut>> + Clone + Send + 'static,
        Tran::MultiaddrFuture: Send + 'static,
        Tran::Dial: Send,
        Tran::Listener: Send,
        Tran::ListenerUpgrade: Send,
        Tran::Incoming: Send,
        Tran::IncomingUpgrade: Send,
        TranOut: AsyncRead + AsyncWrite + Send + 'static,
    {
        // Upgrade every dialed connection with the libp2p ping protocol and
        // tag the result with the peer id.
        let transport = transport.and_then(move |out, endpoint, client_addr| {
            let peer_id = out.peer_id;
            upgrade::apply(out.socket, libp2p::ping::Ping, endpoint, client_addr)
                .map(move |(out, addr)| (Self::ping_to_protocol(peer_id, out), addr))
        });
        let periodic_ping_future = Interval::new(
            Instant::now() + Duration::from_secs(5),
            self.ping_interval,
        )
        .map_err(|err| IoError::new(IoErrorKind::Other, err))
        .for_each({
            let network = Arc::clone(&network);
            let transport = transport.clone();
            let ping_timeout = self.ping_timeout;
            move |_| {
                let mut ping_futures = FuturesUnordered::new();
                // build ping future for each peer
                for peer_id in network.peers() {
                    let peer_id = peer_id.clone();
                    // only ping first address?
                    if let Some(addr) = network.get_peer_addresses(&peer_id).get(0) {
                        if let Some(pinger_loader) = network.get_peer_pinger(&peer_id) {
                            let ping_future = pinger_loader
                                .dial(&swarm_controller, &addr, transport.clone())
                                .and_then({
                                    let peer_id = peer_id.clone();
                                    move |mut pinger| {
                                        pinger.ping().map(|_| peer_id).map_err(|err| {
                                            IoError::new(
                                                IoErrorKind::Other,
                                                format!("pinger error {}", err),
                                            )
                                        })
                                    }
                                });
                            let ping_start_time = now_ms();
                            // Wrap the ping in a timeout; on completion record
                            // the outcome in the peer store.
                            let ping_future =
                                Future::then(Timeout::new(ping_future, ping_timeout), {
                                    let network = Arc::clone(&network);
                                    move |result| -> Result<(), IoError> {
                                        let mut peer_store = network.peer_store().write();
                                        match result {
                                            Ok(peer_id) => {
                                                let now = now_ms();
                                                let ping = now - ping_start_time;
                                                network.modify_peer(&peer_id, |peer| {
                                                    peer.ping = Some(ping);
                                                    peer.last_ping_time = Some(now);
                                                });
                                                peer_store.report(&peer_id, Behaviour::Ping);
                                                trace!(
                                                    target: "network",
                                                    "received pong from {:?} in {:?}",
                                                    peer_id,
                                                    ping
                                                );
                                                Ok(())
                                            }
                                            Err(err) => {
                                                peer_store
                                                    .report(&peer_id, Behaviour::FailedToPing);
                                                network.drop_peer(&peer_id);
                                                trace!(
                                                    target: "network",
                                                    "error when send ping to {:?}, error: {:?}",
                                                    peer_id,
                                                    err
                                                );
                                                // Errors are swallowed so one bad
                                                // peer doesn't stop the round.
                                                Ok(())
                                            }
                                        }
                                    }
                                });
                            ping_futures
                                .push(Box::new(ping_future)
                                    as Box<Future<Item = _, Error = _> + Send>);
                        }
                    }
                }
                Box::new(
                    ping_futures
                        .into_future()
                        .map(|_| ())
                        .map_err(|(err, _)| err),
                ) as Box<Future<Item = _, Error = _> + Send>
            }
        })
        .then(|err| {
            warn!(target: "network", "Ping service stopped, reason: {:?}", err);
            err
        });
        Box::new(periodic_ping_future) as Box<Future<Item = _, Error = _> + Send>
    }
}
| 43.576355 | 96 | 0.419173 |
5d875a5a1bf1f5ee7e371297f3f82067707c6352 | 546 | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(windows_subsystem)]
#![windows_subsystem = "windows"]
// Empty entry point: this file only exercises the crate-level
// `windows_subsystem` attributes above.
fn main() {}
| 36.4 | 68 | 0.736264 |
3318b6c6e30ee824025352e1ddf3fe39375d6af5 | 13,686 | // Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Compute line wrapping breaks for text.
use time;
use xi_rope::rope::{LinesMetric, Rope, RopeInfo};
use xi_rope::tree::Cursor;
use xi_rope::interval::Interval;
use xi_rope::breaks::{Breaks, BreakBuilder, BreaksBaseMetric};
use xi_rope::spans::Spans;
use xi_trace::trace_block;
use xi_unicode::LineBreakLeafIter;
use styles::Style;
use client::Client;
use width_cache::{Token, WidthCache};
/// Cursor that walks a rope yielding successive line-break opportunities.
struct LineBreakCursor<'a> {
    // Position within the rope, advanced leaf by leaf.
    inner: Cursor<'a, RopeInfo>,
    // Stateful line-break iterator (xi_unicode) over the current leaf.
    lb_iter: LineBreakLeafIter,
    // Last byte of the previously exhausted leaf; used by `next` to report
    // the end-of-text break as hard when the text ends in a newline.
    last_byte: u8,
}
impl<'a> LineBreakCursor<'a> {
    /// Creates a cursor positioned at byte offset `pos` in `text`.
    fn new(text: &'a Rope, pos: usize) -> LineBreakCursor<'a> {
        let inner = Cursor::new(text, pos);
        let lb_iter = match inner.get_leaf() {
            Some((s, offset)) if !s.is_empty() =>
                LineBreakLeafIter::new(s.as_str(), offset),
            _ => LineBreakLeafIter::default()
        };
        LineBreakCursor {
            inner,
            lb_iter,
            last_byte: 0,
        }
    }

    // position and whether break is hard; up to caller to stop calling after EOT
    fn next(&mut self) -> (usize, bool) {
        let mut leaf = self.inner.get_leaf();
        loop {
            match leaf {
                Some((s, offset)) => {
                    let (next, hard) = self.lb_iter.next(s.as_str());
                    // A break strictly inside this leaf: convert the
                    // leaf-relative offset back to a rope offset.
                    if next < s.len() {
                        return (self.inner.pos() - offset + next, hard);
                    }
                    // Leaf exhausted: remember its last byte (newline
                    // detection at EOT) and move on to the next leaf.
                    if !s.is_empty() {
                        self.last_byte = s.as_bytes()[s.len() - 1];
                    }
                    leaf = self.inner.next_leaf();
                }
                // A little hacky but only reports last break as hard if final newline
                None => return (self.inner.pos(), self.last_byte == b'\n')
            }
        }
    }
}
/// Computes line breaks for the whole of `text` by greedy word wrap.
///
/// Widths are measured as byte-offset differences between break
/// opportunities (not rendered width) and compared against `cols`. Hard
/// breaks (newlines) always end a line. Timing is reported on stderr.
pub fn linewrap(text: &Rope, cols: usize) -> Breaks {
    let start_time = time::now();
    let mut lb_cursor = LineBreakCursor::new(text, 0);
    let mut builder = BreakBuilder::new();
    let mut last_pos = 0;
    let mut last_break_pos = 0;
    // Width (in bytes) accumulated on the current line so far.
    let mut width = 0;
    loop {
        let (pos, hard) = lb_cursor.next();
        let word_width = pos - last_pos;
        // Word would overflow the line: break before it (soft break).
        if width > 0 && width + word_width > cols {
            builder.add_break(width);
            //eprintln!("soft break {}", width);
            last_break_pos += width;
            width = 0;
        }
        width += word_width;
        if hard {
            builder.add_break(width);
            //eprintln!("hard break {}", width);
            last_break_pos += width;
            width = 0;
        }
        last_pos = pos;
        if pos == text.len() { break; }
    }
    // Trailing text after the last break carries no break of its own.
    builder.add_no_break(text.len() - last_break_pos);
    let result = builder.build();
    let time_ms = (time::now() - start_time).num_nanoseconds().unwrap() as f64 * 1e-6;
    eprintln!("time to wrap {} bytes: {:.2}ms", text.len(), time_ms);
    result
}
/// Incrementally recomputes breaks after an edit.
///
/// `iv` is the edited interval in the *old* text and `newsize` the byte
/// length of the replacement, so `text` is the string _after_ editing.
/// Rewrapping starts one break before the edited line (an edit can
/// invalidate the break at the end of the previous line) and stops as soon
/// as a newly computed break lands exactly on a surviving old break.
pub fn rewrap(breaks: &mut Breaks, text: &Rope, iv: Interval, newsize: usize, cols: usize) {
    let (edit_iv, new_breaks) = {
        let start_time = time::now();
        let (start, end) = iv.start_end();
        let mut bk_cursor = Cursor::new(breaks, start);
        // start of range to invalidate
        let mut inval_start = bk_cursor.prev::<BreaksBaseMetric>().unwrap_or(0);
        if inval_start > 0 {
            // edit on this line can invalidate break at end of previous
            inval_start = bk_cursor.prev::<BreaksBaseMetric>().unwrap_or(0);
        }
        bk_cursor.set(end);
        // compute end position in edited rope
        let mut inval_end = bk_cursor.next::<BreaksBaseMetric>().map_or(text.len(), |pos|
            pos - (end - start) + newsize);
        let mut lb_cursor = LineBreakCursor::new(text, inval_start);
        let mut builder = BreakBuilder::new();
        let mut last_pos = inval_start;
        let mut last_break_pos = inval_start;
        let mut width = 0;
        loop {
            let (pos, hard) = lb_cursor.next();
            let word_width = pos - last_pos;
            // Soft break: word would overflow the column limit.
            if width > 0 && width + word_width > cols {
                builder.add_break(width);
                last_break_pos += width;
                width = 0;
                // Advance the old-break frontier (mapped into post-edit
                // coordinates) until it is at or past our new break.
                while last_break_pos > inval_end {
                    inval_end = bk_cursor.next::<BreaksBaseMetric>().map_or(text.len(), |pos|
                        pos - (end - start) + newsize);
                }
                // Converged with an existing break: done.
                if last_break_pos == inval_end {
                    break;
                }
            }
            width += word_width;
            if hard {
                // TODO: DRY
                builder.add_break(width);
                last_break_pos += width;
                width = 0;
                while last_break_pos > inval_end {
                    inval_end = bk_cursor.next::<BreaksBaseMetric>().map_or(text.len(), |pos|
                        pos - (end - start) + newsize);
                }
                if last_break_pos == inval_end {
                    break;
                }
            }
            last_pos = pos;
            if pos == text.len() {
                break;
            }
        }
        builder.add_no_break(inval_end - last_break_pos);
        let time_ms = (time::now() - start_time).num_nanoseconds().unwrap() as f64 * 1e-6;
        eprintln!("time to wrap {} bytes: {:.2}ms (not counting build+edit)",
                  inval_end - inval_start, time_ms);
        // The replaced interval is expressed in pre-edit break coordinates.
        (Interval::new_open_closed(inval_start, inval_end + (end - start) - newsize), builder.build())
    };
    breaks.edit(edit_iv, new_breaks);
}
/// A potential opportunity to insert a break. In this representation, the widths
/// have been requested (in a batch request) but are not necessarily known until
/// the request is issued.
struct PotentialBreak {
    /// The offset within the text of the end of the word.
    pos: usize,
    /// A token referencing the width of the word, to be resolved in the width cache.
    tok: Token,
    /// Whether the break is a hard break or a soft break.
    hard: bool,
}

// State for a rewrap in progress
struct RewrapCtx<'a> {
    // Text being wrapped.
    text: &'a Rope,
    // Cursor yielding successive break opportunities in `text`.
    lb_cursor: LineBreakCursor<'a>,
    // Offset up to which potential breaks have been requested.
    lb_cursor_pos: usize,
    // Cache used to resolve word-width tokens.
    width_cache: &'a mut WidthCache,
    // Client used to issue batched width-measurement requests.
    client: &'a Client,
    // Buffered potential breaks whose widths were requested in one batch.
    pot_breaks: Vec<PotentialBreak>,
    pot_break_ix: usize, // index within pot_breaks
    max_offset: usize, // offset of maximum break (ie hard break following edit)
    max_width: f64,
}

// This constant should be tuned so that the RPC takes about 1ms. Less than that,
// RPC overhead becomes significant. More than that, interactivity suffers.
const MAX_POT_BREAKS: usize = 10_000;
impl<'a> RewrapCtx<'a> {
    /// Creates a rewrap context covering `[start..end)` of `text`.
    fn new(text: &'a Rope, /* _style_spans: &Spans<Style>, */ client: &'a Client,
           max_width: f64, width_cache: &'a mut WidthCache, start: usize, end: usize) -> RewrapCtx<'a>
    {
        let lb_cursor_pos = start;
        let lb_cursor = LineBreakCursor::new(text, start);
        RewrapCtx {
            text,
            lb_cursor,
            lb_cursor_pos,
            width_cache,
            client,
            pot_breaks: Vec::new(),
            pot_break_ix: 0,
            max_offset: end,
            max_width,
        }
    }

    /// Refills `pot_breaks` with up to `MAX_POT_BREAKS` break opportunities
    /// and issues one batched width request for all the new words.
    fn refill_pot_breaks(&mut self) {
        let style_id = 2; // TODO: derive from style spans rather than assuming.
        let mut req = self.width_cache.batch_req();
        self.pot_breaks.clear();
        self.pot_break_ix = 0;
        let mut pos = self.lb_cursor_pos;
        while pos < self.max_offset && self.pot_breaks.len() < MAX_POT_BREAKS {
            let (next, hard) = self.lb_cursor.next();
            // TODO: avoid allocating string
            let word = self.text.slice_to_string(pos, next);
            let tok = req.request(style_id, &word);
            pos = next;
            self.pot_breaks.push(PotentialBreak { pos, tok, hard });
        }
        req.issue(self.client).unwrap();
        self.lb_cursor_pos = pos;
    }

    /// Compute the next break, assuming `start` is a valid break.
    ///
    /// Invariant: `start` corresponds to the start of the word referenced by `pot_break_ix`.
    fn wrap_one_line(&mut self, start: usize) -> Option<usize> {
        let mut line_width = 0.0;
        let mut pos = start;
        while pos < self.max_offset {
            if self.pot_break_ix >= self.pot_breaks.len() {
                self.refill_pot_breaks();
            }
            let pot_break = &self.pot_breaks[self.pot_break_ix];
            // Hard breaks always end the line.
            if pot_break.hard {
                self.pot_break_ix += 1;
                return Some(pot_break.pos);
            }
            let width = self.width_cache.resolve(pot_break.tok);
            // A single word wider than the line gets a line of its own.
            if line_width == 0.0 && width >= self.max_width {
                self.pot_break_ix += 1;
                return Some(pot_break.pos);
            }
            line_width += width;
            // Overflow: break *before* this word, leaving it for next line.
            if line_width > self.max_width {
                return Some(pos);
            }
            self.pot_break_ix += 1;
            pos = pot_break.pos;
        }
        None
    }
}
/// Wrap the text (in batch mode) using real width measurement.
///
/// Wraps the entire rope from offset 0, issuing batched width requests via
/// `client`, and returns the resulting break set.
pub fn linewrap_width(text: &Rope, width_cache: &mut WidthCache,
                      _style_spans: &Spans<Style>, client: &Client,
                      max_width: f64) -> Breaks
{
    let total_len = text.len();
    let mut ctx = RewrapCtx::new(text, /* style_spans, */ client,
                                 max_width, width_cache, 0, total_len);
    let mut builder = BreakBuilder::new();
    let mut line_start = 0;
    loop {
        match ctx.wrap_one_line(line_start) {
            Some(line_end) => {
                builder.add_break(line_end - line_start);
                line_start = line_end;
            }
            None => break,
        }
    }
    builder.add_no_break(total_len - line_start);
    builder.build()
}
/// Compute a new chunk of breaks after an edit. Returns new breaks to replace
/// the old ones. The interval [start..end] represents a frontier.
///
/// Wrapping restarts at `start` and continues until a newly computed break
/// coincides with a surviving old break (the two streams converge) or the
/// end of the text is reached.
fn compute_rewrap_width(text: &Rope, width_cache: &mut WidthCache,
                        /* style_spans: &Spans<Style>, */ client: &Client,
                        max_width: f64, breaks: &Breaks,
                        start: usize, end: usize) -> Breaks
{
    let mut line_cursor = Cursor::new(&text, end);
    // Measure up to the end of the line containing `end`: a mid-line edit
    // can move breaks anywhere up to the next hard break.
    let measure_end = if line_cursor.is_boundary::<LinesMetric>() {
        end
    } else {
        line_cursor.next::<LinesMetric>().unwrap_or(text.len())
    };
    let mut ctx = RewrapCtx::new(text, /* style_spans, */ client, max_width,
                                 width_cache, start, measure_end);
    let mut builder = BreakBuilder::new();
    let mut pos = start;
    let mut break_cursor = Cursor::new(&breaks, end);
    // TODO: factor this into `at_or_next` method on cursor.
    let mut next_break = if break_cursor.is_boundary::<BreaksBaseMetric>() {
        Some(end)
    } else {
        break_cursor.next::<BreaksBaseMetric>()
    };
    loop {
        // iterate newly computed breaks and existing breaks until they converge
        if let Some(new_next) = ctx.wrap_one_line(pos) {
            while let Some(old_next) = next_break {
                if old_next >= new_next {
                    break;
                }
                next_break = break_cursor.next::<BreaksBaseMetric>();
            }
            // TODO: we might be able to tighten the logic, avoiding this last break,
            // in some cases (resulting in a smaller delta).
            builder.add_break(new_next - pos);
            if let Some(old_next) = next_break {
                if new_next == old_next {
                    // Breaking process has converged.
                    break;
                }
            }
            pos = new_next;
        } else {
            // EOF
            builder.add_no_break(text.len() - pos);
            break;
        }
    }
    // Idiom: tail expression instead of an explicit `return` (clippy
    // `needless_return`).
    builder.build()
}
/// Re-wraps after an edit, using real width measurement.
///
/// `iv` is the edited interval in the old text and `newsize` the byte length
/// of the replacement. `breaks` is first flattened across the edited region
/// so it aligns with `text`, then the affected span (starting at the
/// beginning of the edited paragraph) is recomputed and patched in place.
pub fn rewrap_width(breaks: &mut Breaks, text: &Rope,
                    width_cache: &mut WidthCache, // _style_spans: &Spans<Style>,
                    client: &Client, iv: Interval, newsize: usize, max_width: f64)
{
    let _t = trace_block("linewrap::rewrap_width", &["core"]);
    // First, remove any breaks in edited section.
    let mut builder = BreakBuilder::new();
    builder.add_no_break(newsize);
    breaks.edit(iv, builder.build());
    // At this point, breaks is aligned with text.
    let mut start = iv.start();
    let end = start + newsize;
    // [start..end] is edited range in text
    // Find a point earlier than any possible breaks change. For simplicity, this is the
    // beginning of the paragraph, but going back two breaks would be better.
    let mut cursor = Cursor::new(&text, start);
    if !cursor.is_boundary::<LinesMetric>() {
        start = cursor.prev::<LinesMetric>().unwrap_or(0);
    }
    let new_breaks = compute_rewrap_width(text, width_cache, /* style_spans, */
                                          client, max_width, breaks,
                                          start, end);
    let edit_iv = Interval::new_open_closed(start, start + new_breaks.len());
    breaks.edit(edit_iv, new_breaks);
}
| 37.291553 | 102 | 0.564299 |
dd456004c02158700e0a7ba3d49e1aab2a1489a8 | 4,875 | use std::collections::HashMap;
use std::str::FromStr;
use async_graphql::scalar;
use indradb::{EdgeKey, EdgeProperties, Type};
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
use uuid::Uuid;
use crate::{MutablePropertyInstanceSetter, PropertyInstanceGetter};
/// Relation instances are edges from an outbound entity instance to an
/// inbound entity instance.
///
/// The relation instance is of a relation type. The relation type defines
/// the entity types of the outbound entity instance and the inbound entity
/// instance. Furthermore the relation type defines which properties
/// (name, data type, socket type) a relation instance has to have.
///
/// In contrast to the relation type, the relation instance stores values/
/// documents in its properties.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct RelationInstance {
    /// The id of the outbound vertex.
    pub outbound_id: Uuid,
    /// The name of the relation type.
    ///
    /// Also accepted under the key `type` when deserializing (serde alias).
    #[serde(alias = "type")]
    pub type_name: String,
    /// The id of the inbound vertex.
    pub inbound_id: Uuid,
    /// Textual description of the relation instance.
    ///
    /// Defaults to the empty string when missing from the input.
    #[serde(default = "String::new")]
    pub description: String,
    /// The properties of the relation instance.
    ///
    /// Each property is represented by its name (String) and its value. The value is
    /// a representation of a JSON. Therefore the value can be boolean, number, string,
    /// array or an object. For more information about the data types please look at
    /// https://docs.serde.rs/serde_json/value/enum.Value.html
    ///
    /// Defaults to an empty map when missing from the input.
    #[serde(default = "HashMap::new")]
    pub properties: HashMap<String, Value>,
}
scalar!(RelationInstance);
impl RelationInstance {
    /// Creates a new relation instance of type `type_name` from the outbound
    /// entity instance to the inbound entity instance, with the given
    /// properties and an empty description.
    pub fn new(
        outbound_id: Uuid,
        type_name: String,
        inbound_id: Uuid,
        properties: HashMap<String, Value>,
    ) -> RelationInstance {
        RelationInstance {
            outbound_id,
            type_name,
            inbound_id,
            description: String::new(),
            properties,
        }
    }

    /// Returns the edge key (outbound id, type, inbound id) that identifies
    /// this relation instance in the graph database, or `None` if `type_name`
    /// is not a valid `indradb::Type`.
    pub fn get_key(&self) -> Option<EdgeKey> {
        // `Type::from_str` validates the type name; propagate failure as
        // `None` instead of the original `is_ok()` / `unwrap()` combination.
        Type::from_str(self.type_name.as_str())
            .ok()
            .map(|t| EdgeKey::new(self.outbound_id, t, self.inbound_id))
    }
}
impl From<EdgeProperties> for RelationInstance {
fn from(properties: EdgeProperties) -> Self {
let outbound_id = properties.edge.key.outbound_id.clone();
let type_name = properties.edge.key.t.0.clone();
let inbound_id = properties.edge.key.inbound_id.clone();
let properties: HashMap<String, Value> = properties
.props
.iter()
.map(|p| (p.name.clone(), p.value.clone()))
.collect();
RelationInstance {
outbound_id,
type_name,
inbound_id,
description: String::new(),
properties,
}
}
}
impl PropertyInstanceGetter for RelationInstance {
fn get<S: Into<String>>(&self, property_name: S) -> Option<Value> {
self.properties
.get(&property_name.into())
.and_then(|v| Some(v.clone()))
}
fn as_bool<S: Into<String>>(&self, property_name: S) -> Option<bool> {
self.properties
.get(&property_name.into())
.and_then(|p| p.as_bool())
}
fn as_u64<S: Into<String>>(&self, property_name: S) -> Option<u64> {
self.properties
.get(&property_name.into())
.and_then(|p| p.as_u64())
}
fn as_i64<S: Into<String>>(&self, property_name: S) -> Option<i64> {
self.properties
.get(&property_name.into())
.and_then(|p| p.as_i64())
}
fn as_f64<S: Into<String>>(&self, property_name: S) -> Option<f64> {
self.properties
.get(&property_name.into())
.and_then(|p| p.as_f64())
}
fn as_string<S: Into<String>>(&self, property_name: S) -> Option<String> {
self.properties
.get(&property_name.into())
.and_then(|p| p.as_str().and_then(|s| Some(s.to_string())))
}
fn as_array<S: Into<String>>(&self, property_name: S) -> Option<Vec<Value>> {
self.properties
.get(&property_name.into())
.and_then(|p| p.as_array().map(Vec::clone))
}
fn as_object<S: Into<String>>(&self, property_name: S) -> Option<Map<String, Value>> {
self.properties
.get(&property_name.into())
.and_then(|p| p.as_object())
.map(Map::clone)
}
}
impl MutablePropertyInstanceSetter for RelationInstance {
    /// Overwrites the value of an existing property.
    ///
    /// Panics if the property does not exist on this instance (the original
    /// behavior, but with a descriptive message instead of a bare `unwrap`).
    fn set<S: Into<String>>(&mut self, property_name: S, value: Value) {
        let property_value = self
            .properties
            .get_mut(&property_name.into())
            .expect("property does not exist on this relation instance");
        // `value` is owned and not used afterwards, so it can be moved in
        // directly; the original cloned it unnecessarily.
        *property_value = value;
    }
}
| 32.284768 | 90 | 0.611897 |
1c4c50f855a47f847db156f685ac9503205d61c9 | 10,729 | use std::borrow::Cow;
use std::mem;
use bytes::{BufMut, ByteOrder, BytesMut};
use nom::{IResult, be_i16, be_i32, be_i64};
use errors::Result;
use protocol::{parse_response_header, parse_string, ApiVersion, Encodable, ErrorCode, MessageSet, MessageSetEncoder,
Offset, ParseTag, PartitionId, Record, RequestHeader, RequiredAck, ResponseHeader, Timestamp, WriteExt,
ARRAY_LEN_SIZE, BYTES_LEN_SIZE, PARTITION_ID_SIZE, STR_LEN_SIZE};
const REQUIRED_ACKS_SIZE: usize = 2;
const ACK_TIMEOUT_SIZE: usize = 4;
/// Request to publish message sets to one or more topic partitions.
#[derive(Clone, Debug, PartialEq)]
pub struct ProduceRequest<'a> {
    /// Common request header (api key/version, correlation id, client id).
    pub header: RequestHeader<'a>,
    /// This field indicates how many acknowledgements the servers should
    /// receive before responding to the request.
    pub required_acks: RequiredAck,
    /// This provides a maximum time in milliseconds the server can await the
    /// receipt of the number of acknowledgements in `required_acks`.
    pub ack_timeout: i32,
    /// The topic that data is being published to.
    pub topics: Vec<ProduceTopicData<'a>>,
}
/// Messages to publish for a single topic.
#[derive(Clone, Debug, PartialEq)]
pub struct ProduceTopicData<'a> {
    /// The topic that data is being published to.
    pub topic_name: Cow<'a, str>,
    /// The partition that data is being published to.
    pub partitions: Vec<ProducePartitionData<'a>>,
}
/// Messages to publish for a single partition of a topic.
#[derive(Clone, Debug, PartialEq)]
pub struct ProducePartitionData<'a> {
    /// The partition that data is being published to.
    pub partition_id: PartitionId,
    /// A set of messages in the standard format.
    pub message_set: Cow<'a, MessageSet>,
}
impl<'a> Record for ProduceRequest<'a> {
    /// Computes the total encoded size of this request in bytes.
    ///
    /// The arithmetic mirrors the layout written by the `Encodable`
    /// implementation: header, the two fixed-size fields, then the nested
    /// topic/partition arrays. Each fold starts at `ARRAY_LEN_SIZE` to
    /// account for the i32 element count that prefixes every array.
    fn size(&self, api_version: ApiVersion) -> usize {
        self.header.size(api_version) + REQUIRED_ACKS_SIZE + ACK_TIMEOUT_SIZE
            + self.topics.iter().fold(ARRAY_LEN_SIZE, |size, topic| {
                size + STR_LEN_SIZE + topic.topic_name.len()
                    + topic.partitions.iter().fold(ARRAY_LEN_SIZE, |size, partition| {
                        size + PARTITION_ID_SIZE + BYTES_LEN_SIZE + partition.message_set.size(api_version)
                    })
            })
    }
}
impl<'a> Encodable for ProduceRequest<'a> {
    /// Writes the request to `dst` in the wire format.
    ///
    /// The encoded size of each partition's message set is not known up
    /// front, so a 4-byte placeholder is written first and back-patched once
    /// the messages have been encoded.
    fn encode<T: ByteOrder>(&self, dst: &mut BytesMut) -> Result<()> {
        let encoder = MessageSetEncoder::new(self.header.api_version, None);
        self.header.encode::<T>(dst)?;
        dst.put_i16::<T>(self.required_acks);
        dst.put_i32::<T>(self.ack_timeout);
        dst.put_array::<T, _, _>(&self.topics, |buf, topic| {
            buf.put_str::<T, _>(Some(topic.topic_name.as_ref()))?;
            buf.put_array::<T, _, _>(&topic.partitions, |buf, partition| {
                buf.put_i32::<T>(partition.partition_id);
                // Remember where the placeholder size goes, write a zero,
                // encode the messages, then patch in the real size.
                let size_off = buf.len();
                buf.put_i32::<T>(0);
                encoder.encode::<T>(&partition.message_set, buf)?;
                let message_set_size = buf.len() - size_off - mem::size_of::<i32>();
                T::write_i32(&mut buf[size_off..], message_set_size as i32);
                Ok(())
            })
        })
    }
}
/// Response to a produce request, one status entry per topic/partition.
#[derive(Clone, Debug, PartialEq)]
pub struct ProduceResponse {
    /// Common response header (correlation id).
    pub header: ResponseHeader,
    /// Per-topic results for this response.
    pub topics: Vec<ProduceTopicStatus>,
    /// Duration in milliseconds for which the request was throttled due to
    /// quota violation. (Zero if the request did not violate any quota).
    pub throttle_time: Option<i32>,
}
/// Produce results for a single topic.
#[derive(Clone, Debug, PartialEq)]
pub struct ProduceTopicStatus {
    /// The topic this response entry corresponds to.
    pub topic_name: String,
    /// The per-partition results for this topic.
    pub partitions: Vec<ProducePartitionStatus>,
}
/// Produce result for a single partition.
#[derive(Clone, Debug, PartialEq)]
pub struct ProducePartitionStatus {
    /// The partition this response entry corresponds to.
    pub partition_id: PartitionId,
    /// The error from this partition, if any.
    pub error_code: ErrorCode,
    /// The offset assigned to the first message in the message set appended to
    /// this partition.
    pub offset: Offset,
    /// Unit is milliseconds since beginning of the epoch (midnight Jan 1, 1970
    /// (UTC)).
    pub timestamp: Option<Timestamp>,
}
impl ProduceResponse {
    /// Parses a `ProduceResponse` from `buf` according to the wire format of
    /// the given `api_version`. Pure delegation to the nom-based parser.
    pub fn parse(buf: &[u8], api_version: ApiVersion) -> IResult<&[u8], Self> {
        parse_produce_response(buf, api_version)
    }
}
// Parser for the full produce response: header, per-topic status array and,
// only for api_version > 0, the trailing throttle time.
named_args!(parse_produce_response(api_version: ApiVersion)<ProduceResponse>,
    parse_tag!(ParseTag::ProduceResponse,
        do_parse!(
            header: parse_response_header
         >> topics: length_count!(be_i32, apply!(parse_produce_topic_status, api_version))
         >> throttle_time: cond!(api_version > 0, be_i32)
         >> (ProduceResponse {
                header,
                topics,
                throttle_time,
            })
        )
    )
);
// Parser for one topic entry: topic name followed by its partition statuses.
named_args!(parse_produce_topic_status(api_version: ApiVersion)<ProduceTopicStatus>,
    parse_tag!(ParseTag::ProduceTopicStatus,
        do_parse!(
            topic_name: parse_string
         >> partitions: length_count!(be_i32, apply!(parse_produce_partition_status, api_version))
         >> (ProduceTopicStatus {
                topic_name,
                partitions,
            })
        )
    )
);
// Parser for one partition status; the timestamp is only present for
// api_version > 1.
named_args!(parse_produce_partition_status(api_version: ApiVersion)<ProducePartitionStatus>,
    parse_tag!(ParseTag::ProducePartitionStatus,
        do_parse!(
            partition_id: be_i32
         >> error_code: be_i16
         >> offset: be_i64
         >> timestamp: cond!(api_version > 1, be_i64)
         >> (ProducePartitionStatus {
                partition_id,
                error_code,
                offset,
                timestamp,
            })
        )
    )
);
#[cfg(test)]
mod tests {
use bytes::{BigEndian, Bytes};
use nom::IResult;
use super::*;
use compression::Compression;
use protocol::*;
lazy_static!{
static ref TEST_REQUEST_DATA: Vec<u8> = vec![
// ProduceRequest
// RequestHeader
0, 0, // api_key
0, 1, // api_version
0, 0, 0, 123, // correlation_id
0, 6, 99, 108, 105, 101, 110, 116, // client_id
255, 255, // required_acks
0, 0, 0, 123, // ack_timeout
// topics: [ProduceTopicData]
0, 0, 0, 1,
// ProduceTopicData
0, 5, 116, 111, 112, 105, 99, // topic_name
// partitions: [ProducePartitionData]
0, 0, 0, 1,
// ProducePartitionData
0, 0, 0, 1, // partition
// MessageSet
0, 0, 0, 42,
// messages: [Message]
0, 0, 0, 0, 0, 0, 0, 0, // offset
0, 0, 0, 30, // size
226, 52, 65, 188, // crc
1, // magic
0, // attributes
0, 0, 0, 0, 0, 0, 1, 200, // timestamp
0, 0, 0, 3, 107, 101, 121, // key
0, 0, 0, 5, 118, 97, 108, 117, 101 // value
];
static ref TEST_RESPONSE_DATA: Vec<u8> = vec![
// ResponseHeader
0, 0, 0, 123, // correlation_id
// topics: [ProduceTopicStatus]
0, 0, 0, 1,
0, 5, b't', b'o', b'p', b'i', b'c', // topic_name
// partitions: [ProducePartitionStatus]
0, 0, 0, 1,
0, 0, 0, 1, // partition
0, 2, // error_code
0, 0, 0, 0, 0, 0, 0, 3, // offset
0, 0, 0, 0, 0, 0, 0, 4, // timestamp
0, 0, 0, 5 // throttle_time
];
static ref TEST_RESPONSE: ProduceResponse = ProduceResponse {
header: ResponseHeader { correlation_id: 123 },
topics: vec![ProduceTopicStatus {
topic_name: "topic".to_owned(),
partitions: vec![ProducePartitionStatus {
partition_id: 1,
error_code: 2,
offset: 3,
timestamp: Some(4),
}],
}],
throttle_time: Some(5),
};
}
#[test]
fn test_encode_produce_request() {
let req = ProduceRequest {
header: RequestHeader {
api_key: ApiKeys::Produce as ApiVersion,
api_version: 1,
correlation_id: 123,
client_id: Some("client".into()),
},
required_acks: RequiredAcks::All as RequiredAck,
ack_timeout: 123,
topics: vec![
ProduceTopicData {
topic_name: "topic".into(),
partitions: vec![
ProducePartitionData {
partition_id: 1,
message_set: Cow::Owned(MessageSet {
messages: vec![
Message {
offset: 0,
compression: Compression::None,
key: Some(Bytes::from(&b"key"[..])),
value: Some(Bytes::from(&b"value"[..])),
timestamp: Some(MessageTimestamp::CreateTime(456)),
},
],
}),
},
],
},
],
};
let mut buf = BytesMut::with_capacity(128);
req.encode::<BigEndian>(&mut buf).unwrap();
assert_eq!(req.size(req.header.api_version), buf.len());
assert_eq!(&buf[..], &TEST_REQUEST_DATA[..]);
}
    // Parses the TEST_RESPONSE_DATA fixture at api_version 2 (so both the
    // timestamp and throttle_time fields are present) and expects the full
    // input to be consumed.
    #[test]
    fn test_parse_produce_response() {
        assert_eq!(
            parse_produce_response(TEST_RESPONSE_DATA.as_slice(), 2),
            IResult::Done(&[][..], TEST_RESPONSE.clone())
        );
    }
| 37.124567 | 118 | 0.504241 |
dba97d89b60d5373453ea661cabbdee9250058db | 9,762 | // ======================================
// This file was automatically generated.
// ======================================
use serde_derive::{Deserialize, Serialize};
use crate::client::{Client, Response};
use crate::ids::TopupId;
use crate::params::{Expand, Expandable, List, Metadata, Object, RangeQuery, Timestamp};
use crate::resources::{BalanceTransaction, Currency, Source};
/// The resource representing a Stripe "Topup".
///
/// For more details see <https://stripe.com/docs/api/topups/object>
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct Topup {
    /// Unique identifier for the object.
    pub id: TopupId,
    /// Amount transferred.
    pub amount: i64,
    /// ID of the balance transaction that describes the impact of this top-up on your account balance.
    ///
    /// May not be specified depending on status of top-up.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub balance_transaction: Option<Expandable<BalanceTransaction>>,
    /// Time at which the object was created.
    ///
    /// Measured in seconds since the Unix epoch.
    pub created: Timestamp,
    /// Three-letter [ISO currency code](https://www.iso.org/iso-4217-currency-codes.html), in lowercase.
    ///
    /// Must be a [supported currency](https://stripe.com/docs/currencies).
    pub currency: Currency,
    /// An arbitrary string attached to the object.
    ///
    /// Often useful for displaying to users.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// Date the funds are expected to arrive in your Stripe account for payouts.
    ///
    /// This factors in delays like weekends or bank holidays.
    /// May not be specified depending on status of top-up.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expected_availability_date: Option<Timestamp>,
    /// Error code explaining reason for top-up failure if available (see [the errors section](https://stripe.com/docs/api#errors) for a list of codes).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub failure_code: Option<String>,
    /// Message to user further explaining reason for top-up failure if available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub failure_message: Option<String>,
    /// Has the value `true` if the object exists in live mode or the value `false` if the object exists in test mode.
    pub livemode: bool,
    /// Set of [key-value pairs](https://stripe.com/docs/api/metadata) that you can attach to an object.
    ///
    /// This can be useful for storing additional information about the object in a structured format.
    pub metadata: Metadata,
    /// The source of the top-up's funds.
    // NOTE(review): undocumented in the generated spec; per the Stripe API
    // this is the payment source used to fund the top-up — confirm.
    pub source: Source,
    /// Extra information about a top-up.
    ///
    /// This will appear on your source's bank statement.
    /// It must contain at least one letter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub statement_descriptor: Option<String>,
    /// The status of the top-up is either `canceled`, `failed`, `pending`, `reversed`, or `succeeded`.
    pub status: TopupStatus,
    /// A string that identifies this top-up as part of a group.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub transfer_group: Option<String>,
}
impl Topup {
    /// Returns a list of top-ups.
    ///
    /// Filtering and pagination are controlled through [`ListTopups`].
    pub fn list(client: &Client, params: ListTopups<'_>) -> Response<List<Topup>> {
        client.get_query("/topups", &params)
    }
    /// Retrieves the details of a top-up that has previously been created.
    ///
    /// Supply the unique top-up ID that was returned from your previous request, and Stripe will return the corresponding top-up information.
    pub fn retrieve(client: &Client, id: &TopupId, expand: &[&str]) -> Response<Topup> {
        client.get_query(&format!("/topups/{}", id), &Expand { expand })
    }
    /// Updates the metadata of a top-up.
    ///
    /// Other top-up details are not editable by design.
    pub fn update(client: &Client, id: &TopupId, params: UpdateTopup<'_>) -> Response<Topup> {
        client.post_form(&format!("/topups/{}", id), &params)
    }
}
impl Object for Topup {
    type Id = TopupId;
    /// Returns a clone of the top-up's unique identifier.
    fn id(&self) -> Self::Id {
        self.id.clone()
    }
    /// Returns the canonical object name used by the Stripe API.
    fn object(&self) -> &'static str {
        "topup"
    }
}
/// The parameters for `Topup::list`.
#[derive(Clone, Debug, Serialize, Default)]
pub struct ListTopups<'a> {
    /// A positive integer representing how much to transfer.
    // NOTE(review): the generated type is `RangeQuery<Timestamp>`, which
    // looks like a code-generator artifact — the Stripe API documents
    // `amount` as a range filter over amounts, not timestamps. Confirm
    // against the OpenAPI spec before relying on it.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub amount: Option<RangeQuery<Timestamp>>,
    /// A filter on the list, based on the object `created` field.
    ///
    /// The value can be a string with an integer Unix timestamp, or it can be a dictionary with a number of different query options.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created: Option<RangeQuery<Timestamp>>,
    /// A cursor for use in pagination.
    ///
    /// `ending_before` is an object ID that defines your place in the list.
    /// For instance, if you make a list request and receive 100 objects, starting with `obj_bar`, your subsequent call can include `ending_before=obj_bar` in order to fetch the previous page of the list.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ending_before: Option<TopupId>,
    /// Specifies which fields in the response should be expanded.
    #[serde(skip_serializing_if = "Expand::is_empty")]
    pub expand: &'a [&'a str],
    /// A limit on the number of objects to be returned.
    ///
    /// Limit can range between 1 and 100, and the default is 10.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub limit: Option<u64>,
    /// A cursor for use in pagination.
    ///
    /// `starting_after` is an object ID that defines your place in the list.
    /// For instance, if you make a list request and receive 100 objects, ending with `obj_foo`, your subsequent call can include `starting_after=obj_foo` in order to fetch the next page of the list.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub starting_after: Option<TopupId>,
    /// Only return top-ups that have the given status.
    ///
    /// One of `canceled`, `failed`, `pending` or `succeeded`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<TopupStatusFilter>,
}
impl<'a> ListTopups<'a> {
pub fn new() -> Self {
ListTopups {
amount: Default::default(),
created: Default::default(),
ending_before: Default::default(),
expand: Default::default(),
limit: Default::default(),
starting_after: Default::default(),
status: Default::default(),
}
}
}
/// The parameters for `Topup::update`.
#[derive(Clone, Debug, Serialize, Default)]
pub struct UpdateTopup<'a> {
    /// An arbitrary string attached to the object.
    ///
    /// Often useful for displaying to users.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<&'a str>,
    /// Specifies which fields in the response should be expanded.
    #[serde(skip_serializing_if = "Expand::is_empty")]
    pub expand: &'a [&'a str],
    /// Set of [key-value pairs](https://stripe.com/docs/api/metadata) that you can attach to an object.
    ///
    /// This can be useful for storing additional information about the object in a structured format.
    /// Individual keys can be unset by posting an empty value to them.
    /// All keys can be unset by posting an empty value to `metadata`.
    // Unset (`None`) fields are skipped during serialization, so an update
    // only sends the parameters that were explicitly provided.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<Metadata>,
}
impl<'a> UpdateTopup<'a> {
pub fn new() -> Self {
UpdateTopup {
description: Default::default(),
expand: Default::default(),
metadata: Default::default(),
}
}
}
/// An enum representing the possible values of a `Topup`'s `status` field.
#[derive(Copy, Clone, Debug, Deserialize, Serialize, Eq, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum TopupStatus {
    Canceled,
    Failed,
    Pending,
    Reversed,
    Succeeded,
}

impl TopupStatus {
    /// Returns the wire (snake_case) representation of this status.
    pub fn as_str(self) -> &'static str {
        match self {
            Self::Canceled => "canceled",
            Self::Failed => "failed",
            Self::Pending => "pending",
            Self::Reversed => "reversed",
            Self::Succeeded => "succeeded",
        }
    }
}

impl AsRef<str> for TopupStatus {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

impl std::fmt::Display for TopupStatus {
    /// Formats the status using its wire representation.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        self.as_str().fmt(f)
    }
}

impl std::default::Default for TopupStatus {
    fn default() -> Self {
        TopupStatus::Canceled
    }
}
/// An enum representing the possible values of an `ListTopups`'s `status` field.
#[derive(Copy, Clone, Debug, Deserialize, Serialize, Eq, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum TopupStatusFilter {
Canceled,
Failed,
Pending,
Succeeded,
}
impl TopupStatusFilter {
pub fn as_str(self) -> &'static str {
match self {
TopupStatusFilter::Canceled => "canceled",
TopupStatusFilter::Failed => "failed",
TopupStatusFilter::Pending => "pending",
TopupStatusFilter::Succeeded => "succeeded",
}
}
}
impl AsRef<str> for TopupStatusFilter {
fn as_ref(&self) -> &str {
self.as_str()
}
}
impl std::fmt::Display for TopupStatusFilter {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
self.as_str().fmt(f)
}
}
impl std::default::Default for TopupStatusFilter {
fn default() -> Self {
Self::Canceled
}
}
| 34.4947 | 204 | 0.642901 |
766cf820096893269e23a4cb6229873fdc34cbfb | 2,029 | extern crate packed_struct;
#[macro_use]
extern crate packed_struct_codegen;
use packed_struct::prelude::*;
#[derive(PackedStruct, PartialEq, Debug)]
#[packed_struct(bit_numbering="msb0")]
pub struct SmallInts {
#[packed_field(bits="0:2")]
pub val1: Integer<u8, packed_bits::Bits3>,
#[packed_field(bits="3:4")]
pub val2: Integer<u8, packed_bits::Bits2>,
pub val3: bool,
#[packed_field(bits="6")]
pub val4: bool,
#[packed_field(bits="7..")]
pub val5: bool
}
#[test]
fn test_packing_bit_positions() {
let a = SmallInts {
val1: 7.into(),
val2: 3.into(),
val3: true,
val4: true,
val5: true
};
let packed = a.pack();
assert_eq!([255], packed);
let unpacked = SmallInts::unpack(&packed).unwrap();
assert_eq!(a, unpacked);
}
#[derive(PackedStruct, PartialEq, Debug)]
#[packed_struct(size_bytes="1", bit_numbering="lsb0")]
pub struct SmallIntsLsb {
#[packed_field(bits="2:0")]
pub val1: Integer<u8, packed_bits::Bits3>,
#[packed_field(bits="6")]
pub val2: bool
}
#[test]
fn test_packing_bit_positions_lsb() {
let a = SmallIntsLsb {
val1: 0b111.into(),
val2: true
};
let packed = a.pack();
assert_eq!(&[0b01000111], &packed);
let unpacked = SmallIntsLsb::unpack(&packed).unwrap();
assert_eq!(a, unpacked);
}
#[test]
fn test_packing_byte_position() {
#[derive(Copy, Clone, Debug, PartialEq, PackedStruct)]
#[packed_struct(bit_numbering="msb0", endian="msb")]
pub struct BufferChecksum {
#[packed_field(bytes="0")]
pub version: u8,
#[packed_field(bytes="1:4")]
pub size: u32,
#[packed_field(bytes="5..")]
pub checksum: u64
}
let b = BufferChecksum {
version: 101,
size: 52748273,
checksum: 869034217895
};
let packed = b.pack();
assert_eq!(packed.len(), 13);
let unpacked = BufferChecksum::unpack(&packed).unwrap();
assert_eq!(b, unpacked);
}
| 22.544444 | 60 | 0.608181 |
75de5c56ea139e32d1e4dd31dbd525a7823642ec | 6,966 | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The compiler code necessary to implement the `#[derive]` extensions.
use syntax::ast::{MetaItem, MetaItemKind, self};
use syntax::attr::AttrMetaMethods;
use syntax::ext::base::{ExtCtxt, SyntaxEnv, Annotatable};
use syntax::ext::base::{MultiDecorator, MultiItemDecorator, MultiModifier};
use syntax::ext::build::AstBuilder;
use syntax::feature_gate;
use syntax::codemap::Span;
use syntax::parse::token::{intern, intern_and_get_ident};
macro_rules! pathvec {
($($x:ident)::+) => (
vec![ $( stringify!($x) ),+ ]
)
}
macro_rules! path {
($($x:tt)*) => (
::ext::deriving::generic::ty::Path::new( pathvec!( $($x)* ) )
)
}
macro_rules! path_local {
($x:ident) => (
::deriving::generic::ty::Path::new_local(stringify!($x))
)
}
macro_rules! pathvec_std {
($cx:expr, $first:ident :: $($rest:ident)::+) => ({
let mut v = pathvec!($($rest)::+);
if let Some(s) = $cx.crate_root {
v.insert(0, s);
}
v
})
}
macro_rules! path_std {
($($x:tt)*) => (
::deriving::generic::ty::Path::new( pathvec_std!( $($x)* ) )
)
}
pub mod bounds;
pub mod clone;
pub mod encodable;
pub mod decodable;
pub mod hash;
pub mod debug;
pub mod default;
#[path="cmp/partial_eq.rs"]
pub mod partial_eq;
#[path="cmp/eq.rs"]
pub mod eq;
#[path="cmp/partial_ord.rs"]
pub mod partial_ord;
#[path="cmp/ord.rs"]
pub mod ord;
pub mod generic;
fn expand_derive(cx: &mut ExtCtxt,
span: Span,
mitem: &MetaItem,
annotatable: Annotatable)
-> Annotatable {
annotatable.map_item_or(|item| {
item.map(|mut item| {
if mitem.value_str().is_some() {
cx.span_err(mitem.span, "unexpected value in `derive`");
}
let traits = mitem.meta_item_list().unwrap_or(&[]);
if traits.is_empty() {
cx.span_warn(mitem.span, "empty trait list in `derive`");
}
for titem in traits.iter().rev() {
let tname = match titem.node {
MetaItemKind::Word(ref tname) => tname,
_ => {
cx.span_err(titem.span, "malformed `derive` entry");
continue;
}
};
if !(is_builtin_trait(tname) || cx.ecfg.enable_custom_derive()) {
feature_gate::emit_feature_err(&cx.parse_sess.span_diagnostic,
"custom_derive",
titem.span,
feature_gate::GateIssue::Language,
feature_gate::EXPLAIN_CUSTOM_DERIVE);
continue;
}
// #[derive(Foo, Bar)] expands to #[derive_Foo] #[derive_Bar]
item.attrs.push(cx.attribute(titem.span, cx.meta_word(titem.span,
intern_and_get_ident(&format!("derive_{}", tname)))));
}
item
})
}, |a| {
cx.span_err(span, "`derive` can only be applied to items");
a
})
}
macro_rules! derive_traits {
($( $name:expr => $func:path, )+) => {
pub fn register_all(env: &mut SyntaxEnv) {
// Define the #[derive_*] extensions.
$({
struct DeriveExtension;
impl MultiItemDecorator for DeriveExtension {
fn expand(&self,
ecx: &mut ExtCtxt,
sp: Span,
mitem: &MetaItem,
annotatable: &Annotatable,
push: &mut FnMut(Annotatable)) {
warn_if_deprecated(ecx, sp, $name);
$func(ecx, sp, mitem, annotatable, push);
}
}
env.insert(intern(concat!("derive_", $name)),
MultiDecorator(Box::new(DeriveExtension)));
})+
env.insert(intern("derive"),
MultiModifier(Box::new(expand_derive)));
}
fn is_builtin_trait(name: &str) -> bool {
match name {
$( $name )|+ => true,
_ => false,
}
}
}
}
derive_traits! {
"Clone" => clone::expand_deriving_clone,
"Hash" => hash::expand_deriving_hash,
"RustcEncodable" => encodable::expand_deriving_rustc_encodable,
"RustcDecodable" => decodable::expand_deriving_rustc_decodable,
"PartialEq" => partial_eq::expand_deriving_partial_eq,
"Eq" => eq::expand_deriving_eq,
"PartialOrd" => partial_ord::expand_deriving_partial_ord,
"Ord" => ord::expand_deriving_ord,
"Debug" => debug::expand_deriving_debug,
"Default" => default::expand_deriving_default,
"Send" => bounds::expand_deriving_unsafe_bound,
"Sync" => bounds::expand_deriving_unsafe_bound,
"Copy" => bounds::expand_deriving_copy,
// deprecated
"Encodable" => encodable::expand_deriving_encodable,
"Decodable" => decodable::expand_deriving_decodable,
}
#[inline] // because `name` is a compile-time constant
fn warn_if_deprecated(ecx: &mut ExtCtxt, sp: Span, name: &str) {
if let Some(replacement) = match name {
"Encodable" => Some("RustcEncodable"),
"Decodable" => Some("RustcDecodable"),
_ => None,
} {
ecx.span_warn(sp, &format!("derive({}) is deprecated in favor of derive({})",
name, replacement));
}
}
/// Construct a name for the inner type parameter that can't collide with any type parameters of
/// the item. This is achieved by starting with a base and then concatenating the names of all
/// other type parameters.
// FIXME(aburka): use real hygiene when that becomes possible
fn hygienic_type_parameter(item: &Annotatable, base: &str) -> String {
let mut typaram = String::from(base);
if let Annotatable::Item(ref item) = *item {
match item.node {
ast::ItemKind::Struct(_, ast::Generics { ref ty_params, .. }) |
ast::ItemKind::Enum(_, ast::Generics { ref ty_params, .. }) => {
for ty in ty_params.iter() {
typaram.push_str(&ty.ident.name.as_str());
}
}
_ => {}
}
}
typaram
}
| 31.520362 | 96 | 0.542349 |
22ab62ac372f218b97cbcad9de86929075f645e6 | 14,605 | use crate::interface::{Compiler, Result};
use crate::passes::{self, BoxedResolver, QueryContext};
use rustc_ast as ast;
use rustc_codegen_ssa::traits::CodegenBackend;
use rustc_codegen_ssa::CodegenResults;
use rustc_data_structures::svh::Svh;
use rustc_data_structures::sync::{Lrc, OnceCell, WorkerLocal};
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_incremental::DepGraphFuture;
use rustc_lint::LintStore;
use rustc_middle::arena::Arena;
use rustc_middle::dep_graph::DepGraph;
use rustc_middle::ty::{GlobalCtxt, TyCtxt};
use rustc_query_impl::Queries as TcxQueries;
use rustc_session::config::{self, OutputFilenames, OutputType};
use rustc_session::{output::find_crate_name, Session};
use rustc_span::symbol::sym;
use std::any::Any;
use std::cell::{Ref, RefCell, RefMut};
use std::rc::Rc;
/// Represents the result of a query.
///
/// This result can be stolen with the [`take`] method and generated with the [`compute`] method.
///
/// [`take`]: Self::take
/// [`compute`]: Self::compute
pub struct Query<T> {
    /// `None` until `compute` runs (and `None` again after `take` steals it).
    result: RefCell<Option<Result<T>>>,
}
impl<T> Query<T> {
fn compute<F: FnOnce() -> Result<T>>(&self, f: F) -> Result<&Query<T>> {
let mut result = self.result.borrow_mut();
if result.is_none() {
*result = Some(f());
}
result.as_ref().unwrap().as_ref().map(|_| self).map_err(|err| *err)
}
/// Takes ownership of the query result. Further attempts to take or peek the query
/// result will panic unless it is generated by calling the `compute` method.
pub fn take(&self) -> T {
self.result.borrow_mut().take().expect("missing query result").unwrap()
}
/// Borrows the query result using the RefCell. Panics if the result is stolen.
pub fn peek(&self) -> Ref<'_, T> {
Ref::map(self.result.borrow(), |r| {
r.as_ref().unwrap().as_ref().expect("missing query result")
})
}
/// Mutably borrows the query result using the RefCell. Panics if the result is stolen.
pub fn peek_mut(&self) -> RefMut<'_, T> {
RefMut::map(self.result.borrow_mut(), |r| {
r.as_mut().unwrap().as_mut().expect("missing query result")
})
}
}
impl<T> Default for Query<T> {
fn default() -> Self {
Query { result: RefCell::new(None) }
}
}
pub struct Queries<'tcx> {
    /// The compiler (session, input, callbacks) these queries operate on.
    compiler: &'tcx Compiler,
    /// Lazily created global type context; handed to `create_global_ctxt`.
    gcx: OnceCell<GlobalCtxt<'tcx>>,
    queries: OnceCell<TcxQueries<'tcx>>,
    /// Arenas owning allocations made while building the global context.
    arena: WorkerLocal<Arena<'tcx>>,
    hir_arena: WorkerLocal<rustc_ast_lowering::Arena<'tcx>>,
    // Cached results of the individual compilation phases, in rough pipeline
    // order; each is computed at most once via `Query::compute`.
    dep_graph_future: Query<Option<DepGraphFuture>>,
    parse: Query<ast::Crate>,
    crate_name: Query<String>,
    register_plugins: Query<(ast::Crate, Lrc<LintStore>)>,
    expansion: Query<(Rc<ast::Crate>, Rc<RefCell<BoxedResolver>>, Lrc<LintStore>)>,
    dep_graph: Query<DepGraph>,
    prepare_outputs: Query<OutputFilenames>,
    global_ctxt: Query<QueryContext<'tcx>>,
    ongoing_codegen: Query<Box<dyn Any>>,
}
impl<'tcx> Queries<'tcx> {
    /// Creates a query set for `compiler` with every phase still uncomputed.
    pub fn new(compiler: &'tcx Compiler) -> Queries<'tcx> {
        Queries {
            compiler,
            gcx: OnceCell::new(),
            queries: OnceCell::new(),
            arena: WorkerLocal::new(|_| Arena::default()),
            hir_arena: WorkerLocal::new(|_| rustc_ast_lowering::Arena::default()),
            dep_graph_future: Default::default(),
            parse: Default::default(),
            crate_name: Default::default(),
            register_plugins: Default::default(),
            expansion: Default::default(),
            dep_graph: Default::default(),
            prepare_outputs: Default::default(),
            global_ctxt: Default::default(),
            ongoing_codegen: Default::default(),
        }
    }
    /// Shorthand for the compiler's session.
    fn session(&self) -> &Lrc<Session> {
        &self.compiler.sess
    }
    /// Shorthand for the compiler's codegen backend.
    fn codegen_backend(&self) -> &Lrc<Box<dyn CodegenBackend>> {
        self.compiler.codegen_backend()
    }
    /// Starts loading the previous dependency graph in the background, but
    /// only when the session options request one; caches the future.
    fn dep_graph_future(&self) -> Result<&Query<Option<DepGraphFuture>>> {
        self.dep_graph_future.compute(|| {
            let sess = self.session();
            Ok(sess.opts.build_dep_graph().then(|| rustc_incremental::load_dep_graph(sess)))
        })
    }
    /// Parses the input into an AST crate. Parse errors are emitted to the
    /// session's diagnostics and surface here as `Err`.
    pub fn parse(&self) -> Result<&Query<ast::Crate>> {
        self.parse.compute(|| {
            passes::parse(self.session(), &self.compiler.input)
                .map_err(|mut parse_error| parse_error.emit())
        })
    }
    /// Runs plugin and lint registration over the freshly parsed crate, and
    /// kicks off dep-graph loading (see the ordering comment below).
    pub fn register_plugins(&self) -> Result<&Query<(ast::Crate, Lrc<LintStore>)>> {
        self.register_plugins.compute(|| {
            let crate_name = self.crate_name()?.peek().clone();
            let krate = self.parse()?.take();
            // No-op fallback when the embedder registered no extra lints.
            let empty: &(dyn Fn(&Session, &mut LintStore) + Sync + Send) = &|_, _| {};
            let (krate, lint_store) = passes::register_plugins(
                self.session(),
                &*self.codegen_backend().metadata_loader(),
                self.compiler.register_lints.as_deref().unwrap_or_else(|| empty),
                krate,
                &crate_name,
            )?;
            // Compute the dependency graph (in the background). We want to do
            // this as early as possible, to give the DepGraph maximum time to
            // load before dep_graph() is called, but it also can't happen
            // until after rustc_incremental::prepare_session_directory() is
            // called, which happens within passes::register_plugins().
            self.dep_graph_future().ok();
            Ok((krate, Lrc::new(lint_store)))
        })
    }
    /// Determines the crate name from the parsed attributes and the input
    /// (via `find_crate_name`).
    pub fn crate_name(&self) -> Result<&Query<String>> {
        self.crate_name.compute(|| {
            Ok({
                let parse_result = self.parse()?;
                let krate = parse_result.peek();
                // parse `#[crate_name]` even if `--crate-name` was passed, to make sure it matches.
                find_crate_name(self.session(), &krate.attrs, &self.compiler.input)
            })
        })
    }
    /// Runs resolver creation, configuration and macro expansion, yielding the
    /// expanded crate, the resolver and the lint store.
    pub fn expansion(
        &self,
    ) -> Result<&Query<(Rc<ast::Crate>, Rc<RefCell<BoxedResolver>>, Lrc<LintStore>)>> {
        tracing::trace!("expansion");
        self.expansion.compute(|| {
            let crate_name = self.crate_name()?.peek().clone();
            let (krate, lint_store) = self.register_plugins()?.take();
            let _timer = self.session().timer("configure_and_expand");
            let sess = self.session();
            let mut resolver = passes::create_resolver(
                sess.clone(),
                self.codegen_backend().metadata_loader(),
                &krate,
                &crate_name,
            );
            // Expansion needs mutable access to the resolver it was built around.
            let krate = resolver.access(|resolver| {
                passes::configure_and_expand(sess, &lint_store, krate, &crate_name, resolver)
            })?;
            Ok((Rc::new(krate), Rc::new(RefCell::new(resolver)), lint_store))
        })
    }
    /// Blocks on the background dep-graph future (if one was started) and
    /// builds the final `DepGraph`; falls back to a disabled graph when
    /// incremental compilation is off.
    fn dep_graph(&self) -> Result<&Query<DepGraph>> {
        self.dep_graph.compute(|| {
            let sess = self.session();
            let future_opt = self.dep_graph_future()?.take();
            let dep_graph = future_opt
                .and_then(|future| {
                    let (prev_graph, prev_work_products) =
                        sess.time("blocked_on_dep_graph_loading", || future.open().open(sess));
                    rustc_incremental::build_dep_graph(sess, prev_graph, prev_work_products)
                })
                .unwrap_or_else(DepGraph::new_disabled);
            Ok(dep_graph)
        })
    }
    /// Computes the output filenames for the compilation artifacts.
    pub fn prepare_outputs(&self) -> Result<&Query<OutputFilenames>> {
        self.prepare_outputs.compute(|| {
            let (krate, boxed_resolver, _) = &*self.expansion()?.peek();
            let crate_name = self.crate_name()?.peek();
            passes::prepare_outputs(
                self.session(),
                self.compiler,
                krate,
                &*boxed_resolver,
                &crate_name,
            )
        })
    }
    /// Consumes the expansion results and constructs the global type context
    /// (`TyCtxt`) wrapper used by all later analysis queries.
    pub fn global_ctxt(&'tcx self) -> Result<&Query<QueryContext<'tcx>>> {
        self.global_ctxt.compute(|| {
            let crate_name = self.crate_name()?.peek().clone();
            let outputs = self.prepare_outputs()?.peek().clone();
            let dep_graph = self.dep_graph()?.peek().clone();
            let (krate, resolver, lint_store) = self.expansion()?.take();
            Ok(passes::create_global_ctxt(
                self.compiler,
                lint_store,
                krate,
                dep_graph,
                resolver,
                outputs,
                &crate_name,
                &self.queries,
                &self.gcx,
                &self.arena,
                &self.hir_arena,
            ))
        })
    }
    /// Runs analysis and, if no errors were reported, starts code generation.
    pub fn ongoing_codegen(&'tcx self) -> Result<&Query<Box<dyn Any>>> {
        self.ongoing_codegen.compute(|| {
            let outputs = self.prepare_outputs()?;
            self.global_ctxt()?.peek_mut().enter(|tcx| {
                tcx.analysis(()).ok();
                // Don't do code generation if there were any errors
                self.session().compile_status()?;
                // Hook for UI tests.
                Self::check_for_rustc_errors_attr(tcx);
                Ok(passes::start_codegen(&***self.codegen_backend(), tcx, &*outputs.peek()))
            })
        })
    }
/// Check for the `#[rustc_error]` annotation, which forces an error in codegen. This is used
/// to write UI tests that actually test that compilation succeeds without reporting
/// an error.
fn check_for_rustc_errors_attr(tcx: TyCtxt<'_>) {
let Some((def_id, _)) = tcx.entry_fn(()) else { return };
let attrs = &*tcx.get_attrs(def_id);
let attrs = attrs.iter().filter(|attr| attr.has_name(sym::rustc_error));
for attr in attrs {
match attr.meta_item_list() {
// Check if there is a `#[rustc_error(delay_span_bug_from_inside_query)]`.
Some(list)
if list.iter().any(|list_item| {
matches!(
list_item.ident().map(|i| i.name),
Some(sym::delay_span_bug_from_inside_query)
)
}) =>
{
tcx.ensure().trigger_delay_span_bug(def_id);
}
// Bare `#[rustc_error]`.
None => {
tcx.sess.span_fatal(
tcx.def_span(def_id),
"fatal error triggered by #[rustc_error]",
);
}
// Some other attribute.
Some(_) => {
tcx.sess.span_warn(
tcx.def_span(def_id),
"unexpected annotation used with `#[rustc_error(...)]!",
);
}
}
}
}
    /// Snapshots everything needed for linking into a standalone `Linker`,
    /// so the global context can be freed before the link step runs.
    pub fn linker(&'tcx self) -> Result<Linker> {
        let sess = self.session().clone();
        let codegen_backend = self.codegen_backend().clone();
        let dep_graph = self.dep_graph()?.peek().clone();
        let prepare_outputs = self.prepare_outputs()?.take();
        let crate_hash = self.global_ctxt()?.peek_mut().enter(|tcx| tcx.crate_hash(LOCAL_CRATE));
        let ongoing_codegen = self.ongoing_codegen()?.take();
        Ok(Linker {
            sess,
            codegen_backend,
            dep_graph,
            prepare_outputs,
            crate_hash,
            ongoing_codegen,
        })
    }
}
/// Self-contained bundle of the state required to link the compiled crate,
/// detached from the lifetime of `Queries`.
pub struct Linker {
    // compilation inputs
    sess: Lrc<Session>,
    codegen_backend: Lrc<Box<dyn CodegenBackend>>,
    // compilation outputs
    dep_graph: DepGraph,
    prepare_outputs: OutputFilenames,
    crate_hash: Svh,
    ongoing_codegen: Box<dyn Any>,
}
impl Linker {
    /// Joins the in-flight codegen, persists incremental-compilation state,
    /// then links the final artifact (unless no linkable output was requested
    /// or `-Z no-link` is set).
    pub fn link(self) -> Result<()> {
        let (codegen_results, work_products) = self.codegen_backend.join_codegen(
            self.ongoing_codegen,
            &self.sess,
            &self.prepare_outputs,
        )?;
        self.sess.compile_status()?;
        let sess = &self.sess;
        let dep_graph = self.dep_graph;
        sess.time("serialize_work_products", || {
            rustc_incremental::save_work_product_index(sess, &dep_graph, work_products)
        });
        // Dropping the dep graph can be expensive; time it under the profiler.
        let prof = self.sess.prof.clone();
        prof.generic_activity("drop_dep_graph").run(move || drop(dep_graph));
        // Now that we won't touch anything in the incremental compilation directory
        // any more, we can finalize it (which involves renaming it)
        rustc_incremental::finalize_session_directory(&self.sess, self.crate_hash);
        // Nothing to link when neither an executable nor metadata was requested.
        if !self
            .sess
            .opts
            .output_types
            .keys()
            .any(|&i| i == OutputType::Exe || i == OutputType::Metadata)
        {
            return Ok(());
        }
        // `-Z no-link`: serialize the codegen results to an `.rlink` file instead.
        if sess.opts.debugging_opts.no_link {
            let encoded = CodegenResults::serialize_rlink(&codegen_results);
            let rlink_file = self.prepare_outputs.with_extension(config::RLINK_EXT);
            std::fs::write(&rlink_file, encoded).map_err(|err| {
                sess.fatal(&format!("failed to write file {}: {}", rlink_file.display(), err));
            })?;
            return Ok(());
        }
        let _timer = sess.prof.verbose_generic_activity("link_crate");
        self.codegen_backend.link(&self.sess, codegen_results, &self.prepare_outputs)
    }
}
impl Compiler {
    /// Runs `f` with a fresh `Queries` set, then performs end-of-compilation
    /// bookkeeping (self-profile query strings, dep-graph serialization).
    pub fn enter<F, T>(&self, f: F) -> T
    where
        F: for<'tcx> FnOnce(&'tcx Queries<'tcx>) -> T,
    {
        let mut _timer = None;
        let queries = Queries::new(self);
        let ret = f(&queries);
        // NOTE: intentionally does not compute the global context if it hasn't been built yet,
        // since that likely means there was a parse error.
        if let Some(Ok(gcx)) = &mut *queries.global_ctxt.result.borrow_mut() {
            // We assume that no queries are run past here. If there are new queries
            // after this point, they'll show up as "<unknown>" in self-profiling data.
            {
                let _prof_timer =
                    queries.session().prof.generic_activity("self_profile_alloc_query_strings");
                gcx.enter(rustc_query_impl::alloc_self_profile_query_strings);
            }
            self.session()
                .time("serialize_dep_graph", || gcx.enter(rustc_incremental::save_dep_graph));
        }
        _timer = Some(self.session().timer("free_global_ctxt"));
        ret
    }
}
| 35.972906 | 100 | 0.561315 |
f56e53b829b8eef0e772d26e32fc1db1dfaa3da7 | 6,798 | use algonaut_crypto::HashDigest;
use algonaut_crypto::Signature;
use algonaut_encoding::U8_32Visitor;
use data_encoding::BASE64;
use derive_more::{Add, Display, Sub};
use error::CoreError;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use sha2::Digest;
use static_assertions::_core::ops::{Add, Sub};
use std::convert::TryInto;
use std::fmt::{self, Debug, Formatter};
use std::ops::Mul;
pub use address::Address;
pub use address::MultisigAddress;
pub use multisig::MultisigSignature;
pub use multisig::MultisigSubsig;
mod address;
mod error;
mod multisig;
pub const MICRO_ALGO_CONVERSION_FACTOR: f64 = 1e6; // 1 Algo == 1_000_000 MicroAlgos
/// MicroAlgos are the base unit of currency in Algorand
// Newtype over the raw u64 microalgo amount; arithmetic with plain u64 is
// provided below, while `Display`/`Add`/`Sub` come from `derive_more`.
#[derive(
    Copy, Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize, Display, Add, Sub,
)]
pub struct MicroAlgos(pub u64);
impl MicroAlgos {
    /// Converts this microalgo amount into (fractional) Algos.
    pub fn to_algos(self) -> f64 {
        let micro = self.0 as f64;
        micro / MICRO_ALGO_CONVERSION_FACTOR
    }
    /// Builds a microalgo amount from a (possibly fractional) Algo count.
    pub fn from_algos(algos: f64) -> MicroAlgos {
        let micro = algos * MICRO_ALGO_CONVERSION_FACTOR;
        MicroAlgos(micro as u64)
    }
}
impl Add<u64> for MicroAlgos {
    type Output = MicroAlgos;
    /// Adds a raw `u64` number of microalgos.
    fn add(self, amount: u64) -> MicroAlgos {
        MicroAlgos(self.0 + amount)
    }
}
impl Sub<u64> for MicroAlgos {
    type Output = MicroAlgos;
    /// Subtracts a raw `u64` number of microalgos.
    fn sub(self, amount: u64) -> MicroAlgos {
        MicroAlgos(self.0 - amount)
    }
}
// Intentionally not implementing Mul<Rhs=Self>
// If you're multiplying a MicroAlgos by MicroAlgos, something has gone wrong in your math
// That would give you MicroAlgos squared and those don't exist
impl Mul<u64> for MicroAlgos {
    type Output = MicroAlgos;
    /// Scales the amount by a unitless `u64` factor.
    fn mul(self, factor: u64) -> MicroAlgos {
        MicroAlgos(self.0 * factor)
    }
}
/// Round of the Algorand consensus protocol
// Newtype over the raw round number; `Display`/`Add`/`Sub` come from `derive_more`.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Serialize, Deserialize, Display, Add, Sub)]
pub struct Round(pub u64);
impl Add<u64> for Round {
    type Output = Round;
    /// Advances the round by a raw `u64` offset.
    fn add(self, offset: u64) -> Round {
        Round(self.0 + offset)
    }
}
impl Sub<u64> for Round {
    type Output = Round;
    /// Moves the round back by a raw `u64` offset.
    fn sub(self, offset: u64) -> Round {
        Round(self.0 - offset)
    }
}
// Intentionally not implementing Mul<Rhs=Self>
// If you're multiplying a Round by a Round, something has gone wrong in your math
// That would give you Rounds squared and those don't exist
impl Mul<u64> for Round {
    type Output = Round;
    /// Scales the round number by a unitless `u64` factor.
    fn mul(self, factor: u64) -> Round {
        Round(self.0 * factor)
    }
}
/// Participation public key used in key registration transactions
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct VotePk(pub [u8; 32]);
impl Serialize for VotePk {
    // Serialized as raw bytes via `serialize_bytes`, not as base64 text.
    fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
    where
        S: Serializer,
    {
        serializer.serialize_bytes(&self.0[..])
    }
}
impl<'de> Deserialize<'de> for VotePk {
    // Mirror of `Serialize`: reads exactly 32 raw bytes via `U8_32Visitor`.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(VotePk(deserializer.deserialize_bytes(U8_32Visitor)?))
    }
}
impl Debug for VotePk {
    // Debug output shows the base64 form instead of the raw byte array.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.to_base64_str())
    }
}
impl VotePk {
    /// Parses the key from its base64 string representation.
    pub fn from_base64_str(base64_str: &str) -> Result<VotePk, CoreError> {
        Ok(VotePk(base64_str_to_u8_array(base64_str)?))
    }
    /// Returns the base64 encoding of the key bytes.
    pub fn to_base64_str(self) -> String {
        BASE64.encode(&self.0)
    }
}
/// VRF public key used in key registration transaction
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct VrfPk(pub [u8; 32]);
impl Serialize for VrfPk {
    // Serialized as raw bytes via `serialize_bytes`, not as base64 text.
    fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
    where
        S: Serializer,
    {
        serializer.serialize_bytes(&self.0[..])
    }
}
impl<'de> Deserialize<'de> for VrfPk {
    // Mirror of `Serialize`: reads exactly 32 raw bytes via `U8_32Visitor`.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(VrfPk(deserializer.deserialize_bytes(U8_32Visitor)?))
    }
}
impl Debug for VrfPk {
    // Debug output shows the base64 form instead of the raw byte array.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.to_base64_str())
    }
}
impl VrfPk {
    /// Parses the key from its base64 string representation.
    pub fn from_base64_str(base64_str: &str) -> Result<VrfPk, CoreError> {
        Ok(VrfPk(base64_str_to_u8_array(base64_str)?))
    }
    /// Returns the base64 encoding of the key bytes.
    pub fn to_base64_str(self) -> String {
        BASE64.encode(&self.0)
    }
}
/// A compiled TEAL program together with its arguments and its authorization.
#[derive(Eq, PartialEq, Clone)]
pub struct SignedLogic {
    pub logic: CompiledTealBytes, // compiled TEAL bytecode
    pub args: Vec<Vec<u8>>,       // raw arguments passed to the program
    pub sig: LogicSignature,      // how the program is authorized (see `verify`)
}
impl SignedLogic {
    /// The program's address: SHA-512/256 over `"Program" || bytecode`.
    pub fn as_address(&self) -> Address {
        Address(sha2::Sha512Trunc256::digest(&self.logic.bytes_to_sign()).into())
    }
    /// Performs signature verification against the sender address, and general consistency checks.
    pub fn verify(&self, address: Address) -> bool {
        match &self.sig {
            // Contract account: the sender must be the program's hash address itself.
            LogicSignature::ContractAccount => self.as_address() == address,
            // Delegated: the program bytes must carry a valid signature for the sender's key.
            LogicSignature::DelegatedSig(sig) => {
                let pk = address.as_public_key();
                pk.verify(&self.logic.bytes_to_sign(), sig)
            }
            // Delegated multisig: verification is delegated to the multisig structure.
            LogicSignature::DelegatedMultiSig(msig) => msig.verify(&self.logic.bytes_to_sign()),
        }
    }
}
impl Debug for SignedLogic {
    // Renders program bytes and args in base64 for readable debug output.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "logic: {:?}, args: {:?}, sig: {:?}",
            BASE64.encode(&self.logic.0),
            self.args
                .iter()
                .map(|a| BASE64.encode(a))
                .collect::<Vec<String>>(),
            self.sig
        )
    }
}
/// Raw bytes of a compiled TEAL program.
#[derive(Debug, Eq, PartialEq, Clone, Serialize, Deserialize)]
pub struct CompiledTealBytes(pub Vec<u8>);
impl CompiledTealBytes {
    /// Returns `"Program"` followed by the program bytes — the exact payload
    /// that is signed by logic signatures (see `SignedLogic`).
    pub fn bytes_to_sign(&self) -> Vec<u8> {
        let mut buf = Vec::with_capacity(b"Program".len() + self.0.len());
        buf.extend_from_slice(b"Program");
        buf.extend_from_slice(&self.0);
        buf
    }
}
/// How a `SignedLogic` program is authorized (see `SignedLogic::verify`).
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum LogicSignature {
    /// The program acts as its own account (sender == program hash address).
    ContractAccount,
    /// The program bytes are signed by a single delegating key.
    DelegatedSig(Signature),
    /// The program bytes are signed by a delegating multisig.
    DelegatedMultiSig(MultisigSignature),
}
/// Convenience trait: MessagePack-encode any `Serialize` type with named fields.
pub trait ToMsgPack: Serialize {
    fn to_msg_pack(&self) -> Result<Vec<u8>, rmp_serde::encode::Error> {
        rmp_serde::to_vec_named(&self)
    }
}
/// Decodes a base64 string into a fixed-size byte array, failing if the
/// decoded length is not exactly `N`.
fn base64_str_to_u8_array<const N: usize>(base64_str: &str) -> Result<[u8; N], CoreError> {
    let decoded = BASE64.decode(base64_str.as_bytes())?;
    decoded.try_into().map_err(|v| {
        CoreError::General(format!("Couldn't convert vec: {:?} into u8 array", v))
    })
}
/// Parameter set for constructing a transaction (fees, validity window,
/// genesis/consensus identifiers).
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct SuggestedTransactionParams {
    pub genesis_id: String,
    pub genesis_hash: HashDigest,
    pub consensus_version: String,
    pub fee: MicroAlgos,
    pub min_fee: MicroAlgos,
    // Validity window, expressed in rounds.
    pub first_valid: Round,
    pub last_valid: Round,
}
ddef2bd53c6a9b5ad9bba1975d6f7552d3db956b | 19,744 | #![cfg(test)]
use cosmwasm_std::testing::{mock_dependencies, mock_env, mock_info};
use cosmwasm_std::{from_binary, to_binary, CosmosMsg, DepsMut, Empty, Response, WasmMsg};
use cw721::{
ApprovedForAllResponse, ContractInfoResponse, Cw721Query, Cw721ReceiveMsg, Expiration,
NftInfoResponse, OwnerOfResponse,
};
use crate::{
ContractError, Cw721Contract, ExecuteMsg, Extension, InstantiateMsg, MintMsg, QueryMsg,
};
// Fixture values shared by all tests below.
const MINTER: &str = "merlin";
const CONTRACT_NAME: &str = "Magic Power";
const SYMBOL: &str = "MGK";
/// Instantiates a default cw721 contract with the fixture name/symbol/minter
/// and asserts that instantiation emits no messages.
fn setup_contract(deps: DepsMut<'_>) -> Cw721Contract<'static, Extension, Empty> {
    let contract = Cw721Contract::default();
    let msg = InstantiateMsg {
        name: CONTRACT_NAME.to_string(),
        symbol: SYMBOL.to_string(),
        minter: String::from(MINTER),
    };
    let info = mock_info("creator", &[]);
    let res = contract.instantiate(deps, mock_env(), info, msg).unwrap();
    assert_eq!(0, res.messages.len());
    contract
}
// Instantiation populates minter and contract info; token count starts at zero.
#[test]
fn proper_instantiation() {
    let mut deps = mock_dependencies(&[]);
    let contract = Cw721Contract::<Extension, Empty>::default();
    let msg = InstantiateMsg {
        name: CONTRACT_NAME.to_string(),
        symbol: SYMBOL.to_string(),
        minter: String::from(MINTER),
    };
    let info = mock_info("creator", &[]);
    // we can just call .unwrap() to assert this was a success
    let res = contract
        .instantiate(deps.as_mut(), mock_env(), info, msg)
        .unwrap();
    assert_eq!(0, res.messages.len());
    // it worked, let's query the state
    let res = contract.minter(deps.as_ref()).unwrap();
    assert_eq!(MINTER, res.minter);
    let info = contract.contract_info(deps.as_ref()).unwrap();
    assert_eq!(
        info,
        ContractInfoResponse {
            name: CONTRACT_NAME.to_string(),
            symbol: SYMBOL.to_string(),
        }
    );
    let count = contract.num_tokens(deps.as_ref()).unwrap();
    assert_eq!(0, count.count);
    // list the token_ids
    let tokens = contract.all_tokens(deps.as_ref(), None, None).unwrap();
    assert_eq!(0, tokens.tokens.len());
}
// Only the configured minter may mint; token ids are unique; minted token
// state (uri, owner, count, listing) is queryable.
#[test]
fn minting() {
    let mut deps = mock_dependencies(&[]);
    let contract = setup_contract(deps.as_mut());
    let token_id = "petrify".to_string();
    let token_uri = "https://www.merriam-webster.com/dictionary/petrify".to_string();
    let mint_msg = ExecuteMsg::Mint(MintMsg::<Extension> {
        token_id: token_id.clone(),
        owner: String::from("medusa"),
        token_uri: Some(token_uri.clone()),
        extension: None,
    });
    // random cannot mint
    let random = mock_info("random", &[]);
    let err = contract
        .execute(deps.as_mut(), mock_env(), random, mint_msg.clone())
        .unwrap_err();
    assert_eq!(err, ContractError::Unauthorized {});
    // minter can mint
    let allowed = mock_info(MINTER, &[]);
    let _ = contract
        .execute(deps.as_mut(), mock_env(), allowed, mint_msg)
        .unwrap();
    // ensure num tokens increases
    let count = contract.num_tokens(deps.as_ref()).unwrap();
    assert_eq!(1, count.count);
    // unknown nft returns error
    let _ = contract
        .nft_info(deps.as_ref(), "unknown".to_string())
        .unwrap_err();
    // this nft info is correct
    let info = contract.nft_info(deps.as_ref(), token_id.clone()).unwrap();
    assert_eq!(
        info,
        NftInfoResponse::<Extension> {
            token_uri: Some(token_uri),
            extension: None,
        }
    );
    // owner info is correct
    let owner = contract
        .owner_of(deps.as_ref(), mock_env(), token_id.clone(), true)
        .unwrap();
    assert_eq!(
        owner,
        OwnerOfResponse {
            owner: String::from("medusa"),
            approvals: vec![],
        }
    );
    // Cannot mint same token_id again
    let mint_msg2 = ExecuteMsg::Mint(MintMsg::<Extension> {
        token_id: token_id.clone(),
        owner: String::from("hercules"),
        token_uri: None,
        extension: None,
    });
    let allowed = mock_info(MINTER, &[]);
    let err = contract
        .execute(deps.as_mut(), mock_env(), allowed, mint_msg2)
        .unwrap_err();
    assert_eq!(err, ContractError::Claimed {});
    // list the token_ids
    let tokens = contract.all_tokens(deps.as_ref(), None, None).unwrap();
    assert_eq!(1, tokens.tokens.len());
    assert_eq!(vec![token_id], tokens.tokens);
}
// Transfers are rejected for non-owners and succeed for the owner, emitting
// the expected attributes.
#[test]
fn transferring_nft() {
    let mut deps = mock_dependencies(&[]);
    let contract = setup_contract(deps.as_mut());
    // Mint a token
    let token_id = "melt".to_string();
    let token_uri = "https://www.merriam-webster.com/dictionary/melt".to_string();
    let mint_msg = ExecuteMsg::Mint(MintMsg::<Extension> {
        token_id: token_id.clone(),
        owner: String::from("venus"),
        token_uri: Some(token_uri),
        extension: None,
    });
    let minter = mock_info(MINTER, &[]);
    contract
        .execute(deps.as_mut(), mock_env(), minter, mint_msg)
        .unwrap();
    // random cannot transfer
    let random = mock_info("random", &[]);
    let transfer_msg = ExecuteMsg::TransferNft {
        recipient: String::from("random"),
        token_id: token_id.clone(),
    };
    let err = contract
        .execute(deps.as_mut(), mock_env(), random, transfer_msg)
        .unwrap_err();
    assert_eq!(err, ContractError::Unauthorized {});
    // owner can
    let random = mock_info("venus", &[]);
    let transfer_msg = ExecuteMsg::TransferNft {
        recipient: String::from("random"),
        token_id: token_id.clone(),
    };
    let res = contract
        .execute(deps.as_mut(), mock_env(), random, transfer_msg)
        .unwrap();
    assert_eq!(
        res,
        Response::new()
            .add_attribute("action", "transfer_nft")
            .add_attribute("sender", "venus")
            .add_attribute("recipient", "random")
            .add_attribute("token_id", token_id)
    );
}
// SendNft is owner-only and produces a `Cw721ReceiveMsg` submessage addressed
// to the target contract.
#[test]
fn sending_nft() {
    let mut deps = mock_dependencies(&[]);
    let contract = setup_contract(deps.as_mut());
    // Mint a token
    let token_id = "melt".to_string();
    let token_uri = "https://www.merriam-webster.com/dictionary/melt".to_string();
    let mint_msg = ExecuteMsg::Mint(MintMsg::<Extension> {
        token_id: token_id.clone(),
        owner: String::from("venus"),
        token_uri: Some(token_uri),
        extension: None,
    });
    let minter = mock_info(MINTER, &[]);
    contract
        .execute(deps.as_mut(), mock_env(), minter, mint_msg)
        .unwrap();
    let msg = to_binary("You now have the melting power").unwrap();
    let target = String::from("another_contract");
    let send_msg = ExecuteMsg::SendNft {
        contract: target.clone(),
        token_id: token_id.clone(),
        msg: msg.clone(),
    };
    let random = mock_info("random", &[]);
    let err = contract
        .execute(deps.as_mut(), mock_env(), random, send_msg.clone())
        .unwrap_err();
    assert_eq!(err, ContractError::Unauthorized {});
    // but owner can
    let random = mock_info("venus", &[]);
    let res = contract
        .execute(deps.as_mut(), mock_env(), random, send_msg)
        .unwrap();
    let payload = Cw721ReceiveMsg {
        sender: String::from("venus"),
        token_id: token_id.clone(),
        msg,
    };
    let expected = payload.into_cosmos_msg(target.clone()).unwrap();
    // ensure expected serializes as we think it should
    match &expected {
        CosmosMsg::Wasm(WasmMsg::Execute { contract_addr, .. }) => {
            assert_eq!(contract_addr, &target)
        }
        m => panic!("Unexpected message type: {:?}", m),
    }
    // and make sure this is the request sent by the contract
    assert_eq!(
        res,
        Response::new()
            .add_message(expected)
            .add_attribute("action", "send_nft")
            .add_attribute("sender", "venus")
            .add_attribute("recipient", "another_contract")
            .add_attribute("token_id", token_id)
    );
}
// Per-token approvals: an approved spender can transfer; approvals are cleared
// on transfer and removed by Revoke.
#[test]
fn approving_revoking() {
    let mut deps = mock_dependencies(&[]);
    let contract = setup_contract(deps.as_mut());
    // Mint a token
    let token_id = "grow".to_string();
    let token_uri = "https://www.merriam-webster.com/dictionary/grow".to_string();
    let mint_msg = ExecuteMsg::Mint(MintMsg::<Extension> {
        token_id: token_id.clone(),
        owner: String::from("demeter"),
        token_uri: Some(token_uri),
        extension: None,
    });
    let minter = mock_info(MINTER, &[]);
    contract
        .execute(deps.as_mut(), mock_env(), minter, mint_msg)
        .unwrap();
    // Give random transferring power
    let approve_msg = ExecuteMsg::Approve {
        spender: String::from("random"),
        token_id: token_id.clone(),
        expires: None,
    };
    let owner = mock_info("demeter", &[]);
    let res = contract
        .execute(deps.as_mut(), mock_env(), owner, approve_msg)
        .unwrap();
    assert_eq!(
        res,
        Response::new()
            .add_attribute("action", "approve")
            .add_attribute("sender", "demeter")
            .add_attribute("spender", "random")
            .add_attribute("token_id", token_id.clone())
    );
    // random can now transfer
    let random = mock_info("random", &[]);
    let transfer_msg = ExecuteMsg::TransferNft {
        recipient: String::from("person"),
        token_id: token_id.clone(),
    };
    contract
        .execute(deps.as_mut(), mock_env(), random, transfer_msg)
        .unwrap();
    // Approvals are removed / cleared
    let query_msg = QueryMsg::OwnerOf {
        token_id: token_id.clone(),
        include_expired: None,
    };
    let res: OwnerOfResponse = from_binary(
        &contract
            .query(deps.as_ref(), mock_env(), query_msg.clone())
            .unwrap(),
    )
    .unwrap();
    assert_eq!(
        res,
        OwnerOfResponse {
            owner: String::from("person"),
            approvals: vec![],
        }
    );
    // Approve, revoke, and check for empty, to test revoke
    let approve_msg = ExecuteMsg::Approve {
        spender: String::from("random"),
        token_id: token_id.clone(),
        expires: None,
    };
    let owner = mock_info("person", &[]);
    contract
        .execute(deps.as_mut(), mock_env(), owner.clone(), approve_msg)
        .unwrap();
    let revoke_msg = ExecuteMsg::Revoke {
        spender: String::from("random"),
        token_id,
    };
    contract
        .execute(deps.as_mut(), mock_env(), owner, revoke_msg)
        .unwrap();
    // Approvals are now removed / cleared
    let res: OwnerOfResponse = from_binary(
        &contract
            .query(deps.as_ref(), mock_env(), query_msg)
            .unwrap(),
    )
    .unwrap();
    assert_eq!(
        res,
        OwnerOfResponse {
            owner: String::from("person"),
            approvals: vec![],
        }
    );
}
// Operator ("approve all") grants: an operator may transfer and send any of
// the grantor's tokens; operator lists paginate, expire, and can be revoked
// individually without affecting other operators.
#[test]
fn approving_all_revoking_all() {
    let mut deps = mock_dependencies(&[]);
    let contract = setup_contract(deps.as_mut());
    // Mint a couple tokens (from the same owner)
    let token_id1 = "grow1".to_string();
    let token_uri1 = "https://www.merriam-webster.com/dictionary/grow1".to_string();
    let token_id2 = "grow2".to_string();
    let token_uri2 = "https://www.merriam-webster.com/dictionary/grow2".to_string();
    let mint_msg1 = ExecuteMsg::Mint(MintMsg::<Extension> {
        token_id: token_id1.clone(),
        owner: String::from("demeter"),
        token_uri: Some(token_uri1),
        extension: None,
    });
    let minter = mock_info(MINTER, &[]);
    contract
        .execute(deps.as_mut(), mock_env(), minter.clone(), mint_msg1)
        .unwrap();
    let mint_msg2 = ExecuteMsg::Mint(MintMsg::<Extension> {
        token_id: token_id2.clone(),
        owner: String::from("demeter"),
        token_uri: Some(token_uri2),
        extension: None,
    });
    contract
        .execute(deps.as_mut(), mock_env(), minter, mint_msg2)
        .unwrap();
    // paginate the token_ids
    let tokens = contract.all_tokens(deps.as_ref(), None, Some(1)).unwrap();
    assert_eq!(1, tokens.tokens.len());
    assert_eq!(vec![token_id1.clone()], tokens.tokens);
    let tokens = contract
        .all_tokens(deps.as_ref(), Some(token_id1.clone()), Some(3))
        .unwrap();
    assert_eq!(1, tokens.tokens.len());
    assert_eq!(vec![token_id2.clone()], tokens.tokens);
    // demeter gives random full (operator) power over her tokens
    let approve_all_msg = ExecuteMsg::ApproveAll {
        operator: String::from("random"),
        expires: None,
    };
    let owner = mock_info("demeter", &[]);
    let res = contract
        .execute(deps.as_mut(), mock_env(), owner, approve_all_msg)
        .unwrap();
    assert_eq!(
        res,
        Response::new()
            .add_attribute("action", "approve_all")
            .add_attribute("sender", "demeter")
            .add_attribute("operator", "random")
    );
    // random can now transfer
    let random = mock_info("random", &[]);
    let transfer_msg = ExecuteMsg::TransferNft {
        recipient: String::from("person"),
        token_id: token_id1,
    };
    contract
        .execute(deps.as_mut(), mock_env(), random.clone(), transfer_msg)
        .unwrap();
    // random can now send
    let inner_msg = WasmMsg::Execute {
        contract_addr: "another_contract".into(),
        msg: to_binary("You now also have the growing power").unwrap(),
        funds: vec![],
    };
    let msg: CosmosMsg = CosmosMsg::Wasm(inner_msg);
    let send_msg = ExecuteMsg::SendNft {
        contract: String::from("another_contract"),
        token_id: token_id2,
        msg: to_binary(&msg).unwrap(),
    };
    contract
        .execute(deps.as_mut(), mock_env(), random, send_msg)
        .unwrap();
    // Approve_all, revoke_all, and check for empty, to test revoke_all
    let approve_all_msg = ExecuteMsg::ApproveAll {
        operator: String::from("operator"),
        expires: None,
    };
    // person is now the owner of the tokens
    let owner = mock_info("person", &[]);
    contract
        .execute(deps.as_mut(), mock_env(), owner, approve_all_msg)
        .unwrap();
    let res = contract
        .all_approvals(
            deps.as_ref(),
            mock_env(),
            String::from("person"),
            true,
            None,
            None,
        )
        .unwrap();
    assert_eq!(
        res,
        ApprovedForAllResponse {
            operators: vec![cw721::Approval {
                spender: String::from("operator"),
                expires: Expiration::Never {}
            }]
        }
    );
    // second approval
    let buddy_expires = Expiration::AtHeight(1234567);
    let approve_all_msg = ExecuteMsg::ApproveAll {
        operator: String::from("buddy"),
        expires: Some(buddy_expires),
    };
    let owner = mock_info("person", &[]);
    contract
        .execute(deps.as_mut(), mock_env(), owner.clone(), approve_all_msg)
        .unwrap();
    // and paginate queries
    let res = contract
        .all_approvals(
            deps.as_ref(),
            mock_env(),
            String::from("person"),
            true,
            None,
            Some(1),
        )
        .unwrap();
    assert_eq!(
        res,
        ApprovedForAllResponse {
            operators: vec![cw721::Approval {
                spender: String::from("buddy"),
                expires: buddy_expires,
            }]
        }
    );
    let res = contract
        .all_approvals(
            deps.as_ref(),
            mock_env(),
            String::from("person"),
            true,
            Some(String::from("buddy")),
            Some(2),
        )
        .unwrap();
    assert_eq!(
        res,
        ApprovedForAllResponse {
            operators: vec![cw721::Approval {
                spender: String::from("operator"),
                expires: Expiration::Never {}
            }]
        }
    );
    let revoke_all_msg = ExecuteMsg::RevokeAll {
        operator: String::from("operator"),
    };
    contract
        .execute(deps.as_mut(), mock_env(), owner, revoke_all_msg)
        .unwrap();
    // Approvals are removed / cleared without affecting others
    let res = contract
        .all_approvals(
            deps.as_ref(),
            mock_env(),
            String::from("person"),
            false,
            None,
            None,
        )
        .unwrap();
    assert_eq!(
        res,
        ApprovedForAllResponse {
            operators: vec![cw721::Approval {
                spender: String::from("buddy"),
                expires: buddy_expires,
            }]
        }
    );
    // ensure the filter works (nothing should be here)
    let mut late_env = mock_env();
    late_env.block.height = 1234568; //expired
    let res = contract
        .all_approvals(
            deps.as_ref(),
            late_env,
            String::from("person"),
            false,
            None,
            None,
        )
        .unwrap();
    assert_eq!(0, res.operators.len());
}
// Token listing queries: global ordering, pagination, and per-owner filtering
// with pagination.
#[test]
fn query_tokens_by_owner() {
    let mut deps = mock_dependencies(&[]);
    let contract = setup_contract(deps.as_mut());
    let minter = mock_info(MINTER, &[]);
    // Mint a couple tokens (from the same owner)
    let token_id1 = "grow1".to_string();
    let demeter = String::from("Demeter");
    let token_id2 = "grow2".to_string();
    let ceres = String::from("Ceres");
    let token_id3 = "sing".to_string();
    let mint_msg = ExecuteMsg::Mint(MintMsg::<Extension> {
        token_id: token_id1.clone(),
        owner: demeter.clone(),
        token_uri: None,
        extension: None,
    });
    contract
        .execute(deps.as_mut(), mock_env(), minter.clone(), mint_msg)
        .unwrap();
    let mint_msg = ExecuteMsg::Mint(MintMsg::<Extension> {
        token_id: token_id2.clone(),
        owner: ceres.clone(),
        token_uri: None,
        extension: None,
    });
    contract
        .execute(deps.as_mut(), mock_env(), minter.clone(), mint_msg)
        .unwrap();
    let mint_msg = ExecuteMsg::Mint(MintMsg::<Extension> {
        token_id: token_id3.clone(),
        owner: demeter.clone(),
        token_uri: None,
        extension: None,
    });
    contract
        .execute(deps.as_mut(), mock_env(), minter, mint_msg)
        .unwrap();
    // get all tokens in order:
    let expected = vec![token_id1.clone(), token_id2.clone(), token_id3.clone()];
    let tokens = contract.all_tokens(deps.as_ref(), None, None).unwrap();
    assert_eq!(&expected, &tokens.tokens);
    // paginate
    let tokens = contract.all_tokens(deps.as_ref(), None, Some(2)).unwrap();
    assert_eq!(&expected[..2], &tokens.tokens[..]);
    let tokens = contract
        .all_tokens(deps.as_ref(), Some(expected[1].clone()), None)
        .unwrap();
    assert_eq!(&expected[2..], &tokens.tokens[..]);
    // get by owner
    let by_ceres = vec![token_id2];
    let by_demeter = vec![token_id1, token_id3];
    // all tokens by owner
    let tokens = contract
        .tokens(deps.as_ref(), demeter.clone(), None, None)
        .unwrap();
    assert_eq!(&by_demeter, &tokens.tokens);
    let tokens = contract.tokens(deps.as_ref(), ceres, None, None).unwrap();
    assert_eq!(&by_ceres, &tokens.tokens);
    // paginate for demeter
    let tokens = contract
        .tokens(deps.as_ref(), demeter.clone(), None, Some(1))
        .unwrap();
    assert_eq!(&by_demeter[..1], &tokens.tokens[..]);
    let tokens = contract
        .tokens(deps.as_ref(), demeter, Some(by_demeter[0].clone()), Some(3))
        .unwrap();
    assert_eq!(&by_demeter[1..], &tokens.tokens[..]);
}
| 29.779789 | 91 | 0.579771 |
0822147b47a5332123c083e16084aec2b45eab74 | 4,503 | #[doc = "Reader of register TCD9_CITER_ELINKYES"]
pub type R = crate::R<u16, super::TCD9_CITER_ELINKYES>;
#[doc = "Writer for register TCD9_CITER_ELINKYES"]
pub type W = crate::W<u16, super::TCD9_CITER_ELINKYES>;
#[doc = "Register TCD9_CITER_ELINKYES `reset()`'s with value 0"]
impl crate::ResetValue for super::TCD9_CITER_ELINKYES {
type Type = u16;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `CITER`"]
pub type CITER_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `CITER`"]
pub struct CITER_W<'a> {
w: &'a mut W,
}
impl<'a> CITER_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01ff) | ((value as u16) & 0x01ff);
self.w
}
}
#[doc = "Reader of field `LINKCH`"]
pub type LINKCH_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `LINKCH`"]
pub struct LINKCH_W<'a> {
w: &'a mut W,
}
impl<'a> LINKCH_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x1f << 9)) | (((value as u16) & 0x1f) << 9);
self.w
}
}
#[doc = "Enable channel-to-channel linking on minor-loop complete\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ELINK_A {
#[doc = "0: The channel-to-channel linking is disabled"]
ELINK_0 = 0,
#[doc = "1: The channel-to-channel linking is enabled"]
ELINK_1 = 1,
}
impl From<ELINK_A> for bool {
#[inline(always)]
fn from(variant: ELINK_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `ELINK`"]
pub type ELINK_R = crate::R<bool, ELINK_A>;
impl ELINK_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> ELINK_A {
match self.bits {
false => ELINK_A::ELINK_0,
true => ELINK_A::ELINK_1,
}
}
#[doc = "Checks if the value of the field is `ELINK_0`"]
#[inline(always)]
pub fn is_elink_0(&self) -> bool {
*self == ELINK_A::ELINK_0
}
#[doc = "Checks if the value of the field is `ELINK_1`"]
#[inline(always)]
pub fn is_elink_1(&self) -> bool {
*self == ELINK_A::ELINK_1
}
}
#[doc = "Write proxy for field `ELINK`"]
pub struct ELINK_W<'a> {
w: &'a mut W,
}
impl<'a> ELINK_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: ELINK_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "The channel-to-channel linking is disabled"]
#[inline(always)]
pub fn elink_0(self) -> &'a mut W {
self.variant(ELINK_A::ELINK_0)
}
#[doc = "The channel-to-channel linking is enabled"]
#[inline(always)]
pub fn elink_1(self) -> &'a mut W {
self.variant(ELINK_A::ELINK_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u16) & 0x01) << 15);
self.w
}
}
// Field readers: each extracts its bit range from the cached register value.
impl R {
    #[doc = "Bits 0:8 - Current Major Iteration Count"]
    #[inline(always)]
    pub fn citer(&self) -> CITER_R {
        CITER_R::new((self.bits & 0x01ff) as u16)
    }
    #[doc = "Bits 9:13 - Minor Loop Link Channel Number"]
    #[inline(always)]
    pub fn linkch(&self) -> LINKCH_R {
        LINKCH_R::new(((self.bits >> 9) & 0x1f) as u8)
    }
    #[doc = "Bit 15 - Enable channel-to-channel linking on minor-loop complete"]
    #[inline(always)]
    pub fn elink(&self) -> ELINK_R {
        ELINK_R::new(((self.bits >> 15) & 0x01) != 0)
    }
}
// Field writers: each returns a proxy that masks its bits into the register.
impl W {
    #[doc = "Bits 0:8 - Current Major Iteration Count"]
    #[inline(always)]
    pub fn citer(&mut self) -> CITER_W {
        CITER_W { w: self }
    }
    #[doc = "Bits 9:13 - Minor Loop Link Channel Number"]
    #[inline(always)]
    pub fn linkch(&mut self) -> LINKCH_W {
        LINKCH_W { w: self }
    }
    #[doc = "Bit 15 - Enable channel-to-channel linking on minor-loop complete"]
    #[inline(always)]
    pub fn elink(&mut self) -> ELINK_W {
        ELINK_W { w: self }
    }
}
| 30.02 | 88 | 0.568954 |
67fe10f3d54d21c6bc936f72acd537ce114dad8d | 4,109 | // Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend};
use crate::protocols_handler::{
KeepAlive,
SubstreamProtocol,
ProtocolsHandler,
ProtocolsHandlerEvent,
ProtocolsHandlerUpgrErr
};
use std::task::{Context, Poll};
/// Wrapper around a protocol handler that turns the output event into something else.
pub struct MapOutEvent<TProtoHandler, TMap> {
    // The wrapped handler whose events are being transformed.
    inner: TProtoHandler,
    // Transformation applied to each output event of `inner`.
    map: TMap,
}
impl<TProtoHandler, TMap> MapOutEvent<TProtoHandler, TMap> {
    /// Creates a `MapOutEvent`.
    #[inline]
    pub(crate) fn new(inner: TProtoHandler, map: TMap) -> Self {
        Self { inner, map }
    }
}
impl<TProtoHandler, TMap, TNewOut> ProtocolsHandler for MapOutEvent<TProtoHandler, TMap>
where
TProtoHandler: ProtocolsHandler,
TMap: FnMut(TProtoHandler::OutEvent) -> TNewOut,
TNewOut: Send + 'static,
TMap: Send + 'static,
{
type InEvent = TProtoHandler::InEvent;
type OutEvent = TNewOut;
type Error = TProtoHandler::Error;
type InboundProtocol = TProtoHandler::InboundProtocol;
type OutboundProtocol = TProtoHandler::OutboundProtocol;
type OutboundOpenInfo = TProtoHandler::OutboundOpenInfo;
#[inline]
fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol> {
self.inner.listen_protocol()
}
#[inline]
fn inject_fully_negotiated_inbound(
&mut self,
protocol: <Self::InboundProtocol as InboundUpgradeSend>::Output
) {
self.inner.inject_fully_negotiated_inbound(protocol)
}
#[inline]
fn inject_fully_negotiated_outbound(
&mut self,
protocol: <Self::OutboundProtocol as OutboundUpgradeSend>::Output,
info: Self::OutboundOpenInfo
) {
self.inner.inject_fully_negotiated_outbound(protocol, info)
}
#[inline]
fn inject_event(&mut self, event: Self::InEvent) {
self.inner.inject_event(event)
}
#[inline]
fn inject_dial_upgrade_error(&mut self, info: Self::OutboundOpenInfo, error: ProtocolsHandlerUpgrErr<<Self::OutboundProtocol as OutboundUpgradeSend>::Error>) {
self.inner.inject_dial_upgrade_error(info, error)
}
#[inline]
fn connection_keep_alive(&self) -> KeepAlive {
self.inner.connection_keep_alive()
}
#[inline]
fn poll(
&mut self,
cx: &mut Context,
) -> Poll<
ProtocolsHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::OutEvent, Self::Error>,
> {
self.inner.poll(cx).map(|ev| {
match ev {
ProtocolsHandlerEvent::Custom(ev) => ProtocolsHandlerEvent::Custom((self.map)(ev)),
ProtocolsHandlerEvent::Close(err) => ProtocolsHandlerEvent::Close(err),
ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info } => {
ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info }
}
}
})
}
}
| 34.822034 | 163 | 0.688002 |
89e0b521d304cf205147a3649ced958e7bb9f52a | 1,230 | /*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma version(1)
#pragma rs java_package_name(android.renderscript.cts)
// Don't edit this file! It is auto-generated by frameworks/rs/api/gen_runtime.
float __attribute__((kernel)) testNativeNormalizeFloatFloat(float inV) {
return native_normalize(inV);
}
float2 __attribute__((kernel)) testNativeNormalizeFloat2Float2(float2 inV) {
return native_normalize(inV);
}
float3 __attribute__((kernel)) testNativeNormalizeFloat3Float3(float3 inV) {
return native_normalize(inV);
}
float4 __attribute__((kernel)) testNativeNormalizeFloat4Float4(float4 inV) {
return native_normalize(inV);
}
| 32.368421 | 80 | 0.760163 |
14cb54970c27c00c9e3209dca254a7a0d981d173 | 1,964 | use termion::color::{Bg, Cyan, Reset};
use termion::event::{Event, Key};
use super::{command_worker::CommandWorker, EventWorker};
use state::State;
use util::Direction;
#[derive(Debug)]
pub struct EditWorker {}

impl Default for EditWorker {
    // The worker is stateless, so the default is just the empty struct.
    fn default() -> Self {
        Self {}
    }
}
impl EventWorker for EditWorker {
    // Status-bar label shown while in edit mode (cyan background).
    fn mode(&self) -> String {
        format!("{} Edit {}", Bg(Cyan), Bg(Reset))
    }
    // Handles one input event while in edit mode. Returns Some(worker) to
    // switch modes (Esc -> command mode), None to stay in edit mode.
    // NOTE: the `Char('\n')` arm must stay BEFORE the generic `Char(c)` arm,
    // otherwise newline would be inserted as an ordinary character.
    fn update(&mut self, state: &mut State, e: Event) -> Option<Box<dyn EventWorker>> {
        match e {
            Event::Key(Key::Char('\n')) => {
                // Insert a line break at the cursor, then move to column 0
                // of the next line.
                let cursor = state.current_panel().cursor.clone();
                let buffer_id = state.current_panel().buffer_id;
                state
                    .buffers
                    .get_mut(&buffer_id)
                    .expect("internal error: missing current buffer")
                    .insert_line_at_cursor(&cursor);
                let ref mut cursor = state.current_panel_mut().cursor;
                cursor.x = 0;
                cursor.go(Direction::Down, 1);
            }
            Event::Key(Key::Backspace) => {
                // Erase the character at the cursor and step left.
                let cursor = state.current_panel().cursor.clone();
                state.current_buffer_mut().erase_at_cursor(&cursor);
                state.current_panel_mut().cursor.go(Direction::Left, 1);
            }
            Event::Key(Key::Char(c)) => {
                // Insert a printable character and advance the cursor.
                let cursor = state.current_panel().cursor.clone();
                let buffer_id = state.current_panel().buffer_id;
                state
                    .buffers
                    .get_mut(&buffer_id)
                    .expect("internal error: missing current buffer")
                    .insert_at_cursor(c, &cursor);
                state.current_panel_mut().cursor.go(Direction::Right, 1);
            }
            // Esc leaves edit mode and hands control to the command worker.
            Event::Key(Key::Esc) => return Some(Box::new(CommandWorker::default())),
            // All other events (mouse, unhandled keys) are ignored.
            _ => (),
        }
        None
    }
}
| 34.45614 | 87 | 0.519348 |
0ac1fcb2746fd877eb71bca75910b0dafb15ebc1 | 4,162 | use tokio::{io::{AsyncReadExt, AsyncWriteExt}, net::{TcpStream, tcp::{OwnedReadHalf, OwnedWriteHalf}}, time::sleep};
use tokio::net::tcp::{ReadHalf, WriteHalf};
use tokio::time::{Duration};
use tokio::sync::oneshot::{channel, Sender, Receiver};
use futures::{future::Either, pin_mut};
use log::trace;
use log::error;
/// Bidirectional byte pump between a client socket and an endpoint socket.
pub struct CustomPump<'a> {
    // Identifier used as the log prefix for this connection pair.
    id: &'a str,
    client_socket: TcpStream,
    endpoint_socket: TcpStream,
    // Scratch space; split in half, one half per pump direction.
    buffer: &'a mut [u8],
    // Per-read timeout in milliseconds (fed to Duration::from_millis in run_pump).
    read_timeout: u64
}
impl<'a> CustomPump<'a> {
pub fn from(id: &'a str, client_socket: TcpStream, endpoint_socket: TcpStream, buffer: &'a mut [u8], read_timeout: u64) -> Self {
CustomPump { id, client_socket, endpoint_socket, buffer, read_timeout }
}
pub async fn start(mut self) {
self.run_pumps_custom().await;
}
async fn run_pumps_custom(mut self) {
// Split the buffer.
let buffer_size = self.buffer.len();
let (buffer_up, buffer_down) = self.buffer.split_at_mut(buffer_size / 2);
// Split the sockets.
let (client_socket_read, client_socket_write) = self.client_socket.into_split();
let (endpoint_socket_read, endpoint_socket_write) = self.endpoint_socket.into_split();
// Create cancellation channels.
let (client_cancellation_sender, client_cancellation_receiver) = channel::<bool>();
let (endpoint_cancellation_sender, endpoint_cancellation_receiver) = channel::<bool>();
// FYI: Cancellation senders are moved because this is a one-shot channel. The sender can only send
// once, and the object is moved when calling the send method.
// Run the pumps.
let pump_up = CustomPump::run_pump(&self.id, "up", client_socket_read, endpoint_socket_write, client_cancellation_sender, endpoint_cancellation_receiver, buffer_up, self.read_timeout);
let pump_down = CustomPump::run_pump(&self.id, "down", endpoint_socket_read, client_socket_write, endpoint_cancellation_sender, client_cancellation_receiver, buffer_down, self.read_timeout);
futures::future::join(pump_up, pump_down).await;
}
async fn run_pump(
id: &str,
direction: &str,
mut from: OwnedReadHalf,
mut to: OwnedWriteHalf,
cancel_sender: Sender<bool>,
mut cancel_receiver: Receiver<bool>,
mut buffer: &mut [u8],
read_timeout: u64
) {
loop {
let mut read_fut = from.read(buffer);
let mut timeout_fut = sleep(Duration::from_millis(read_timeout));
pin_mut!(read_fut);
pin_mut!(timeout_fut);
// Read or timeout.
let select_future = futures::future::select(
read_fut,
timeout_fut
).await;
// If we read successfully, write.
if let Either::Left((Ok(read), _)) = select_future {
// Reading 0 bytes is a close, and a write error is a receiver close. Notify and return.
if read == 0 {
trace!("[{}] Read {} bytes while pumping {}, closing.", id, read, direction);
cancel_sender.send(true).unwrap_or_default();
return;
}
if let Err(err) = to.write_all(&buffer[..read]).await {
error!("[{}] Failed to write {} {} bytes of data, closing: {}", id, direction, read, err);
return;
}
if let Err(err) = to.flush().await {
error!("[{}] Failed to flush {} {} bytes of data, closing: {}", id, direction, read, err);
return;
}
trace!("[{}] Pumped {} {} bytes of data: {:x?}.", id, direction, read, &buffer[0..10]);
} else if let Either::Left((Err(err), _)) = select_future {
error!("[{}] Failed to read {}: {}.", id, direction, err);
return;
}
//Return if other thread has cancelled.
if cancel_receiver.try_recv().unwrap_or(false) {
return;
}
}
}
} | 39.264151 | 198 | 0.583373 |
e6b8eb2a42b1801ec2276143f0a4ebbd15346656 | 35,175 | #![allow(unused_imports, non_camel_case_types)]
use crate::model::CodeableConcept::CodeableConcept;
use crate::model::ContactDetail::ContactDetail;
use crate::model::Element::Element;
use crate::model::Extension::Extension;
use crate::model::Identifier::Identifier;
use crate::model::Meta::Meta;
use crate::model::Narrative::Narrative;
use crate::model::Reference::Reference;
use crate::model::ResourceList::ResourceList;
use crate::model::TestScript_Destination::TestScript_Destination;
use crate::model::TestScript_Fixture::TestScript_Fixture;
use crate::model::TestScript_Metadata::TestScript_Metadata;
use crate::model::TestScript_Origin::TestScript_Origin;
use crate::model::TestScript_Setup::TestScript_Setup;
use crate::model::TestScript_Teardown::TestScript_Teardown;
use crate::model::TestScript_Test::TestScript_Test;
use crate::model::TestScript_Variable::TestScript_Variable;
use crate::model::UsageContext::UsageContext;
use serde_json::json;
use serde_json::value::Value;
use std::borrow::Cow;
/// A structured set of tests against a FHIR server or client implementation to
/// determine compliance against the FHIR specification.
#[derive(Debug)]
pub struct TestScript<'a> {
    // Raw JSON backing store; all accessors lazily read from / write to this value.
    pub(crate) value: Cow<'a, Value>,
}
impl TestScript<'_> {
pub fn new(value: &Value) -> TestScript {
TestScript {
value: Cow::Borrowed(value),
}
}
pub fn to_json(&self) -> Value {
(*self.value).clone()
}
/// Extensions for copyright
pub fn _copyright(&self) -> Option<Element> {
if let Some(val) = self.value.get("_copyright") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for date
pub fn _date(&self) -> Option<Element> {
if let Some(val) = self.value.get("_date") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for description
pub fn _description(&self) -> Option<Element> {
if let Some(val) = self.value.get("_description") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for experimental
pub fn _experimental(&self) -> Option<Element> {
if let Some(val) = self.value.get("_experimental") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for implicitRules
pub fn _implicit_rules(&self) -> Option<Element> {
if let Some(val) = self.value.get("_implicitRules") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for language
pub fn _language(&self) -> Option<Element> {
if let Some(val) = self.value.get("_language") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for name
pub fn _name(&self) -> Option<Element> {
if let Some(val) = self.value.get("_name") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for publisher
pub fn _publisher(&self) -> Option<Element> {
if let Some(val) = self.value.get("_publisher") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for purpose
pub fn _purpose(&self) -> Option<Element> {
if let Some(val) = self.value.get("_purpose") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for status
pub fn _status(&self) -> Option<Element> {
if let Some(val) = self.value.get("_status") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for title
pub fn _title(&self) -> Option<Element> {
if let Some(val) = self.value.get("_title") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for url
pub fn _url(&self) -> Option<Element> {
if let Some(val) = self.value.get("_url") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for version
pub fn _version(&self) -> Option<Element> {
if let Some(val) = self.value.get("_version") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Contact details to assist a user in finding and communicating with the
/// publisher.
pub fn contact(&self) -> Option<Vec<ContactDetail>> {
if let Some(Value::Array(val)) = self.value.get("contact") {
return Some(
val.into_iter()
.map(|e| ContactDetail {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// These resources do not have an independent existence apart from the resource
/// that contains them - they cannot be identified independently, and nor can they
/// have their own independent transaction scope.
pub fn contained(&self) -> Option<Vec<ResourceList>> {
if let Some(Value::Array(val)) = self.value.get("contained") {
return Some(
val.into_iter()
.map(|e| ResourceList {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// A copyright statement relating to the test script and/or its contents. Copyright
/// statements are generally legal restrictions on the use and publishing of the
/// test script.
pub fn copyright(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("copyright") {
return Some(string);
}
return None;
}
/// The date (and optionally time) when the test script was published. The date
/// must change when the business version changes and it must change if the status
/// code changes. In addition, it should change when the substantive content of the
/// test script changes.
pub fn date(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("date") {
return Some(string);
}
return None;
}
/// A free text natural language description of the test script from a consumer's
/// perspective.
pub fn description(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("description") {
return Some(string);
}
return None;
}
/// An abstract server used in operations within this test script in the destination
/// element.
pub fn destination(&self) -> Option<Vec<TestScript_Destination>> {
if let Some(Value::Array(val)) = self.value.get("destination") {
return Some(
val.into_iter()
.map(|e| TestScript_Destination {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// A Boolean value to indicate that this test script is authored for testing
/// purposes (or education/evaluation/marketing) and is not intended to be used for
/// genuine usage.
pub fn experimental(&self) -> Option<bool> {
if let Some(val) = self.value.get("experimental") {
return Some(val.as_bool().unwrap());
}
return None;
}
/// May be used to represent additional information that is not part of the basic
/// definition of the resource. To make the use of extensions safe and manageable,
/// there is a strict set of governance applied to the definition and use of
/// extensions. Though any implementer can define an extension, there is a set of
/// requirements that SHALL be met as part of the definition of the extension.
pub fn extension(&self) -> Option<Vec<Extension>> {
if let Some(Value::Array(val)) = self.value.get("extension") {
return Some(
val.into_iter()
.map(|e| Extension {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// Fixture in the test script - by reference (uri). All fixtures are required for
/// the test script to execute.
pub fn fixture(&self) -> Option<Vec<TestScript_Fixture>> {
if let Some(Value::Array(val)) = self.value.get("fixture") {
return Some(
val.into_iter()
.map(|e| TestScript_Fixture {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// The logical id of the resource, as used in the URL for the resource. Once
/// assigned, this value never changes.
pub fn id(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("id") {
return Some(string);
}
return None;
}
/// A formal identifier that is used to identify this test script when it is
/// represented in other formats, or referenced in a specification, model, design or
/// an instance.
pub fn identifier(&self) -> Option<Identifier> {
if let Some(val) = self.value.get("identifier") {
return Some(Identifier {
value: Cow::Borrowed(val),
});
}
return None;
}
/// A reference to a set of rules that were followed when the resource was
/// constructed, and which must be understood when processing the content. Often,
/// this is a reference to an implementation guide that defines the special rules
/// along with other profiles etc.
pub fn implicit_rules(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("implicitRules") {
return Some(string);
}
return None;
}
/// A legal or geographic region in which the test script is intended to be used.
pub fn jurisdiction(&self) -> Option<Vec<CodeableConcept>> {
if let Some(Value::Array(val)) = self.value.get("jurisdiction") {
return Some(
val.into_iter()
.map(|e| CodeableConcept {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// The base language in which the resource is written.
pub fn language(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("language") {
return Some(string);
}
return None;
}
/// The metadata about the resource. This is content that is maintained by the
/// infrastructure. Changes to the content might not always be associated with
/// version changes to the resource.
pub fn meta(&self) -> Option<Meta> {
if let Some(val) = self.value.get("meta") {
return Some(Meta {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The required capability must exist and are assumed to function correctly on the
/// FHIR server being tested.
pub fn metadata(&self) -> Option<TestScript_Metadata> {
if let Some(val) = self.value.get("metadata") {
return Some(TestScript_Metadata {
value: Cow::Borrowed(val),
});
}
return None;
}
/// May be used to represent additional information that is not part of the basic
/// definition of the resource and that modifies the understanding of the element
/// that contains it and/or the understanding of the containing element's
/// descendants. Usually modifier elements provide negation or qualification. To
/// make the use of extensions safe and manageable, there is a strict set of
/// governance applied to the definition and use of extensions. Though any
/// implementer is allowed to define an extension, there is a set of requirements
/// that SHALL be met as part of the definition of the extension. Applications
/// processing a resource are required to check for modifier extensions. Modifier
/// extensions SHALL NOT change the meaning of any elements on Resource or
/// DomainResource (including cannot change the meaning of modifierExtension
/// itself).
pub fn modifier_extension(&self) -> Option<Vec<Extension>> {
if let Some(Value::Array(val)) = self.value.get("modifierExtension") {
return Some(
val.into_iter()
.map(|e| Extension {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// A natural language name identifying the test script. This name should be usable
/// as an identifier for the module by machine processing applications such as code
/// generation.
pub fn name(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("name") {
return Some(string);
}
return None;
}
/// An abstract server used in operations within this test script in the origin
/// element.
pub fn origin(&self) -> Option<Vec<TestScript_Origin>> {
if let Some(Value::Array(val)) = self.value.get("origin") {
return Some(
val.into_iter()
.map(|e| TestScript_Origin {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// Reference to the profile to be used for validation.
pub fn profile(&self) -> Option<Vec<Reference>> {
if let Some(Value::Array(val)) = self.value.get("profile") {
return Some(
val.into_iter()
.map(|e| Reference {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// The name of the organization or individual that published the test script.
pub fn publisher(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("publisher") {
return Some(string);
}
return None;
}
/// Explanation of why this test script is needed and why it has been designed as it
/// has.
pub fn purpose(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("purpose") {
return Some(string);
}
return None;
}
/// A series of required setup operations before tests are executed.
pub fn setup(&self) -> Option<TestScript_Setup> {
if let Some(val) = self.value.get("setup") {
return Some(TestScript_Setup {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The status of this test script. Enables tracking the life-cycle of the content.
pub fn status(&self) -> Option<TestScriptStatus> {
if let Some(Value::String(val)) = self.value.get("status") {
return Some(TestScriptStatus::from_string(&val).unwrap());
}
return None;
}
/// A series of operations required to clean up after all the tests are executed
/// (successfully or otherwise).
pub fn teardown(&self) -> Option<TestScript_Teardown> {
if let Some(val) = self.value.get("teardown") {
return Some(TestScript_Teardown {
value: Cow::Borrowed(val),
});
}
return None;
}
/// A test in this script.
pub fn test(&self) -> Option<Vec<TestScript_Test>> {
if let Some(Value::Array(val)) = self.value.get("test") {
return Some(
val.into_iter()
.map(|e| TestScript_Test {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// A human-readable narrative that contains a summary of the resource and can be
/// used to represent the content of the resource to a human. The narrative need not
/// encode all the structured data, but is required to contain sufficient detail to
/// make it "clinically safe" for a human to just read the narrative. Resource
/// definitions may define what content should be represented in the narrative to
/// ensure clinical safety.
pub fn text(&self) -> Option<Narrative> {
if let Some(val) = self.value.get("text") {
return Some(Narrative {
value: Cow::Borrowed(val),
});
}
return None;
}
/// A short, descriptive, user-friendly title for the test script.
pub fn title(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("title") {
return Some(string);
}
return None;
}
/// An absolute URI that is used to identify this test script when it is referenced
/// in a specification, model, design or an instance; also called its canonical
/// identifier. This SHOULD be globally unique and SHOULD be a literal address at
/// which at which an authoritative instance of this test script is (or will be)
/// published. This URL can be the target of a canonical reference. It SHALL remain
/// the same when the test script is stored on different servers.
pub fn url(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("url") {
return Some(string);
}
return None;
}
/// The content was developed with a focus and intent of supporting the contexts
/// that are listed. These contexts may be general categories (gender, age, ...) or
/// may be references to specific programs (insurance plans, studies, ...) and may
/// be used to assist with indexing and searching for appropriate test script
/// instances.
pub fn use_context(&self) -> Option<Vec<UsageContext>> {
if let Some(Value::Array(val)) = self.value.get("useContext") {
return Some(
val.into_iter()
.map(|e| UsageContext {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// Variable is set based either on element value in response body or on header
/// field value in the response headers.
pub fn variable(&self) -> Option<Vec<TestScript_Variable>> {
if let Some(Value::Array(val)) = self.value.get("variable") {
return Some(
val.into_iter()
.map(|e| TestScript_Variable {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// The identifier that is used to identify this version of the test script when it
/// is referenced in a specification, model, design or instance. This is an
/// arbitrary value managed by the test script author and is not expected to be
/// globally unique. For example, it might be a timestamp (e.g. yyyymmdd) if a
/// managed version is not available. There is also no expectation that versions can
/// be placed in a lexicographical sequence.
pub fn version(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("version") {
return Some(string);
}
return None;
}
pub fn validate(&self) -> bool {
if let Some(_val) = self._copyright() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._date() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._description() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._experimental() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._implicit_rules() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._language() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._name() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._publisher() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._purpose() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._status() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._title() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._url() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._version() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.contact() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.contained() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.copyright() {}
if let Some(_val) = self.date() {}
if let Some(_val) = self.description() {}
if let Some(_val) = self.destination() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.experimental() {}
if let Some(_val) = self.extension() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.fixture() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.id() {}
if let Some(_val) = self.identifier() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.implicit_rules() {}
if let Some(_val) = self.jurisdiction() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.language() {}
if let Some(_val) = self.meta() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.metadata() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.modifier_extension() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.name() {}
if let Some(_val) = self.origin() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.profile() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.publisher() {}
if let Some(_val) = self.purpose() {}
if let Some(_val) = self.setup() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.status() {}
if let Some(_val) = self.teardown() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.test() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.text() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.title() {}
if let Some(_val) = self.url() {}
if let Some(_val) = self.use_context() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.variable() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.version() {}
return true;
}
}
#[derive(Debug)]
pub struct TestScriptBuilder {
    // Mutable JSON object under construction; finalized by `build()`.
    pub(crate) value: Value,
}
impl TestScriptBuilder {
    /// Finalize the builder, cloning the accumulated JSON into an owned `TestScript`.
    pub fn build(&self) -> TestScript {
        TestScript {
            value: Cow::Owned(self.value.clone()),
        }
    }
    /// Start from an existing resource's JSON so it can be further modified.
    pub fn with(existing: TestScript) -> TestScriptBuilder {
        TestScriptBuilder {
            value: (*existing.value).clone(),
        }
    }
    /// Start from an empty JSON object.
    pub fn new() -> TestScriptBuilder {
        // NOTE(review): `mut` is unnecessary here — `__value` is never mutated
        // before being moved into the builder (compiler emits unused_mut).
        let mut __value: Value = json!({});
        return TestScriptBuilder { value: __value };
    }
    // The `_field` setters below store an `Element` under the underscore-prefixed
    // JSON key — presumably the FHIR primitive-extension companion of the matching
    // plain field (TODO confirm against the Element type's serialization).
    pub fn _copyright<'a>(&'a mut self, val: Element) -> &'a mut TestScriptBuilder {
        self.value["_copyright"] = json!(val.value);
        return self;
    }
    pub fn _date<'a>(&'a mut self, val: Element) -> &'a mut TestScriptBuilder {
        self.value["_date"] = json!(val.value);
        return self;
    }
    pub fn _description<'a>(&'a mut self, val: Element) -> &'a mut TestScriptBuilder {
        self.value["_description"] = json!(val.value);
        return self;
    }
    pub fn _experimental<'a>(&'a mut self, val: Element) -> &'a mut TestScriptBuilder {
        self.value["_experimental"] = json!(val.value);
        return self;
    }
    pub fn _implicit_rules<'a>(&'a mut self, val: Element) -> &'a mut TestScriptBuilder {
        self.value["_implicitRules"] = json!(val.value);
        return self;
    }
    pub fn _language<'a>(&'a mut self, val: Element) -> &'a mut TestScriptBuilder {
        self.value["_language"] = json!(val.value);
        return self;
    }
    pub fn _name<'a>(&'a mut self, val: Element) -> &'a mut TestScriptBuilder {
        self.value["_name"] = json!(val.value);
        return self;
    }
    pub fn _publisher<'a>(&'a mut self, val: Element) -> &'a mut TestScriptBuilder {
        self.value["_publisher"] = json!(val.value);
        return self;
    }
    pub fn _purpose<'a>(&'a mut self, val: Element) -> &'a mut TestScriptBuilder {
        self.value["_purpose"] = json!(val.value);
        return self;
    }
    pub fn _status<'a>(&'a mut self, val: Element) -> &'a mut TestScriptBuilder {
        self.value["_status"] = json!(val.value);
        return self;
    }
    pub fn _title<'a>(&'a mut self, val: Element) -> &'a mut TestScriptBuilder {
        self.value["_title"] = json!(val.value);
        return self;
    }
    pub fn _url<'a>(&'a mut self, val: Element) -> &'a mut TestScriptBuilder {
        self.value["_url"] = json!(val.value);
        return self;
    }
    pub fn _version<'a>(&'a mut self, val: Element) -> &'a mut TestScriptBuilder {
        self.value["_version"] = json!(val.value);
        return self;
    }
    // Composite/complex fields serialize each item through its backing `.value`;
    // plain &str/bool fields are stored directly via `json!`.
    pub fn contact<'a>(&'a mut self, val: Vec<ContactDetail>) -> &'a mut TestScriptBuilder {
        self.value["contact"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }
    pub fn contained<'a>(&'a mut self, val: Vec<ResourceList>) -> &'a mut TestScriptBuilder {
        self.value["contained"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }
    pub fn copyright<'a>(&'a mut self, val: &str) -> &'a mut TestScriptBuilder {
        self.value["copyright"] = json!(val);
        return self;
    }
    pub fn date<'a>(&'a mut self, val: &str) -> &'a mut TestScriptBuilder {
        self.value["date"] = json!(val);
        return self;
    }
    pub fn description<'a>(&'a mut self, val: &str) -> &'a mut TestScriptBuilder {
        self.value["description"] = json!(val);
        return self;
    }
    pub fn destination<'a>(
        &'a mut self,
        val: Vec<TestScript_Destination>,
    ) -> &'a mut TestScriptBuilder {
        self.value["destination"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }
    pub fn experimental<'a>(&'a mut self, val: bool) -> &'a mut TestScriptBuilder {
        self.value["experimental"] = json!(val);
        return self;
    }
    pub fn extension<'a>(&'a mut self, val: Vec<Extension>) -> &'a mut TestScriptBuilder {
        self.value["extension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }
    pub fn fixture<'a>(&'a mut self, val: Vec<TestScript_Fixture>) -> &'a mut TestScriptBuilder {
        self.value["fixture"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }
    pub fn id<'a>(&'a mut self, val: &str) -> &'a mut TestScriptBuilder {
        self.value["id"] = json!(val);
        return self;
    }
    pub fn identifier<'a>(&'a mut self, val: Identifier) -> &'a mut TestScriptBuilder {
        self.value["identifier"] = json!(val.value);
        return self;
    }
    pub fn implicit_rules<'a>(&'a mut self, val: &str) -> &'a mut TestScriptBuilder {
        self.value["implicitRules"] = json!(val);
        return self;
    }
    pub fn jurisdiction<'a>(&'a mut self, val: Vec<CodeableConcept>) -> &'a mut TestScriptBuilder {
        self.value["jurisdiction"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }
    pub fn language<'a>(&'a mut self, val: &str) -> &'a mut TestScriptBuilder {
        self.value["language"] = json!(val);
        return self;
    }
    pub fn meta<'a>(&'a mut self, val: Meta) -> &'a mut TestScriptBuilder {
        self.value["meta"] = json!(val.value);
        return self;
    }
    pub fn metadata<'a>(&'a mut self, val: TestScript_Metadata) -> &'a mut TestScriptBuilder {
        self.value["metadata"] = json!(val.value);
        return self;
    }
    pub fn modifier_extension<'a>(&'a mut self, val: Vec<Extension>) -> &'a mut TestScriptBuilder {
        self.value["modifierExtension"] =
            json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }
    pub fn name<'a>(&'a mut self, val: &str) -> &'a mut TestScriptBuilder {
        self.value["name"] = json!(val);
        return self;
    }
    pub fn origin<'a>(&'a mut self, val: Vec<TestScript_Origin>) -> &'a mut TestScriptBuilder {
        self.value["origin"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }
    pub fn profile<'a>(&'a mut self, val: Vec<Reference>) -> &'a mut TestScriptBuilder {
        self.value["profile"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }
    pub fn publisher<'a>(&'a mut self, val: &str) -> &'a mut TestScriptBuilder {
        self.value["publisher"] = json!(val);
        return self;
    }
    pub fn purpose<'a>(&'a mut self, val: &str) -> &'a mut TestScriptBuilder {
        self.value["purpose"] = json!(val);
        return self;
    }
    pub fn setup<'a>(&'a mut self, val: TestScript_Setup) -> &'a mut TestScriptBuilder {
        self.value["setup"] = json!(val.value);
        return self;
    }
    // `status` is an enum code; stored as its wire string (e.g. "draft").
    pub fn status<'a>(&'a mut self, val: TestScriptStatus) -> &'a mut TestScriptBuilder {
        self.value["status"] = json!(val.to_string());
        return self;
    }
    pub fn teardown<'a>(&'a mut self, val: TestScript_Teardown) -> &'a mut TestScriptBuilder {
        self.value["teardown"] = json!(val.value);
        return self;
    }
    pub fn test<'a>(&'a mut self, val: Vec<TestScript_Test>) -> &'a mut TestScriptBuilder {
        self.value["test"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }
    pub fn text<'a>(&'a mut self, val: Narrative) -> &'a mut TestScriptBuilder {
        self.value["text"] = json!(val.value);
        return self;
    }
    pub fn title<'a>(&'a mut self, val: &str) -> &'a mut TestScriptBuilder {
        self.value["title"] = json!(val);
        return self;
    }
    pub fn url<'a>(&'a mut self, val: &str) -> &'a mut TestScriptBuilder {
        self.value["url"] = json!(val);
        return self;
    }
    pub fn use_context<'a>(&'a mut self, val: Vec<UsageContext>) -> &'a mut TestScriptBuilder {
        self.value["useContext"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }
    pub fn variable<'a>(&'a mut self, val: Vec<TestScript_Variable>) -> &'a mut TestScriptBuilder {
        self.value["variable"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }
    pub fn version<'a>(&'a mut self, val: &str) -> &'a mut TestScriptBuilder {
        self.value["version"] = json!(val);
        return self;
    }
}
/// Lifecycle status of a `TestScript` resource
/// (FHIR code set `draft | active | retired | unknown`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TestScriptStatus {
    Draft,
    Active,
    Retired,
    Unknown,
}
impl TestScriptStatus {
    /// Parses the FHIR wire code; returns `None` for any unrecognized string
    /// (matching is exact and case-sensitive, as before).
    pub fn from_string(string: &str) -> Option<TestScriptStatus> {
        match string {
            "draft" => Some(TestScriptStatus::Draft),
            "active" => Some(TestScriptStatus::Active),
            "retired" => Some(TestScriptStatus::Retired),
            "unknown" => Some(TestScriptStatus::Unknown),
            _ => None,
        }
    }
    /// The wire-format code for this status.
    fn as_str(&self) -> &'static str {
        match self {
            TestScriptStatus::Draft => "draft",
            TestScriptStatus::Active => "active",
            TestScriptStatus::Retired => "retired",
            TestScriptStatus::Unknown => "unknown",
        }
    }
}
/// `Display` (and, via the blanket `ToString` impl, `.to_string()`) emits the
/// FHIR code, so existing `status.to_string()` call sites keep working. This
/// replaces the previous inherent `to_string` method, which shadowed the trait
/// method (clippy: `inherent_to_string`) and allocated per call site anyway.
impl std::fmt::Display for TestScriptStatus {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.as_str())
    }
}
| 34.117362 | 99 | 0.541521 |
6948812a7216e3d97f900d7c115d8fabc3fb3d29 | 4,210 | extern crate newton_rootfinder;
use newton_rootfinder as nrf;
use util::test_cases::broyden1965::*;
use crate::common::{run_function_case_fd, run_function_case_jac};
// Broyden (1965) benchmark systems (cases 5-9) solved with undamped
// Newton-Raphson. The `_fd` variants presumably use a finite-difference
// Jacobian and the `_jac` variants the hand-written analytical Jacobian
// (per the helper names in `common` / `util::test_cases`) — TODO confirm.
#[test]
fn broyden_case5_fd() {
    let problem_size = 5;
    let damping = false;
    run_function_case_fd(
        problem_size,
        broyden1965_case5,
        init_broyden1965_case5(),
        solution_broyden1965_case5(),
        nrf::solver::ResolutionMethod::NewtonRaphson,
        damping,
    );
}
#[test]
fn broyden_case5_jac() {
    let problem_size = 5;
    let damping = false;
    run_function_case_jac(
        problem_size,
        broyden1965_case5,
        broyden1965_case5_jac,
        init_broyden1965_case5(),
        solution_broyden1965_case5(),
        nrf::solver::ResolutionMethod::NewtonRaphson,
        damping,
    );
}
#[test]
fn broyden_case6_fd() {
    let problem_size = 5;
    let damping = false;
    run_function_case_fd(
        problem_size,
        broyden1965_case6,
        init_broyden1965_case6(),
        solution_broyden1965_case6(),
        nrf::solver::ResolutionMethod::NewtonRaphson,
        damping,
    );
}
#[test]
fn broyden_case6_jac() {
    let problem_size = 5;
    let damping = false;
    run_function_case_jac(
        problem_size,
        broyden1965_case6,
        broyden1965_case6_jac,
        init_broyden1965_case6(),
        solution_broyden1965_case6(),
        nrf::solver::ResolutionMethod::NewtonRaphson,
        damping,
    );
}
// Cases 7 and 8 scale the same setup to larger systems (n = 10 and n = 20).
#[test]
fn broyden_case7_fd() {
    let problem_size = 10;
    let damping = false;
    run_function_case_fd(
        problem_size,
        broyden1965_case7,
        init_broyden1965_case7(),
        solution_broyden1965_case7(),
        nrf::solver::ResolutionMethod::NewtonRaphson,
        damping,
    );
}
#[test]
fn broyden_case7_jac() {
    let problem_size = 10;
    let damping = false;
    run_function_case_jac(
        problem_size,
        broyden1965_case7,
        broyden1965_case7_jac,
        init_broyden1965_case7(),
        solution_broyden1965_case7(),
        nrf::solver::ResolutionMethod::NewtonRaphson,
        damping,
    );
}
#[test]
fn broyden_case8_fd() {
    let problem_size = 20;
    let damping = false;
    run_function_case_fd(
        problem_size,
        broyden1965_case8,
        init_broyden1965_case8(),
        solution_broyden1965_case8(),
        nrf::solver::ResolutionMethod::NewtonRaphson,
        damping,
    );
}
#[test]
fn broyden_case8_jac() {
    let problem_size = 20;
    let damping = false;
    run_function_case_jac(
        problem_size,
        broyden1965_case8,
        broyden1965_case8_jac,
        init_broyden1965_case8(),
        solution_broyden1965_case8(),
        nrf::solver::ResolutionMethod::NewtonRaphson,
        damping,
    );
}
#[test]
fn broyden_case9_fd() {
    let problem_size = 2;
    let damping = false;
    run_function_case_fd(
        problem_size,
        broyden1965_case9,
        init_broyden1965_case9(),
        solution_broyden1965_case9(),
        nrf::solver::ResolutionMethod::NewtonRaphson,
        damping,
    );
}
#[test]
fn broyden_case9_jac() {
    let problem_size = 2;
    let damping = false;
    run_function_case_jac(
        problem_size,
        broyden1965_case9,
        broyden1965_case9_jac,
        init_broyden1965_case9(),
        solution_broyden1965_case9(),
        nrf::solver::ResolutionMethod::NewtonRaphson,
        damping,
    );
}
//#[test]
//#[should_panic] // This test can panic, see file src/test_cases/broyden1965
//fn broyden_case10_fd() {
// let problem_size = 2;
// run_function_case_fd(
// problem_size,
// broyden1965_case10,
// init_broyden1965_case10(),
// solution_broyden1965_case10(),
// nrf::solver::ResolutionMethod::NewtonRaphson,
// );
//}
//#[test] // This test can panic, see file src/test_cases/broyden1965
//fn broyden_case10_jac() {
// let problem_size = 2;
// run_function_case_jac(
// problem_size,
// broyden1965_case10,
// broyden1965_case10_jac,
// init_broyden1965_case10(),
// solution_broyden1965_case10(),
// nrf::solver::ResolutionMethod::NewtonRaphson,
// );
//}
| 23.785311 | 77 | 0.638717 |
87366b963530b71ca8a862b0bc55ea42dabc9656 | 1,539 | use std::path::Path;
use crate::{
error::Result, interpreter::Interpreter, parser::StmtIterator, resolver::Resolver,
scanner::TokenIterator,
};
/// Entry point that drives the interpreter, either over a script file or a REPL prompt.
pub struct Runner;
impl Runner {
    /// Execute the script at path `f` in a fresh, non-interactive interpreter.
    pub(crate) fn file<P>(&mut self, f: &P) -> Result<()>
    where
        P: AsRef<Path>,
    {
        let source = std::fs::read_to_string(f)?;
        let mut interp = Interpreter::new(false);
        self.run(&mut interp, &source)
    }
    /// Interactive read-eval-print loop; Ctrl-C or Ctrl-D ends the session.
    /// Evaluation errors are printed and the loop continues.
    pub(crate) fn prompt(&mut self) -> Result<()> {
        use rustyline::error::ReadlineError;
        use rustyline::Editor;
        let mut editor = Editor::<()>::new();
        let mut interp = Interpreter::new(true);
        loop {
            match editor.readline(">> ") {
                Ok(line) => {
                    if let Err(e) = self.run(&mut interp, &line) {
                        eprintln!("{}", e);
                    }
                }
                Err(ReadlineError::Interrupted) | Err(ReadlineError::Eof) => break,
                Err(e) => eprintln!("{}", e),
            }
        }
        Ok(())
    }
    /// Tokenize, parse, resolve and execute every statement in `src`.
    /// Parse errors are reported and skipped; resolution/runtime errors abort.
    pub(crate) fn run(&mut self, i: &mut Interpreter, src: &str) -> Result<()> {
        for parsed in src.chars().tokens().statements() {
            match parsed {
                Ok(stmt) => {
                    let resolved = Resolver::resolve(i, &stmt)?;
                    stmt.accept(resolved)?;
                }
                Err(e) => eprintln!("{}", e),
            }
        }
        Ok(())
    }
}
| 24.822581 | 86 | 0.425601 |
644fcf663d756da4140680f2f00279113e35923b | 6,917 | #![allow(unused_imports, non_camel_case_types)]
use crate::models::r5::CodeableConcept::CodeableConcept;
use crate::models::r5::Extension::Extension;
use crate::models::r5::Reference::Reference;
use serde_json::json;
use serde_json::value::Value;
use std::borrow::Cow;
/// An action that is or was performed on or for a patient, practitioner, device,
/// organization, or location. For example, this can be a physical intervention on a
/// patient like an operation, or less invasive like long term services, counseling,
/// or hypnotherapy. This can be a quality or safety inspection for a location,
/// organization, or device. This can be an accreditation procedure on a practitioner
/// for licensing.
#[derive(Debug)]
pub struct Procedure_Performer<'a> {
    // Borrowed (or, after building, owned) view over the raw FHIR JSON element.
    pub(crate) value: Cow<'a, Value>,
}
impl Procedure_Performer<'_> {
    /// Wraps a borrowed JSON value without validating it; call [`Self::validate`]
    /// to check structural validity.
    pub fn new(value: &Value) -> Procedure_Performer {
        Procedure_Performer {
            value: Cow::Borrowed(value),
        }
    }

    /// Returns an owned clone of the underlying JSON.
    pub fn to_json(&self) -> Value {
        (*self.value).clone()
    }

    /// Indicates who or what performed the procedure.
    pub fn actor(&self) -> Reference {
        Reference {
            value: Cow::Borrowed(&self.value["actor"]),
        }
    }

    /// May be used to represent additional information that is not part of the basic
    /// definition of the element. To make the use of extensions safe and manageable,
    /// there is a strict set of governance applied to the definition and use of
    /// extensions. Though any implementer can define an extension, there is a set of
    /// requirements that SHALL be met as part of the definition of the extension.
    pub fn extension(&self) -> Option<Vec<Extension>> {
        if let Some(Value::Array(val)) = self.value.get("extension") {
            return Some(
                val.into_iter()
                    .map(|e| Extension {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// Distinguishes the type of involvement of the performer in the procedure. For
    /// example, surgeon, anaesthetist, endoscopist.
    pub fn function(&self) -> Option<CodeableConcept> {
        if let Some(val) = self.value.get("function") {
            return Some(CodeableConcept {
                value: Cow::Borrowed(val),
            });
        }
        return None;
    }

    /// Unique id for the element within a resource (for internal references). This may be
    /// any string value that does not contain spaces.
    pub fn id(&self) -> Option<&str> {
        if let Some(Value::String(string)) = self.value.get("id") {
            return Some(string);
        }
        return None;
    }

    /// May be used to represent additional information that is not part of the basic
    /// definition of the element and that modifies the understanding of the element
    /// in which it is contained and/or the understanding of the containing element's
    /// descendants. Usually modifier elements provide negation or qualification. To make
    /// the use of extensions safe and manageable, there is a strict set of governance
    /// applied to the definition and use of extensions. Though any implementer can define
    /// an extension, there is a set of requirements that SHALL be met as part of the
    /// definition of the extension. Applications processing a resource are required to
    /// check for modifier extensions. Modifier extensions SHALL NOT change the meaning
    /// of any elements on Resource or DomainResource (including cannot change the meaning
    /// of modifierExtension itself).
    pub fn modifier_extension(&self) -> Option<Vec<Extension>> {
        if let Some(Value::Array(val)) = self.value.get("modifierExtension") {
            return Some(
                val.into_iter()
                    .map(|e| Extension {
                        value: Cow::Borrowed(e),
                    })
                    .collect::<Vec<_>>(),
            );
        }
        return None;
    }

    /// The organization the device or practitioner was acting on behalf of.
    pub fn on_behalf_of(&self) -> Option<Reference> {
        if let Some(val) = self.value.get("onBehalfOf") {
            return Some(Reference {
                value: Cow::Borrowed(val),
            });
        }
        return None;
    }

    /// Returns true when the required `actor` and every present optional child
    /// element are themselves structurally valid.
    pub fn validate(&self) -> bool {
        if !self.actor().validate() {
            return false;
        }
        if let Some(_val) = self.extension() {
            // Idiom fix: `.all(|e| e.validate())` replaces the former
            // `.map(|e| e.validate()).all(|x| x == true)` (clippy: bool_comparison).
            if !_val.iter().all(|e| e.validate()) {
                return false;
            }
        }
        if let Some(_val) = self.function() {
            if !_val.validate() {
                return false;
            }
        }
        // Primitive field: presence alone is acceptable, nothing further to check.
        if let Some(_val) = self.id() {}
        if let Some(_val) = self.modifier_extension() {
            if !_val.iter().all(|e| e.validate()) {
                return false;
            }
        }
        if let Some(_val) = self.on_behalf_of() {
            if !_val.validate() {
                return false;
            }
        }
        return true;
    }
}
/// Builder for [`Procedure_Performer`], accumulating the element's JSON.
#[derive(Debug)]
pub struct Procedure_PerformerBuilder {
    // JSON object under construction; `build()` wraps a clone of it.
    pub(crate) value: Value,
}
impl Procedure_PerformerBuilder {
    /// Finalize, cloning the accumulated JSON into an owned element.
    pub fn build(&self) -> Procedure_Performer {
        Procedure_Performer {
            value: Cow::Owned(self.value.clone()),
        }
    }
    /// Start from an existing element's JSON so it can be modified further.
    pub fn with(existing: Procedure_Performer) -> Procedure_PerformerBuilder {
        Procedure_PerformerBuilder {
            value: (*existing.value).clone(),
        }
    }
    /// `actor` is the element's only required field, so the constructor demands it.
    pub fn new(actor: Reference) -> Procedure_PerformerBuilder {
        let mut __value: Value = json!({});
        __value["actor"] = json!(actor.value);
        return Procedure_PerformerBuilder { value: __value };
    }
    pub fn extension<'a>(&'a mut self, val: Vec<Extension>) -> &'a mut Procedure_PerformerBuilder {
        self.value["extension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }
    pub fn function<'a>(&'a mut self, val: CodeableConcept) -> &'a mut Procedure_PerformerBuilder {
        self.value["function"] = json!(val.value);
        return self;
    }
    pub fn id<'a>(&'a mut self, val: &str) -> &'a mut Procedure_PerformerBuilder {
        self.value["id"] = json!(val);
        return self;
    }
    pub fn modifier_extension<'a>(
        &'a mut self,
        val: Vec<Extension>,
    ) -> &'a mut Procedure_PerformerBuilder {
        self.value["modifierExtension"] =
            json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
        return self;
    }
    pub fn on_behalf_of<'a>(&'a mut self, val: Reference) -> &'a mut Procedure_PerformerBuilder {
        self.value["onBehalfOf"] = json!(val.value);
        return self;
    }
}
| 35.654639 | 99 | 0.59202 |
d7105049272fb024c931ad4607c3b5d91deb6e59 | 9,357 | use rayon::iter::plumbing::*;
use rayon::prelude::*;
/// Stress-test indexes for `Producer::split_at`.
/// Stress-test indexes for `Producer::split_at`.
///
/// `f` must build a fresh parallel iterator on every call; for every ordered
/// split triple `(i, j, k)` the items produced must equal `expected`, both in
/// forward and in reversed consumption order.
fn check<F, I>(expected: &[I::Item], mut f: F)
where
    F: FnMut() -> I,
    I: IntoParallelIterator,
    I::Iter: IndexedParallelIterator,
    I::Item: PartialEq + std::fmt::Debug,
{
    // `len() + 1` so the split points may equal the full length (empty tail).
    map_triples(expected.len() + 1, |i, j, k| {
        Split::forward(f(), i, j, k, expected);
        Split::reverse(f(), i, j, k, expected);
    });
}
/// Invoke `f` once for every ordered triple `(i, j, k)` with
/// `i <= j <= k < end`, in lexicographic order.
fn map_triples<F>(end: usize, mut f: F)
where
    F: FnMut(usize, usize, usize),
{
    let triples = (0..end)
        .flat_map(|i| (i..end).map(move |j| (i, j)))
        .flat_map(|(i, j)| (j..end).map(move |k| (i, j, k)));
    for (i, j, k) in triples {
        f(i, j, k);
    }
}
/// A `ProducerCallback` that cuts the producer at three ordered indexes and
/// checks the reassembled pieces.
#[derive(Debug)]
struct Split {
    /// Split points, with `i <= j <= k`.
    i: usize,
    j: usize,
    k: usize,
    /// When true, the reassembled iterator is consumed in reverse order.
    reverse: bool,
}
impl Split {
    /// Split at `(i, j, k)`, consume forward, and compare against `expected`.
    fn forward<I>(iter: I, i: usize, j: usize, k: usize, expected: &[I::Item])
    where
        I: IntoParallelIterator,
        I::Iter: IndexedParallelIterator,
        I::Item: PartialEq + std::fmt::Debug,
    {
        let result = iter.into_par_iter().with_producer(Split {
            i,
            j,
            k,
            reverse: false,
        });
        assert_eq!(result, expected);
    }
    /// Split at `(i, j, k)`, consume reversed, and compare against `expected`
    /// read back-to-front.
    fn reverse<I>(iter: I, i: usize, j: usize, k: usize, expected: &[I::Item])
    where
        I: IntoParallelIterator,
        I::Iter: IndexedParallelIterator,
        I::Item: PartialEq + std::fmt::Debug,
    {
        let result = iter.into_par_iter().with_producer(Split {
            i,
            j,
            k,
            reverse: true,
        });
        assert!(result.iter().eq(expected.iter().rev()));
    }
}
impl<T> ProducerCallback<T> for Split {
    type Output = Vec<T>;
    /// Cut the producer into four pieces `a|b|c|d` at absolute indexes
    /// `i <= j <= k`, verify each piece's length, then stitch them back
    /// together (optionally reversed) into a `Vec`.
    fn callback<P>(self, producer: P) -> Self::Output
    where
        P: Producer<Item = T>,
    {
        println!("{:?}", self);
        // Splitting the outer indexes first gets us an arbitrary mid section,
        // which we then split further to get full test coverage.
        let (left, d) = producer.split_at(self.k);
        let (a, mid) = left.split_at(self.i);
        // `mid` starts at absolute index `i`, so the cut is relative: `j - i`.
        let (b, c) = mid.split_at(self.j - self.i);
        let a = a.into_iter();
        let b = b.into_iter();
        let c = c.into_iter();
        let d = d.into_iter();
        check_len(&a, self.i);
        check_len(&b, self.j - self.i);
        check_len(&c, self.k - self.j);
        let chain = a.chain(b).chain(c).chain(d);
        if self.reverse {
            chain.rev().collect()
        } else {
            chain.collect()
        }
    }
}
/// Assert that `iter` reports exactly `len` through both `size_hint` and `len`.
fn check_len<I: ExactSizeIterator>(iter: &I, len: usize) {
    let hint = iter.size_hint();
    assert_eq!(hint.0, len);
    assert_eq!(hint.1, Some(len));
    assert_eq!(iter.len(), len);
}
// **** Base Producers ****
// Each test below builds the expected sequence with the sequential std API,
// then exercises every split of the matching rayon producer/adaptor.
#[test]
fn array() {
    let a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
    check(&a, || a);
}
#[test]
fn empty() {
    let v = vec![42];
    check(&v[..0], rayon::iter::empty);
}
#[test]
fn once() {
    let v = vec![42];
    check(&v, || rayon::iter::once(42));
}
#[test]
fn option() {
    let v = vec![42];
    check(&v, || Some(42));
}
#[test]
fn range() {
    let v: Vec<_> = (0..10).collect();
    check(&v, || 0..10);
}
#[test]
fn range_inclusive() {
    let v: Vec<_> = (0u16..=10).collect();
    check(&v, || 0u16..=10);
}
#[test]
fn repeatn() {
    let v: Vec<_> = std::iter::repeat(1).take(5).collect();
    check(&v, || rayon::iter::repeatn(1, 5));
}
#[test]
fn slice_iter() {
    let s: Vec<_> = (0..10).collect();
    let v: Vec<_> = s.iter().collect();
    check(&v, || &s);
}
// `iter_mut` yields exclusive references, so `check`'s repeated-rebuild shape
// does not fit; the split driving is done inline instead.
#[test]
fn slice_iter_mut() {
    let mut s: Vec<_> = (0..10).collect();
    let mut v: Vec<_> = s.clone();
    let expected: Vec<_> = v.iter_mut().collect();
    map_triples(expected.len() + 1, |i, j, k| {
        Split::forward(s.par_iter_mut(), i, j, k, &expected);
        Split::reverse(s.par_iter_mut(), i, j, k, &expected);
    });
}
#[test]
fn slice_chunks() {
    let s: Vec<_> = (0..10).collect();
    for len in 1..s.len() + 2 {
        let v: Vec<_> = s.chunks(len).collect();
        check(&v, || s.par_chunks(len));
    }
}
#[test]
fn slice_chunks_exact() {
    let s: Vec<_> = (0..10).collect();
    for len in 1..s.len() + 2 {
        let v: Vec<_> = s.chunks_exact(len).collect();
        check(&v, || s.par_chunks_exact(len));
    }
}
#[test]
fn slice_chunks_mut() {
    let mut s: Vec<_> = (0..10).collect();
    let mut v: Vec<_> = s.clone();
    for len in 1..s.len() + 2 {
        let expected: Vec<_> = v.chunks_mut(len).collect();
        map_triples(expected.len() + 1, |i, j, k| {
            Split::forward(s.par_chunks_mut(len), i, j, k, &expected);
            Split::reverse(s.par_chunks_mut(len), i, j, k, &expected);
        });
    }
}
#[test]
fn slice_chunks_exact_mut() {
    let mut s: Vec<_> = (0..10).collect();
    let mut v: Vec<_> = s.clone();
    for len in 1..s.len() + 2 {
        let expected: Vec<_> = v.chunks_exact_mut(len).collect();
        map_triples(expected.len() + 1, |i, j, k| {
            Split::forward(s.par_chunks_exact_mut(len), i, j, k, &expected);
            Split::reverse(s.par_chunks_exact_mut(len), i, j, k, &expected);
        });
    }
}
#[test]
fn slice_rchunks() {
    let s: Vec<_> = (0..10).collect();
    for len in 1..s.len() + 2 {
        let v: Vec<_> = s.rchunks(len).collect();
        check(&v, || s.par_rchunks(len));
    }
}
#[test]
fn slice_rchunks_exact() {
    let s: Vec<_> = (0..10).collect();
    for len in 1..s.len() + 2 {
        let v: Vec<_> = s.rchunks_exact(len).collect();
        check(&v, || s.par_rchunks_exact(len));
    }
}
#[test]
fn slice_rchunks_mut() {
    let mut s: Vec<_> = (0..10).collect();
    let mut v: Vec<_> = s.clone();
    for len in 1..s.len() + 2 {
        let expected: Vec<_> = v.rchunks_mut(len).collect();
        map_triples(expected.len() + 1, |i, j, k| {
            Split::forward(s.par_rchunks_mut(len), i, j, k, &expected);
            Split::reverse(s.par_rchunks_mut(len), i, j, k, &expected);
        });
    }
}
#[test]
fn slice_rchunks_exact_mut() {
    let mut s: Vec<_> = (0..10).collect();
    let mut v: Vec<_> = s.clone();
    for len in 1..s.len() + 2 {
        let expected: Vec<_> = v.rchunks_exact_mut(len).collect();
        map_triples(expected.len() + 1, |i, j, k| {
            Split::forward(s.par_rchunks_exact_mut(len), i, j, k, &expected);
            Split::reverse(s.par_rchunks_exact_mut(len), i, j, k, &expected);
        });
    }
}
#[test]
fn slice_windows() {
    let s: Vec<_> = (0..10).collect();
    let v: Vec<_> = s.windows(2).collect();
    check(&v, || s.par_windows(2));
}
#[test]
fn vec() {
    let v: Vec<_> = (0..10).collect();
    check(&v, || v.clone());
}
// **** Adaptors ****
#[test]
fn chain() {
    let v: Vec<_> = (0..10).collect();
    check(&v, || (0..5).into_par_iter().chain(5..10));
}
#[test]
fn cloned() {
    let v: Vec<_> = (0..10).collect();
    check(&v, || v.par_iter().cloned());
}
#[test]
fn copied() {
    let v: Vec<_> = (0..10).collect();
    check(&v, || v.par_iter().copied());
}
#[test]
fn enumerate() {
    let v: Vec<_> = (0..10).enumerate().collect();
    check(&v, || (0..10).into_par_iter().enumerate());
}
#[test]
fn step_by() {
    let v: Vec<_> = (0..10).step_by(2).collect();
    check(&v, || (0..10).into_par_iter().step_by(2))
}
// Step that does not divide the range length evenly (ragged final step).
#[test]
fn step_by_unaligned() {
    let v: Vec<_> = (0..10).step_by(3).collect();
    check(&v, || (0..10).into_par_iter().step_by(3))
}
#[test]
fn inspect() {
    let v: Vec<_> = (0..10).collect();
    check(&v, || (0..10).into_par_iter().inspect(|_| ()));
}
#[test]
fn update() {
    let v: Vec<_> = (0..10).collect();
    check(&v, || (0..10).into_par_iter().update(|_| ()));
}
#[test]
fn interleave() {
    let v = [0, 10, 1, 11, 2, 12, 3, 4];
    check(&v, || (0..5).into_par_iter().interleave(10..13));
    check(&v[..6], || (0..3).into_par_iter().interleave(10..13));
    let v = [0, 10, 1, 11, 2, 12, 13, 14];
    check(&v, || (0..3).into_par_iter().interleave(10..15));
}
#[test]
fn intersperse() {
    let v = [0, -1, 1, -1, 2, -1, 3, -1, 4];
    check(&v, || (0..5).into_par_iter().intersperse(-1));
}
#[test]
fn chunks() {
    let s: Vec<_> = (0..10).collect();
    let v: Vec<_> = s.chunks(2).map(|c| c.to_vec()).collect();
    check(&v, || s.par_iter().cloned().chunks(2));
}
#[test]
fn map() {
    let v: Vec<_> = (0..10).collect();
    check(&v, || v.par_iter().map(Clone::clone));
}
#[test]
fn map_with() {
    let v: Vec<_> = (0..10).collect();
    check(&v, || v.par_iter().map_with(vec![0], |_, &x| x));
}
#[test]
fn map_init() {
    let v: Vec<_> = (0..10).collect();
    check(&v, || v.par_iter().map_init(|| vec![0], |_, &x| x));
}
#[test]
fn panic_fuse() {
    let v: Vec<_> = (0..10).collect();
    check(&v, || (0..10).into_par_iter().panic_fuse());
}
#[test]
fn rev() {
    let v: Vec<_> = (0..10).rev().collect();
    check(&v, || (0..10).into_par_iter().rev());
}
#[test]
fn with_max_len() {
    let v: Vec<_> = (0..10).collect();
    check(&v, || (0..10).into_par_iter().with_max_len(1));
}
#[test]
fn with_min_len() {
    let v: Vec<_> = (0..10).collect();
    check(&v, || (0..10).into_par_iter().with_min_len(1));
}
#[test]
fn zip() {
    let v: Vec<_> = (0..10).zip(10..20).collect();
    check(&v, || (0..10).into_par_iter().zip(10..20));
    check(&v[..5], || (0..5).into_par_iter().zip(10..20));
    check(&v[..5], || (0..10).into_par_iter().zip(10..15));
}
| 23.688608 | 78 | 0.514161 |
1c3bd8097af70c9eb1f47ce1731a817ab0775247 | 10,493 | use nom::{
branch::alt,
bytes::complete::{escaped, tag, tag_no_case, take_while1, take_while_m_n},
character::is_digit,
combinator::map_res,
combinator::{map, opt},
sequence::separated_pair,
sequence::{delimited, pair, preceded, terminated},
IResult,
};
use crate::expression::functions::{filter, Constraint};
use crate::{
data::inline_data,
data::{datablock, DataBlock},
expression::DefaultOrNamedIri,
expression::{bind, ExpressionAsVar, Iri},
literal::NumericLiteral,
literal::{boolean, numeric_literal, silent},
node::{BlankNode, RdfLiteral, TriplesNode},
quads::{quads_pattern, Quads},
query::select::{sub_select, SubSelect},
terminals::sp_sep1,
terminals::{
anon, default_or_named_iri, iri, nil, pn_chars_tail, pn_chars_u_one, pn_local,
preceded_tag1, rdf_literal, sp, sp1, sp_enc, sp_enc1, sp_sep,
},
triple::{property_list, property_list_not_empty, triples_block, triples_node, TriplesBlock},
var::{var_or_iri, var_or_term, Var, VarOrIri, VarOrTerm, Verb},
};
use nom::character::complete::char;
use nom::combinator::recognize;
use nom::multi::{separated_list, separated_nonempty_list};
use nom::sequence::tuple;
/// A concrete RDF term as it may appear inside a graph pattern
/// (mirrors the SPARQL `GraphTerm` production).
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum GraphTerm {
    Iri(Iri),
    RdfLiteral(RdfLiteral),
    NumericLiteral(NumericLiteral),
    BooleanLiteral(bool),
    BlankNode(BlankNode),
    /// empty parentheses
    Nil,
}
/// Body of a group graph pattern: an optional leading triples block followed
/// by further pattern/triples steps (SPARQL `GroupGraphPatternSub`).
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct GroupGraphPatternSub {
    pub triples_block: Option<TriplesBlock>,
    pub graph_pattern_and_triples: Vec<GraphPatternAndTriples>,
}
/// Target of graph-management operations: a named graph or the default graph.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum GraphOrDefault {
    Graph(Iri),
    Default,
}
/// Graph selector accepting the extra `NAMED` / `ALL` keywords
/// (used by e.g. CLEAR/DROP-style operations).
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum GraphRefAll {
    GraphRef(Iri),
    Default,
    Named,
    All,
}
/// A node in a triples pattern: either a variable/term or a nested
/// collection/blank-node property list.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum GraphNode {
    VarOrTerm(VarOrTerm),
    TriplesNode(Box<TriplesNode>),
}
/// `{ ... }` — either a nested sub-SELECT or an ordinary pattern body.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum GroupGraphPattern {
    SubSelect(Box<SubSelect>),
    GroupGraphPatternSub(GroupGraphPatternSub),
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct GraphPattern {
    pub pattern_or_filter: GraphPatternOrFilter,
    pub triples_block: Option<TriplesBlock>,
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum GraphPatternOrFilter {
    GraphPattern(Box<GraphPatternNotTriples>),
    Filter(Constraint),
}
/// Every non-triples pattern alternative of the SPARQL grammar's
/// `GraphPatternNotTriples` production.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum GraphPatternNotTriples {
    Optional(GroupGraphPattern),
    GroupOrUnion(GroupOrUnionGraphPattern),
    Minus(GroupGraphPattern),
    Graph(GraphGraphPattern),
    Service(ServiceGraphPattern),
    Filter(Constraint),
    Bind(ExpressionAsVar),
    InlineData(DataBlock),
}
/// One step of a pattern body: a non-triples pattern plus the triples
/// block that may follow it.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct GraphPatternAndTriples {
    pub graph_pattern: GraphPatternNotTriples,
    pub triples: Option<TriplesBlock>,
}
/// One or more group patterns joined by `UNION`.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct GroupOrUnionGraphPattern(pub Vec<GroupGraphPattern>);
/// `GRAPH <var-or-iri> { ... }`.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct GraphGraphPattern {
    pub var_or_iri: VarOrIri,
    pub graph_pattern: GroupGraphPattern,
}
/// `SERVICE [SILENT] <var-or-iri> { ... }` — federated query delegation.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct ServiceGraphPattern {
    pub var_or_iri: VarOrIri,
    pub silent: bool,
    pub graph_pattern: GroupGraphPattern,
}
/// Parses `GRAPH <iri>` and yields the IRI.
pub(crate) fn graph_ref(i: &str) -> IResult<&str, Iri> {
    map(sp_sep1(tag_no_case("graph"), iri), |(_, iri)| iri)(i)
}
/// Parses `DEFAULT` or `[GRAPH] <iri>` (the `GRAPH` keyword is optional).
pub(crate) fn graph_or_default(i: &str) -> IResult<&str, GraphOrDefault> {
    alt((
        map(tag_no_case("default"), |_| GraphOrDefault::Default),
        map(
            pair(opt(pair(tag_no_case("graph"), sp1)), iri),
            |(_, iri)| GraphOrDefault::Graph(iri),
        ),
    ))(i)
}
/// Parses `DEFAULT | NAMED | ALL | GRAPH <iri>`; keyword alternatives are
/// tried before the `GRAPH <iri>` form.
pub(crate) fn graph_ref_all(i: &str) -> IResult<&str, GraphRefAll> {
    alt((
        map(tag_no_case("default"), |_| GraphRefAll::Default),
        map(tag_no_case("named"), |_| GraphRefAll::Named),
        map(tag_no_case("all"), |_| GraphRefAll::All),
        map(graph_ref, GraphRefAll::GraphRef),
    ))(i)
}
/// Parses any non-triples pattern alternative (union group, OPTIONAL, MINUS,
/// GRAPH, SERVICE, FILTER, BIND, VALUES), first match wins.
pub(crate) fn graph_pattern_not_triples(i: &str) -> IResult<&str, GraphPatternNotTriples> {
    alt((
        map(
            group_or_union_graph_pattern,
            GraphPatternNotTriples::GroupOrUnion,
        ),
        map(
            optional_group_graph_pattern,
            GraphPatternNotTriples::Optional,
        ),
        map(minus_graph_pattern, GraphPatternNotTriples::Minus),
        map(graph_graph_pattern, GraphPatternNotTriples::Graph),
        map(service_graph_pattern, GraphPatternNotTriples::Service),
        map(filter, GraphPatternNotTriples::Filter),
        map(bind, GraphPatternNotTriples::Bind),
        map(inline_data, GraphPatternNotTriples::InlineData),
    ))(i)
}
/// One or more `{...}` groups separated by the `UNION` keyword.
pub(crate) fn group_or_union_graph_pattern(i: &str) -> IResult<&str, GroupOrUnionGraphPattern> {
    map(
        separated_nonempty_list(sp_enc1(tag_no_case("union")), group_graph_pattern),
        GroupOrUnionGraphPattern,
    )(i)
}
/// `OPTIONAL { ... }`.
pub(crate) fn optional_group_graph_pattern(i: &str) -> IResult<&str, GroupGraphPattern> {
    preceded_tag1("optional", group_graph_pattern)(i)
}
/// `MINUS { ... }`.
pub(crate) fn minus_graph_pattern(i: &str) -> IResult<&str, GroupGraphPattern> {
    preceded_tag1("minus", group_graph_pattern)(i)
}
/// `GRAPH <var-or-iri> { ... }`.
pub(crate) fn graph_graph_pattern(i: &str) -> IResult<&str, GraphGraphPattern> {
    map(
        tuple((
            terminated(tag_no_case("graph"), sp1),
            terminated(var_or_iri, sp),
            group_graph_pattern,
        )),
        |(_, var_or_iri, graph_pattern)| GraphGraphPattern {
            var_or_iri,
            graph_pattern,
        },
    )(i)
}
/// Body of `{...}`: optional leading triples block, then zero or more
/// pattern/triples steps.
pub(crate) fn group_graph_pattern_sub(i: &str) -> IResult<&str, GroupGraphPatternSub> {
    map(
        pair(
            opt(terminated(triples_block, sp)),
            separated_list(sp, group_graph_pattern_and_triples),
        ),
        |(triples_block, graph_pattern_and_triples)| GroupGraphPatternSub {
            triples_block,
            graph_pattern_and_triples,
        },
    )(i)
}
/// Parses one `GraphPatternNotTriples '.'? TriplesBlock?` step of the
/// `GroupGraphPatternSub` production (SPARQL 1.1 grammar).
pub(crate) fn group_graph_pattern_and_triples(i: &str) -> IResult<&str, GraphPatternAndTriples> {
    map(
        separated_pair(
            graph_pattern_not_triples,
            // The grammar allows an optional DOT between the pattern and the
            // following triples block. The previous `char('?')` contradicted
            // the spec and would have consumed the leading `?` of a variable,
            // corrupting the subsequent triples parse.
            opt(preceded(sp, char('.'))),
            opt(preceded(sp, triples_block)),
        ),
        |(graph_pattern, triples)| GraphPatternAndTriples {
            graph_pattern,
            triples,
        },
    )(i)
}
/// `SERVICE [SILENT] <var-or-iri> { ... }`; a missing SILENT defaults to false.
pub(crate) fn service_graph_pattern(i: &str) -> IResult<&str, ServiceGraphPattern> {
    map(
        tuple((
            terminated(tag_no_case("service"), sp1),
            map(opt(terminated(silent, sp1)), Option::unwrap_or_default),
            terminated(var_or_iri, sp),
            group_graph_pattern,
        )),
        |(_, silent, var_or_iri, graph_pattern)| ServiceGraphPattern {
            var_or_iri,
            silent,
            graph_pattern,
        },
    )(i)
}
/// Braced pattern: tries a sub-SELECT first, otherwise an ordinary body.
pub(crate) fn group_graph_pattern(i: &str) -> IResult<&str, GroupGraphPattern> {
    delimited(
        char('{'),
        sp_enc(alt((
            map(sub_select, |s| GroupGraphPattern::SubSelect(Box::new(s))),
            map(
                group_graph_pattern_sub,
                GroupGraphPattern::GroupGraphPatternSub,
            ),
        ))),
        char('}'),
    )(i)
}
/// Any concrete term: IRI, literal, boolean, blank node, or NIL `()`.
pub(crate) fn graph_term(i: &str) -> IResult<&str, GraphTerm> {
    alt((
        map(iri, GraphTerm::Iri),
        map(rdf_literal, GraphTerm::RdfLiteral),
        map(numeric_literal, GraphTerm::NumericLiteral),
        map(boolean, GraphTerm::BooleanLiteral),
        map(blank_node, GraphTerm::BlankNode),
        map(nil, |_| GraphTerm::Nil),
    ))(i)
}
/// `_:label` (first char a name char or digit) or the anonymous form `[]`.
pub(crate) fn blank_node(i: &str) -> IResult<&str, BlankNode> {
    alt((
        map(
            sp_sep(
                tag("_:"),
                recognize(pair(
                    alt((pn_chars_u_one, take_while_m_n(1, 1, |c| is_digit(c as u8)))),
                    pn_chars_tail,
                )),
            ),
            |(_, label)| BlankNode::Label(label.to_string()),
        ),
        map(anon, |_| BlankNode::Anon),
    ))(i)
}
/// A pattern node: variable/term, or a nested triples node.
pub(crate) fn graph_node(i: &str) -> IResult<&str, GraphNode> {
    alt((
        map(var_or_term, GraphNode::VarOrTerm),
        map(triples_node, |node| GraphNode::TriplesNode(Box::new(node))),
    ))(i)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::expression::PrefixedName;
    // Keyword alternatives plus both IRI spellings (prefixed name and full IRI).
    #[test]
    fn is_graph_ref_all() {
        assert_eq!(graph_ref_all("default"), Ok(("", GraphRefAll::Default)));
        assert_eq!(
            graph_ref_all("graph :uri1"),
            Ok((
                "",
                GraphRefAll::GraphRef(Iri::PrefixedName(PrefixedName::PnameLN {
                    pn_prefix: None,
                    pn_local: "uri1".to_string(),
                },))
            ))
        );
        assert_eq!(
            graph_ref_all("graph <http://example.org/foaf/aliceFoaf>"),
            Ok((
                "",
                GraphRefAll::GraphRef(Iri::Iri("http://example.org/foaf/aliceFoaf".to_string()))
            ))
        );
    }
    // NIL, boolean, IRI and (negative) numeric term forms.
    #[test]
    fn is_graph_term() {
        assert_eq!(graph_term("()"), Ok(("", GraphTerm::Nil)));
        assert_eq!(
            graph_term("true"),
            Ok(("", GraphTerm::BooleanLiteral(true)))
        );
        assert_eq!(
            graph_term("<http://example.org/foaf/aliceFoaf>"),
            Ok((
                "",
                GraphTerm::Iri(Iri::Iri("http://example.org/foaf/aliceFoaf".to_string()))
            ))
        );
        assert_eq!(
            graph_term("-5"),
            Ok(("", GraphTerm::NumericLiteral(NumericLiteral::Int(-5))))
        );
    }
    #[test]
    fn is_var_or_term() {
        assert_eq!(var_or_term("()"), Ok(("", VarOrTerm::Term(GraphTerm::Nil))));
        assert_eq!(
            var_or_term("false"),
            Ok(("", VarOrTerm::Term(GraphTerm::BooleanLiteral(false))))
        );
        assert_eq!(
            var_or_term("?name"),
            Ok(("", VarOrTerm::Var(Var::QMark("name".to_string()))))
        );
    }
}
| 30.152299 | 98 | 0.588488 |
76172f72eee6f033b97c07abf324c07450a27fe2 | 17,066 | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use crate::CoordType;
#[cfg(any(feature = "v2_32", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v2_32")))]
use crate::ScrollType;
use crate::TextBoundary;
use crate::TextClipType;
use crate::TextGranularity;
use crate::TextRange;
use crate::TextRectangle;
use glib::object::Cast;
use glib::object::IsA;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem;
use std::mem::transmute;
// GObject wrapper for the `AtkText` interface: `Text` is a glib interface
// handle whose GType is resolved lazily via `atk_text_get_type`.
// NOTE: this file is gir-generated ("DO NOT EDIT"); code changes belong in
// the generator configuration, comments here are annotation only.
glib::glib_wrapper! {
    pub struct Text(Interface<ffi::AtkText>);
    match fn {
        get_type => || ffi::atk_text_get_type(),
    }
}
// Convenience constant for passing "no Text" where `Option<&Text>` is expected.
pub const NONE_TEXT: Option<&Text> = None;
/// Rust-side view of the `AtkText` interface (gir-generated).
///
/// Each method forwards to the matching `atk_text_*` C function named by its
/// `#[doc(alias = ...)]` attribute; the `connect_*` methods register Rust
/// closures for the interface's GObject signals.
pub trait TextExt: 'static {
    #[doc(alias = "atk_text_add_selection")]
    fn add_selection(&self, start_offset: i32, end_offset: i32) -> bool;
    #[doc(alias = "atk_text_get_bounded_ranges")]
    fn get_bounded_ranges(
        &self,
        rect: &mut TextRectangle,
        coord_type: CoordType,
        x_clip_type: TextClipType,
        y_clip_type: TextClipType,
    ) -> Vec<TextRange>;
    #[doc(alias = "atk_text_get_caret_offset")]
    fn get_caret_offset(&self) -> i32;
    #[doc(alias = "atk_text_get_character_at_offset")]
    fn get_character_at_offset(&self, offset: i32) -> char;
    #[doc(alias = "atk_text_get_character_count")]
    fn get_character_count(&self) -> i32;
    #[doc(alias = "atk_text_get_character_extents")]
    fn get_character_extents(&self, offset: i32, coords: CoordType) -> (i32, i32, i32, i32);
    // Skipped by the generator: AttributeSet is not bound.
    //#[doc(alias = "atk_text_get_default_attributes")]
    //fn get_default_attributes(&self) -> /*Ignored*/Option<AttributeSet>;
    #[doc(alias = "atk_text_get_n_selections")]
    fn get_n_selections(&self) -> i32;
    #[doc(alias = "atk_text_get_offset_at_point")]
    fn get_offset_at_point(&self, x: i32, y: i32, coords: CoordType) -> i32;
    #[doc(alias = "atk_text_get_range_extents")]
    fn get_range_extents(
        &self,
        start_offset: i32,
        end_offset: i32,
        coord_type: CoordType,
    ) -> TextRectangle;
    // Skipped by the generator: AttributeSet is not bound.
    //#[doc(alias = "atk_text_get_run_attributes")]
    //fn get_run_attributes(&self, offset: i32) -> (/*Ignored*/AttributeSet, i32, i32);
    #[doc(alias = "atk_text_get_selection")]
    fn get_selection(&self, selection_num: i32) -> (glib::GString, i32, i32);
    #[doc(alias = "atk_text_get_string_at_offset")]
    fn get_string_at_offset(
        &self,
        offset: i32,
        granularity: TextGranularity,
    ) -> (Option<glib::GString>, i32, i32);
    #[doc(alias = "atk_text_get_text")]
    fn get_text(&self, start_offset: i32, end_offset: i32) -> Option<glib::GString>;
    #[doc(alias = "atk_text_get_text_at_offset")]
    fn get_text_at_offset(
        &self,
        offset: i32,
        boundary_type: TextBoundary,
    ) -> (glib::GString, i32, i32);
    #[doc(alias = "atk_text_remove_selection")]
    fn remove_selection(&self, selection_num: i32) -> bool;
    #[cfg(any(feature = "v2_32", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v2_32")))]
    #[doc(alias = "atk_text_scroll_substring_to")]
    fn scroll_substring_to(&self, start_offset: i32, end_offset: i32, type_: ScrollType) -> bool;
    #[cfg(any(feature = "v2_32", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v2_32")))]
    #[doc(alias = "atk_text_scroll_substring_to_point")]
    fn scroll_substring_to_point(
        &self,
        start_offset: i32,
        end_offset: i32,
        coords: CoordType,
        x: i32,
        y: i32,
    ) -> bool;
    #[doc(alias = "atk_text_set_caret_offset")]
    fn set_caret_offset(&self, offset: i32) -> bool;
    #[doc(alias = "atk_text_set_selection")]
    fn set_selection(&self, selection_num: i32, start_offset: i32, end_offset: i32) -> bool;
    fn connect_text_attributes_changed<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
    fn connect_text_caret_moved<F: Fn(&Self, i32) + 'static>(&self, f: F) -> SignalHandlerId;
    fn connect_text_insert<F: Fn(&Self, i32, i32, &str) + 'static>(&self, f: F) -> SignalHandlerId;
    fn connect_text_remove<F: Fn(&Self, i32, i32, &str) + 'static>(&self, f: F) -> SignalHandlerId;
    fn connect_text_selection_changed<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
}
/// Blanket implementation for any type implementing the `Text` interface
/// (gir-generated). All methods are thin FFI wrappers: C out-parameters are
/// returned as tuples, and signal connectors hand boxed Rust closures to C
/// trampolines as `gpointer` user data.
impl<O: IsA<Text>> TextExt for O {
    fn add_selection(&self, start_offset: i32, end_offset: i32) -> bool {
        unsafe {
            from_glib(ffi::atk_text_add_selection(
                self.as_ref().to_glib_none().0,
                start_offset,
                end_offset,
            ))
        }
    }
    fn get_bounded_ranges(
        &self,
        rect: &mut TextRectangle,
        coord_type: CoordType,
        x_clip_type: TextClipType,
        y_clip_type: TextClipType,
    ) -> Vec<TextRange> {
        unsafe {
            // from_glib_full: Rust takes ownership of the returned array.
            FromGlibPtrContainer::from_glib_full(ffi::atk_text_get_bounded_ranges(
                self.as_ref().to_glib_none().0,
                rect.to_glib_none_mut().0,
                coord_type.to_glib(),
                x_clip_type.to_glib(),
                y_clip_type.to_glib(),
            ))
        }
    }
    fn get_caret_offset(&self) -> i32 {
        unsafe { ffi::atk_text_get_caret_offset(self.as_ref().to_glib_none().0) }
    }
    fn get_character_at_offset(&self, offset: i32) -> char {
        unsafe {
            // The C API returns a gunichar (u32); converting to `char` can
            // fail for invalid scalar values, hence the expect.
            std::convert::TryFrom::try_from(ffi::atk_text_get_character_at_offset(
                self.as_ref().to_glib_none().0,
                offset,
            ))
            .expect("conversion from an invalid Unicode value attempted")
        }
    }
    fn get_character_count(&self) -> i32 {
        unsafe { ffi::atk_text_get_character_count(self.as_ref().to_glib_none().0) }
    }
    fn get_character_extents(&self, offset: i32, coords: CoordType) -> (i32, i32, i32, i32) {
        unsafe {
            // The C function fills four out-parameters; collect them via
            // MaybeUninit and return as a tuple.
            let mut x = mem::MaybeUninit::uninit();
            let mut y = mem::MaybeUninit::uninit();
            let mut width = mem::MaybeUninit::uninit();
            let mut height = mem::MaybeUninit::uninit();
            ffi::atk_text_get_character_extents(
                self.as_ref().to_glib_none().0,
                offset,
                x.as_mut_ptr(),
                y.as_mut_ptr(),
                width.as_mut_ptr(),
                height.as_mut_ptr(),
                coords.to_glib(),
            );
            let x = x.assume_init();
            let y = y.assume_init();
            let width = width.assume_init();
            let height = height.assume_init();
            (x, y, width, height)
        }
    }
    //fn get_default_attributes(&self) -> /*Ignored*/Option<AttributeSet> {
    //    unsafe { TODO: call ffi:atk_text_get_default_attributes() }
    //}
    fn get_n_selections(&self) -> i32 {
        unsafe { ffi::atk_text_get_n_selections(self.as_ref().to_glib_none().0) }
    }
    fn get_offset_at_point(&self, x: i32, y: i32, coords: CoordType) -> i32 {
        unsafe {
            ffi::atk_text_get_offset_at_point(
                self.as_ref().to_glib_none().0,
                x,
                y,
                coords.to_glib(),
            )
        }
    }
    fn get_range_extents(
        &self,
        start_offset: i32,
        end_offset: i32,
        coord_type: CoordType,
    ) -> TextRectangle {
        unsafe {
            let mut rect = TextRectangle::uninitialized();
            ffi::atk_text_get_range_extents(
                self.as_ref().to_glib_none().0,
                start_offset,
                end_offset,
                coord_type.to_glib(),
                rect.to_glib_none_mut().0,
            );
            rect
        }
    }
    //fn get_run_attributes(&self, offset: i32) -> (/*Ignored*/AttributeSet, i32, i32) {
    //    unsafe { TODO: call ffi:atk_text_get_run_attributes() }
    //}
    fn get_selection(&self, selection_num: i32) -> (glib::GString, i32, i32) {
        unsafe {
            let mut start_offset = mem::MaybeUninit::uninit();
            let mut end_offset = mem::MaybeUninit::uninit();
            let ret = from_glib_full(ffi::atk_text_get_selection(
                self.as_ref().to_glib_none().0,
                selection_num,
                start_offset.as_mut_ptr(),
                end_offset.as_mut_ptr(),
            ));
            let start_offset = start_offset.assume_init();
            let end_offset = end_offset.assume_init();
            (ret, start_offset, end_offset)
        }
    }
    fn get_string_at_offset(
        &self,
        offset: i32,
        granularity: TextGranularity,
    ) -> (Option<glib::GString>, i32, i32) {
        unsafe {
            let mut start_offset = mem::MaybeUninit::uninit();
            let mut end_offset = mem::MaybeUninit::uninit();
            let ret = from_glib_full(ffi::atk_text_get_string_at_offset(
                self.as_ref().to_glib_none().0,
                offset,
                granularity.to_glib(),
                start_offset.as_mut_ptr(),
                end_offset.as_mut_ptr(),
            ));
            let start_offset = start_offset.assume_init();
            let end_offset = end_offset.assume_init();
            (ret, start_offset, end_offset)
        }
    }
    fn get_text(&self, start_offset: i32, end_offset: i32) -> Option<glib::GString> {
        unsafe {
            from_glib_full(ffi::atk_text_get_text(
                self.as_ref().to_glib_none().0,
                start_offset,
                end_offset,
            ))
        }
    }
    fn get_text_at_offset(
        &self,
        offset: i32,
        boundary_type: TextBoundary,
    ) -> (glib::GString, i32, i32) {
        unsafe {
            let mut start_offset = mem::MaybeUninit::uninit();
            let mut end_offset = mem::MaybeUninit::uninit();
            let ret = from_glib_full(ffi::atk_text_get_text_at_offset(
                self.as_ref().to_glib_none().0,
                offset,
                boundary_type.to_glib(),
                start_offset.as_mut_ptr(),
                end_offset.as_mut_ptr(),
            ));
            let start_offset = start_offset.assume_init();
            let end_offset = end_offset.assume_init();
            (ret, start_offset, end_offset)
        }
    }
    fn remove_selection(&self, selection_num: i32) -> bool {
        unsafe {
            from_glib(ffi::atk_text_remove_selection(
                self.as_ref().to_glib_none().0,
                selection_num,
            ))
        }
    }
    #[cfg(any(feature = "v2_32", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v2_32")))]
    fn scroll_substring_to(&self, start_offset: i32, end_offset: i32, type_: ScrollType) -> bool {
        unsafe {
            from_glib(ffi::atk_text_scroll_substring_to(
                self.as_ref().to_glib_none().0,
                start_offset,
                end_offset,
                type_.to_glib(),
            ))
        }
    }
    #[cfg(any(feature = "v2_32", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v2_32")))]
    fn scroll_substring_to_point(
        &self,
        start_offset: i32,
        end_offset: i32,
        coords: CoordType,
        x: i32,
        y: i32,
    ) -> bool {
        unsafe {
            from_glib(ffi::atk_text_scroll_substring_to_point(
                self.as_ref().to_glib_none().0,
                start_offset,
                end_offset,
                coords.to_glib(),
                x,
                y,
            ))
        }
    }
    fn set_caret_offset(&self, offset: i32) -> bool {
        unsafe {
            from_glib(ffi::atk_text_set_caret_offset(
                self.as_ref().to_glib_none().0,
                offset,
            ))
        }
    }
    fn set_selection(&self, selection_num: i32, start_offset: i32, end_offset: i32) -> bool {
        unsafe {
            from_glib(ffi::atk_text_set_selection(
                self.as_ref().to_glib_none().0,
                selection_num,
                start_offset,
                end_offset,
            ))
        }
    }
    // Signal connectors: each defines a C-ABI trampoline that recovers the
    // boxed closure from the gpointer and invokes it with borrowed arguments.
    fn connect_text_attributes_changed<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn text_attributes_changed_trampoline<P, F: Fn(&P) + 'static>(
            this: *mut ffi::AtkText,
            f: glib::ffi::gpointer,
        ) where
            P: IsA<Text>,
        {
            let f: &F = &*(f as *const F);
            f(&Text::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"text-attributes-changed\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    text_attributes_changed_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
    fn connect_text_caret_moved<F: Fn(&Self, i32) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn text_caret_moved_trampoline<P, F: Fn(&P, i32) + 'static>(
            this: *mut ffi::AtkText,
            arg1: libc::c_int,
            f: glib::ffi::gpointer,
        ) where
            P: IsA<Text>,
        {
            let f: &F = &*(f as *const F);
            f(&Text::from_glib_borrow(this).unsafe_cast_ref(), arg1)
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"text-caret-moved\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    text_caret_moved_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
    fn connect_text_insert<F: Fn(&Self, i32, i32, &str) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn text_insert_trampoline<P, F: Fn(&P, i32, i32, &str) + 'static>(
            this: *mut ffi::AtkText,
            arg1: libc::c_int,
            arg2: libc::c_int,
            arg3: *mut libc::c_char,
            f: glib::ffi::gpointer,
        ) where
            P: IsA<Text>,
        {
            let f: &F = &*(f as *const F);
            f(
                &Text::from_glib_borrow(this).unsafe_cast_ref(),
                arg1,
                arg2,
                &glib::GString::from_glib_borrow(arg3),
            )
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"text-insert\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    text_insert_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
    fn connect_text_remove<F: Fn(&Self, i32, i32, &str) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn text_remove_trampoline<P, F: Fn(&P, i32, i32, &str) + 'static>(
            this: *mut ffi::AtkText,
            arg1: libc::c_int,
            arg2: libc::c_int,
            arg3: *mut libc::c_char,
            f: glib::ffi::gpointer,
        ) where
            P: IsA<Text>,
        {
            let f: &F = &*(f as *const F);
            f(
                &Text::from_glib_borrow(this).unsafe_cast_ref(),
                arg1,
                arg2,
                &glib::GString::from_glib_borrow(arg3),
            )
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"text-remove\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    text_remove_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
    fn connect_text_selection_changed<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn text_selection_changed_trampoline<P, F: Fn(&P) + 'static>(
            this: *mut ffi::AtkText,
            f: glib::ffi::gpointer,
        ) where
            P: IsA<Text>,
        {
            let f: &F = &*(f as *const F);
            f(&Text::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"text-selection-changed\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    text_selection_changed_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
}
// Human-readable name for the interface wrapper (gir-generated).
impl fmt::Display for Text {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("Text")
    }
}
| 32.945946 | 100 | 0.534806 |
ffbe2a65ac089870cbe3ac1453c10a642789a10f | 17,713 | //! This module defines exception handlers.
use atomic::{Atomic, Ordering};
use core::fmt::{self, Debug, Write};
use crate::{smc, uart, util::make_arithaddr};
use xenon_cpu::mfspr;
/// Physical addresses of the PowerPC exception vectors that `init_except`
/// patches with jump stubs. Each stub encodes `vector >> 4` as its ID, which
/// matches the discriminants of [`ExceptionType`].
pub const EXCEPTION_VECTORS: [usize; 17] = [
    0x00000000_00000100, // Reset
    0x00000000_00000200, // Machine check
    0x00000000_00000300, // Data storage
    0x00000000_00000380, // Data segment
    0x00000000_00000400, // Instruction storage
    0x00000000_00000480, // Instruction segment
    0x00000000_00000500, // External interrupt
    0x00000000_00000600, // Alignment
    0x00000000_00000700, // Program
    0x00000000_00000800, // Floating point
    0x00000000_00000900, // Decrementer
    0x00000000_00000980,
    0x00000000_00000c00, // System call
    0x00000000_00000d00, // Trace
    0x00000000_00000f00, // Performance
    0x00000000_00001600,
    0x00000000_00001800,
];
#[allow(dead_code)]
#[derive(Copy, Clone, Debug)]
#[non_exhaustive] // N.B: NECESSARY because we cast from integers.
#[repr(u32)]
/// Exception identifiers: each discriminant is the corresponding vector
/// address shifted right by 4 (e.g. Reset vector 0x100 -> 0x10). Values are
/// produced by `transmute` from the ID stored in SPR 304 by the jump stub.
pub enum ExceptionType {
    Reset = 0x10,
    MachineCheck = 0x20,
    Dsi = 0x30,
    DataSegment = 0x38,
    Isi = 0x40,
    InstructionSegment = 0x48,
    ExternalInterrupt = 0x50,
    Alignment = 0x60,
    Program = 0x70,
    FloatingPoint = 0x80,
    Decrementer = 0x90,
    SystemCall = 0xC0,
    Trace = 0xD0,
    Performance = 0xF0,
}
// Saved CPU register state. The layout is C with 512-byte alignment because
// the asm thunks index into it with hard-coded byte offsets (r[i] at i*8,
// cr at 0x100, ...) and compute per-CPU slots as `PIR << 9` (512 bytes).
#[repr(C, align(512))]
#[derive(Copy, Clone, Default)]
pub struct CpuContext {
    pub r: [u64; 32],
    pub cr: u64,  // 0x100 (256)
    pub lr: u64,  // 0x108 (264)
    pub ctr: u64, // 0x110 (272)
    pub pc: u64,  // 0x118 (280)
    pub msr: u64, // 0x120 (288)
}
impl Debug for CpuContext {
    // Dumps all 32 GPRs (one per line) followed by the special registers.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> core::fmt::Result {
        core::writeln!(fmt, "r:")?;
        for i in 0..32 {
            core::writeln!(fmt, "  {:>3}: {:016X}", i, self.r[i])?;
        }
        core::writeln!(fmt, "cr:  {:016X}", self.cr)?;
        core::writeln!(fmt, "lr:  {:016X}", self.lr)?;
        core::writeln!(fmt, "ctr: {:016X}", self.ctr)?;
        core::writeln!(fmt, "pc:  {:016X}", self.pc)?;
        core::writeln!(fmt, "msr: {:016X}", self.msr)?;
        Ok(())
    }
}
#[allow(dead_code)]
impl CpuContext {
    /// Creates a fully zeroed context.
    pub const fn new() -> Self {
        Self {
            r: [0u64; 32],
            cr: 0u64,
            lr: 0u64,
            ctr: 0u64,
            pc: 0u64,
            msr: 0u64,
        }
    }
    /// Builds a context that enters `func` with stack pointer `r1` in
    /// hypervisor state: MSR[SF/HV/ME] set.
    pub fn with_hvcall(func: extern "C" fn() -> !, r1: u64) -> Self {
        // MSR[SF/HV/ME]
        Self::with_entry(func, r1, 0x90000000_00001000)
    }
    /// Builds a context that enters `func` with stack pointer `r1` in
    /// supervisor state: MSR[SF/ME] set (HV clear).
    pub fn with_svcall(func: extern "C" fn() -> !, r1: u64) -> Self {
        // MSR[SF/ME]
        Self::with_entry(func, r1, 0x80000000_00001000)
    }
    /// Common body of `with_hvcall`/`with_svcall` (previously duplicated):
    /// every register except r1 (stack pointer) and r12 is filled with the
    /// 0xBE poison pattern so stray reads are easy to spot in a dump.
    /// r12 mirrors `pc` (the entry address), matching the original
    /// initialization.
    fn with_entry(func: extern "C" fn() -> !, r1: u64, msr: u64) -> Self {
        let mut r = [0xBEBEBEBE_BEBEBEBEu64; 32];
        r[1] = r1; // stack pointer
        r[12] = func as u64; // entry point, kept in sync with `pc`
        Self {
            r,
            cr: 0xBEBEBEBE_BEBEBEBE,
            lr: 0xBEBEBEBE_BEBEBEBE,
            ctr: 0xBEBEBEBE_BEBEBEBE,
            pc: func as u64,
            msr,
        }
    }
}
/// This is a per-processor area where context information is saved when
/// an exception is encountered. Indexed by PIR (one 512-byte slot per CPU);
/// the asm thunks address it as `base + (PIR << 9)`.
#[no_mangle]
static mut EXCEPTION_SAVE_AREA: [CpuContext; 6] = [CpuContext::new(); 6];
/// This area contains context information for per-processor exception
/// handlers (entry point + emergency stack). It is generally static and
/// unmodified; populated once by `init_except`.
#[no_mangle]
static mut EXCEPTION_LOAD_AREA: [CpuContext; 6] = [CpuContext::new(); 6];
/// The definition of the application-defined exception handler.
/// Returning `Ok(())` means "handled: resume the saved context".
pub type ExceptionHandler = fn(ExceptionType, &mut CpuContext) -> Result<(), ()>;
/// The application-defined exception handler, set by `init_except`.
static EXCEPTION_HANDLER: Atomic<Option<ExceptionHandler>> = Atomic::new(None);
/// Rust entry point reached from the exception thunks. Reads the exception
/// ID the stub left in SPR 304 (HSPRG0), gives the registered handler a
/// chance to recover, and otherwise dumps state over UART and hangs
/// (auto-resetting via SMC on CPU 0).
#[no_mangle]
extern "C" fn handle_exception() -> ! {
    // FIXME: This may allow for unencoded enum discriminants to exist.
    let id: ExceptionType = {
        unsafe { core::mem::transmute(mfspr!(304) as u32) } // HSPRG0
    };
    // SAFETY: We have exclusive access to the save area corresponding to this processor.
    let save_area: &mut CpuContext = unsafe {
        let pir = mfspr!(1023); // PIR: processor ID
        &mut EXCEPTION_SAVE_AREA[pir as usize]
    };
    match EXCEPTION_HANDLER.load(Ordering::Relaxed) {
        Some(ex) => {
            // If the handler successfully handles the exception, reload the calling context.
            if ex(id, save_area).is_ok() {
                unsafe {
                    load_context(save_area);
                }
            }
        }
        // Fallback and handle the exception here.
        None => {}
    }
    let pir = unsafe { mfspr!(1023) };
    let closure = |uart: &mut uart::UART| {
        core::writeln!(uart, "UNHANDLED EXCEPTION! Hit exception vector {:?}", id).unwrap();
        core::writeln!(uart, "MSR: {:#?}", xenon_cpu::intrin::mfmsr()).unwrap();
        core::writeln!(uart, "PIR: {:#?}", pir).unwrap();
        core::writeln!(uart, "---- Saved registers:").unwrap();
        core::writeln!(uart, "  MSR: {:#?}", save_area.msr).unwrap();
        core::writeln!(uart, "  LR: {:#?}", save_area.lr).unwrap();
        core::writeln!(uart, "  PC: {:#?}", save_area.pc).unwrap();
    };
    // Attempt to lock the UART. If that fails (for example, because we took an exception
    // while the UART was locked), forcibly take it to print out error text.
    let res = {
        let mut tries = 0u64;
        loop {
            match uart::UART.try_lock(&closure) {
                Ok(_) => break Ok(()),
                Err(_) => {
                    // Give the holder ~5 seconds (50 * 100ms) before stealing.
                    if tries > 50 {
                        break Err(());
                    }
                    tries += 1;
                    xenon_cpu::time::delay(core::time::Duration::from_millis(100));
                }
            }
        }
    };
    if res.is_err() {
        let mut uart = unsafe { uart::UART.get_mut_unchecked() };
        closure(&mut uart);
    }
    if pir == 0 {
        // Not good. Auto-reset the system.
        smc::SMC.lock(|smc| {
            smc.send_message(&[0x82043000u32, 0x00000000u32, 0x00000000u32, 0x00000000u32]);
        });
    }
    loop {}
}
/// Restores every register from `ctx` and resumes execution at `ctx.pc`
/// with `ctx.msr` via `rfid`. Field offsets (0x100/0x108/...) must match
/// the `CpuContext` layout.
///
/// # Safety
/// `ctx` must describe a valid, resumable machine state; this never returns.
#[naked]
#[no_mangle]
pub unsafe extern "C" fn load_context(_ctx: &CpuContext) -> ! {
    asm!(
        // Special registers first (via r0 as scratch)...
        "ld %r0, 0x100(%r3)",
        "mtcr %r0",
        "ld %r0, 0x108(%r3)",
        "mtlr %r0",
        "ld %r0, 0x110(%r3)",
        "mtctr %r0",
        "ld %r0, 0x118(%r3)",
        "mtsrr0 %r0",
        "ld %r0, 0x120(%r3)",
        "mtsrr1 %r0",
        // ...then the GPRs.
        "ld %r0, 0x00(%r3)",
        "ld %r1, 0x08(%r3)",
        "ld %r2, 0x10(%r3)",
        // N.B: r3 is loaded last (it holds the context pointer until then).
        "ld %r4, 0x20(%r3)",
        "ld %r5, 0x28(%r3)",
        "ld %r6, 0x30(%r3)",
        "ld %r7, 0x38(%r3)",
        "ld %r8, 0x40(%r3)",
        "ld %r9, 0x48(%r3)",
        "ld %r10, 0x50(%r3)",
        "ld %r11, 0x58(%r3)",
        "ld %r12, 0x60(%r3)",
        "ld %r13, 0x68(%r3)",
        "ld %r14, 0x70(%r3)",
        "ld %r15, 0x78(%r3)",
        "ld %r16, 0x80(%r3)",
        "ld %r17, 0x88(%r3)",
        "ld %r18, 0x90(%r3)",
        "ld %r19, 0x98(%r3)",
        "ld %r20, 0xA0(%r3)",
        "ld %r21, 0xA8(%r3)",
        "ld %r22, 0xB0(%r3)",
        "ld %r23, 0xB8(%r3)",
        "ld %r24, 0xC0(%r3)",
        "ld %r25, 0xC8(%r3)",
        "ld %r26, 0xD0(%r3)",
        "ld %r27, 0xD8(%r3)",
        "ld %r28, 0xE0(%r3)",
        "ld %r29, 0xE8(%r3)",
        "ld %r30, 0xF0(%r3)",
        "ld %r31, 0xF8(%r3)",
        "ld %r3, 0x18(%r3)",
        // Return-from-interrupt: jumps to SRR0 with MSR := SRR1.
        "rfid",
        options(noreturn),
    );
}
/// Common exception thunk reached from the per-vector jump stubs.
/// On entry (set up by `make_longjmp_exc`): r3 = exception ID, r4 = original
/// CTR, original r3/r4 are parked in HSPRG0/HSPRG1 (SPRs 304/305).
/// Saves the full register state into this CPU's `EXCEPTION_SAVE_AREA` slot
/// (base + (PIR << 9)), then branches to `except_load_thunk`.
#[naked]
unsafe extern "C" fn except_thunk() -> ! {
    asm!(
        "mtctr %r4", // Reload CTR with original value
        "mfspr %r4, 1023", // r4 = PIR
        "sldi %r4, %r4, 32 + 9", // slot offset = PIR * 512, staged in the high word
        // N.B: The address constants below are patched at runtime by
        // `init_except` (PIE relocation).
        "oris %r4, %r4, EXCEPTION_SAVE_AREA@highest",
        "ori %r4, %r4, EXCEPTION_SAVE_AREA@higher",
        "rotldi %r4, %r4, 32",
        "oris %r4, %r4, EXCEPTION_SAVE_AREA@high",
        "ori %r4, %r4, EXCEPTION_SAVE_AREA@l",
        // Now save registers.
        "std %r0, 0x00(%r4)",
        "std %r1, 0x08(%r4)",
        "std %r2, 0x10(%r4)",
        "mfspr %r0, 304", // Reload R3, which was saved in HSPRG0.
        "std %r0, 0x18(%r4)",
        "mfspr %r0, 305", // Reload R4, which was saved in HSPRG1.
        "std %r0, 0x20(%r4)",
        "std %r5, 0x28(%r4)",
        "std %r6, 0x30(%r4)",
        "std %r7, 0x38(%r4)",
        "std %r8, 0x40(%r4)",
        "std %r9, 0x48(%r4)",
        "std %r10, 0x50(%r4)",
        "std %r11, 0x58(%r4)",
        "std %r12, 0x60(%r4)",
        "std %r13, 0x68(%r4)",
        "std %r14, 0x70(%r4)",
        "std %r15, 0x78(%r4)",
        "std %r16, 0x80(%r4)",
        "std %r17, 0x88(%r4)",
        "std %r18, 0x90(%r4)",
        "std %r19, 0x98(%r4)",
        "std %r20, 0xA0(%r4)",
        "std %r21, 0xA8(%r4)",
        "std %r22, 0xB0(%r4)",
        "std %r23, 0xB8(%r4)",
        "std %r24, 0xC0(%r4)",
        "std %r25, 0xC8(%r4)",
        "std %r26, 0xD0(%r4)",
        "std %r27, 0xD8(%r4)",
        "std %r28, 0xE0(%r4)",
        "std %r29, 0xE8(%r4)",
        "std %r30, 0xF0(%r4)",
        "std %r31, 0xF8(%r4)",
        "mfcr %r0",
        "std %r0, 0x100(%r4)",
        "mflr %r0",
        "std %r0, 0x108(%r4)",
        "mfctr %r0",
        "std %r0, 0x110(%r4)",
        "mfsrr0 %r0",
        "std %r0, 0x118(%r4)",
        "mfsrr1 %r0",
        "std %r0, 0x120(%r4)",
        "mtspr 304, %r3", // HSPRG0 = exception ID (read back in handle_exception)
        // Now load the exception load context.
        "b except_load_thunk",
        options(noreturn)
    );
}
/// Loads this CPU's slot of `EXCEPTION_LOAD_AREA` (base + (PIR << 9)) into
/// r3 and tail-calls `load_context`, switching onto the per-CPU handler
/// context. The `trap` placeholders are overwritten at runtime by
/// `init_except` with the relocated address instructions.
#[naked]
#[no_mangle]
unsafe extern "C" fn except_load_thunk() -> ! {
    asm!(
        "mfspr %r3, 1023", // r3 = PIR
        "sldi %r3, %r3, 32 + 9", // slot offset = PIR * 512, staged in the high word
        // N.B: These instructions are patched later (by init_except).
        "trap",
        "trap",
        "rotldi %r3, %r3, 32",
        "trap",
        "trap",
        "b load_context",
        options(noreturn)
    )
}
/// Create a longjmp stub for an exception vector.
/// The stub preserves r3/r4 in HSPRG0 and HSPRG1 (SPRs 304/305), loads r3
/// with the constant `id`, loads r4 with the original CTR value, and
/// branches to `target` via CTR.
const fn make_longjmp_exc(id: u16, target: usize) -> [u32; 11] {
    // Split the 64-bit target address into four 16-bit immediates for the
    // lis/ori/rldicr/oris/ori load sequence.
    let hi_hi = ((target >> 48) & 0xFFFF) as u32;
    let hi_lo = ((target >> 32) & 0xFFFF) as u32;
    let lo_hi = ((target >> 16) & 0xFFFF) as u32;
    let lo_lo = (target & 0xFFFF) as u32;
    [
        0x7C704BA6,             // mtspr HSPRG0, %r3
        0x7C914BA6,             // mtspr HSPRG1, %r4
        0x3C600000 | hi_hi,     // lis %r3, target[63:48]
        0x60630000 | hi_lo,     // ori %r3, %r3, target[47:32]
        0x786307C6,             // rldicr %r3, %r3, 32, 31
        0x64630000 | lo_hi,     // oris %r3, %r3, target[31:16]
        0x60630000 | lo_lo,     // ori %r3, %r3, target[15:0]
        0x7C8902A6,             // mfctr %r4
        0x7C6903A6,             // mtctr %r3
        0x38600000 | id as u32, // li %r3, id
        0x4E800420,             // bctr
    ]
}
/// Deliberately raises a Program exception via the `trap` instruction
/// (useful for testing the handler path).
///
/// # Safety
/// Diverges into the exception machinery; only call once `init_except`
/// has installed the vectors.
pub unsafe fn cause_exception() -> ! {
    // Trap.
    asm!("trap", options(noreturn));
}
/// This function initializes the exception handler subsystem.
///
/// # Safety
/// This function should only be called once during startup.
/// This will place jump stubs at the PowerPC exception vectors.
///
/// Unsafe for obvious reasons.
pub unsafe fn init_except(handler: Option<ExceptionHandler>) {
    EXCEPTION_HANDLER.store(handler, Ordering::Relaxed);
    // Set up the load area: each CPU gets the shared entry point and its own
    // emergency stack pointer (descending 64 KiB windows below 0x1F00_0000).
    EXCEPTION_LOAD_AREA = [
        CpuContext::with_hvcall(handle_exception, 0x8000_0000_1EFF_0000),
        CpuContext::with_hvcall(handle_exception, 0x8000_0000_1EFE_0000),
        CpuContext::with_hvcall(handle_exception, 0x8000_0000_1EFD_0000),
        CpuContext::with_hvcall(handle_exception, 0x8000_0000_1EFC_0000),
        CpuContext::with_hvcall(handle_exception, 0x8000_0000_1EFB_0000),
        CpuContext::with_hvcall(handle_exception, 0x8000_0000_1EFA_0000),
    ];
    // N.B: We have to patch the exception thunk to deal with PIE.
    {
        let save_area = &mut EXCEPTION_SAVE_AREA[0] as *mut _ as usize;
        let thunk_area = except_thunk as usize as *mut u32;
        // We have to use addition here because the PIR is pre-loaded into r4 by
        // the thunk, and a bitwise OR will not properly add it as an offset.
        // We only have to use addition on the lowest chunk, because the highest
        // offset is `0xA00` (5 << 9).
        let (arith_hi, arith_lo) = make_arithaddr(save_area as u32);
        // "oris %r4, %r4, EXCEPTION_SAVE_AREA@highest"
        thunk_area
            .offset(3)
            .write_volatile(0x64840000 | ((save_area >> 48) & 0xFFFF) as u32);
        // "ori %r4, %r4, EXCEPTION_SAVE_AREA@higher"
        thunk_area
            .offset(4)
            .write_volatile(0x60840000 | ((save_area >> 32) & 0xFFFF) as u32);
        // "oris %r4, %r4, EXCEPTION_SAVE_AREA@ha"
        thunk_area
            .offset(6)
            .write_volatile(0x64840000 | arith_hi as u32);
        // "addi %r4, %r4, EXCEPTION_SAVE_AREA@l"
        thunk_area
            .offset(7)
            .write_volatile(0x38840000 | arith_lo as u32);
    }
    // Ditto for the load thunk (fills in the `trap` placeholder slots).
    {
        let load_area = &mut EXCEPTION_LOAD_AREA[0] as *mut _ as usize;
        let thunk_area = except_load_thunk as usize as *mut u32;
        let (arith_hi, arith_lo) = make_arithaddr(load_area as u32);
        // "oris %r3, %r3, EXCEPTION_LOAD_AREA@highest"
        thunk_area
            .offset(2)
            .write_volatile(0x64630000 | ((load_area >> 48) & 0xFFFF) as u32);
        // "ori %r3, %r3, EXCEPTION_LOAD_AREA@higher"
        thunk_area
            .offset(3)
            .write_volatile(0x60630000 | ((load_area >> 32) & 0xFFFF) as u32);
        // "oris %r3, %r3, EXCEPTION_LOAD_AREA@ha"
        thunk_area
            .offset(5)
            .write_volatile(0x64630000 | arith_hi as u32);
        // "addi %r3, %r3, EXCEPTION_LOAD_AREA@l"
        thunk_area
            .offset(6)
            .write_volatile(0x38630000 | arith_lo as u32);
    }
    // Finally, install a jump stub (ID = vector >> 4) at every vector.
    for vec in EXCEPTION_VECTORS.iter() {
        let buf = make_longjmp_exc((*vec >> 4) as u16, except_thunk as usize);
        core::ptr::copy_nonoverlapping(buf.as_ptr(), *vec as *mut u32, buf.len());
    }
}
#[cfg(test)]
mod test {
    use crate::except::make_arithaddr;
    /// `make_arithaddr` must compensate for sign extension of the low
    /// halfword used by `addi` (low 0x8018 is negative, so high gets +1).
    #[test]
    fn test_arithaddr() {
        assert_eq!(make_arithaddr(0x0B0B8018), 0x0B0C8018);
    }
}
| 34.261122 | 93 | 0.513747 |
edaed19925687103c85fc34297d61482a8a04053 | 2,715 | // Testing against Google Wycheproof test vectors
// Latest commit when these test vectors were pulled: https://github.com/google/wycheproof/commit/2196000605e45d91097147c9c71f26b72af58003
extern crate hex;
extern crate serde_json;
use self::hex::decode;
use self::serde_json::{Deserializer, Value};
use crate::kdf::hkdf_test_runner;
use std::{fs::File, io::BufReader};
fn wycheproof_runner(path: &str) {
let file = File::open(path).unwrap();
let reader = BufReader::new(file);
let stream = Deserializer::from_reader(reader).into_iter::<Value>();
for test_file in stream {
for test_groups in test_file.unwrap().get("testGroups") {
for test_group_collection in test_groups.as_array() {
for test_group in test_group_collection {
for test_vectors in test_group.get("tests").unwrap().as_array() {
for test_case in test_vectors {
let ikm =
decode(test_case.get("ikm").unwrap().as_str().unwrap()).unwrap();
let salt =
decode(test_case.get("salt").unwrap().as_str().unwrap()).unwrap();
let info =
decode(test_case.get("info").unwrap().as_str().unwrap()).unwrap();
let okm_len = test_case.get("size").unwrap().as_u64().unwrap();
let okm =
decode(test_case.get("okm").unwrap().as_str().unwrap()).unwrap();
let result: bool =
match test_case.get("result").unwrap().as_str().unwrap() {
"valid" => true,
"invalid" => false,
_ => panic!("Unrecognized result detected!"),
};
let tcid = test_case.get("tcId").unwrap().as_u64().unwrap();
println!("tcId: {}, okm_len: {}", tcid, okm_len);
hkdf_test_runner(
None,
&okm,
&salt,
&ikm,
&info,
okm_len as usize,
result,
);
}
}
}
}
}
}
}
/// End-to-end run over the bundled Wycheproof HKDF-SHA512 vector file.
#[test]
fn test_wycheproof_hkdf() {
    wycheproof_runner(
        "./tests/test_data/third_party/google/wycheproof/wycheproof_hkdf_sha512_test.json",
    );
}
| 42.421875 | 138 | 0.456722 |
9c52f9cf3d7e449c64019c09b5d9fcc0ae2ee2ff | 9,076 | // Copyright 2018 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rand::{self, Rng};
use std::{error::Error, net::SocketAddr};
use super::{NodeHandler, NodeRole, RequestData};
use helpers::Height;
use messages::{Any, Connect, Message, PeersRequest, RawMessage, Status};
impl NodeHandler {
    /// Redirects message to the corresponding `handle_...` function.
    ///
    /// Messages that fail to deserialize into a known `Any` variant are
    /// logged and dropped; no reply is produced for them.
    pub fn handle_message(&mut self, raw: RawMessage) {
        match Any::from_raw(raw) {
            Ok(Any::Connect(msg)) => self.handle_connect(msg),
            Ok(Any::Status(msg)) => self.handle_status(&msg),
            Ok(Any::Consensus(msg)) => self.handle_consensus(msg),
            Ok(Any::Request(msg)) => self.handle_request(msg),
            Ok(Any::Block(msg)) => self.handle_block(&msg),
            Ok(Any::Transaction(msg)) => self.handle_tx(&msg),
            Ok(Any::TransactionsBatch(msg)) => self.handle_txs_batch(&msg),
            Err(err) => {
                error!("Invalid message received: {:?}", err.description());
            }
        }
    }
    /// Handles the `Connected` event. Node's `Connect` message is sent as response
    /// if received `Connect` message is correct.
    pub fn handle_connected(&mut self, address: &SocketAddr, connect: Connect) {
        info!("Received Connect message from peer: {:?}", address);
        // TODO: use `ConnectInfo` instead of connect-messages. (ECR-1452)
        // All validation and the optional reply happen in handle_connect.
        self.handle_connect(connect);
    }
    /// Handles the `Disconnected` event. Node will try to connect to that address again if it was
    /// in the validators list.
    pub fn handle_disconnected(&mut self, addr: SocketAddr) {
        info!("Disconnected from: {}", addr);
        self.remove_peer_with_addr(addr);
    }
    /// Handles the `UnableConnectToPeer` event. Node will try to connect to that address again
    /// if it was in the validators list.
    pub fn handle_unable_to_connect(&mut self, addr: SocketAddr) {
        info!("Could not connect to: {}", addr);
        self.remove_peer_with_addr(addr);
    }
/// Removes peer from the state and from the cache. Node will try to connect to that address
/// again if it was in the validators list.
fn remove_peer_with_addr(&mut self, addr: SocketAddr) {
let need_reconnect = self.state.remove_peer_with_addr(&addr);
if need_reconnect {
self.connect(&addr);
}
self.blockchain.remove_peer_with_addr(&addr);
}
/// Handles the `Connect` message and connects to a peer as result.
pub fn handle_connect(&mut self, message: Connect) {
// TODO Add spam protection. (ECR-170)
// TODO: drop connection if checks have failed. (ECR-1837)
let address = message.addr();
if address == self.state.our_connect_message().addr() {
trace!("Received Connect with same address as our external_address.");
return;
}
let public_key = *message.pub_key();
if public_key == *self.state.our_connect_message().pub_key() {
trace!("Received Connect with same pub_key as ours.");
return;
}
if !self.state.connect_list().is_peer_allowed(&public_key) {
error!(
"Received connect message from {:?} peer which not in ConnectList.",
message.pub_key()
);
return;
}
if !message.verify_signature(&public_key) {
error!(
"Received connect-message with incorrect signature, msg={:?}",
message
);
return;
}
// Check if we have another connect message from peer with the given public_key.
let mut need_connect = true;
if let Some(saved_message) = self.state.peers().get(&public_key) {
if saved_message.time() > message.time() {
error!("Received outdated Connect message from {}", address);
return;
} else if saved_message.time() < message.time() {
need_connect = saved_message.addr() != message.addr();
} else if saved_message.addr() == message.addr() {
need_connect = false;
} else {
error!("Received weird Connect message from {}", address);
return;
}
}
self.state.add_peer(public_key, message.clone());
info!(
"Received Connect message from {}, {}",
address, need_connect,
);
self.blockchain.save_peer(&public_key, message);
if need_connect {
info!("Send Connect message to {}", address);
self.connect(&address);
}
}
/// Handles the `Status` message. Node sends `BlockRequest` as response if height in the
/// message is higher than node's height.
pub fn handle_status(&mut self, msg: &Status) {
let height = self.state.height();
trace!(
"HANDLE STATUS: current height = {}, msg height = {}",
height,
msg.height()
);
if !self.state.connect_list().is_peer_allowed(msg.from()) {
error!(
"Received status message from peer = {:?} which not in ConnectList.",
msg.from()
);
return;
}
// Handle message from future height
if msg.height() > height {
let peer = msg.from();
if !msg.verify_signature(peer) {
error!(
"Received status message with incorrect signature, msg={:?}",
msg
);
return;
}
// Check validator height info
if msg.height() > self.state.node_height(peer) {
// Update validator height
self.state.set_node_height(*peer, msg.height());
}
// Request block
self.request(RequestData::Block(height), *peer);
}
}
/// Handles the `PeersRequest` message. Node sends `Connect` messages of other peers as result.
pub fn handle_request_peers(&mut self, msg: &PeersRequest) {
let peers: Vec<Connect> = self.state.peers().iter().map(|(_, b)| b.clone()).collect();
trace!(
"HANDLE REQUEST PEERS: Sending {:?} peers to {:?}",
peers,
msg.from()
);
for peer in peers {
self.send_to_peer(*msg.from(), peer.raw());
}
}
/// Handles `NodeTimeout::Status`, broadcasts the `Status` message if it isn't outdated as
/// result.
pub fn handle_status_timeout(&mut self, height: Height) {
if self.state.height() == height {
self.broadcast_status();
self.add_status_timeout();
}
}
/// Handles `NodeTimeout::PeerExchange`. Node sends the `PeersRequest` to a random peer.
pub fn handle_peer_exchange_timeout(&mut self) {
if !self.state.peers().is_empty() {
let to = self.state.peers().len();
let gen_peer_id = || -> usize {
let mut rng = rand::thread_rng();
rng.gen_range(0, to)
};
let peer = self.state
.peers()
.iter()
.map(|x| x.1.clone())
.nth(gen_peer_id())
.unwrap();
let peer = peer.clone();
let msg = PeersRequest::new(
self.state.consensus_public_key(),
peer.pub_key(),
self.state.consensus_secret_key(),
);
trace!("Request peers from peer with addr {:?}", peer.addr());
self.send_to_peer(*peer.pub_key(), msg.raw());
}
self.add_peer_exchange_timeout();
}
/// Handles `NodeTimeout::UpdateApiState`.
/// Node update internal `ApiState` and `NodeRole`.
pub fn handle_update_api_state_timeout(&mut self) {
self.api_state.update_node_state(&self.state);
self.node_role = NodeRole::new(self.state.validator_id());
self.add_update_api_state_timeout();
}
/// Broadcasts the `Status` message to all peers.
pub fn broadcast_status(&mut self) {
let hash = self.blockchain.last_hash();
let status = Status::new(
self.state.consensus_public_key(),
self.state.height(),
&hash,
self.state.consensus_secret_key(),
);
trace!("Broadcast status: {:?}", status);
self.broadcast(status.raw());
}
}
| 37.659751 | 99 | 0.571617 |
eb616430b39bbdd43bd1fc3f4520a37566984a3b | 22,386 | //! Vote program
//! Receive and processes votes from validators
use crate::{
id,
vote_state::{self, Vote, VoteAuthorize, VoteInit, VoteState},
};
use log::*;
use num_derive::{FromPrimitive, ToPrimitive};
use serde_derive::{Deserialize, Serialize};
use solana_metrics::inc_new_counter_info;
use solana_sdk::{
decode_error::DecodeError,
feature_set,
hash::Hash,
instruction::{AccountMeta, Instruction, InstructionError},
keyed_account::{from_keyed_account, get_signers, keyed_account_at_index, KeyedAccount},
process_instruction::InvokeContext,
program_utils::limited_deserialize,
pubkey::Pubkey,
system_instruction,
sysvar::{self, clock::Clock, slot_hashes::SlotHashes},
};
use std::collections::HashSet;
use thiserror::Error;
/// Reasons the stake might have had an error
/// Reasons a vote instruction may fail; surfaced to callers as
/// `InstructionError::Custom` codes via the `ToPrimitive` derive.
#[derive(Error, Debug, Clone, PartialEq, FromPrimitive, ToPrimitive)]
pub enum VoteError {
    /// The vote references a slot already voted on or outside the retained history.
    #[error("vote already recorded or not in slot hashes history")]
    VoteTooOld,
    /// The slots in the vote disagree with the bank's slot history.
    #[error("vote slots do not match bank history")]
    SlotsMismatch,
    /// The vote's hash does not match the bank hash for that slot.
    #[error("vote hash does not match bank hash")]
    SlotHashMismatch,
    /// The vote contained no slots at all.
    #[error("vote has no slots, invalid")]
    EmptySlots,
    /// The vote's timestamp is older than the last recorded one.
    #[error("vote timestamp not recent")]
    TimestampTooOld,
    /// The authorized voter may only be changed once per epoch.
    #[error("authorized voter has already been changed this epoch")]
    TooSoonToReauthorize,
}
// Lets `DecodeError::decode_custom_error_to_enum` map custom error codes back
// into `VoteError` variants, labeled with this type name.
impl<E> DecodeError<E> for VoteError {
    fn type_of() -> &'static str {
        "VoteError"
    }
}
/// Instructions understood by the vote program; serialized with bincode and
/// dispatched in `process_instruction`.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum VoteInstruction {
    /// Initialize a vote account
    ///
    /// # Account references
    ///   0. `[WRITE]` Uninitialized vote account
    ///   1. `[]` Rent sysvar
    ///   2. `[]` Clock sysvar
    ///   3. `[SIGNER]` New validator identity (node_pubkey)
    InitializeAccount(VoteInit),
    /// Authorize a key to send votes or issue a withdrawal
    ///
    /// # Account references
    ///   0. `[WRITE]` Vote account to be updated with the Pubkey for authorization
    ///   1. `[]` Clock sysvar
    ///   2. `[SIGNER]` Vote or withdraw authority
    Authorize(Pubkey, VoteAuthorize),
    /// A Vote instruction with recent votes
    ///
    /// # Account references
    ///   0. `[WRITE]` Vote account to vote with
    ///   1. `[]` Slot hashes sysvar
    ///   2. `[]` Clock sysvar
    ///   3. `[SIGNER]` Vote authority
    Vote(Vote),
    /// Withdraw some amount of funds
    ///
    /// # Account references
    ///   0. `[WRITE]` Vote account to withdraw from
    ///   1. `[WRITE]` Recipient account
    ///   2. `[SIGNER]` Withdraw authority
    Withdraw(u64),
    /// Update the vote account's validator identity (node_pubkey)
    ///
    /// # Account references
    ///   0. `[WRITE]` Vote account to be updated with the given authority public key
    ///   1. `[SIGNER]` New validator identity (node_pubkey)
    ///   2. `[SIGNER]` Withdraw authority
    UpdateValidatorIdentity,
    /// Update the commission for the vote account
    ///
    /// # Account references
    ///   0. `[WRITE]` Vote account to be updated
    ///   1. `[SIGNER]` Withdraw authority
    UpdateCommission(u8),
    /// A Vote instruction with recent votes
    ///
    /// # Account references
    ///   0. `[WRITE]` Vote account to vote with
    ///   1. `[]` Slot hashes sysvar
    ///   2. `[]` Clock sysvar
    ///   3. `[SIGNER]` Vote authority
    VoteSwitch(Vote, Hash),
    /// Authorize a key to send votes or issue a withdrawal
    ///
    /// This instruction behaves like `Authorize` with the additional requirement that the new vote
    /// or withdraw authority must also be a signer.
    ///
    /// # Account references
    ///   0. `[WRITE]` Vote account to be updated with the Pubkey for authorization
    ///   1. `[]` Clock sysvar
    ///   2. `[SIGNER]` Vote or withdraw authority
    ///   3. `[SIGNER]` New vote or withdraw authority
    AuthorizeChecked(VoteAuthorize),
}
/// Builds the `InitializeAccount` instruction for a freshly created vote account.
///
/// Accounts, in order: writable vote account, rent sysvar, clock sysvar, and the
/// new validator identity, which must sign.
fn initialize_account(vote_pubkey: &Pubkey, vote_init: &VoteInit) -> Instruction {
    let data = VoteInstruction::InitializeAccount(*vote_init);
    let accounts = vec![
        AccountMeta::new(*vote_pubkey, false),
        AccountMeta::new_readonly(sysvar::rent::id(), false),
        AccountMeta::new_readonly(sysvar::clock::id(), false),
        AccountMeta::new_readonly(vote_init.node_pubkey, true),
    ];
    Instruction::new_with_bincode(id(), &data, accounts)
}
/// Returns the two instructions that create and initialize a vote account:
/// a system-program `create_account` sized for `VoteState`, followed by
/// `InitializeAccount`.
pub fn create_account(
    from_pubkey: &Pubkey,
    vote_pubkey: &Pubkey,
    vote_init: &VoteInit,
    carats: u64,
) -> Vec<Instruction> {
    let space = VoteState::size_of() as u64;
    vec![
        system_instruction::create_account(from_pubkey, vote_pubkey, carats, space, &id()),
        initialize_account(vote_pubkey, vote_init),
    ]
}
/// Like [`create_account`], but derives the new vote account address from
/// `base` and `seed` via the system program's seeded creation.
pub fn create_account_with_seed(
    from_pubkey: &Pubkey,
    vote_pubkey: &Pubkey,
    base: &Pubkey,
    seed: &str,
    vote_init: &VoteInit,
    carats: u64,
) -> Vec<Instruction> {
    let space = VoteState::size_of() as u64;
    vec![
        system_instruction::create_account_with_seed(
            from_pubkey,
            vote_pubkey,
            base,
            seed,
            carats,
            space,
            &id(),
        ),
        initialize_account(vote_pubkey, vote_init),
    ]
}
/// Builds an `Authorize` instruction that hands the vote or withdraw authority
/// to `new_authorized_pubkey`; the current authority must sign.
pub fn authorize(
    vote_pubkey: &Pubkey,
    authorized_pubkey: &Pubkey, // currently authorized
    new_authorized_pubkey: &Pubkey,
    vote_authorize: VoteAuthorize,
) -> Instruction {
    let data = VoteInstruction::Authorize(*new_authorized_pubkey, vote_authorize);
    let accounts = vec![
        AccountMeta::new(*vote_pubkey, false),
        AccountMeta::new_readonly(sysvar::clock::id(), false),
        AccountMeta::new_readonly(*authorized_pubkey, true),
    ];
    Instruction::new_with_bincode(id(), &data, accounts)
}
/// Builds an `AuthorizeChecked` instruction; unlike [`authorize`], the new
/// authority is listed as a required signer as well.
pub fn authorize_checked(
    vote_pubkey: &Pubkey,
    authorized_pubkey: &Pubkey, // currently authorized
    new_authorized_pubkey: &Pubkey,
    vote_authorize: VoteAuthorize,
) -> Instruction {
    let data = VoteInstruction::AuthorizeChecked(vote_authorize);
    let accounts = vec![
        AccountMeta::new(*vote_pubkey, false),
        AccountMeta::new_readonly(sysvar::clock::id(), false),
        AccountMeta::new_readonly(*authorized_pubkey, true),
        AccountMeta::new_readonly(*new_authorized_pubkey, true),
    ];
    Instruction::new_with_bincode(id(), &data, accounts)
}
/// Builds an `UpdateValidatorIdentity` instruction; both the new identity and
/// the withdraw authority must sign.
pub fn update_validator_identity(
    vote_pubkey: &Pubkey,
    authorized_withdrawer_pubkey: &Pubkey,
    node_pubkey: &Pubkey,
) -> Instruction {
    let data = VoteInstruction::UpdateValidatorIdentity;
    let accounts = vec![
        AccountMeta::new(*vote_pubkey, false),
        AccountMeta::new_readonly(*node_pubkey, true),
        AccountMeta::new_readonly(*authorized_withdrawer_pubkey, true),
    ];
    Instruction::new_with_bincode(id(), &data, accounts)
}
/// Builds an `UpdateCommission` instruction; the withdraw authority must sign.
pub fn update_commission(
    vote_pubkey: &Pubkey,
    authorized_withdrawer_pubkey: &Pubkey,
    commission: u8,
) -> Instruction {
    let data = VoteInstruction::UpdateCommission(commission);
    let accounts = vec![
        AccountMeta::new(*vote_pubkey, false),
        AccountMeta::new_readonly(*authorized_withdrawer_pubkey, true),
    ];
    Instruction::new_with_bincode(id(), &data, accounts)
}
/// Builds a `Vote` instruction carrying `vote`; the vote authority must sign.
pub fn vote(vote_pubkey: &Pubkey, authorized_voter_pubkey: &Pubkey, vote: Vote) -> Instruction {
    let accounts = vec![
        AccountMeta::new(*vote_pubkey, false),
        AccountMeta::new_readonly(sysvar::slot_hashes::id(), false),
        AccountMeta::new_readonly(sysvar::clock::id(), false),
        AccountMeta::new_readonly(*authorized_voter_pubkey, true),
    ];
    let data = VoteInstruction::Vote(vote);
    Instruction::new_with_bincode(id(), &data, accounts)
}
/// Builds a `VoteSwitch` instruction: a vote plus a switching-proof hash.
/// Account layout matches [`vote`].
pub fn vote_switch(
    vote_pubkey: &Pubkey,
    authorized_voter_pubkey: &Pubkey,
    vote: Vote,
    proof_hash: Hash,
) -> Instruction {
    let accounts = vec![
        AccountMeta::new(*vote_pubkey, false),
        AccountMeta::new_readonly(sysvar::slot_hashes::id(), false),
        AccountMeta::new_readonly(sysvar::clock::id(), false),
        AccountMeta::new_readonly(*authorized_voter_pubkey, true),
    ];
    let data = VoteInstruction::VoteSwitch(vote, proof_hash);
    Instruction::new_with_bincode(id(), &data, accounts)
}
/// Builds a `Withdraw` instruction moving `carats` from the vote account to
/// `to_pubkey`; the withdraw authority must sign.
pub fn withdraw(
    vote_pubkey: &Pubkey,
    authorized_withdrawer_pubkey: &Pubkey,
    carats: u64,
    to_pubkey: &Pubkey,
) -> Instruction {
    let accounts = vec![
        AccountMeta::new(*vote_pubkey, false),
        AccountMeta::new(*to_pubkey, false),
        AccountMeta::new_readonly(*authorized_withdrawer_pubkey, true),
    ];
    let data = VoteInstruction::Withdraw(carats);
    Instruction::new_with_bincode(id(), &data, accounts)
}
/// Checks that `keyed_account` holds enough carats to be rent-exempt per the
/// rent sysvar; returns `InsufficientFunds` otherwise.
fn verify_rent_exemption(
    keyed_account: &KeyedAccount,
    rent_sysvar_account: &KeyedAccount,
) -> Result<(), InstructionError> {
    let rent: sysvar::rent::Rent = from_keyed_account(rent_sysvar_account)?;
    if rent.is_exempt(keyed_account.carats()?, keyed_account.data_len()?) {
        Ok(())
    } else {
        Err(InstructionError::InsufficientFunds)
    }
}
/// Entry point of the vote program: deserializes the instruction data and
/// dispatches to the matching `vote_state` handler.
///
/// Account 0 is always the vote account being operated on; it must be owned by
/// this program. Returns the `InstructionError` produced by the handler.
pub fn process_instruction(
    _program_id: &Pubkey,
    data: &[u8],
    invoke_context: &mut dyn InvokeContext,
) -> Result<(), InstructionError> {
    let keyed_accounts = invoke_context.get_keyed_accounts()?;
    trace!("process_instruction: {:?}", data);
    trace!("keyed_accounts: {:?}", keyed_accounts);
    // Collect every signer so handlers can check authorization.
    let signers: HashSet<Pubkey> = get_signers(keyed_accounts);
    let me = &mut keyed_account_at_index(keyed_accounts, 0)?;
    // Reject spoofed vote accounts owned by some other program.
    if me.owner()? != id() {
        return Err(InstructionError::InvalidAccountOwner);
    }
    match limited_deserialize(data)? {
        VoteInstruction::InitializeAccount(vote_init) => {
            // Account 1 is the rent sysvar; the account must be rent-exempt.
            verify_rent_exemption(me, keyed_account_at_index(keyed_accounts, 1)?)?;
            vote_state::initialize_account(
                me,
                &vote_init,
                &signers,
                &from_keyed_account::<Clock>(keyed_account_at_index(keyed_accounts, 2)?)?,
                invoke_context.is_feature_active(&feature_set::check_init_vote_data::id()),
            )
        }
        VoteInstruction::Authorize(voter_pubkey, vote_authorize) => vote_state::authorize(
            me,
            &voter_pubkey,
            vote_authorize,
            &signers,
            &from_keyed_account::<Clock>(keyed_account_at_index(keyed_accounts, 1)?)?,
        ),
        VoteInstruction::UpdateValidatorIdentity => vote_state::update_validator_identity(
            me,
            keyed_account_at_index(keyed_accounts, 1)?.unsigned_key(),
            &signers,
        ),
        VoteInstruction::UpdateCommission(commission) => {
            vote_state::update_commission(me, commission, &signers)
        }
        // `VoteSwitch` is processed exactly like `Vote`; the proof hash is not
        // inspected here.
        VoteInstruction::Vote(vote) | VoteInstruction::VoteSwitch(vote, _) => {
            inc_new_counter_info!("vote-native", 1);
            vote_state::process_vote(
                me,
                &from_keyed_account::<SlotHashes>(keyed_account_at_index(keyed_accounts, 1)?)?,
                &from_keyed_account::<Clock>(keyed_account_at_index(keyed_accounts, 2)?)?,
                &vote,
                &signers,
            )
        }
        VoteInstruction::Withdraw(carats) => {
            let to = keyed_account_at_index(keyed_accounts, 1)?;
            vote_state::withdraw(me, carats, to, &signers)
        }
        VoteInstruction::AuthorizeChecked(vote_authorize) => {
            // Feature-gated: only valid once `vote_stake_checked_instructions`
            // is active on the cluster.
            if invoke_context.is_feature_active(&feature_set::vote_stake_checked_instructions::id())
            {
                // The new authority (account 3) must itself be a signer.
                let voter_pubkey = &keyed_account_at_index(keyed_accounts, 3)?
                    .signer_key()
                    .ok_or(InstructionError::MissingRequiredSignature)?;
                vote_state::authorize(
                    me,
                    voter_pubkey,
                    vote_authorize,
                    &signers,
                    &from_keyed_account::<Clock>(keyed_account_at_index(keyed_accounts, 1)?)?,
                )
            } else {
                Err(InstructionError::InvalidInstructionData)
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use bincode::serialize;
    use solana_sdk::{
        account::{self, Account, AccountSharedData},
        process_instruction::MockInvokeContext,
        rent::Rent,
    };
    use std::cell::RefCell;
    use std::str::FromStr;
    // Convenience: a default account wrapped in the RefCell that KeyedAccount expects.
    fn create_default_account() -> RefCell<AccountSharedData> {
        RefCell::new(AccountSharedData::default())
    }
    // these are for 100% coverage in this file
    #[test]
    fn test_vote_process_instruction_decode_bail() {
        // Empty instruction data with no accounts must fail before deserialization.
        assert_eq!(
            super::process_instruction(
                &Pubkey::default(),
                &[],
                &mut MockInvokeContext::new(vec![])
            ),
            Err(InstructionError::NotEnoughAccountKeys),
        );
    }
    // Test harness: fabricates plausible accounts for each AccountMeta of the
    // instruction (sysvars get real sysvar data; the "invalid" pubkey gets a
    // wrong owner) and runs the real `process_instruction` against them.
    #[allow(clippy::same_item_push)]
    fn process_instruction(instruction: &Instruction) -> Result<(), InstructionError> {
        let mut accounts: Vec<_> = instruction
            .accounts
            .iter()
            .map(|meta| {
                RefCell::new(if sysvar::clock::check_id(&meta.pubkey) {
                    account::create_account_shared_data_for_test(&Clock::default())
                } else if sysvar::slot_hashes::check_id(&meta.pubkey) {
                    account::create_account_shared_data_for_test(&SlotHashes::default())
                } else if sysvar::rent::check_id(&meta.pubkey) {
                    account::create_account_shared_data_for_test(&Rent::free())
                } else if meta.pubkey == invalid_vote_state_pubkey() {
                    AccountSharedData::from(Account {
                        owner: invalid_vote_state_pubkey(),
                        ..Account::default()
                    })
                } else {
                    AccountSharedData::from(Account {
                        owner: id(),
                        ..Account::default()
                    })
                })
            })
            .collect();
        // Pad with extra default accounts (kept for coverage; never referenced).
        for _ in 0..instruction.accounts.len() {
            accounts.push(RefCell::new(AccountSharedData::default()));
        }
        {
            let keyed_accounts: Vec<_> = instruction
                .accounts
                .iter()
                .zip(accounts.iter())
                .map(|(meta, account)| KeyedAccount::new(&meta.pubkey, meta.is_signer, account))
                .collect();
            super::process_instruction(
                &Pubkey::default(),
                &instruction.data,
                &mut MockInvokeContext::new(keyed_accounts),
            )
        }
    }
    // Fixed pubkey used by the harness to mark an account with the wrong owner.
    fn invalid_vote_state_pubkey() -> Pubkey {
        Pubkey::from_str("BadVote111111111111111111111111111111111111").unwrap()
    }
    #[test]
    fn test_spoofed_vote() {
        // A vote account owned by another program must be rejected.
        assert_eq!(
            process_instruction(&vote(
                &invalid_vote_state_pubkey(),
                &Pubkey::default(),
                Vote::default(),
            )),
            Err(InstructionError::InvalidAccountOwner),
        );
    }
    #[test]
    fn test_vote_process_instruction() {
        // All handlers bail with InvalidAccountData on an uninitialized vote state;
        // this exercises every dispatch arm.
        solana_logger::setup();
        let instructions = create_account(
            &Pubkey::default(),
            &Pubkey::default(),
            &VoteInit::default(),
            100,
        );
        assert_eq!(
            process_instruction(&instructions[1]),
            Err(InstructionError::InvalidAccountData),
        );
        assert_eq!(
            process_instruction(&vote(
                &Pubkey::default(),
                &Pubkey::default(),
                Vote::default(),
            )),
            Err(InstructionError::InvalidAccountData),
        );
        assert_eq!(
            process_instruction(&vote_switch(
                &Pubkey::default(),
                &Pubkey::default(),
                Vote::default(),
                Hash::default(),
            )),
            Err(InstructionError::InvalidAccountData),
        );
        assert_eq!(
            process_instruction(&authorize(
                &Pubkey::default(),
                &Pubkey::default(),
                &Pubkey::default(),
                VoteAuthorize::Voter,
            )),
            Err(InstructionError::InvalidAccountData),
        );
        assert_eq!(
            process_instruction(&update_validator_identity(
                &Pubkey::default(),
                &Pubkey::default(),
                &Pubkey::default(),
            )),
            Err(InstructionError::InvalidAccountData),
        );
        assert_eq!(
            process_instruction(&update_commission(
                &Pubkey::default(),
                &Pubkey::default(),
                0,
            )),
            Err(InstructionError::InvalidAccountData),
        );
        assert_eq!(
            process_instruction(&withdraw(
                &Pubkey::default(),
                &Pubkey::default(),
                0,
                &Pubkey::default()
            )),
            Err(InstructionError::InvalidAccountData),
        );
    }
    #[test]
    fn test_vote_authorize_checked() {
        let vote_pubkey = Pubkey::new_unique();
        let authorized_pubkey = Pubkey::new_unique();
        let new_authorized_pubkey = Pubkey::new_unique();
        // Test with vanilla authorize accounts
        // Truncating to two accounts removes the required signer slots.
        let mut instruction = authorize_checked(
            &vote_pubkey,
            &authorized_pubkey,
            &new_authorized_pubkey,
            VoteAuthorize::Voter,
        );
        instruction.accounts = instruction.accounts[0..2].to_vec();
        assert_eq!(
            process_instruction(&instruction),
            Err(InstructionError::NotEnoughAccountKeys),
        );
        let mut instruction = authorize_checked(
            &vote_pubkey,
            &authorized_pubkey,
            &new_authorized_pubkey,
            VoteAuthorize::Withdrawer,
        );
        instruction.accounts = instruction.accounts[0..2].to_vec();
        assert_eq!(
            process_instruction(&instruction),
            Err(InstructionError::NotEnoughAccountKeys),
        );
        // Test with non-signing new_authorized_pubkey
        let mut instruction = authorize_checked(
            &vote_pubkey,
            &authorized_pubkey,
            &new_authorized_pubkey,
            VoteAuthorize::Voter,
        );
        instruction.accounts[3] = AccountMeta::new_readonly(new_authorized_pubkey, false);
        assert_eq!(
            process_instruction(&instruction),
            Err(InstructionError::MissingRequiredSignature),
        );
        let mut instruction = authorize_checked(
            &vote_pubkey,
            &authorized_pubkey,
            &new_authorized_pubkey,
            VoteAuthorize::Withdrawer,
        );
        instruction.accounts[3] = AccountMeta::new_readonly(new_authorized_pubkey, false);
        assert_eq!(
            process_instruction(&instruction),
            Err(InstructionError::MissingRequiredSignature),
        );
        // Test with new_authorized_pubkey signer
        let vote_account = AccountSharedData::new_ref(100, VoteState::size_of(), &id());
        let clock_address = sysvar::clock::id();
        let clock_account = RefCell::new(account::create_account_shared_data_for_test(
            &Clock::default(),
        ));
        let default_authorized_pubkey = Pubkey::default();
        let authorized_account = create_default_account();
        let new_authorized_account = create_default_account();
        let keyed_accounts = vec![
            KeyedAccount::new(&vote_pubkey, false, &vote_account),
            KeyedAccount::new(&clock_address, false, &clock_account),
            KeyedAccount::new(&default_authorized_pubkey, true, &authorized_account),
            KeyedAccount::new(&new_authorized_pubkey, true, &new_authorized_account),
        ];
        assert_eq!(
            super::process_instruction(
                &Pubkey::default(),
                &serialize(&VoteInstruction::AuthorizeChecked(VoteAuthorize::Voter)).unwrap(),
                &mut MockInvokeContext::new(keyed_accounts)
            ),
            Ok(())
        );
        let keyed_accounts = vec![
            KeyedAccount::new(&vote_pubkey, false, &vote_account),
            KeyedAccount::new(&clock_address, false, &clock_account),
            KeyedAccount::new(&default_authorized_pubkey, true, &authorized_account),
            KeyedAccount::new(&new_authorized_pubkey, true, &new_authorized_account),
        ];
        assert_eq!(
            super::process_instruction(
                &Pubkey::default(),
                &serialize(&VoteInstruction::AuthorizeChecked(
                    VoteAuthorize::Withdrawer
                ))
                .unwrap(),
                &mut MockInvokeContext::new(keyed_accounts)
            ),
            Ok(())
        );
    }
    #[test]
    fn test_minimum_balance() {
        let rent = solana_sdk::rent::Rent::default();
        let minimum_balance = rent.minimum_balance(VoteState::size_of());
        // golden, may need updating when vote_state grows
        assert!(minimum_balance as f64 / 10f64.powf(9.0) < 0.04)
    }
    #[test]
    fn test_custom_error_decode() {
        use num_traits::FromPrimitive;
        // Renders a Custom error code through the DecodeError machinery.
        fn pretty_err<T>(err: InstructionError) -> String
        where
            T: 'static + std::error::Error + DecodeError<T> + FromPrimitive,
        {
            if let InstructionError::Custom(code) = err {
                let specific_error: T = T::decode_custom_error_to_enum(code).unwrap();
                format!(
                    "{:?}: {}::{:?} - {}",
                    err,
                    T::type_of(),
                    specific_error,
                    specific_error,
                )
            } else {
                "".to_string()
            }
        }
        assert_eq!(
            "Custom(0): VoteError::VoteTooOld - vote already recorded or not in slot hashes history",
            pretty_err::<VoteError>(VoteError::VoteTooOld.into())
        )
    }
}
| 33.06647 | 101 | 0.589967 |
5610bf9538d24edd6e8ef4336b6180d9afb5bbed | 11,533 | use crate::UnivariateSumcheck;
use alloc::{vec, vec::Vec};
use ark_bcs::{
bcs::constraints::transcript::SimulationTranscriptVar,
iop::{bookkeeper::NameSpace, constraints::oracles::VirtualOracleVar, message::OracleIndex},
iop_trace,
prelude::{MsgRoundRef, ProverRoundMessageInfo},
};
use ark_crypto_primitives::merkle_tree::{constraints::ConfigGadget, Config};
use ark_ff::PrimeField;
use ark_ldt::domain::Radix2CosetDomain;
use ark_r1cs_std::{
fields::fp::FpVar,
poly::domain::{vanishing_poly::VanishingPolynomial, Radix2DomainVar},
prelude::*,
};
use ark_relations::r1cs::SynthesisError;
use ark_sponge::{
constraints::{AbsorbGadget, SpongeWithGadget},
Absorb,
};
/// R1CS variable describing the sumcheck virtual oracle
/// `g(x) = (f(x) - claimed_sum/|H| - Z_H(x) * h(x)) / x`,
/// built from the committed `f` and `h` oracles.
#[derive(Debug, Clone)]
pub struct SumcheckPOracleVar<F: PrimeField> {
    // Summation domain H over which the claimed sum is taken.
    pub summation_domain: Radix2CosetDomain<F>,
    // The sum the prover claims f attains over H.
    pub claimed_sum: FpVar<F>,
    // Precomputed claimed_sum * |H|^{-1}, reused for every evaluation point.
    pub order_h_inv_times_claimed_sum: FpVar<F>,
    // (round, oracle index) handle of the committed h oracle.
    pub h_handle: (MsgRoundRef, OracleIndex),
    // (round, oracle index) handle of the committed f oracle.
    pub f_handle: (MsgRoundRef, OracleIndex),
}
impl<F: PrimeField> SumcheckPOracleVar<F> {
    /// Creates the virtual-oracle descriptor, precomputing
    /// `claimed_sum * |H|^{-1}` so it is not re-derived per evaluation.
    pub fn new(
        summation_domain: Radix2CosetDomain<F>,
        claimed_sum: FpVar<F>,
        h_handle: (MsgRoundRef, OracleIndex),
        f_handle: (MsgRoundRef, OracleIndex),
    ) -> Self {
        // |H| is a power of two, hence nonzero in the field; inverse exists.
        let h_order_inv = F::from(summation_domain.size() as u64).inverse().unwrap();
        let order_h_inv_times_claimed_sum = &claimed_sum * h_order_inv;
        Self {
            summation_domain,
            claimed_sum,
            order_h_inv_times_claimed_sum,
            h_handle,
            f_handle,
        }
    }
}
impl<F: PrimeField> VirtualOracleVar<F> for SumcheckPOracleVar<F> {
    /// The virtual oracle is composed from the `h` oracle and the `f` oracle,
    /// in that order (matching the indices used in `evaluate_var`).
    fn constituent_oracle_handles(&self) -> Vec<(MsgRoundRef, Vec<OracleIndex>)> {
        vec![
            (self.h_handle.0, vec![self.h_handle.1]),
            (self.f_handle.0, vec![self.f_handle.1]),
        ]
    }
    /// Evaluates `g(x) = (f(x) - claimed_sum/|H| - Z_H(x)*h(x)) * x^{-1}`
    /// over every point of `coset_domain`.
    fn evaluate_var(
        &self,
        coset_domain: Radix2DomainVar<F>,
        constituent_oracles: &[Vec<FpVar<F>>],
    ) -> Result<Vec<FpVar<F>>, SynthesisError> {
        // Order matches `constituent_oracle_handles`: [0] = h, [1] = f.
        let h_eval = &constituent_oracles[0];
        let f_eval = &constituent_oracles[1];
        // Running inverse of the current evaluation point, starting at the
        // coset offset and advanced by gen^{-1} each step.
        let mut cur_x_inv = coset_domain.offset().inverse().unwrap();
        // Vanishing polynomial Z_H of the summation domain.
        let z_h = VanishingPolynomial::new(
            self.summation_domain.offset,
            self.summation_domain.dim() as u64,
        );
        let z_h_eval = coset_domain
            .elements()
            .into_iter()
            .map(|x| z_h.evaluate_constraints(&x))
            .collect::<Result<Vec<_>, SynthesisError>>()?;
        let gen_inv = coset_domain.gen.inverse().unwrap();
        assert_eq!(h_eval.len(), f_eval.len());
        assert_eq!(h_eval.len(), z_h_eval.len());
        assert_eq!(h_eval.len(), coset_domain.size() as usize);
        // NOTE: the closure mutates `cur_x_inv`, so it relies on `map` visiting
        // points sequentially in domain order — do not parallelize or reorder.
        Ok(f_eval
            .iter()
            .zip(h_eval)
            .zip(z_h_eval)
            .map(|((f, h), z_h)| {
                let result = (f - &self.order_h_inv_times_claimed_sum - &z_h * h) * &cur_x_inv;
                cur_x_inv = &cur_x_inv * gen_inv;
                result
            })
            .collect())
    }
}
impl<F: PrimeField + Absorb> UnivariateSumcheck<F> {
    /// Registers the sumcheck commit phase with the (constraint-system)
    /// simulation transcript: expects the prover's `h` oracle, then registers
    /// the degree-bounded virtual oracle `g` derived from `f` and `h`.
    ///
    /// `f_handle` points at the already-committed oracle whose sum over the
    /// summation domain is claimed to equal `claimed_sum`.
    pub fn register_sumcheck_commit_phase_var<
        MT: Config,
        MTG: ConfigGadget<MT, F, Leaf = [FpVar<F>]>,
        S: SpongeWithGadget<F>,
    >(
        &self,
        transcript: &mut SimulationTranscriptVar<F, MT, MTG, S>,
        ns: NameSpace,
        f_handle: (MsgRoundRef, OracleIndex),
        claimed_sum: FpVar<F>,
    ) -> Result<(), SynthesisError>
    where
        MTG::InnerDigest: AbsorbGadget<F>,
        MT::InnerDigest: Absorb,
    {
        // receive h with no degree bound
        let round_info = ProverRoundMessageInfo::new_using_codeword_domain(transcript)
            .with_num_message_oracles(1)
            .build();
        let h_handle =
            transcript.receive_prover_current_round(ns, round_info, iop_trace!("h oracle"))?;
        // register g as a virtual oracle
        let g_oracle = SumcheckPOracleVar::new(
            self.summation_domain,
            claimed_sum,
            (h_handle, (0, false).into()),
            f_handle,
        );
        // g must satisfy this degree bound; the LDT enforces it.
        let test_bound = self.degree_bound_of_g();
        transcript.register_prover_virtual_round(
            ns,
            g_oracle,
            vec![test_bound],
            vec![test_bound],
            iop_trace!("g oracle"),
        );
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use crate::{
        protocol::tests::{FieldMTConfig, MockProtocol, MockProverParam, MockVerifierParam},
        test_util::poseidon_parameters,
        UnivariateSumcheck,
    };
    use alloc::vec;
    use ark_bcs::{
        bcs::{
            constraints::{
                proof::BCSProofVar, transcript::SimulationTranscriptVar,
                verifier::BCSVerifierGadget, MTHashParametersVar,
            },
            prover::BCSProof,
            MTHashParameters,
        },
        iop::{
            bookkeeper::NameSpace,
            constraints::{message::MessagesCollectionVar, IOPVerifierWithGadget},
            message::OracleIndex,
            ProverParam,
        },
        iop_trace,
        ldt::rl_ldt::{LinearCombinationLDT, LinearCombinationLDTParameters},
        prelude::ProverRoundMessageInfo,
    };
    use ark_bls12_381::Fr;
    use ark_crypto_primitives::{
        crh::poseidon::constraints::CRHParametersVar,
        merkle_tree::{constraints::ConfigGadget, Config, IdentityDigestConverter},
    };
    use ark_ldt::domain::Radix2CosetDomain;
    use ark_poly::{univariate::DensePolynomial, UVPolynomial};
    use ark_r1cs_std::{
        alloc::{AllocVar, AllocationMode},
        fields::fp::FpVar,
    };
    use ark_relations::{
        ns,
        r1cs::{ConstraintSystem, ConstraintSystemRef, Namespace, SynthesisError},
    };
    use ark_sponge::{
        constraints::{AbsorbGadget, CryptographicSpongeVar, SpongeWithGadget},
        poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge},
        Absorb, CryptographicSponge,
    };
    use ark_std::test_rng;
    use core::borrow::Borrow;
    // Constraint-system counterpart of `MockVerifierParam`: the summation
    // domain stays a native constant, only the claimed sum is allocated.
    #[derive(Clone, Debug)]
    pub struct MockVerifierParamVar {
        pub summation_domain: Radix2CosetDomain<Fr>,
        pub claimed_sum: FpVar<Fr>,
    }
    impl AllocVar<MockVerifierParam, Fr> for MockVerifierParamVar {
        fn new_variable<T: Borrow<MockVerifierParam>>(
            cs: impl Into<Namespace<Fr>>,
            f: impl FnOnce() -> Result<T, SynthesisError>,
            mode: AllocationMode,
        ) -> Result<Self, SynthesisError> {
            let val = f()?;
            let val = val.borrow();
            let var = Self {
                summation_domain: val.summation_domain,
                claimed_sum: FpVar::new_variable(cs, || Ok(val.claimed_sum), mode)?,
            };
            Ok(var)
        }
    }
    // Gadget-side verifier for the mock protocol: mirrors the native
    // registration (receive f, then register the sumcheck commit phase).
    impl<S: SpongeWithGadget<Fr>> IOPVerifierWithGadget<S, Fr> for MockProtocol {
        type VerifierParameterVar = MockVerifierParamVar;
        type VerifierOutputVar = ();
        type PublicInputVar = ();
        fn register_iop_structure_var<MT: Config, MTG: ConfigGadget<MT, Fr, Leaf = [FpVar<Fr>]>>(
            namespace: NameSpace,
            transcript: &mut SimulationTranscriptVar<Fr, MT, MTG, S>,
            verifier_parameter: &Self::VerifierParameterVar,
        ) -> Result<(), SynthesisError>
        where
            MT::InnerDigest: Absorb,
            MTG::InnerDigest: AbsorbGadget<Fr>,
        {
            let poly_info = ProverRoundMessageInfo::new_using_codeword_domain(transcript)
                .with_num_message_oracles(1)
                .build();
            let poly_handle = transcript.receive_prover_current_round(
                namespace,
                poly_info,
                iop_trace!("poly to sum"),
            )?;
            let sumcheck = UnivariateSumcheck {
                summation_domain: verifier_parameter.summation_domain,
            };
            let sumcheck_ns = transcript.new_namespace(namespace, iop_trace!("sumcheck"));
            sumcheck.register_sumcheck_commit_phase_var(
                transcript,
                sumcheck_ns,
                (poly_handle, OracleIndex::new(0, false)),
                verifier_parameter.claimed_sum.clone(),
            )
        }
        fn query_and_decide_var<'a>(
            _cs: ConstraintSystemRef<Fr>,
            _namespace: NameSpace,
            _verifier_parameter: &Self::VerifierParameterVar,
            _public_input_var: &Self::PublicInputVar,
            _sponge: &mut S::Var,
            _transcript_messages: &mut MessagesCollectionVar<'a, Fr>,
        ) -> Result<Self::VerifierOutputVar, SynthesisError> {
            // nothing to do here. LDT is everything.
            Ok(())
        }
    }
    // Poseidon-based Merkle tree gadget config matching the native FieldMTConfig.
    impl ConfigGadget<FieldMTConfig, Fr> for FieldMTConfig {
        type Leaf = [FpVar<Fr>];
        type LeafDigest = FpVar<Fr>;
        type LeafInnerConverter = IdentityDigestConverter<FpVar<Fr>>;
        type InnerDigest = FpVar<Fr>;
        type LeafHash = ark_crypto_primitives::crh::poseidon::constraints::CRHGadget<Fr>;
        type TwoToOneHash =
            ark_crypto_primitives::crh::poseidon::constraints::TwoToOneCRHGadget<Fr>;
    }
    // End-to-end: produce a native BCS proof of a correct sum, then verify it
    // inside the constraint system.
    #[test]
    fn test_constraints_end_to_end() {
        let mut rng = test_rng();
        let poseidon_param = poseidon_parameters();
        let sponge = PoseidonSponge::new(&poseidon_param);
        let codeword_domain = Radix2CosetDomain::new_radix2_coset(256, Fr::from(0x12345u128));
        let ldt_param = LinearCombinationLDTParameters::new(128, vec![1, 2, 1], codeword_domain, 5);
        let summation_domain = Radix2CosetDomain::new_radix2_coset(32, Fr::from(0x6789u128));
        let poly = DensePolynomial::rand(100, &mut rng);
        // The honest claimed sum: evaluate the polynomial over H and add up.
        let claimed_sum = summation_domain.evaluate(&poly).into_iter().sum::<Fr>();
        let cs = ConstraintSystem::<Fr>::new_ref();
        let mt_hash_param = MTHashParameters::<FieldMTConfig> {
            leaf_hash_param: poseidon_param.clone(),
            inner_hash_param: poseidon_param.clone(),
        };
        let poseidon_param_var =
            CRHParametersVar::new_constant(cs.clone(), poseidon_parameters()).unwrap();
        let mt_hash_param_var = MTHashParametersVar::<Fr, FieldMTConfig, FieldMTConfig> {
            leaf_params: poseidon_param_var.clone(),
            inner_params: poseidon_param_var.clone(),
        };
        let prover_param = MockProverParam {
            summation_domain,
            poly,
            claimed_sum,
        };
        let verifier_param = prover_param.to_verifier_param();
        let verifier_param_var =
            MockVerifierParamVar::new_witness(ns!(cs, "verifier_param"), || Ok(verifier_param))
                .unwrap();
        let proof = BCSProof::generate::<MockProtocol, MockProtocol, LinearCombinationLDT<Fr>, _>(
            sponge.clone(),
            &(),
            &(),
            &prover_param,
            &ldt_param,
            mt_hash_param.clone(),
        )
        .unwrap();
        let proof_var = BCSProofVar::new_witness(ns!(cs, "proof"), || Ok(proof)).unwrap();
        let sponge_var = PoseidonSpongeVar::new(ns!(cs, "sponge").cs(), &poseidon_param);
        BCSVerifierGadget::verify::<MockProtocol, LinearCombinationLDT<Fr>, PoseidonSponge<Fr>>(
            cs.clone(),
            sponge_var,
            &proof_var,
            &(),
            &verifier_param_var,
            &ldt_param,
            &mt_hash_param_var,
        )
        .unwrap();
    }
}
| 34.633634 | 100 | 0.59889 |
87480580d1904117a9521dbb305507b41cdfc7fd | 914 | //! Structs describing the PokéAPI data model.
#[macro_use]
pub mod resource;
pub use resource::NameOf;
pub use resource::NamedResource;
pub use resource::Resource;
#[macro_use]
pub mod text;
pub use text::Language;
pub use text::LanguageName;
pub mod ability;
pub mod berry;
pub mod contest;
pub mod evolution;
pub mod item;
pub mod location;
pub mod mov;
pub mod nature;
pub mod pokedex;
pub mod species;
pub mod stat;
pub mod ty;
pub mod version;
mod data;
pub use data::*;
pub use ability::Ability;
pub use berry::Berry;
pub use item::Item;
pub use item::Tm;
pub use location::Location;
pub use location::Region;
pub use mov::Move;
pub use nature::Nature;
pub use pokedex::Pokedex;
pub use species::EggGroup;
pub use species::Pokemon;
pub use species::Species;
pub use stat::Stat;
pub use ty::Type;
pub use version::Generation;
pub use pokedex::PokedexName;
pub use stat::StatName;
pub use ty::TypeName;
| 18.28 | 46 | 0.745077 |
bb880568cc13f0b6106ce4cbe2cbd7168db27d5d | 4,199 | // Copyright 2013-2014 The CGMath Developers. For a full listing of the authors,
// refer to the Cargo.toml file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use quaternion::*;
use structure::*;
use std::mem;
use std::ops::*;
use simd::f32x4 as Simdf32x4;
impl From<Simdf32x4> for Quaternion<f32> {
#[inline]
fn from(f: Simdf32x4) -> Self {
unsafe {
let mut ret: Self = mem::uninitialized();
{
let ret_mut: &mut [f32; 4] = ret.as_mut();
f.store(ret_mut.as_mut(), 0 as usize);
}
ret
}
}
}
impl Into<Simdf32x4> for Quaternion<f32> {
#[inline]
fn into(self) -> Simdf32x4 {
let self_ref: &[f32; 4] = self.as_ref();
Simdf32x4::load(self_ref.as_ref(), 0 as usize)
}
}
impl InnerSpace for Quaternion<f32> {
#[inline]
fn dot(self, other: Quaternion<f32>) -> f32 {
let lhs: Simdf32x4 = self.into();
let rhs: Simdf32x4 = other.into();
let r = lhs * rhs;
r.extract(0) + r.extract(1) + r.extract(2) + r.extract(3)
}
}
impl_operator_simd! {
[Simdf32x4]; Neg for Quaternion<f32> {
fn neg(lhs) -> Quaternion<f32> {
(-lhs).into()
}
}
}
impl_operator_simd! {@rs
[Simdf32x4]; Mul<f32> for Quaternion<f32> {
fn mul(lhs, rhs) -> Quaternion<f32> {
(lhs * rhs).into()
}
}
}
impl MulAssign<f32> for Quaternion<f32> {
fn mul_assign(&mut self, other: f32) {
let s: Simdf32x4 = (*self).into();
let other = Simdf32x4::splat(other);
*self = (s * other).into();
}
}
impl_operator_simd! {@rs
[Simdf32x4]; Div<f32> for Quaternion<f32> {
fn div(lhs, rhs) -> Quaternion<f32> {
(lhs / rhs).into()
}
}
}
impl DivAssign<f32> for Quaternion<f32> {
fn div_assign(&mut self, other: f32) {
let s: Simdf32x4 = (*self).into();
let other = Simdf32x4::splat(other);
*self = (s / other).into();
}
}
impl_operator_simd! {
[Simdf32x4]; Add<Quaternion<f32>> for Quaternion<f32> {
fn add(lhs, rhs) -> Quaternion<f32> {
(lhs + rhs).into()
}
}
}
impl AddAssign for Quaternion<f32> {
#[inline]
fn add_assign(&mut self, rhs: Self) {
let s: Simdf32x4 = (*self).into();
let rhs: Simdf32x4 = rhs.into();
*self = (s + rhs).into();
}
}
impl_operator_simd! {
[Simdf32x4]; Sub<Quaternion<f32>> for Quaternion<f32> {
fn sub(lhs, rhs) -> Quaternion<f32> {
(lhs - rhs).into()
}
}
}
impl SubAssign for Quaternion<f32> {
#[inline]
fn sub_assign(&mut self, rhs: Self) {
let s: Simdf32x4 = (*self).into();
let rhs: Simdf32x4 = rhs.into();
*self = (s - rhs).into();
}
}
impl_operator_simd! {
[Simdf32x4]; Mul<Quaternion<f32>> for Quaternion<f32> {
fn mul(lhs, rhs) -> Quaternion<f32> {
{
let p0 = Simdf32x4::splat(lhs.extract(0)) * rhs;
let p1 = Simdf32x4::splat(lhs.extract(1)) * Simdf32x4::new(
-rhs.extract(1), rhs.extract(0), -rhs.extract(3), rhs.extract(2)
);
let p2 = Simdf32x4::splat(lhs.extract(2)) * Simdf32x4::new(
-rhs.extract(2), rhs.extract(3), rhs.extract(0), -rhs.extract(1)
);
let p3 = Simdf32x4::splat(lhs.extract(3)) * Simdf32x4::new(
-rhs.extract(3), -rhs.extract(2), rhs.extract(1), rhs.extract(0)
);
(p0 + p1 + p2 + p3).into()
}
}
}
}
| 27.993333 | 84 | 0.553703 |
e2c52c662d0ff1444878b75d0fa20868dd9faeb6 | 673 | use crate::{
guild::audit_log::{AuditLogChange, AuditLogEvent, AuditLogOptionalEntryInfo},
id::{AuditLogEntryId, UserId},
};
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct AuditLogEntry {
pub action_type: AuditLogEvent,
#[serde(skip_serializing_if = "Option::is_none")]
pub changes: Option<Vec<AuditLogChange>>,
pub id: AuditLogEntryId,
#[serde(skip_serializing_if = "Option::is_none")]
pub options: Option<AuditLogOptionalEntryInfo>,
#[serde(skip_serializing_if = "Option::is_none")]
pub reason: Option<String>,
pub target_id: Option<String>,
pub user_id: Option<UserId>,
}
| 33.65 | 81 | 0.716196 |
333085304a2e71515edef0e91275e50ef691b3f9 | 216 | pub use room_object::room::Room;
pub mod game;
mod room_object;
macro def_attr {
($name:ident, $rty:ty) => {
#[inline]
pub fn $name(&self) -> $rty {
self.0.$name()
}
}
}
| 15.428571 | 37 | 0.49537 |
14cf15a6b70d0235a76133703b0077c5d38682f7 | 44,332 | /* automatically generated by rust-bindgen 0.59.1 */
pub const BZ_RUN: u32 = 0;
pub const BZ_FLUSH: u32 = 1;
pub const BZ_FINISH: u32 = 2;
pub const BZ_OK: u32 = 0;
pub const BZ_RUN_OK: u32 = 1;
pub const BZ_FLUSH_OK: u32 = 2;
pub const BZ_FINISH_OK: u32 = 3;
pub const BZ_STREAM_END: u32 = 4;
pub const BZ_SEQUENCE_ERROR: i32 = -1;
pub const BZ_PARAM_ERROR: i32 = -2;
pub const BZ_MEM_ERROR: i32 = -3;
pub const BZ_DATA_ERROR: i32 = -4;
pub const BZ_DATA_ERROR_MAGIC: i32 = -5;
pub const BZ_IO_ERROR: i32 = -6;
pub const BZ_UNEXPECTED_EOF: i32 = -7;
pub const BZ_OUTBUFF_FULL: i32 = -8;
pub const BZ_CONFIG_ERROR: i32 = -9;
pub const _STDIO_H: u32 = 1;
pub const _FEATURES_H: u32 = 1;
pub const _DEFAULT_SOURCE: u32 = 1;
pub const __GLIBC_USE_ISOC2X: u32 = 0;
pub const __USE_ISOC11: u32 = 1;
pub const __USE_ISOC99: u32 = 1;
pub const __USE_ISOC95: u32 = 1;
pub const __USE_POSIX_IMPLICITLY: u32 = 1;
pub const _POSIX_SOURCE: u32 = 1;
pub const _POSIX_C_SOURCE: u32 = 200809;
pub const __USE_POSIX: u32 = 1;
pub const __USE_POSIX2: u32 = 1;
pub const __USE_POSIX199309: u32 = 1;
pub const __USE_POSIX199506: u32 = 1;
pub const __USE_XOPEN2K: u32 = 1;
pub const __USE_XOPEN2K8: u32 = 1;
pub const _ATFILE_SOURCE: u32 = 1;
pub const __USE_MISC: u32 = 1;
pub const __USE_ATFILE: u32 = 1;
pub const __USE_FORTIFY_LEVEL: u32 = 0;
pub const __GLIBC_USE_DEPRECATED_GETS: u32 = 0;
pub const __GLIBC_USE_DEPRECATED_SCANF: u32 = 0;
pub const _STDC_PREDEF_H: u32 = 1;
pub const __STDC_IEC_559__: u32 = 1;
pub const __STDC_IEC_559_COMPLEX__: u32 = 1;
pub const __STDC_ISO_10646__: u32 = 201706;
pub const __GNU_LIBRARY__: u32 = 6;
pub const __GLIBC__: u32 = 2;
pub const __GLIBC_MINOR__: u32 = 31;
pub const _SYS_CDEFS_H: u32 = 1;
pub const __glibc_c99_flexarr_available: u32 = 1;
pub const __WORDSIZE: u32 = 64;
pub const __WORDSIZE_TIME64_COMPAT32: u32 = 1;
pub const __SYSCALL_WORDSIZE: u32 = 64;
pub const __LONG_DOUBLE_USES_FLOAT128: u32 = 0;
pub const __HAVE_GENERIC_SELECTION: u32 = 1;
pub const __GLIBC_USE_LIB_EXT2: u32 = 0;
pub const __GLIBC_USE_IEC_60559_BFP_EXT: u32 = 0;
pub const __GLIBC_USE_IEC_60559_BFP_EXT_C2X: u32 = 0;
pub const __GLIBC_USE_IEC_60559_FUNCS_EXT: u32 = 0;
pub const __GLIBC_USE_IEC_60559_FUNCS_EXT_C2X: u32 = 0;
pub const __GLIBC_USE_IEC_60559_TYPES_EXT: u32 = 0;
pub const __GNUC_VA_LIST: u32 = 1;
pub const _BITS_TYPES_H: u32 = 1;
pub const __TIMESIZE: u32 = 64;
pub const _BITS_TYPESIZES_H: u32 = 1;
pub const __OFF_T_MATCHES_OFF64_T: u32 = 1;
pub const __INO_T_MATCHES_INO64_T: u32 = 1;
pub const __RLIM_T_MATCHES_RLIM64_T: u32 = 1;
pub const __STATFS_MATCHES_STATFS64: u32 = 1;
pub const __FD_SETSIZE: u32 = 1024;
pub const _BITS_TIME64_H: u32 = 1;
pub const _____fpos_t_defined: u32 = 1;
pub const ____mbstate_t_defined: u32 = 1;
pub const _____fpos64_t_defined: u32 = 1;
pub const ____FILE_defined: u32 = 1;
pub const __FILE_defined: u32 = 1;
pub const __struct_FILE_defined: u32 = 1;
pub const _IO_EOF_SEEN: u32 = 16;
pub const _IO_ERR_SEEN: u32 = 32;
pub const _IO_USER_LOCK: u32 = 32768;
pub const _IOFBF: u32 = 0;
pub const _IOLBF: u32 = 1;
pub const _IONBF: u32 = 2;
pub const BUFSIZ: u32 = 8192;
pub const EOF: i32 = -1;
pub const SEEK_SET: u32 = 0;
pub const SEEK_CUR: u32 = 1;
pub const SEEK_END: u32 = 2;
pub const P_tmpdir: &'static [u8; 5usize] = b"/tmp\0";
pub const _BITS_STDIO_LIM_H: u32 = 1;
pub const L_tmpnam: u32 = 20;
pub const TMP_MAX: u32 = 238328;
pub const FILENAME_MAX: u32 = 4096;
pub const L_ctermid: u32 = 9;
pub const FOPEN_MAX: u32 = 16;
pub const BZ_MAX_UNUSED: u32 = 5000;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct bz_stream {
pub next_in: *mut ::std::os::raw::c_char,
pub avail_in: ::std::os::raw::c_uint,
pub total_in_lo32: ::std::os::raw::c_uint,
pub total_in_hi32: ::std::os::raw::c_uint,
pub next_out: *mut ::std::os::raw::c_char,
pub avail_out: ::std::os::raw::c_uint,
pub total_out_lo32: ::std::os::raw::c_uint,
pub total_out_hi32: ::std::os::raw::c_uint,
pub state: *mut ::std::os::raw::c_void,
pub bzalloc: ::std::option::Option<
unsafe extern "C" fn(
arg1: *mut ::std::os::raw::c_void,
arg2: ::std::os::raw::c_int,
arg3: ::std::os::raw::c_int,
) -> *mut ::std::os::raw::c_void,
>,
pub bzfree: ::std::option::Option<
unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void, arg2: *mut ::std::os::raw::c_void),
>,
pub opaque: *mut ::std::os::raw::c_void,
}
#[test]
fn bindgen_test_layout_bz_stream() {
assert_eq!(
::std::mem::size_of::<bz_stream>(),
80usize,
concat!("Size of: ", stringify!(bz_stream))
);
assert_eq!(
::std::mem::align_of::<bz_stream>(),
8usize,
concat!("Alignment of ", stringify!(bz_stream))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<bz_stream>())).next_in as *const _ as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(bz_stream),
"::",
stringify!(next_in)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<bz_stream>())).avail_in as *const _ as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(bz_stream),
"::",
stringify!(avail_in)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<bz_stream>())).total_in_lo32 as *const _ as usize },
12usize,
concat!(
"Offset of field: ",
stringify!(bz_stream),
"::",
stringify!(total_in_lo32)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<bz_stream>())).total_in_hi32 as *const _ as usize },
16usize,
concat!(
"Offset of field: ",
stringify!(bz_stream),
"::",
stringify!(total_in_hi32)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<bz_stream>())).next_out as *const _ as usize },
24usize,
concat!(
"Offset of field: ",
stringify!(bz_stream),
"::",
stringify!(next_out)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<bz_stream>())).avail_out as *const _ as usize },
32usize,
concat!(
"Offset of field: ",
stringify!(bz_stream),
"::",
stringify!(avail_out)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<bz_stream>())).total_out_lo32 as *const _ as usize },
36usize,
concat!(
"Offset of field: ",
stringify!(bz_stream),
"::",
stringify!(total_out_lo32)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<bz_stream>())).total_out_hi32 as *const _ as usize },
40usize,
concat!(
"Offset of field: ",
stringify!(bz_stream),
"::",
stringify!(total_out_hi32)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<bz_stream>())).state as *const _ as usize },
48usize,
concat!(
"Offset of field: ",
stringify!(bz_stream),
"::",
stringify!(state)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<bz_stream>())).bzalloc as *const _ as usize },
56usize,
concat!(
"Offset of field: ",
stringify!(bz_stream),
"::",
stringify!(bzalloc)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<bz_stream>())).bzfree as *const _ as usize },
64usize,
concat!(
"Offset of field: ",
stringify!(bz_stream),
"::",
stringify!(bzfree)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<bz_stream>())).opaque as *const _ as usize },
72usize,
concat!(
"Offset of field: ",
stringify!(bz_stream),
"::",
stringify!(opaque)
)
);
}
pub type size_t = ::std::os::raw::c_ulong;
pub type va_list = __builtin_va_list;
pub type __gnuc_va_list = __builtin_va_list;
pub type __u_char = ::std::os::raw::c_uchar;
pub type __u_short = ::std::os::raw::c_ushort;
pub type __u_int = ::std::os::raw::c_uint;
pub type __u_long = ::std::os::raw::c_ulong;
pub type __int8_t = ::std::os::raw::c_schar;
pub type __uint8_t = ::std::os::raw::c_uchar;
pub type __int16_t = ::std::os::raw::c_short;
pub type __uint16_t = ::std::os::raw::c_ushort;
pub type __int32_t = ::std::os::raw::c_int;
pub type __uint32_t = ::std::os::raw::c_uint;
pub type __int64_t = ::std::os::raw::c_long;
pub type __uint64_t = ::std::os::raw::c_ulong;
pub type __int_least8_t = __int8_t;
pub type __uint_least8_t = __uint8_t;
pub type __int_least16_t = __int16_t;
pub type __uint_least16_t = __uint16_t;
pub type __int_least32_t = __int32_t;
pub type __uint_least32_t = __uint32_t;
pub type __int_least64_t = __int64_t;
pub type __uint_least64_t = __uint64_t;
pub type __quad_t = ::std::os::raw::c_long;
pub type __u_quad_t = ::std::os::raw::c_ulong;
pub type __intmax_t = ::std::os::raw::c_long;
pub type __uintmax_t = ::std::os::raw::c_ulong;
pub type __dev_t = ::std::os::raw::c_ulong;
pub type __uid_t = ::std::os::raw::c_uint;
pub type __gid_t = ::std::os::raw::c_uint;
pub type __ino_t = ::std::os::raw::c_ulong;
pub type __ino64_t = ::std::os::raw::c_ulong;
pub type __mode_t = ::std::os::raw::c_uint;
pub type __nlink_t = ::std::os::raw::c_ulong;
pub type __off_t = ::std::os::raw::c_long;
pub type __off64_t = ::std::os::raw::c_long;
pub type __pid_t = ::std::os::raw::c_int;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct __fsid_t {
pub __val: [::std::os::raw::c_int; 2usize],
}
#[test]
fn bindgen_test_layout___fsid_t() {
assert_eq!(
::std::mem::size_of::<__fsid_t>(),
8usize,
concat!("Size of: ", stringify!(__fsid_t))
);
assert_eq!(
::std::mem::align_of::<__fsid_t>(),
4usize,
concat!("Alignment of ", stringify!(__fsid_t))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<__fsid_t>())).__val as *const _ as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(__fsid_t),
"::",
stringify!(__val)
)
);
}
pub type __clock_t = ::std::os::raw::c_long;
pub type __rlim_t = ::std::os::raw::c_ulong;
pub type __rlim64_t = ::std::os::raw::c_ulong;
pub type __id_t = ::std::os::raw::c_uint;
pub type __time_t = ::std::os::raw::c_long;
pub type __useconds_t = ::std::os::raw::c_uint;
pub type __suseconds_t = ::std::os::raw::c_long;
pub type __daddr_t = ::std::os::raw::c_int;
pub type __key_t = ::std::os::raw::c_int;
pub type __clockid_t = ::std::os::raw::c_int;
pub type __timer_t = *mut ::std::os::raw::c_void;
pub type __blksize_t = ::std::os::raw::c_long;
pub type __blkcnt_t = ::std::os::raw::c_long;
pub type __blkcnt64_t = ::std::os::raw::c_long;
pub type __fsblkcnt_t = ::std::os::raw::c_ulong;
pub type __fsblkcnt64_t = ::std::os::raw::c_ulong;
pub type __fsfilcnt_t = ::std::os::raw::c_ulong;
pub type __fsfilcnt64_t = ::std::os::raw::c_ulong;
pub type __fsword_t = ::std::os::raw::c_long;
pub type __ssize_t = ::std::os::raw::c_long;
pub type __syscall_slong_t = ::std::os::raw::c_long;
pub type __syscall_ulong_t = ::std::os::raw::c_ulong;
pub type __loff_t = __off64_t;
pub type __caddr_t = *mut ::std::os::raw::c_char;
pub type __intptr_t = ::std::os::raw::c_long;
pub type __socklen_t = ::std::os::raw::c_uint;
pub type __sig_atomic_t = ::std::os::raw::c_int;
#[repr(C)]
#[derive(Copy, Clone)]
pub struct __mbstate_t {
pub __count: ::std::os::raw::c_int,
pub __value: __mbstate_t__bindgen_ty_1,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union __mbstate_t__bindgen_ty_1 {
pub __wch: ::std::os::raw::c_uint,
pub __wchb: [::std::os::raw::c_char; 4usize],
}
#[test]
fn bindgen_test_layout___mbstate_t__bindgen_ty_1() {
assert_eq!(
::std::mem::size_of::<__mbstate_t__bindgen_ty_1>(),
4usize,
concat!("Size of: ", stringify!(__mbstate_t__bindgen_ty_1))
);
assert_eq!(
::std::mem::align_of::<__mbstate_t__bindgen_ty_1>(),
4usize,
concat!("Alignment of ", stringify!(__mbstate_t__bindgen_ty_1))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<__mbstate_t__bindgen_ty_1>())).__wch as *const _ as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(__mbstate_t__bindgen_ty_1),
"::",
stringify!(__wch)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<__mbstate_t__bindgen_ty_1>())).__wchb as *const _ as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(__mbstate_t__bindgen_ty_1),
"::",
stringify!(__wchb)
)
);
}
#[test]
fn bindgen_test_layout___mbstate_t() {
assert_eq!(
::std::mem::size_of::<__mbstate_t>(),
8usize,
concat!("Size of: ", stringify!(__mbstate_t))
);
assert_eq!(
::std::mem::align_of::<__mbstate_t>(),
4usize,
concat!("Alignment of ", stringify!(__mbstate_t))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<__mbstate_t>())).__count as *const _ as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(__mbstate_t),
"::",
stringify!(__count)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<__mbstate_t>())).__value as *const _ as usize },
4usize,
concat!(
"Offset of field: ",
stringify!(__mbstate_t),
"::",
stringify!(__value)
)
);
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct _G_fpos_t {
pub __pos: __off_t,
pub __state: __mbstate_t,
}
#[test]
fn bindgen_test_layout__G_fpos_t() {
assert_eq!(
::std::mem::size_of::<_G_fpos_t>(),
16usize,
concat!("Size of: ", stringify!(_G_fpos_t))
);
assert_eq!(
::std::mem::align_of::<_G_fpos_t>(),
8usize,
concat!("Alignment of ", stringify!(_G_fpos_t))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_G_fpos_t>())).__pos as *const _ as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(_G_fpos_t),
"::",
stringify!(__pos)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_G_fpos_t>())).__state as *const _ as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(_G_fpos_t),
"::",
stringify!(__state)
)
);
}
pub type __fpos_t = _G_fpos_t;
#[repr(C)]
#[derive(Copy, Clone)]
pub struct _G_fpos64_t {
pub __pos: __off64_t,
pub __state: __mbstate_t,
}
#[test]
fn bindgen_test_layout__G_fpos64_t() {
assert_eq!(
::std::mem::size_of::<_G_fpos64_t>(),
16usize,
concat!("Size of: ", stringify!(_G_fpos64_t))
);
assert_eq!(
::std::mem::align_of::<_G_fpos64_t>(),
8usize,
concat!("Alignment of ", stringify!(_G_fpos64_t))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_G_fpos64_t>())).__pos as *const _ as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(_G_fpos64_t),
"::",
stringify!(__pos)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_G_fpos64_t>())).__state as *const _ as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(_G_fpos64_t),
"::",
stringify!(__state)
)
);
}
pub type __fpos64_t = _G_fpos64_t;
pub type __FILE = _IO_FILE;
pub type FILE = _IO_FILE;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct _IO_marker {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct _IO_codecvt {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct _IO_wide_data {
_unused: [u8; 0],
}
pub type _IO_lock_t = ::std::os::raw::c_void;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct _IO_FILE {
pub _flags: ::std::os::raw::c_int,
pub _IO_read_ptr: *mut ::std::os::raw::c_char,
pub _IO_read_end: *mut ::std::os::raw::c_char,
pub _IO_read_base: *mut ::std::os::raw::c_char,
pub _IO_write_base: *mut ::std::os::raw::c_char,
pub _IO_write_ptr: *mut ::std::os::raw::c_char,
pub _IO_write_end: *mut ::std::os::raw::c_char,
pub _IO_buf_base: *mut ::std::os::raw::c_char,
pub _IO_buf_end: *mut ::std::os::raw::c_char,
pub _IO_save_base: *mut ::std::os::raw::c_char,
pub _IO_backup_base: *mut ::std::os::raw::c_char,
pub _IO_save_end: *mut ::std::os::raw::c_char,
pub _markers: *mut _IO_marker,
pub _chain: *mut _IO_FILE,
pub _fileno: ::std::os::raw::c_int,
pub _flags2: ::std::os::raw::c_int,
pub _old_offset: __off_t,
pub _cur_column: ::std::os::raw::c_ushort,
pub _vtable_offset: ::std::os::raw::c_schar,
pub _shortbuf: [::std::os::raw::c_char; 1usize],
pub _lock: *mut _IO_lock_t,
pub _offset: __off64_t,
pub _codecvt: *mut _IO_codecvt,
pub _wide_data: *mut _IO_wide_data,
pub _freeres_list: *mut _IO_FILE,
pub _freeres_buf: *mut ::std::os::raw::c_void,
pub __pad5: size_t,
pub _mode: ::std::os::raw::c_int,
pub _unused2: [::std::os::raw::c_char; 20usize],
}
#[test]
fn bindgen_test_layout__IO_FILE() {
assert_eq!(
::std::mem::size_of::<_IO_FILE>(),
216usize,
concat!("Size of: ", stringify!(_IO_FILE))
);
assert_eq!(
::std::mem::align_of::<_IO_FILE>(),
8usize,
concat!("Alignment of ", stringify!(_IO_FILE))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._flags as *const _ as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_flags)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._IO_read_ptr as *const _ as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_IO_read_ptr)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._IO_read_end as *const _ as usize },
16usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_IO_read_end)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._IO_read_base as *const _ as usize },
24usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_IO_read_base)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._IO_write_base as *const _ as usize },
32usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_IO_write_base)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._IO_write_ptr as *const _ as usize },
40usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_IO_write_ptr)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._IO_write_end as *const _ as usize },
48usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_IO_write_end)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._IO_buf_base as *const _ as usize },
56usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_IO_buf_base)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._IO_buf_end as *const _ as usize },
64usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_IO_buf_end)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._IO_save_base as *const _ as usize },
72usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_IO_save_base)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._IO_backup_base as *const _ as usize },
80usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_IO_backup_base)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._IO_save_end as *const _ as usize },
88usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_IO_save_end)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._markers as *const _ as usize },
96usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_markers)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._chain as *const _ as usize },
104usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_chain)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._fileno as *const _ as usize },
112usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_fileno)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._flags2 as *const _ as usize },
116usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_flags2)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._old_offset as *const _ as usize },
120usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_old_offset)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._cur_column as *const _ as usize },
128usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_cur_column)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._vtable_offset as *const _ as usize },
130usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_vtable_offset)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._shortbuf as *const _ as usize },
131usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_shortbuf)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._lock as *const _ as usize },
136usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_lock)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._offset as *const _ as usize },
144usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_offset)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._codecvt as *const _ as usize },
152usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_codecvt)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._wide_data as *const _ as usize },
160usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_wide_data)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._freeres_list as *const _ as usize },
168usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_freeres_list)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._freeres_buf as *const _ as usize },
176usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_freeres_buf)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>())).__pad5 as *const _ as usize },
184usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(__pad5)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._mode as *const _ as usize },
192usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_mode)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<_IO_FILE>()))._unused2 as *const _ as usize },
196usize,
concat!(
"Offset of field: ",
stringify!(_IO_FILE),
"::",
stringify!(_unused2)
)
);
}
pub type off_t = __off_t;
pub type ssize_t = __ssize_t;
pub type fpos_t = __fpos_t;
extern "C" {
pub static mut stdin: *mut FILE;
}
extern "C" {
pub static mut stdout: *mut FILE;
}
extern "C" {
pub static mut stderr: *mut FILE;
}
extern "C" {
pub fn remove(__filename: *const ::std::os::raw::c_char) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn rename(
__old: *const ::std::os::raw::c_char,
__new: *const ::std::os::raw::c_char,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn renameat(
__oldfd: ::std::os::raw::c_int,
__old: *const ::std::os::raw::c_char,
__newfd: ::std::os::raw::c_int,
__new: *const ::std::os::raw::c_char,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn tmpfile() -> *mut FILE;
}
extern "C" {
pub fn tmpnam(__s: *mut ::std::os::raw::c_char) -> *mut ::std::os::raw::c_char;
}
extern "C" {
pub fn tmpnam_r(__s: *mut ::std::os::raw::c_char) -> *mut ::std::os::raw::c_char;
}
extern "C" {
pub fn tempnam(
__dir: *const ::std::os::raw::c_char,
__pfx: *const ::std::os::raw::c_char,
) -> *mut ::std::os::raw::c_char;
}
extern "C" {
pub fn fclose(__stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn fflush(__stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn fflush_unlocked(__stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn fopen(
__filename: *const ::std::os::raw::c_char,
__modes: *const ::std::os::raw::c_char,
) -> *mut FILE;
}
extern "C" {
pub fn freopen(
__filename: *const ::std::os::raw::c_char,
__modes: *const ::std::os::raw::c_char,
__stream: *mut FILE,
) -> *mut FILE;
}
extern "C" {
pub fn fdopen(__fd: ::std::os::raw::c_int, __modes: *const ::std::os::raw::c_char)
-> *mut FILE;
}
extern "C" {
pub fn fmemopen(
__s: *mut ::std::os::raw::c_void,
__len: size_t,
__modes: *const ::std::os::raw::c_char,
) -> *mut FILE;
}
extern "C" {
pub fn open_memstream(
__bufloc: *mut *mut ::std::os::raw::c_char,
__sizeloc: *mut size_t,
) -> *mut FILE;
}
extern "C" {
pub fn setbuf(__stream: *mut FILE, __buf: *mut ::std::os::raw::c_char);
}
extern "C" {
pub fn setvbuf(
__stream: *mut FILE,
__buf: *mut ::std::os::raw::c_char,
__modes: ::std::os::raw::c_int,
__n: size_t,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn setbuffer(__stream: *mut FILE, __buf: *mut ::std::os::raw::c_char, __size: size_t);
}
extern "C" {
pub fn setlinebuf(__stream: *mut FILE);
}
extern "C" {
pub fn fprintf(
__stream: *mut FILE,
__format: *const ::std::os::raw::c_char,
...
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn printf(__format: *const ::std::os::raw::c_char, ...) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn sprintf(
__s: *mut ::std::os::raw::c_char,
__format: *const ::std::os::raw::c_char,
...
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn vfprintf(
__s: *mut FILE,
__format: *const ::std::os::raw::c_char,
__arg: *mut __va_list_tag,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn vprintf(
__format: *const ::std::os::raw::c_char,
__arg: *mut __va_list_tag,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn vsprintf(
__s: *mut ::std::os::raw::c_char,
__format: *const ::std::os::raw::c_char,
__arg: *mut __va_list_tag,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn snprintf(
__s: *mut ::std::os::raw::c_char,
__maxlen: ::std::os::raw::c_ulong,
__format: *const ::std::os::raw::c_char,
...
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn vsnprintf(
__s: *mut ::std::os::raw::c_char,
__maxlen: ::std::os::raw::c_ulong,
__format: *const ::std::os::raw::c_char,
__arg: *mut __va_list_tag,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn vdprintf(
__fd: ::std::os::raw::c_int,
__fmt: *const ::std::os::raw::c_char,
__arg: *mut __va_list_tag,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn dprintf(
__fd: ::std::os::raw::c_int,
__fmt: *const ::std::os::raw::c_char,
...
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn fscanf(
__stream: *mut FILE,
__format: *const ::std::os::raw::c_char,
...
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn scanf(__format: *const ::std::os::raw::c_char, ...) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn sscanf(
__s: *const ::std::os::raw::c_char,
__format: *const ::std::os::raw::c_char,
...
) -> ::std::os::raw::c_int;
}
extern "C" {
#[link_name = "\u{1}__isoc99_fscanf"]
pub fn fscanf1(
__stream: *mut FILE,
__format: *const ::std::os::raw::c_char,
...
) -> ::std::os::raw::c_int;
}
extern "C" {
#[link_name = "\u{1}__isoc99_scanf"]
pub fn scanf1(__format: *const ::std::os::raw::c_char, ...) -> ::std::os::raw::c_int;
}
extern "C" {
#[link_name = "\u{1}__isoc99_sscanf"]
pub fn sscanf1(
__s: *const ::std::os::raw::c_char,
__format: *const ::std::os::raw::c_char,
...
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn vfscanf(
__s: *mut FILE,
__format: *const ::std::os::raw::c_char,
__arg: *mut __va_list_tag,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn vscanf(
__format: *const ::std::os::raw::c_char,
__arg: *mut __va_list_tag,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn vsscanf(
__s: *const ::std::os::raw::c_char,
__format: *const ::std::os::raw::c_char,
__arg: *mut __va_list_tag,
) -> ::std::os::raw::c_int;
}
extern "C" {
#[link_name = "\u{1}__isoc99_vfscanf"]
pub fn vfscanf1(
__s: *mut FILE,
__format: *const ::std::os::raw::c_char,
__arg: *mut __va_list_tag,
) -> ::std::os::raw::c_int;
}
extern "C" {
#[link_name = "\u{1}__isoc99_vscanf"]
pub fn vscanf1(
__format: *const ::std::os::raw::c_char,
__arg: *mut __va_list_tag,
) -> ::std::os::raw::c_int;
}
extern "C" {
#[link_name = "\u{1}__isoc99_vsscanf"]
pub fn vsscanf1(
__s: *const ::std::os::raw::c_char,
__format: *const ::std::os::raw::c_char,
__arg: *mut __va_list_tag,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn fgetc(__stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn getc(__stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn getchar() -> ::std::os::raw::c_int;
}
extern "C" {
pub fn getc_unlocked(__stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn getchar_unlocked() -> ::std::os::raw::c_int;
}
extern "C" {
pub fn fgetc_unlocked(__stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn fputc(__c: ::std::os::raw::c_int, __stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn putc(__c: ::std::os::raw::c_int, __stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn putchar(__c: ::std::os::raw::c_int) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn fputc_unlocked(__c: ::std::os::raw::c_int, __stream: *mut FILE)
-> ::std::os::raw::c_int;
}
extern "C" {
pub fn putc_unlocked(__c: ::std::os::raw::c_int, __stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn putchar_unlocked(__c: ::std::os::raw::c_int) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn getw(__stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn putw(__w: ::std::os::raw::c_int, __stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn fgets(
__s: *mut ::std::os::raw::c_char,
__n: ::std::os::raw::c_int,
__stream: *mut FILE,
) -> *mut ::std::os::raw::c_char;
}
extern "C" {
pub fn __getdelim(
__lineptr: *mut *mut ::std::os::raw::c_char,
__n: *mut size_t,
__delimiter: ::std::os::raw::c_int,
__stream: *mut FILE,
) -> __ssize_t;
}
extern "C" {
pub fn getdelim(
__lineptr: *mut *mut ::std::os::raw::c_char,
__n: *mut size_t,
__delimiter: ::std::os::raw::c_int,
__stream: *mut FILE,
) -> __ssize_t;
}
extern "C" {
pub fn getline(
__lineptr: *mut *mut ::std::os::raw::c_char,
__n: *mut size_t,
__stream: *mut FILE,
) -> __ssize_t;
}
extern "C" {
pub fn fputs(__s: *const ::std::os::raw::c_char, __stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn puts(__s: *const ::std::os::raw::c_char) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn ungetc(__c: ::std::os::raw::c_int, __stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn fread(
__ptr: *mut ::std::os::raw::c_void,
__size: ::std::os::raw::c_ulong,
__n: ::std::os::raw::c_ulong,
__stream: *mut FILE,
) -> ::std::os::raw::c_ulong;
}
extern "C" {
pub fn fwrite(
__ptr: *const ::std::os::raw::c_void,
__size: ::std::os::raw::c_ulong,
__n: ::std::os::raw::c_ulong,
__s: *mut FILE,
) -> ::std::os::raw::c_ulong;
}
extern "C" {
pub fn fread_unlocked(
__ptr: *mut ::std::os::raw::c_void,
__size: size_t,
__n: size_t,
__stream: *mut FILE,
) -> size_t;
}
extern "C" {
pub fn fwrite_unlocked(
__ptr: *const ::std::os::raw::c_void,
__size: size_t,
__n: size_t,
__stream: *mut FILE,
) -> size_t;
}
extern "C" {
pub fn fseek(
__stream: *mut FILE,
__off: ::std::os::raw::c_long,
__whence: ::std::os::raw::c_int,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn ftell(__stream: *mut FILE) -> ::std::os::raw::c_long;
}
extern "C" {
pub fn rewind(__stream: *mut FILE);
}
extern "C" {
pub fn fseeko(
__stream: *mut FILE,
__off: __off_t,
__whence: ::std::os::raw::c_int,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn ftello(__stream: *mut FILE) -> __off_t;
}
extern "C" {
pub fn fgetpos(__stream: *mut FILE, __pos: *mut fpos_t) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn fsetpos(__stream: *mut FILE, __pos: *const fpos_t) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn clearerr(__stream: *mut FILE);
}
extern "C" {
pub fn feof(__stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn ferror(__stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn clearerr_unlocked(__stream: *mut FILE);
}
extern "C" {
pub fn feof_unlocked(__stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn ferror_unlocked(__stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn perror(__s: *const ::std::os::raw::c_char);
}
extern "C" {
pub static mut sys_nerr: ::std::os::raw::c_int;
}
extern "C" {
pub static mut sys_errlist: [*const ::std::os::raw::c_char; 0usize];
}
extern "C" {
pub fn fileno(__stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn fileno_unlocked(__stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn popen(
__command: *const ::std::os::raw::c_char,
__modes: *const ::std::os::raw::c_char,
) -> *mut FILE;
}
extern "C" {
pub fn pclose(__stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn ctermid(__s: *mut ::std::os::raw::c_char) -> *mut ::std::os::raw::c_char;
}
extern "C" {
pub fn flockfile(__stream: *mut FILE);
}
extern "C" {
pub fn ftrylockfile(__stream: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn funlockfile(__stream: *mut FILE);
}
extern "C" {
pub fn __uflow(arg1: *mut FILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn __overflow(arg1: *mut FILE, arg2: ::std::os::raw::c_int) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn BZ2_bzCompressInit(
strm: *mut bz_stream,
blockSize100k: ::std::os::raw::c_int,
verbosity: ::std::os::raw::c_int,
workFactor: ::std::os::raw::c_int,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn BZ2_bzCompress(
strm: *mut bz_stream,
action: ::std::os::raw::c_int,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn BZ2_bzCompressEnd(strm: *mut bz_stream) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn BZ2_bzDecompressInit(
strm: *mut bz_stream,
verbosity: ::std::os::raw::c_int,
small: ::std::os::raw::c_int,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn BZ2_bzDecompress(strm: *mut bz_stream) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn BZ2_bzDecompressEnd(strm: *mut bz_stream) -> ::std::os::raw::c_int;
}
pub type BZFILE = ::std::os::raw::c_void;
extern "C" {
pub fn BZ2_bzReadOpen(
bzerror: *mut ::std::os::raw::c_int,
f: *mut FILE,
verbosity: ::std::os::raw::c_int,
small: ::std::os::raw::c_int,
unused: *mut ::std::os::raw::c_void,
nUnused: ::std::os::raw::c_int,
) -> *mut BZFILE;
}
extern "C" {
pub fn BZ2_bzReadClose(bzerror: *mut ::std::os::raw::c_int, b: *mut BZFILE);
}
extern "C" {
pub fn BZ2_bzReadGetUnused(
bzerror: *mut ::std::os::raw::c_int,
b: *mut BZFILE,
unused: *mut *mut ::std::os::raw::c_void,
nUnused: *mut ::std::os::raw::c_int,
);
}
extern "C" {
pub fn BZ2_bzRead(
bzerror: *mut ::std::os::raw::c_int,
b: *mut BZFILE,
buf: *mut ::std::os::raw::c_void,
len: ::std::os::raw::c_int,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn BZ2_bzWriteOpen(
bzerror: *mut ::std::os::raw::c_int,
f: *mut FILE,
blockSize100k: ::std::os::raw::c_int,
verbosity: ::std::os::raw::c_int,
workFactor: ::std::os::raw::c_int,
) -> *mut BZFILE;
}
extern "C" {
pub fn BZ2_bzWrite(
bzerror: *mut ::std::os::raw::c_int,
b: *mut BZFILE,
buf: *mut ::std::os::raw::c_void,
len: ::std::os::raw::c_int,
);
}
extern "C" {
pub fn BZ2_bzWriteClose(
bzerror: *mut ::std::os::raw::c_int,
b: *mut BZFILE,
abandon: ::std::os::raw::c_int,
nbytes_in: *mut ::std::os::raw::c_uint,
nbytes_out: *mut ::std::os::raw::c_uint,
);
}
extern "C" {
pub fn BZ2_bzWriteClose64(
bzerror: *mut ::std::os::raw::c_int,
b: *mut BZFILE,
abandon: ::std::os::raw::c_int,
nbytes_in_lo32: *mut ::std::os::raw::c_uint,
nbytes_in_hi32: *mut ::std::os::raw::c_uint,
nbytes_out_lo32: *mut ::std::os::raw::c_uint,
nbytes_out_hi32: *mut ::std::os::raw::c_uint,
);
}
extern "C" {
pub fn BZ2_bzBuffToBuffCompress(
dest: *mut ::std::os::raw::c_char,
destLen: *mut ::std::os::raw::c_uint,
source: *mut ::std::os::raw::c_char,
sourceLen: ::std::os::raw::c_uint,
blockSize100k: ::std::os::raw::c_int,
verbosity: ::std::os::raw::c_int,
workFactor: ::std::os::raw::c_int,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn BZ2_bzBuffToBuffDecompress(
dest: *mut ::std::os::raw::c_char,
destLen: *mut ::std::os::raw::c_uint,
source: *mut ::std::os::raw::c_char,
sourceLen: ::std::os::raw::c_uint,
small: ::std::os::raw::c_int,
verbosity: ::std::os::raw::c_int,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn BZ2_bzlibVersion() -> *const ::std::os::raw::c_char;
}
extern "C" {
pub fn BZ2_bzopen(
path: *const ::std::os::raw::c_char,
mode: *const ::std::os::raw::c_char,
) -> *mut BZFILE;
}
extern "C" {
pub fn BZ2_bzdopen(
fd: ::std::os::raw::c_int,
mode: *const ::std::os::raw::c_char,
) -> *mut BZFILE;
}
extern "C" {
pub fn BZ2_bzread(
b: *mut BZFILE,
buf: *mut ::std::os::raw::c_void,
len: ::std::os::raw::c_int,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn BZ2_bzwrite(
b: *mut BZFILE,
buf: *mut ::std::os::raw::c_void,
len: ::std::os::raw::c_int,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn BZ2_bzflush(b: *mut BZFILE) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn BZ2_bzclose(b: *mut BZFILE);
}
extern "C" {
pub fn BZ2_bzerror(
b: *mut BZFILE,
errnum: *mut ::std::os::raw::c_int,
) -> *const ::std::os::raw::c_char;
}
pub type __builtin_va_list = [__va_list_tag; 1usize];
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct __va_list_tag {
pub gp_offset: ::std::os::raw::c_uint,
pub fp_offset: ::std::os::raw::c_uint,
pub overflow_arg_area: *mut ::std::os::raw::c_void,
pub reg_save_area: *mut ::std::os::raw::c_void,
}
#[test]
fn bindgen_test_layout___va_list_tag() {
assert_eq!(
::std::mem::size_of::<__va_list_tag>(),
24usize,
concat!("Size of: ", stringify!(__va_list_tag))
);
assert_eq!(
::std::mem::align_of::<__va_list_tag>(),
8usize,
concat!("Alignment of ", stringify!(__va_list_tag))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<__va_list_tag>())).gp_offset as *const _ as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(__va_list_tag),
"::",
stringify!(gp_offset)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<__va_list_tag>())).fp_offset as *const _ as usize },
4usize,
concat!(
"Offset of field: ",
stringify!(__va_list_tag),
"::",
stringify!(fp_offset)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<__va_list_tag>())).overflow_arg_area as *const _ as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(__va_list_tag),
"::",
stringify!(overflow_arg_area)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<__va_list_tag>())).reg_save_area as *const _ as usize },
16usize,
concat!(
"Offset of field: ",
stringify!(__va_list_tag),
"::",
stringify!(reg_save_area)
)
);
}
| 28.956238 | 100 | 0.539678 |
2397ae5291f07478778b915244cf98ee301e4c3a | 7,776 | use tarantool::tlua::{
Lua,
AnyLuaValue,
AnyHashableLuaValue,
AnyLuaString,
};
pub fn read_numbers() {
let lua = Lua::new();
lua.set("a", "-2");
lua.set("b", 3.5f32);
lua.set("c", -2.0f32);
let x: AnyLuaValue = lua.get("a").unwrap();
assert_eq!(x, AnyLuaValue::LuaString("-2".to_owned()));
let y: AnyLuaValue = lua.get("b").unwrap();
assert_eq!(y, AnyLuaValue::LuaNumber(3.5));
let z: AnyLuaValue = lua.get("c").unwrap();
assert_eq!(z, AnyLuaValue::LuaNumber(-2.0));
}
pub fn read_hashable_numbers() {
let lua = Lua::new();
lua.set("a", -2.0f32);
lua.set("b", 4.0f32);
lua.set("c", "4");
let x: AnyHashableLuaValue = lua.get("a").unwrap();
assert_eq!(x, AnyHashableLuaValue::LuaNumber(-2));
let y: AnyHashableLuaValue = lua.get("b").unwrap();
assert_eq!(y, AnyHashableLuaValue::LuaNumber(4));
let z: AnyHashableLuaValue = lua.get("c").unwrap();
assert_eq!(z, AnyHashableLuaValue::LuaString("4".to_owned()));
}
pub fn read_strings() {
let lua = Lua::new();
lua.set("a", "hello");
lua.set("b", "3x");
lua.set("c", "false");
let x: AnyLuaValue = lua.get("a").unwrap();
assert_eq!(x, AnyLuaValue::LuaString("hello".to_string()));
let y: AnyLuaValue = lua.get("b").unwrap();
assert_eq!(y, AnyLuaValue::LuaString("3x".to_string()));
let z: AnyLuaValue = lua.get("c").unwrap();
assert_eq!(z, AnyLuaValue::LuaString("false".to_string()));
}
pub fn read_hashable_strings() {
let lua = Lua::new();
lua.set("a", "hello");
lua.set("b", "3x");
lua.set("c", "false");
let x: AnyHashableLuaValue = lua.get("a").unwrap();
assert_eq!(x, AnyHashableLuaValue::LuaString("hello".to_string()));
let y: AnyHashableLuaValue = lua.get("b").unwrap();
assert_eq!(y, AnyHashableLuaValue::LuaString("3x".to_string()));
let z: AnyHashableLuaValue = lua.get("c").unwrap();
assert_eq!(z, AnyHashableLuaValue::LuaString("false".to_string()));
}
pub fn read_booleans() {
let lua = Lua::new();
lua.set("a", true);
lua.set("b", false);
let x: AnyLuaValue = lua.get("a").unwrap();
assert_eq!(x, AnyLuaValue::LuaBoolean(true));
let y: AnyLuaValue = lua.get("b").unwrap();
assert_eq!(y, AnyLuaValue::LuaBoolean(false));
}
pub fn read_hashable_booleans() {
let lua = Lua::new();
lua.set("a", true);
lua.set("b", false);
let x: AnyHashableLuaValue = lua.get("a").unwrap();
assert_eq!(x, AnyHashableLuaValue::LuaBoolean(true));
let y: AnyHashableLuaValue = lua.get("b").unwrap();
assert_eq!(y, AnyHashableLuaValue::LuaBoolean(false));
}
pub fn read_tables() {
let lua = Lua::new();
lua.exec("
a = {x = 12, y = 19}
b = {z = a, w = 'test string'}
c = {'first', 'second'}
").unwrap();
fn get<'a>(table: &'a AnyLuaValue, key: &str) -> &'a AnyLuaValue {
let test_key = AnyLuaValue::LuaString(key.to_owned());
match *table {
AnyLuaValue::LuaArray(ref vec) => {
let &(_, ref value) = vec.iter().find(|&&(ref key, _)| key == &test_key).expect("key not found");
value
},
_ => panic!("not a table")
}
}
fn get_numeric(table: &AnyLuaValue, key: usize) -> &AnyLuaValue {
let test_key = AnyLuaValue::LuaNumber(key as f64);
match *table {
AnyLuaValue::LuaArray(ref vec) => {
let &(_, ref value) = vec.iter().find(|&&(ref key, _)| key == &test_key).expect("key not found");
value
},
_ => panic!("not a table")
}
}
let a: AnyLuaValue = lua.get("a").unwrap();
assert_eq!(get(&a, "x"), &AnyLuaValue::LuaNumber(12.0));
assert_eq!(get(&a, "y"), &AnyLuaValue::LuaNumber(19.0));
let b: AnyLuaValue = lua.get("b").unwrap();
assert_eq!(get(get(&b, "z"), "x"), get(&a, "x"));
assert_eq!(get(get(&b, "z"), "y"), get(&a, "y"));
let c: AnyLuaValue = lua.get("c").unwrap();
assert_eq!(get_numeric(&c, 1), &AnyLuaValue::LuaString("first".to_owned()));
assert_eq!(get_numeric(&c, 2), &AnyLuaValue::LuaString("second".to_owned()));
}
pub fn read_hashable_tables() {
let lua = Lua::new();
lua.exec("
a = {x = 12, y = 19}
b = {z = a, w = 'test string'}
c = {'first', 'second'}
").unwrap();
fn get<'a>(table: &'a AnyHashableLuaValue, key: &str) -> &'a AnyHashableLuaValue {
let test_key = AnyHashableLuaValue::LuaString(key.to_owned());
match *table {
AnyHashableLuaValue::LuaArray(ref vec) => {
let &(_, ref value) = vec.iter().find(|&&(ref key, _)| key == &test_key).expect("key not found");
value
},
_ => panic!("not a table")
}
}
fn get_numeric(table: &AnyHashableLuaValue, key: usize) -> &AnyHashableLuaValue {
let test_key = AnyHashableLuaValue::LuaNumber(key as i32);
match *table {
AnyHashableLuaValue::LuaArray(ref vec) => {
let &(_, ref value) = vec.iter().find(|&&(ref key, _)| key == &test_key).expect("key not found");
value
},
_ => panic!("not a table")
}
}
let a: AnyHashableLuaValue = lua.get("a").unwrap();
assert_eq!(get(&a, "x"), &AnyHashableLuaValue::LuaNumber(12));
assert_eq!(get(&a, "y"), &AnyHashableLuaValue::LuaNumber(19));
let b: AnyHashableLuaValue = lua.get("b").unwrap();
assert_eq!(get(get(&b, "z"), "x"), get(&a, "x"));
assert_eq!(get(get(&b, "z"), "y"), get(&a, "y"));
let c: AnyHashableLuaValue = lua.get("c").unwrap();
assert_eq!(get_numeric(&c, 1), &AnyHashableLuaValue::LuaString("first".to_owned()));
assert_eq!(get_numeric(&c, 2), &AnyHashableLuaValue::LuaString("second".to_owned()));
}
pub fn push_numbers() {
let lua = Lua::new();
lua.set("a", AnyLuaValue::LuaNumber(3.0));
let x: i32 = lua.get("a").unwrap();
assert_eq!(x, 3);
}
pub fn push_hashable_numbers() {
let lua = Lua::new();
lua.set("a", AnyHashableLuaValue::LuaNumber(3));
let x: i32 = lua.get("a").unwrap();
assert_eq!(x, 3);
}
pub fn push_strings() {
let lua = Lua::new();
lua.set("a", AnyLuaValue::LuaString("hello".to_string()));
let x: String = lua.get("a").unwrap();
assert_eq!(x, "hello");
}
pub fn push_hashable_strings() {
let lua = Lua::new();
lua.set("a", AnyHashableLuaValue::LuaString("hello".to_string()));
let x: String = lua.get("a").unwrap();
assert_eq!(x, "hello");
}
pub fn push_booleans() {
let lua = Lua::new();
lua.set("a", AnyLuaValue::LuaBoolean(true));
let x: bool = lua.get("a").unwrap();
assert_eq!(x, true);
}
pub fn push_hashable_booleans() {
let lua = Lua::new();
lua.set("a", AnyHashableLuaValue::LuaBoolean(true));
let x: bool = lua.get("a").unwrap();
assert_eq!(x, true);
}
pub fn push_nil() {
let lua = Lua::new();
lua.set("a", AnyLuaValue::LuaNil);
let x: Option<i32> = lua.get("a");
assert!(x.is_none(),
"x is a Some value when it should be a None value. X: {:?}",
x);
}
pub fn push_hashable_nil() {
let lua = Lua::new();
lua.set("a", AnyHashableLuaValue::LuaNil);
let x: Option<i32> = lua.get("a");
assert!(x.is_none(),
"x is a Some value when it should be a None value. X: {:?}",
x);
}
pub fn non_utf_8_string() {
let lua = Lua::new();
let a = lua.eval::<AnyLuaValue>(r"return '\xff\xfe\xff\xfe'").unwrap();
match a {
AnyLuaValue::LuaAnyString(AnyLuaString(v)) => {
assert_eq!(Vec::from(&b"\xff\xfe\xff\xfe"[..]), v);
},
_ => panic!("Decoded to wrong variant"),
}
}
| 28.379562 | 113 | 0.565201 |
2265a87a052a958a459582f38821892737563bb4 | 5,706 | extern crate utime;
use std::path::Path;
use chrono::{DateTime, FixedOffset, Utc};
use delta;
#[test]
fn read_simple_table() {
let table = delta::open_table("./tests/data/simple_table").unwrap();
assert_eq!(table.version, 4);
assert_eq!(table.min_writer_version, 2);
assert_eq!(table.min_reader_version, 1);
assert_eq!(
table.get_files(),
&vec![
"part-00000-c1777d7d-89d9-4790-b38a-6ee7e24456b1-c000.snappy.parquet",
"part-00001-7891c33d-cedc-47c3-88a6-abcfb049d3b4-c000.snappy.parquet",
"part-00004-315835fe-fb44-4562-98f6-5e6cfa3ae45d-c000.snappy.parquet",
"part-00007-3a0e4727-de0d-41b6-81ef-5223cf40f025-c000.snappy.parquet",
"part-00000-2befed33-c358-4768-a43c-3eda0d2a499d-c000.snappy.parquet",
]
);
let tombstones = table.get_tombstones();
assert_eq!(tombstones.len(), 31);
assert_eq!(
tombstones[0],
delta::action::Remove {
path: "part-00006-63ce9deb-bc0f-482d-b9a1-7e717b67f294-c000.snappy.parquet".to_string(),
deletionTimestamp: 1587968596250,
dataChange: true
}
);
}
#[test]
fn read_simple_table_with_version() {
let mut table = delta::open_table_with_version("./tests/data/simple_table", 0).unwrap();
assert_eq!(table.version, 0);
assert_eq!(table.min_writer_version, 2);
assert_eq!(table.min_reader_version, 1);
assert_eq!(
table.get_files(),
&vec![
"part-00000-a72b1fb3-f2df-41fe-a8f0-e65b746382dd-c000.snappy.parquet",
"part-00001-c506e79a-0bf8-4e2b-a42b-9731b2e490ae-c000.snappy.parquet",
"part-00003-508ae4aa-801c-4c2c-a923-f6f89930a5c1-c000.snappy.parquet",
"part-00004-80938522-09c0-420c-861f-5a649e3d9674-c000.snappy.parquet",
"part-00006-63ce9deb-bc0f-482d-b9a1-7e717b67f294-c000.snappy.parquet",
"part-00007-94f725e2-3963-4b00-9e83-e31021a93cf9-c000.snappy.parquet",
],
);
table = delta::open_table_with_version("./tests/data/simple_table", 2).unwrap();
assert_eq!(table.version, 2);
assert_eq!(table.min_writer_version, 2);
assert_eq!(table.min_reader_version, 1);
assert_eq!(
table.get_files(),
&vec![
"part-00000-c1777d7d-89d9-4790-b38a-6ee7e24456b1-c000.snappy.parquet",
"part-00001-7891c33d-cedc-47c3-88a6-abcfb049d3b4-c000.snappy.parquet",
"part-00003-53f42606-6cda-4f13-8d07-599a21197296-c000.snappy.parquet",
"part-00004-315835fe-fb44-4562-98f6-5e6cfa3ae45d-c000.snappy.parquet",
"part-00006-46f2ff20-eb5d-4dda-8498-7bfb2940713b-c000.snappy.parquet",
"part-00007-3a0e4727-de0d-41b6-81ef-5223cf40f025-c000.snappy.parquet",
]
);
table = delta::open_table_with_version("./tests/data/simple_table", 3).unwrap();
assert_eq!(table.version, 3);
assert_eq!(table.min_writer_version, 2);
assert_eq!(table.min_reader_version, 1);
assert_eq!(
table.get_files(),
&vec![
"part-00000-c1777d7d-89d9-4790-b38a-6ee7e24456b1-c000.snappy.parquet",
"part-00001-7891c33d-cedc-47c3-88a6-abcfb049d3b4-c000.snappy.parquet",
"part-00004-315835fe-fb44-4562-98f6-5e6cfa3ae45d-c000.snappy.parquet",
"part-00007-3a0e4727-de0d-41b6-81ef-5223cf40f025-c000.snappy.parquet",
"part-00000-f17fcbf5-e0dc-40ba-adae-ce66d1fcaef6-c000.snappy.parquet",
"part-00001-bb70d2ba-c196-4df2-9c85-f34969ad3aa9-c000.snappy.parquet",
],
);
}
fn ds_to_ts(ds: &str) -> i64 {
let fixed_dt = DateTime::<FixedOffset>::parse_from_rfc3339(ds).unwrap();
DateTime::<Utc>::from(fixed_dt).timestamp()
}
#[test]
fn time_travel_by_ds() {
// git does not preserve mtime, so we need to manually set it in the test
let log_dir = "./tests/data/simple_table/_delta_log";
let log_mtime_pair = vec![
("00000000000000000000.json", "2020-05-01T22:47:31-07:00"),
("00000000000000000001.json", "2020-05-02T22:47:31-07:00"),
("00000000000000000002.json", "2020-05-03T22:47:31-07:00"),
("00000000000000000003.json", "2020-05-04T22:47:31-07:00"),
("00000000000000000004.json", "2020-05-05T22:47:31-07:00"),
];
for (fname, ds) in log_mtime_pair {
let ts = ds_to_ts(ds) as u64;
let path = format!("{}/{}", log_dir, fname);
utime::set_file_times(Path::new(&path), ts, ts).unwrap();
}
let mut table =
delta::open_table_with_ds("./tests/data/simple_table", "2020-05-01T00:47:31-07:00")
.unwrap();
assert_eq!(table.version, 0);
table = delta::open_table_with_ds("./tests/data/simple_table", "2020-05-02T22:47:31-07:00")
.unwrap();
assert_eq!(table.version, 1);
table = delta::open_table_with_ds("./tests/data/simple_table", "2020-05-02T23:47:31-07:00")
.unwrap();
assert_eq!(table.version, 1);
table = delta::open_table_with_ds("./tests/data/simple_table", "2020-05-03T22:47:31-07:00")
.unwrap();
assert_eq!(table.version, 2);
table = delta::open_table_with_ds("./tests/data/simple_table", "2020-05-04T22:47:31-07:00")
.unwrap();
assert_eq!(table.version, 3);
table = delta::open_table_with_ds("./tests/data/simple_table", "2020-05-05T21:47:31-07:00")
.unwrap();
assert_eq!(table.version, 3);
table = delta::open_table_with_ds("./tests/data/simple_table", "2020-05-05T22:47:31-07:00")
.unwrap();
assert_eq!(table.version, 4);
table = delta::open_table_with_ds("./tests/data/simple_table", "2020-05-25T22:47:31-07:00")
.unwrap();
assert_eq!(table.version, 4);
}
| 39.902098 | 100 | 0.650193 |
e444f4bbf829326c0b37de6147bf283ca307ba1b | 5,192 | #[doc = r"Value read from the register"]
pub struct R {
bits: u8,
}
#[doc = r"Value to write to the register"]
pub struct W {
bits: u8,
}
impl super::TYPE0 {
#[doc = r"Modifies the contents of the register"]
#[inline(always)]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
self.register.set(f(&R { bits }, &mut W { bits }).bits);
}
#[doc = r"Reads the contents of the register"]
#[inline(always)]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r"Writes to the register"]
#[inline(always)]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
self.register.set(
f(&mut W {
bits: Self::reset_value(),
})
.bits,
);
}
#[doc = r"Reset value of the register"]
#[inline(always)]
pub const fn reset_value() -> u8 {
0
}
#[doc = r"Writes the reset value to the register"]
#[inline(always)]
pub fn reset(&self) {
self.register.set(Self::reset_value())
}
}
#[doc = "Possible values of the field `USB_TYPE0_SPEED`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum USB_TYPE0_SPEEDR {
#[doc = "High"]
USB_TYPE0_SPEED_HIGH,
#[doc = "Full"]
USB_TYPE0_SPEED_FULL,
#[doc = "Low"]
USB_TYPE0_SPEED_LOW,
#[doc = r"Reserved"]
_Reserved(u8),
}
impl USB_TYPE0_SPEEDR {
#[doc = r"Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
match *self {
USB_TYPE0_SPEEDR::USB_TYPE0_SPEED_HIGH => 1,
USB_TYPE0_SPEEDR::USB_TYPE0_SPEED_FULL => 2,
USB_TYPE0_SPEEDR::USB_TYPE0_SPEED_LOW => 3,
USB_TYPE0_SPEEDR::_Reserved(bits) => bits,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline(always)]
pub fn _from(value: u8) -> USB_TYPE0_SPEEDR {
match value {
1 => USB_TYPE0_SPEEDR::USB_TYPE0_SPEED_HIGH,
2 => USB_TYPE0_SPEEDR::USB_TYPE0_SPEED_FULL,
3 => USB_TYPE0_SPEEDR::USB_TYPE0_SPEED_LOW,
i => USB_TYPE0_SPEEDR::_Reserved(i),
}
}
#[doc = "Checks if the value of the field is `USB_TYPE0_SPEED_HIGH`"]
#[inline(always)]
pub fn is_usb_type0_speed_high(&self) -> bool {
*self == USB_TYPE0_SPEEDR::USB_TYPE0_SPEED_HIGH
}
#[doc = "Checks if the value of the field is `USB_TYPE0_SPEED_FULL`"]
#[inline(always)]
pub fn is_usb_type0_speed_full(&self) -> bool {
*self == USB_TYPE0_SPEEDR::USB_TYPE0_SPEED_FULL
}
#[doc = "Checks if the value of the field is `USB_TYPE0_SPEED_LOW`"]
#[inline(always)]
pub fn is_usb_type0_speed_low(&self) -> bool {
*self == USB_TYPE0_SPEEDR::USB_TYPE0_SPEED_LOW
}
}
#[doc = "Values that can be written to the field `USB_TYPE0_SPEED`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum USB_TYPE0_SPEEDW {
#[doc = "High"]
USB_TYPE0_SPEED_HIGH,
#[doc = "Full"]
USB_TYPE0_SPEED_FULL,
#[doc = "Low"]
USB_TYPE0_SPEED_LOW,
}
impl USB_TYPE0_SPEEDW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline(always)]
pub fn _bits(&self) -> u8 {
match *self {
USB_TYPE0_SPEEDW::USB_TYPE0_SPEED_HIGH => 1,
USB_TYPE0_SPEEDW::USB_TYPE0_SPEED_FULL => 2,
USB_TYPE0_SPEEDW::USB_TYPE0_SPEED_LOW => 3,
}
}
}
#[doc = r"Proxy"]
pub struct _USB_TYPE0_SPEEDW<'a> {
w: &'a mut W,
}
impl<'a> _USB_TYPE0_SPEEDW<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: USB_TYPE0_SPEEDW) -> &'a mut W {
unsafe { self.bits(variant._bits()) }
}
#[doc = "High"]
#[inline(always)]
pub fn usb_type0_speed_high(self) -> &'a mut W {
self.variant(USB_TYPE0_SPEEDW::USB_TYPE0_SPEED_HIGH)
}
#[doc = "Full"]
#[inline(always)]
pub fn usb_type0_speed_full(self) -> &'a mut W {
self.variant(USB_TYPE0_SPEEDW::USB_TYPE0_SPEED_FULL)
}
#[doc = "Low"]
#[inline(always)]
pub fn usb_type0_speed_low(self) -> &'a mut W {
self.variant(USB_TYPE0_SPEEDW::USB_TYPE0_SPEED_LOW)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits &= !(3 << 6);
self.w.bits |= ((value as u8) & 3) << 6;
self.w
}
}
impl R {
#[doc = r"Value of the register as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
#[doc = "Bits 6:7 - Operating Speed"]
#[inline(always)]
pub fn usb_type0_speed(&self) -> USB_TYPE0_SPEEDR {
USB_TYPE0_SPEEDR::_from(((self.bits >> 6) & 3) as u8)
}
}
impl W {
#[doc = r"Writes raw bits to the register"]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u8) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 6:7 - Operating Speed"]
#[inline(always)]
pub fn usb_type0_speed(&mut self) -> _USB_TYPE0_SPEEDW {
_USB_TYPE0_SPEEDW { w: self }
}
}
| 28.844444 | 73 | 0.575116 |
f55b11e5c99b73e96398c96086934d61d3ff774a | 12,535 | use crate::{policy, Inbound};
use linkerd_app_core::{
identity, io,
svc::{self, ExtractParam, InsertParam, Param},
tls,
transport::{self, metrics::SensorIo, ClientAddr, OrigDstAddr, Remote, ServerAddr},
transport_header::{self, NewTransportHeaderServer, SessionProtocol, TransportHeader},
Conditional, Error, NameAddr, Result,
};
use std::{convert::TryFrom, fmt::Debug};
use thiserror::Error;
use tracing::{debug_span, info_span};
/// Creates I/O errors when a connection cannot be forwarded because no transport
/// header was present.
#[derive(Debug, Default)]
struct RefusedNoHeader;
#[derive(Debug, Error)]
#[error("direct connections must be mutually authenticated")]
pub struct RefusedNoIdentity(());
#[derive(Debug, Error)]
#[error("a named target must be provided on gateway connections")]
struct RefusedNoTarget;
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct Local {
addr: Remote<ServerAddr>,
client_id: tls::ClientId,
permit: policy::Permit,
}
#[derive(Debug, Clone)]
pub struct GatewayTransportHeader {
pub target: NameAddr,
pub protocol: Option<SessionProtocol>,
pub client: ClientInfo,
pub policy: policy::AllowPolicy,
}
/// Client connections *must* have an identity.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ClientInfo {
pub client_id: tls::ClientId,
pub alpn: Option<tls::NegotiatedProtocol>,
pub client_addr: Remote<ClientAddr>,
pub local_addr: OrigDstAddr,
}
type TlsIo<I> = tls::server::Io<identity::ServerIo<tls::server::DetectIo<I>>, I>;
type FwdIo<I> = SensorIo<io::PrefixedIo<TlsIo<I>>>;
pub type GatewayIo<I> = io::EitherIo<FwdIo<I>, SensorIo<TlsIo<I>>>;
#[derive(Clone)]
struct TlsParams {
timeout: tls::server::Timeout,
identity: identity::Server,
}
impl<N> Inbound<N> {
/// Builds a stack that handles connections that target the proxy's inbound port
/// (i.e. without an SO_ORIGINAL_DST setting). This port behaves differently from
/// the main proxy stack:
///
/// 1. Protocol detection is always performed;
/// 2. TLS is required;
/// 3. A transport header is expected. It's not strictly required, as
/// gateways may need to accept HTTP requests from older proxy versions
pub(crate) fn push_direct<T, I, NSvc, G, GSvc>(
self,
policies: impl policy::CheckPolicy + Clone + Send + Sync + 'static,
gateway: G,
) -> Inbound<svc::ArcNewTcp<T, I>>
where
T: Param<Remote<ClientAddr>> + Param<OrigDstAddr>,
T: Clone + Send + 'static,
I: io::AsyncRead + io::AsyncWrite + io::Peek + io::PeerAddr,
I: Debug + Send + Sync + Unpin + 'static,
N: svc::NewService<Local, Service = NSvc> + Clone + Send + Sync + Unpin + 'static,
NSvc: svc::Service<FwdIo<I>, Response = ()> + Clone + Send + Sync + Unpin + 'static,
NSvc::Error: Into<Error>,
NSvc::Future: Send + Unpin,
G: svc::NewService<GatewayTransportHeader, Service = GSvc>
+ Clone
+ Send
+ Sync
+ Unpin
+ 'static,
GSvc: svc::Service<GatewayIo<I>, Response = ()> + Send + 'static,
GSvc::Error: Into<Error>,
GSvc::Future: Send,
{
self.map_stack(|config, rt, inner| {
let detect_timeout = config.proxy.detect_protocol_timeout;
let identity = rt
.identity
.server()
.with_alpn(vec![transport_header::PROTOCOL.into()])
.expect("TLS credential store must be held");
inner
.push(transport::metrics::NewServer::layer(
rt.metrics.proxy.transport.clone(),
))
.instrument(|_: &_| debug_span!("opaque"))
// When the transport header is present, it may be used for either local TCP
// forwarding, or we may be processing an HTTP gateway connection. HTTP gateway
// connections that have a transport header must provide a target name as a part of
// the header.
.push_switch(
{
let policies = policies.clone();
move |(h, client): (TransportHeader, ClientInfo)| -> Result<_> {
match h {
TransportHeader {
port,
name: None,
protocol: None,
} => {
// When the transport header targets an alternate port (but does
// not identify an alternate target name), we check the new
// target's policy to determine whether the client can access
// it.
let addr = (client.local_addr.ip(), port).into();
let allow = policies.check_policy(OrigDstAddr(addr))?;
let tls = tls::ConditionalServerTls::Some(
tls::ServerTls::Established {
client_id: Some(client.client_id.clone()),
negotiated_protocol: client.alpn,
},
);
let permit =
allow.check_authorized(client.client_addr, &tls)?;
Ok(svc::Either::A(Local {
addr: Remote(ServerAddr(addr)),
permit,
client_id: client.client_id,
}))
}
TransportHeader {
port,
name: Some(name),
protocol,
} => {
// When the transport header provides an alternate target, the
// connection is a gateway connection. We check the _gateway
// address's_ policy to determine whether the client is
// authorized to use this gateway.
let policy = policies.check_policy(client.local_addr)?;
Ok(svc::Either::B(GatewayTransportHeader {
target: NameAddr::from((name, port)),
protocol,
client,
policy,
}))
}
TransportHeader {
name: None,
protocol: Some(_),
..
} => Err(RefusedNoTarget.into()),
}
}
},
// HTTP detection is not necessary in this case, since the transport
// header indicates the connection's HTTP version.
svc::stack(gateway.clone())
.push_on_service(svc::MapTargetLayer::new(io::EitherIo::Left))
.push(transport::metrics::NewServer::layer(
rt.metrics.proxy.transport.clone(),
))
.instrument(
|g: &GatewayTransportHeader| info_span!("gateway", dst = %g.target),
)
.into_inner(),
)
// Use ALPN to determine whether a transport header should be read.
.push(NewTransportHeaderServer::layer(detect_timeout))
.push_request_filter(|client: ClientInfo| -> Result<_> {
if client.header_negotiated() {
Ok(client)
} else {
Err(RefusedNoTarget.into())
}
})
// Build a ClientInfo target for each accepted connection. Refuse the
// connection if it doesn't include an mTLS identity.
.push_request_filter(ClientInfo::try_from)
.push(svc::ArcNewService::layer())
.push(tls::NewDetectTls::<identity::Server, _, _>::layer(
TlsParams {
timeout: tls::server::Timeout(detect_timeout),
identity,
},
))
.push_on_service(svc::BoxService::layer())
.push(svc::ArcNewService::layer())
})
}
}
// === impl ClientInfo ===
impl<T> TryFrom<(tls::ConditionalServerTls, T)> for ClientInfo
where
T: Param<OrigDstAddr>,
T: Param<Remote<ClientAddr>>,
{
type Error = Error;
fn try_from((tls, addrs): (tls::ConditionalServerTls, T)) -> Result<Self, Self::Error> {
match tls {
Conditional::Some(tls::ServerTls::Established {
client_id: Some(client_id),
negotiated_protocol,
}) => Ok(Self {
client_id,
alpn: negotiated_protocol,
client_addr: addrs.param(),
local_addr: addrs.param(),
}),
_ => Err(RefusedNoIdentity(()).into()),
}
}
}
impl ClientInfo {
fn header_negotiated(&self) -> bool {
self.alpn
.as_ref()
.map(|tls::NegotiatedProtocol(p)| p == transport_header::PROTOCOL)
.unwrap_or(false)
}
}
// === impl Local ===
impl Param<Remote<ServerAddr>> for Local {
fn param(&self) -> Remote<ServerAddr> {
self.addr
}
}
impl Param<transport::labels::Key> for Local {
    /// Builds the inbound-server metrics key for a locally-targeted
    /// connection. The TLS status is always reported as established with the
    /// connection's client identity and no negotiated protocol.
    fn param(&self) -> transport::labels::Key {
        let tls = tls::ConditionalServerTls::Some(tls::ServerTls::Established {
            client_id: Some(self.client_id.clone()),
            negotiated_protocol: None,
        });
        transport::labels::Key::inbound_server(
            tls,
            self.addr.into(),
            self.permit.labels.server.clone(),
        )
    }
}
// === impl GatewayTransportHeader ===
impl Param<transport::labels::Key> for GatewayTransportHeader {
fn param(&self) -> transport::labels::Key {
transport::labels::Key::inbound_server(
self.param(),
self.client.local_addr.into(),
self.policy.server_label(),
)
}
}
impl Param<policy::AllowPolicy> for GatewayTransportHeader {
    /// Clones the `AllowPolicy` associated with this gateway connection.
    fn param(&self) -> policy::AllowPolicy {
        self.policy.clone()
    }
}
impl Param<OrigDstAddr> for GatewayTransportHeader {
    /// Exposes the client's original destination (local) address.
    fn param(&self) -> OrigDstAddr {
        self.client.local_addr
    }
}
impl Param<Remote<ClientAddr>> for GatewayTransportHeader {
    /// Exposes the remote (client-side) address of the connection.
    fn param(&self) -> Remote<ClientAddr> {
        self.client.client_addr
    }
}
impl Param<tls::ConditionalServerTls> for GatewayTransportHeader {
    /// Reports the connection's TLS state: always established, with the
    /// client's identity and whatever ALPN protocol was negotiated.
    fn param(&self) -> tls::ConditionalServerTls {
        let established = tls::ServerTls::Established {
            client_id: Some(self.client.client_id.clone()),
            negotiated_protocol: self.client.alpn.clone(),
        };
        tls::ConditionalServerTls::Some(established)
    }
}
// === impl RefusedNoHeader ===
impl From<RefusedNoHeader> for Error {
fn from(_: RefusedNoHeader) -> Error {
Error::from(io::Error::new(
io::ErrorKind::ConnectionRefused,
"Non-transport-header connection refused",
))
}
}
// === TlsParams ===
impl<T> ExtractParam<tls::server::Timeout, T> for TlsParams {
    /// Supplies the configured TLS detection timeout for any target.
    #[inline]
    fn extract_param(&self, _: &T) -> tls::server::Timeout {
        self.timeout
    }
}
impl<T> ExtractParam<identity::Server, T> for TlsParams {
    /// Supplies a clone of the server identity for any target.
    #[inline]
    fn extract_param(&self, _: &T) -> identity::Server {
        self.identity.clone()
    }
}
impl<T> InsertParam<tls::ConditionalServerTls, T> for TlsParams {
    type Target = (tls::ConditionalServerTls, T);

    /// Pairs the detected TLS state with the inner target, producing the
    /// tuple consumed by downstream `TryFrom` impls (e.g. `ClientInfo`).
    #[inline]
    fn insert_param(&self, tls: tls::ConditionalServerTls, target: T) -> Self::Target {
        (tls, target)
    }
}
| 37.642643 | 100 | 0.510251 |
f5f487cd10fee335dbdcbd1fcb5fad4f293c5b91 | 543 | // vim: tw=80
//! A method that consumes self
#![deny(warnings)]
use mockall::*;
#[allow(unused)]
// Exercises mocking of methods that take `self` by value (consuming methods).
struct MethodByValue {}

#[allow(unused)]
#[automock]
impl MethodByValue {
    // Consumes `self` immutably and returns a value.
    fn foo(self, _x: u32) -> i64 {0}
    // Consumes `self` mutably; return type is unit.
    fn bar(mut self) {}
}
#[test]
fn immutable() {
    // A consuming (`self`-by-value) call still dispatches through the
    // recorded expectation.
    let mut by_value = MockMethodByValue::new();
    by_value.expect_foo().returning(|arg| 1 + i64::from(arg));
    assert_eq!(5, by_value.foo(4));
}
#[test]
fn mutable() {
    // A `mut self`-consuming method can be mocked and invoked like any other.
    let mut mock = MockMethodByValue::new();
    mock.expect_bar()
        .returning(|| ());
    mock.bar();
}
| 16.96875 | 44 | 0.583794 |
/// Prints the 2nd through 10th words of the opening of *A Tale of Two Cities*
/// in upper case.
///
/// The original collected the split into a `Vec` only to slice it with a
/// hard-coded `v[1..10]`, which allocates needlessly and would panic if the
/// text had fewer than ten words. Iterating lazily with `skip(1).take(9)`
/// produces the identical output with no allocation and no panic path.
fn main() {
    let string = "It was the best of times, it was the worst of times, it was \
                  the age of wisdom, it was the age of foolishness, it was the epoch of \
                  belief, it was the epoch of incredulity, it was the season of Light, \
                  it was the season of Darkness, it was the spring of hope, it was the \
                  winter of despair, we had everything before us, we had nothing \
                  before us, we were all going direct to Heaven, we were all going \
                  direct the other way—in short, the period was so far like the \
                  present period, that some of its noisiest authorities insisted on \
                  its being received, for good or for evil, in the superlative degree \
                  of comparison only.";
    // Words at split indices 1..10 (the 2nd through 10th), uppercased.
    for word in string.split(' ').skip(1).take(9) {
        println!("{}", word.to_uppercase());
    }
}
| 46.052632 | 79 | 0.637714 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.